summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rw-r--r--test/Analysis/MemorySSA/pr36883.ll38
-rw-r--r--test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll22
-rw-r--r--test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll70
-rw-r--r--test/CodeGen/AArch64/falkor-hwpf-fix.mir25
-rw-r--r--test/CodeGen/AArch64/inlineasm-S-constraint.ll20
-rw-r--r--test/CodeGen/AArch64/spill-stack-realignment.mir35
-rw-r--r--test/CodeGen/AMDGPU/ctpop16.ll334
-rw-r--r--test/CodeGen/ARM/peephole-phi.mir36
-rw-r--r--test/CodeGen/Hexagon/ifcvt-diamond-ret.mir25
-rw-r--r--test/CodeGen/MIR/PowerPC/ifcvt-diamond-ret.mir34
-rw-r--r--test/CodeGen/Mips/const-mult.ll645
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/calls.ll188
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir58
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir59
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll649
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll138
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll113
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/unsupported-micromips.ll5
-rw-r--r--test/CodeGen/Mips/indirect-jump-hazard/unsupported-mips32.ll5
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-bad-l1.ll13
-rw-r--r--test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll10
-rw-r--r--test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir198
-rw-r--r--test/CodeGen/PowerPC/no-dup-of-bdnz.ll75
-rw-r--r--test/CodeGen/PowerPC/pr35402.ll18
-rw-r--r--test/CodeGen/Thumb/PR36658.mir359
-rw-r--r--test/CodeGen/X86/GlobalISel/add-scalar.ll14
-rw-r--r--test/CodeGen/X86/O0-pipeline.ll2
-rw-r--r--test/CodeGen/X86/clobber-fi0.ll37
-rw-r--r--test/CodeGen/X86/cmpxchg-clobber-flags.ll363
-rw-r--r--test/CodeGen/X86/copy-eflags.ll357
-rw-r--r--test/CodeGen/X86/domain-reassignment-implicit-def.ll24
-rw-r--r--test/CodeGen/X86/domain-reassignment-test.ll37
-rw-r--r--test/CodeGen/X86/eflags-copy-expansion.mir64
-rw-r--r--test/CodeGen/X86/fast-isel-shift.ll12
-rw-r--r--test/CodeGen/X86/flags-copy-lowering.mir555
-rw-r--r--test/CodeGen/X86/ipra-reg-usage.ll2
-rw-r--r--test/CodeGen/X86/mul-i1024.ll11163
-rw-r--r--test/CodeGen/X86/peephole-na-phys-copy-folding.ll109
-rw-r--r--test/CodeGen/X86/pr37264.ll12
-rw-r--r--test/CodeGen/X86/win64_frame.ll334
-rw-r--r--test/CodeGen/X86/x86-repmov-copy-eflags.ll11
-rw-r--r--test/DebugInfo/X86/dbg-value-inlined-parameter.ll6
-rw-r--r--test/DebugInfo/X86/live-debug-vars-discard-invalid.mir141
-rw-r--r--test/ExecutionEngine/RuntimeDyld/PowerPC/Inputs/ppc64_elf_module_b.s42
-rw-r--r--test/ExecutionEngine/RuntimeDyld/PowerPC/ppc64_elf.s47
-rw-r--r--test/MC/ELF/cfi-large-model.s68
-rw-r--r--test/MC/Mips/unsupported-relocation.s13
-rw-r--r--test/Transforms/ArgumentPromotion/musttail.ll45
-rw-r--r--test/Transforms/CallSiteSplitting/musttail.ll109
-rw-r--r--test/Transforms/DeadArgElim/musttail-caller.ll16
-rw-r--r--test/Transforms/GlobalOpt/musttail_cc.ll34
-rw-r--r--test/Transforms/IPConstantProp/musttail-call.ll58
-rw-r--r--test/Transforms/InstCombine/gep-addrspace.ll19
-rw-r--r--test/Transforms/JumpThreading/header-succ.ll99
-rw-r--r--test/Transforms/MergeFunc/inline-asm.ll53
-rw-r--r--test/Transforms/MergeFunc/weak-small.ll16
56 files changed, 10823 insertions, 6211 deletions
diff --git a/test/Analysis/MemorySSA/pr36883.ll b/test/Analysis/MemorySSA/pr36883.ll
new file mode 100644
index 0000000000000..8411b0c228b8e
--- /dev/null
+++ b/test/Analysis/MemorySSA/pr36883.ll
@@ -0,0 +1,38 @@
+; RUN: opt -basicaa -memoryssa -analyze < %s 2>&1 -S | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>,verify<memoryssa>' -S < %s 2>&1 | FileCheck %s
+;
+; We weren't properly considering the args in callsites in equality or hashing.
+
+target triple = "armv7-dcg-linux-gnueabi"
+
+; CHECK-LABEL: define <8 x i16> @vpx_idct32_32_neon
+define <8 x i16> @vpx_idct32_32_neon(i8* %p, <8 x i16> %v) {
+entry:
+; CHECK: MemoryUse(liveOnEntry)
+ %load1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 2) #4 ; load CSE replacement
+
+; CHECK: 1 = MemoryDef(liveOnEntry)
+ call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %p, <8 x i16> %v, i32 2) #4 ; clobber
+
+ %p_next = getelementptr inbounds i8, i8* %p, i32 16
+; CHECK: MemoryUse(liveOnEntry)
+ %load2 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p_next, i32 2) #4 ; non-aliasing load needed to trigger bug
+
+; CHECK: MemoryUse(1)
+ %load3 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 2) #4 ; load CSE removed
+
+ %add = add <8 x i16> %load1, %load2
+ %ret = add <8 x i16> %add, %load3
+ ret <8 x i16> %ret
+}
+
+; Function Attrs: argmemonly nounwind readonly
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) #2
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) #1
+
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { argmemonly nounwind readonly }
+attributes #3 = { nounwind readnone }
+attributes #4 = { nounwind }
diff --git a/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
index 14beb1ae9c367..1032a6d620bae 100644
--- a/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
+++ b/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -28,6 +28,28 @@ return: ; preds = %if.then172, %cond.e
ret void
}
+; Avoid an assert/bad codegen in LD1LANEPOST lowering by not forming
+; LD1LANEPOST ISD nodes with a non-constant lane index.
+define <4 x i32> @f2(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2, i32 %idx) {
+ %L0 = load i32, i32* %p
+ %p1 = getelementptr i32, i32* %p, i64 1
+ %L1 = load i32, i32* %p1
+ %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+ %vret = insertelement <4 x i32> %v, i32 %L0, i32 %idx
+ store i32 %L1, i32 *%p
+ ret <4 x i32> %vret
+}
+
+; Check that a cycle is avoided during isel between the LD1LANEPOST instruction and the load of %L1.
+define <4 x i32> @f3(i32 *%p, <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2) {
+ %L0 = load i32, i32* %p
+ %p1 = getelementptr i32, i32* %p, i64 1
+ %L1 = load i32, i32* %p1
+ %v = select <4 x i1> %m, <4 x i32> %v1, <4 x i32> %v2
+ %vret = insertelement <4 x i32> %v, i32 %L0, i32 %L1
+ ret <4 x i32> %vret
+}
+
; Function Attrs: nounwind readnone
declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) #1
diff --git a/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
index 2fb9d3b2d0309..664078fb7e942 100644
--- a/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
+++ b/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
@@ -1,27 +1,31 @@
-; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s -check-prefix=CYCLONE --check-prefix=ALL
-; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=kryo < %s | FileCheck %s -check-prefix=KRYO --check-prefix=ALL
-; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=falkor < %s | FileCheck %s -check-prefix=FALKOR --check-prefix=ALL
+; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone < %s | FileCheck %s -check-prefixes=ALL,CYCLONE
+; RUN: llc -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 < %s | FileCheck %s -check-prefixes=CYCLONE-FULLFP16
+; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=exynos-m1 < %s | FileCheck %s -check-prefixes=ALL,OTHERS
+; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=exynos-m3 < %s | FileCheck %s -check-prefixes=ALL,OTHERS
+; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=kryo < %s | FileCheck %s -check-prefixes=ALL,OTHERS
+; RUN: llc -mtriple=aarch64-gnu-linux -mcpu=falkor < %s | FileCheck %s -check-prefixes=ALL,OTHERS
-; rdar://11481771
-; rdar://13713797
+declare void @bar(half, float, double, <2 x double>)
+declare void @bari(i32, i32)
+declare void @barl(i64, i64)
+declare void @barf(float, float)
define void @t1() nounwind ssp {
entry:
; ALL-LABEL: t1:
; ALL-NOT: fmov
-; CYCLONE: fmov d0, xzr
-; CYCLONE: fmov d1, xzr
+; ALL: ldr h0,{{.*}}
+; CYCLONE: fmov s1, wzr
; CYCLONE: fmov d2, xzr
-; CYCLONE: fmov d3, xzr
-; KRYO: movi v0.2d, #0000000000000000
-; KRYO: movi v1.2d, #0000000000000000
-; KRYO: movi v2.2d, #0000000000000000
-; KRYO: movi v3.2d, #0000000000000000
-; FALKOR: movi v0.2d, #0000000000000000
-; FALKOR: movi v1.2d, #0000000000000000
-; FALKOR: movi v2.2d, #0000000000000000
-; FALKOR: movi v3.2d, #0000000000000000
- tail call void @bar(double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00) nounwind
+; CYCLONE: movi.16b v3, #0
+; CYCLONE-FULLFP16: fmov h0, wzr
+; CYCLONE-FULLFP16: fmov s1, wzr
+; CYCLONE-FULLFP16: fmov d2, xzr
+; CYCLONE-FULLFP16: movi.16b v3, #0
+; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
+; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
+; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
+ tail call void @bar(half 0.000000e+00, float 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>) nounwind
ret void
}
@@ -29,8 +33,8 @@ define void @t2() nounwind ssp {
entry:
; ALL-LABEL: t2:
; ALL-NOT: mov w0, wzr
-; ALL: mov w0, #0
-; ALL: mov w1, #0
+; ALL: mov w{{[0-3]+}}, #0
+; ALL: mov w{{[0-3]+}}, #0
tail call void @bari(i32 0, i32 0) nounwind
ret void
}
@@ -39,8 +43,8 @@ define void @t3() nounwind ssp {
entry:
; ALL-LABEL: t3:
; ALL-NOT: mov x0, xzr
-; ALL: mov x0, #0
-; ALL: mov x1, #0
+; ALL: mov x{{[0-3]+}}, #0
+; ALL: mov x{{[0-3]+}}, #0
tail call void @barl(i64 0, i64 0) nounwind
ret void
}
@@ -48,26 +52,21 @@ entry:
define void @t4() nounwind ssp {
; ALL-LABEL: t4:
; ALL-NOT: fmov
-; CYCLONE: fmov s0, wzr
-; CYCLONE: fmov s1, wzr
-; KRYO: movi v0.2d, #0000000000000000
-; KRYO: movi v1.2d, #0000000000000000
-; FALKOR: movi v0.2d, #0000000000000000
-; FALKOR: movi v1.2d, #0000000000000000
+; CYCLONE: fmov s{{[0-3]+}}, wzr
+; CYCLONE: fmov s{{[0-3]+}}, wzr
+; CYCLONE-FULLFP16: fmov s{{[0-3]+}}, wzr
+; CYCLONE-FULLFP16: fmov s{{[0-3]+}}, wzr
+; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
+; OTHERS: movi v{{[0-3]+}}.2d, #0000000000000000
tail call void @barf(float 0.000000e+00, float 0.000000e+00) nounwind
ret void
}
-declare void @bar(double, double, double, double)
-declare void @bari(i32, i32)
-declare void @barl(i64, i64)
-declare void @barf(float, float)
-
; We used to produce spills+reloads for a Q register with zero cycle zeroing
; enabled.
; ALL-LABEL: foo:
-; ALL-NOT: str {{q[0-9]+}}
-; ALL-NOT: ldr {{q[0-9]+}}
+; ALL-NOT: str q{{[0-9]+}}
+; ALL-NOT: ldr q{{[0-9]+}}
define double @foo(i32 %n) {
entry:
br label %for.body
@@ -90,8 +89,7 @@ for.end:
define <2 x i64> @t6() {
; ALL-LABEL: t6:
; CYCLONE: movi.16b v0, #0
-; KRYO: movi v0.2d, #0000000000000000
-; FALKOR: movi v0.2d, #0000000000000000
+; OTHERS: movi v0.2d, #0000000000000000
ret <2 x i64> zeroinitializer
}
diff --git a/test/CodeGen/AArch64/falkor-hwpf-fix.mir b/test/CodeGen/AArch64/falkor-hwpf-fix.mir
index 38622ae0e49ab..28b19f8776858 100644
--- a/test/CodeGen/AArch64/falkor-hwpf-fix.mir
+++ b/test/CodeGen/AArch64/falkor-hwpf-fix.mir
@@ -353,3 +353,28 @@ body: |
bb.1:
RET_ReallyLR
...
+---
+# Check that non-base registers are considered live when finding a
+# scratch register by making sure we don't use %x2 for the scratch
+# register for the inserted ORRXrs.
+# CHECK-LABEL: name: hwpf_offreg
+# CHECK: %x3 = ORRXrs %xzr, %x1, 0
+# CHECK: %w10 = LDRWroX %x3, %x2, 0, 0
+name: hwpf_offreg
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %w0, %x1, %x2, %x17, %x18
+
+ %w10 = LDRWroX %x1, %x2, 0, 0 :: ("aarch64-strided-access" load 4)
+
+ %x2 = ORRXrs %xzr, %x10, 0
+ %w26 = LDRWroX %x1, %x2, 0, 0
+
+ %w0 = SUBWri %w0, 1, 0
+ %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
+ Bcc 9, %bb.0, implicit %nzcv
+
+ bb.1:
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/AArch64/inlineasm-S-constraint.ll b/test/CodeGen/AArch64/inlineasm-S-constraint.ll
new file mode 100644
index 0000000000000..3fb2a3f32cea3
--- /dev/null
+++ b/test/CodeGen/AArch64/inlineasm-S-constraint.ll
@@ -0,0 +1,20 @@
+;RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+@var = global i32 0
+define void @test_inline_constraint_S() {
+; CHECK-LABEL: test_inline_constraint_S:
+ call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
+ call void asm sideeffect "add x0, x0, :lo12:$0", "S"(i32* @var)
+; CHECK: adrp x0, var
+; CHECK: add x0, x0, :lo12:var
+ ret void
+}
+define i32 @test_inline_constraint_S_label(i1 %in) {
+; CHECK-LABEL: test_inline_constraint_S_label:
+ call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
+; CHECK: adr x0, .Ltmp{{[0-9]+}}
+br i1 %in, label %loc, label %loc2
+loc:
+ ret i32 0
+loc2:
+ ret i32 42
+}
diff --git a/test/CodeGen/AArch64/spill-stack-realignment.mir b/test/CodeGen/AArch64/spill-stack-realignment.mir
new file mode 100644
index 0000000000000..fe85f4b64027b
--- /dev/null
+++ b/test/CodeGen/AArch64/spill-stack-realignment.mir
@@ -0,0 +1,35 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass=prologepilog %s -o - | FileCheck %s
+
+# Ensure references to scavenged stack slots in the CSR area use the
+# FP as a base when the stack pointer must be aligned to something
+# larger than required by the target. This is necessary because the
+# alignment padding area is between the CSR area and the SP, so the SP
+# cannot be used to reference the CSR area.
+name: test
+tracksRegLiveness: true
+frameInfo:
+ maxAlignment: 64
+# CHECK: stack:
+# CHECK: id: 0, name: '', type: default, offset: -64, size: 4, alignment: 64
+# CHECK-NEXT: stack-id: 0
+# CHECK-NEXT: local-offset: -64
+# CHECK: id: 1, name: '', type: default, offset: -20, size: 4, alignment: 4
+# CHECK-NEXT: stack-id: 0
+# CHECK-NEXT: local-offset: -68
+stack:
+ - { id: 0, size: 4, alignment: 64, local-offset: -64 }
+ - { id: 1, size: 4, alignment: 4, local-offset: -68 }
+
+# CHECK: body:
+# CHECK: %sp = ANDXri killed %{{x[0-9]+}}, 7865
+# CHECK: STRSui %s0, %sp, 0
+# CHECK: STURSi %s0, %fp, -4
+body: |
+ bb.0.entry:
+ liveins: %s0
+
+ STRSui %s0, %stack.0, 0
+ STRSui %s0, %stack.1, 0
+ ; Force preserve a CSR to create a hole in the CSR stack region.
+ %x28 = IMPLICIT_DEF
+ RET_ReallyLR
diff --git a/test/CodeGen/AMDGPU/ctpop16.ll b/test/CodeGen/AMDGPU/ctpop16.ll
new file mode 100644
index 0000000000000..8236ac07a680e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/ctpop16.ll
@@ -0,0 +1,334 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=FUNC -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=FUNC -check-prefix=VI %s
+; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s
+
+declare i16 @llvm.ctpop.i16(i16) nounwind readnone
+declare <2 x i16> @llvm.ctpop.v2i16(<2 x i16>) nounwind readnone
+declare <4 x i16> @llvm.ctpop.v4i16(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>) nounwind readnone
+
+declare i32 @llvm.r600.read.tidig.x() nounwind readnone
+
+; FUNC-LABEL: {{^}}s_ctpop_i16:
+; GCN: s_load_dword [[SVAL:s[0-9]+]],
+; GCN: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[SVAL]]
+; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_short [[VRESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @s_ctpop_i16(i16 addrspace(1)* noalias %out, i16 %val) nounwind {
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ store i16 %ctpop, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; XXX - Why 0 in register?
+; FUNC-LABEL: {{^}}v_ctpop_i16:
+; GCN: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 0
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ store i16 %ctpop, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_add_chain_i16:
+; SI: buffer_load_ushort [[VAL0:v[0-9]+]],
+; SI: buffer_load_ushort [[VAL1:v[0-9]+]],
+; VI: flat_load_ushort [[VAL0:v[0-9]+]],
+; VI: flat_load_ushort [[VAL1:v[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[MIDRESULT:v[0-9]+]], [[VAL1]], 0
+; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL0]], [[MIDRESULT]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_add_chain_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in0, i16 addrspace(1)* noalias %in1) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in0.gep = getelementptr i16, i16 addrspace(1)* %in0, i32 %tid
+ %in1.gep = getelementptr i16, i16 addrspace(1)* %in1, i32 %tid
+ %val0 = load volatile i16, i16 addrspace(1)* %in0.gep, align 4
+ %val1 = load volatile i16, i16 addrspace(1)* %in1.gep, align 4
+ %ctpop0 = call i16 @llvm.ctpop.i16(i16 %val0) nounwind readnone
+ %ctpop1 = call i16 @llvm.ctpop.i16(i16 %val1) nounwind readnone
+ %add = add i16 %ctpop0, %ctpop1
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i16:
+; GCN: {{buffer|flat}}_load_ushort [[VAL0:v[0-9]+]],
+; GCN: s_waitcnt
+; GCN-NEXT: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+define amdgpu_kernel void @v_ctpop_add_sgpr_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in, i16 %sval) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 %ctpop, %sval
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_v2i16:
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_v2i16(<2 x i16> addrspace(1)* noalias %out, <2 x i16> addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
+ %val = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep, align 8
+ %ctpop = call <2 x i16> @llvm.ctpop.v2i16(<2 x i16> %val) nounwind readnone
+ store <2 x i16> %ctpop, <2 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_v4i16:
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_v4i16(<4 x i16> addrspace(1)* noalias %out, <4 x i16> addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
+ %val = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep, align 16
+ %ctpop = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %val) nounwind readnone
+ store <4 x i16> %ctpop, <4 x i16> addrspace(1)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_v8i16:
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_v8i16(<8 x i16> addrspace(1)* noalias %out, <8 x i16> addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <8 x i16>, <8 x i16> addrspace(1)* %in, i32 %tid
+ %val = load <8 x i16>, <8 x i16> addrspace(1)* %in.gep, align 32
+ %ctpop = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %val) nounwind readnone
+ store <8 x i16> %ctpop, <8 x i16> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_v16i16:
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: v_bcnt_u32_b32{{(_e64)*}}
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_v16i16(<16 x i16> addrspace(1)* noalias %out, <16 x i16> addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <16 x i16>, <16 x i16> addrspace(1)* %in, i32 %tid
+ %val = load <16 x i16>, <16 x i16> addrspace(1)* %in.gep, align 32
+ %ctpop = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %val) nounwind readnone
+ store <16 x i16> %ctpop, <16 x i16> addrspace(1)* %out, align 32
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_inline_constant:
+; GCN: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16_add_inline_constant(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 %ctpop, 4
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_inline_constant_inv:
+; GCN: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], 4
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16_add_inline_constant_inv(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 4, %ctpop
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_literal:
+; GCN-DAG: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; SI-DAG: v_mov_b32_e32 [[LIT:v[0-9]+]], 0x3e7
+; VI-DAG: s_movk_i32 [[LIT:s[0-9]+]], 0x3e7
+; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[LIT]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+define amdgpu_kernel void @v_ctpop_i16_add_literal(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 %ctpop, 999
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_var:
+; GCN-DAG: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16_add_var(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in, i16 %const) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 %ctpop, %const
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_var_inv:
+; GCN-DAG: {{buffer|flat}}_load_ushort [[VAL:v[0-9]+]],
+; GCN-DAG: s_load_dword [[VAR:s[0-9]+]],
+; GCN: v_bcnt_u32_b32{{(_e64)*}} [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16_add_var_inv(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in, i16 %const) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %add = add i16 %const, %ctpop
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_ctpop_i16_add_vvar_inv:
+; SI: buffer_load_ushort [[VAR:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
+; SI: buffer_load_ushort [[VAL:v[0-9]+]], v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
+; SI: v_bcnt_u32_b32_e32 [[RESULT:v[0-9]+]], [[VAR]], [[VAL]]
+; VI: flat_load_ushort [[VAR:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
+; VI: flat_load_ushort [[VAL:v[0-9]+]], v[{{[0-9]+:[0-9]+}}]
+; VI: v_bcnt_u32_b32 [[RESULT:v[0-9]+]], [[VAL]], [[VAR]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+
+; EG: BCNT_INT
+define amdgpu_kernel void @v_ctpop_i16_add_vvar_inv(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %in, i16 addrspace(1)* noalias %constptr) nounwind {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %val = load i16, i16 addrspace(1)* %in.gep, align 4
+ %ctpop = call i16 @llvm.ctpop.i16(i16 %val) nounwind readnone
+ %gep = getelementptr i16, i16 addrspace(1)* %constptr, i32 %tid
+ %const = load i16, i16 addrspace(1)* %gep, align 4
+ %add = add i16 %const, %ctpop
+ store i16 %add, i16 addrspace(1)* %out, align 4
+ ret void
+}
+
+; FIXME: We currently disallow SALU instructions in all branches,
+; but there are some cases when the should be allowed.
+
+; FUNC-LABEL: {{^}}ctpop_i16_in_br:
+; SI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xd
+; VI: s_load_dword [[VAL:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x34
+; GCN: s_bcnt1_i32_b32 [[SRESULT:s[0-9]+]], [[VAL]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_short [[RESULT]],
+; GCN: s_endpgm
+; EG: BCNT_INT
+define amdgpu_kernel void @ctpop_i16_in_br(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %ctpop_arg, i16 %cond) {
+entry:
+ %tmp0 = icmp eq i16 %cond, 0
+ br i1 %tmp0, label %if, label %else
+
+if:
+ %tmp2 = call i16 @llvm.ctpop.i16(i16 %ctpop_arg)
+ br label %endif
+
+else:
+ %tmp3 = getelementptr i16, i16 addrspace(1)* %in, i16 1
+ %tmp4 = load i16, i16 addrspace(1)* %tmp3
+ br label %endif
+
+endif:
+ %tmp5 = phi i16 [%tmp2, %if], [%tmp4, %else]
+ store i16 %tmp5, i16 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/ARM/peephole-phi.mir b/test/CodeGen/ARM/peephole-phi.mir
index 30343654dea19..54ae0115840bf 100644
--- a/test/CodeGen/ARM/peephole-phi.mir
+++ b/test/CodeGen/ARM/peephole-phi.mir
@@ -65,3 +65,39 @@ body: |
%4:gpr = PHI %0, %bb.1, %2, %bb.2
%5:spr = VMOVSR %4, 14, %noreg
...
+
+# The current implementation doesn't perform any transformations if undef
+# operands are involved.
+# CHECK-LABEL: name: func-undefops
+# CHECK: body: |
+# CHECK: bb.0:
+# CHECK: Bcc %bb.2, 1, undef %cpsr
+#
+# CHECK: bb.1:
+# CHECK: %0:gpr = VMOVRS undef %1:spr, 14, %noreg
+# CHECK: B %bb.3
+#
+# CHECK: bb.2:
+# CHECK: %2:gpr = VMOVRS undef %3:spr, 14, %noreg
+#
+# CHECK: bb.3:
+# CHECK: %4:gpr = PHI %0, %bb.1, %2, %bb.2
+# CHECK: %5:spr = VMOVSR %4, 14, %noreg
+---
+name: func-undefops
+tracksRegLiveness: true
+body: |
+ bb.0:
+ Bcc %bb.2, 1, undef %cpsr
+
+ bb.1:
+ %0:gpr = VMOVRS undef %1:spr, 14, %noreg
+ B %bb.3
+
+ bb.2:
+ %2:gpr = VMOVRS undef %3:spr, 14, %noreg
+
+ bb.3:
+ %4:gpr = PHI %0, %bb.1, %2, %bb.2
+ %5:spr = VMOVSR %4, 14, %noreg
+...
diff --git a/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir b/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
new file mode 100644
index 0000000000000..e896d9aaa9a41
--- /dev/null
+++ b/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
@@ -0,0 +1,25 @@
+# RUN: llc -march=hexagon -run-pass if-converter %s -o - | FileCheck %s
+
+# Make sure this gets if-converted and it doesn't crash.
+# CHECK-LABEL: bb.0
+# CHECK: PS_jmpret %r31
+# CHECK-NOT: bb.{{[1-9]+}}:
+
+---
+name: fred
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2
+ liveins: %r0
+ renamable %p0 = C2_cmpeqi killed renamable %r0, 0
+ J2_jumpf killed renamable %p0, %bb.2, implicit-def dead %pc
+
+ bb.1:
+ S4_storeiri_io undef renamable %r0, 0, 32768 :: (store 4 into `i32* undef`)
+ PS_jmpret %r31, implicit-def dead %pc
+
+ bb.2:
+ S4_storeiri_io undef renamable %r0, 0, 32768 :: (store 4 into `i32* undef`)
+ PS_jmpret %r31, implicit-def dead %pc
+...
diff --git a/test/CodeGen/MIR/PowerPC/ifcvt-diamond-ret.mir b/test/CodeGen/MIR/PowerPC/ifcvt-diamond-ret.mir
new file mode 100644
index 0000000000000..c63c055c3b31b
--- /dev/null
+++ b/test/CodeGen/MIR/PowerPC/ifcvt-diamond-ret.mir
@@ -0,0 +1,34 @@
+# RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -run-pass=if-converter %s -o - | FileCheck %s
+---
+name: foo
+body: |
+ bb.0:
+ liveins: %x0, %x3
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+
+ dead renamable %x3 = ANDIo8 killed renamable %x3, 1, implicit-def dead %cr0, implicit-def %cr0gt
+ %cr2lt = CROR %cr0gt, %cr0gt
+ BCn killed renamable %cr2lt, %bb.2
+ B %bb.1
+
+ bb.1:
+ renamable %x3 = LIS8 4096
+ MTLR8 %x0, implicit-def %lr8
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+ bb.2:
+ renamable %x3 = LIS8 4096
+ MTLR8 %x0, implicit-def %lr8
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+...
+
+# Diamond testcase with equivalent branches terminating in returns.
+
+# CHECK: body: |
+# CHECK: bb.0:
+# CHECK: dead renamable %x3 = ANDIo8 killed renamable %x3, 1, implicit-def dead %cr0, implicit-def %cr0gt
+# CHECK: %cr2lt = CROR %cr0gt, %cr0gt
+# CHECK: renamable %x3 = LIS8 4096
+# CHECK: MTLR8 %x0, implicit-def %lr8
+# CHECK: BLR8 implicit %lr8, implicit %rm, implicit %x3
+
diff --git a/test/CodeGen/Mips/const-mult.ll b/test/CodeGen/Mips/const-mult.ll
index 459aad61828c4..dc4f2f9c862bd 100644
--- a/test/CodeGen/Mips/const-mult.ll
+++ b/test/CodeGen/Mips/const-mult.ll
@@ -1,93 +1,626 @@
-; RUN: llc -march=mipsel < %s | FileCheck %s
-; RUN: llc -march=mips64el < %s | FileCheck %s -check-prefixes=CHECK,CHECK64
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=mipsel-mti-linux-gnu < %s | FileCheck %s -check-prefix=MIPS32
+; RUN: llc -mtriple=mips64el-mti-linux-gnu < %s | FileCheck %s -check-prefix=MIPS64
-; CHECK-LABEL: mul5_32:
-; CHECK: sll $[[R0:[0-9]+]], $4, 2
-; CHECK: addu ${{[0-9]+}}, $[[R0]], $4
define i32 @mul5_32(i32 signext %a) {
+; MIPS32-LABEL: mul5_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 2
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $1, $4
+;
+; MIPS64-LABEL: mul5_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 2
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $1, $4
entry:
%mul = mul nsw i32 %a, 5
ret i32 %mul
}
-; CHECK-LABEL: mul27_32:
-; CHECK-DAG: sll $[[R0:[0-9]+]], $4, 2
-; CHECK-DAG: addu $[[R1:[0-9]+]], $[[R0]], $4
-; CHECK-DAG: sll $[[R2:[0-9]+]], $4, 5
-; CHECK: subu ${{[0-9]+}}, $[[R2]], $[[R1]]
-
define i32 @mul27_32(i32 signext %a) {
+; MIPS32-LABEL: mul27_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 2
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 5
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: subu $2, $2, $1
+;
+; MIPS64-LABEL: mul27_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 2
+; MIPS64-NEXT: addu $1, $1, $4
+; MIPS64-NEXT: sll $2, $4, 5
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: subu $2, $2, $1
entry:
%mul = mul nsw i32 %a, 27
ret i32 %mul
}
-; CHECK-LABEL: muln2147483643_32:
-; CHECK-DAG: sll $[[R0:[0-9]+]], $4, 2
-; CHECK-DAG: addu $[[R1:[0-9]+]], $[[R0]], $4
-; CHECK-DAG: sll $[[R2:[0-9]+]], $4, 31
-; CHECK: addu ${{[0-9]+}}, $[[R2]], $[[R1]]
-
define i32 @muln2147483643_32(i32 signext %a) {
+; MIPS32-LABEL: muln2147483643_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 2
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 31
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: muln2147483643_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 2
+; MIPS64-NEXT: addu $1, $1, $4
+; MIPS64-NEXT: sll $2, $4, 31
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $2, $1
entry:
%mul = mul nsw i32 %a, -2147483643
ret i32 %mul
}
-; CHECK64-LABEL: muln9223372036854775805_64:
-; CHECK64-DAG: dsll $[[R0:[0-9]+]], $4, 1
-; CHECK64-DAG: daddu $[[R1:[0-9]+]], $[[R0]], $4
-; CHECK64-DAG: dsll $[[R2:[0-9]+]], $4, 63
-; CHECK64: daddu ${{[0-9]+}}, $[[R2]], $[[R1]]
-
define i64 @muln9223372036854775805_64(i64 signext %a) {
+; MIPS32-LABEL: muln9223372036854775805_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 1
+; MIPS32-NEXT: addu $2, $1, $4
+; MIPS32-NEXT: sltu $1, $2, $1
+; MIPS32-NEXT: srl $3, $4, 31
+; MIPS32-NEXT: sll $6, $5, 1
+; MIPS32-NEXT: or $3, $6, $3
+; MIPS32-NEXT: addu $3, $3, $5
+; MIPS32-NEXT: addu $1, $3, $1
+; MIPS32-NEXT: sll $3, $4, 31
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $3, $3, $1
+;
+; MIPS64-LABEL: muln9223372036854775805_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsll $1, $4, 1
+; MIPS64-NEXT: daddu $1, $1, $4
+; MIPS64-NEXT: dsll $2, $4, 63
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $2, $2, $1
entry:
%mul = mul nsw i64 %a, -9223372036854775805
ret i64 %mul
}
-; CHECK64-LABEL: muln170141183460469231731687303715884105725_128:
-; CHECK64-DAG: dsrl $[[R0:[0-9]+]], $4, 63
-; CHECK64-DAG: dsll $[[R1:[0-9]+]], $5, 1
-; CHECK64-DAG: or $[[R2:[0-9]+]], $[[R1]], $[[R0]]
-; CHECK64-DAG: daddu $[[R3:[0-9]+]], $[[R2]], $5
-; CHECK64-DAG: dsll $[[R4:[0-9]+]], $4, 1
-; CHECK64-DAG: daddu $[[R5:[0-9]+]], $[[R4]], $4
-; CHECK64-DAG: sltu $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK64-DAG: dsll $[[R7:[0-9]+]], $[[R6]], 32
-; CHECK64-DAG: dsrl $[[R8:[0-9]+]], $[[R7]], 32
-; CHECK64-DAG: daddu $[[R9:[0-9]+]], $[[R3]], $[[R8]]
-; CHECK64-DAG: dsll $[[R10:[0-9]+]], $4, 63
-; CHECK64: daddu ${{[0-9]+}}, $[[R10]], $[[R9]]
-
define i128 @muln170141183460469231731687303715884105725_128(i128 signext %a) {
+; MIPS32-LABEL: muln170141183460469231731687303715884105725_128:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 1
+; MIPS32-NEXT: addu $2, $1, $4
+; MIPS32-NEXT: sltu $1, $2, $1
+; MIPS32-NEXT: srl $3, $4, 31
+; MIPS32-NEXT: sll $8, $5, 1
+; MIPS32-NEXT: or $8, $8, $3
+; MIPS32-NEXT: addu $3, $8, $5
+; MIPS32-NEXT: addu $3, $3, $1
+; MIPS32-NEXT: sltu $9, $3, $8
+; MIPS32-NEXT: xor $8, $3, $8
+; MIPS32-NEXT: movz $9, $1, $8
+; MIPS32-NEXT: srl $1, $5, 31
+; MIPS32-NEXT: sll $5, $6, 1
+; MIPS32-NEXT: or $5, $5, $1
+; MIPS32-NEXT: addu $8, $5, $6
+; MIPS32-NEXT: addu $1, $8, $9
+; MIPS32-NEXT: sltu $5, $8, $5
+; MIPS32-NEXT: srl $6, $6, 31
+; MIPS32-NEXT: sll $9, $7, 1
+; MIPS32-NEXT: or $6, $9, $6
+; MIPS32-NEXT: addu $6, $6, $7
+; MIPS32-NEXT: addu $5, $6, $5
+; MIPS32-NEXT: sll $4, $4, 31
+; MIPS32-NEXT: sltu $6, $1, $8
+; MIPS32-NEXT: addu $5, $5, $6
+; MIPS32-NEXT: addu $5, $4, $5
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: move $4, $1
+;
+; MIPS64-LABEL: muln170141183460469231731687303715884105725_128:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsrl $1, $4, 63
+; MIPS64-NEXT: dsll $2, $5, 1
+; MIPS64-NEXT: or $1, $2, $1
+; MIPS64-NEXT: daddu $1, $1, $5
+; MIPS64-NEXT: dsll $3, $4, 1
+; MIPS64-NEXT: daddu $2, $3, $4
+; MIPS64-NEXT: sltu $3, $2, $3
+; MIPS64-NEXT: dsll $3, $3, 32
+; MIPS64-NEXT: dsrl $3, $3, 32
+; MIPS64-NEXT: daddu $1, $1, $3
+; MIPS64-NEXT: dsll $3, $4, 63
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $3, $3, $1
entry:
%mul = mul nsw i128 %a, -170141183460469231731687303715884105725
ret i128 %mul
}
-; CHECK64-LABEL: mul170141183460469231731687303715884105723_128:
-; CHECK64-DAG: dsrl $[[R0:[0-9]+]], $4, 62
-; CHECK64-DAG: dsll $[[R1:[0-9]+]], $5, 2
-; CHECK64-DAG: or $[[R2:[0-9]+]], $[[R1]], $[[R0]]
-; CHECK64-DAG: daddu $[[R3:[0-9]+]], $[[R2]], $5
-; CHECK64-DAG: dsll $[[R4:[0-9]+]], $4, 2
-; CHECK64-DAG: daddu $[[R5:[0-9]+]], $[[R4]], $4
-; CHECK64-DAG: sltu $[[R6:[0-9]+]], $[[R5]], $[[R4]]
-; CHECK64-DAG: dsll $[[R7:[0-9]+]], $[[R6]], 32
-; CHECK64-DAG: dsrl $[[R8:[0-9]+]], $[[R7]], 32
-; CHECK64-DAG: daddu $[[R9:[0-9]+]], $[[R3]], $[[R8]]
-; CHECK64-DAG: dsll $[[R10:[0-9]+]], $4, 63
-; CHECK64-DAG: dsubu $[[R11:[0-9]+]], $[[R10]], $[[R9]]
-; CHECK64-DAG: sltu $[[R12:[0-9]+]], $zero, $[[R5]]
-; CHECK64-DAG: dsll $[[R13:[0-9]+]], $[[R12]], 32
-; CHECK64-DAG: dsrl $[[R14:[0-9]+]], $[[R13]], 32
-; CHECK64-DAG: dsubu $[[R15:[0-9]+]], $[[R11]], $[[R14]]
-; CHECK64: dnegu ${{[0-9]+}}, $[[R5]]
-
define i128 @mul170141183460469231731687303715884105723_128(i128 signext %a) {
+; MIPS32-LABEL: mul170141183460469231731687303715884105723_128:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 2
+; MIPS32-NEXT: addu $2, $1, $4
+; MIPS32-NEXT: sltu $1, $2, $1
+; MIPS32-NEXT: srl $3, $4, 30
+; MIPS32-NEXT: sll $8, $5, 2
+; MIPS32-NEXT: or $3, $8, $3
+; MIPS32-NEXT: addu $8, $3, $5
+; MIPS32-NEXT: addu $8, $8, $1
+; MIPS32-NEXT: sltu $9, $8, $3
+; MIPS32-NEXT: xor $3, $8, $3
+; MIPS32-NEXT: sltu $10, $zero, $8
+; MIPS32-NEXT: sltu $11, $zero, $2
+; MIPS32-NEXT: movz $10, $11, $8
+; MIPS32-NEXT: movz $9, $1, $3
+; MIPS32-NEXT: srl $1, $5, 30
+; MIPS32-NEXT: sll $3, $6, 2
+; MIPS32-NEXT: or $1, $3, $1
+; MIPS32-NEXT: addu $3, $1, $6
+; MIPS32-NEXT: addu $5, $3, $9
+; MIPS32-NEXT: sll $4, $4, 31
+; MIPS32-NEXT: negu $9, $5
+; MIPS32-NEXT: sltu $12, $9, $10
+; MIPS32-NEXT: sltu $13, $5, $3
+; MIPS32-NEXT: sltu $1, $3, $1
+; MIPS32-NEXT: srl $3, $6, 30
+; MIPS32-NEXT: sll $6, $7, 2
+; MIPS32-NEXT: or $3, $6, $3
+; MIPS32-NEXT: addu $3, $3, $7
+; MIPS32-NEXT: addu $1, $3, $1
+; MIPS32-NEXT: addu $1, $1, $13
+; MIPS32-NEXT: subu $1, $4, $1
+; MIPS32-NEXT: sltu $3, $zero, $5
+; MIPS32-NEXT: subu $1, $1, $3
+; MIPS32-NEXT: subu $5, $1, $12
+; MIPS32-NEXT: subu $4, $9, $10
+; MIPS32-NEXT: negu $1, $8
+; MIPS32-NEXT: subu $3, $1, $11
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: negu $2, $2
+;
+; MIPS64-LABEL: mul170141183460469231731687303715884105723_128:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsrl $1, $4, 62
+; MIPS64-NEXT: dsll $2, $5, 2
+; MIPS64-NEXT: or $1, $2, $1
+; MIPS64-NEXT: daddu $1, $1, $5
+; MIPS64-NEXT: dsll $2, $4, 2
+; MIPS64-NEXT: daddu $5, $2, $4
+; MIPS64-NEXT: sltu $2, $5, $2
+; MIPS64-NEXT: dsll $2, $2, 32
+; MIPS64-NEXT: dsrl $2, $2, 32
+; MIPS64-NEXT: daddu $1, $1, $2
+; MIPS64-NEXT: dsll $2, $4, 63
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: sltu $2, $zero, $5
+; MIPS64-NEXT: dsll $2, $2, 32
+; MIPS64-NEXT: dsrl $2, $2, 32
+; MIPS64-NEXT: dsubu $3, $1, $2
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: dnegu $2, $5
entry:
%mul = mul nsw i128 %a, 170141183460469231731687303715884105723
ret i128 %mul
}
+
+define i32 @mul42949673_32(i32 %a) {
+; MIPS32-LABEL: mul42949673_32:
+; MIPS32: # %bb.0:
+; MIPS32-NEXT: sll $1, $4, 3
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 5
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 10
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 13
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 15
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 20
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 25
+; MIPS32-NEXT: sll $3, $4, 23
+; MIPS32-NEXT: addu $1, $3, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: mul42949673_32:
+; MIPS64: # %bb.0:
+; MIPS64-NEXT: sll $1, $4, 0
+; MIPS64-NEXT: sll $2, $1, 3
+; MIPS64-NEXT: addu $2, $2, $1
+; MIPS64-NEXT: sll $3, $1, 5
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 10
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 13
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 15
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 20
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 25
+; MIPS64-NEXT: sll $1, $1, 23
+; MIPS64-NEXT: addu $1, $1, $2
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $3, $1
+ %b = mul i32 %a, 42949673
+ ret i32 %b
+}
+
+define i64 @mul42949673_64(i64 %a) {
+; MIPS32-LABEL: mul42949673_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, 655
+; MIPS32-NEXT: ori $1, $1, 23593
+; MIPS32-NEXT: multu $4, $1
+; MIPS32-NEXT: mflo $2
+; MIPS32-NEXT: mfhi $1
+; MIPS32-NEXT: sll $3, $5, 3
+; MIPS32-NEXT: addu $3, $3, $5
+; MIPS32-NEXT: sll $4, $5, 5
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 10
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 13
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 15
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 20
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 25
+; MIPS32-NEXT: sll $5, $5, 23
+; MIPS32-NEXT: addu $3, $5, $3
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $3, $1, $3
+;
+; MIPS64-LABEL: mul42949673_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsll $1, $4, 3
+; MIPS64-NEXT: daddu $1, $1, $4
+; MIPS64-NEXT: dsll $2, $4, 5
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 10
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 13
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 15
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 20
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 25
+; MIPS64-NEXT: dsll $3, $4, 23
+; MIPS64-NEXT: daddu $1, $3, $1
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $2, $2, $1
+entry:
+ %b = mul i64 %a, 42949673
+ ret i64 %b
+}
+
+define i32 @mul22224078_32(i32 %a) {
+; MIPS32-LABEL: mul22224078_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 1
+; MIPS32-NEXT: sll $2, $4, 4
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 6
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 8
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 10
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 13
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 16
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 24
+; MIPS32-NEXT: sll $3, $4, 22
+; MIPS32-NEXT: sll $5, $4, 20
+; MIPS32-NEXT: sll $4, $4, 18
+; MIPS32-NEXT: subu $1, $4, $1
+; MIPS32-NEXT: addu $1, $5, $1
+; MIPS32-NEXT: addu $1, $3, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: mul22224078_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 0
+; MIPS64-NEXT: sll $2, $1, 1
+; MIPS64-NEXT: sll $3, $1, 4
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 6
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 8
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 10
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 13
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 16
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 24
+; MIPS64-NEXT: sll $4, $1, 22
+; MIPS64-NEXT: sll $5, $1, 20
+; MIPS64-NEXT: sll $1, $1, 18
+; MIPS64-NEXT: subu $1, $1, $2
+; MIPS64-NEXT: addu $1, $5, $1
+; MIPS64-NEXT: addu $1, $4, $1
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $3, $1
+entry:
+ %b = mul i32 %a, 22224078
+ ret i32 %b
+}
+
+define i64 @mul22224078_64(i64 %a) {
+; MIPS32-LABEL: mul22224078_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, 339
+; MIPS32-NEXT: ori $1, $1, 7374
+; MIPS32-NEXT: multu $4, $1
+; MIPS32-NEXT: mflo $2
+; MIPS32-NEXT: mfhi $1
+; MIPS32-NEXT: sll $3, $5, 1
+; MIPS32-NEXT: sll $4, $5, 4
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 6
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 8
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 10
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 13
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 16
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 24
+; MIPS32-NEXT: sll $6, $5, 22
+; MIPS32-NEXT: sll $7, $5, 20
+; MIPS32-NEXT: sll $5, $5, 18
+; MIPS32-NEXT: subu $3, $5, $3
+; MIPS32-NEXT: addu $3, $7, $3
+; MIPS32-NEXT: addu $3, $6, $3
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $3, $1, $3
+;
+; MIPS64-LABEL: mul22224078_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsll $1, $4, 1
+; MIPS64-NEXT: dsll $2, $4, 4
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 6
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 8
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 10
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 13
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 16
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 24
+; MIPS64-NEXT: dsll $3, $4, 22
+; MIPS64-NEXT: dsll $5, $4, 20
+; MIPS64-NEXT: dsll $4, $4, 18
+; MIPS64-NEXT: dsubu $1, $4, $1
+; MIPS64-NEXT: daddu $1, $5, $1
+; MIPS64-NEXT: daddu $1, $3, $1
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $2, $2, $1
+entry:
+ %b = mul i64 %a, 22224078
+ ret i64 %b
+}
+
+define i32 @mul22245375_32(i32 %a) {
+; MIPS32-LABEL: mul22245375_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 12
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 15
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 18
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 20
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 22
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 24
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: mul22245375_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 0
+; MIPS64-NEXT: sll $2, $1, 12
+; MIPS64-NEXT: addu $2, $2, $1
+; MIPS64-NEXT: sll $3, $1, 15
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 18
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 20
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 22
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $1, $1, 24
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $1, $2
+entry:
+ %b = mul i32 %a, 22245375
+ ret i32 %b
+}
+
+define i64 @mul22245375_64(i64 %a) {
+; MIPS32-LABEL: mul22245375_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lui $1, 339
+; MIPS32-NEXT: ori $1, $1, 28671
+; MIPS32-NEXT: multu $4, $1
+; MIPS32-NEXT: mflo $2
+; MIPS32-NEXT: mfhi $1
+; MIPS32-NEXT: sll $3, $5, 12
+; MIPS32-NEXT: addu $3, $3, $5
+; MIPS32-NEXT: sll $4, $5, 15
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 18
+; MIPS32-NEXT: subu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 20
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 22
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: sll $4, $5, 24
+; MIPS32-NEXT: addu $3, $4, $3
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $3, $1, $3
+;
+; MIPS64-LABEL: mul22245375_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsll $1, $4, 12
+; MIPS64-NEXT: daddu $1, $1, $4
+; MIPS64-NEXT: dsll $2, $4, 15
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 18
+; MIPS64-NEXT: dsubu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 20
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 22
+; MIPS64-NEXT: daddu $1, $2, $1
+; MIPS64-NEXT: dsll $2, $4, 24
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $2, $2, $1
+entry:
+ %b = mul i64 %a, 22245375
+ ret i64 %b
+}
+
+define i32 @mul25165824_32(i32 %a) {
+; MIPS32-LABEL: mul25165824_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 12
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 15
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 18
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 20
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 22
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 24
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: mul25165824_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 0
+; MIPS64-NEXT: sll $2, $1, 12
+; MIPS64-NEXT: addu $2, $2, $1
+; MIPS64-NEXT: sll $3, $1, 15
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 18
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 20
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 22
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $1, $1, 24
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $1, $2
+entry:
+ %b = mul i32 %a, 22245375
+ ret i32 %b
+}
+
+define i64 @mul25165824_64(i64 %a) {
+; MIPS32-LABEL: mul25165824_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: srl $1, $4, 9
+; MIPS32-NEXT: sll $2, $5, 23
+; MIPS32-NEXT: or $1, $2, $1
+; MIPS32-NEXT: srl $2, $4, 8
+; MIPS32-NEXT: sll $3, $5, 24
+; MIPS32-NEXT: or $2, $3, $2
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 23
+; MIPS32-NEXT: sll $3, $4, 24
+; MIPS32-NEXT: addu $2, $3, $2
+; MIPS32-NEXT: sltu $3, $2, $3
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $3, $1, $3
+;
+; MIPS64-LABEL: mul25165824_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: dsll $1, $4, 23
+; MIPS64-NEXT: dsll $2, $4, 24
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: daddu $2, $2, $1
+entry:
+ %b = mul i64 %a, 25165824
+ ret i64 %b
+}
+
+define i32 @mul33554432_32(i32 %a) {
+; MIPS32-LABEL: mul33554432_32:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: sll $1, $4, 12
+; MIPS32-NEXT: addu $1, $1, $4
+; MIPS32-NEXT: sll $2, $4, 15
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 18
+; MIPS32-NEXT: subu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 20
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 22
+; MIPS32-NEXT: addu $1, $2, $1
+; MIPS32-NEXT: sll $2, $4, 24
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: addu $2, $2, $1
+;
+; MIPS64-LABEL: mul33554432_32:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: sll $1, $4, 0
+; MIPS64-NEXT: sll $2, $1, 12
+; MIPS64-NEXT: addu $2, $2, $1
+; MIPS64-NEXT: sll $3, $1, 15
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 18
+; MIPS64-NEXT: subu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 20
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $3, $1, 22
+; MIPS64-NEXT: addu $2, $3, $2
+; MIPS64-NEXT: sll $1, $1, 24
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: addu $2, $1, $2
+entry:
+ %b = mul i32 %a, 22245375
+ ret i32 %b
+}
+
+define i64 @mul33554432_64(i64 %a) {
+; MIPS32-LABEL: mul33554432_64:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: srl $1, $4, 7
+; MIPS32-NEXT: sll $2, $5, 25
+; MIPS32-NEXT: or $3, $2, $1
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: sll $2, $4, 25
+;
+; MIPS64-LABEL: mul33554432_64:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: dsll $2, $4, 25
+entry:
+ %b = mul i64 %a, 33554432
+ ret i64 %b
+}
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/calls.ll b/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
new file mode 100644
index 0000000000000..20e89136d87c7
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/calls.ll
@@ -0,0 +1,188 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips-mti-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS32R2
+; RUN: llc < %s -mtriple=mips-img-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS32R6
+; RUN: llc < %s -mtriple=mips64-mti-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS64R2
+; RUN: llc < %s -mtriple=mips64-img-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS64R6
+
+; RUN: llc < %s -mtriple=mips-mti-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS32R2
+; RUN: llc < %s -mtriple=mips-img-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS32R6
+; RUN: llc < %s -mtriple=mips64-mti-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R2
+; RUN: llc < %s -mtriple=mips64-img-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R6
+
+define void @fooNonTail(void (i32)* nocapture %f1) nounwind {
+; MIPS32R2-LABEL: fooNonTail:
+; MIPS32R2: # %bb.0: # %entry
+; MIPS32R2-NEXT: addiu $sp, $sp, -24
+; MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS32R2-NEXT: move $1, $4
+; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: jalr.hb $25
+; MIPS32R2-NEXT: addiu $4, $zero, 13
+; MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS32R2-NEXT: jr $ra
+; MIPS32R2-NEXT: addiu $sp, $sp, 24
+;
+; MIPS32R6-LABEL: fooNonTail:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: addiu $sp, $sp, -24
+; MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; MIPS32R6-NEXT: move $1, $4
+; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: jalr.hb $25
+; MIPS32R6-NEXT: addiu $4, $zero, 13
+; MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; MIPS32R6-NEXT: jr $ra
+; MIPS32R6-NEXT: addiu $sp, $sp, 24
+;
+; MIPS64R2-LABEL: fooNonTail:
+; MIPS64R2: # %bb.0: # %entry
+; MIPS64R2-NEXT: daddiu $sp, $sp, -16
+; MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS64R2-NEXT: move $1, $4
+; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: jalr.hb $25
+; MIPS64R2-NEXT: daddiu $4, $zero, 13
+; MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS64R2-NEXT: jr $ra
+; MIPS64R2-NEXT: daddiu $sp, $sp, 16
+;
+; MIPS64R6-LABEL: fooNonTail:
+; MIPS64R6: # %bb.0: # %entry
+; MIPS64R6-NEXT: daddiu $sp, $sp, -16
+; MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; MIPS64R6-NEXT: move $1, $4
+; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: jalr.hb $25
+; MIPS64R6-NEXT: daddiu $4, $zero, 13
+; MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; MIPS64R6-NEXT: jr $ra
+; MIPS64R6-NEXT: daddiu $sp, $sp, 16
+;
+; PIC-MIPS32R2-LABEL: fooNonTail:
+; PIC-MIPS32R2: # %bb.0: # %entry
+; PIC-MIPS32R2-NEXT: addiu $sp, $sp, -24
+; PIC-MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; PIC-MIPS32R2-NEXT: move $1, $4
+; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: jalr.hb $25
+; PIC-MIPS32R2-NEXT: addiu $4, $zero, 13
+; PIC-MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; PIC-MIPS32R2-NEXT: jr $ra
+; PIC-MIPS32R2-NEXT: addiu $sp, $sp, 24
+;
+; PIC-MIPS32R6-LABEL: fooNonTail:
+; PIC-MIPS32R6: # %bb.0: # %entry
+; PIC-MIPS32R6-NEXT: addiu $sp, $sp, -24
+; PIC-MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; PIC-MIPS32R6-NEXT: move $1, $4
+; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: jalr.hb $25
+; PIC-MIPS32R6-NEXT: addiu $4, $zero, 13
+; PIC-MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; PIC-MIPS32R6-NEXT: jr $ra
+; PIC-MIPS32R6-NEXT: addiu $sp, $sp, 24
+;
+; PIC-MIPS64R2-LABEL: fooNonTail:
+; PIC-MIPS64R2: # %bb.0: # %entry
+; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, -16
+; PIC-MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; PIC-MIPS64R2-NEXT: move $1, $4
+; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: jalr.hb $25
+; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 13
+; PIC-MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; PIC-MIPS64R2-NEXT: jr $ra
+; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, 16
+;
+; PIC-MIPS64R6-LABEL: fooNonTail:
+; PIC-MIPS64R6: # %bb.0: # %entry
+; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, -16
+; PIC-MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; PIC-MIPS64R6-NEXT: move $1, $4
+; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: jalr.hb $25
+; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 13
+; PIC-MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; PIC-MIPS64R6-NEXT: jr $ra
+; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, 16
+entry:
+ call void %f1(i32 13) nounwind
+ ret void
+}
+
+define i32 @fooTail(i32 (i32)* nocapture %f1) nounwind {
+; MIPS32R2-LABEL: fooTail:
+; MIPS32R2: # %bb.0: # %entry
+; MIPS32R2-NEXT: move $1, $4
+; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: jr.hb $25
+; MIPS32R2-NEXT: addiu $4, $zero, 14
+;
+; MIPS32R6-LABEL: fooTail:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: move $1, $4
+; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: jr.hb $25
+; MIPS32R6-NEXT: addiu $4, $zero, 14
+;
+; MIPS64R2-LABEL: fooTail:
+; MIPS64R2: # %bb.0: # %entry
+; MIPS64R2-NEXT: move $1, $4
+; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: jr.hb $25
+; MIPS64R2-NEXT: daddiu $4, $zero, 14
+;
+; MIPS64R6-LABEL: fooTail:
+; MIPS64R6: # %bb.0: # %entry
+; MIPS64R6-NEXT: move $1, $4
+; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: jr.hb $25
+; MIPS64R6-NEXT: daddiu $4, $zero, 14
+;
+; PIC-MIPS32R2-LABEL: fooTail:
+; PIC-MIPS32R2: # %bb.0: # %entry
+; PIC-MIPS32R2-NEXT: move $1, $4
+; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: jr.hb $25
+; PIC-MIPS32R2-NEXT: addiu $4, $zero, 14
+;
+; PIC-MIPS32R6-LABEL: fooTail:
+; PIC-MIPS32R6: # %bb.0: # %entry
+; PIC-MIPS32R6-NEXT: move $1, $4
+; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: jr.hb $25
+; PIC-MIPS32R6-NEXT: addiu $4, $zero, 14
+;
+; PIC-MIPS64R2-LABEL: fooTail:
+; PIC-MIPS64R2: # %bb.0: # %entry
+; PIC-MIPS64R2-NEXT: move $1, $4
+; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: jr.hb $25
+; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 14
+;
+; PIC-MIPS64R6-LABEL: fooTail:
+; PIC-MIPS64R6: # %bb.0: # %entry
+; PIC-MIPS64R6-NEXT: move $1, $4
+; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: jr.hb $25
+; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 14
+entry:
+ %0 = tail call i32 %f1(i32 14) nounwind
+ ret i32 %0
+}
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir b/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
new file mode 100644
index 0000000000000..1c11d700b53ef
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir
@@ -0,0 +1,58 @@
+# RUN: not llc -mtriple=mips-mti-linux-gnu -mcpu=mips32r2 %s \
+# RUN: -start-after=expand-isel-pseudos -stop-after=expand-isel-pseudos \
+# RUN: -verify-machineinstrs -mattr=+use-indirect-jump-hazard -o - 2>&1 \
+# RUN: | FileCheck %s
+
+# Test that calls are checked when using indirect jump guards (hazard variant).
+
+# CHECK: Bad machine code: invalid instruction when using jump guards!
+--- |
+ define i32 @fooTail(i32 (i32)* nocapture %f1) {
+ entry:
+ %0 = tail call i32 %f1(i32 14)
+ ret i32 %0
+ }
+...
+---
+name: fooTail
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr32, preferred-register: '' }
+ - { id: 1, class: gpr32, preferred-register: '' }
+liveins:
+ - { reg: '%a0', virtual-reg: '%0' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %a0
+
+ %0:gpr32 = COPY %a0
+ %1:gpr32 = ADDiu $zero, 14
+ %a0 = COPY %1
+ TAILCALLREG %0, csr_o32, implicit-def dead %at, implicit %a0
+
+...
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir b/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
new file mode 100644
index 0000000000000..00e22b934bbc9
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
@@ -0,0 +1,59 @@
+# RUN: not llc -mtriple=mips-mti-linux-gnu -mcpu=mips32r2 %s \
+# RUN: -start-after=expand-isel-pseudos -stop-after=expand-isel-pseudos \
+# RUN: -verify-machineinstrs -mattr=+use-indirect-jump-hazard -o - 2>&1 \
+# RUN: | FileCheck %s
+
+# Test that tail calls are checked when using indirect jump guards (hazard variant).
+
+# CHECK: Bad machine code: invalid instruction when using jump guards!
+--- |
+ define i32 @fooTail(i32 (i32)* nocapture %f1) {
+ entry:
+ %0 = tail call i32 %f1(i32 14)
+ ret i32 %0
+ }
+
+...
+---
+name: fooTail
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr32, preferred-register: '' }
+ - { id: 1, class: gpr32, preferred-register: '' }
+liveins:
+ - { reg: '%a0', virtual-reg: '%0' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %a0
+
+ %0:gpr32 = COPY %a0
+ %1:gpr32 = ADDiu $zero, 14
+ %a0 = COPY %1
+ TAILCALLREG %0, csr_o32, implicit-def dead %at, implicit %a0
+
+...
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll b/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
new file mode 100644
index 0000000000000..c530dd614ef8d
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/jumptables.ll
@@ -0,0 +1,649 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=mips-mti-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS32R2
+; RUN: llc < %s -mtriple=mips-img-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS32R6
+; RUN: llc < %s -mtriple=mips64-mti-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS64R2
+; RUN: llc < %s -mtriple=mips64-img-linux-gnu -relocation-model=static \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=MIPS64R6
+
+; RUN: llc < %s -mtriple=mips-mti-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS32R2
+; RUN: llc < %s -mtriple=mips-img-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips32r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS32R6
+; RUN: llc < %s -mtriple=mips64-mti-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r2 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R2
+; RUN: llc < %s -mtriple=mips64-img-linux-gnu -relocation-model=pic \
+; RUN: -mips-tail-calls=1 -mcpu=mips64r6 -mattr=+use-indirect-jump-hazard \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=PIC-MIPS64R6
+
+@.str = private unnamed_addr constant [2 x i8] c"A\00", align 1
+@.str.1 = private unnamed_addr constant [2 x i8] c"B\00", align 1
+@.str.2 = private unnamed_addr constant [2 x i8] c"C\00", align 1
+@.str.3 = private unnamed_addr constant [2 x i8] c"D\00", align 1
+@.str.4 = private unnamed_addr constant [2 x i8] c"E\00", align 1
+@.str.5 = private unnamed_addr constant [2 x i8] c"F\00", align 1
+@.str.6 = private unnamed_addr constant [2 x i8] c"G\00", align 1
+@.str.7 = private unnamed_addr constant [1 x i8] zeroinitializer, align 1
+
+define i8* @_Z3fooi(i32 signext %Letter) {
+; MIPS32R2-LABEL: _Z3fooi:
+; MIPS32R2: # %bb.0: # %entry
+; MIPS32R2-NEXT: addiu $sp, $sp, -16
+; MIPS32R2-NEXT: .cfi_def_cfa_offset 16
+; MIPS32R2-NEXT: sltiu $1, $4, 7
+; MIPS32R2-NEXT: beqz $1, $BB0_3
+; MIPS32R2-NEXT: sw $4, 4($sp)
+; MIPS32R2-NEXT: $BB0_1: # %entry
+; MIPS32R2-NEXT: sll $1, $4, 2
+; MIPS32R2-NEXT: lui $2, %hi($JTI0_0)
+; MIPS32R2-NEXT: addu $1, $1, $2
+; MIPS32R2-NEXT: lw $1, %lo($JTI0_0)($1)
+; MIPS32R2-NEXT: jr.hb $1
+; MIPS32R2-NEXT: nop
+; MIPS32R2-NEXT: $BB0_2: # %sw.bb
+; MIPS32R2-NEXT: lui $1, %hi($.str)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_3: # %sw.epilog
+; MIPS32R2-NEXT: lui $1, %hi($.str.7)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.7)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_4: # %sw.bb1
+; MIPS32R2-NEXT: lui $1, %hi($.str.1)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.1)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_5: # %sw.bb2
+; MIPS32R2-NEXT: lui $1, %hi($.str.2)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.2)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_6: # %sw.bb3
+; MIPS32R2-NEXT: lui $1, %hi($.str.3)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.3)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_7: # %sw.bb4
+; MIPS32R2-NEXT: lui $1, %hi($.str.4)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.4)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_8: # %sw.bb5
+; MIPS32R2-NEXT: lui $1, %hi($.str.5)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.5)
+; MIPS32R2-NEXT: j $BB0_10
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_9: # %sw.bb6
+; MIPS32R2-NEXT: lui $1, %hi($.str.6)
+; MIPS32R2-NEXT: addiu $1, $1, %lo($.str.6)
+; MIPS32R2-NEXT: sw $1, 8($sp)
+; MIPS32R2-NEXT: $BB0_10: # %return
+; MIPS32R2-NEXT: lw $2, 8($sp)
+; MIPS32R2-NEXT: jr $ra
+; MIPS32R2-NEXT: addiu $sp, $sp, 16
+;
+; MIPS32R6-LABEL: _Z3fooi:
+; MIPS32R6: # %bb.0: # %entry
+; MIPS32R6-NEXT: addiu $sp, $sp, -16
+; MIPS32R6-NEXT: .cfi_def_cfa_offset 16
+; MIPS32R6-NEXT: sltiu $1, $4, 7
+; MIPS32R6-NEXT: beqz $1, $BB0_3
+; MIPS32R6-NEXT: sw $4, 4($sp)
+; MIPS32R6-NEXT: $BB0_1: # %entry
+; MIPS32R6-NEXT: sll $1, $4, 2
+; MIPS32R6-NEXT: lui $2, %hi($JTI0_0)
+; MIPS32R6-NEXT: addu $1, $1, $2
+; MIPS32R6-NEXT: lw $1, %lo($JTI0_0)($1)
+; MIPS32R6-NEXT: jr.hb $1
+; MIPS32R6-NEXT: nop
+; MIPS32R6-NEXT: $BB0_2: # %sw.bb
+; MIPS32R6-NEXT: lui $1, %hi($.str)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_3: # %sw.epilog
+; MIPS32R6-NEXT: lui $1, %hi($.str.7)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.7)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_4: # %sw.bb1
+; MIPS32R6-NEXT: lui $1, %hi($.str.1)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.1)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_5: # %sw.bb2
+; MIPS32R6-NEXT: lui $1, %hi($.str.2)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.2)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_6: # %sw.bb3
+; MIPS32R6-NEXT: lui $1, %hi($.str.3)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.3)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_7: # %sw.bb4
+; MIPS32R6-NEXT: lui $1, %hi($.str.4)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.4)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_8: # %sw.bb5
+; MIPS32R6-NEXT: lui $1, %hi($.str.5)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.5)
+; MIPS32R6-NEXT: j $BB0_10
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_9: # %sw.bb6
+; MIPS32R6-NEXT: lui $1, %hi($.str.6)
+; MIPS32R6-NEXT: addiu $1, $1, %lo($.str.6)
+; MIPS32R6-NEXT: sw $1, 8($sp)
+; MIPS32R6-NEXT: $BB0_10: # %return
+; MIPS32R6-NEXT: lw $2, 8($sp)
+; MIPS32R6-NEXT: jr $ra
+; MIPS32R6-NEXT: addiu $sp, $sp, 16
+;
+; MIPS64R2-LABEL: _Z3fooi:
+; MIPS64R2: # %bb.0: # %entry
+; MIPS64R2-NEXT: daddiu $sp, $sp, -16
+; MIPS64R2-NEXT: .cfi_def_cfa_offset 16
+; MIPS64R2-NEXT: sw $4, 4($sp)
+; MIPS64R2-NEXT: lwu $2, 4($sp)
+; MIPS64R2-NEXT: sltiu $1, $2, 7
+; MIPS64R2-NEXT: beqz $1, .LBB0_3
+; MIPS64R2-NEXT: nop
+; MIPS64R2-NEXT: .LBB0_1: # %entry
+; MIPS64R2-NEXT: daddiu $1, $zero, 8
+; MIPS64R2-NEXT: dmult $2, $1
+; MIPS64R2-NEXT: mflo $1
+; MIPS64R2-NEXT: lui $2, %highest(.LJTI0_0)
+; MIPS64R2-NEXT: daddiu $2, $2, %higher(.LJTI0_0)
+; MIPS64R2-NEXT: dsll $2, $2, 16
+; MIPS64R2-NEXT: daddiu $2, $2, %hi(.LJTI0_0)
+; MIPS64R2-NEXT: dsll $2, $2, 16
+; MIPS64R2-NEXT: daddu $1, $1, $2
+; MIPS64R2-NEXT: ld $1, %lo(.LJTI0_0)($1)
+; MIPS64R2-NEXT: jr.hb $1
+; MIPS64R2-NEXT: nop
+; MIPS64R2-NEXT: .LBB0_2: # %sw.bb
+; MIPS64R2-NEXT: lui $1, %highest(.L.str)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_3: # %sw.epilog
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.7)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.7)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.7)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.7)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_4: # %sw.bb1
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.1)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.1)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.1)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.1)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_5: # %sw.bb2
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.2)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.2)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.2)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.2)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_6: # %sw.bb3
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.3)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.3)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.3)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.3)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_7: # %sw.bb4
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.4)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.4)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.4)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.4)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_8: # %sw.bb5
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.5)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.5)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.5)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.5)
+; MIPS64R2-NEXT: j .LBB0_10
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_9: # %sw.bb6
+; MIPS64R2-NEXT: lui $1, %highest(.L.str.6)
+; MIPS64R2-NEXT: daddiu $1, $1, %higher(.L.str.6)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %hi(.L.str.6)
+; MIPS64R2-NEXT: dsll $1, $1, 16
+; MIPS64R2-NEXT: daddiu $1, $1, %lo(.L.str.6)
+; MIPS64R2-NEXT: sd $1, 8($sp)
+; MIPS64R2-NEXT: .LBB0_10: # %return
+; MIPS64R2-NEXT: ld $2, 8($sp)
+; MIPS64R2-NEXT: jr $ra
+; MIPS64R2-NEXT: daddiu $sp, $sp, 16
+;
+; MIPS64R6-LABEL: _Z3fooi:
+; MIPS64R6: # %bb.0: # %entry
+; MIPS64R6-NEXT: daddiu $sp, $sp, -16
+; MIPS64R6-NEXT: .cfi_def_cfa_offset 16
+; MIPS64R6-NEXT: sw $4, 4($sp)
+; MIPS64R6-NEXT: lwu $2, 4($sp)
+; MIPS64R6-NEXT: sltiu $1, $2, 7
+; MIPS64R6-NEXT: beqzc $1, .LBB0_3
+; MIPS64R6-NEXT: .LBB0_1: # %entry
+; MIPS64R6-NEXT: dsll $1, $2, 3
+; MIPS64R6-NEXT: lui $2, %highest(.LJTI0_0)
+; MIPS64R6-NEXT: daddiu $2, $2, %higher(.LJTI0_0)
+; MIPS64R6-NEXT: dsll $2, $2, 16
+; MIPS64R6-NEXT: daddiu $2, $2, %hi(.LJTI0_0)
+; MIPS64R6-NEXT: dsll $2, $2, 16
+; MIPS64R6-NEXT: daddu $1, $1, $2
+; MIPS64R6-NEXT: ld $1, %lo(.LJTI0_0)($1)
+; MIPS64R6-NEXT: jr.hb $1
+; MIPS64R6-NEXT: nop
+; MIPS64R6-NEXT: .LBB0_2: # %sw.bb
+; MIPS64R6-NEXT: lui $1, %highest(.L.str)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_3: # %sw.epilog
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.7)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.7)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.7)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.7)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_4: # %sw.bb1
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.1)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.1)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.1)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.1)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_5: # %sw.bb2
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.2)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.2)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.2)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.2)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_6: # %sw.bb3
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.3)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.3)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.3)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.3)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_7: # %sw.bb4
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.4)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.4)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.4)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.4)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_8: # %sw.bb5
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.5)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.5)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.5)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.5)
+; MIPS64R6-NEXT: j .LBB0_10
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_9: # %sw.bb6
+; MIPS64R6-NEXT: lui $1, %highest(.L.str.6)
+; MIPS64R6-NEXT: daddiu $1, $1, %higher(.L.str.6)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %hi(.L.str.6)
+; MIPS64R6-NEXT: dsll $1, $1, 16
+; MIPS64R6-NEXT: daddiu $1, $1, %lo(.L.str.6)
+; MIPS64R6-NEXT: sd $1, 8($sp)
+; MIPS64R6-NEXT: .LBB0_10: # %return
+; MIPS64R6-NEXT: ld $2, 8($sp)
+; MIPS64R6-NEXT: jr $ra
+; MIPS64R6-NEXT: daddiu $sp, $sp, 16
+;
+; PIC-MIPS32R2-LABEL: _Z3fooi:
+; PIC-MIPS32R2: # %bb.0: # %entry
+; PIC-MIPS32R2-NEXT: lui $2, %hi(_gp_disp)
+; PIC-MIPS32R2-NEXT: addiu $2, $2, %lo(_gp_disp)
+; PIC-MIPS32R2-NEXT: addiu $sp, $sp, -16
+; PIC-MIPS32R2-NEXT: .cfi_def_cfa_offset 16
+; PIC-MIPS32R2-NEXT: addu $2, $2, $25
+; PIC-MIPS32R2-NEXT: sltiu $1, $4, 7
+; PIC-MIPS32R2-NEXT: beqz $1, $BB0_3
+; PIC-MIPS32R2-NEXT: sw $4, 4($sp)
+; PIC-MIPS32R2-NEXT: $BB0_1: # %entry
+; PIC-MIPS32R2-NEXT: sll $1, $4, 2
+; PIC-MIPS32R2-NEXT: lw $3, %got($JTI0_0)($2)
+; PIC-MIPS32R2-NEXT: addu $1, $1, $3
+; PIC-MIPS32R2-NEXT: lw $1, %lo($JTI0_0)($1)
+; PIC-MIPS32R2-NEXT: addu $1, $1, $2
+; PIC-MIPS32R2-NEXT: jr.hb $1
+; PIC-MIPS32R2-NEXT: nop
+; PIC-MIPS32R2-NEXT: $BB0_2: # %sw.bb
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_3: # %sw.epilog
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.7)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.7)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_4: # %sw.bb1
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.1)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.1)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_5: # %sw.bb2
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.2)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.2)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_6: # %sw.bb3
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.3)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.3)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_7: # %sw.bb4
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.4)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.4)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_8: # %sw.bb5
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.5)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.5)
+; PIC-MIPS32R2-NEXT: b $BB0_10
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_9: # %sw.bb6
+; PIC-MIPS32R2-NEXT: lw $1, %got($.str.6)($2)
+; PIC-MIPS32R2-NEXT: addiu $1, $1, %lo($.str.6)
+; PIC-MIPS32R2-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R2-NEXT: $BB0_10: # %return
+; PIC-MIPS32R2-NEXT: lw $2, 8($sp)
+; PIC-MIPS32R2-NEXT: jr $ra
+; PIC-MIPS32R2-NEXT: addiu $sp, $sp, 16
+;
+; PIC-MIPS32R6-LABEL: _Z3fooi:
+; PIC-MIPS32R6: # %bb.0: # %entry
+; PIC-MIPS32R6-NEXT: lui $2, %hi(_gp_disp)
+; PIC-MIPS32R6-NEXT: addiu $2, $2, %lo(_gp_disp)
+; PIC-MIPS32R6-NEXT: addiu $sp, $sp, -16
+; PIC-MIPS32R6-NEXT: .cfi_def_cfa_offset 16
+; PIC-MIPS32R6-NEXT: addu $2, $2, $25
+; PIC-MIPS32R6-NEXT: sltiu $1, $4, 7
+; PIC-MIPS32R6-NEXT: beqz $1, $BB0_3
+; PIC-MIPS32R6-NEXT: sw $4, 4($sp)
+; PIC-MIPS32R6-NEXT: $BB0_1: # %entry
+; PIC-MIPS32R6-NEXT: sll $1, $4, 2
+; PIC-MIPS32R6-NEXT: lw $3, %got($JTI0_0)($2)
+; PIC-MIPS32R6-NEXT: addu $1, $1, $3
+; PIC-MIPS32R6-NEXT: lw $1, %lo($JTI0_0)($1)
+; PIC-MIPS32R6-NEXT: addu $1, $1, $2
+; PIC-MIPS32R6-NEXT: jr.hb $1
+; PIC-MIPS32R6-NEXT: nop
+; PIC-MIPS32R6-NEXT: $BB0_2: # %sw.bb
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_3: # %sw.epilog
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.7)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.7)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_4: # %sw.bb1
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.1)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.1)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_5: # %sw.bb2
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.2)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.2)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_6: # %sw.bb3
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.3)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.3)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_7: # %sw.bb4
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.4)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.4)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_8: # %sw.bb5
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.5)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.5)
+; PIC-MIPS32R6-NEXT: b $BB0_10
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_9: # %sw.bb6
+; PIC-MIPS32R6-NEXT: lw $1, %got($.str.6)($2)
+; PIC-MIPS32R6-NEXT: addiu $1, $1, %lo($.str.6)
+; PIC-MIPS32R6-NEXT: sw $1, 8($sp)
+; PIC-MIPS32R6-NEXT: $BB0_10: # %return
+; PIC-MIPS32R6-NEXT: lw $2, 8($sp)
+; PIC-MIPS32R6-NEXT: jr $ra
+; PIC-MIPS32R6-NEXT: addiu $sp, $sp, 16
+;
+; PIC-MIPS64R2-LABEL: _Z3fooi:
+; PIC-MIPS64R2: # %bb.0: # %entry
+; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, -16
+; PIC-MIPS64R2-NEXT: .cfi_def_cfa_offset 16
+; PIC-MIPS64R2-NEXT: lui $1, %hi(%neg(%gp_rel(_Z3fooi)))
+; PIC-MIPS64R2-NEXT: daddu $1, $1, $25
+; PIC-MIPS64R2-NEXT: daddiu $2, $1, %lo(%neg(%gp_rel(_Z3fooi)))
+; PIC-MIPS64R2-NEXT: sw $4, 4($sp)
+; PIC-MIPS64R2-NEXT: lwu $3, 4($sp)
+; PIC-MIPS64R2-NEXT: sltiu $1, $3, 7
+; PIC-MIPS64R2-NEXT: beqz $1, .LBB0_3
+; PIC-MIPS64R2-NEXT: nop
+; PIC-MIPS64R2-NEXT: .LBB0_1: # %entry
+; PIC-MIPS64R2-NEXT: daddiu $1, $zero, 8
+; PIC-MIPS64R2-NEXT: dmult $3, $1
+; PIC-MIPS64R2-NEXT: mflo $1
+; PIC-MIPS64R2-NEXT: ld $3, %got_page(.LJTI0_0)($2)
+; PIC-MIPS64R2-NEXT: daddu $1, $1, $3
+; PIC-MIPS64R2-NEXT: ld $1, %got_ofst(.LJTI0_0)($1)
+; PIC-MIPS64R2-NEXT: daddu $1, $1, $2
+; PIC-MIPS64R2-NEXT: jr.hb $1
+; PIC-MIPS64R2-NEXT: nop
+; PIC-MIPS64R2-NEXT: .LBB0_2: # %sw.bb
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_3: # %sw.epilog
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.7)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.7)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_4: # %sw.bb1
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.1)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.1)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_5: # %sw.bb2
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.2)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.2)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_6: # %sw.bb3
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.3)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.3)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_7: # %sw.bb4
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.4)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.4)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_8: # %sw.bb5
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.5)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.5)
+; PIC-MIPS64R2-NEXT: b .LBB0_10
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_9: # %sw.bb6
+; PIC-MIPS64R2-NEXT: ld $1, %got_page(.L.str.6)($2)
+; PIC-MIPS64R2-NEXT: daddiu $1, $1, %got_ofst(.L.str.6)
+; PIC-MIPS64R2-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R2-NEXT: .LBB0_10: # %return
+; PIC-MIPS64R2-NEXT: ld $2, 8($sp)
+; PIC-MIPS64R2-NEXT: jr $ra
+; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, 16
+;
+; PIC-MIPS64R6-LABEL: _Z3fooi:
+; PIC-MIPS64R6: # %bb.0: # %entry
+; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, -16
+; PIC-MIPS64R6-NEXT: .cfi_def_cfa_offset 16
+; PIC-MIPS64R6-NEXT: lui $1, %hi(%neg(%gp_rel(_Z3fooi)))
+; PIC-MIPS64R6-NEXT: daddu $1, $1, $25
+; PIC-MIPS64R6-NEXT: daddiu $2, $1, %lo(%neg(%gp_rel(_Z3fooi)))
+; PIC-MIPS64R6-NEXT: sw $4, 4($sp)
+; PIC-MIPS64R6-NEXT: lwu $3, 4($sp)
+; PIC-MIPS64R6-NEXT: sltiu $1, $3, 7
+; PIC-MIPS64R6-NEXT: beqzc $1, .LBB0_3
+; PIC-MIPS64R6-NEXT: .LBB0_1: # %entry
+; PIC-MIPS64R6-NEXT: dsll $1, $3, 3
+; PIC-MIPS64R6-NEXT: ld $3, %got_page(.LJTI0_0)($2)
+; PIC-MIPS64R6-NEXT: daddu $1, $1, $3
+; PIC-MIPS64R6-NEXT: ld $1, %got_ofst(.LJTI0_0)($1)
+; PIC-MIPS64R6-NEXT: daddu $1, $1, $2
+; PIC-MIPS64R6-NEXT: jr.hb $1
+; PIC-MIPS64R6-NEXT: nop
+; PIC-MIPS64R6-NEXT: .LBB0_2: # %sw.bb
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_3: # %sw.epilog
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.7)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.7)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_4: # %sw.bb1
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.1)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.1)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_5: # %sw.bb2
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.2)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.2)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_6: # %sw.bb3
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.3)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.3)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_7: # %sw.bb4
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.4)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.4)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_8: # %sw.bb5
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.5)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.5)
+; PIC-MIPS64R6-NEXT: b .LBB0_10
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_9: # %sw.bb6
+; PIC-MIPS64R6-NEXT: ld $1, %got_page(.L.str.6)($2)
+; PIC-MIPS64R6-NEXT: daddiu $1, $1, %got_ofst(.L.str.6)
+; PIC-MIPS64R6-NEXT: sd $1, 8($sp)
+; PIC-MIPS64R6-NEXT: .LBB0_10: # %return
+; PIC-MIPS64R6-NEXT: ld $2, 8($sp)
+; PIC-MIPS64R6-NEXT: jr $ra
+; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, 16
+entry:
+ %retval = alloca i8*, align 8
+ %Letter.addr = alloca i32, align 4
+ store i32 %Letter, i32* %Letter.addr, align 4
+ %0 = load i32, i32* %Letter.addr, align 4
+ switch i32 %0, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 4, label %sw.bb4
+ i32 5, label %sw.bb5
+ i32 6, label %sw.bb6
+ ]
+
+sw.bb:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb1:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.1, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb2:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.2, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb3:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.3, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb4:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.4, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb5:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.5, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.bb6:
+ store i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.6, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+sw.epilog:
+ store i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str.7, i32 0, i32 0), i8** %retval, align 8
+ br label %return
+
+return:
+ %1 = load i8*, i8** %retval, align 8
+ ret i8* %1
+}
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll b/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
new file mode 100644
index 0000000000000..fffda991ae4b6
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/long-branch.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; Except for the NACL version which isn't parsed by update_llc_test_checks.py
+
+; RUN: llc -mtriple=mipsel-unknown-linux-gnu -force-mips-long-branch -O3 \
+; RUN: -mcpu=mips32r2 -mattr=+use-indirect-jump-hazard -relocation-model=pic \
+; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefix=O32-PIC
+
+; RUN: llc -mtriple=mipsel-unknown-linux-gnu -mcpu=mips32r6 \
+; RUN: -force-mips-long-branch -O3 -mattr=+use-indirect-jump-hazard \
+; RUN: -relocation-model=pic -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=O32-R6-PIC
+
+; RUN: llc -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r2 -target-abi=n64 \
+; RUN: -force-mips-long-branch -O3 -relocation-model=pic \
+; RUN: -mattr=+use-indirect-jump-hazard -verify-machineinstrs \
+; RUN: < %s | FileCheck %s -check-prefix=MIPS64
+
+; RUN: llc -mtriple=mips64el-unknown-linux-gnu -mcpu=mips64r6 -target-abi=n64 \
+; RUN: -force-mips-long-branch -O3 -mattr=+use-indirect-jump-hazard \
+; RUN: -relocation-model=pic -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=N64-R6
+
+; Test that the long branches also get changed to their hazard variants.
+
+@x = external global i32
+
+define void @test1(i32 signext %s) {
+; O32-PIC-LABEL: test1:
+; O32-PIC: # %bb.0: # %entry
+; O32-PIC-NEXT: lui $2, %hi(_gp_disp)
+; O32-PIC-NEXT: addiu $2, $2, %lo(_gp_disp)
+; O32-PIC-NEXT: bnez $4, $BB0_3
+; O32-PIC-NEXT: addu $2, $2, $25
+; O32-PIC-NEXT: # %bb.1: # %entry
+; O32-PIC-NEXT: addiu $sp, $sp, -8
+; O32-PIC-NEXT: sw $ra, 0($sp)
+; O32-PIC-NEXT: lui $1, %hi(($BB0_4)-($BB0_2))
+; O32-PIC-NEXT: bal $BB0_2
+; O32-PIC-NEXT: addiu $1, $1, %lo(($BB0_4)-($BB0_2))
+; O32-PIC-NEXT: $BB0_2: # %entry
+; O32-PIC-NEXT: addu $1, $ra, $1
+; O32-PIC-NEXT: lw $ra, 0($sp)
+; O32-PIC-NEXT: jr.hb $1
+; O32-PIC-NEXT: addiu $sp, $sp, 8
+; O32-PIC-NEXT: $BB0_3: # %then
+; O32-PIC-NEXT: lw $1, %got(x)($2)
+; O32-PIC-NEXT: addiu $2, $zero, 1
+; O32-PIC-NEXT: sw $2, 0($1)
+; O32-PIC-NEXT: $BB0_4: # %end
+; O32-PIC-NEXT: jr $ra
+; O32-PIC-NEXT: nop
+;
+; O32-R6-PIC-LABEL: test1:
+; O32-R6-PIC: # %bb.0: # %entry
+; O32-R6-PIC-NEXT: lui $2, %hi(_gp_disp)
+; O32-R6-PIC-NEXT: addiu $2, $2, %lo(_gp_disp)
+; O32-R6-PIC-NEXT: bnez $4, $BB0_3
+; O32-R6-PIC-NEXT: addu $2, $2, $25
+; O32-R6-PIC-NEXT: # %bb.1: # %entry
+; O32-R6-PIC-NEXT: addiu $sp, $sp, -8
+; O32-R6-PIC-NEXT: sw $ra, 0($sp)
+; O32-R6-PIC-NEXT: lui $1, %hi(($BB0_4)-($BB0_2))
+; O32-R6-PIC-NEXT: addiu $1, $1, %lo(($BB0_4)-($BB0_2))
+; O32-R6-PIC-NEXT: balc $BB0_2
+; O32-R6-PIC-NEXT: $BB0_2: # %entry
+; O32-R6-PIC-NEXT: addu $1, $ra, $1
+; O32-R6-PIC-NEXT: lw $ra, 0($sp)
+; O32-R6-PIC-NEXT: jr.hb $1
+; O32-R6-PIC-NEXT: addiu $sp, $sp, 8
+; O32-R6-PIC-NEXT: $BB0_3: # %then
+; O32-R6-PIC-NEXT: lw $1, %got(x)($2)
+; O32-R6-PIC-NEXT: addiu $2, $zero, 1
+; O32-R6-PIC-NEXT: sw $2, 0($1)
+; O32-R6-PIC-NEXT: $BB0_4: # %end
+; O32-R6-PIC-NEXT: jrc $ra
+;
+; MIPS64-LABEL: test1:
+; MIPS64: # %bb.0: # %entry
+; MIPS64-NEXT: lui $1, %hi(%neg(%gp_rel(test1)))
+; MIPS64-NEXT: bnez $4, .LBB0_3
+; MIPS64-NEXT: daddu $2, $1, $25
+; MIPS64-NEXT: # %bb.1: # %entry
+; MIPS64-NEXT: daddiu $sp, $sp, -16
+; MIPS64-NEXT: sd $ra, 0($sp)
+; MIPS64-NEXT: daddiu $1, $zero, %hi(.LBB0_4-.LBB0_2)
+; MIPS64-NEXT: dsll $1, $1, 16
+; MIPS64-NEXT: bal .LBB0_2
+; MIPS64-NEXT: daddiu $1, $1, %lo(.LBB0_4-.LBB0_2)
+; MIPS64-NEXT: .LBB0_2: # %entry
+; MIPS64-NEXT: daddu $1, $ra, $1
+; MIPS64-NEXT: ld $ra, 0($sp)
+; MIPS64-NEXT: jr.hb $1
+; MIPS64-NEXT: daddiu $sp, $sp, 16
+; MIPS64-NEXT: .LBB0_3: # %then
+; MIPS64-NEXT: daddiu $1, $2, %lo(%neg(%gp_rel(test1)))
+; MIPS64-NEXT: addiu $2, $zero, 1
+; MIPS64-NEXT: ld $1, %got_disp(x)($1)
+; MIPS64-NEXT: sw $2, 0($1)
+; MIPS64-NEXT: .LBB0_4: # %end
+; MIPS64-NEXT: jr $ra
+; MIPS64-NEXT: nop
+;
+; N64-R6-LABEL: test1:
+; N64-R6: # %bb.0: # %entry
+; N64-R6-NEXT: lui $1, %hi(%neg(%gp_rel(test1)))
+; N64-R6-NEXT: bnez $4, .LBB0_3
+; N64-R6-NEXT: daddu $2, $1, $25
+; N64-R6-NEXT: # %bb.1: # %entry
+; N64-R6-NEXT: daddiu $sp, $sp, -16
+; N64-R6-NEXT: sd $ra, 0($sp)
+; N64-R6-NEXT: daddiu $1, $zero, %hi(.LBB0_4-.LBB0_2)
+; N64-R6-NEXT: dsll $1, $1, 16
+; N64-R6-NEXT: daddiu $1, $1, %lo(.LBB0_4-.LBB0_2)
+; N64-R6-NEXT: balc .LBB0_2
+; N64-R6-NEXT: .LBB0_2: # %entry
+; N64-R6-NEXT: daddu $1, $ra, $1
+; N64-R6-NEXT: ld $ra, 0($sp)
+; N64-R6-NEXT: jr.hb $1
+; N64-R6-NEXT: daddiu $sp, $sp, 16
+; N64-R6-NEXT: .LBB0_3: # %then
+; N64-R6-NEXT: daddiu $1, $2, %lo(%neg(%gp_rel(test1)))
+; N64-R6-NEXT: addiu $2, $zero, 1
+; N64-R6-NEXT: ld $1, %got_disp(x)($1)
+; N64-R6-NEXT: sw $2, 0($1)
+; N64-R6-NEXT: .LBB0_4: # %end
+; N64-R6-NEXT: jrc $ra
+entry:
+ %cmp = icmp eq i32 %s, 0
+ br i1 %cmp, label %end, label %then
+
+then:
+ store i32 1, i32* @x, align 4
+ br label %end
+
+end:
+ ret void
+
+}
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll b/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
new file mode 100644
index 0000000000000..88886e13f3266
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/long-calls.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=mips-unknown-linux-gnu -mcpu=mips32r2 \
+; RUN: -mattr=+use-indirect-jump-hazard,+long-calls,+noabicalls %s -o - \
+; RUN: -verify-machineinstrs | FileCheck -check-prefix=O32 %s
+
+; RUN: llc -mtriple=mips64-unknown-linux-gnu -mcpu=mips64r2 -target-abi n32 \
+; RUN: -mattr=+use-indirect-jump-hazard,+long-calls,+noabicalls %s -o - \
+; RUN: -verify-machineinstrs | FileCheck -check-prefix=N32 %s
+
+; RUN: llc -mtriple=mips64-unknown-linux-gnu -mcpu=mips64r2 -target-abi n64 \
+; RUN: -mattr=+use-indirect-jump-hazard,+long-calls,+noabicalls %s -o - \
+; RUN: -verify-machineinstrs | FileCheck -check-prefix=N64 %s
+
+declare void @callee()
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
+
+@val = internal unnamed_addr global [20 x i32] zeroinitializer, align 4
+
+; Test that the long call sequence uses the hazard barrier instruction variant.
+define void @caller() {
+; O32-LABEL: caller:
+; O32: # %bb.0:
+; O32-NEXT: addiu $sp, $sp, -24
+; O32-NEXT: .cfi_def_cfa_offset 24
+; O32-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
+; O32-NEXT: .cfi_offset 31, -4
+; O32-NEXT: lui $1, %hi(callee)
+; O32-NEXT: addiu $25, $1, %lo(callee)
+; O32-NEXT: jalr.hb $25
+; O32-NEXT: nop
+; O32-NEXT: lui $1, %hi(val)
+; O32-NEXT: addiu $1, $1, %lo(val)
+; O32-NEXT: lui $2, 20560
+; O32-NEXT: ori $2, $2, 20560
+; O32-NEXT: sw $2, 96($1)
+; O32-NEXT: sw $2, 92($1)
+; O32-NEXT: sw $2, 88($1)
+; O32-NEXT: sw $2, 84($1)
+; O32-NEXT: sw $2, 80($1)
+; O32-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
+; O32-NEXT: jr $ra
+; O32-NEXT: addiu $sp, $sp, 24
+;
+; N32-LABEL: caller:
+; N32: # %bb.0:
+; N32-NEXT: addiu $sp, $sp, -16
+; N32-NEXT: .cfi_def_cfa_offset 16
+; N32-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; N32-NEXT: .cfi_offset 31, -8
+; N32-NEXT: lui $1, %hi(callee)
+; N32-NEXT: addiu $25, $1, %lo(callee)
+; N32-NEXT: jalr.hb $25
+; N32-NEXT: nop
+; N32-NEXT: lui $1, %hi(val)
+; N32-NEXT: addiu $1, $1, %lo(val)
+; N32-NEXT: lui $2, 1285
+; N32-NEXT: daddiu $2, $2, 1285
+; N32-NEXT: dsll $2, $2, 16
+; N32-NEXT: daddiu $2, $2, 1285
+; N32-NEXT: dsll $2, $2, 20
+; N32-NEXT: daddiu $2, $2, 20560
+; N32-NEXT: sdl $2, 88($1)
+; N32-NEXT: sdl $2, 80($1)
+; N32-NEXT: lui $3, 20560
+; N32-NEXT: ori $3, $3, 20560
+; N32-NEXT: sw $3, 96($1)
+; N32-NEXT: sdr $2, 95($1)
+; N32-NEXT: sdr $2, 87($1)
+; N32-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; N32-NEXT: jr $ra
+; N32-NEXT: addiu $sp, $sp, 16
+;
+; N64-LABEL: caller:
+; N64: # %bb.0:
+; N64-NEXT: daddiu $sp, $sp, -16
+; N64-NEXT: .cfi_def_cfa_offset 16
+; N64-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
+; N64-NEXT: .cfi_offset 31, -8
+; N64-NEXT: lui $1, %highest(callee)
+; N64-NEXT: daddiu $1, $1, %higher(callee)
+; N64-NEXT: dsll $1, $1, 16
+; N64-NEXT: daddiu $1, $1, %hi(callee)
+; N64-NEXT: dsll $1, $1, 16
+; N64-NEXT: daddiu $25, $1, %lo(callee)
+; N64-NEXT: jalr.hb $25
+; N64-NEXT: nop
+; N64-NEXT: lui $1, %highest(val)
+; N64-NEXT: daddiu $1, $1, %higher(val)
+; N64-NEXT: dsll $1, $1, 16
+; N64-NEXT: daddiu $1, $1, %hi(val)
+; N64-NEXT: dsll $1, $1, 16
+; N64-NEXT: daddiu $1, $1, %lo(val)
+; N64-NEXT: lui $2, 1285
+; N64-NEXT: daddiu $2, $2, 1285
+; N64-NEXT: dsll $2, $2, 16
+; N64-NEXT: daddiu $2, $2, 1285
+; N64-NEXT: dsll $2, $2, 20
+; N64-NEXT: daddiu $2, $2, 20560
+; N64-NEXT: lui $3, 20560
+; N64-NEXT: sdl $2, 88($1)
+; N64-NEXT: sdl $2, 80($1)
+; N64-NEXT: ori $3, $3, 20560
+; N64-NEXT: sw $3, 96($1)
+; N64-NEXT: sdr $2, 95($1)
+; N64-NEXT: sdr $2, 87($1)
+; N64-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
+; N64-NEXT: jr $ra
+; N64-NEXT: daddiu $sp, $sp, 16
+ call void @callee()
+ call void @llvm.memset.p0i8.i32(i8* bitcast (i32* getelementptr inbounds ([20 x i32], [20 x i32]* @val, i64 1, i32 0) to i8*), i8 80, i32 20, i32 4, i1 false)
+ ret void
+}
+
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/unsupported-micromips.ll b/test/CodeGen/Mips/indirect-jump-hazard/unsupported-micromips.ll
new file mode 100644
index 0000000000000..99612525ae3cb
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/unsupported-micromips.ll
@@ -0,0 +1,5 @@
+; RUN: not llc -mtriple=mips-unknown-linux -mcpu=mips32r2 -mattr=+micromips,+use-indirect-jump-hazard %s 2>&1 | FileCheck %s
+
+; Test that indirect jumps with hazard barriers are not supported together with microMIPS.
+
+; CHECK: LLVM ERROR: cannot combine indirect jumps with hazard barriers and microMIPS
diff --git a/test/CodeGen/Mips/indirect-jump-hazard/unsupported-mips32.ll b/test/CodeGen/Mips/indirect-jump-hazard/unsupported-mips32.ll
new file mode 100644
index 0000000000000..48baedf53eaa3
--- /dev/null
+++ b/test/CodeGen/Mips/indirect-jump-hazard/unsupported-mips32.ll
@@ -0,0 +1,5 @@
+; RUN: not llc -mtriple=mips-unknown-linux -mcpu=mips32 -mattr=+use-indirect-jump-hazard %s 2>&1 | FileCheck %s
+
+; Test that indirect jumps with hazard barriers are not supported on mips32.
+
+; CHECK: LLVM ERROR: indirect jumps with hazard barriers requires MIPS32R2 or later
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-bad-l1.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-l1.ll
new file mode 100644
index 0000000000000..1cd86d617a24f
--- /dev/null
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-bad-l1.ll
@@ -0,0 +1,13 @@
+; Negative test. The constraint 'l' represents the register 'lo'.
+; Check error message in case of invalid usage.
+;
+; RUN: not llc -march=mips -filetype=obj < %s 2>&1 | FileCheck %s
+
+define void @constraint_l() nounwind {
+entry:
+
+; CHECK: error: invalid operand for instruction
+
+ tail call i16 asm sideeffect "addiu $0,$1,$2", "=l,r,r,~{$1}"(i16 0, i16 0)
+ ret void
+}
diff --git a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
index 63ee42c0c7cd8..b4c1587a8fbff 100644
--- a/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
+++ b/test/CodeGen/Mips/inlineasm-cnstrnt-reg.ll
@@ -41,5 +41,15 @@ entry:
   %4 = call i32 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
store volatile i32 %4, i32* %bosco, align 4
+; Check the 'l' constraint for 16-bit type.
+; CHECK: #APP
+; CHECK: mtlo ${{[0-9]+}}
+; CHECK-NEXT: madd ${{[0-9]+}}, ${{[0-9]+}}
+; CHECK: #NO_APP
+; CHECK-NEXT: mflo ${{[0-9]+}}
+ %bosco16 = alloca i16, align 4
+  %5 = call i16 asm sideeffect "\09mtlo $3 \0A\09\09madd $1, $2 ", "=l,r,r,r"(i32 7, i32 6, i32 44) nounwind
+ store volatile i16 %5, i16* %bosco16, align 4
+
ret i32 0
}
diff --git a/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir b/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
index 67733795ed5de..f2ca07367b991 100644
--- a/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
+++ b/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
@@ -561,6 +561,25 @@
}
; Function Attrs: norecurse nounwind readnone
+ define i64 @testRLDICLo2(i64 %a, i64 %b) local_unnamed_addr #0 {
+ entry:
+ %shr = lshr i64 %a, 11
+ %and = and i64 %shr, 16777215
+ %tobool = icmp eq i64 %and, 0
+ %cond = select i1 %tobool, i64 %b, i64 %and
+ ret i64 %cond
+ }
+
+ define i64 @testRLDICLo3(i64 %a, i64 %b) local_unnamed_addr #0 {
+ entry:
+ %shr = lshr i64 %a, 11
+ %and = and i64 %shr, 16777215
+ %tobool = icmp eq i64 %and, 0
+ %cond = select i1 %tobool, i64 %b, i64 %and
+ ret i64 %cond
+ }
+
+ ; Function Attrs: norecurse nounwind readnone
define zeroext i32 @testRLWINM(i32 zeroext %a) local_unnamed_addr #0 {
entry:
%shl = shl i32 %a, 4
@@ -602,6 +621,15 @@
}
; Function Attrs: norecurse nounwind readnone
+ define zeroext i32 @testRLWINMo2(i32 zeroext %a, i32 zeroext %b) local_unnamed_addr #0 {
+ entry:
+ %and = and i32 %a, 255
+ %tobool = icmp eq i32 %and, 0
+ %cond = select i1 %tobool, i32 %b, i32 %a
+ ret i32 %cond
+ }
+
+ ; Function Attrs: norecurse nounwind readnone
define i64 @testRLWINM8o(i64 %a, i64 %b) local_unnamed_addr #0 {
entry:
%a.tr = trunc i64 %a to i32
@@ -3904,6 +3932,113 @@ body: |
...
---
+name: testRLDICLo2
+# CHECK-ALL: name: testRLDICLo2
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 3, class: crrc, preferred-register: '' }
+ - { id: 4, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '%x4', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4
+
+ %1 = COPY %x4
+ %0 = LI8 200
+ %2 = RLDICLo %0, 61, 3, implicit-def %cr0
+ ; CHECK-NOT: ANDI
+ ; CHECK-LATE-NOT: andi.
+ %3 = COPY killed %cr0
+ %4 = ISEL8 %1, %2, %3.sub_eq
+ %x3 = COPY %4
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+...
+---
+name: testRLDICLo3
+# CHECK-ALL: name: testRLDICLo3
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 3, class: crrc, preferred-register: '' }
+ - { id: 4, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '%x4', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4
+
+ %1 = COPY %x4
+ %0 = LI8 2
+ %2 = RLDICLo %0, 32, 32, implicit-def %cr0
+ ; CHECK: ANDIo8 %0, 0
+ ; CHECK-LATE: li 3, 2
+ ; CHECK-LATE: andi. 3, 3, 0
+ %3 = COPY killed %cr0
+ %4 = ISEL8 %1, %2, %3.sub_eq
+ %x3 = COPY %4
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+...
+---
name: testRLWINM
# CHECK-ALL: name: testRLWINM
alignment: 4
@@ -4170,6 +4305,69 @@ body: |
...
---
+name: testRLWINMo2
+# CHECK-ALL: name: testRLWINMo2
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc, preferred-register: '' }
+ - { id: 1, class: g8rc, preferred-register: '' }
+ - { id: 2, class: gprc_and_gprc_nor0, preferred-register: '' }
+ - { id: 3, class: gprc_and_gprc_nor0, preferred-register: '' }
+ - { id: 4, class: gprc, preferred-register: '' }
+ - { id: 5, class: crrc, preferred-register: '' }
+ - { id: 6, class: gprc, preferred-register: '' }
+ - { id: 7, class: g8rc, preferred-register: '' }
+ - { id: 8, class: g8rc, preferred-register: '' }
+ - { id: 9, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '%x4', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4
+
+ %1 = COPY %x4
+ %0 = COPY %x3
+ %2 = COPY %1.sub_32
+ %3 = LI -22
+ %4 = RLWINMo %3, 5, 24, 31, implicit-def %cr0
+ ; CHECK-NOT: ANDI
+ ; CHECK-LATE-NOT: andi.
+ %5 = COPY killed %cr0
+ %6 = ISEL %2, %3, %5.sub_eq
+ %8 = IMPLICIT_DEF
+ %7 = INSERT_SUBREG %8, killed %6, 1
+ %9 = RLDICL killed %7, 0, 32
+ %x3 = COPY %9
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+...
+---
name: testRLWINM8o
# CHECK-ALL: name: testRLWINM8o
alignment: 4
diff --git a/test/CodeGen/PowerPC/no-dup-of-bdnz.ll b/test/CodeGen/PowerPC/no-dup-of-bdnz.ll
new file mode 100644
index 0000000000000..7d72242aa457e
--- /dev/null
+++ b/test/CodeGen/PowerPC/no-dup-of-bdnz.ll
@@ -0,0 +1,75 @@
+; RUN: opt -early-cse-memssa -loop-rotate -licm -loop-rotate -S %s -o - | FileCheck %s
+; ModuleID = 'bugpoint-reduced-simplified.bc'
+source_filename = "bugpoint-output-8903f29.bc"
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define void @test(i64 %arg.ssa, i64 %arg.nb) local_unnamed_addr {
+; Ensure that loop rotation doesn't duplicate the call to
+; llvm.ppc.is.decremented.ctr.nonzero
+; CHECK-LABEL: test
+; CHECK: call i1 @llvm.ppc.is.decremented.ctr.nonzero
+; CHECK-NOT: call i1 @llvm.ppc.is.decremented.ctr.nonzero
+; CHECK: declare i1 @llvm.ppc.is.decremented.ctr.nonzero
+entry:
+ switch i32 undef, label %BB_8 [
+ i32 -2, label %BB_9
+ i32 0, label %BB_9
+ ]
+
+BB_1: ; preds = %BB_12, %BB_4
+ %bcount.1.us = phi i64 [ %.810.us, %BB_4 ], [ 0, %BB_12 ]
+ %0 = add i64 %arg.ssa, %bcount.1.us
+ %.568.us = load i32, i32* undef, align 4
+ %.15.i.us = icmp slt i32 0, %.568.us
+ br i1 %.15.i.us, label %BB_3, label %BB_2
+
+BB_2: ; preds = %BB_1
+ %.982.us = add nsw i64 %0, 1
+ unreachable
+
+BB_3: ; preds = %BB_1
+ %1 = add i64 %arg.ssa, %bcount.1.us
+ %2 = add i64 %1, 1
+ %3 = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
+ br i1 %3, label %BB_4, label %BB_7
+
+BB_4: ; preds = %BB_3
+ %.810.us = add nuw nsw i64 %bcount.1.us, 1
+ br label %BB_1
+
+BB_5: ; preds = %BB_7, %BB_5
+ %lsr.iv20.i116 = phi i64 [ %2, %BB_7 ], [ %lsr.iv.next21.i126, %BB_5 ]
+ %lsr.iv.next21.i126 = add i64 %lsr.iv20.i116, 1
+ br i1 undef, label %BB_5, label %BB_6
+
+BB_6: ; preds = %BB_5
+ ret void
+
+BB_7: ; preds = %BB_3
+ br label %BB_5
+
+BB_8: ; preds = %entry
+ ret void
+
+BB_9: ; preds = %entry, %entry
+ br label %BB_10
+
+BB_10: ; preds = %BB_9
+ br label %BB_11
+
+BB_11: ; preds = %BB_11, %BB_10
+ br i1 undef, label %BB_11, label %BB_12
+
+BB_12: ; preds = %BB_11
+ call void @llvm.ppc.mtctr.i64(i64 %arg.nb)
+ br label %BB_1
+}
+
+; Function Attrs: nounwind
+declare void @llvm.ppc.mtctr.i64(i64) #0
+
+; Function Attrs: nounwind
+declare i1 @llvm.ppc.is.decremented.ctr.nonzero() #0
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/pr35402.ll b/test/CodeGen/PowerPC/pr35402.ll
new file mode 100644
index 0000000000000..06e6d963b13f8
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr35402.ll
@@ -0,0 +1,18 @@
+; RUN: llc -O2 < %s | FileCheck %s
+target triple = "powerpc64le-linux-gnu"
+
+define void @test(i8* %p, i64 %data) {
+entry:
+ %0 = tail call i64 @llvm.bswap.i64(i64 %data)
+ %ptr = bitcast i8* %p to i48*
+ %val = trunc i64 %0 to i48
+ store i48 %val, i48* %ptr, align 1
+ ret void
+
+; CHECK: sth
+; CHECK: stw
+; CHECK-NOT: stdbrx
+
+}
+
+declare i64 @llvm.bswap.i64(i64)
diff --git a/test/CodeGen/Thumb/PR36658.mir b/test/CodeGen/Thumb/PR36658.mir
new file mode 100644
index 0000000000000..15a3c7f407b18
--- /dev/null
+++ b/test/CodeGen/Thumb/PR36658.mir
@@ -0,0 +1,359 @@
+# REQUIRES: asserts
+# RUN: llc -run-pass arm-cp-islands %s -o - | FileCheck %s
+#
+# This is a reduced test made to expose a bug in
+# ARMConstantIslandPass in Thumb1 mode, see PR36658.
+
+# Verify optimized JT code uses TBB instructions.
+# CHECK-LABEL: bb.7.entry:
+# CHECK: tTBB_JT %pc, killed %r2, %jump-table.1, 0
+# CHECK-LABEL: bb.8:
+# CHECK: JUMPTABLE_TBB 0, %jump-table.1, 44
+
+# CHECK-LABEL: bb.11.entry:
+# CHECK: %r1 = tMOVSr %r0, implicit-def dead %cpsr
+# CHECK: tTBB_JT %pc, killed %r2, %jump-table.0, 1
+# CHECK-LABEL: bb.12:
+# CHECK: JUMPTABLE_TBB 1, %jump-table.0, 44
+
+--- |
+ ; ModuleID = 'PR36658.ll'
+ source_filename = "PR36658.ll"
+ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv5e-none-linux-gnueabi"
+
+ declare i32 @foo1(...)
+
+ declare i32 @foo2(i32)
+
+ declare i32 @foo3(i32*)
+
+ ; Function Attrs: nounwind optsize
+ define internal fastcc i32 @foo4(i32* nocapture %ignore_ptr) #0 {
+ entry:
+ %call = tail call i32 @foo3(i32* undef)
+ switch i32 %call, label %sw.epilog [
+ i32 120, label %sw.bb
+ i32 48, label %sw.bb73
+ i32 49, label %sw.bb73
+ i32 50, label %sw.bb73
+ i32 51, label %sw.bb73
+ i32 52, label %sw.bb73
+ i32 53, label %sw.bb73
+ i32 54, label %sw.bb73
+ i32 55, label %sw.bb73
+ i32 92, label %cleanup
+ i32 39, label %cleanup
+ i32 34, label %cleanup
+ i32 10, label %sw.bb91
+ i32 110, label %sw.bb93
+ i32 116, label %sw.bb94
+ i32 114, label %sw.bb95
+ i32 102, label %sw.bb96
+ i32 98, label %sw.bb97
+ i32 97, label %sw.bb98
+ i32 118, label %sw.bb106
+ i32 101, label %sw.bb107
+ i32 69, label %sw.bb107
+ i32 63, label %cleanup
+ ]
+
+ sw.bb: ; preds = %entry
+ br label %while.cond
+
+ while.cond: ; preds = %while.cond, %sw.bb
+ %call5 = tail call i32 @foo3(i32* null)
+ br label %while.cond
+
+ sw.bb73: ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
+ %0 = and i32 %call, -8
+ %1 = icmp eq i32 %0, 48
+ br i1 %1, label %while.body83.preheader, label %while.end88
+
+ while.body83.preheader: ; preds = %sw.bb73
+ br label %while.body83
+
+ while.body83: ; preds = %while.body83.preheader, %while.body83
+ %call87 = tail call i32 @foo3(i32* null)
+ br label %while.body83
+
+ while.end88: ; preds = %sw.bb73
+ %call89 = tail call i32 @foo2(i32 %call)
+ unreachable
+
+ sw.bb91: ; preds = %entry
+ store i32 1, i32* %ignore_ptr, align 4
+ br label %cleanup
+
+ sw.bb93: ; preds = %entry
+ br label %cleanup
+
+ sw.bb94: ; preds = %entry
+ br label %cleanup
+
+ sw.bb95: ; preds = %entry
+ br label %cleanup
+
+ sw.bb96: ; preds = %entry
+ br label %cleanup
+
+ sw.bb97: ; preds = %entry
+ br label %cleanup
+
+ sw.bb98: ; preds = %entry
+ br label %cleanup
+
+ sw.bb106: ; preds = %entry
+ br label %cleanup
+
+ sw.bb107: ; preds = %entry, %entry
+ br i1 undef, label %cleanup, label %if.then109
+
+ if.then109: ; preds = %sw.bb107
+ %call110 = tail call i32 bitcast (i32 (...)* @foo1 to i32 (i8*, i32)*)(i8* undef, i32 %call)
+ unreachable
+
+ sw.epilog: ; preds = %entry
+ %call.off = add i32 %call, -32
+ unreachable
+
+ cleanup: ; preds = %sw.bb107, %sw.bb106, %sw.bb98, %sw.bb97, %sw.bb96, %sw.bb95, %sw.bb94, %sw.bb93, %sw.bb91, %entry, %entry, %entry, %entry
+ %retval.0 = phi i32 [ 11, %sw.bb106 ], [ 7, %sw.bb98 ], [ 8, %sw.bb97 ], [ 12, %sw.bb96 ], [ 13, %sw.bb95 ], [ 9, %sw.bb94 ], [ 10, %sw.bb93 ], [ 0, %sw.bb91 ], [ %call, %entry ], [ %call, %entry ], [ %call, %entry ], [ 27, %sw.bb107 ], [ %call, %entry ]
+ ret i32 %retval.0
+ }
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #1
+
+ attributes #0 = { nounwind optsize }
+ attributes #1 = { nounwind }
+
+...
+---
+name: foo4
+alignment: 1
+tracksRegLiveness: true
+liveins:
+ - { reg: '%r0' }
+frameInfo:
+ stackSize: 8
+ maxAlignment: 4
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 0
+stack:
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, stack-id: 0,
+ callee-saved-register: '%lr', callee-saved-restored: false }
+ - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, stack-id: 0,
+ callee-saved-register: '%r4' }
+jumpTable:
+ kind: inline
+ entries:
+ - id: 0
+ blocks: [ '%bb.28', '%bb.26', '%bb.26', '%bb.26', '%bb.26',
+ '%bb.24', '%bb.23', '%bb.26', '%bb.26', '%bb.12',
+ '%bb.22' ]
+ - id: 1
+ blocks: [ '%bb.19', '%bb.26', '%bb.26', '%bb.26', '%bb.21',
+ '%bb.26', '%bb.20', '%bb.26', '%bb.25', '%bb.26',
+ '%bb.15' ]
+body: |
+ bb.0.entry:
+ successors: %bb.1(0x42c8590b), %bb.9(0x3d37a6f5)
+ liveins: %r0, %r4, %lr
+
+ frame-setup tPUSH 14, %noreg, killed %r4, killed %lr, implicit-def %sp, implicit %sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset %lr, -4
+ frame-setup CFI_INSTRUCTION offset %r4, -8
+ %r4 = tMOVSr %r0, implicit-def dead %cpsr
+ tBL 14, %noreg, @foo3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit undef %r0, implicit-def %sp, implicit-def %r0
+ %r1 = tMOVSr %r0, implicit-def dead %cpsr
+ tCMPi8 %r0, 68, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.9, 12, killed %cpsr
+
+ bb.1.entry:
+ successors: %bb.2(0x20000000), %bb.7(0x60000000)
+ liveins: %r0, %r1, %r4
+
+ tCMPi8 renamable %r1, 47, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.2, 13, killed %cpsr
+
+ bb.7.entry:
+ successors: %bb.16(0x71c71c72), %bb.8(0x0e38e38e)
+ liveins: %r0, %r1
+
+ %r2 = tMOVSr %r1, implicit-def dead %cpsr
+ renamable %r2, dead %cpsr = tSUBi8 killed renamable %r2, 48, 14, %noreg
+ tCMPi8 killed renamable %r2, 8, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.8, 2, killed %cpsr
+
+ bb.16.sw.bb73:
+ successors: %bb.17(0x7fffffff), %bb.18(0x00000001)
+ liveins: %r0, %r1
+
+ renamable %r2, dead %cpsr = tMOVi8 7, 14, %noreg
+ renamable %r1, dead %cpsr = tBIC killed renamable %r1, killed renamable %r2, 14, %noreg
+ tCMPi8 killed renamable %r1, 48, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.18, 1, killed %cpsr
+
+ bb.17.while.body83:
+ renamable %r0, dead %cpsr = tMOVi8 0, 14, %noreg
+ tBL 14, %noreg, @foo3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp, implicit-def dead %r0
+ tB %bb.17, 14, %noreg
+
+ bb.9.entry:
+ successors: %bb.10(0x45d1745d), %bb.29(0x3a2e8ba3)
+ liveins: %r0, %r1
+
+ %r2 = tMOVSr %r1, implicit-def dead %cpsr
+ renamable %r2, dead %cpsr = tSUBi8 killed renamable %r2, 92, 14, %noreg
+ tCMPi8 renamable %r2, 10, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.29, 9, killed %cpsr
+
+ bb.10.entry:
+ successors: %bb.11(0x15555555), %bb.14(0x6aaaaaab)
+ liveins: %r0, %r1
+
+ %r2 = tMOVSr %r1, implicit-def dead %cpsr
+ renamable %r2, dead %cpsr = tSUBi8 killed renamable %r2, 110, 14, %noreg
+ tCMPi8 renamable %r2, 10, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.11, 8, killed %cpsr
+
+ bb.14.entry:
+ successors: %bb.19(0x1999999a), %bb.26(0x00000000), %bb.21(0x1999999a), %bb.20(0x1999999a), %bb.25(0x1999999a), %bb.15(0x1999999a)
+ liveins: %r2
+
+ renamable %r0, dead %cpsr = tLSLri killed renamable %r2, 2, 14, %noreg
+ renamable %r1 = tLEApcrelJT %jump-table.1, 14, %noreg
+ renamable %r0 = tLDRr killed renamable %r1, killed renamable %r0, 14, %noreg :: (load 4 from jump-table)
+ tBR_JTr killed renamable %r0, %jump-table.1
+
+ bb.19.sw.bb93:
+ renamable %r1, dead %cpsr = tMOVi8 10, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.15.while.cond:
+ renamable %r0, dead %cpsr = tMOVi8 0, 14, %noreg
+ tBL 14, %noreg, @foo3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp, implicit-def dead %r0
+ tB %bb.15, 14, %noreg
+
+ bb.29.entry:
+ successors: %bb.28(0x1999999a), %bb.26(0x00000000), %bb.24(0x1999999a), %bb.23(0x1999999a), %bb.12(0x1999999a), %bb.22(0x1999999a)
+ liveins: %r0, %r2
+
+ renamable %r1, dead %cpsr = tLSLri killed renamable %r2, 2, 14, %noreg
+ renamable %r2 = tLEApcrelJT %jump-table.0, 14, %noreg
+ renamable %r2 = tLDRr killed renamable %r2, killed renamable %r1, 14, %noreg :: (load 4 from jump-table)
+ %r1 = tMOVSr %r0, implicit-def dead %cpsr
+ tBR_JTr killed renamable %r2, %jump-table.0
+
+ bb.24.sw.bb98:
+ renamable %r1, dead %cpsr = tMOVi8 7, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.2.entry:
+ successors: %bb.27(0x2aaaaaab), %bb.3(0x55555555)
+ liveins: %r0, %r1, %r4
+
+ tCMPi8 renamable %r1, 10, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.27, 0, killed %cpsr
+
+ bb.3.entry:
+ successors: %bb.4, %bb.5
+ liveins: %r0, %r1
+
+ tCMPi8 renamable %r1, 34, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.5, 1, killed %cpsr
+
+ bb.4:
+ liveins: %r0
+
+ %r1 = tMOVSr killed %r0, implicit-def dead %cpsr
+ tB %bb.28, 14, %noreg
+
+ bb.25.sw.bb106:
+ renamable %r1, dead %cpsr = tMOVi8 11, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.23.sw.bb97:
+ renamable %r1, dead %cpsr = tMOVi8 8, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.27.sw.bb91:
+ liveins: %r4
+
+ renamable %r0, dead %cpsr = tMOVi8 1, 14, %noreg
+ tSTRi killed renamable %r0, killed renamable %r4, 0, 14, %noreg :: (store 4 into %ir.ignore_ptr)
+ renamable %r1, dead %cpsr = tMOVi8 0, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.21.sw.bb95:
+ renamable %r1, dead %cpsr = tMOVi8 13, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.20.sw.bb94:
+ renamable %r1, dead %cpsr = tMOVi8 9, 14, %noreg
+ tB %bb.28, 14, %noreg
+
+ bb.5.entry:
+ liveins: %r0, %r1
+
+ tCMPi8 killed renamable %r1, 39, 14, %noreg, implicit-def %cpsr
+ tB %bb.6, 14, %noreg
+
+ bb.11.entry:
+ successors: %bb.12(0x80000000), %bb.26(0x00000000)
+ liveins: %r0, %r1
+
+ tCMPi8 killed renamable %r1, 69, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.26, 1, killed %cpsr
+
+ bb.12.sw.bb107:
+ successors: %bb.28(0x7fffffff), %bb.13(0x00000001)
+ liveins: %r0
+
+ renamable %r1, dead %cpsr = tMOVi8 27, 14, %noreg
+ renamable %r2, dead %cpsr = tMOVi8 0, 14, %noreg
+ tCMPi8 killed renamable %r2, 0, 14, %noreg, implicit-def %cpsr
+ tBcc %bb.28, 1, killed %cpsr
+
+ bb.13.if.then109:
+ successors:
+ liveins: %r0
+
+ %r1 = tMOVSr killed %r0, implicit-def dead %cpsr
+ tBL 14, %noreg, @foo1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit undef %r0, implicit %r1, implicit-def %sp, implicit-def dead %r0
+
+ bb.8.entry:
+ liveins: %r0, %r1
+
+ tCMPi8 killed renamable %r1, 63, 14, %noreg, implicit-def %cpsr
+
+ bb.6.entry:
+ successors: %bb.28(0x80000000), %bb.26(0x00000000)
+ liveins: %cpsr, %r0
+
+ tPUSH 14, %noreg, killed %r0, implicit-def %sp, implicit %sp
+ tPOP 14, %noreg, def %r1, implicit-def %sp, implicit %sp
+ tBcc %bb.28, 0, killed %cpsr
+
+ bb.26.sw.epilog:
+ successors:
+
+
+ bb.22.sw.bb96:
+ renamable %r1, dead %cpsr = tMOVi8 12, 14, %noreg
+
+ bb.28.cleanup:
+ liveins: %r1
+
+ %r0 = tMOVSr killed %r1, implicit-def dead %cpsr
+ tPOP_RET 14, %noreg, def %r4, def %pc, implicit-def %sp, implicit %sp, implicit %r0
+
+ bb.18.while.end88:
+ liveins: %r0
+
+ tBL 14, %noreg, @foo2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp, implicit-def dead %r0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 0ef7c956d4934..3d41d759409d5 100644
--- a/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -10,16 +10,10 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
;
; X32-LABEL: test_add_i64:
; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %ebp, -8
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: .cfi_def_cfa_register %ebp
-; X32-NEXT: movl 16(%ebp), %eax
-; X32-NEXT: movl 20(%ebp), %edx
-; X32-NEXT: addl 8(%ebp), %eax
-; X32-NEXT: adcl 12(%ebp), %edx
-; X32-NEXT: popl %ebp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: retl
%ret = add i64 %arg1, %arg2
ret i64 %ret
diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll
index 3a720a5288a27..53707cb31380e 100644
--- a/test/CodeGen/X86/O0-pipeline.ll
+++ b/test/CodeGen/X86/O0-pipeline.ll
@@ -37,6 +37,8 @@
; CHECK-NEXT: X86 PIC Global Base Reg Initialization
; CHECK-NEXT: Expand ISel Pseudo-instructions
; CHECK-NEXT: Local Stack Slot Allocation
+; CHECK-NEXT: MachineDominator Tree Construction
+; CHECK-NEXT: X86 EFLAGS copy lowering
; CHECK-NEXT: X86 WinAlloca Expander
; CHECK-NEXT: Eliminate PHI nodes for register allocation
; CHECK-NEXT: Two-Address instruction pass
diff --git a/test/CodeGen/X86/clobber-fi0.ll b/test/CodeGen/X86/clobber-fi0.ll
deleted file mode 100644
index b69b18531601a..0000000000000
--- a/test/CodeGen/X86/clobber-fi0.ll
+++ /dev/null
@@ -1,37 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mcpu=generic -mtriple=x86_64-linux | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.7.0"
-
-; In the code below we need to copy the EFLAGS because of scheduling constraints.
-; When copying the EFLAGS we need to write to the stack with push/pop. This forces
-; us to emit the prolog.
-
-; CHECK: main
-; CHECK: subq{{.*}}rsp
-; CHECK: ret
-define i32 @main(i32 %arg, i8** %arg1) nounwind {
-bb:
- %tmp = alloca i32, align 4 ; [#uses=3 type=i32*]
- %tmp2 = alloca i32, align 4 ; [#uses=3 type=i32*]
- %tmp3 = alloca i32 ; [#uses=1 type=i32*]
- store volatile i32 1, i32* %tmp, align 4
- store volatile i32 1, i32* %tmp2, align 4
- br label %bb4
-
-bb4: ; preds = %bb4, %bb
- %tmp6 = load volatile i32, i32* %tmp2, align 4 ; [#uses=1 type=i32]
- %tmp7 = add i32 %tmp6, -1 ; [#uses=2 type=i32]
- store volatile i32 %tmp7, i32* %tmp2, align 4
- %tmp8 = icmp eq i32 %tmp7, 0 ; [#uses=1 type=i1]
- %tmp9 = load volatile i32, i32* %tmp ; [#uses=1 type=i32]
- %tmp10 = add i32 %tmp9, -1 ; [#uses=1 type=i32]
- store volatile i32 %tmp10, i32* %tmp3
- br i1 %tmp8, label %bb11, label %bb4
-
-bb11: ; preds = %bb4
- %tmp12 = load volatile i32, i32* %tmp, align 4 ; [#uses=1 type=i32]
- ret i32 %tmp12
-}
-
-
diff --git a/test/CodeGen/X86/cmpxchg-clobber-flags.ll b/test/CodeGen/X86/cmpxchg-clobber-flags.ll
index 8d289fa9fb03f..827aba78699c4 100644
--- a/test/CodeGen/X86/cmpxchg-clobber-flags.ll
+++ b/test/CodeGen/X86/cmpxchg-clobber-flags.ll
@@ -1,100 +1,110 @@
-; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s -check-prefix=i386
-; RUN: llc -mtriple=i386-linux-gnu -pre-RA-sched=fast %s -o - | FileCheck %s -check-prefix=i386f
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=i386-linux-gnu -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=32-ALL,32-GOOD-RA
+; RUN: llc -mtriple=i386-linux-gnu -verify-machineinstrs -pre-RA-sched=fast %s -o - | FileCheck %s --check-prefixes=32-ALL,32-FAST-RA
-; RUN: llc -mtriple=x86_64-linux-gnu %s -o - | FileCheck %s -check-prefix=x8664
-; RUN: llc -mtriple=x86_64-linux-gnu -pre-RA-sched=fast %s -o - | FileCheck %s -check-prefix=x8664
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sahf %s -o - | FileCheck %s -check-prefix=x8664-sahf
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sahf -pre-RA-sched=fast %s -o - | FileCheck %s -check-prefix=x8664-sahf
-; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=corei7 %s -o - | FileCheck %s -check-prefix=x8664-sahf
-
-; TODO: Reenable verify-machineinstr once the if (!AXDead) // FIXME
-; in X86InstrInfo::copyPhysReg() is resolved.
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=64-ALL,64-GOOD-RA
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs -pre-RA-sched=fast %s -o - | FileCheck %s --check-prefixes=64-ALL,64-FAST-RA
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs -mattr=+sahf %s -o - | FileCheck %s --check-prefixes=64-ALL,64-GOOD-RA-SAHF
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs -mattr=+sahf -pre-RA-sched=fast %s -o - | FileCheck %s --check-prefixes=64-ALL,64-FAST-RA-SAHF
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs -mcpu=corei7 %s -o - | FileCheck %s --check-prefixes=64-ALL,64-GOOD-RA-SAHF
declare i32 @foo()
declare i32 @bar(i64)
-define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) {
-; i386-LABEL: test_intervening_call:
-; i386: cmpxchg8b
-; i386-NEXT: pushl %eax
-; i386-NEXT: seto %al
-; i386-NEXT: lahf
-; i386-NEXT: movl %eax, [[FLAGS:%.*]]
-; i386-NEXT: popl %eax
-; i386-NEXT: subl $8, %esp
-; i386-NEXT: pushl %edx
-; i386-NEXT: pushl %eax
-; i386-NEXT: calll bar
-; i386-NEXT: addl $16, %esp
-; i386-NEXT: movl [[FLAGS]], %eax
-; i386-NEXT: addb $127, %al
-; i386-NEXT: sahf
-; i386-NEXT: jne
-
-; In the following case we get a long chain of EFLAGS save/restore due to
-; a sequence of:
+; In the following case when using fast scheduling we get a long chain of
+; EFLAGS save/restore due to a sequence of:
; cmpxchg8b (implicit-def eflags)
; eax = copy eflags
; adjcallstackdown32
; ...
; use of eax
; During PEI the adjcallstackdown32 is replaced with the subl which
-; clobbers eflags, effectively interfering in the liveness interval.
-; Is this a case we care about? Maybe no, considering this issue
-; happens with the fast pre-regalloc scheduler enforced. A more
-; performant scheduler would move the adjcallstackdown32 out of the
-; eflags liveness interval.
-
-; i386f-LABEL: test_intervening_call:
-; i386f: cmpxchg8b
-; i386f-NEXT: pushl %eax
-; i386f-NEXT: seto %al
-; i386f-NEXT: lahf
-; i386f-NEXT: movl %eax, [[FLAGS:%.*]]
-; i386f-NEXT: popl %eax
-; i386f-NEXT: subl $8, %esp
-; i386f-NEXT: pushl %eax
-; i386f-NEXT: movl %ecx, %eax
-; i386f-NEXT: addb $127, %al
-; i386f-NEXT: sahf
-; i386f-NEXT: popl %eax
-; i386f-NEXT: pushl %eax
-; i386f-NEXT: seto %al
-; i386f-NEXT: lahf
-; i386f-NEXT: movl %eax, %esi
-; i386f-NEXT: popl %eax
-; i386f-NEXT: pushl %edx
-; i386f-NEXT: pushl %eax
-; i386f-NEXT: calll bar
-; i386f-NEXT: addl $16, %esp
-; i386f-NEXT: movl %esi, %eax
-; i386f-NEXT: addb $127, %al
-
-; x8664-LABEL: test_intervening_call:
-; x8664: cmpxchgq
-; x8664: pushfq
-; x8664-NEXT: popq [[FLAGS:%.*]]
-; x8664-NEXT: movq %rax, %rdi
-; x8664-NEXT: callq bar
-; x8664-NEXT: pushq [[FLAGS]]
-; x8664-NEXT: popfq
-; x8664-NEXT: jne
-
-; x8664-sahf-LABEL: test_intervening_call:
-; x8664-sahf: cmpxchgq
-; x8664-sahf: pushq %rax
-; x8664-sahf-NEXT: seto %al
-; x8664-sahf-NEXT: lahf
-; x8664-sahf-NEXT: movq %rax, [[FLAGS:%.*]]
-; x8664-sahf-NEXT: popq %rax
-; x8664-sahf-NEXT: movq %rax, %rdi
-; x8664-sahf-NEXT: callq bar
-; RAX is dead, no need to push and pop it.
-; x8664-sahf-NEXT: movq [[FLAGS]], %rax
-; x8664-sahf-NEXT: addb $127, %al
-; x8664-sahf-NEXT: sahf
-; x8664-sahf-NEXT: jne
-
+; clobbers eflags, effectively interfering in the liveness interval. However,
+; we then promote these copies into independent conditions in GPRs that avoids
+; repeated saving and restoring logic and can be trivially managed by the
+; register allocator.
+define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
+; 32-GOOD-RA-LABEL: test_intervening_call:
+; 32-GOOD-RA: # %bb.0: # %entry
+; 32-GOOD-RA-NEXT: pushl %ebx
+; 32-GOOD-RA-NEXT: pushl %esi
+; 32-GOOD-RA-NEXT: pushl %eax
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %edx
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %esi
+; 32-GOOD-RA-NEXT: lock cmpxchg8b (%esi)
+; 32-GOOD-RA-NEXT: setne %bl
+; 32-GOOD-RA-NEXT: subl $8, %esp
+; 32-GOOD-RA-NEXT: pushl %edx
+; 32-GOOD-RA-NEXT: pushl %eax
+; 32-GOOD-RA-NEXT: calll bar
+; 32-GOOD-RA-NEXT: addl $16, %esp
+; 32-GOOD-RA-NEXT: testb %bl, %bl
+; 32-GOOD-RA-NEXT: jne .LBB0_3
+; 32-GOOD-RA-NEXT: # %bb.1: # %t
+; 32-GOOD-RA-NEXT: movl $42, %eax
+; 32-GOOD-RA-NEXT: jmp .LBB0_2
+; 32-GOOD-RA-NEXT: .LBB0_3: # %f
+; 32-GOOD-RA-NEXT: xorl %eax, %eax
+; 32-GOOD-RA-NEXT: .LBB0_2: # %t
+; 32-GOOD-RA-NEXT: xorl %edx, %edx
+; 32-GOOD-RA-NEXT: addl $4, %esp
+; 32-GOOD-RA-NEXT: popl %esi
+; 32-GOOD-RA-NEXT: popl %ebx
+; 32-GOOD-RA-NEXT: retl
+;
+; 32-FAST-RA-LABEL: test_intervening_call:
+; 32-FAST-RA: # %bb.0: # %entry
+; 32-FAST-RA-NEXT: pushl %ebx
+; 32-FAST-RA-NEXT: pushl %esi
+; 32-FAST-RA-NEXT: pushl %eax
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %esi
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %edx
+; 32-FAST-RA-NEXT: lock cmpxchg8b (%esi)
+; 32-FAST-RA-NEXT: setne %bl
+; 32-FAST-RA-NEXT: subl $8, %esp
+; 32-FAST-RA-NEXT: pushl %edx
+; 32-FAST-RA-NEXT: pushl %eax
+; 32-FAST-RA-NEXT: calll bar
+; 32-FAST-RA-NEXT: addl $16, %esp
+; 32-FAST-RA-NEXT: testb %bl, %bl
+; 32-FAST-RA-NEXT: jne .LBB0_3
+; 32-FAST-RA-NEXT: # %bb.1: # %t
+; 32-FAST-RA-NEXT: movl $42, %eax
+; 32-FAST-RA-NEXT: jmp .LBB0_2
+; 32-FAST-RA-NEXT: .LBB0_3: # %f
+; 32-FAST-RA-NEXT: xorl %eax, %eax
+; 32-FAST-RA-NEXT: .LBB0_2: # %t
+; 32-FAST-RA-NEXT: xorl %edx, %edx
+; 32-FAST-RA-NEXT: addl $4, %esp
+; 32-FAST-RA-NEXT: popl %esi
+; 32-FAST-RA-NEXT: popl %ebx
+; 32-FAST-RA-NEXT: retl
+;
+; 64-ALL-LABEL: test_intervening_call:
+; 64-ALL: # %bb.0: # %entry
+; 64-ALL-NEXT: pushq %rbx
+; 64-ALL-NEXT: movq %rsi, %rax
+; 64-ALL-NEXT: lock cmpxchgq %rdx, (%rdi)
+; 64-ALL-NEXT: setne %bl
+; 64-ALL-NEXT: movq %rax, %rdi
+; 64-ALL-NEXT: callq bar
+; 64-ALL-NEXT: testb %bl, %bl
+; 64-ALL-NEXT: jne .LBB0_2
+; 64-ALL-NEXT: # %bb.1: # %t
+; 64-ALL-NEXT: movl $42, %eax
+; 64-ALL-NEXT: popq %rbx
+; 64-ALL-NEXT: retq
+; 64-ALL-NEXT: .LBB0_2: # %f
+; 64-ALL-NEXT: xorl %eax, %eax
+; 64-ALL-NEXT: popq %rbx
+; 64-ALL-NEXT: retq
+entry:
%cx = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
%v = extractvalue { i64, i1 } %cx, 0
%p = extractvalue { i64, i1 } %cx, 1
@@ -109,23 +119,62 @@ f:
}
; Interesting in producing a clobber without any function calls.
-define i32 @test_control_flow(i32* %p, i32 %i, i32 %j) {
-; i386-LABEL: test_control_flow:
-; i386: cmpxchg
-; i386-NEXT: jne
-
-; i386f-LABEL: test_control_flow:
-; i386f: cmpxchg
-; i386f-NEXT: jne
-
-; x8664-LABEL: test_control_flow:
-; x8664: cmpxchg
-; x8664-NEXT: jne
-
-; x8664-sahf-LABEL: test_control_flow:
-; x8664-sahf: cmpxchg
-; x8664-sahf-NEXT: jne
-
+define i32 @test_control_flow(i32* %p, i32 %i, i32 %j) nounwind {
+; 32-ALL-LABEL: test_control_flow:
+; 32-ALL: # %bb.0: # %entry
+; 32-ALL-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-ALL-NEXT: cmpl {{[0-9]+}}(%esp), %eax
+; 32-ALL-NEXT: jle .LBB1_6
+; 32-ALL-NEXT: # %bb.1: # %loop_start
+; 32-ALL-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; 32-ALL-NEXT: .p2align 4, 0x90
+; 32-ALL-NEXT: .LBB1_2: # %while.condthread-pre-split.i
+; 32-ALL-NEXT: # =>This Loop Header: Depth=1
+; 32-ALL-NEXT: # Child Loop BB1_3 Depth 2
+; 32-ALL-NEXT: movl (%ecx), %edx
+; 32-ALL-NEXT: .p2align 4, 0x90
+; 32-ALL-NEXT: .LBB1_3: # %while.cond.i
+; 32-ALL-NEXT: # Parent Loop BB1_2 Depth=1
+; 32-ALL-NEXT: # => This Inner Loop Header: Depth=2
+; 32-ALL-NEXT: movl %edx, %eax
+; 32-ALL-NEXT: xorl %edx, %edx
+; 32-ALL-NEXT: testl %eax, %eax
+; 32-ALL-NEXT: je .LBB1_3
+; 32-ALL-NEXT: # %bb.4: # %while.body.i
+; 32-ALL-NEXT: # in Loop: Header=BB1_2 Depth=1
+; 32-ALL-NEXT: lock cmpxchgl %eax, (%ecx)
+; 32-ALL-NEXT: jne .LBB1_2
+; 32-ALL-NEXT: # %bb.5:
+; 32-ALL-NEXT: xorl %eax, %eax
+; 32-ALL-NEXT: .LBB1_6: # %cond.end
+; 32-ALL-NEXT: retl
+;
+; 64-ALL-LABEL: test_control_flow:
+; 64-ALL: # %bb.0: # %entry
+; 64-ALL-NEXT: cmpl %edx, %esi
+; 64-ALL-NEXT: jle .LBB1_5
+; 64-ALL-NEXT: .p2align 4, 0x90
+; 64-ALL-NEXT: .LBB1_1: # %while.condthread-pre-split.i
+; 64-ALL-NEXT: # =>This Loop Header: Depth=1
+; 64-ALL-NEXT: # Child Loop BB1_2 Depth 2
+; 64-ALL-NEXT: movl (%rdi), %ecx
+; 64-ALL-NEXT: .p2align 4, 0x90
+; 64-ALL-NEXT: .LBB1_2: # %while.cond.i
+; 64-ALL-NEXT: # Parent Loop BB1_1 Depth=1
+; 64-ALL-NEXT: # => This Inner Loop Header: Depth=2
+; 64-ALL-NEXT: movl %ecx, %eax
+; 64-ALL-NEXT: xorl %ecx, %ecx
+; 64-ALL-NEXT: testl %eax, %eax
+; 64-ALL-NEXT: je .LBB1_2
+; 64-ALL-NEXT: # %bb.3: # %while.body.i
+; 64-ALL-NEXT: # in Loop: Header=BB1_1 Depth=1
+; 64-ALL-NEXT: lock cmpxchgl %eax, (%rdi)
+; 64-ALL-NEXT: jne .LBB1_1
+; 64-ALL-NEXT: # %bb.4:
+; 64-ALL-NEXT: xorl %esi, %esi
+; 64-ALL-NEXT: .LBB1_5: # %cond.end
+; 64-ALL-NEXT: movl %esi, %eax
+; 64-ALL-NEXT: retq
entry:
%cmp = icmp sgt i32 %i, %j
br i1 %cmp, label %loop_start, label %cond.end
@@ -158,52 +207,68 @@ cond.end:
; This one is an interesting case because CMOV doesn't have a chain
; operand. Naive attempts to limit cmpxchg EFLAGS use are likely to fail here.
-define i32 @test_feed_cmov(i32* %addr, i32 %desired, i32 %new) {
-; i386-LABEL: test_feed_cmov:
-; i386: cmpxchgl
-; i386-NEXT: seto %al
-; i386-NEXT: lahf
-; i386-NEXT: movl %eax, [[FLAGS:%.*]]
-; i386-NEXT: calll foo
-; i386-NEXT: pushl %eax
-; i386-NEXT: movl [[FLAGS]], %eax
-; i386-NEXT: addb $127, %al
-; i386-NEXT: sahf
-; i386-NEXT: popl %eax
-
-; i386f-LABEL: test_feed_cmov:
-; i386f: cmpxchgl
-; i386f-NEXT: seto %al
-; i386f-NEXT: lahf
-; i386f-NEXT: movl %eax, [[FLAGS:%.*]]
-; i386f-NEXT: calll foo
-; i386f-NEXT: pushl %eax
-; i386f-NEXT: movl [[FLAGS]], %eax
-; i386f-NEXT: addb $127, %al
-; i386f-NEXT: sahf
-; i386f-NEXT: popl %eax
-
-; x8664-LABEL: test_feed_cmov:
-; x8664: cmpxchg
-; x8664: pushfq
-; x8664-NEXT: popq [[FLAGS:%.*]]
-; x8664-NEXT: callq foo
-; x8664-NEXT: pushq [[FLAGS]]
-; x8664-NEXT: popfq
-
-; x8664-sahf-LABEL: test_feed_cmov:
-; x8664-sahf: cmpxchgl
-; RAX is dead, do not push or pop it.
-; x8664-sahf-NEXT: seto %al
-; x8664-sahf-NEXT: lahf
-; x8664-sahf-NEXT: movq %rax, [[FLAGS:%.*]]
-; x8664-sahf-NEXT: callq foo
-; x8664-sahf-NEXT: pushq %rax
-; x8664-sahf-NEXT: movq [[FLAGS]], %rax
-; x8664-sahf-NEXT: addb $127, %al
-; x8664-sahf-NEXT: sahf
-; x8664-sahf-NEXT: popq %rax
-
+define i32 @test_feed_cmov(i32* %addr, i32 %desired, i32 %new) nounwind {
+; 32-GOOD-RA-LABEL: test_feed_cmov:
+; 32-GOOD-RA: # %bb.0: # %entry
+; 32-GOOD-RA-NEXT: pushl %ebx
+; 32-GOOD-RA-NEXT: pushl %esi
+; 32-GOOD-RA-NEXT: pushl %eax
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %esi
+; 32-GOOD-RA-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; 32-GOOD-RA-NEXT: lock cmpxchgl %esi, (%ecx)
+; 32-GOOD-RA-NEXT: sete %bl
+; 32-GOOD-RA-NEXT: calll foo
+; 32-GOOD-RA-NEXT: testb %bl, %bl
+; 32-GOOD-RA-NEXT: jne .LBB2_2
+; 32-GOOD-RA-NEXT: # %bb.1: # %entry
+; 32-GOOD-RA-NEXT: movl %eax, %esi
+; 32-GOOD-RA-NEXT: .LBB2_2: # %entry
+; 32-GOOD-RA-NEXT: movl %esi, %eax
+; 32-GOOD-RA-NEXT: addl $4, %esp
+; 32-GOOD-RA-NEXT: popl %esi
+; 32-GOOD-RA-NEXT: popl %ebx
+; 32-GOOD-RA-NEXT: retl
+;
+; 32-FAST-RA-LABEL: test_feed_cmov:
+; 32-FAST-RA: # %bb.0: # %entry
+; 32-FAST-RA-NEXT: pushl %ebx
+; 32-FAST-RA-NEXT: pushl %esi
+; 32-FAST-RA-NEXT: pushl %eax
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %esi
+; 32-FAST-RA-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-FAST-RA-NEXT: lock cmpxchgl %esi, (%ecx)
+; 32-FAST-RA-NEXT: sete %bl
+; 32-FAST-RA-NEXT: calll foo
+; 32-FAST-RA-NEXT: testb %bl, %bl
+; 32-FAST-RA-NEXT: jne .LBB2_2
+; 32-FAST-RA-NEXT: # %bb.1: # %entry
+; 32-FAST-RA-NEXT: movl %eax, %esi
+; 32-FAST-RA-NEXT: .LBB2_2: # %entry
+; 32-FAST-RA-NEXT: movl %esi, %eax
+; 32-FAST-RA-NEXT: addl $4, %esp
+; 32-FAST-RA-NEXT: popl %esi
+; 32-FAST-RA-NEXT: popl %ebx
+; 32-FAST-RA-NEXT: retl
+;
+; 64-ALL-LABEL: test_feed_cmov:
+; 64-ALL: # %bb.0: # %entry
+; 64-ALL-NEXT: pushq %rbp
+; 64-ALL-NEXT: pushq %rbx
+; 64-ALL-NEXT: pushq %rax
+; 64-ALL-NEXT: movl %edx, %ebx
+; 64-ALL-NEXT: movl %esi, %eax
+; 64-ALL-NEXT: lock cmpxchgl %ebx, (%rdi)
+; 64-ALL-NEXT: sete %bpl
+; 64-ALL-NEXT: callq foo
+; 64-ALL-NEXT: testb %bpl, %bpl
+; 64-ALL-NEXT: cmovnel %ebx, %eax
+; 64-ALL-NEXT: addq $8, %rsp
+; 64-ALL-NEXT: popq %rbx
+; 64-ALL-NEXT: popq %rbp
+; 64-ALL-NEXT: retq
+entry:
%res = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst seq_cst
%success = extractvalue { i32, i1 } %res, 1
diff --git a/test/CodeGen/X86/copy-eflags.ll b/test/CodeGen/X86/copy-eflags.ll
index d98d8a7839b1d..1f44559368a75 100644
--- a/test/CodeGen/X86/copy-eflags.ll
+++ b/test/CodeGen/X86/copy-eflags.ll
@@ -1,6 +1,8 @@
-; RUN: llc -o - %s | FileCheck %s
-; This tests for the problem originally reported in http://llvm.org/PR25951
-target triple = "i686-unknown-linux-gnu"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -o - -mtriple=i686-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X32
+; RUN: llc -o - -mtriple=x86_64-unknown-unknown %s | FileCheck %s --check-prefixes=ALL,X64
+;
+; Test patterns that require preserving and restoring flags.
@b = common global i8 0, align 1
@c = common global i32 0, align 4
@@ -8,13 +10,61 @@ target triple = "i686-unknown-linux-gnu"
@d = common global i8 0, align 1
@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
-; CHECK-LABEL: func:
-; This tests whether eax is properly saved/restored around the
-; lahf/sahf instruction sequences. We make mem op volatile to prevent
-; their reordering to avoid spills.
+declare void @external(i32)
-
-define i32 @func() {
+; A test that re-uses flags in interesting ways due to volatile accesses.
+; Specifically, the first increment's flags are reused for the branch despite
+; being clobbered by the second increment.
+define i32 @test1() nounwind {
+; X32-LABEL: test1:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movb b, %cl
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: incb %al
+; X32-NEXT: movb %al, b
+; X32-NEXT: incl c
+; X32-NEXT: sete %dl
+; X32-NEXT: movb a, %ah
+; X32-NEXT: movb %ah, %ch
+; X32-NEXT: incb %ch
+; X32-NEXT: cmpb %cl, %ah
+; X32-NEXT: sete d
+; X32-NEXT: movb %ch, a
+; X32-NEXT: testb %dl, %dl
+; X32-NEXT: jne .LBB0_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: movsbl %al, %eax
+; X32-NEXT: pushl %eax
+; X32-NEXT: calll external
+; X32-NEXT: addl $4, %esp
+; X32-NEXT: .LBB0_2: # %if.end
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movb {{.*}}(%rip), %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: incb %al
+; X64-NEXT: movb %al, {{.*}}(%rip)
+; X64-NEXT: incl {{.*}}(%rip)
+; X64-NEXT: sete %sil
+; X64-NEXT: movb {{.*}}(%rip), %cl
+; X64-NEXT: movl %ecx, %edx
+; X64-NEXT: incb %dl
+; X64-NEXT: cmpb %dil, %cl
+; X64-NEXT: sete {{.*}}(%rip)
+; X64-NEXT: movb %dl, {{.*}}(%rip)
+; X64-NEXT: testb %sil, %sil
+; X64-NEXT: jne .LBB0_2
+; X64-NEXT: # %bb.1: # %if.then
+; X64-NEXT: pushq %rax
+; X64-NEXT: movsbl %al, %edi
+; X64-NEXT: callq external
+; X64-NEXT: addq $8, %rsp
+; X64-NEXT: .LBB0_2: # %if.end
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
entry:
%bval = load i8, i8* @b
%inc = add i8 %bval, 1
@@ -25,33 +75,290 @@ entry:
%aval = load volatile i8, i8* @a
%inc2 = add i8 %aval, 1
store volatile i8 %inc2, i8* @a
-; Copy flags produced by the incb of %inc1 to a register, need to save+restore
-; eax around it. The flags will be reused by %tobool.
-; CHECK: pushl %eax
-; CHECK: seto %al
-; CHECK: lahf
-; CHECK: movl %eax, [[REG:%[a-z]+]]
-; CHECK: popl %eax
%cmp = icmp eq i8 %aval, %bval
%conv5 = zext i1 %cmp to i8
store i8 %conv5, i8* @d
%tobool = icmp eq i32 %inc1, 0
-; We restore flags with an 'addb, sahf' sequence, need to save+restore eax
-; around it.
-; CHECK: pushl %eax
-; CHECK: movl [[REG]], %eax
-; CHECK: addb $127, %al
-; CHECK: sahf
-; CHECK: popl %eax
br i1 %tobool, label %if.end, label %if.then
if.then:
%conv6 = sext i8 %inc to i32
- %call = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %conv6)
+ call void @external(i32 %conv6)
br label %if.end
if.end:
ret i32 0
}
-declare i32 @printf(i8* nocapture readonly, ...)
+; Preserve increment flags across a call.
+define i32 @test2(i32* %ptr) nounwind {
+; X32-LABEL: test2:
+; X32: # %bb.0: # %entry
+; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: incl (%eax)
+; X32-NEXT: setne %bl
+; X32-NEXT: pushl $42
+; X32-NEXT: calll external
+; X32-NEXT: addl $4, %esp
+; X32-NEXT: testb %bl, %bl
+; X32-NEXT: je .LBB1_1
+; X32-NEXT: # %bb.2: # %else
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: popl %ebx
+; X32-NEXT: retl
+; X32-NEXT: .LBB1_1: # %then
+; X32-NEXT: movl $64, %eax
+; X32-NEXT: popl %ebx
+; X32-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rbx
+; X64-NEXT: incl (%rdi)
+; X64-NEXT: setne %bl
+; X64-NEXT: movl $42, %edi
+; X64-NEXT: callq external
+; X64-NEXT: testb %bl, %bl
+; X64-NEXT: je .LBB1_1
+; X64-NEXT: # %bb.2: # %else
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+; X64-NEXT: .LBB1_1: # %then
+; X64-NEXT: movl $64, %eax
+; X64-NEXT: popq %rbx
+; X64-NEXT: retq
+entry:
+ %val = load i32, i32* %ptr
+ %inc = add i32 %val, 1
+ store i32 %inc, i32* %ptr
+ %cmp = icmp eq i32 %inc, 0
+ call void @external(i32 42)
+ br i1 %cmp, label %then, label %else
+
+then:
+ ret i32 64
+
+else:
+ ret i32 0
+}
+
+declare void @external_a()
+declare void @external_b()
+
+; This lowers to a conditional tail call instead of a conditional branch. This
+; is tricky because we can only do this from a leaf function, and so we have to
+; use volatile stores similar to test1 to force the save and restore of
+; a condition without calling another function. We then set up subsequent calls
+; in tail position.
+define void @test_tail_call(i32* %ptr) nounwind optsize {
+; X32-LABEL: test_tail_call:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: incl (%eax)
+; X32-NEXT: setne %al
+; X32-NEXT: incb a
+; X32-NEXT: sete d
+; X32-NEXT: testb %al, %al
+; X32-NEXT: jne external_b # TAILCALL
+; X32-NEXT: # %bb.1: # %then
+; X32-NEXT: jmp external_a # TAILCALL
+;
+; X64-LABEL: test_tail_call:
+; X64: # %bb.0: # %entry
+; X64-NEXT: incl (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: incb {{.*}}(%rip)
+; X64-NEXT: sete {{.*}}(%rip)
+; X64-NEXT: testb %al, %al
+; X64-NEXT: jne external_b # TAILCALL
+; X64-NEXT: # %bb.1: # %then
+; X64-NEXT: jmp external_a # TAILCALL
+entry:
+ %val = load i32, i32* %ptr
+ %inc = add i32 %val, 1
+ store i32 %inc, i32* %ptr
+ %cmp = icmp eq i32 %inc, 0
+ %aval = load volatile i8, i8* @a
+ %inc2 = add i8 %aval, 1
+ store volatile i8 %inc2, i8* @a
+ %cmp2 = icmp eq i8 %inc2, 0
+ %conv5 = zext i1 %cmp2 to i8
+ store i8 %conv5, i8* @d
+ br i1 %cmp, label %then, label %else
+
+then:
+ tail call void @external_a()
+ ret void
+
+else:
+ tail call void @external_b()
+ ret void
+}
+
+; Test a function that gets special select lowering into CFG with copied EFLAGS
+; threaded across the CFG. This requires our EFLAGS copy rewriting to handle
+; cross-block rewrites in at least some narrow cases.
+define void @PR37100(i8 %arg1, i16 %arg2, i64 %arg3, i8 %arg4, i8* %ptr1, i32* %ptr2) {
+; X32-LABEL: PR37100:
+; X32: # %bb.0: # %bb
+; X32-NEXT: pushl %ebp
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: pushl %ebx
+; X32-NEXT: .cfi_def_cfa_offset 12
+; X32-NEXT: pushl %edi
+; X32-NEXT: .cfi_def_cfa_offset 16
+; X32-NEXT: pushl %esi
+; X32-NEXT: .cfi_def_cfa_offset 20
+; X32-NEXT: .cfi_offset %esi, -20
+; X32-NEXT: .cfi_offset %edi, -16
+; X32-NEXT: .cfi_offset %ebx, -12
+; X32-NEXT: .cfi_offset %ebp, -8
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movb {{[0-9]+}}(%esp), %ch
+; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT: jmp .LBB3_1
+; X32-NEXT: .p2align 4, 0x90
+; X32-NEXT: .LBB3_5: # %bb1
+; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: idivl %ebp
+; X32-NEXT: .LBB3_1: # %bb1
+; X32-NEXT: # =>This Inner Loop Header: Depth=1
+; X32-NEXT: movsbl %cl, %eax
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: sarl $31, %edx
+; X32-NEXT: cmpl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: sbbl %edx, %eax
+; X32-NEXT: setl %al
+; X32-NEXT: setl %dl
+; X32-NEXT: movzbl %dl, %ebp
+; X32-NEXT: negl %ebp
+; X32-NEXT: testb %al, %al
+; X32-NEXT: jne .LBB3_3
+; X32-NEXT: # %bb.2: # %bb1
+; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X32-NEXT: movb %ch, %cl
+; X32-NEXT: .LBB3_3: # %bb1
+; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X32-NEXT: movb %cl, (%ebx)
+; X32-NEXT: movl (%edi), %edx
+; X32-NEXT: testb %al, %al
+; X32-NEXT: jne .LBB3_5
+; X32-NEXT: # %bb.4: # %bb1
+; X32-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: jmp .LBB3_5
+;
+; X64-LABEL: PR37100:
+; X64: # %bb.0: # %bb
+; X64-NEXT: movq %rdx, %r10
+; X64-NEXT: jmp .LBB3_1
+; X64-NEXT: .p2align 4, 0x90
+; X64-NEXT: .LBB3_5: # %bb1
+; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: idivl %esi
+; X64-NEXT: .LBB3_1: # %bb1
+; X64-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NEXT: movsbq %dil, %rax
+; X64-NEXT: xorl %esi, %esi
+; X64-NEXT: cmpq %rax, %r10
+; X64-NEXT: setl %sil
+; X64-NEXT: negl %esi
+; X64-NEXT: cmpq %rax, %r10
+; X64-NEXT: jl .LBB3_3
+; X64-NEXT: # %bb.2: # %bb1
+; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X64-NEXT: movl %ecx, %edi
+; X64-NEXT: .LBB3_3: # %bb1
+; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X64-NEXT: movb %dil, (%r8)
+; X64-NEXT: jl .LBB3_5
+; X64-NEXT: # %bb.4: # %bb1
+; X64-NEXT: # in Loop: Header=BB3_1 Depth=1
+; X64-NEXT: movl (%r9), %esi
+; X64-NEXT: jmp .LBB3_5
+bb:
+ br label %bb1
+
+bb1:
+ %tmp = phi i8 [ %tmp8, %bb1 ], [ %arg1, %bb ]
+ %tmp2 = phi i16 [ %tmp12, %bb1 ], [ %arg2, %bb ]
+ %tmp3 = icmp sgt i16 %tmp2, 7
+ %tmp4 = select i1 %tmp3, i16 %tmp2, i16 7
+ %tmp5 = sext i8 %tmp to i64
+ %tmp6 = icmp slt i64 %arg3, %tmp5
+ %tmp7 = sext i1 %tmp6 to i32
+ %tmp8 = select i1 %tmp6, i8 %tmp, i8 %arg4
+ store volatile i8 %tmp8, i8* %ptr1
+ %tmp9 = load volatile i32, i32* %ptr2
+ %tmp10 = select i1 %tmp6, i32 %tmp7, i32 %tmp9
+ %tmp11 = srem i32 0, %tmp10
+ %tmp12 = trunc i32 %tmp11 to i16
+ br label %bb1
+}
+
+; Use a particular instruction pattern in order to lower to the post-RA pseudo
+; used to lower SETB into an SBB pattern in order to make sure that kind of
+; usage of a copied EFLAGS continues to work.
+define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3) {
+; X32-LABEL: PR37431:
+; X32: # %bb.0: # %entry
+; X32-NEXT: pushl %esi
+; X32-NEXT: .cfi_def_cfa_offset 8
+; X32-NEXT: .cfi_offset %esi, -8
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl (%eax), %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: sarl $31, %ecx
+; X32-NEXT: cmpl %eax, %eax
+; X32-NEXT: sbbl %ecx, %eax
+; X32-NEXT: setb %al
+; X32-NEXT: sbbb %cl, %cl
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movb %cl, (%edx)
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: xorl %ecx, %ecx
+; X32-NEXT: subl %eax, %ecx
+; X32-NEXT: xorl %eax, %eax
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: idivl %ecx
+; X32-NEXT: movb %dl, (%esi)
+; X32-NEXT: popl %esi
+; X32-NEXT: retl
+;
+; X64-LABEL: PR37431:
+; X64: # %bb.0: # %entry
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movslq (%rdi), %rax
+; X64-NEXT: cmpq %rax, %rax
+; X64-NEXT: sbbb %dl, %dl
+; X64-NEXT: cmpq %rax, %rax
+; X64-NEXT: movb %dl, (%rsi)
+; X64-NEXT: sbbl %esi, %esi
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: idivl %esi
+; X64-NEXT: movb %dl, (%rcx)
+; X64-NEXT: retq
+entry:
+ %tmp = load i32, i32* %arg1
+ %tmp1 = sext i32 %tmp to i64
+ %tmp2 = icmp ugt i64 %tmp1, undef
+ %tmp3 = zext i1 %tmp2 to i8
+ %tmp4 = sub i8 0, %tmp3
+ store i8 %tmp4, i8* %arg2
+ %tmp5 = sext i8 %tmp4 to i32
+ %tmp6 = srem i32 0, %tmp5
+ %tmp7 = trunc i32 %tmp6 to i8
+ store i8 %tmp7, i8* %arg3
+ ret void
+}
diff --git a/test/CodeGen/X86/domain-reassignment-implicit-def.ll b/test/CodeGen/X86/domain-reassignment-implicit-def.ll
new file mode 100644
index 0000000000000..1716b042d8ee7
--- /dev/null
+++ b/test/CodeGen/X86/domain-reassignment-implicit-def.ll
@@ -0,0 +1,24 @@
+; RUN: llc -mcpu=skylake-avx512 -mtriple=x86_64-unknown-linux-gnu %s -o - | FileCheck %s
+
+; Check that the X86 Domain Reassignment pass doesn't drop IMPLICIT_DEF nodes,
+; which would later cause crashes (e.g. in LiveVariables) - see PR37430
+define void @domain_reassignment_implicit_def(i1 %cond, i8 *%mem, float %arg) {
+; CHECK: vxorps %xmm1, %xmm1, %xmm1
+; CHECK: vcmpneqss %xmm1, %xmm0, %k0
+; CHECK: kmovb %k0, (%rsi)
+top:
+ br i1 %cond, label %L19, label %L15
+
+L15: ; preds = %top
+ %tmp47 = fcmp une float 0.000000e+00, %arg
+ %tmp48 = zext i1 %tmp47 to i8
+ br label %L21
+
+L19: ; preds = %top
+ br label %L21
+
+L21: ; preds = %L19, %L15
+ %.sroa.0.0 = phi i8 [ undef, %L19 ], [ %tmp48, %L15 ]
+ store i8 %.sroa.0.0, i8* %mem, align 1
+ ret void
+}
diff --git a/test/CodeGen/X86/domain-reassignment-test.ll b/test/CodeGen/X86/domain-reassignment-test.ll
new file mode 100644
index 0000000000000..2ff5aea9606d6
--- /dev/null
+++ b/test/CodeGen/X86/domain-reassignment-test.ll
@@ -0,0 +1,37 @@
+; RUN: llc -mcpu=skylake-avx512 -mtriple=x86_64-unknown-linux-gnu %s -o - | FileCheck %s
+; RUN: llc -mcpu=skylake-avx512 -mtriple=x86_64-unknown-linux-gnu %s -o - | llvm-mc -triple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512
+
+; Check that the X86 domain reassignment pass doesn't introduce an illegal
+; test instruction. See PR37396
+define void @japi1_foo2_34617() {
+pass2:
+ br label %if5
+
+L174:
+ %tmp = icmp sgt <2 x i64> undef, zeroinitializer
+ %tmp1 = icmp sle <2 x i64> undef, undef
+ %tmp2 = and <2 x i1> %tmp, %tmp1
+ %tmp3 = extractelement <2 x i1> %tmp2, i32 0
+ %tmp4 = extractelement <2 x i1> %tmp2, i32 1
+ %tmp106 = and i1 %tmp4, %tmp3
+ %tmp107 = zext i1 %tmp106 to i8
+ %tmp108 = and i8 %tmp122, %tmp107
+ %tmp109 = icmp eq i8 %tmp108, 0
+; CHECK-NOT: testb {{%k[0-7]}}
+ br i1 %tmp109, label %L188, label %L190
+
+if5:
+ %b.055 = phi i8 [ 1, %pass2 ], [ %tmp122, %if5 ]
+ %tmp118 = icmp sgt i64 undef, 0
+ %tmp119 = icmp sle i64 undef, undef
+ %tmp120 = and i1 %tmp118, %tmp119
+ %tmp121 = zext i1 %tmp120 to i8
+ %tmp122 = and i8 %b.055, %tmp121
+ br i1 undef, label %L174, label %if5
+
+L188:
+ unreachable
+
+L190:
+ ret void
+}
diff --git a/test/CodeGen/X86/eflags-copy-expansion.mir b/test/CodeGen/X86/eflags-copy-expansion.mir
deleted file mode 100644
index 11d4c81b9253f..0000000000000
--- a/test/CodeGen/X86/eflags-copy-expansion.mir
+++ /dev/null
@@ -1,64 +0,0 @@
-# RUN: llc -run-pass postrapseudos -mtriple=i386-apple-macosx -o - %s | FileCheck %s
-
-# Verify that we correctly save and restore eax when copying eflags,
-# even when only a smaller alias of eax is used. We used to check only
-# eax and not its aliases.
-# PR27624.
-
---- |
- target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-
- define void @foo() {
- entry:
- br label %false
- false:
- ret void
- }
-
-...
-
----
-name: foo
-tracksRegLiveness: true
-liveins:
- - { reg: '%edi' }
-body: |
- bb.0.entry:
- liveins: %edi
- NOOP implicit-def %al
-
- ; The bug was triggered only when LivePhysReg is used, which
- ; happens only when the heuristic for the liveness computation
- ; failed. The liveness computation heuristic looks at 10 instructions
- ; before and after the copy. Make sure we do not reach the definition of
- ; AL in 10 instructions, otherwise the heuristic will see that it is live.
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- NOOP
- ; Save AL.
- ; CHECK: PUSH32r killed %eax
-
- ; Copy edi into EFLAGS
- ; CHECK-NEXT: %eax = MOV32rr %edi
- ; CHECK-NEXT: %al = ADD8ri %al, 127, implicit-def %eflags
- ; CHECK-NEXT: SAHF implicit-def %eflags, implicit %ah
- %eflags = COPY %edi
-
- ; Restore AL.
- ; CHECK-NEXT: %eax = POP32r
- bb.1.false:
- liveins: %al
- NOOP implicit %al
- RETQ
-
-...
diff --git a/test/CodeGen/X86/fast-isel-shift.ll b/test/CodeGen/X86/fast-isel-shift.ll
index 5d416e18260c5..e9f01035b53aa 100644
--- a/test/CodeGen/X86/fast-isel-shift.ll
+++ b/test/CodeGen/X86/fast-isel-shift.ll
@@ -381,3 +381,15 @@ define i64 @ashr_imm4_i64(i64 %a) {
%c = ashr i64 %a, 4
ret i64 %c
}
+
+; Make sure we don't crash on out of bounds i8 shifts.
+define i8 @PR36731(i8 %a) {
+; CHECK-LABEL: PR36731:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: movb $255, %cl
+; CHECK-NEXT: shlb %cl, %dil
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+ %b = shl i8 %a, -1
+ ret i8 %b
+}
diff --git a/test/CodeGen/X86/flags-copy-lowering.mir b/test/CodeGen/X86/flags-copy-lowering.mir
new file mode 100644
index 0000000000000..3d8a4ed3c734e
--- /dev/null
+++ b/test/CodeGen/X86/flags-copy-lowering.mir
@@ -0,0 +1,555 @@
+# RUN: llc -run-pass x86-flags-copy-lowering -verify-machineinstrs -o - %s | FileCheck %s
+#
+# Lower various interesting copy patterns of EFLAGS without using LAHF/SAHF.
+
+--- |
+ target triple = "x86_64-unknown-unknown"
+
+ declare void @foo()
+
+ define i32 @test_branch(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret i32 0
+ }
+
+ define i32 @test_branch_fallthrough(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret i32 0
+ }
+
+ define void @test_setcc(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_cmov(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_adc(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_sbb(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_adcx(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_adox(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_rcl(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_rcr(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+
+ define void @test_setb_c(i64 %a, i64 %b) {
+ entry:
+ call void @foo()
+ ret void
+ }
+...
+---
+name: test_branch
+# CHECK-LABEL: name: test_branch
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2, %bb.3
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ CMP64rr %0, %1, implicit-def %eflags
+ %2:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit %eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %2
+ JA_1 %bb.1, implicit %eflags
+ JB_1 %bb.2, implicit %eflags
+ JMP_1 %bb.3
+ ; CHECK-NOT: %eflags =
+ ;
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def %eflags
+ ; CHECK-NEXT: JNE_1 %bb.1, implicit killed %eflags
+ ; CHECK-SAME: {{$[[:space:]]}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: {{.*$}}
+ ; CHECK-SAME: {{$[[:space:]]}}
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def %eflags
+ ; CHECK-NEXT: JNE_1 %bb.2, implicit killed %eflags
+ ; CHECK-NEXT: JMP_1 %bb.3
+
+ bb.1:
+ %3:gr32 = MOV32ri64 42
+ %eax = COPY %3
+ RET 0, %eax
+
+ bb.2:
+ %4:gr32 = MOV32ri64 43
+ %eax = COPY %4
+ RET 0, %eax
+
+ bb.3:
+ %5:gr32 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %5
+ RET 0, %eax
+
+...
+---
+name: test_branch_fallthrough
+# CHECK-LABEL: name: test_branch_fallthrough
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ successors: %bb.1, %bb.2, %bb.3
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ CMP64rr %0, %1, implicit-def %eflags
+ %2:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit %eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %2
+ JA_1 %bb.2, implicit %eflags
+ JB_1 %bb.3, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ;
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def %eflags
+ ; CHECK-NEXT: JNE_1 %bb.2, implicit killed %eflags
+ ; CHECK-SAME: {{$[[:space:]]}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: {{.*$}}
+ ; CHECK-SAME: {{$[[:space:]]}}
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def %eflags
+ ; CHECK-NEXT: JNE_1 %bb.3, implicit killed %eflags
+ ; CHECK-SAME: {{$[[:space:]]}}
+ ; CHECK-NEXT: bb.1:
+
+ bb.1:
+ %5:gr32 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %5
+ RET 0, %eax
+
+ bb.2:
+ %3:gr32 = MOV32ri64 42
+ %eax = COPY %3
+ RET 0, %eax
+
+ bb.3:
+ %4:gr32 = MOV32ri64 43
+ %eax = COPY %4
+ RET 0, %eax
+
+...
+---
+name: test_setcc
+# CHECK-LABEL: name: test_setcc
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ CMP64rr %0, %1, implicit-def %eflags
+ %2:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit %eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit %eflags
+ ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %2
+ %3:gr8 = SETAr implicit %eflags
+ %4:gr8 = SETBr implicit %eflags
+ %5:gr8 = SETEr implicit %eflags
+ SETNEm %rsp, 1, %noreg, -16, %noreg, implicit killed %eflags
+ MOV8mr %rsp, 1, %noreg, -16, %noreg, killed %3
+ MOV8mr %rsp, 1, %noreg, -16, %noreg, killed %4
+ MOV8mr %rsp, 1, %noreg, -16, %noreg, killed %5
+ ; CHECK-NOT: %eflags =
+ ; CHECK-NOT: = SET{{.*}}
+ ; CHECK: MOV8mr {{.*}}, killed %[[A_REG]]
+ ; CHECK-CHECK: MOV8mr {{.*}}, killed %[[B_REG]]
+ ; CHECK-CHECK: MOV8mr {{.*}}, killed %[[E_REG]]
+ ; CHECK-CHECK: MOV8mr {{.*}}, killed %[[NE_REG]]
+
+ RET 0
+
+...
+---
+name: test_cmov
+# CHECK-LABEL: name: test_cmov
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ CMP64rr %0, %1, implicit-def %eflags
+ %2:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit %eflags
+ ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %2
+ %3:gr64 = CMOVA64rr %0, %1, implicit %eflags
+ %4:gr64 = CMOVB64rr %0, %1, implicit %eflags
+ %5:gr64 = CMOVE64rr %0, %1, implicit %eflags
+ %6:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %3:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %5:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %6:gr64 = CMOVE64rr %0, %1, implicit killed %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %3
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %4
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %5
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %6
+
+ RET 0
+
+...
+---
+name: test_adc
+# CHECK-LABEL: name: test_adc
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = ADC64ri32 %2:gr64, 42, implicit-def %eflags, implicit %eflags
+ %5:gr64 = ADC64ri32 %4:gr64, 42, implicit-def %eflags, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = ADC64ri32 %2, 42, implicit-def %eflags, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = ADC64ri32 %4, 42, implicit-def{{( dead)?}} %eflags, implicit{{( killed)?}} %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %5
+
+ RET 0
+
+...
+---
+name: test_sbb
+# CHECK-LABEL: name: test_sbb
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = SUB64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY killed %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = SBB64ri32 %2:gr64, 42, implicit-def %eflags, implicit killed %eflags
+ %5:gr64 = SBB64ri32 %4:gr64, 42, implicit-def dead %eflags, implicit killed %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = SBB64ri32 %2, 42, implicit-def %eflags, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = SBB64ri32 %4, 42, implicit-def{{( dead)?}} %eflags, implicit{{( killed)?}} %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %5
+
+ RET 0
+
+...
+---
+name: test_adcx
+# CHECK-LABEL: name: test_adcx
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit %eflags
+ ; CHECK-NEXT: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = CMOVE64rr %0, %1, implicit %eflags
+ %5:gr64 = MOV64ri32 42
+ %6:gr64 = ADCX64rr %2, %5, implicit-def %eflags, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = MOV64ri32 42
+ ; CHECK-NEXT: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def %eflags
+ ; CHECK-NEXT: %6:gr64 = ADCX64rr %2, %5, implicit-def{{( dead)?}} %eflags, implicit killed %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %4
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %6
+
+ RET 0
+
+...
+---
+name: test_adox
+# CHECK-LABEL: name: test_adox
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit %eflags
+ ; CHECK-NEXT: %[[OF_REG:[^:]*]]:gr8 = SETOr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = CMOVE64rr %0, %1, implicit %eflags
+ %5:gr64 = MOV64ri32 42
+ %6:gr64 = ADOX64rr %2, %5, implicit-def %eflags, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = MOV64ri32 42
+ ; CHECK-NEXT: dead %{{[^:]*}}:gr8 = ADD8ri %[[OF_REG]], 127, implicit-def %eflags
+ ; CHECK-NEXT: %6:gr64 = ADOX64rr %2, %5, implicit-def{{( dead)?}} %eflags, implicit killed %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %4
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %6
+
+ RET 0
+
+...
+---
+name: test_rcl
+# CHECK-LABEL: name: test_rcl
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = RCL64r1 %2:gr64, implicit-def %eflags, implicit %eflags
+ %5:gr64 = RCL64r1 %4:gr64, implicit-def %eflags, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = RCL64r1 %2, implicit-def %eflags, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = RCL64r1 %4, implicit-def{{( dead)?}} %eflags, implicit{{( killed)?}} %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %5
+
+ RET 0
+
+...
+---
+name: test_rcr
+# CHECK-LABEL: name: test_rcr
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr64 = RCR64r1 %2:gr64, implicit-def %eflags, implicit %eflags
+ %5:gr64 = RCR64r1 %4:gr64, implicit-def %eflags, implicit %eflags
+ ; CHECK-NOT: %eflags =
+ ; CHECK: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def %eflags
+ ; CHECK-NEXT: %4:gr64 = RCR64r1 %2, implicit-def %eflags, implicit killed %eflags
+ ; CHECK-NEXT: %5:gr64 = RCR64r1 %4, implicit-def{{( dead)?}} %eflags, implicit{{( killed)?}} %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %5
+
+ RET 0
+
+...
+---
+name: test_setb_c
+# CHECK-LABEL: name: test_setb_c
+liveins:
+ - { reg: '%rdi', virtual-reg: '%0' }
+ - { reg: '%rsi', virtual-reg: '%1' }
+body: |
+ bb.0:
+ liveins: %rdi, %rsi
+
+ %0:gr64 = COPY %rdi
+ %1:gr64 = COPY %rsi
+ %2:gr64 = ADD64rr %0, %1, implicit-def %eflags
+ %3:gr64 = COPY %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+ ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit %eflags
+ ; CHECK-NOT: COPY{{( killed)?}} %eflags
+
+ ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+ CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
+ ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp
+
+ %eflags = COPY %3
+ %4:gr8 = SETB_C8r implicit-def %eflags, implicit %eflags
+ MOV8mr %rsp, 1, %noreg, -16, %noreg, killed %4
+ ; CHECK-NOT: %eflags =
+ ; CHECK: %[[ZERO:[^:]*]]:gr32 = MOV32r0 implicit-def %eflags
+ ; CHECK-NEXT: %[[ZERO_SUBREG:[^:]*]]:gr8 = EXTRACT_SUBREG %[[ZERO]], %subreg.sub_8bit
+ ; CHECK-NEXT: %[[REPLACEMENT:[^:]*]]:gr8 = SUB8rr %[[ZERO_SUBREG]], %[[CF_REG]]
+ ; CHECK-NEXT: MOV8mr %rsp, 1, %noreg, -16, %noreg, killed %[[REPLACEMENT]]
+
+ %eflags = COPY %3
+ %5:gr16 = SETB_C16r implicit-def %eflags, implicit %eflags
+ MOV16mr %rsp, 1, %noreg, -16, %noreg, killed %5
+ ; CHECK-NOT: %eflags =
+ ; CHECK: %[[CF_EXT:[^:]*]]:gr32 = MOVZX32rr8 %[[CF_REG]]
+ ; CHECK-NEXT: %[[CF_TRUNC:[^:]*]]:gr16 = EXTRACT_SUBREG %[[CF_EXT]], %subreg.sub_16bit
+ ; CHECK-NEXT: %[[ZERO:[^:]*]]:gr32 = MOV32r0 implicit-def %eflags
+ ; CHECK-NEXT: %[[ZERO_SUBREG:[^:]*]]:gr16 = EXTRACT_SUBREG %[[ZERO]], %subreg.sub_16bit
+ ; CHECK-NEXT: %[[REPLACEMENT:[^:]*]]:gr16 = SUB16rr %[[ZERO_SUBREG]], %[[CF_TRUNC]]
+ ; CHECK-NEXT: MOV16mr %rsp, 1, %noreg, -16, %noreg, killed %[[REPLACEMENT]]
+
+ %eflags = COPY %3
+ %6:gr32 = SETB_C32r implicit-def %eflags, implicit %eflags
+ MOV32mr %rsp, 1, %noreg, -16, %noreg, killed %6
+ ; CHECK-NOT: %eflags =
+ ; CHECK: %[[CF_EXT:[^:]*]]:gr32 = MOVZX32rr8 %[[CF_REG]]
+ ; CHECK-NEXT: %[[ZERO:[^:]*]]:gr32 = MOV32r0 implicit-def %eflags
+ ; CHECK-NEXT: %[[REPLACEMENT:[^:]*]]:gr32 = SUB32rr %[[ZERO]], %[[CF_EXT]]
+ ; CHECK-NEXT: MOV32mr %rsp, 1, %noreg, -16, %noreg, killed %[[REPLACEMENT]]
+
+ %eflags = COPY %3
+ %7:gr64 = SETB_C64r implicit-def %eflags, implicit %eflags
+ MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %7
+ ; CHECK-NOT: %eflags =
+ ; CHECK: %[[CF_EXT1:[^:]*]]:gr32 = MOVZX32rr8 %[[CF_REG]]
+ ; CHECK-NEXT: %[[CF_EXT2:[^:]*]]:gr64 = SUBREG_TO_REG 0, %[[CF_EXT1]], %subreg.sub_32bit
+ ; CHECK-NEXT: %[[ZERO:[^:]*]]:gr32 = MOV32r0 implicit-def %eflags
+ ; CHECK-NEXT: %[[ZERO_EXT:[^:]*]]:gr64 = SUBREG_TO_REG 0, %[[ZERO]], %subreg.sub_32bit
+ ; CHECK-NEXT: %[[REPLACEMENT:[^:]*]]:gr64 = SUB64rr %[[ZERO_EXT]], %[[CF_EXT2]]
+ ; CHECK-NEXT: MOV64mr %rsp, 1, %noreg, -16, %noreg, killed %[[REPLACEMENT]]
+
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/ipra-reg-usage.ll b/test/CodeGen/X86/ipra-reg-usage.ll
index 50c066de9656e..e6cf4c0233484 100644
--- a/test/CodeGen/X86/ipra-reg-usage.ll
+++ b/test/CodeGen/X86/ipra-reg-usage.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: %cs %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w
+; CHECK: foo Clobbered Registers: %cs %df %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w
call void @bar1()
call void @bar2()
ret void
diff --git a/test/CodeGen/X86/mul-i1024.ll b/test/CodeGen/X86/mul-i1024.ll
index 9980042a4ccc8..16fb112efadb2 100644
--- a/test/CodeGen/X86/mul-i1024.ll
+++ b/test/CodeGen/X86/mul-i1024.ll
@@ -6,4687 +6,4637 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-LABEL: test_1024:
; X32: # %bb.0:
; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
-; X32-NEXT: subl $996, %esp # imm = 0x3E4
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 32(%eax), %eax
-; X32-NEXT: movl %eax, -188(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: mull %ecx
+; X32-NEXT: subl $1000, %esp # imm = 0x3E8
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 48(%eax), %ecx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl 32(%edx), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %edi, %edi
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl 8(%ebp), %esi
-; X32-NEXT: movl 48(%esi), %eax
-; X32-NEXT: movl %eax, -440(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ecx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: movl %edx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -132(%ebp) # 4-byte Spill
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl %edx, -884(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl 32(%esi), %eax
-; X32-NEXT: movl %eax, -416(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -400(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -324(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %ebx, %ecx
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %edi, %ecx
-; X32-NEXT: movl %ecx, -204(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -892(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 36(%eax), %eax
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: mull %edx
-; X32-NEXT: movl %edx, -236(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl %edi, -304(%ebp) # 4-byte Spill
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edi, -80(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, -220(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl 36(%esi), %eax
-; X32-NEXT: movl %eax, -316(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %ecx, -124(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl -400(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -324(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl %ebx, -100(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -656(%ebp) # 4-byte Spill
-; X32-NEXT: leal (%ebx,%edi), %eax
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: leal (%ecx,%edi), %edx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: leal (%ebx,%eax), %eax
+; X32-NEXT: leal (%ecx,%ebp), %edx
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -700(%ebp) # 4-byte Spill
-; X32-NEXT: seto %al
-; X32-NEXT: lahf
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: movl %eax, -640(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -96(%ebp) # 4-byte Spill
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edi, -112(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, -64(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl %esi, %ebx
-; X32-NEXT: setb -160(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: movl %eax, -168(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movl 16(%ecx), %eax
-; X32-NEXT: movl %eax, -348(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: mull %edx
-; X32-NEXT: movl %edx, -320(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -180(%ebp) # 4-byte Spill
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl %edx, -428(%ebp) # 4-byte Spill
-; X32-NEXT: movl (%ecx), %eax
-; X32-NEXT: movl %eax, -260(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT: movl 16(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: adcl %ebx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl (%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -264(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -136(%ebp) # 4-byte Spill
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %ebp
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -452(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl -140(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -764(%ebp) # 4-byte Spill
-; X32-NEXT: movl -324(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl %ebx, -424(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: movl %ebx, -256(%ebp) # 4-byte Spill
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -204(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -220(%ebp) # 4-byte Folded Spill
-; X32-NEXT: setb -388(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 4(%eax), %eax
-; X32-NEXT: movl %eax, -92(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ebx, %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edi, -16(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: setb %bh
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movzbl %bh, %eax
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl %edi, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 8(%eax), %eax
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -156(%ebp) # 4-byte Spill
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl -256(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: movl %ecx, -120(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 52(%eax), %eax
-; X32-NEXT: movl %eax, -340(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl -140(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl -132(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl %ebx, %edi
-; X32-NEXT: movl %edi, -192(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl %ecx, %edi
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl 4(%esi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %ecx, %ecx
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, %ecx
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl %ebx, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: setb %cl
-; X32-NEXT: addl %eax, %esi
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
; X32-NEXT: movzbl %cl, %eax
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -216(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 56(%eax), %eax
-; X32-NEXT: movl %eax, -408(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 8(%eax), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -392(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -412(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %ecx
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT: movl 52(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %ecx, %ecx
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: movl %ebx, -272(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -216(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -16(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -420(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: adcl -120(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -616(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb %bl
+; X32-NEXT: addl %eax, %ecx
+; X32-NEXT: movzbl %bl, %ebx
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl 56(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: mull %edx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %ebp
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -612(%ebp) # 4-byte Spill
-; X32-NEXT: movl -64(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -184(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movzbl -160(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl -124(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -152(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 40(%eax), %eax
-; X32-NEXT: movl %eax, -352(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -364(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -396(%ebp) # 4-byte Spill
-; X32-NEXT: movl -324(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -400(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: addl %esi, %edi
-; X32-NEXT: movl %edi, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -152(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: addl -28(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -112(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -16(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -196(%ebp) # 4-byte Spill
-; X32-NEXT: seto %al
-; X32-NEXT: lahf
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: movl %eax, -456(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl -120(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -504(%ebp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -508(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %ecx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 16(%ecx), %eax
-; X32-NEXT: movl %eax, -212(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ebx, %ebx
; X32-NEXT: mull %ebx
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -84(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: movl 20(%ecx), %eax
-; X32-NEXT: movl %eax, -252(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %ebx
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: addl %ebp, %ebx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl %edi, %ebx
-; X32-NEXT: movl %ebx, -164(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ecx
; X32-NEXT: setb %bl
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: movzbl %bl, %esi
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 24(%eax), %eax
-; X32-NEXT: movl %eax, -284(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -308(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -208(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: adcl %edx, %eax
; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -40(%ebp) # 4-byte Spill
; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl -324(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl %edi, -116(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl -400(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -84(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -768(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -296(%ebp) # 4-byte Spill
-; X32-NEXT: movl -112(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -776(%ebp) # 4-byte Spill
-; X32-NEXT: movl -44(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -772(%ebp) # 4-byte Spill
-; X32-NEXT: movl -52(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -780(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %edx # 4-byte Reload
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl -140(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -448(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -332(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -648(%ebp) # 4-byte Spill
-; X32-NEXT: movl -272(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -40(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -644(%ebp) # 4-byte Spill
-; X32-NEXT: movl -24(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -572(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 20(%eax), %eax
-; X32-NEXT: movl %eax, -216(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl 20(%edi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -320(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: addl %ebx, %esi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl -180(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %esi
-; X32-NEXT: movl %esi, -48(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: setb %bl
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: movzbl %bl, %esi
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 24(%eax), %eax
-; X32-NEXT: movl %eax, -288(%ebp) # 4-byte Spill
+; X32-NEXT: movl 24(%edi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -280(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -312(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %edx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %edi
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -320(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -312(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebx
; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edi, -36(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ebx
-; X32-NEXT: movl %ebx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: addl -28(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -228(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -16(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -596(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl -120(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -464(%ebp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -536(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl 4(%eax), %eax
-; X32-NEXT: movl %eax, -124(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -264(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: addl %ecx, %esi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl -136(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl %ebx, %esi
-; X32-NEXT: movl %esi, -276(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ecx, %edi
; X32-NEXT: setb %cl
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl %edi, -584(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movzbl %cl, %eax
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -432(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 8(%eax), %eax
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl %ecx, -160(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -268(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %esi
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %esi
; X32-NEXT: movl %esi, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl -264(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %ecx
; X32-NEXT: adcl %edx, %ecx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -432(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: movl %esi, %edx
-; X32-NEXT: addl -28(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -344(%ebp) # 4-byte Spill
-; X32-NEXT: movl -276(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: adcl -16(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -404(%ebp) # 4-byte Spill
-; X32-NEXT: pushl %eax
-; X32-NEXT: seto %al
-; X32-NEXT: lahf
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: popl %eax
-; X32-NEXT: movl %edx, -736(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %edx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %eax, %edx
-; X32-NEXT: adcl -120(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -532(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl %ecx, -172(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -60(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -592(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: movl -116(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -84(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -328(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -368(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -620(%ebp) # 4-byte Spill
-; X32-NEXT: movl -240(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -40(%ebp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -788(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: adcl -56(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -784(%ebp) # 4-byte Spill
-; X32-NEXT: movl -180(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -100(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl -320(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -204(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -804(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl -264(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -820(%ebp) # 4-byte Spill
-; X32-NEXT: movl -180(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -116(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl %esi, -576(%ebp) # 4-byte Spill
-; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl %ecx, -540(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -800(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -796(%ebp) # 4-byte Spill
-; X32-NEXT: movl -20(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -56(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -792(%ebp) # 4-byte Spill
-; X32-NEXT: movl -220(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -220(%ebp) # 4-byte Spill
-; X32-NEXT: movzbl -388(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl -236(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -376(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 40(%eax), %eax
-; X32-NEXT: movl %eax, -236(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: movl -100(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %edi
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -204(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl %esi, %edi
-; X32-NEXT: adcl -376(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -468(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -80(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %edx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ecx, %ebp
+; X32-NEXT: addl %edi, %edx
+; X32-NEXT: adcl %ebx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -816(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %edi, -372(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -812(%ebp) # 4-byte Spill
-; X32-NEXT: movl -20(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %edx, -292(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -808(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -512(%ebp) # 4-byte Spill
-; X32-NEXT: movl -276(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -676(%ebp) # 4-byte Spill
-; X32-NEXT: seto %al
-; X32-NEXT: lahf
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: movl %eax, -740(%ebp) # 4-byte Spill
-; X32-NEXT: movl -240(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -624(%ebp) # 4-byte Spill
-; X32-NEXT: movl -172(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -628(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl 48(%esi), %eax
-; X32-NEXT: movl %eax, -300(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %ebx, -336(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl 52(%esi), %eax
-; X32-NEXT: movl %eax, -144(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %esi
; X32-NEXT: addl %edi, %esi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %ebx, %esi
-; X32-NEXT: movl %esi, -200(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: setb %bl
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: movzbl %bl, %esi
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 56(%eax), %eax
-; X32-NEXT: movl %eax, -244(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -224(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -360(%ebp) # 4-byte Spill
-; X32-NEXT: movl -336(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %ebx
; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: movl %edx, -176(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -360(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %edi
; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -472(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: movl %edi, -436(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -336(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl -264(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -824(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -588(%ebp) # 4-byte Spill
-; X32-NEXT: movl -276(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -632(%ebp) # 4-byte Spill
-; X32-NEXT: movl -240(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -828(%ebp) # 4-byte Spill
-; X32-NEXT: movl -172(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -636(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 64(%eax), %eax
-; X32-NEXT: movl %eax, -476(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl 64(%edi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -384(%ebp) # 4-byte Spill
-; X32-NEXT: movl -116(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %ecx
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl %edx, -480(%ebp) # 4-byte Spill
-; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl -84(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -920(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl -256(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: adcl -384(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -932(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 80(%eax), %eax
-; X32-NEXT: movl %eax, -548(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl 80(%edi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -380(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -380(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %edx, -356(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl %ecx, -948(%ebp) # 4-byte Spill
-; X32-NEXT: addl %esi, %edi
+; X32-NEXT: movl %ebp, %edi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl %ebx, -960(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %ecx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 80(%ecx), %eax
-; X32-NEXT: movl %eax, -552(%ebp) # 4-byte Spill
-; X32-NEXT: xorl %ebx, %ebx
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, -528(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -524(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %edi, %edi
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: addl %esi, %eax
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: movl -264(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -976(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl 64(%ecx), %eax
-; X32-NEXT: movl %eax, -520(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, -500(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %esi, %ecx
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %esi, -496(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl %ecx, -992(%ebp) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl -180(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %edx, %ecx
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -320(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -1008(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: movl -336(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: adcl -176(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -832(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -672(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -836(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -472(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -840(%ebp) # 4-byte Spill
-; X32-NEXT: movl -20(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -436(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -844(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -680(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -80(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -856(%ebp) # 4-byte Spill
-; X32-NEXT: movl -272(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -372(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -852(%ebp) # 4-byte Spill
-; X32-NEXT: movl -24(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -292(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -848(%ebp) # 4-byte Spill
-; X32-NEXT: movl -44(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
-; X32-NEXT: pushl %eax
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: addb $127, %al
-; X32-NEXT: sahf
-; X32-NEXT: popl %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -860(%ebp) # 4-byte Spill
-; X32-NEXT: movl -52(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
+; X32-NEXT: addb $255, %al
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, -864(%ebp) # 4-byte Spill
-; X32-NEXT: movl -324(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl -400(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -176(%ebp), %edx # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -868(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -684(%ebp) # 4-byte Spill
-; X32-NEXT: movl -112(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -876(%ebp) # 4-byte Spill
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -472(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -872(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -436(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -880(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl -140(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %eax, -888(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -688(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -900(%ebp) # 4-byte Spill
-; X32-NEXT: movl -272(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -896(%ebp) # 4-byte Spill
-; X32-NEXT: movl -24(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -904(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 68(%eax), %eax
-; X32-NEXT: movl %eax, -248(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -384(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl %ebp, %edi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl -480(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl %ebx, %esi
-; X32-NEXT: movl %esi, -652(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -96(%ebp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl %ebx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movzbl -96(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edx, %edi
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %edx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 72(%eax), %eax
-; X32-NEXT: movl %eax, -516(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, -484(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -488(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl -384(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl -116(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl -480(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %edx
+; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: adcl %edi, %ebp
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: adcl %esi, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebp, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl %ecx, -692(%ebp) # 4-byte Spill
-; X32-NEXT: movl -164(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl -652(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl %esi, -908(%ebp) # 4-byte Spill
-; X32-NEXT: movl -40(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl %esi, -916(%ebp) # 4-byte Spill
-; X32-NEXT: movl -56(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl %esi, -912(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl %esi, -696(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -16(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -652(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -120(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -924(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -60(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -928(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl 84(%eax), %eax
-; X32-NEXT: movl %eax, -544(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -356(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: addl %ebx, %esi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl -380(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: addl %edi, %esi
-; X32-NEXT: movl %esi, -660(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: setb %bl
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movzbl %bl, %esi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 88(%eax), %eax
-; X32-NEXT: movl %eax, -580(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -600(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -604(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -356(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %esi
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl %ebp, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -704(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %edx # 4-byte Reload
-; X32-NEXT: movl -660(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -940(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl %edx, -944(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: movl -60(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: movl %edi, -936(%ebp) # 4-byte Spill
-; X32-NEXT: movl -116(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %ebx, %edi
-; X32-NEXT: movl %edi, -708(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -660(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -40(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -952(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -56(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -956(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 84(%eax), %eax
-; X32-NEXT: movl %eax, -460(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl -528(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: addl %esi, %edi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -524(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -668(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl %ebp, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
; X32-NEXT: setb %bl
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: movzbl %bl, %edi
; X32-NEXT: adcl %edx, %edi
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 88(%eax), %eax
-; X32-NEXT: movl %eax, -492(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl %esi, -556(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -560(%ebp) # 4-byte Spill
-; X32-NEXT: movl -524(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: movl -528(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %ebx
+; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %esi
; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -732(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edx, -728(%ebp) # 4-byte Spill
-; X32-NEXT: addl -136(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -712(%ebp) # 4-byte Spill
-; X32-NEXT: movl -668(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -276(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -968(%ebp) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: adcl -240(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -964(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -172(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -972(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl 68(%eax), %eax
-; X32-NEXT: movl %eax, -444(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: xorl %ebx, %ebx
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -496(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl %ebx, %esi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -500(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -664(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -96(%ebp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movzbl -96(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 72(%eax), %eax
-; X32-NEXT: movl %eax, -388(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, -564(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -568(%ebp) # 4-byte Spill
-; X32-NEXT: movl -500(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: mull %edx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -496(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %edx
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edi, %ebx
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: addl -136(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -716(%ebp) # 4-byte Spill
-; X32-NEXT: movl -664(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: adcl -276(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -988(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl -240(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -984(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: adcl -172(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -980(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl -180(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl %esi, -720(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %ecx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %ecx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %edx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -664(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl %edi, -996(%ebp) # 4-byte Spill
-; X32-NEXT: movl -20(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl %ecx, -1000(%ebp) # 4-byte Spill
-; X32-NEXT: movl -524(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl %edi, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl %ebp, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl -528(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -320(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -1004(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, -724(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, -668(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %ebx, -732(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edi, -728(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 12(%eax), %eax
-; X32-NEXT: movl %eax, -96(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl -156(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: addl %esi, %edi
; X32-NEXT: movl %edx, %ebx
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl -104(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movl %edi, -232(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ebx
-; X32-NEXT: setb -88(%ebp) # 1-byte Folded Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: movzbl -88(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %edx, %eax
; X32-NEXT: movl %ecx, %edx
; X32-NEXT: addl %edx, %ebx
; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl -76(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: movl -72(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl %edx, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -232(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl -88(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, -72(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, -76(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl (%esp), %edi # 4-byte Reload
+; X32-NEXT: addl %ebp, %edi
+; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: movl %ebx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl (%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl %edx, %ecx
; X32-NEXT: setb %dl
-; X32-NEXT: addl -104(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -232(%ebp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: movzbl %dl, %edx
; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl %edx, -608(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -28(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -164(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -40(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -120(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -56(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -60(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -232(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -164(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -608(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -88(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl (%esp), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %eax, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 44(%eax), %eax
-; X32-NEXT: movl %eax, -120(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %ecx
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl %ebx, %ecx
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl %esi, %ebx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl -364(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -60(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: setb -16(%ebp) # 1-byte Folded Spill
-; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movzbl -16(%ebp), %ebx # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: adcl %ecx, %ebx
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl -324(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -152(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -400(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
+; X32-NEXT: setb %bl
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: movl %edi, %ecx
+; X32-NEXT: addl %ecx, %edx
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %esi, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %edi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl -324(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %esi, -64(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -112(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -88(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -88(%ebp) # 1-byte Folded Spill
-; X32-NEXT: addl -364(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -60(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movzbl -88(%ebp), %esi # 1-byte Folded Reload
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -60(%ebp) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl -324(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl -132(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -112(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -44(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -272(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -52(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -24(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl %edx, -192(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -60(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl %edi, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -456(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: pushl %eax
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: addb $127, %al
-; X32-NEXT: sahf
-; X32-NEXT: popl %eax
-; X32-NEXT: adcl -72(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -608(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -76(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -760(%ebp) # 4-byte Spill
-; X32-NEXT: movl -88(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -232(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -756(%ebp) # 4-byte Spill
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: adcl %edi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %eax
-; X32-NEXT: adcl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -752(%ebp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: adcl -40(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -748(%ebp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl -56(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -744(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 12(%eax), %eax
-; X32-NEXT: movl %eax, -60(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl -268(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %edi, %ebx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl -160(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: movl %ecx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: setb %cl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: setb %bl
; X32-NEXT: addl %eax, %edx
-; X32-NEXT: movzbl %cl, %eax
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, -112(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl -584(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: movl -432(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -264(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: movl %esi, -432(%ebp) # 4-byte Spill
-; X32-NEXT: movl -24(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl %esi, -456(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl -112(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl %edi, -432(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -276(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, -456(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movzbl %bl, %ebp
+; X32-NEXT: adcl %ecx, %ebp
+; X32-NEXT: movl %esi, %ecx
+; X32-NEXT: addl %ecx, %edx
+; X32-NEXT: adcl %edi, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ecx, %edi
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %ebx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: setb %bl
-; X32-NEXT: addl -160(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -24(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movzbl %bl, %esi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -112(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -136(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -180(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: adcl -48(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -240(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -36(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -172(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -20(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -584(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -276(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -112(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -736(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: addb $127, %al
-; X32-NEXT: sahf
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -432(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -456(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, -232(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edx, -164(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edi, -56(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 44(%eax), %eax
-; X32-NEXT: movl %eax, -112(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: movl -128(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %edi, %ebx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl -304(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movl %ebx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: setb %bl
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %edi, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
+; X32-NEXT: setb %cl
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: movzbl %cl, %eax
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl %ecx, %edx
-; X32-NEXT: addl %edx, %esi
-; X32-NEXT: adcl %edi, %eax
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl -100(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl -220(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl -376(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: movl %edx, -376(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: addl %edx, %ebp
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl %edx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl %edi, -376(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -220(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -80(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -220(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ebx, %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -20(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: adcl %edi, %ecx
; X32-NEXT: setb %dl
-; X32-NEXT: addl -304(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -36(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movzbl %dl, %edx
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -48(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl -336(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -200(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -80(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -472(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -372(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -436(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -292(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movzbl %dl, %eax
+; X32-NEXT: adcl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl %esi, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl %edx, -200(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -36(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -48(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -740(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: addb $127, %al
-; X32-NEXT: sahf
-; X32-NEXT: movl -376(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -432(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -220(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -456(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %ebx, -584(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -200(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -276(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edi, -240(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, -172(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -640(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %eax
-; X32-NEXT: addb $127, %al
-; X32-NEXT: sahf
-; X32-NEXT: adcl -64(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -376(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -16(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -88(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -640(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -200(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -44(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -472(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -52(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -436(%ebp) # 4-byte Spill
-; X32-NEXT: movl -408(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %eax, %edi
+; X32-NEXT: adcl %ebp, %esi
+; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 60(%eax), %eax
-; X32-NEXT: movl %eax, -192(%ebp) # 4-byte Spill
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 60(%eax), %esi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -16(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %ebx
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -92(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -392(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -80(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -440(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -168(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %bl
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -92(%ebp), %esi # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -340(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -68(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -764(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -48(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -36(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -16(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -440(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -44(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -132(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -140(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -20(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -48(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -52(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -80(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -16(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -36(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -108(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -80(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebp, %edi
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -80(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -20(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -20(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -20(%ebp), %edi # 1-byte Folded Reload
+; X32-NEXT: movzbl (%esp), %edi # 1-byte Folded Reload
; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -392(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -16(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -80(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -36(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -68(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -420(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -616(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -612(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -272(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -92(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -20(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -616(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -92(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -612(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -152(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -316(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -152(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -32(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -424(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -44(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -152(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -68(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -20(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -36(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -416(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -424(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -420(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -324(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -400(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -152(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -424(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -44(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -420(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -20(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -36(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -68(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -108(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl (%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -20(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -44(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -44(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -44(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -364(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: movl -20(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebx, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -68(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl -44(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -32(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -196(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -504(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -508(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl -24(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -64(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -48(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -292(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -52(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -16(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -272(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
+; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %ebp
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -52(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -252(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -252(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -84(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -24(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -508(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -68(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -504(%ebp) # 4-byte Spill
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -296(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -768(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -48(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -372(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -64(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -52(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -416(%ebp), %esi # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -152(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 28(%eax), %ebx
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, -48(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ebx
; X32-NEXT: setb %cl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -324(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -400(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -372(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -152(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -68(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -24(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -52(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -372(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -24(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -48(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -68(%ebp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -68(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -68(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movzbl %bl, %esi
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -364(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -52(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -24(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -372(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -68(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -296(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -776(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -772(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -780(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -36(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, -508(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -20(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -504(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -292(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -152(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -44(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -64(%ebp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -16(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -80(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -88(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -272(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -68(%ebp) # 4-byte Spill
-; X32-NEXT: setb -20(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -252(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -272(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -16(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -16(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -392(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -84(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -80(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -440(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -292(%ebp) # 4-byte Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -252(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -372(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -88(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -340(%ebp), %edi # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -88(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -332(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -448(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -36(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -448(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -272(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -16(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -440(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -48(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -296(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %cl
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -132(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -140(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -448(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -296(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -80(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -16(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -16(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -272(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -80(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -48(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -80(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -80(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -80(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -392(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl -272(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -36(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -16(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -80(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -332(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: adcl -648(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -644(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -332(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -572(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl -292(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -52(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -372(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -24(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -88(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -44(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -296(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -68(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -20(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl %ebx, -272(%ebp) # 4-byte Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl -332(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: addl -32(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -292(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -196(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -372(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -608(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -760(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -296(%ebp) # 4-byte Spill
-; X32-NEXT: movl -756(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -272(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -752(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -748(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -332(%ebp) # 4-byte Spill
-; X32-NEXT: movl -744(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -168(%ebp), %edi # 4-byte Reload
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 28(%eax), %eax
-; X32-NEXT: movl %eax, -16(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 28(%eax), %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: addl %esi, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -92(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -44(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -348(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -572(%ebp) # 4-byte Spill
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -32(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -92(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -448(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -216(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -228(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -428(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -52(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -20(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -196(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %ecx
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -24(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -428(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %esi, %ebx
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -320(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -32(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -196(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -52(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -428(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -44(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -20(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -52(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -108(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -44(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebp, %edi
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -24(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -24(%ebp), %edi # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -280(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -20(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -44(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -52(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -228(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -596(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -464(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -464(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -536(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -92(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -228(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: setb %bl
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -160(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %bl, %esi
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -24(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -648(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -92(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -644(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -536(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -124(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -536(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -344(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -452(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -32(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -452(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -228(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -52(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -260(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -536(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -596(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -136(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -264(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -452(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -536(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -32(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -596(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -24(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -52(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -228(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -108(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -24(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -32(%ebp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -32(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb %cl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -32(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movzbl %cl, %esi
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -160(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -52(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: movl -24(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -228(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl -32(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -344(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -404(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -532(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -592(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl -572(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -448(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -196(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -572(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -428(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -20(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -464(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -68(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -228(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -428(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl %ebp, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -228(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -252(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -452(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -252(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ebp
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -160(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -84(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -196(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -228(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -532(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -592(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -532(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -532(%ebp) # 4-byte Spill
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -368(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -328(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -428(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -448(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -452(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -328(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -196(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -228(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -260(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -428(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: addl %ebp, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -48(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -452(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %cl
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -136(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -264(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -448(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -428(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -328(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -452(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -196(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -228(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -448(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -196(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -228(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -196(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -328(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -48(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -196(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -328(%ebp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -328(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -328(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -160(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -328(%ebp) # 4-byte Spill
-; X32-NEXT: movl -228(%ebp), %edx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -196(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -448(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -328(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -368(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -620(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -788(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -784(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -52(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, -592(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -24(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -532(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -572(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -428(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -32(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -452(%ebp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -20(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -228(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -44(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -196(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -464(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -620(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -68(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -328(%ebp) # 4-byte Spill
-; X32-NEXT: setb -464(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -252(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -44(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -44(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -84(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -52(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -348(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -252(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -68(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -368(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -216(%ebp), %esi # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %esi
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -368(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -540(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -576(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -20(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -576(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -52(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -348(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -368(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -368(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -48(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -368(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: setb %cl
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebx, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -320(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %edi
-; X32-NEXT: movl -576(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -20(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -24(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -368(%ebp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: addl -52(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -44(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: setb -576(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -52(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -48(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: setb -24(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -24(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -280(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -44(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -52(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movzbl -576(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: movl -24(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -540(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -800(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -796(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -792(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, %edi
-; X32-NEXT: movl -32(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -228(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -68(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -196(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -20(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -620(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -368(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -328(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movzbl -464(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -52(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: addl -344(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -404(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -72(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -76(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -44(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -232(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -52(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -164(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -24(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -40(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -56(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -32(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -616(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -68(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -612(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -20(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -424(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -420(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -368(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -508(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -504(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -152(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -64(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -464(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -292(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -372(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -296(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -272(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -36(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -332(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -188(%ebp), %edi # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -164(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: addl %ebp, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -56(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -348(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -152(%ebp) # 4-byte Spill
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %ebp
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -72(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -228(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -216(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -468(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -804(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -76(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -56(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -236(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -164(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -232(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -320(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -164(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -232(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -56(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -40(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -72(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -236(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -40(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -40(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -40(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -40(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -280(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -56(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -76(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -72(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -40(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -468(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -816(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -812(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -344(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -808(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -196(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: addl (%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -148(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -328(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -160(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -64(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -468(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -508(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -468(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: movzbl %cl, %eax
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -504(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -124(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -512(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -820(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -196(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -404(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -328(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -196(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -64(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -72(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -260(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -236(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -328(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -468(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -136(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -264(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -404(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -328(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -196(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -468(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -64(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -72(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -196(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -236(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -64(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -404(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -112(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -404(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -404(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -404(%ebp), %edi # 1-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -160(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -72(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl (%esp), %edx # 4-byte Reload
; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl -64(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -196(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -512(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -676(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -624(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -628(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl -152(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -228(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -164(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -628(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -232(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -624(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -56(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -76(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -344(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -300(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -232(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -164(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -232(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %edi, %ecx
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -144(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -228(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: setb %bl
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull -144(%ebp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -160(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -336(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -176(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -152(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -232(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -300(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -404(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -144(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -540(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -196(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -124(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -196(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -588(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -824(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -420(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -228(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -424(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -152(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -232(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -260(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -228(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %esi, %edi
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 60(%eax), %esi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %esi, -164(%ebp) # 4-byte Spill
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebp, %ecx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 60(%eax), %ebp
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -196(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: setb %cl
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: addl %esi, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -136(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -264(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -420(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -228(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -424(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -196(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -152(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -232(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -232(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -244(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -152(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -424(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -152(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -152(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -420(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -152(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -152(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -152(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -160(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -424(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -420(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -232(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -588(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -632(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -828(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -636(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -404(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -540(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -628(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -228(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -624(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -196(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -56(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -424(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -76(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -420(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -344(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -636(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -40(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -632(%ebp) # 4-byte Spill
-; X32-NEXT: setb -588(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ecx, %esi
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -336(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -176(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -56(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -348(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -232(%ebp) # 4-byte Spill
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -152(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -64(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -216(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -64(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -672(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -832(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -76(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -344(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -72(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -56(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl (%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -164(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %cl
-; X32-NEXT: movl -216(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -320(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -344(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -72(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -64(%ebp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -56(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -40(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -56(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -244(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -344(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -40(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -76(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -76(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: addl (%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -76(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -280(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -312(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -344(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -40(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -56(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -672(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: adcl -836(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -840(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -56(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -844(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -232(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -424(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -152(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -420(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -72(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -636(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -64(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -632(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -588(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl %ebp, %edx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, %ebx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl %ebx, -344(%ebp) # 4-byte Spill
-; X32-NEXT: movl -40(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl -56(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: adcl $0, -76(%ebp) # 4-byte Folded Spill
-; X32-NEXT: addl -512(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -232(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -676(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -152(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -432(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -456(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -344(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -584(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -276(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -240(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -172(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -32(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %edx, -508(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -68(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -504(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -20(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -328(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -368(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -468(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -44(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -404(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -52(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -540(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -24(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -228(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -464(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, -196(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -232(%ebp), %edx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl -152(%ebp), %esi # 4-byte Reload
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl -72(%ebp), %edi # 4-byte Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl -64(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -56(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -292(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -232(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -372(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -152(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -88(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -296(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -272(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -344(%ebp) # 4-byte Spill
-; X32-NEXT: movl -40(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -36(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl -56(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -332(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -80(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: setb -372(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl (%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -188(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -240(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -276(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -240(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -148(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -392(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -80(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -440(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -272(%ebp) # 4-byte Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -36(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -296(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: setb %bl
-; X32-NEXT: movl -340(%ebp), %edi # 4-byte Reload
; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -680(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -884(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -276(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -240(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -172(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -440(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -236(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -276(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -132(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -140(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -20(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -276(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -36(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -240(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -80(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -172(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -20(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -236(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -80(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -172(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -172(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -172(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -172(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -392(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -80(%ebp), %edx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -36(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -20(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -172(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -680(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -856(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -36(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -852(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -292(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -848(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -20(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %ebx
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -148(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -24(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edx, %edi
-; X32-NEXT: movl %edi, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -432(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -88(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -456(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -316(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -656(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -892(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -44(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -52(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -20(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -416(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -236(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -44(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -112(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -52(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %ebp
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -324(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -400(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -32(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -88(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -52(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -24(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -20(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -24(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -236(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -88(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -112(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -32(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -32(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -32(%ebp), %edi # 1-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -364(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -20(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -88(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -24(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -656(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -700(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -860(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -864(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl -272(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -296(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -276(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -332(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -240(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -368(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -36(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -292(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -172(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -276(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %edi, %ecx
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %edi
; X32-NEXT: setb %cl
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %edi, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -336(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -176(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -32(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -296(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -24(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -272(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -68(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -316(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -68(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -684(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -868(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -276(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -512(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -240(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -32(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -296(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -416(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -276(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -164(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -240(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: setb %cl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %edi, %eax
+; X32-NEXT: addl %esi, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -324(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -400(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -512(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -276(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -68(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -240(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -32(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -296(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -512(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -244(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -296(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -32(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -68(%ebp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: setb -68(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl -68(%ebp), %esi # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -364(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -68(%ebp) # 4-byte Spill
-; X32-NEXT: movl -296(%ebp), %edx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ebx, %edx
-; X32-NEXT: movl -32(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -512(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl -68(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -684(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -876(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -872(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -880(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -20(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -88(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -272(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -332(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -276(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -368(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -240(%ebp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -80(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -296(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -36(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -292(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -292(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -172(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -68(%ebp) # 4-byte Spill
-; X32-NEXT: setb -88(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: movl %eax, %esi
; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -172(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -172(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -336(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -392(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -176(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -412(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -336(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl %ecx, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl -440(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -300(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -172(%ebp) # 4-byte Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -80(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -332(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movzbl -332(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: addl -688(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -888(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -36(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -20(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -332(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -336(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -176(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -440(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -20(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -36(%ebp) # 4-byte Spill
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -20(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull -164(%ebp) # 4-byte Folded Reload
; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -20(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: setb %cl
-; X32-NEXT: movl -340(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebx, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -132(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -140(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl %edi, -36(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -332(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -20(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -336(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -132(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -176(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: setb -176(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -408(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -244(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -332(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -336(%ebp) # 4-byte Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -332(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, %edi
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -332(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -192(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -332(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -392(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -224(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -412(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -360(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -336(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -132(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -140(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -176(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -688(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -900(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -360(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -896(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -392(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -904(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -412(%ebp) # 4-byte Spill
-; X32-NEXT: movl -172(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -296(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -80(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -32(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -36(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -292(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -20(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -68(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -88(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, %ebp
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl %ebx, -336(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -360(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -392(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl -412(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -656(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -172(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -700(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -80(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -376(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -220(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -336(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -640(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -360(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -472(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -392(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -436(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -232(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -432(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -152(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -456(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -44(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -52(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -344(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -24(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -40(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -272(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -56(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -276(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -240(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -372(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, -172(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -80(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %esi, -36(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %edi, -20(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -336(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -360(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -392(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %ebx, -412(%ebp) # 4-byte Spill
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -476(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -132(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -140(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -248(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -140(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -308(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -480(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -208(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -384(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -200(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl -212(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -476(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -248(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -40(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -252(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: movzbl -40(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: addl -692(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -920(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -132(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -140(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -200(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -176(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -212(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -516(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -132(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -140(%ebp) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -132(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 76(%eax), %edx
-; X32-NEXT: movl %edx, -132(%ebp) # 4-byte Spill
-; X32-NEXT: movl -212(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edx
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 76(%eax), %ecx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %ecx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -132(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -116(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -484(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -84(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -488(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: addl %ebx, -140(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -56(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -40(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -200(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -176(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -56(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -284(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -516(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -200(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -200(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -224(%ebp) # 4-byte Spill
-; X32-NEXT: movl -284(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -132(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -200(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -224(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -224(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -224(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -308(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -484(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -208(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -488(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movzbl (%esp), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -176(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -200(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -56(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -692(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -176(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -908(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -200(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -916(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -912(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -32(%ebp) # 4-byte Spill
-; X32-NEXT: movl -108(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -476(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -56(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -248(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -104(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -480(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -156(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -384(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -224(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -476(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -436(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -344(%ebp) # 4-byte Spill
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -436(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -248(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -232(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -92(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -696(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -932(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -76(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -72(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -224(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -56(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -516(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -436(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -72(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -132(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -472(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -484(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -256(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -488(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -88(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -436(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -472(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -224(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -56(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -56(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -108(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -516(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -224(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: addl (%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -76(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -108(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -132(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -72(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -72(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -72(%ebp), %esi # 1-byte Folded Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl -104(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: addl -484(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -156(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -488(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -72(%ebp) # 4-byte Spill
-; X32-NEXT: movl -224(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: movl -76(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -56(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -696(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -652(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -924(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -928(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl -64(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -224(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -220(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -76(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -140(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -152(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -40(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -176(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -200(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -68(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -32(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -548(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -40(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -544(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: setb %bl
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -544(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl (%esp), %edx # 4-byte Reload
+; X32-NEXT: addl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edi
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %edi, %ecx
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -104(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -380(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -156(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -356(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -548(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -56(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -88(%ebp) # 4-byte Spill
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -56(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull %edi
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -296(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -56(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -92(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -56(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -704(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -948(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -140(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -292(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -40(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -376(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -220(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -64(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -580(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -40(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -140(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl 8(%ebp), %eax
-; X32-NEXT: movl 92(%eax), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 92(%eax), %ebp
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: movl %esi, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -56(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: setb %cl
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: addl %esi, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -28(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -600(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -256(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -604(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -292(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -376(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -56(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -220(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -64(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -376(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -108(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -580(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -220(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -292(%ebp) # 4-byte Spill
-; X32-NEXT: movl -108(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -140(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -292(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -292(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -292(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -104(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -600(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -156(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -604(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -64(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -220(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -376(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -704(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -940(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -944(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -936(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -224(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -88(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -76(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -296(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -152(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -40(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -72(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -56(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -176(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -64(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -200(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -68(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -68(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -32(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -152(%ebp) # 4-byte Spill
-; X32-NEXT: setb -32(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -548(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -544(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -200(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ebp
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -380(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -308(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -356(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -380(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl %ecx, -356(%ebp) # 4-byte Spill
-; X32-NEXT: movl -212(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -548(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -76(%ebp) # 4-byte Spill
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -544(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -72(%ebp) # 4-byte Spill
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -252(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -708(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -960(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -176(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -376(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -200(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -224(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -380(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -356(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -212(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -580(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl -212(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -140(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -200(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %cl
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -116(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -600(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -84(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -604(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -376(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -176(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -224(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -200(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -380(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -116(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -356(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -84(%ebp) # 4-byte Spill
-; X32-NEXT: setb -356(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -580(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -380(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -224(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -380(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: mull %ebx
@@ -4694,2031 +4644,2038 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: addl %edi, %eax
; X32-NEXT: movl %eax, %edi
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -380(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -380(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -308(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -600(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -208(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -604(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -224(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl -116(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -84(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -356(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edi, %ebp
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -708(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -660(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -952(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -956(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -64(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -76(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -220(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -72(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -68(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -176(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -152(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -200(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -32(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -224(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %edi, -380(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %esi, -308(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -208(%ebp) # 4-byte Spill
-; X32-NEXT: movl -516(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -188(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -116(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -356(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -116(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -32(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -132(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -484(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -488(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -84(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -116(%ebp) # 4-byte Spill
-; X32-NEXT: movl -476(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -188(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -220(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -64(%ebp) # 4-byte Spill
-; X32-NEXT: movl -248(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -220(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -220(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
; X32-NEXT: setb %bl
-; X32-NEXT: movl -248(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -100(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -480(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -204(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -384(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl -356(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -100(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -32(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -204(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -84(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -116(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -476(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %esi, %ebp
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %edx, %esi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -236(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -356(%ebp) # 4-byte Spill
-; X32-NEXT: movl -248(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -112(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %edi, %eax
; X32-NEXT: movl %eax, %edi
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -32(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -248(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -32(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -480(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -384(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -356(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -204(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -32(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -84(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -480(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -116(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -384(%ebp) # 4-byte Spill
-; X32-NEXT: setb -204(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -516(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -236(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -100(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -116(%ebp) # 4-byte Spill
-; X32-NEXT: movl -132(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -100(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -112(%ebp) # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -100(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -84(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -112(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -84(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -484(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -304(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -488(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -480(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -116(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -384(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -100(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -204(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl %esi, -484(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -488(%ebp) # 4-byte Spill
-; X32-NEXT: movl -548(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %edi, %ecx
-; X32-NEXT: imull %eax, %ecx
-; X32-NEXT: movl -236(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -204(%ebp) # 4-byte Spill
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: imull -544(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl %esi, -236(%ebp) # 4-byte Spill
-; X32-NEXT: movl -580(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: imull %eax, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %ecx
+; X32-NEXT: movl %ecx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -148(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: imull %ebx, %esi
-; X32-NEXT: movl -188(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
; X32-NEXT: movl %eax, %ecx
; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl -140(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: imull %edi, %esi
; X32-NEXT: addl %edx, %esi
-; X32-NEXT: addl -204(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -236(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -140(%ebp) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
; X32-NEXT: movl %edi, %esi
-; X32-NEXT: movl -548(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -236(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: movl %eax, %ebx
; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -544(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -204(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebp, %esi
; X32-NEXT: setb %bl
-; X32-NEXT: movl -148(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: addl %esi, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -84(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -140(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -84(%ebp) # 4-byte Spill
-; X32-NEXT: movl -476(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -164(%ebp), %esi # 4-byte Reload
-; X32-NEXT: imull %eax, %esi
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: imull %ebp, %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %esi, %edx
-; X32-NEXT: imull -248(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl %ecx, -244(%ebp) # 4-byte Spill
-; X32-NEXT: movl -516(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -144(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: imull %ebx, %esi
-; X32-NEXT: movl -300(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: imull %edi, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %esi, %edx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ebx, %edx
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -132(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: imull %eax, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: addl -148(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -128(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -244(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -132(%ebp) # 4-byte Spill
-; X32-NEXT: movl -476(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl -300(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -248(%ebp) # 4-byte Folded Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb -244(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -144(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -248(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -244(%ebp), %esi # 1-byte Folded Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: addl -128(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -132(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -148(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -236(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -204(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -304(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -84(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -116(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -148(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -164(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -484(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -384(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -488(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -300(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %esi
-; X32-NEXT: movl 104(%esi), %ebx
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl %ebx, -244(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %edi # 4-byte Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebp, %edi
+; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl 104(%esi), %ebp
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, -236(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: movl 108(%esi), %eax
-; X32-NEXT: movl %eax, -100(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -92(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -204(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ebx
-; X32-NEXT: setb -116(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ebx, %edi
-; X32-NEXT: movzbl -116(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl -244(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -248(%ebp) # 4-byte Spill
-; X32-NEXT: addl -28(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -256(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -112(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movl 96(%ecx), %edi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl %edi, -84(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -132(%ebp) # 4-byte Spill
-; X32-NEXT: movl 100(%ecx), %eax
-; X32-NEXT: movl %eax, -116(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl 96(%edi), %ebx
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl -132(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl 100(%edi), %edi
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -92(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -132(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb -144(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -116(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: movzbl -144(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: mull %edx
-; X32-NEXT: movl %edx, -188(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -144(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -256(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %edi
; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: adcl %esi, %eax
-; X32-NEXT: addl -236(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -28(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -204(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -256(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -112(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -140(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -84(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -204(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -236(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl %ebp, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: addl %edi, %ecx
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -204(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -204(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -116(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -144(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -188(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -236(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -256(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -204(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -112(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -144(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -140(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -188(%ebp) # 4-byte Spill
-; X32-NEXT: setb -112(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -244(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -108(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -256(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -28(%ebp) # 4-byte Spill
-; X32-NEXT: movl -100(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -256(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %ebp, %ebx
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -96(%ebp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -256(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ecx
; X32-NEXT: setb %bl
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -96(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -248(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -104(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -128(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -156(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -144(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -28(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -188(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -256(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -112(%ebp), %eax # 1-byte Folded Reload
-; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl %edi, -248(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movl 112(%ecx), %eax
-; X32-NEXT: movl %eax, -156(%ebp) # 4-byte Spill
-; X32-NEXT: imull %eax, %esi
-; X32-NEXT: movl -108(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, -144(%ebp) # 4-byte Spill
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl 116(%ecx), %eax
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
-; X32-NEXT: imull %eax, %edi
-; X32-NEXT: addl %edx, %edi
-; X32-NEXT: movl %edi, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl 120(%ecx), %eax
-; X32-NEXT: movl %ecx, %ebx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: movl -92(%ebp), %esi # 4-byte Reload
-; X32-NEXT: imull %esi, %edi
-; X32-NEXT: movl -168(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -96(%ebp) # 4-byte Spill
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: movl 124(%ebx), %ebx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: imull %eax, %ebx
-; X32-NEXT: addl %edx, %ebx
-; X32-NEXT: movl -144(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, -96(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -108(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -156(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -144(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -144(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl -168(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -104(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -168(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -104(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movzbl %cl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -96(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -92(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl %edx, -96(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
-; X32-NEXT: imull %eax, %edi
-; X32-NEXT: movl -284(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl 112(%esi), %edi
+; X32-NEXT: imull %edi, %ebp
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: imull -116(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl 116(%esi), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: imull %eax, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl %ecx, -284(%ebp) # 4-byte Spill
-; X32-NEXT: movl -244(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ecx, %ebx
+; X32-NEXT: movl 120(%esi), %eax
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl -252(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: imull %ebx, %ecx
-; X32-NEXT: movl -212(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: imull %esi, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -100(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: imull %edi, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl 124(%ecx), %ecx
+; X32-NEXT: imull %ebp, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: addl -104(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -104(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -284(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -100(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl %edi, %ecx
-; X32-NEXT: movl -84(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -284(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -116(%ebp) # 4-byte Folded Reload
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
-; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ebp, %ebx
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -116(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %bl, %esi
; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: addl -104(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -100(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -284(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -108(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -168(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -92(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -96(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -28(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: imull %eax, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl %esi, %edx
+; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: imull %ebp, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %esi, %edx
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: imull %eax, %ecx
+; X32-NEXT: addl %edx, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %ebx, %esi
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %esi, %edi
+; X32-NEXT: adcl %ebp, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, %ecx
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, %edi
-; X32-NEXT: adcl -256(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, %ebx
-; X32-NEXT: adcl -248(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -116(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -128(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -256(%ebp) # 4-byte Spill
-; X32-NEXT: movl -304(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl -64(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -132(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -220(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -236(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -356(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -204(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -32(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -148(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -284(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -164(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -384(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, -116(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -256(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -300(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl -76(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -304(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -72(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -132(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -176(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -236(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -200(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -204(%ebp) # 4-byte Spill
-; X32-NEXT: movl -224(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -284(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -380(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -140(%ebp) # 4-byte Spill
-; X32-NEXT: movl -308(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -116(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -208(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -256(%ebp) # 4-byte Spill
-; X32-NEXT: movl -492(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: addl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 92(%eax), %esi
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 92(%eax), %eax
-; X32-NEXT: movl %eax, -96(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -28(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -556(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -136(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -560(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -264(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -92(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -28(%ebp) # 4-byte Spill
-; X32-NEXT: movl -552(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -260(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -168(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -168(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -128(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -460(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -712(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -976(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -108(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -104(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -168(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -92(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -28(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -552(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -104(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %ecx
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -104(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -60(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %edi, %ebp
+; X32-NEXT: movzbl %bl, %eax
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -524(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -160(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -528(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -268(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -48(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -108(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -168(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -104(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -92(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -28(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -28(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -492(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -184(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -92(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -212(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -92(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebp, %edi
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -92(%ebp) # 4-byte Spill
-; X32-NEXT: movl -492(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -60(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -208(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -92(%ebp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -92(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -92(%ebp), %edi # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -556(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -160(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -560(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -268(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -212(%ebp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -208(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -28(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -712(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -212(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -968(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -208(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -964(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -244(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -972(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -248(%ebp) # 4-byte Spill
-; X32-NEXT: movl -388(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -92(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -168(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl 76(%eax), %eax
-; X32-NEXT: movl %eax, -28(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -168(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -252(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -564(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -136(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -568(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -264(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -156(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl -520(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -260(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -308(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -168(%ebp) # 4-byte Spill
-; X32-NEXT: movl -444(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -308(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -308(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -444(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -716(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -992(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -92(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -252(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -100(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -156(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -48(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -520(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -92(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -252(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -92(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -60(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -92(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -444(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -500(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -160(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -496(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -268(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebx
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -252(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -92(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -156(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -48(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: setb -48(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -388(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -184(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -156(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -100(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -156(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -156(%ebp) # 4-byte Spill
-; X32-NEXT: movl -388(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -60(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -156(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -156(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -156(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -564(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -160(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -568(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -268(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -100(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl -84(%ebp), %esi # 4-byte Reload
+; X32-NEXT: adcl %edx, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebx, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movzbl -48(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -716(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -988(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -984(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -980(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl -148(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -100(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -128(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -108(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -144(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -104(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -188(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -212(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -208(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -244(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -248(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -388(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -348(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: adcl $0, %ebp
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %esi, %ecx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -216(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %esi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: setb %cl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -564(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -180(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -568(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -320(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -128(%ebp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -148(%ebp) # 4-byte Spill
-; X32-NEXT: movl -520(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -348(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, -156(%ebp) # 4-byte Spill
-; X32-NEXT: movl -444(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -216(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -104(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -112(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -444(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -112(%ebp), %ecx # 1-byte Folded Reload
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -720(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -1008(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -108(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -300(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -48(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -112(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -128(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -148(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -520(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -288(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, -48(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, %edx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %edi, %ecx
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -16(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -108(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: setb %cl
-; X32-NEXT: movl -444(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: addl %ebx, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -500(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -280(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -496(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -312(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl -300(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -48(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -112(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -108(%ebp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ebp
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ebp
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl -128(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -148(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: setb -112(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -388(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -288(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -128(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edx, -300(%ebp) # 4-byte Spill
-; X32-NEXT: movl -388(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -16(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -128(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -300(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: setb -300(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -300(%ebp), %edi # 1-byte Folded Reload
-; X32-NEXT: adcl %edi, %edx
-; X32-NEXT: movl -564(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -280(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -568(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -312(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %bl, %esi
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ebx
-; X32-NEXT: movl -148(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -128(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movzbl -112(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl %ebp, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -720(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -664(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -996(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -1000(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -156(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -104(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -144(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -48(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -188(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -108(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl -212(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -148(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -208(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -244(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -248(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -144(%ebp) # 4-byte Spill
-; X32-NEXT: setb -100(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -492(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -212(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: movl %eax, %esi
; X32-NEXT: addl %ecx, %esi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -216(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %esi, %eax
-; X32-NEXT: movl %eax, -208(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: setb -248(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -96(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -248(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -180(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -556(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -320(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -560(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl %esi, -180(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl %ecx, -320(%ebp) # 4-byte Spill
-; X32-NEXT: movl -552(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -248(%ebp) # 4-byte Spill
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -216(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -244(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -188(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %ecx, %edi
-; X32-NEXT: movzbl -188(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %edi, %esi
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: addl -724(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -1004(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -212(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -208(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -188(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -180(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -320(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -552(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -288(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -208(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -212(%ebp) # 4-byte Spill
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -208(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull -16(%ebp) # 4-byte Folded Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %ebp, %ecx
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -208(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebp
; X32-NEXT: setb %cl
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -16(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebp, %eax
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -524(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -280(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -528(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -312(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl %edi, -212(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -188(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -208(%ebp) # 4-byte Folded Spill
+; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -180(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -524(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -320(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -528(%ebp) # 4-byte Spill
-; X32-NEXT: setb -180(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -492(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -288(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -188(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -320(%ebp) # 4-byte Spill
-; X32-NEXT: movl -96(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -188(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -16(%ebp) # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %edi, %eax
; X32-NEXT: movl %eax, %edi
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -188(%ebp) # 1-byte Folded Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull -16(%ebp) # 4-byte Folded Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -188(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -556(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -280(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -560(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -312(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -320(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl -524(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -528(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movzbl -180(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -724(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -668(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -732(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -728(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -148(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -248(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -128(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -244(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -84(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -212(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -144(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -208(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -100(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, -320(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %edi, -300(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %esi, -556(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -560(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %ebx
-; X32-NEXT: movl 96(%ebx), %ecx
-; X32-NEXT: movl %ecx, -312(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -100(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -180(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -100(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl 100(%ebx), %ebx
-; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl 96(%ecx), %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: movl %ecx, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: movl %ebx, -100(%ebp) # 4-byte Spill
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: setb -280(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl 100(%eax), %esi
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: movzbl -280(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %esi, %edi
+; X32-NEXT: movzbl %bl, %eax
; X32-NEXT: adcl %eax, %ecx
-; X32-NEXT: movl -312(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %eax, -84(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edx, -280(%ebp) # 4-byte Spill
-; X32-NEXT: movl -160(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl -268(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: addl %ebx, %edi
-; X32-NEXT: movl %edi, -188(%ebp) # 4-byte Spill
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl %esi, -144(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -312(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -164(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -100(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -384(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: setb %cl
-; X32-NEXT: movl -124(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ebx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %cl, %ecx
+; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -84(%ebp), %edi # 4-byte Reload
-; X32-NEXT: addl -136(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -264(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %edi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl -180(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -148(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -280(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -188(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -144(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 104(%eax), %ecx
-; X32-NEXT: movl %ecx, -180(%ebp) # 4-byte Spill
-; X32-NEXT: movl -260(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT: movl 104(%ebp), %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -128(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -148(%ebp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %esi, %eax
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl -128(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl 12(%ebp), %eax
-; X32-NEXT: movl 108(%eax), %edx
-; X32-NEXT: movl %ebx, %eax
; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %ebx, -112(%ebp) # 4-byte Spill
-; X32-NEXT: mull %ebx
-; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl 108(%ebp), %esi
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: mull %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %edx, %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -128(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: setb -176(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl %esi, %edi
-; X32-NEXT: movzbl -176(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movzbl %bl, %eax
; X32-NEXT: adcl %eax, %ecx
-; X32-NEXT: movl -180(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: mull %edx
-; X32-NEXT: movl %edx, -200(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -176(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: movl -264(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: addl %edi, %esi
+; X32-NEXT: addl %esi, %edi
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl -84(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl %ecx, -148(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -280(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, -128(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl -188(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -136(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -144(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -264(%ebp) # 4-byte Spill
-; X32-NEXT: setb -84(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -184(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
; X32-NEXT: movl %ebx, %esi
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -144(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -280(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -144(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ebx, %ebp
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -112(%ebp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, %ebp
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -144(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -112(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl %ebx, %edi
+; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -144(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -160(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -176(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -268(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -200(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -136(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -280(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -264(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -136(%ebp) # 4-byte Spill
-; X32-NEXT: movzbl -84(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl %esi, -160(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -268(%ebp) # 4-byte Spill
-; X32-NEXT: movl -348(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %edi, %ecx
; X32-NEXT: imull %eax, %ecx
-; X32-NEXT: movl -180(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -264(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: imull -216(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl %esi, -180(%ebp) # 4-byte Spill
-; X32-NEXT: movl -288(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -100(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: imull %ebx, %esi
-; X32-NEXT: movl -312(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl -16(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: imull %edi, %esi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %esi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: imull %ecx, %esi
; X32-NEXT: addl %edx, %esi
-; X32-NEXT: addl -264(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -84(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -180(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ecx, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -348(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -288(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %esi
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: addl %ebx, %edi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -216(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -264(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -100(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -84(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -348(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -16(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -180(%ebp) # 4-byte Spill
-; X32-NEXT: movl 12(%ebp), %edx
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl 124(%edx), %ecx
-; X32-NEXT: movl -260(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: imull %eax, %ecx
; X32-NEXT: movl 120(%edx), %esi
; X32-NEXT: movl %edx, %edi
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -216(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: imull -124(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl 112(%edi), %ebx
-; X32-NEXT: movl 116(%edi), %ecx
-; X32-NEXT: movl %ecx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl 112(%edi), %ebp
+; X32-NEXT: movl 116(%edi), %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: imull %ecx, %edi
-; X32-NEXT: mull %ebx
+; X32-NEXT: imull %ebx, %edi
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %edi, %edx
-; X32-NEXT: movl -60(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: imull %ebx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: imull %ebp, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: addl -216(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl %ecx, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -260(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -312(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -216(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: addl -312(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edx, %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull -124(%ebp) # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %ecx
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: setb -260(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull -124(%ebp) # 4-byte Folded Reload
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movzbl -260(%ebp), %esi # 1-byte Folded Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: addl -184(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -60(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -216(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -288(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl -264(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -348(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -180(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl -280(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -216(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -136(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -264(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -160(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -180(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -268(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -288(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -520(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: movl %edx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -16(%ebp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %esi, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: movl -444(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -136(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ebx, %ebp
; X32-NEXT: adcl %edi, %ecx
; X32-NEXT: setb %bl
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movzbl %bl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -500(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -496(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, -160(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edx, %esi
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -520(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -124(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl -124(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -444(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ebp, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -124(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %ebp
; X32-NEXT: setb %bl
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl %bl, %ecx
-; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -500(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -324(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -496(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -400(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: addl -60(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -500(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -136(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -496(%ebp) # 4-byte Spill
-; X32-NEXT: adcl $0, -160(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, -16(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -416(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edx, %esi
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ebx
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: setb %bl
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: movl %esi, %ebp
+; X32-NEXT: mull %ecx
+; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movzbl %bl, %edi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %edx, %esi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -388(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -28(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %edi, %eax
; X32-NEXT: movl %eax, %edi
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -136(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -316(%ebp), %eax # 4-byte Reload
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -136(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -324(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -564(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -400(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -568(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -500(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -60(%ebp) # 4-byte Folded Spill
-; X32-NEXT: adcl -496(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -136(%ebp) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl -160(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -324(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -16(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -400(%ebp) # 4-byte Spill
-; X32-NEXT: setb -160(%ebp) # 1-byte Folded Spill
-; X32-NEXT: movl -352(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl -388(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, -268(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %edx, %ebp
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %esi
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %edi
-; X32-NEXT: addl -268(%ebp), %edi # 4-byte Folded Reload
+; X32-NEXT: addl %ebp, %edi
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: mull -28(%ebp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: mull %ebp
; X32-NEXT: movl %edx, %ecx
; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movl %eax, -268(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %edi
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: setb -260(%ebp) # 1-byte Folded Spill
+; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: movl -28(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
+; X32-NEXT: mull %ebp
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movzbl -260(%ebp), %ecx # 1-byte Folded Reload
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: movl -364(%ebp), %esi # 4-byte Reload
-; X32-NEXT: addl -564(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -396(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -568(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %eax, %esi
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl -324(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, -16(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movl -400(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, -268(%ebp) # 4-byte Folded Spill
-; X32-NEXT: movzbl -160(%ebp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, %esi
-; X32-NEXT: movl %esi, -364(%ebp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %ecx, -396(%ebp) # 4-byte Spill
-; X32-NEXT: movl -440(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %edi, %ecx
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %ebp, %ecx
; X32-NEXT: imull %eax, %ecx
-; X32-NEXT: movl -388(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
-; X32-NEXT: movl %eax, -28(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: imull -340(%ebp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: imull %ebp, %esi
; X32-NEXT: addl %edx, %esi
-; X32-NEXT: movl %esi, -388(%ebp) # 4-byte Spill
-; X32-NEXT: movl -408(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -444(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: imull %ebx, %esi
-; X32-NEXT: movl -520(%ebp), %edi # 4-byte Reload
-; X32-NEXT: mull %edi
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %esi, %edx
-; X32-NEXT: movl -192(%ebp), %esi # 4-byte Reload
-; X32-NEXT: imull %edi, %esi
-; X32-NEXT: addl %edx, %esi
-; X32-NEXT: addl -28(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -388(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -192(%ebp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -440(%ebp), %esi # 4-byte Reload
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: movl %eax, -324(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: imull %ebx, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: mull %esi
+; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: addl %edi, %edx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: imull %eax, %edi
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: movl %eax, %ebx
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: movl -340(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, %ebx
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %ebp
+; X32-NEXT: movl %ebp, %esi
+; X32-NEXT: movl %edx, %ebp
; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, -260(%ebp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: setb %bl
-; X32-NEXT: movl -444(%ebp), %eax # 4-byte Reload
-; X32-NEXT: mull %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: movzbl %bl, %ecx
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl %edi, %ebp
+; X32-NEXT: setb %cl
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: mull %esi
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %edx
-; X32-NEXT: addl -28(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -340(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -192(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -192(%ebp) # 4-byte Spill
-; X32-NEXT: movl -416(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -96(%ebp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X32-NEXT: imull %eax, %edi
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl -492(%ebp), %ecx # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X32-NEXT: mull %ecx
-; X32-NEXT: movl %eax, -28(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
; X32-NEXT: addl %edi, %edx
-; X32-NEXT: imull -316(%ebp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: imull %ebx, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl %ecx, -492(%ebp) # 4-byte Spill
-; X32-NEXT: movl -352(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl -460(%ebp), %edi # 4-byte Reload
-; X32-NEXT: imull %edi, %ecx
-; X32-NEXT: movl -552(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: mull %ebx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: imull %esi, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: mull %edi
; X32-NEXT: addl %ecx, %edx
-; X32-NEXT: movl -120(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: imull %ebx, %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: imull %edi, %ecx
; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: addl -28(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -96(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -492(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -120(%ebp) # 4-byte Spill
-; X32-NEXT: movl %ebx, %eax
-; X32-NEXT: mull %esi
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: movl %eax, -28(%ebp) # 4-byte Spill
+; X32-NEXT: addl %ebp, %eax
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X32-NEXT: movl %edi, %eax
-; X32-NEXT: mull %esi
+; X32-NEXT: movl %edi, %ebp
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: mull %ecx
+; X32-NEXT: movl %edx, %edi
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: mull %ecx
; X32-NEXT: movl %edx, %esi
; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: addl %edi, %ecx
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: movl -552(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl -316(%ebp), %ebx # 4-byte Reload
+; X32-NEXT: movl %ebp, %eax
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %edi
-; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl %eax, -160(%ebp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ebp
+; X32-NEXT: addl %ecx, %ebp
; X32-NEXT: adcl %esi, %edi
; X32-NEXT: setb %cl
-; X32-NEXT: movl -460(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X32-NEXT: mull %ebx
; X32-NEXT: movl %edx, %esi
-; X32-NEXT: addl %edi, %eax
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: addl %edi, %edx
; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: addl -96(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -120(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -28(%ebp), %edx # 4-byte Reload
-; X32-NEXT: addl -324(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -160(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -260(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl -340(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl -192(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl -16(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ebx
-; X32-NEXT: adcl -268(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -160(%ebp) # 4-byte Spill
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: adcl -364(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -396(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl -164(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -124(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -384(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl -60(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -148(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -136(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -128(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl -216(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: movl -160(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -264(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl -180(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -120(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -288(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: addl -248(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -244(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -124(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -212(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -60(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -208(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -136(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -320(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -300(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -160(%ebp) # 4-byte Spill
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -556(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -120(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -560(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %eax # 4-byte Reload
-; X32-NEXT: addl -344(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -168(%ebp) # 4-byte Spill
-; X32-NEXT: movl -308(%ebp), %esi # 4-byte Reload
-; X32-NEXT: adcl -232(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl -252(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -436(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -92(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -472(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -92(%ebp) # 4-byte Spill
-; X32-NEXT: movl -156(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -88(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -104(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -296(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -104(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -40(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -48(%ebp) # 4-byte Spill
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -56(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -304(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -184(%ebp) # 4-byte Spill
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -132(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -124(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -236(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -204(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -136(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -284(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: movl -160(%ebp), %edx # 4-byte Reload
-; X32-NEXT: adcl -140(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -116(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl -16(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -256(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl -168(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: addl -432(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -168(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -456(%ebp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, -308(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -44(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, -252(%ebp) # 4-byte Spill
-; X32-NEXT: movl -92(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -52(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -92(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -24(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, -156(%ebp) # 4-byte Spill
-; X32-NEXT: movl -104(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -272(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -104(%ebp) # 4-byte Spill
-; X32-NEXT: movl -48(%ebp), %ebx # 4-byte Reload
-; X32-NEXT: adcl -276(%ebp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl -108(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -240(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -108(%ebp) # 4-byte Spill
-; X32-NEXT: movl -184(%ebp), %edi # 4-byte Reload
-; X32-NEXT: adcl -172(%ebp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl -124(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -80(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -124(%ebp) # 4-byte Spill
-; X32-NEXT: movl -60(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -36(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -60(%ebp) # 4-byte Spill
-; X32-NEXT: movl -136(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -20(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -136(%ebp) # 4-byte Spill
-; X32-NEXT: movl -28(%ebp), %ecx # 4-byte Reload
-; X32-NEXT: adcl -336(%ebp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, -28(%ebp) # 4-byte Spill
-; X32-NEXT: adcl -360(%ebp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl -392(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -120(%ebp) # 4-byte Spill
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
-; X32-NEXT: adcl -412(%ebp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, -16(%ebp) # 4-byte Spill
-; X32-NEXT: movl 16(%ebp), %ecx
-; X32-NEXT: movl -648(%ebp), %esi # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, %ebp
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, (%ecx)
-; X32-NEXT: movl -644(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 4(%ecx)
-; X32-NEXT: movl -536(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 8(%ecx)
-; X32-NEXT: movl -596(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 12(%ecx)
-; X32-NEXT: movl -592(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 16(%ecx)
-; X32-NEXT: movl -532(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 20(%ecx)
-; X32-NEXT: movl -428(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 24(%ecx)
-; X32-NEXT: movl -452(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 28(%ecx)
-; X32-NEXT: movl -508(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 32(%ecx)
-; X32-NEXT: movl -504(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 36(%ecx)
-; X32-NEXT: movl -328(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 40(%ecx)
-; X32-NEXT: movl -468(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 44(%ecx)
-; X32-NEXT: movl -404(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 48(%ecx)
-; X32-NEXT: movl -540(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 52(%ecx)
-; X32-NEXT: movl -228(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 56(%ecx)
-; X32-NEXT: movl -196(%ebp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X32-NEXT: movl %esi, 60(%ecx)
-; X32-NEXT: movl -168(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 64(%ecx)
-; X32-NEXT: movl -308(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 68(%ecx)
-; X32-NEXT: movl -252(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 72(%ecx)
-; X32-NEXT: movl -92(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 76(%ecx)
-; X32-NEXT: movl -156(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 80(%ecx)
-; X32-NEXT: movl -104(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 84(%ecx)
-; X32-NEXT: movl %ebx, 88(%ecx)
-; X32-NEXT: movl -108(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 92(%ecx)
-; X32-NEXT: movl %edi, 96(%ecx)
-; X32-NEXT: movl -124(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 100(%ecx)
-; X32-NEXT: movl -60(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 104(%ecx)
-; X32-NEXT: movl -136(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 108(%ecx)
-; X32-NEXT: movl -28(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 112(%ecx)
-; X32-NEXT: movl %edx, 116(%ecx)
-; X32-NEXT: movl -120(%ebp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 120(%ecx)
-; X32-NEXT: movl -16(%ebp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 64(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 68(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 72(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 76(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 80(%ecx)
+; X32-NEXT: movl %ebp, 84(%ecx)
+; X32-NEXT: movl %edi, 88(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 92(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 96(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 100(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 104(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 108(%ecx)
+; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 112(%ecx)
+; X32-NEXT: movl %ebx, 116(%ecx)
+; X32-NEXT: movl %edx, 120(%ecx)
; X32-NEXT: movl %eax, 124(%ecx)
-; X32-NEXT: addl $996, %esp # imm = 0x3E4
+; X32-NEXT: addl $1000, %esp # imm = 0x3E8
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X32-NEXT: popl %ebx
@@ -6734,13 +6691,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: pushq %r12
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $352, %rsp # imm = 0x160
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq 48(%rdi), %r9
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq 40(%rdi), %rbp
-; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq 32(%rdi), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdi, %r10
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: mulq %r8
@@ -6753,7 +6710,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: adcq %rdi, %rbp
; X64-NEXT: setb %bl
@@ -6762,8 +6719,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r11, %r12
; X64-NEXT: movq %r11, %r8
; X64-NEXT: addq %rax, %r12
@@ -6772,17 +6729,17 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbx, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq (%rsi), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: xorl %ebp, %ebp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq 8(%rsi), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rbp
; X64-NEXT: xorl %r11d, %r11d
; X64-NEXT: movq %rax, %r15
@@ -6791,18 +6748,18 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rdi, %r15
; X64-NEXT: adcq %rcx, %rbp
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: setb %bl
; X64-NEXT: addq %rax, %rbp
; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax
; X64-NEXT: movq %rsi, %r13
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r11
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdi, %r14
; X64-NEXT: addq %rax, %r14
; X64-NEXT: movq %rcx, %r11
@@ -6811,13 +6768,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq %r8, %rax
; X64-NEXT: movq %r8, %rbp
-; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %r9, %rax
; X64-NEXT: adcq %rcx, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq (%r10), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rsi
@@ -6826,35 +6783,35 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: adcq %rcx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq 32(%r13), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq %rbx, %rcx
; X64-NEXT: addq %r13, %rax
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: adcq %rdx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: addq %r9, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r14, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: adcq %r11, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r11, %rdi
; X64-NEXT: movq 8(%r10), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, %r11
; X64-NEXT: addq %rsi, %r11
@@ -6862,16 +6819,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %rcx, %r11
; X64-NEXT: adcq %rsi, %rbp
-; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: setb %bl
; X64-NEXT: addq %rax, %rbp
; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%r10), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rcx, %r8
; X64-NEXT: addq %rax, %r8
; X64-NEXT: movq %rsi, %r10
@@ -6881,24 +6838,24 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rbx, %r10
; X64-NEXT: movq %rcx, %rdx
; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: addq %r9, %rdx
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r11, %r8
; X64-NEXT: adcq %r8, %r15
-; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rax, %r14
-; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: adcq %r10, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: movq 40(%rsi), %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: xorl %r14d, %r14d
; X64-NEXT: mulq %r14
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: addq %r9, %rdi
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
@@ -6909,50 +6866,50 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movzbl %bl, %r11d
; X64-NEXT: adcq %rdx, %r11
; X64-NEXT: movq 48(%rsi), %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r14
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r13, %rbx
; X64-NEXT: addq %rax, %rbx
; X64-NEXT: movq %r9, %rsi
; X64-NEXT: adcq %rdx, %rsi
; X64-NEXT: addq %rbp, %rbx
; X64-NEXT: adcq %r11, %rsi
-; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: addq %r13, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rdi, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rsi, %r10
-; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq %rdx, %rax
; X64-NEXT: addq %r13, %rax
; X64-NEXT: movq (%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r9, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %rax
; X64-NEXT: addq %r13, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq 56(%rax), %r11
; X64-NEXT: movq %r11, %rax
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdi, %r10
; X64-NEXT: movq %rdx, %rbp
@@ -6960,7 +6917,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r8
@@ -6973,19 +6930,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r15
; X64-NEXT: adcq %rdx, %r12
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r10, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp
@@ -6997,7 +6954,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rcx
; X64-NEXT: setb %bl
; X64-NEXT: movq %rdi, %rax
@@ -7007,15 +6964,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rsi
; X64-NEXT: movzbl %bl, %eax
; X64-NEXT: adcq %rax, %r13
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
; X64-NEXT: addq %r9, %rsi
; X64-NEXT: adcq %r8, %r13
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
; X64-NEXT: movq %r10, %rbx
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r10
@@ -7026,12 +6983,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq $0, %rdi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq 24(%rax), %rcx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbp, %r8
@@ -7042,30 +6999,30 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %dil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: addq %r14, %rbp
; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: adcq %r9, %rbx
; X64-NEXT: addq %rax, %rbp
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: addq %rsi, %r10
-; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r13, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: adcq $0, %rbx
; X64-NEXT: addq %r15, %rbp
; X64-NEXT: adcq %r12, %rbx
; X64-NEXT: setb %r15b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; X64-NEXT: movq %r12, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
@@ -7073,7 +7030,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %r11, %rdi
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r11
@@ -7086,9 +7043,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %sil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: addq %r14, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: adcq %r9, %r14
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: adcq %rdx, %r14
@@ -7097,24 +7054,24 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movzbl %r15b, %eax
; X64-NEXT: adcq %rax, %rcx
; X64-NEXT: adcq $0, %r14
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
-; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq 24(%rax), %rcx
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rsi, %r11
; X64-NEXT: movq %rdx, %rsi
@@ -7122,7 +7079,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rbx, %rbp
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %r15
@@ -7134,19 +7091,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movzbl %sil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r8
; X64-NEXT: adcq %rdx, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rbp
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp
@@ -7158,7 +7115,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rdi
; X64-NEXT: setb %cl
; X64-NEXT: movq %rsi, %rax
@@ -7169,14 +7126,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rdi, %rbx
; X64-NEXT: movzbl %cl, %eax
; X64-NEXT: adcq %rax, %rsi
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
; X64-NEXT: addq %r14, %rbx
; X64-NEXT: adcq %r15, %rsi
; X64-NEXT: adcq $0, %r8
; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %r11, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
@@ -7200,29 +7157,29 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %dil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; X64-NEXT: addq %r13, %rdi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: adcq %r14, %rbp
; X64-NEXT: addq %rax, %rdi
; X64-NEXT: adcq %rdx, %rbp
; X64-NEXT: addq %rbx, %r9
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rsi, %r11
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rdi
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: addq %r8, %rdi
; X64-NEXT: adcq %r10, %rbp
; X64-NEXT: setb %r9b
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %r10
; X64-NEXT: movq %rax, %r11
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rsi
@@ -7241,10 +7198,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %bl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; X64-NEXT: movq %r10, %rcx
; X64-NEXT: addq %r13, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rsi
; X64-NEXT: movq %rbx, %r12
; X64-NEXT: adcq %r14, %rsi
@@ -7255,25 +7212,25 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movzbl %r9b, %eax
; X64-NEXT: adcq %rax, %rcx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq $0, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14
@@ -7286,7 +7243,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %rcx
; X64-NEXT: adcq $0, %rbx
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r8
@@ -7300,17 +7257,17 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq %r10, %r9
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
; X64-NEXT: movq %r12, %r10
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r9
; X64-NEXT: adcq %rdx, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rsi
@@ -7321,7 +7278,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rsi, %rcx
; X64-NEXT: setb %sil
; X64-NEXT: movq %rdi, %rax
@@ -7331,15 +7288,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: movzbl %sil, %eax
; X64-NEXT: adcq %rax, %r15
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
; X64-NEXT: addq %r14, %rbx
; X64-NEXT: adcq %r8, %r15
; X64-NEXT: adcq $0, %r9
; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rax, %r12
@@ -7350,7 +7307,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r14, %rcx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq 56(%rax), %rdi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %rdi
@@ -7365,11 +7322,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; X64-NEXT: addq %r11, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; X64-NEXT: adcq %r13, %rsi
; X64-NEXT: addq %rax, %rcx
; X64-NEXT: adcq %rdx, %rsi
@@ -7379,14 +7336,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: addq %r9, %rcx
; X64-NEXT: adcq %r10, %rsi
-; X64-NEXT: setb {{[0-9]+}}(%rsp) # 1-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r15
@@ -7395,7 +7352,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq $0, %r15
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %r8
@@ -7407,48 +7364,48 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %r9, %rax
; X64-NEXT: movzbl %bl, %edi
; X64-NEXT: adcq %rdi, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
; X64-NEXT: addq %r11, %r15
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: adcq %r13, %rbp
; X64-NEXT: addq %rax, %r15
; X64-NEXT: adcq %rdx, %rbp
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: adcq %rsi, %r8
-; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
; X64-NEXT: adcq %rax, %r15
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
-; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rdx
; X64-NEXT: adcq $0, %r8
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rsi, %r10
@@ -7457,7 +7414,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %r11, %rbx
; X64-NEXT: adcq $0, %rdi
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r12
@@ -7470,20 +7427,20 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %bl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r8
; X64-NEXT: adcq %rdx, %rcx
; X64-NEXT: movq %rcx, %r14
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r10, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rdi
@@ -7495,7 +7452,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rdi, %rcx
; X64-NEXT: setb %bl
; X64-NEXT: movq %rsi, %rax
@@ -7505,17 +7462,17 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rdi
; X64-NEXT: movzbl %bl, %eax
; X64-NEXT: adcq %rax, %r11
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
; X64-NEXT: adcq %r12, %r11
; X64-NEXT: adcq $0, %r8
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r13, %rbx
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movq %rax, %r12
@@ -7528,7 +7485,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: addq %rcx, %rax
@@ -7541,11 +7498,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; X64-NEXT: addq %r13, %rsi
; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: adcq %r14, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
@@ -7554,18 +7511,18 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r8, %r11
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
; X64-NEXT: movq %rcx, (%rsp) # 8-byte Spill
-; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq %r10, %rsi
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
@@ -7584,47 +7541,47 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %bl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: addq %r13, %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: adcq %r14, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %rax, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; X64-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; X64-NEXT: adcq %r15, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbp, %r11
; X64-NEXT: movq %r11, (%rsp) # 8-byte Spill
-; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
; X64-NEXT: adcq %rax, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq 64(%rcx), %r11
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
@@ -7643,7 +7600,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rsi, %rdi
@@ -7654,20 +7611,20 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; X64-NEXT: addq %rbx, %r12
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
; X64-NEXT: adcq %r14, %r15
; X64-NEXT: addq %rdi, %r12
; X64-NEXT: adcq %rcx, %r15
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
@@ -7679,7 +7636,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rdi, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rsi, %rcx
; X64-NEXT: setb %sil
; X64-NEXT: movq %r9, %rax
@@ -7687,15 +7644,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %sil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %rbx
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r13, %rbx
; X64-NEXT: adcq %r8, %r14
; X64-NEXT: adcq $0, %r12
; X64-NEXT: adcq $0, %r15
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq 80(%rbp), %rdi
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rdi
@@ -7725,18 +7682,18 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: mulq %rdx
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, %r9
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: addq %r9, %rbp
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rsi, %rbp
; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: addq %rbx, %r13
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r14, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: adcq $0, %rax
; X64-NEXT: addq %r12, %rbp
@@ -7744,12 +7701,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, %r11
; X64-NEXT: setb %r14b
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %r15
; X64-NEXT: movq %rax, %r12
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
@@ -7768,39 +7725,39 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %sil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: addq %r9, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
; X64-NEXT: addq %r8, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r11, %rbx
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movzbl %r14b, %eax
; X64-NEXT: adcq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: imulq %rax, %r10
; X64-NEXT: movq %rax, %r14
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %r10, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: imulq %rbp, %rdi
; X64-NEXT: addq %rdx, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; X64-NEXT: imulq %r11, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: imulq %rcx, %rax
; X64-NEXT: addq %rdx, %rax
; X64-NEXT: addq %r8, %r9
@@ -7810,7 +7767,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rcx, %rdi
; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rsi
@@ -7833,9 +7790,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rax, %r12
; X64-NEXT: addq %r9, %r13
; X64-NEXT: adcq %r8, %r12
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq 120(%rdx), %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; X64-NEXT: imulq %r10, %rcx
; X64-NEXT: movq 112(%rdx), %rsi
; X64-NEXT: movq %rdx, %rbp
@@ -7843,18 +7800,18 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rax, %r11
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; X64-NEXT: imulq %r8, %rsi
; X64-NEXT: addq %rdx, %rsi
; X64-NEXT: movq 96(%rbp), %rdi
; X64-NEXT: movq 104(%rbp), %rbx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: imulq %rbx, %rcx
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: imulq %rdi, %rax
; X64-NEXT: addq %rdx, %rax
; X64-NEXT: addq %r11, %r9
@@ -7884,29 +7841,29 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: addq %r9, %rax
; X64-NEXT: adcq %r11, %rdx
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: adcq %r15, %rdi
; X64-NEXT: adcq %r13, %rax
; X64-NEXT: adcq %r12, %rdx
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: movq 80(%rsi), %rdi
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r8
; X64-NEXT: movq 88(%rsi), %rax
; X64-NEXT: movq %rsi, %r9
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp
@@ -7914,8 +7871,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %r8, %rbx
; X64-NEXT: adcq $0, %rbp
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r14
@@ -7932,13 +7889,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; X64-NEXT: addq %r12, %rsi
; X64-NEXT: movq %rdx, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
; X64-NEXT: adcq %r8, %r10
; X64-NEXT: addq %rbx, %rsi
; X64-NEXT: adcq %rbp, %r10
@@ -7946,7 +7903,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq 64(%rdi), %r13
; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r11
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq 72(%rdi), %r9
; X64-NEXT: movq %r9, %rax
@@ -7959,11 +7916,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rcx
; X64-NEXT: setb %r11b
; X64-NEXT: movq %r9, %rax
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %rbp
@@ -7980,15 +7937,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %r11, %r8
; X64-NEXT: addq %rbp, %rcx
; X64-NEXT: adcq %rbx, %r8
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r14, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: adcq $0, %r10
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r13, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r12
@@ -8000,7 +7957,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq $0, %rdi
; X64-NEXT: movq %r13, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbp, %rax
@@ -8012,28 +7969,28 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %dil, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: addq %r14, %r15
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; X64-NEXT: adcq %r13, %r11
; X64-NEXT: addq %rax, %r15
; X64-NEXT: adcq %rdx, %r11
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r11
; X64-NEXT: addq %rsi, %r15
; X64-NEXT: adcq %r10, %r11
; X64-NEXT: setb %r10b
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: movq %r8, %rdi
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdi, %r12
@@ -8042,7 +7999,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: adcq $0, %rdi
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
@@ -8055,22 +8012,22 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rax
; X64-NEXT: movzbl %r8b, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; X64-NEXT: addq %r14, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: adcq %r13, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
; X64-NEXT: addq %r15, %r9
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq %r11, %rbx
-; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movzbl %r10b, %eax
; X64-NEXT: adcq %rax, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: movq 96(%rbp), %rcx
; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax
@@ -8085,9 +8042,9 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq 112(%rbp), %rax
; X64-NEXT: movq %rbp, %rdi
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; X64-NEXT: imulq %rbp, %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rax, %r10
; X64-NEXT: addq %rsi, %rdx
@@ -8100,7 +8057,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rbx, %rsi
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: movq %rbp, %r9
; X64-NEXT: mulq %rcx
@@ -8124,32 +8081,32 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq %rax, %rbx
; X64-NEXT: addq %r10, %rbp
; X64-NEXT: adcq %rdi, %rbx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: imulq %rax, %rsi
; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rsi, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
; X64-NEXT: imulq %r11, %rcx
; X64-NEXT: addq %rdx, %rcx
; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
; X64-NEXT: imulq %r15, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
; X64-NEXT: mulq %r14
; X64-NEXT: movq %rax, %r10
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; X64-NEXT: imulq %r14, %rax
; X64-NEXT: addq %rdx, %rax
; X64-NEXT: addq %r8, %r10
; X64-NEXT: adcq %r9, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %r14, %rax
; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %rdi
@@ -8173,53 +8130,53 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: addq %r10, %rax
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
; X64-NEXT: adcq %r12, %rsi
; X64-NEXT: adcq %rbp, %rax
; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
; X64-NEXT: movq %rdi, %r10
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: adcq (%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, (%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 8(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 16(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 24(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 32(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 40(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 48(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, 56(%rcx)
; X64-NEXT: movq %r9, 64(%rcx)
; X64-NEXT: movq %r10, 72(%rcx)
diff --git a/test/CodeGen/X86/peephole-na-phys-copy-folding.ll b/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
index 66047e3677f64..023de041dce92 100644
--- a/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
+++ b/test/CodeGen/X86/peephole-na-phys-copy-folding.ll
@@ -1,13 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i386-linux-gnu %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sahf %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
-
-; TODO: Reenable verify-machineinstrs once the if (!AXDead) // FIXME in
-; X86InstrInfo::copyPhysReg() is resolved.
+; RUN: llc -mtriple=i386-linux-gnu -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK32
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK64
; The peephole optimizer can elide some physical register copies such as
; EFLAGS. Make sure the flags are used directly, instead of needlessly using
-; lahf, when possible.
+; saving and restoring specific conditions.
@L = external global i32
@M = external global i8
@@ -209,29 +206,22 @@ exit2:
define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK32-LABEL: test_intervening_call:
; CHECK32: # %bb.0: # %entry
-; CHECK32-NEXT: pushl %ebp
-; CHECK32-NEXT: movl %esp, %ebp
; CHECK32-NEXT: pushl %ebx
; CHECK32-NEXT: pushl %esi
-; CHECK32-NEXT: movl 12(%ebp), %eax
-; CHECK32-NEXT: movl 16(%ebp), %edx
-; CHECK32-NEXT: movl 20(%ebp), %ebx
-; CHECK32-NEXT: movl 24(%ebp), %ecx
-; CHECK32-NEXT: movl 8(%ebp), %esi
-; CHECK32-NEXT: lock cmpxchg8b (%esi)
; CHECK32-NEXT: pushl %eax
-; CHECK32-NEXT: seto %al
-; CHECK32-NEXT: lahf
-; CHECK32-NEXT: movl %eax, %esi
-; CHECK32-NEXT: popl %eax
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT: lock cmpxchg8b (%esi)
+; CHECK32-NEXT: setne %bl
; CHECK32-NEXT: subl $8, %esp
; CHECK32-NEXT: pushl %edx
; CHECK32-NEXT: pushl %eax
; CHECK32-NEXT: calll bar
; CHECK32-NEXT: addl $16, %esp
-; CHECK32-NEXT: movl %esi, %eax
-; CHECK32-NEXT: addb $127, %al
-; CHECK32-NEXT: sahf
+; CHECK32-NEXT: testb %bl, %bl
; CHECK32-NEXT: jne .LBB4_3
; CHECK32-NEXT: # %bb.1: # %t
; CHECK32-NEXT: movl $42, %eax
@@ -240,39 +230,28 @@ define i64 @test_intervening_call(i64* %foo, i64 %bar, i64 %baz) nounwind {
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: .LBB4_2: # %t
; CHECK32-NEXT: xorl %edx, %edx
+; CHECK32-NEXT: addl $4, %esp
; CHECK32-NEXT: popl %esi
; CHECK32-NEXT: popl %ebx
-; CHECK32-NEXT: popl %ebp
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_intervening_call:
; CHECK64: # %bb.0: # %entry
-; CHECK64-NEXT: pushq %rbp
-; CHECK64-NEXT: movq %rsp, %rbp
; CHECK64-NEXT: pushq %rbx
-; CHECK64-NEXT: pushq %rax
; CHECK64-NEXT: movq %rsi, %rax
; CHECK64-NEXT: lock cmpxchgq %rdx, (%rdi)
-; CHECK64-NEXT: pushq %rax
-; CHECK64-NEXT: seto %al
-; CHECK64-NEXT: lahf
-; CHECK64-NEXT: movq %rax, %rbx
-; CHECK64-NEXT: popq %rax
+; CHECK64-NEXT: setne %bl
; CHECK64-NEXT: movq %rax, %rdi
; CHECK64-NEXT: callq bar
-; CHECK64-NEXT: movq %rbx, %rax
-; CHECK64-NEXT: addb $127, %al
-; CHECK64-NEXT: sahf
-; CHECK64-NEXT: jne .LBB4_3
+; CHECK64-NEXT: testb %bl, %bl
+; CHECK64-NEXT: jne .LBB4_2
; CHECK64-NEXT: # %bb.1: # %t
; CHECK64-NEXT: movl $42, %eax
-; CHECK64-NEXT: jmp .LBB4_2
-; CHECK64-NEXT: .LBB4_3: # %f
+; CHECK64-NEXT: popq %rbx
+; CHECK64-NEXT: retq
+; CHECK64-NEXT: .LBB4_2: # %f
; CHECK64-NEXT: xorl %eax, %eax
-; CHECK64-NEXT: .LBB4_2: # %t
-; CHECK64-NEXT: addq $8, %rsp
; CHECK64-NEXT: popq %rbx
-; CHECK64-NEXT: popq %rbp
; CHECK64-NEXT: retq
entry:
; cmpxchg sets EFLAGS, call clobbers it, then br uses EFLAGS.
@@ -293,32 +272,27 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
; CHECK32-LABEL: test_two_live_flags:
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: pushl %ebp
-; CHECK32-NEXT: movl %esp, %ebp
; CHECK32-NEXT: pushl %ebx
; CHECK32-NEXT: pushl %edi
; CHECK32-NEXT: pushl %esi
-; CHECK32-NEXT: movl 44(%ebp), %edi
-; CHECK32-NEXT: movl 12(%ebp), %eax
-; CHECK32-NEXT: movl 16(%ebp), %edx
-; CHECK32-NEXT: movl 20(%ebp), %ebx
-; CHECK32-NEXT: movl 24(%ebp), %ecx
-; CHECK32-NEXT: movl 8(%ebp), %esi
+; CHECK32-NEXT: pushl %eax
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK32-NEXT: lock cmpxchg8b (%esi)
+; CHECK32-NEXT: setne {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK32-NEXT: movl %edi, %edx
+; CHECK32-NEXT: movl %ebp, %ecx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %esi
; CHECK32-NEXT: lock cmpxchg8b (%esi)
-; CHECK32-NEXT: seto %al
-; CHECK32-NEXT: lahf
-; CHECK32-NEXT: movl %eax, %esi
-; CHECK32-NEXT: movl 32(%ebp), %eax
-; CHECK32-NEXT: movl 36(%ebp), %edx
-; CHECK32-NEXT: movl %edi, %ecx
-; CHECK32-NEXT: movl 40(%ebp), %ebx
-; CHECK32-NEXT: movl 28(%ebp), %edi
-; CHECK32-NEXT: lock cmpxchg8b (%edi)
; CHECK32-NEXT: sete %al
-; CHECK32-NEXT: pushl %eax
-; CHECK32-NEXT: movl %esi, %eax
-; CHECK32-NEXT: addb $127, %al
-; CHECK32-NEXT: sahf
-; CHECK32-NEXT: popl %eax
+; CHECK32-NEXT: cmpb $0, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; CHECK32-NEXT: jne .LBB5_4
; CHECK32-NEXT: # %bb.1: # %entry
; CHECK32-NEXT: testb %al, %al
@@ -330,6 +304,7 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
; CHECK32-NEXT: xorl %eax, %eax
; CHECK32-NEXT: .LBB5_3: # %t
; CHECK32-NEXT: xorl %edx, %edx
+; CHECK32-NEXT: addl $4, %esp
; CHECK32-NEXT: popl %esi
; CHECK32-NEXT: popl %edi
; CHECK32-NEXT: popl %ebx
@@ -338,32 +313,22 @@ define i64 @test_two_live_flags(i64* %foo0, i64 %bar0, i64 %baz0, i64* %foo1, i6
;
; CHECK64-LABEL: test_two_live_flags:
; CHECK64: # %bb.0: # %entry
-; CHECK64-NEXT: pushq %rbp
-; CHECK64-NEXT: movq %rsp, %rbp
; CHECK64-NEXT: movq %rsi, %rax
; CHECK64-NEXT: lock cmpxchgq %rdx, (%rdi)
-; CHECK64-NEXT: seto %al
-; CHECK64-NEXT: lahf
-; CHECK64-NEXT: movq %rax, %rdx
+; CHECK64-NEXT: setne %dl
; CHECK64-NEXT: movq %r8, %rax
; CHECK64-NEXT: lock cmpxchgq %r9, (%rcx)
; CHECK64-NEXT: sete %al
-; CHECK64-NEXT: pushq %rax
-; CHECK64-NEXT: movq %rdx, %rax
-; CHECK64-NEXT: addb $127, %al
-; CHECK64-NEXT: sahf
-; CHECK64-NEXT: popq %rax
+; CHECK64-NEXT: testb %dl, %dl
; CHECK64-NEXT: jne .LBB5_3
; CHECK64-NEXT: # %bb.1: # %entry
; CHECK64-NEXT: testb %al, %al
; CHECK64-NEXT: je .LBB5_3
; CHECK64-NEXT: # %bb.2: # %t
; CHECK64-NEXT: movl $42, %eax
-; CHECK64-NEXT: popq %rbp
; CHECK64-NEXT: retq
; CHECK64-NEXT: .LBB5_3: # %f
; CHECK64-NEXT: xorl %eax, %eax
-; CHECK64-NEXT: popq %rbp
; CHECK64-NEXT: retq
entry:
%cx0 = cmpxchg i64* %foo0, i64 %bar0, i64 %baz0 seq_cst seq_cst
diff --git a/test/CodeGen/X86/pr37264.ll b/test/CodeGen/X86/pr37264.ll
new file mode 100644
index 0000000000000..8821960d4b748
--- /dev/null
+++ b/test/CodeGen/X86/pr37264.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple=x86_64--
+
+define void @a() local_unnamed_addr #0 {
+ ret void
+}
+
+define void @b() local_unnamed_addr #1 {
+ ret void
+}
+
+attributes #0 = { "target-features"="+avx,+avx2,+avx512bw,+avx512f,+f16c,+fma,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" }
+attributes #1 = { "target-features"="+avx,+avx2,+avx512f,+avx512vl,+f16c,+fma,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" }
diff --git a/test/CodeGen/X86/win64_frame.ll b/test/CodeGen/X86/win64_frame.ll
index 34f78ad0ac204..c011b4dc60240 100644
--- a/test/CodeGen/X86/win64_frame.ll
+++ b/test/CodeGen/X86/win64_frame.ll
@@ -1,43 +1,85 @@
-; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s --check-prefix=CHECK --check-prefix=PUSHF
-; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+sahf | FileCheck %s --check-prefix=SAHF
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-pc-win32 | FileCheck %s --check-prefix=ALL --check-prefix=PUSHF
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+sahf | FileCheck %s --check-prefix=ALL --check-prefix=SAHF
define i32 @f1(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f1:
- ; CHECK: movl 48(%rbp), %eax
+; ALL-LABEL: f1:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: .seh_setframe 5, 0
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: movl 48(%rbp), %eax
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
ret i32 %p5
}
define void @f2(i32 %p, ...) "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f2:
- ; CHECK: .seh_stackalloc 8
- ; CHECK: movq %rsp, %rbp
- ; CHECK: .seh_setframe 5, 0
- ; CHECK: movq %rdx, 32(%rbp)
- ; CHECK: leaq 32(%rbp), %rax
+; ALL-LABEL: f2:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: pushq %rax
+; ALL-NEXT: .seh_stackalloc 8
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: .seh_setframe 5, 0
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: movq %r9, 48(%rbp)
+; ALL-NEXT: movq %r8, 40(%rbp)
+; ALL-NEXT: movq %rdx, 32(%rbp)
+; ALL-NEXT: leaq 32(%rbp), %rax
+; ALL-NEXT: movq %rax, (%rbp)
+; ALL-NEXT: addq $8, %rsp
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%ap = alloca i8, align 8
call void @llvm.va_start(i8* %ap)
ret void
}
define i8* @f3() "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f3:
- ; CHECK: movq %rsp, %rbp
- ; CHECK: .seh_setframe 5, 0
- ; CHECK: movq 8(%rbp), %rax
+; ALL-LABEL: f3:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: .seh_setframe 5, 0
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: movq 8(%rbp), %rax
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%ra = call i8* @llvm.returnaddress(i32 0)
ret i8* %ra
}
define i8* @f4() "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f4:
- ; CHECK: pushq %rbp
- ; CHECK: .seh_pushreg 5
- ; CHECK: subq $304, %rsp
- ; CHECK: .seh_stackalloc 304
- ; CHECK: leaq 128(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 128
- ; CHECK: .seh_endprologue
- ; CHECK: movq 184(%rbp), %rax
+; ALL-LABEL: f4:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: subq $304, %rsp # imm = 0x130
+; ALL-NEXT: .seh_stackalloc 304
+; ALL-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; ALL-NEXT: .seh_setframe 5, 128
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: movq 184(%rbp), %rax
+; ALL-NEXT: addq $304, %rsp # imm = 0x130
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
alloca [300 x i8]
%ra = call i8* @llvm.returnaddress(i32 0)
ret i8* %ra
@@ -46,13 +88,24 @@ define i8* @f4() "no-frame-pointer-elim"="true" {
declare void @external(i8*)
define void @f5() "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f5:
- ; CHECK: subq $336, %rsp
- ; CHECK: .seh_stackalloc 336
- ; CHECK: leaq 128(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 128
- ; CHECK: leaq -92(%rbp), %rcx
- ; CHECK: callq external
+; ALL-LABEL: f5:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: subq $336, %rsp # imm = 0x150
+; ALL-NEXT: .seh_stackalloc 336
+; ALL-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; ALL-NEXT: .seh_setframe 5, 128
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: leaq -92(%rbp), %rcx
+; ALL-NEXT: callq external
+; ALL-NEXT: nop
+; ALL-NEXT: addq $336, %rsp # imm = 0x150
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%a = alloca [300 x i8]
%gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
call void @external(i8* %gep)
@@ -60,13 +113,24 @@ define void @f5() "no-frame-pointer-elim"="true" {
}
define void @f6(i32 %p, ...) "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f6:
- ; CHECK: subq $336, %rsp
- ; CHECK: .seh_stackalloc 336
- ; CHECK: leaq 128(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 128
- ; CHECK: leaq -92(%rbp), %rcx
- ; CHECK: callq external
+; ALL-LABEL: f6:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: subq $336, %rsp # imm = 0x150
+; ALL-NEXT: .seh_stackalloc 336
+; ALL-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; ALL-NEXT: .seh_setframe 5, 128
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: leaq -92(%rbp), %rcx
+; ALL-NEXT: callq external
+; ALL-NEXT: nop
+; ALL-NEXT: addq $336, %rsp # imm = 0x150
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%a = alloca [300 x i8]
%gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
call void @external(i8* %gep)
@@ -74,130 +138,147 @@ define void @f6(i32 %p, ...) "no-frame-pointer-elim"="true" {
}
define i32 @f7(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f7:
- ; CHECK: pushq %rbp
- ; CHECK: .seh_pushreg 5
- ; CHECK: subq $304, %rsp
- ; CHECK: .seh_stackalloc 304
- ; CHECK: leaq 128(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 128
- ; CHECK: andq $-64, %rsp
- ; CHECK: movl 224(%rbp), %eax
- ; CHECK: leaq 176(%rbp), %rsp
+; ALL-LABEL: f7:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: subq $304, %rsp # imm = 0x130
+; ALL-NEXT: .seh_stackalloc 304
+; ALL-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; ALL-NEXT: .seh_setframe 5, 128
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: andq $-64, %rsp
+; ALL-NEXT: movl 224(%rbp), %eax
+; ALL-NEXT: leaq 176(%rbp), %rsp
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
alloca [300 x i8], align 64
ret i32 %e
}
define i32 @f8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f8:
- ; CHECK: subq $352, %rsp
- ; CHECK: .seh_stackalloc 352
- ; CHECK: leaq 128(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 128
-
+; ALL-LABEL: f8:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: pushq %rsi
+; ALL-NEXT: .seh_pushreg 6
+; ALL-NEXT: pushq %rbx
+; ALL-NEXT: .seh_pushreg 3
+; ALL-NEXT: subq $352, %rsp # imm = 0x160
+; ALL-NEXT: .seh_stackalloc 352
+; ALL-NEXT: leaq {{[0-9]+}}(%rsp), %rbp
+; ALL-NEXT: .seh_setframe 5, 128
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: andq $-64, %rsp
+; ALL-NEXT: movq %rsp, %rbx
+; ALL-NEXT: movl 288(%rbp), %esi
+; ALL-NEXT: movl %ecx, %eax
+; ALL-NEXT: leaq 15(,%rax,4), %rcx
+; ALL-NEXT: movabsq $34359738352, %rax # imm = 0x7FFFFFFF0
+; ALL-NEXT: andq %rcx, %rax
+; ALL-NEXT: callq __chkstk
+; ALL-NEXT: subq %rax, %rsp
+; ALL-NEXT: subq $32, %rsp
+; ALL-NEXT: movq %rbx, %rcx
+; ALL-NEXT: callq external
+; ALL-NEXT: addq $32, %rsp
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: leaq 224(%rbp), %rsp
+; ALL-NEXT: popq %rbx
+; ALL-NEXT: popq %rsi
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%alloca = alloca [300 x i8], align 64
- ; CHECK: andq $-64, %rsp
- ; CHECK: movq %rsp, %rbx
-
alloca i32, i32 %a
- ; CHECK: movl %ecx, %eax
- ; CHECK: leaq 15(,%rax,4), %rcx
- ; CHECK: movabsq $34359738352, %rax
- ; CHECK: andq %rcx, %rax
- ; CHECK: callq __chkstk
- ; CHECK: subq %rax, %rsp
-
%gep = getelementptr [300 x i8], [300 x i8]* %alloca, i32 0, i32 0
call void @external(i8* %gep)
- ; CHECK: subq $32, %rsp
- ; CHECK: movq %rbx, %rcx
- ; CHECK: callq external
- ; CHECK: addq $32, %rsp
-
ret i32 %e
- ; CHECK: movl %esi, %eax
- ; CHECK: leaq 224(%rbp), %rsp
}
define i64 @f9() {
+; ALL-LABEL: f9:
+; ALL: # %bb.0: # %entry
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: .seh_setframe 5, 0
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: pushfq
+; ALL-NEXT: popq %rax
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
entry:
- ; CHECK-LABEL: f9:
- ; CHECK: pushq %rbp
- ; CHECK: .seh_pushreg 5
- ; CHECK-NEXT: movq %rsp, %rbp
- ; CHECK: .seh_setframe 5, 0
- ; CHECK: .seh_endprologue
-
%call = call i64 @llvm.x86.flags.read.u64()
- ; CHECK-NEXT: pushfq
- ; CHECK-NEXT: popq %rax
-
ret i64 %call
- ; CHECK-NEXT: popq %rbp
- ; CHECK-NEXT: retq
}
declare i64 @dummy()
define i64 @f10(i64* %foo, i64 %bar, i64 %baz) {
- ; CHECK-LABEL: f10:
- ; CHECK: pushq %rbp
- ; CHECK: .seh_pushreg 5
- ; CHECK: pushq %rsi
- ; CHECK: .seh_pushreg 6
- ; CHECK: pushq %rdi
- ; CHECK: .seh_pushreg 7
- ; CHECK: subq $32, %rsp
- ; CHECK: .seh_stackalloc 32
- ; CHECK: leaq 32(%rsp), %rbp
- ; CHECK: .seh_setframe 5, 32
- ; CHECK: .seh_endprologue
-
+; ALL-LABEL: f10:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rsi
+; ALL-NEXT: .seh_pushreg 6
+; ALL-NEXT: pushq %rbx
+; ALL-NEXT: .seh_pushreg 3
+; ALL-NEXT: subq $40, %rsp
+; ALL-NEXT: .seh_stackalloc 40
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: movq %rdx, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: lock cmpxchgq %r8, (%rcx)
+; ALL-NEXT: sete %bl
+; ALL-NEXT: callq dummy
+; ALL-NEXT: testb %bl, %bl
+; ALL-NEXT: cmoveq %rsi, %rax
+; ALL-NEXT: addq $40, %rsp
+; ALL-NEXT: popq %rbx
+; ALL-NEXT: popq %rsi
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%cx = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
- ; PUSHF: lock cmpxchgq
- ; PUSHF-NEXT: pushfq
- ; PUSHF-NEXT: popq %[[REG:.*]]
- ; SAHF: lock cmpxchgq
- ; SAHF-NEXT: seto %al
- ; SAHF-NEXT: lahf
-
%v = extractvalue { i64, i1 } %cx, 0
%p = extractvalue { i64, i1 } %cx, 1
-
%call = call i64 @dummy()
- ; PUSHF: callq dummy
- ; PUSHF-NEXT: pushq %[[REG]]
- ; PUSHF-NEXT: popfq
- ; SAHF: callq dummy
- ; SAHF-NEXT: pushq
- ; SAHF: addb $127, %al
- ; SAHF-NEXT: sahf
- ; SAHF-NEXT: popq
-
%sel = select i1 %p, i64 %call, i64 %bar
- ; CHECK-NEXT: cmovneq
-
ret i64 %sel
- ; CHECK-NEXT: addq $32, %rsp
- ; CHECK-NEXT: popq %rdi
- ; CHECK-NEXT: popq %rsi
- ; CHECK-NEXT: popq %rbp
}
define i8* @f11() "no-frame-pointer-elim"="true" {
- ; CHECK-LABEL: f11:
- ; CHECK: pushq %rbp
- ; CHECK: movq %rsp, %rbp
- ; CHECK: .seh_setframe 5, 0
- ; CHECK: leaq 8(%rbp), %rax
+; ALL-LABEL: f11:
+; ALL: # %bb.0:
+; ALL-NEXT: pushq %rbp
+; ALL-NEXT: .seh_pushreg 5
+; ALL-NEXT: movq %rsp, %rbp
+; ALL-NEXT: .seh_setframe 5, 0
+; ALL-NEXT: .seh_endprologue
+; ALL-NEXT: leaq 8(%rbp), %rax
+; ALL-NEXT: popq %rbp
+; ALL-NEXT: retq
+; ALL-NEXT: .seh_handlerdata
+; ALL-NEXT: .text
+; ALL-NEXT: .seh_endproc
%aora = call i8* @llvm.addressofreturnaddress()
ret i8* %aora
}
define i8* @f12() {
- ; CHECK-LABEL: f12:
- ; CHECK-NOT: push
- ; CHECK: movq %rsp, %rax
+; ALL-LABEL: f12:
+; ALL: # %bb.0:
+; ALL-NEXT: movq %rsp, %rax
+; ALL-NEXT: retq
%aora = call i8* @llvm.addressofreturnaddress()
ret i8* %aora
}
@@ -205,5 +286,4 @@ define i8* @f12() {
declare i8* @llvm.returnaddress(i32) nounwind readnone
declare i8* @llvm.addressofreturnaddress() nounwind readnone
declare i64 @llvm.x86.flags.read.u64()
-
declare void @llvm.va_start(i8*) nounwind
diff --git a/test/CodeGen/X86/x86-repmov-copy-eflags.ll b/test/CodeGen/X86/x86-repmov-copy-eflags.ll
index ad39888572849..1c168e8ee3da7 100644
--- a/test/CodeGen/X86/x86-repmov-copy-eflags.ll
+++ b/test/CodeGen/X86/x86-repmov-copy-eflags.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
target triple = "i686-pc-windows-msvc18.0.0"
@@ -39,15 +39,12 @@ declare void @g(%struct.T*)
; CHECK: leal 8(%esp), %esi
; CHECK: decl (%esp)
-; CHECK: seto %al
-; CHECK: lahf
-; CHECK: movl %eax, %edi
+; CHECK: setne %[[NE_REG:.*]]
; CHECK: pushl %esi
; CHECK: calll _g
; CHECK: addl $4, %esp
-; CHECK: movl %edi, %eax
-; CHECK: addb $127, %al
-; CHECK: sahf
+; CHECK: testb %[[NE_REG]], %[[NE_REG]]
+; CHECK: jne
attributes #0 = { nounwind optsize }
attributes #1 = { argmemonly nounwind }
diff --git a/test/DebugInfo/X86/dbg-value-inlined-parameter.ll b/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
index 9954039654bbb..e83cf0aa7ffdf 100644
--- a/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
+++ b/test/DebugInfo/X86/dbg-value-inlined-parameter.ll
@@ -32,10 +32,10 @@
;CHECK-NEXT: DW_AT_call_line
;CHECK: DW_TAG_formal_parameter
-;FIXME: Linux shouldn't drop this parameter either...
;CHECK-NOT: DW_TAG
-;DARWIN: DW_AT_abstract_origin {{.*}} "sp"
-;DARWIN: DW_TAG_formal_parameter
+;FIXME: Shouldn't drop this parameter...
+;XCHECK: DW_AT_abstract_origin {{.*}} "sp"
+;XCHECK: DW_TAG_formal_parameter
;CHECK: DW_AT_abstract_origin {{.*}} "nums"
;CHECK-NOT: DW_TAG_formal_parameter
diff --git a/test/DebugInfo/X86/live-debug-vars-discard-invalid.mir b/test/DebugInfo/X86/live-debug-vars-discard-invalid.mir
new file mode 100644
index 0000000000000..51045f45b2176
--- /dev/null
+++ b/test/DebugInfo/X86/live-debug-vars-discard-invalid.mir
@@ -0,0 +1,141 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -start-before greedy -stop-after virtregrewriter -o - %s | FileCheck %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "test/DebugInfo/X86/dbg-value-inlined-parameter.ll"
+ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-apple-darwin"
+
+ %struct.S1 = type { float*, i32 }
+
+ @p = common global %struct.S1 zeroinitializer, align 8, !dbg !0
+
+ ; Function Attrs: nounwind optsize ssp
+ define void @foobar() !dbg !15 {
+ entry:
+ tail call void @llvm.dbg.value(metadata %struct.S1* @p, metadata !18, metadata !DIExpression()) , !dbg !25
+ ret void, !dbg !32
+ }
+
+ ; Function Attrs: nounwind readnone speculatable
+ declare void @llvm.dbg.value(metadata, metadata, metadata) #2
+
+ !llvm.dbg.cu = !{!2}
+ !llvm.module.flags = !{!14}
+
+ !0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+ !1 = !DIGlobalVariable(name: "p", scope: !2, file: !3, line: 14, type: !6, isLocal: false, isDefinition: true)
+ !2 = distinct !DICompileUnit(language: DW_LANG_C99, file: !3, producer: "clang version 2.9 (trunk 125693)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4, globals: !5, imports: !4)
+ !3 = !DIFile(filename: "nm2.c", directory: "/private/tmp")
+ !4 = !{}
+ !5 = !{!0}
+ !6 = !DIDerivedType(tag: DW_TAG_typedef, name: "S1", scope: !2, file: !3, line: 4, baseType: !7)
+ !7 = !DICompositeType(tag: DW_TAG_structure_type, name: "S1", scope: !2, file: !3, line: 1, size: 128, align: 64, elements: !8)
+ !8 = !{!9, !12}
+ !9 = !DIDerivedType(tag: DW_TAG_member, name: "m", scope: !3, file: !3, line: 2, baseType: !10, size: 64, align: 64)
+ !10 = !DIDerivedType(tag: DW_TAG_pointer_type, scope: !2, baseType: !11, size: 64, align: 64)
+ !11 = !DIBasicType(name: "float", size: 32, align: 32, encoding: DW_ATE_float)
+ !12 = !DIDerivedType(tag: DW_TAG_member, name: "nums", scope: !3, file: !3, line: 3, baseType: !13, size: 32, align: 32, offset: 64)
+ !13 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+ !14 = !{i32 1, !"Debug Info Version", i32 3}
+ !15 = distinct !DISubprogram(name: "foobar", scope: !3, file: !3, line: 15, type: !16, isLocal: false, isDefinition: true, virtualIndex: 6, isOptimized: true, unit: !2)
+ !16 = !DISubroutineType(types: !17)
+ !17 = !{null}
+ !18 = !DILocalVariable(name: "sp", arg: 1, scope: !19, file: !3, line: 7, type: !24)
+ !19 = distinct !DISubprogram(name: "foo", scope: !3, file: !3, line: 8, type: !20, isLocal: false, isDefinition: true, scopeLine: 8, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !2, variables: !22)
+ !20 = !DISubroutineType(types: !21)
+ !21 = !{!13}
+ !22 = !{!18, !23}
+ !23 = !DILocalVariable(name: "nums", arg: 2, scope: !19, file: !3, line: 7, type: !13)
+ !24 = !DIDerivedType(tag: DW_TAG_pointer_type, scope: !2, baseType: !6, size: 64, align: 64)
+ !25 = !DILocation(line: 7, column: 13, scope: !19, inlinedAt: !26)
+ !26 = !DILocation(line: 16, column: 3, scope: !27)
+ !27 = distinct !DILexicalBlock(scope: !15, file: !3, line: 15, column: 15)
+ !32 = !DILocation(line: 17, column: 1, scope: !27)
+
+...
+---
+name: foobar
+tracksRegLiveness: true
+body: |
+ bb.0:
+ %1:gr64 = IMPLICIT_DEF
+ %2:gr64 = IMPLICIT_DEF
+
+ bb.1:
+ ; This DBG_VALUE will be discarded (use before def of %0).
+ DBG_VALUE debug-use %0, debug-use %noreg, !18, !DIExpression(), debug-location !25
+ %0:gr64 = IMPLICIT_DEF
+ %0:gr64 = IMPLICIT_DEF
+ %0:gr64 = IMPLICIT_DEF
+ %0:gr64 = IMPLICIT_DEF
+
+ bb.2:
+ ; This DBG_VALUE will be discarded (%1 is defined earlier, but it is not live in, so we do not know where %1 is stored).
+ DBG_VALUE debug-use %1, debug-use %noreg, !18, !DIExpression(), debug-location !25
+ %1:gr64 = IMPLICIT_DEF
+ %1:gr64 = IMPLICIT_DEF
+ %1:gr64 = IMPLICIT_DEF
+ %1:gr64 = IMPLICIT_DEF
+ ; This DBG_VALUE is kept, even if %1 is dead, it was defined in the prev instruction,
+ ; so the value should be available for as long as the register allocated to %1 is live.
+ DBG_VALUE debug-use %1, debug-use %noreg, !18, !DIExpression(), debug-location !25
+
+ bb.3:
+ %1:gr64 = IMPLICIT_DEF
+ DBG_VALUE 0, debug-use %noreg, !23, !DIExpression(), debug-location !25
+ ; This DBG_VALUE is kept, even if %1 is dead, it was defined in the prev non-dbg instruction,
+ ; so the value should be available for as long as the register allocated to %1 is live.
+ DBG_VALUE debug-use %1, debug-use %noreg, !18, !DIExpression(), debug-location !25
+
+ bb.4:
+ ; All DBG_VALUEs here should survive. %2 is livein as it was defined in bb.0, and it has use/def in the BTS64rr instruction.
+ DBG_VALUE debug-use %2, debug-use %noreg, !18, !DIExpression(), debug-location !25
+ %2:gr64 = BTS64rr %2, 0, implicit-def %eflags
+ DBG_VALUE 0, debug-use %noreg, !23, !DIExpression(), debug-location !25
+ DBG_VALUE debug-use %2, debug-use %noreg, !18, !DIExpression(), debug-location !25
+ %2:gr64 = BTS64rr %2, 0, implicit-def %eflags
+ DBG_VALUE debug-use %2, debug-use %noreg, !18, !DIExpression(), debug-location !25
+ %2:gr64 = BTS64rr %2, 0, implicit-def %eflags
+ DBG_VALUE debug-use %2, debug-use %noreg, !18, !DIExpression(), debug-location !25
+
+ bb.5:
+ RET 0, debug-location !32
+...
+
+# CHECK-LABEL: name: foobar
+
+# CHECK-LABEL: bb.1:
+## After solving https://bugs.llvm.org/show_bug.cgi?id=36579 we expect to get a
+## DBG_VALUE debug-use %noreg
+## here.
+# CHECK-NOT: DBG_VALUE
+
+# CHECK-LABEL: bb.2:
+## After solving https://bugs.llvm.org/show_bug.cgi?id=36579 we expect to get a
+## DBG_VALUE debug-use %noreg
+## here.
+# CHECK-NOT: DBG_VALUE
+# CHECK: dead renamable %rcx = IMPLICIT_DEF
+# CHECK-NEXT: dead renamable %rcx = IMPLICIT_DEF
+# CHECK-NEXT: dead renamable %rcx = IMPLICIT_DEF
+# CHECK-NEXT: dead renamable %rcx = IMPLICIT_DEF
+# CHECK-NEXT: DBG_VALUE debug-use %rcx, debug-use %noreg, !18, !DIExpression()
+
+# CHECK-LABEL: bb.3:
+# CHECK: dead renamable %rcx = IMPLICIT_DEF
+# CHECK-NEXT: DBG_VALUE 0, debug-use %noreg, !23, !DIExpression()
+# CHECK-NEXT: DBG_VALUE debug-use %rcx, debug-use %noreg, !18, !DIExpression()
+
+# CHECK-LABEL: bb.4:
+# CHECK: liveins: %rax
+# CHECK: DBG_VALUE debug-use %rax, debug-use %noreg, !18, !DIExpression()
+# CHECK-NEXT: renamable %rax = BTS64rr killed renamable %rax, 0, implicit-def %eflags
+# CHECK-NEXT: DBG_VALUE 0, debug-use %noreg, !23, !DIExpression()
+# CHECK-NEXT: DBG_VALUE debug-use %rax, debug-use %noreg, !18, !DIExpression()
+# CHECK-NEXT: renamable %rax = BTS64rr killed renamable %rax, 0, implicit-def %eflags
+# CHECK-NEXT: DBG_VALUE debug-use %rax, debug-use %noreg, !18, !DIExpression()
+# CHECK-NEXT: dead renamable %rax = BTS64rr killed renamable %rax, 0, implicit-def %eflags
+
+# CHECK-LABEL: bb.5:
+# CHECK-NEXT: RET 0
diff --git a/test/ExecutionEngine/RuntimeDyld/PowerPC/Inputs/ppc64_elf_module_b.s b/test/ExecutionEngine/RuntimeDyld/PowerPC/Inputs/ppc64_elf_module_b.s
new file mode 100644
index 0000000000000..f47ddbd413686
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/PowerPC/Inputs/ppc64_elf_module_b.s
@@ -0,0 +1,42 @@
+# This module contains a function with its local and global entry points
+# exposed. It is used by the ppc64_elf test to verify that functions with
+# different TOCs are called via their global entry points.
+ .text
+ .abiversion 2
+ .file "ppc64_elf_module_b.ll"
+ .section .rodata.cst4,"aM",@progbits,4
+ .p2align 2 # -- Begin function foo
+.LCPI0_0:
+ .long 1093664768 # float 11
+ .text
+ .globl foo
+ .p2align 4
+ .type foo,@function
+.Lfunc_toc0: # @foo
+ .quad .TOC.-foo_gep
+foo:
+.Lfunc_begin0:
+ .cfi_startproc
+ .globl foo_gep
+foo_gep:
+ ld 2, .Lfunc_toc0-foo_gep(12)
+ add 2, 2, 12
+ .globl foo_lep
+foo_lep:
+ .localentry foo, foo_lep-foo_gep
+# %bb.0:
+ addis 3, 2, .LC0@toc@ha
+ ld 3, .LC0@toc@l(3)
+ lfsx 1, 0, 3
+ blr
+ .long 0
+ .quad 0
+.Lfunc_end0:
+ .size foo, .Lfunc_end0-.Lfunc_begin0
+ .cfi_endproc
+ # -- End function
+ .section .toc,"aw",@progbits
+.LC0:
+ .tc .LCPI0_0[TC],.LCPI0_0
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc64_elf.s b/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc64_elf.s
new file mode 100644
index 0000000000000..b43c84caf56c3
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/PowerPC/ppc64_elf.s
@@ -0,0 +1,47 @@
+# RUN: rm -rf %t && mkdir -p %t
+# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -o %t/ppc64_elf.o %s
+# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -o %t/ppc64_elf_module_b.o %S/Inputs/ppc64_elf_module_b.s
+# RUN: llvm-rtdyld -triple=powerpc64le-unknown-linux-gnu -verify -check=%s %t/ppc64_elf.o %t/ppc64_elf_module_b.o
+
+ .text
+ .abiversion 2
+ .file "Module2.ll"
+ .globl bar # -- Begin function bar
+ .p2align 4
+ .type bar,@function
+.Lfunc_toc0: # @bar
+ .quad .TOC.-.Lfunc_gep0
+bar:
+.Lfunc_begin0:
+ .cfi_startproc
+.Lfunc_gep0:
+ ld 2, .Lfunc_toc0-.Lfunc_gep0(12)
+ add 2, 2, 12
+.Lfunc_lep0:
+ .localentry bar, .Lfunc_lep0-.Lfunc_gep0
+# %bb.0:
+ mflr 0
+ std 0, 16(1)
+ stdu 1, -32(1)
+ .cfi_def_cfa_offset 32
+ .cfi_offset lr, 16
+# rtdyld-check: (*{4}(stub_addr(ppc64_elf.o, .text, foo) + 0)) [15:0] = foo_gep [63:48]
+# rtdyld-check: (*{4}(stub_addr(ppc64_elf.o, .text, foo) + 4)) [15:0] = foo_gep [47:32]
+# rtdyld-check: (*{4}(stub_addr(ppc64_elf.o, .text, foo) + 12)) [15:0] = foo_gep [31:16]
+# rtdyld-check: (*{4}(stub_addr(ppc64_elf.o, .text, foo) + 16)) [15:0] = foo_gep [15:0]
+# rtdyld-check: decode_operand(foo_call, 0) = (stub_addr(ppc64_elf.o, .text, foo) - foo_call) >> 2
+foo_call:
+ bl foo
+ nop
+ addi 1, 1, 32
+ ld 0, 16(1)
+ mtlr 0
+ blr
+ .long 0
+ .quad 0
+.Lfunc_end0:
+ .size bar, .Lfunc_end0-.Lfunc_begin0
+ .cfi_endproc
+ # -- End function
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/test/MC/ELF/cfi-large-model.s b/test/MC/ELF/cfi-large-model.s
index 790d75eee1fa6..13e64d3f2851b 100644
--- a/test/MC/ELF/cfi-large-model.s
+++ b/test/MC/ELF/cfi-large-model.s
@@ -1,26 +1,52 @@
// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux-gnu -large-code-model %s \
-// RUN: -o - | llvm-readobj -s -sd | FileCheck %s
+// RUN: -o - | llvm-readobj -s -sd | FileCheck --check-prefix=CHECK-X86 %s
+// RUN: llvm-mc -filetype=obj -triple powerpc64le-linux-gnu -large-code-model %s \
+// RUN: -o - | llvm-readobj -s -sd | FileCheck --check-prefix=CHECK-PPC %s
-// CHECK: Section {
-// CHECK: Index:
-// CHECK: Name: .eh_frame
-// CHECK-NEXT: Type: SHT_X86_64_UNWIND
-// CHECK-NEXT: Flags [
-// CHECK-NEXT: SHF_ALLOC
-// CHECK-NEXT: ]
-// CHECK-NEXT: Address: 0x0
-// CHECK-NEXT: Offset: 0x40
-// CHECK-NEXT: Size: 56
-// CHECK-NEXT: Link: 0
-// CHECK-NEXT: Info: 0
-// CHECK-NEXT: AddressAlignment: 8
-// CHECK-NEXT: EntrySize: 0
-// CHECK-NEXT: SectionData (
-// CHECK-NEXT: 0000: 14000000 00000000 017A5200 01781001 |.........zR..x..|
-// CHECK-NEXT: 0010: 1C0C0708 90010000 1C000000 1C000000 |................|
-// CHECK-NEXT: 0020: 00000000 00000000 00000000 00000000 |................|
-// CHECK-NEXT: 0030: 00000000 00000000 |........|
-// CHECK-NEXT: )
+// REQUIRES: x86-registered-target
+// REQUIRES: powerpc-registered-target
+
+// CHECK-X86: Section {
+// CHECK-X86: Index:
+// CHECK-X86: Name: .eh_frame
+// CHECK-X86-NEXT: Type: SHT_X86_64_UNWIND
+// CHECK-X86-NEXT: Flags [
+// CHECK-X86-NEXT: SHF_ALLOC
+// CHECK-X86-NEXT: ]
+// CHECK-X86-NEXT: Address: 0x0
+// CHECK-X86-NEXT: Offset: 0x40
+// CHECK-X86-NEXT: Size: 56
+// CHECK-X86-NEXT: Link: 0
+// CHECK-X86-NEXT: Info: 0
+// CHECK-X86-NEXT: AddressAlignment: 8
+// CHECK-X86-NEXT: EntrySize: 0
+// CHECK-X86-NEXT: SectionData (
+// CHECK-X86-NEXT: 0000: 14000000 00000000 017A5200 01781001 |.........zR..x..|
+// CHECK-X86-NEXT: 0010: 1C0C0708 90010000 1C000000 1C000000 |................|
+// CHECK-X86-NEXT: 0020: 00000000 00000000 00000000 00000000 |................|
+// CHECK-X86-NEXT: 0030: 00000000 00000000 |........|
+// CHECK-X86-NEXT: )
+
+// CHECK-PPC: Section {
+// CHECK-PPC: Index:
+// CHECK-PPC: Name: .eh_frame
+// CHECK-PPC-NEXT: Type: SHT_PROGBITS
+// CHECK-PPC-NEXT: Flags [
+// CHECK-PPC-NEXT: SHF_ALLOC
+// CHECK-PPC-NEXT: ]
+// CHECK-PPC-NEXT: Address: 0x0
+// CHECK-PPC-NEXT: Offset: 0x40
+// CHECK-PPC-NEXT: Size: 48
+// CHECK-PPC-NEXT: Link: 0
+// CHECK-PPC-NEXT: Info: 0
+// CHECK-PPC-NEXT: AddressAlignment: 8
+// CHECK-PPC-NEXT: EntrySize: 0
+// CHECK-PPC-NEXT: SectionData (
+// CHECK-PPC-NEXT: 0000: 10000000 00000000 017A5200 04784101 |.........zR..xA.|
+// CHECK-PPC-NEXT: 0010: 1C0C0100 18000000 18000000 00000000 |................|
+// CHECK-PPC-NEXT: 0020: 00000000 00000000 00000000 00000000 |................|
+// CHECK-PPC-NEXT: )
+// CHECK-PPC-NEXT: }
f:
.cfi_startproc
diff --git a/test/MC/Mips/unsupported-relocation.s b/test/MC/Mips/unsupported-relocation.s
new file mode 100644
index 0000000000000..151a559671fba
--- /dev/null
+++ b/test/MC/Mips/unsupported-relocation.s
@@ -0,0 +1,13 @@
+# RUN: not llvm-mc -triple mips-unknown-linux -filetype=obj %s 2>%t
+# RUN: FileCheck %s < %t
+
+# Check that we emit an error for unsupported relocations instead of crashing.
+
+ .globl x
+
+ .data
+foo:
+ .byte x
+ .byte x+1
+
+# CHECK: LLVM ERROR: MIPS does not support one byte relocations
diff --git a/test/Transforms/ArgumentPromotion/musttail.ll b/test/Transforms/ArgumentPromotion/musttail.ll
new file mode 100644
index 0000000000000..aa18711686938
--- /dev/null
+++ b/test/Transforms/ArgumentPromotion/musttail.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -argpromotion -S | FileCheck %s
+; PR36543
+
+; Don't promote arguments of musttail callee
+
+%T = type { i32, i32, i32, i32 }
+
+; CHECK-LABEL: define internal i32 @test(%T* %p)
+define internal i32 @test(%T* %p) {
+ %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
+ %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
+ %a = load i32, i32* %a.gep
+ %b = load i32, i32* %b.gep
+ %v = add i32 %a, %b
+ ret i32 %v
+}
+
+; CHECK-LABEL: define i32 @caller(%T* %p)
+define i32 @caller(%T* %p) {
+ %v = musttail call i32 @test(%T* %p)
+ ret i32 %v
+}
+
+; Don't promote arguments of musttail caller
+
+define i32 @foo(%T* %p, i32 %v) {
+ ret i32 0
+}
+
+; CHECK-LABEL: define internal i32 @test2(%T* %p, i32 %p2)
+define internal i32 @test2(%T* %p, i32 %p2) {
+ %a.gep = getelementptr %T, %T* %p, i64 0, i32 3
+ %b.gep = getelementptr %T, %T* %p, i64 0, i32 2
+ %a = load i32, i32* %a.gep
+ %b = load i32, i32* %b.gep
+ %v = add i32 %a, %b
+ %ca = musttail call i32 @foo(%T* undef, i32 %v)
+ ret i32 %ca
+}
+
+; CHECK-LABEL: define i32 @caller2(%T* %g)
+define i32 @caller2(%T* %g) {
+ %v = call i32 @test2(%T* %g, i32 0)
+ ret i32 %v
+}
diff --git a/test/Transforms/CallSiteSplitting/musttail.ll b/test/Transforms/CallSiteSplitting/musttail.ll
new file mode 100644
index 0000000000000..97548501cd5cd
--- /dev/null
+++ b/test/Transforms/CallSiteSplitting/musttail.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -callsite-splitting -S | FileCheck %s
+
+define i8* @caller(i8* %a, i8* %b) {
+; CHECK-LABEL: @caller(
+; CHECK-NEXT: Top:
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: br i1 [[C]], label [[TAIL_PREDBB1_SPLIT:%.*]], label [[TBB:%.*]]
+; CHECK: TBB:
+; CHECK-NEXT: [[C2:%.*]] = icmp eq i8* [[B:%.*]], null
+; CHECK-NEXT: br i1 [[C2]], label [[TAIL_PREDBB2_SPLIT:%.*]], label [[END:%.*]]
+; CHECK: Tail.predBB1.split:
+; CHECK-NEXT: [[TMP0:%.*]] = musttail call i8* @callee(i8* null, i8* [[B]])
+; CHECK-NEXT: [[CB1:%.*]] = bitcast i8* [[TMP0]] to i8*
+; CHECK-NEXT: ret i8* [[CB1]]
+; CHECK: Tail.predBB2.split:
+; CHECK-NEXT: [[TMP1:%.*]] = musttail call i8* @callee(i8* nonnull [[A]], i8* null)
+; CHECK-NEXT: [[CB2:%.*]] = bitcast i8* [[TMP1]] to i8*
+; CHECK-NEXT: ret i8* [[CB2]]
+; CHECK: End:
+; CHECK-NEXT: ret i8* null
+;
+Top:
+ %c = icmp eq i8* %a, null
+ br i1 %c, label %Tail, label %TBB
+TBB:
+ %c2 = icmp eq i8* %b, null
+ br i1 %c2, label %Tail, label %End
+Tail:
+ %ca = musttail call i8* @callee(i8* %a, i8* %b)
+ %cb = bitcast i8* %ca to i8*
+ ret i8* %cb
+End:
+ ret i8* null
+}
+
+define i8* @callee(i8* %a, i8* %b) noinline {
+; CHECK-LABEL: define i8* @callee(
+; CHECK-NEXT: ret i8* [[A:%.*]]
+;
+ ret i8* %a
+}
+
+define i8* @no_cast_caller(i8* %a, i8* %b) {
+; CHECK-LABEL: @no_cast_caller(
+; CHECK-NEXT: Top:
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: br i1 [[C]], label [[TAIL_PREDBB1_SPLIT:%.*]], label [[TBB:%.*]]
+; CHECK: TBB:
+; CHECK-NEXT: [[C2:%.*]] = icmp eq i8* [[B:%.*]], null
+; CHECK-NEXT: br i1 [[C2]], label [[TAIL_PREDBB2_SPLIT:%.*]], label [[END:%.*]]
+; CHECK: Tail.predBB1.split:
+; CHECK-NEXT: [[TMP0:%.*]] = musttail call i8* @callee(i8* null, i8* [[B]])
+; CHECK-NEXT: ret i8* [[TMP0]]
+; CHECK: Tail.predBB2.split:
+; CHECK-NEXT: [[TMP1:%.*]] = musttail call i8* @callee(i8* nonnull [[A]], i8* null)
+; CHECK-NEXT: ret i8* [[TMP1]]
+; CHECK: End:
+; CHECK-NEXT: ret i8* null
+;
+Top:
+ %c = icmp eq i8* %a, null
+ br i1 %c, label %Tail, label %TBB
+TBB:
+ %c2 = icmp eq i8* %b, null
+ br i1 %c2, label %Tail, label %End
+Tail:
+ %ca = musttail call i8* @callee(i8* %a, i8* %b)
+ ret i8* %ca
+End:
+ ret i8* null
+}
+
+define void @void_caller(i8* %a, i8* %b) {
+; CHECK-LABEL: @void_caller(
+; CHECK-NEXT: Top:
+; CHECK-NEXT: [[C:%.*]] = icmp eq i8* [[A:%.*]], null
+; CHECK-NEXT: br i1 [[C]], label [[TAIL_PREDBB1_SPLIT:%.*]], label [[TBB:%.*]]
+; CHECK: TBB:
+; CHECK-NEXT: [[C2:%.*]] = icmp eq i8* [[B:%.*]], null
+; CHECK-NEXT: br i1 [[C2]], label [[TAIL_PREDBB2_SPLIT:%.*]], label [[END:%.*]]
+; CHECK: Tail.predBB1.split:
+; CHECK-NEXT: musttail call void @void_callee(i8* null, i8* [[B]])
+; CHECK-NEXT: ret void
+; CHECK: Tail.predBB2.split:
+; CHECK-NEXT: musttail call void @void_callee(i8* nonnull [[A]], i8* null)
+; CHECK-NEXT: ret void
+; CHECK: End:
+; CHECK-NEXT: ret void
+;
+Top:
+ %c = icmp eq i8* %a, null
+ br i1 %c, label %Tail, label %TBB
+TBB:
+ %c2 = icmp eq i8* %b, null
+ br i1 %c2, label %Tail, label %End
+Tail:
+ musttail call void @void_callee(i8* %a, i8* %b)
+ ret void
+End:
+ ret void
+}
+
+define void @void_callee(i8* %a, i8* %b) noinline {
+; CHECK-LABEL: define void @void_callee(
+; CHECK-NEXT: ret void
+;
+ ret void
+}
diff --git a/test/Transforms/DeadArgElim/musttail-caller.ll b/test/Transforms/DeadArgElim/musttail-caller.ll
new file mode 100644
index 0000000000000..981326bba0aa3
--- /dev/null
+++ b/test/Transforms/DeadArgElim/musttail-caller.ll
@@ -0,0 +1,16 @@
+; RUN: opt -deadargelim -S < %s | FileCheck %s
+; PR36441
+; Dead arguments should not be removed in presence of `musttail` calls.
+
+; CHECK-LABEL: define internal void @test(i32 %a, i32 %b)
+; CHECK: musttail call void @foo(i32 %a, i32 0)
+; FIXME: we should replace those with `undef`s
+define internal void @test(i32 %a, i32 %b) {
+ musttail call void @foo(i32 %a, i32 0)
+ ret void
+}
+
+; CHECK-LABEL: define internal void @foo(i32 %a, i32 %b)
+define internal void @foo(i32 %a, i32 %b) {
+ ret void
+}
diff --git a/test/Transforms/GlobalOpt/musttail_cc.ll b/test/Transforms/GlobalOpt/musttail_cc.ll
new file mode 100644
index 0000000000000..fc927ea91dd8f
--- /dev/null
+++ b/test/Transforms/GlobalOpt/musttail_cc.ll
@@ -0,0 +1,34 @@
+; RUN: opt < %s -globalopt -S | FileCheck %s
+; PR36546
+
+; Check that musttail callee preserves its calling convention
+
+define i32 @test(i32 %a) {
+ ; CHECK: %ca = musttail call i32 @foo(i32 %a)
+ %ca = musttail call i32 @foo(i32 %a)
+ ret i32 %ca
+}
+
+; CHECK-LABEL: define internal i32 @foo(i32 %a)
+define internal i32 @foo(i32 %a) {
+ ret i32 %a
+}
+
+; Check that musttail caller preserves its calling convention
+
+define i32 @test2(i32 %a) {
+ %ca = call i32 @foo1(i32 %a)
+ ret i32 %ca
+}
+
+; CHECK-LABEL: define internal i32 @foo1(i32 %a)
+define internal i32 @foo1(i32 %a) {
+ ; CHECK: %ca = musttail call i32 @foo2(i32 %a)
+ %ca = musttail call i32 @foo2(i32 %a)
+ ret i32 %ca
+}
+
+; CHECK-LABEL: define internal i32 @foo2(i32 %a)
+define internal i32 @foo2(i32 %a) {
+ ret i32 %a
+}
diff --git a/test/Transforms/IPConstantProp/musttail-call.ll b/test/Transforms/IPConstantProp/musttail-call.ll
new file mode 100644
index 0000000000000..f02f6992a70d1
--- /dev/null
+++ b/test/Transforms/IPConstantProp/musttail-call.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -ipsccp -S | FileCheck %s
+; PR36485
+; musttail call result can't be replaced with a constant, unless the call
+; can be removed
+
+declare i32 @external()
+
+define i8* @start(i8 %v) {
+ %c1 = icmp eq i8 %v, 0
+ br i1 %c1, label %true, label %false
+true:
+ ; CHECK: %ca = musttail call i8* @side_effects(i8 %v)
+ ; CHECK: ret i8* %ca
+ %ca = musttail call i8* @side_effects(i8 %v)
+ ret i8* %ca
+false:
+ %c2 = icmp eq i8 %v, 1
+ br i1 %c2, label %c2_true, label %c2_false
+c2_true:
+ ; CHECK: %ca1 = musttail call i8* @no_side_effects(i8 %v)
+ ; CHECK: ret i8* %ca1
+ %ca1 = musttail call i8* @no_side_effects(i8 %v)
+ ret i8* %ca1
+c2_false:
+ ; CHECK: %ca2 = musttail call i8* @dont_zap_me(i8 %v)
+ ; CHECK: ret i8* %ca2
+ %ca2 = musttail call i8* @dont_zap_me(i8 %v)
+ ret i8* %ca2
+}
+
+define internal i8* @side_effects(i8 %v) {
+ %i1 = call i32 @external()
+
+  ; since this goes back to `start`, SCCP should see that the return value
+ ; is always `null`.
+ ; The call can't be removed due to `external` call above, though.
+
+ ; CHECK: %ca = musttail call i8* @start(i8 %v)
+ %ca = musttail call i8* @start(i8 %v)
+
+ ; Thus the result must be returned anyway
+ ; CHECK: ret i8* %ca
+ ret i8* %ca
+}
+
+define internal i8* @no_side_effects(i8 %v) readonly nounwind {
+ ; CHECK: ret i8* null
+ ret i8* null
+}
+
+define internal i8* @dont_zap_me(i8 %v) {
+ %i1 = call i32 @external()
+
+ ; The call to this function cannot be removed due to side effects. Thus the
+ ; return value should stay as it is, and should not be zapped.
+ ; CHECK: ret i8* null
+ ret i8* null
+}
diff --git a/test/Transforms/InstCombine/gep-addrspace.ll b/test/Transforms/InstCombine/gep-addrspace.ll
index aa46ea6713025..4a4951dee7fd1 100644
--- a/test/Transforms/InstCombine/gep-addrspace.ll
+++ b/test/Transforms/InstCombine/gep-addrspace.ll
@@ -32,3 +32,22 @@ entry:
ret void
}
+declare void @escape_alloca(i16*)
+
+; check that addrspacecast is not ignored (leading to an assertion failure)
+; when trying to mark a GEP as inbounds
+define { i8, i8 } @inbounds_after_addrspacecast() {
+top:
+; CHECK-LABEL: @inbounds_after_addrspacecast
+ %0 = alloca i16, align 2
+ call void @escape_alloca(i16* %0)
+ %tmpcast = bitcast i16* %0 to [2 x i8]*
+; CHECK: addrspacecast [2 x i8]* %tmpcast to [2 x i8] addrspace(11)*
+ %1 = addrspacecast [2 x i8]* %tmpcast to [2 x i8] addrspace(11)*
+; CHECK: getelementptr [2 x i8], [2 x i8] addrspace(11)* %1, i64 0, i64 1
+ %2 = getelementptr [2 x i8], [2 x i8] addrspace(11)* %1, i64 0, i64 1
+; CHECK: addrspace(11)
+ %3 = load i8, i8 addrspace(11)* %2, align 1
+ %.fca.1.insert = insertvalue { i8, i8 } zeroinitializer, i8 %3, 1
+ ret { i8, i8 } %.fca.1.insert
+}
diff --git a/test/Transforms/JumpThreading/header-succ.ll b/test/Transforms/JumpThreading/header-succ.ll
new file mode 100644
index 0000000000000..859d44cff2939
--- /dev/null
+++ b/test/Transforms/JumpThreading/header-succ.ll
@@ -0,0 +1,99 @@
+; RUN: opt -S -jump-threading < %s | FileCheck %s
+
+; Check that the heuristic for avoiding accidental introduction of irreducible
+; loops doesn't also prevent us from threading simple constructs where this
+; isn't a problem.
+
+declare void @opaque_body()
+
+define void @jump_threading_loopheader() {
+; CHECK-LABEL: @jump_threading_loopheader
+top:
+ br label %entry
+
+entry:
+ %ind = phi i32 [0, %top], [%nextind, %latch]
+ %nextind = add i32 %ind, 1
+ %cmp = icmp ule i32 %ind, 10
+; CHECK: br i1 %cmp, label %latch, label %exit
+ br i1 %cmp, label %body, label %latch
+
+body:
+ call void @opaque_body()
+; CHECK: br label %entry
+ br label %latch
+
+latch:
+ %cond = phi i2 [1, %entry], [2, %body]
+ switch i2 %cond, label %unreach [
+ i2 2, label %entry
+ i2 1, label %exit
+ ]
+
+unreach:
+ unreachable
+
+exit:
+ ret void
+}
+
+; We also need to check the opposite order of the branches, in the switch
+; instruction because jump-threading relies on that to decide which edge to
+; try to thread first.
+define void @jump_threading_loopheader2() {
+; CHECK-LABEL: @jump_threading_loopheader2
+top:
+ br label %entry
+
+entry:
+ %ind = phi i32 [0, %top], [%nextind, %latch]
+ %nextind = add i32 %ind, 1
+ %cmp = icmp ule i32 %ind, 10
+; CHECK: br i1 %cmp, label %exit, label %latch
+ br i1 %cmp, label %body, label %latch
+
+body:
+ call void @opaque_body()
+; CHECK: br label %entry
+ br label %latch
+
+latch:
+ %cond = phi i2 [1, %entry], [2, %body]
+ switch i2 %cond, label %unreach [
+ i2 1, label %entry
+ i2 2, label %exit
+ ]
+
+unreach:
+ unreachable
+
+exit:
+ ret void
+}
+
+; Check if we can handle undef branch condition.
+define void @jump_threading_loopheader3() {
+; CHECK-LABEL: @jump_threading_loopheader3
+top:
+ br label %entry
+
+entry:
+ %ind = phi i32 [0, %top], [%nextind, %latch]
+ %nextind = add i32 %ind, 1
+ %cmp = icmp ule i32 %ind, 10
+; CHECK: br i1 %cmp, label %latch, label %exit
+ br i1 %cmp, label %body, label %latch
+
+body:
+ call void @opaque_body()
+; CHECK: br label %entry
+ br label %latch
+
+latch:
+ %phi = phi i32 [undef, %entry], [0, %body]
+ %cmp1 = icmp eq i32 %phi, 0
+ br i1 %cmp1, label %entry, label %exit
+
+exit:
+ ret void
+}
diff --git a/test/Transforms/MergeFunc/inline-asm.ll b/test/Transforms/MergeFunc/inline-asm.ll
new file mode 100644
index 0000000000000..370d3c56f060e
--- /dev/null
+++ b/test/Transforms/MergeFunc/inline-asm.ll
@@ -0,0 +1,53 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+; CHECK-LABEL: @int_ptr_arg_different
+; CHECK-NEXT: call void asm
+
+; CHECK-LABEL: @int_ptr_arg_same
+; CHECK-NEXT: %2 = bitcast i32* %0 to float*
+; CHECK-NEXT: tail call void @float_ptr_arg_same(float* %2)
+
+; CHECK-LABEL: @int_ptr_null
+; CHECK-NEXT: tail call void @float_ptr_null()
+
+; Used to satisfy minimum size limit
+declare void @stuff()
+
+; Can be merged
+define void @float_ptr_null() {
+ call void asm "nop", "r"(float* null)
+ call void @stuff()
+ ret void
+}
+
+define void @int_ptr_null() {
+ call void asm "nop", "r"(i32* null)
+ call void @stuff()
+ ret void
+}
+
+; Can be merged (uses same argument differing by pointer type)
+define void @float_ptr_arg_same(float*) {
+ call void asm "nop", "r"(float* %0)
+ call void @stuff()
+ ret void
+}
+
+define void @int_ptr_arg_same(i32*) {
+ call void asm "nop", "r"(i32* %0)
+ call void @stuff()
+ ret void
+}
+
+; Can not be merged (uses different arguments)
+define void @float_ptr_arg_different(float*, float*) {
+ call void asm "nop", "r"(float* %0)
+ call void @stuff()
+ ret void
+}
+
+define void @int_ptr_arg_different(i32*, i32*) {
+ call void asm "nop", "r"(i32* %1)
+ call void @stuff()
+ ret void
+}
diff --git a/test/Transforms/MergeFunc/weak-small.ll b/test/Transforms/MergeFunc/weak-small.ll
new file mode 100644
index 0000000000000..64f1083174623
--- /dev/null
+++ b/test/Transforms/MergeFunc/weak-small.ll
@@ -0,0 +1,16 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+
+; Weak functions too small for merging to be profitable
+
+; CHECK: define weak i32 @foo(i8*, i32)
+; CHECK-NEXT: ret i32 %1
+; CHECK: define weak i32 @bar(i8*, i32)
+; CHECK-NEXT: ret i32 %1
+
+define weak i32 @foo(i8*, i32) #0 {
+ ret i32 %1
+}
+
+define weak i32 @bar(i8*, i32) #0 {
+ ret i32 %1
+}