author    Dimitry Andric <dim@FreeBSD.org>  2013-06-10 20:36:52 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2013-06-10 20:36:52 +0000
commit    59d6cff90eecf31cb3dd860c4e786674cfdd42eb (patch)
tree      909310b2e05119d1d6efda049977042abbb58bb1 /test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll
parent    4a16efa3e43e35f0cc9efe3a67f620f0017c3d36 (diff)
Diffstat (limited to 'test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll')
-rw-r--r--  test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll | 30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll b/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll
new file mode 100644
index 0000000000000..00027119f9e00
--- /dev/null
+++ b/test/CodeGen/ARM/gpr-paired-spill-thumbinst.ll
@@ -0,0 +1,30 @@
+; REQUIRES: asserts
+; RUN: llc -mtriple=thumbv7-none-linux-gnueabi -debug -o /dev/null < %s 2>&1 | FileCheck %s
+
+; This test makes sure spills of 64-bit pairs in Thumb mode actually
+; generate Thumb instructions. Previously we were inserting an ARM
+; STMIA, which happened to have the same encoding.
+
+define void @foo(i64* %addr) {
+ %val1 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val2 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val3 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val4 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val5 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+ %val7 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
+
+ ; Make sure we are actually creating the Thumb versions of the spill
+ ; instructions.
+; CHECK: t2STRDi8
+; CHECK: t2LDRDi8
+
+ store volatile i64 %val1, i64* %addr
+ store volatile i64 %val2, i64* %addr
+ store volatile i64 %val3, i64* %addr
+ store volatile i64 %val4, i64* %addr
+ store volatile i64 %val5, i64* %addr
+ store volatile i64 %val6, i64* %addr
+ store volatile i64 %val7, i64* %addr
+ ret void
+}
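
For context on the inline-asm idiom the test leans on: each asm returns an i64 through the "=&r" constraint, which on ARM targets is allocated to a register pair (a GPRPair), with $0 expanding to the low register of the pair and ${0:H} to its high partner, as ldrexd expects. Keeping seven such results simultaneously live creates enough register pressure that the allocator must spill one of the pairs, and the CHECK lines above verify that the spill/reload opcodes chosen are the Thumb2 ones (t2STRDi8/t2LDRDi8) in the -debug output. A minimal standalone sketch of the same idiom follows; it is not part of the commit, the function name load_pair is hypothetical, and it passes the address through $1 instead of hardcoding r0 as the test does:

; Sketch only, assuming the same thumbv7-none-linux-gnueabi triple as the
; RUN line above (typed pointers match the era of this test).
define i64 @load_pair(i64* %addr) {
  ; "=&r" is an early-clobber output; for an i64 result it selects a
  ; register pair, so $0 prints the low register and ${0:H} the high one.
  %val = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [$1]", "=&r,r"(i64* %addr)
  ret i64 %val
}

The sideeffect marker on the asm and the volatile stores in the test serve the same purpose: they prevent the optimizer from merging or deleting the seven loads and stores, so all seven pairs stay live at once and the spill the test wants to observe is actually forced.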