Diffstat (limited to 'test/CodeGen/ARM')
-rw-r--r--  test/CodeGen/ARM/arm-interleaved-accesses.ll | 204
-rw-r--r--  test/CodeGen/ARM/build-attributes.ll         |  19
-rw-r--r--  test/CodeGen/ARM/fnattr-trap.ll              |  40
-rw-r--r--  test/CodeGen/ARM/ldrd.ll                     |  29
-rw-r--r--  test/CodeGen/ARM/load-store-flags.ll         |  43
-rw-r--r--  test/CodeGen/ARM/wrong-t2stmia-size-opt.ll   |   2
6 files changed, 319 insertions, 18 deletions
diff --git a/test/CodeGen/ARM/arm-interleaved-accesses.ll b/test/CodeGen/ARM/arm-interleaved-accesses.ll
new file mode 100644
index 000000000000..9a9885ccdd0c
--- /dev/null
+++ b/test/CodeGen/ARM/arm-interleaved-accesses.ll
@@ -0,0 +1,204 @@
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -lower-interleaved-accesses=true < %s | FileCheck %s
+
+; CHECK-LABEL: load_factor2:
+; CHECK: vld2.8 {d16, d17}, [r0]
+define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
+ %wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
+ %strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %strided.v1 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %add = add nsw <8 x i8> %strided.v0, %strided.v1
+ ret <8 x i8> %add
+}
+
+; CHECK-LABEL: load_factor3:
+; CHECK: vld3.32 {d16, d17, d18}, [r0]
+define <2 x i32> @load_factor3(i32* %ptr) {
+ %base = bitcast i32* %ptr to <6 x i32>*
+ %wide.vec = load <6 x i32>, <6 x i32>* %base, align 4
+ %strided.v2 = shufflevector <6 x i32> %wide.vec, <6 x i32> undef, <2 x i32> <i32 2, i32 5>
+ %strided.v1 = shufflevector <6 x i32> %wide.vec, <6 x i32> undef, <2 x i32> <i32 1, i32 4>
+ %add = add nsw <2 x i32> %strided.v2, %strided.v1
+ ret <2 x i32> %add
+}
+
+; CHECK-LABEL: load_factor4:
+; CHECK: vld4.32 {d16, d18, d20, d22}, [r0]!
+; CHECK: vld4.32 {d17, d19, d21, d23}, [r0]
+define <4 x i32> @load_factor4(i32* %ptr) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+ %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+ %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v2
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_factor2:
+; CHECK: vst2.8 {d16, d17}, [r0]
+define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
+ %interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_factor3:
+; CHECK: vst3.32 {d16, d18, d20}, [r0]!
+; CHECK: vst3.32 {d17, d19, d21}, [r0]
+define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+ store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_factor4:
+; CHECK: vst4.32 {d16, d18, d20, d22}, [r0]!
+; CHECK: vst4.32 {d17, d19, d21, d23}, [r0]
+define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+ ret void
+}
+
+; The following cases test that interleaved accesses of pointer vectors can be
+; matched to ldN/stN instructions.
+
+; CHECK-LABEL: load_ptrvec_factor2:
+; CHECK: vld2.32 {d16, d17}, [r0]
+define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
+ %base = bitcast i32** %ptr to <4 x i32*>*
+ %wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
+ %strided.v0 = shufflevector <4 x i32*> %wide.vec, <4 x i32*> undef, <2 x i32> <i32 0, i32 2>
+ ret <2 x i32*> %strided.v0
+}
+
+; CHECK-LABEL: load_ptrvec_factor3:
+; CHECK: vld3.32 {d16, d17, d18}, [r0]
+define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+ %base = bitcast i32** %ptr to <6 x i32*>*
+ %wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
+ %strided.v2 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 2, i32 5>
+ store <2 x i32*> %strided.v2, <2 x i32*>* %ptr1
+ %strided.v1 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 1, i32 4>
+ store <2 x i32*> %strided.v1, <2 x i32*>* %ptr2
+ ret void
+}
+
+; CHECK-LABEL: load_ptrvec_factor4:
+; CHECK: vld4.32 {d16, d17, d18, d19}, [r0]
+define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+ %base = bitcast i32** %ptr to <8 x i32*>*
+ %wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
+ %strided.v1 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 1, i32 5>
+ %strided.v3 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 3, i32 7>
+ store <2 x i32*> %strided.v1, <2 x i32*>* %ptr1
+ store <2 x i32*> %strided.v3, <2 x i32*>* %ptr2
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor2:
+; CHECK: vst2.32 {d16, d17}, [r0]
+define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
+ %base = bitcast i32** %ptr to <4 x i32*>*
+ %interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ store <4 x i32*> %interleaved.vec, <4 x i32*>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor3:
+; CHECK: vst3.32 {d16, d17, d18}, [r0]
+define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
+ %base = bitcast i32** %ptr to <6 x i32*>*
+ %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v2_u = shufflevector <2 x i32*> %v2, <2 x i32*> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_u, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
+ store <6 x i32*> %interleaved.vec, <6 x i32*>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor4:
+; CHECK: vst4.32 {d16, d17, d18, d19}, [r0]
+define void @store_ptrvec_factor4(i32* %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
+ %base = bitcast i32* %ptr to <8 x i32*>*
+ %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v2_v3 = shufflevector <2 x i32*> %v2, <2 x i32*> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_v3, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+ store <8 x i32*> %interleaved.vec, <8 x i32*>* %base, align 4
+ ret void
+}
+
+; The following cases check that shuffle masks with undef indices can be
+; matched into ldN/stN instructions.
+
+; CHECK-LABEL: load_undef_mask_factor2:
+; CHECK: vld2.32 {d16, d17, d18, d19}, [r0]
+define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
+ %base = bitcast i32* %ptr to <8 x i32>*
+ %wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
+ %strided.v0 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 6>
+ %strided.v1 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 7>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v1
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor3:
+; CHECK: vld3.32 {d16, d18, d20}, [r0]!
+; CHECK: vld3.32 {d17, d19, d21}, [r0]
+define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
+ %strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
+ %strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+ %add = add nsw <4 x i32> %strided.v2, %strided.v1
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor4:
+; CHECK: vld4.32 {d16, d18, d20, d22}, [r0]!
+; CHECK: vld4.32 {d17, d19, d21, d23}, [r0]
+define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+ %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 undef, i32 undef>
+ %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 undef, i32 undef>
+ %add = add nsw <4 x i32> %strided.v0, %strided.v2
+ ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_undef_mask_factor2:
+; CHECK: vst2.32 {d16, d17, d18, d19}, [r0]
+define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
+ %base = bitcast i32* %ptr to <8 x i32>*
+ %interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7>
+ store <8 x i32> %interleaved.vec, <8 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor3:
+; CHECK: vst3.32 {d16, d18, d20}, [r0]!
+; CHECK: vst3.32 {d17, d19, d21}, [r0]
+define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+ %base = bitcast i32* %ptr to <12 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 undef, i32 1, i32 undef, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+ store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+ ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor4:
+; CHECK: vst4.32 {d16, d18, d20, d22}, [r0]!
+; CHECK: vst4.32 {d17, d19, d21, d23}, [r0]
+define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+ %base = bitcast i32* %ptr to <16 x i32>*
+ %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 undef, i32 undef, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+ store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index 0cc4f230f284..29c702304a3f 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -51,6 +51,13 @@
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A17-NOFPU
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
+
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3-FP16
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+d16,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3-D16-FP16
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp-only-sp,+d16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp-only-sp,+d16,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD-FP16
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=+neon,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-NEON-FP16
+
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
@@ -1049,7 +1056,7 @@
; CORTEX-R4F: .eabi_attribute 23, 3
; CORTEX-R4F: .eabi_attribute 24, 1
; CORTEX-R4F: .eabi_attribute 25, 1
-; CORTEX-R4F: .eabi_attribute 27, 1
+; CORTEX-R4F-NOT: .eabi_attribute 27, 1
; CORTEX-R4F-NOT: .eabi_attribute 28
; CORTEX-R4F-NOT: .eabi_attribute 36
; CORTEX-R4F: .eabi_attribute 38, 1
@@ -1071,7 +1078,7 @@
; CORTEX-R5: .eabi_attribute 23, 3
; CORTEX-R5: .eabi_attribute 24, 1
; CORTEX-R5: .eabi_attribute 25, 1
-; CORTEX-R5: .eabi_attribute 27, 1
+; CORTEX-R5-NOT: .eabi_attribute 27, 1
; CORTEX-R5-NOT: .eabi_attribute 28
; CORTEX-R5-NOT: .eabi_attribute 36
; CORTEX-R5: .eabi_attribute 38, 1
@@ -1091,7 +1098,7 @@
; CORTEX-R7: .eabi_attribute 7, 82
; CORTEX-R7: .eabi_attribute 8, 1
; CORTEX-R7: .eabi_attribute 9, 2
-; CORTEX-R7: .fpu vfpv3-d16
+; CORTEX-R7: .fpu vfpv3xd
; CORTEX-R7-NOT: .eabi_attribute 19
;; We default to IEEE 754 compliance
; CORTEX-R7: .eabi_attribute 20, 1
@@ -1205,6 +1212,12 @@
; CORTEX-A72-FAST-NOT: .eabi_attribute 22
; CORTEX-A72-FAST: .eabi_attribute 23, 1
+; GENERIC-FPU-VFPV3-FP16: .fpu vfpv3-fp16
+; GENERIC-FPU-VFPV3-D16-FP16: .fpu vfpv3-d16-fp16
+; GENERIC-FPU-VFPV3XD: .fpu vfpv3xd
+; GENERIC-FPU-VFPV3XD-FP16: .fpu vfpv3xd-fp16
+; GENERIC-FPU-NEON-FP16: .fpu neon-fp16
+
; GENERIC-ARMV8_1-A: .eabi_attribute 6, 14
; GENERIC-ARMV8_1-A: .eabi_attribute 7, 65
; GENERIC-ARMV8_1-A: .eabi_attribute 8, 1
diff --git a/test/CodeGen/ARM/fnattr-trap.ll b/test/CodeGen/ARM/fnattr-trap.ll
new file mode 100644
index 000000000000..492e31b4b9d1
--- /dev/null
+++ b/test/CodeGen/ARM/fnattr-trap.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mtriple=arm-unknown-unknown | FileCheck %s -check-prefix=NOOPTION
+; RUN: llc < %s -mtriple=arm-unknown-unknown -trap-func=trap_llc | FileCheck %s -check-prefix=TRAP
+
+; NOOPTION-LABEL: {{\_?}}foo0:
+; NOOPTION: trap{{$}}
+
+; TRAP-LABEL: {{\_?}}foo0:
+; TRAP: bl {{\_?}}trap_llc
+
+define void @foo0() {
+ call void @llvm.trap()
+ unreachable
+}
+
+; NOOPTION-LABEL: {{\_?}}foo1:
+; NOOPTION: bl {{\_?}}trap_func_attr0
+
+; TRAP-LABEL: {{\_?}}foo1:
+; TRAP: bl {{\_?}}trap_llc
+
+define void @foo1() {
+ call void @llvm.trap() #0
+ unreachable
+}
+
+; NOOPTION-LABEL: {{\_?}}foo2:
+; NOOPTION: bl {{\_?}}trap_func_attr1
+
+; TRAP-LABEL: {{\_?}}foo2:
+; TRAP: bl {{\_?}}trap_llc
+
+define void @foo2() {
+ call void @llvm.trap() #1
+ unreachable
+}
+
+declare void @llvm.trap() nounwind
+
+attributes #0 = { "trap-func-name"="trap_func_attr0" }
+attributes #1 = { "trap-func-name"="trap_func_attr1" }
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index a8070ea68aa2..f3e13671ac37 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -6,23 +6,24 @@
; Magic ARM pair hints work best with linearscan / fast.
-; Cortex-M3 errata 602117: LDRD with base in list may result in incorrect base
-; register when interrupted or faulted.
-
@b = external global i64*
-define i64 @t(i64 %a) nounwind readonly {
-entry:
-; A8-LABEL: t:
-; A8: ldrd r2, r3, [r2]
-
-; M3-LABEL: t:
-; M3-NOT: ldrd
+; We use the following two functions to force values into specific registers.
+declare i64* @get_ptr()
+declare void @use_i64(i64 %v)
- %0 = load i64*, i64** @b, align 4
- %1 = load i64, i64* %0, align 4
- %2 = mul i64 %1, %a
- ret i64 %2
+define void @test_ldrd(i64 %a) nounwind readonly {
+; CHECK-LABEL: test_ldrd:
+; CHECK: bl{{x?}} _get_ptr
+; A8: ldrd r0, r1, [r0]
+; Cortex-M3 errata 602117: LDRD with base in list may result in incorrect base
+; register when interrupted or faulted.
+; M3-NOT: ldrd r[[REGNUM:[0-9]+]], {{r[0-9]+}}, [r[[REGNUM]]]
+; CHECK: bl{{x?}} _use_i64
+ %ptr = call i64* @get_ptr()
+ %v = load i64, i64* %ptr, align 8
+ call void @use_i64(i64 %v)
+ ret void
}
; rdar://10435045 mixed LDRi8/LDRi12
diff --git a/test/CodeGen/ARM/load-store-flags.ll b/test/CodeGen/ARM/load-store-flags.ll
new file mode 100644
index 000000000000..5825a30109d0
--- /dev/null
+++ b/test/CodeGen/ARM/load-store-flags.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=thumbv7-apple-ios7.0 -o - %s -verify-machineinstrs | FileCheck %s
+
+; The base register for the store is killed by the last instruction, but is
+; actually also used as part of the store itself. If an extra ADD is
+; inserted, it should not kill the base.
+define void @test_base_kill(i32 %v0, i32 %v1, i32* %addr) {
+; CHECK-LABEL: test_base_kill:
+; CHECK: adds [[NEWBASE:r[0-9]+]], r2, #4
+; CHECK: stm.w [[NEWBASE]], {r0, r1, r2}
+
+ %addr.1 = getelementptr i32, i32* %addr, i32 1
+ store i32 %v0, i32* %addr.1
+
+ %addr.2 = getelementptr i32, i32* %addr, i32 2
+ store i32 %v1, i32* %addr.2
+
+ %addr.3 = getelementptr i32, i32* %addr, i32 3
+ %val = ptrtoint i32* %addr to i32
+ store i32 %val, i32* %addr.3
+
+ ret void
+}
+
+; Similar, but it's not sufficient to look at just the last instruction (where
+; liveness of the base is determined). An intervening instruction might be moved
+; past it to form the STM.
+define void @test_base_kill_mid(i32 %v0, i32* %addr, i32 %v1) {
+; CHECK-LABEL: test_base_kill_mid:
+; CHECK: adds [[NEWBASE:r[0-9]+]], r1, #4
+; CHECK: stm.w [[NEWBASE]], {r0, r1, r2}
+
+ %addr.1 = getelementptr i32, i32* %addr, i32 1
+ store i32 %v0, i32* %addr.1
+
+ %addr.2 = getelementptr i32, i32* %addr, i32 2
+ %val = ptrtoint i32* %addr to i32
+ store i32 %val, i32* %addr.2
+
+ %addr.3 = getelementptr i32, i32* %addr, i32 3
+ store i32 %v1, i32* %addr.3
+
+ ret void
+}
diff --git a/test/CodeGen/ARM/wrong-t2stmia-size-opt.ll b/test/CodeGen/ARM/wrong-t2stmia-size-opt.ll
index 4b274d2aedc2..96c5fb8961ef 100644
--- a/test/CodeGen/ARM/wrong-t2stmia-size-opt.ll
+++ b/test/CodeGen/ARM/wrong-t2stmia-size-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mcpu=cortex-a9 -O1 -filetype=obj %s -o - | llvm-objdump -arch thumb -mcpu=cortex-a9 -d - | FileCheck %s
+; RUN: llc -mtriple=thumbv7-- -mcpu=cortex-a9 -O1 -filetype=obj %s -o - | llvm-objdump -triple=thumbv7-- -mcpu=cortex-a9 -d - | FileCheck %s
target datalayout = "e-m:e-p:32:32-i1:8:32-i8:8:32-i16:16:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "thumbv7--linux-gnueabi"