summaryrefslogtreecommitdiff
path: root/test/CodeGen/ARM
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/ARM')
-rw-r--r--test/CodeGen/ARM/2009-08-21-PostRAKill2.ll2
-rw-r--r--test/CodeGen/ARM/2009-08-21-PostRAKill3.ll2
-rw-r--r--test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll2
-rw-r--r--test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll2
-rw-r--r--test/CodeGen/ARM/2011-03-23-PeepholeBug.ll2
-rw-r--r--test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll2
-rw-r--r--test/CodeGen/ARM/ARMLoadStoreDBG.mir22
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-calls.ll230
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-casts.ll590
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-icmps.ll (renamed from test/CodeGen/ARM/arm-cgp-icmps.ll)258
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-overflow.ll232
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-phis-ret.ll186
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-pointers.ll135
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-signed-icmps.ll108
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-signed.ll (renamed from test/CodeGen/ARM/arm-cgp-signed.ll)0
-rw-r--r--test/CodeGen/ARM/CGP/arm-cgp-switch.ll168
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll12
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir57
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir21
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll12
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-binops.mir561
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir177
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-casts.mir50
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir57
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir79
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir49
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalizer.mir736
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll132
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir22
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir12
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir251
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-casts.mir51
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-exts.mir288
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir66
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-load-store.mir84
-rw-r--r--test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir219
-rw-r--r--test/CodeGen/ARM/Windows/alloca.ll2
-rw-r--r--test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll6
-rw-r--r--test/CodeGen/ARM/Windows/chkstk.ll6
-rw-r--r--test/CodeGen/ARM/Windows/frame-register.ll2
-rw-r--r--test/CodeGen/ARM/Windows/memset.ll4
-rw-r--r--test/CodeGen/ARM/Windows/mingw-refptr.ll79
-rw-r--r--test/CodeGen/ARM/Windows/pic.ll5
-rw-r--r--test/CodeGen/ARM/Windows/vla.ll4
-rw-r--r--test/CodeGen/ARM/acle-intrinsics-rot.ll143
-rw-r--r--test/CodeGen/ARM/alloca-align.ll3
-rw-r--r--test/CodeGen/ARM/analyze-branch-bkpt.ll61
-rw-r--r--test/CodeGen/ARM/and-cmpz.ll9
-rw-r--r--test/CodeGen/ARM/and-load-combine.ll617
-rw-r--r--test/CodeGen/ARM/arm-and-tst-peephole.ll5
-rw-r--r--test/CodeGen/ARM/arm-cgp-phis-calls-ret.ll392
-rw-r--r--test/CodeGen/ARM/arm-shrink-wrapping.ll28
-rw-r--r--test/CodeGen/ARM/arm-storebytesmerge.ll178
-rw-r--r--test/CodeGen/ARM/armv8.2a-fp16-vector-intrinsics.ll1285
-rw-r--r--test/CodeGen/ARM/atomic-cmpxchg.ll6
-rw-r--r--test/CodeGen/ARM/atomic-ops-m33.ll140
-rw-r--r--test/CodeGen/ARM/build-attributes.ll92
-rw-r--r--test/CodeGen/ARM/cbz-implicit-it-range.ll47
-rw-r--r--test/CodeGen/ARM/clz.ll8
-rw-r--r--test/CodeGen/ARM/cmn.ll26
-rw-r--r--test/CodeGen/ARM/cmp.ll24
-rw-r--r--test/CodeGen/ARM/cmpxchg.mir24
-rw-r--r--test/CodeGen/ARM/codemodel.ll9
-rw-r--r--test/CodeGen/ARM/constant-island-movwt.mir902
-rw-r--r--test/CodeGen/ARM/constantpool-promote-dbg.ll8
-rw-r--r--test/CodeGen/ARM/constantpool-promote.ll46
-rw-r--r--test/CodeGen/ARM/copy-by-struct-i32.ll61
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll4
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-ldm.ll2
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll4
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vldm.ll4
-rw-r--r--test/CodeGen/ARM/crash-O0.ll2
-rw-r--r--test/CodeGen/ARM/crash-greedy-v6.ll4
-rw-r--r--test/CodeGen/ARM/crash-greedy.ll2
-rw-r--r--test/CodeGen/ARM/cttz_vector.ll419
-rw-r--r--test/CodeGen/ARM/dagcombine-anyexttozeroext.ll124
-rw-r--r--test/CodeGen/ARM/dbg-range-extension.mir54
-rw-r--r--test/CodeGen/ARM/debug-frame-large-stack.ll2
-rw-r--r--test/CodeGen/ARM/debug-frame-no-debug.ll2
-rw-r--r--test/CodeGen/ARM/debug-frame-vararg.ll4
-rw-r--r--test/CodeGen/ARM/debug-frame.ll26
-rw-r--r--test/CodeGen/ARM/debug-info-qreg.ll4
-rw-r--r--test/CodeGen/ARM/debugtrap.ll8
-rw-r--r--test/CodeGen/ARM/demanded-bits-and.ll35
-rw-r--r--test/CodeGen/ARM/disable-fp-elim.ll8
-rw-r--r--test/CodeGen/ARM/ehabi-unwind.ll2
-rw-r--r--test/CodeGen/ARM/ehabi.ll24
-rw-r--r--test/CodeGen/ARM/emutls_generic.ll5
-rw-r--r--test/CodeGen/ARM/execute-only-big-stack-frame.ll2
-rw-r--r--test/CodeGen/ARM/execute-only-section.ll1
-rw-r--r--test/CodeGen/ARM/execute-only.ll4
-rw-r--r--test/CodeGen/ARM/fast-isel-align.ll8
-rw-r--r--test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll18
-rw-r--r--test/CodeGen/ARM/fast-isel.ll20
-rw-r--r--test/CodeGen/ARM/float-helpers.s16
-rw-r--r--test/CodeGen/ARM/fmacs.ll20
-rw-r--r--test/CodeGen/ARM/fold-sext-sextload.ll17
-rw-r--r--test/CodeGen/ARM/fold-stack-adjust.ll6
-rw-r--r--test/CodeGen/ARM/fold-zext-zextload.ll13
-rw-r--r--test/CodeGen/ARM/fp16-instructions.ll6
-rw-r--r--test/CodeGen/ARM/fp16-promote.ll8
-rw-r--r--test/CodeGen/ARM/fp16-vld.ll48
-rw-r--r--test/CodeGen/ARM/fp16-vminmaxnm-vector.ll302
-rw-r--r--test/CodeGen/ARM/fpconv.ll22
-rw-r--r--test/CodeGen/ARM/frame-register.ll8
-rw-r--r--test/CodeGen/ARM/fusedMAC.ll9
-rw-r--r--test/CodeGen/ARM/global-merge-external-2.ll65
-rw-r--r--test/CodeGen/ARM/half.ll7
-rw-r--r--test/CodeGen/ARM/hello.ll2
-rw-r--r--test/CodeGen/ARM/illegal-bitfield-loadstore.ll4
-rw-r--r--test/CodeGen/ARM/inline-asm-clobber.ll27
-rw-r--r--test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll122
-rw-r--r--test/CodeGen/ARM/inlineasm-X-allocation.ll29
-rw-r--r--test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll307
-rw-r--r--test/CodeGen/ARM/intrinsics-overflow.ll19
-rw-r--r--test/CodeGen/ARM/invalid-target.ll16
-rw-r--r--test/CodeGen/ARM/ldrcppic.ll56
-rw-r--r--test/CodeGen/ARM/ldrd.ll2
-rw-r--r--test/CodeGen/ARM/ldrex-frame-size.ll36
-rw-r--r--test/CodeGen/ARM/ldstrex-m.ll21
-rw-r--r--test/CodeGen/ARM/ldstrex.ll85
-rw-r--r--test/CodeGen/ARM/load_store_opt_clobber_cpsr.mir33
-rw-r--r--test/CodeGen/ARM/load_store_opt_reg_limit.mir40
-rw-r--r--test/CodeGen/ARM/loop-align-cortex-m.ll49
-rw-r--r--test/CodeGen/ARM/lowerMUL-newload.ll57
-rw-r--r--test/CodeGen/ARM/machine-licm.ll6
-rw-r--r--test/CodeGen/ARM/macho-frame-offset.ll2
-rw-r--r--test/CodeGen/ARM/memcpy-inline.ll7
-rw-r--r--test/CodeGen/ARM/memcpy-ldm-stm.ll14
-rw-r--r--test/CodeGen/ARM/memfunc.ll8
-rw-r--r--test/CodeGen/ARM/misched-fusion-aes.ll15
-rw-r--r--test/CodeGen/ARM/misched-int-basic-thumb2.mir2
-rw-r--r--test/CodeGen/ARM/misched-int-basic.mir2
-rw-r--r--test/CodeGen/ARM/none-macho.ll4
-rw-r--r--test/CodeGen/ARM/nonreserved-callframe-with-basereg.mir54
-rw-r--r--test/CodeGen/ARM/popcnt.ll191
-rw-r--r--test/CodeGen/ARM/pow.ll92
-rw-r--r--test/CodeGen/ARM/pr36577.ll9
-rw-r--r--test/CodeGen/ARM/pr39060.ll33
-rw-r--r--test/CodeGen/ARM/pr39571.ll33
-rw-r--r--test/CodeGen/ARM/print-registers.ll10
-rw-r--r--test/CodeGen/ARM/readonly-aliases.ll17
-rw-r--r--test/CodeGen/ARM/sched-it-debug-nodes.mir22
-rw-r--r--test/CodeGen/ARM/sdiv-pow2-arm-size.ll79
-rw-r--r--test/CodeGen/ARM/sdiv-pow2-thumb-size.ll105
-rw-r--r--test/CodeGen/ARM/select-imm.ll22
-rw-r--r--test/CodeGen/ARM/select.ll13
-rw-r--r--test/CodeGen/ARM/select_const.ll5
-rw-r--r--test/CodeGen/ARM/setcc-logic.ll3
-rw-r--r--test/CodeGen/ARM/shuffle.ll2
-rw-r--r--test/CodeGen/ARM/sincos.ll18
-rw-r--r--test/CodeGen/ARM/smlad0.ll80
-rw-r--r--test/CodeGen/ARM/smladx-1.ll240
-rw-r--r--test/CodeGen/ARM/smlald0.ll173
-rw-r--r--test/CodeGen/ARM/smlald1.ll94
-rw-r--r--test/CodeGen/ARM/smlald2.ll224
-rw-r--r--test/CodeGen/ARM/smlaldx-1.ll249
-rw-r--r--test/CodeGen/ARM/smlaldx-2.ll248
-rw-r--r--test/CodeGen/ARM/smml.ll4
-rw-r--r--test/CodeGen/ARM/smul.ll181
-rw-r--r--test/CodeGen/ARM/softfp-fabs-fneg.ll4
-rw-r--r--test/CodeGen/ARM/ssp-data-layout.ll2
-rw-r--r--test/CodeGen/ARM/sub-cmp-peephole.ll2
-rw-r--r--test/CodeGen/ARM/subreg-remat.ll2
-rw-r--r--test/CodeGen/ARM/tail-call.ll12
-rw-r--r--test/CodeGen/ARM/tls-models.ll4
-rw-r--r--test/CodeGen/ARM/trap.ll14
-rw-r--r--test/CodeGen/ARM/twoaddrinstr.ll4
-rw-r--r--test/CodeGen/ARM/umulo-128-legalisation-lowering.ll210
-rw-r--r--test/CodeGen/ARM/umulo-32.ll4
-rw-r--r--test/CodeGen/ARM/umulo-64-legalisation-lowering.ll69
-rw-r--r--test/CodeGen/ARM/unwind-fp.ll15
-rw-r--r--test/CodeGen/ARM/v8m-tail-call.ll167
-rw-r--r--test/CodeGen/ARM/vcombine.ll8
-rw-r--r--test/CodeGen/ARM/vcvt.ll10
-rw-r--r--test/CodeGen/ARM/vdup.ll353
-rw-r--r--test/CodeGen/ARM/vector-DAGCombine.ll13
-rw-r--r--test/CodeGen/ARM/vector-extend-narrow.ll4
-rw-r--r--test/CodeGen/ARM/vfcmp.ll110
-rw-r--r--test/CodeGen/ARM/vfp-reg-stride.ll23
-rw-r--r--test/CodeGen/ARM/vtrn.ll200
-rw-r--r--test/CodeGen/ARM/vuzp.ll242
-rw-r--r--test/CodeGen/ARM/wide-compares.ll226
183 files changed, 12540 insertions, 2762 deletions
diff --git a/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll b/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
index 489d4e45236c..d5570df717f5 100644
--- a/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
+++ b/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
+; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
; ModuleID = '<stdin>'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll b/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
index 133fc0588a91..b7a252beefbd 100644
--- a/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
+++ b/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -disable-fp-elim -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
+; RUN: llc < %s -asm-verbose=false -O3 -relocation-model=pic -frame-pointer=all -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 -post-RA-scheduler
; ModuleID = '<stdin>'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
diff --git a/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll b/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
index 30a388bb5877..b33b333b299d 100644
--- a/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
+++ b/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8
; rdar://9117613
diff --git a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
index 92bdd19a7b3d..b526b8c3075d 100644
--- a/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
+++ b/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 | FileCheck %s
; Do not form Thumb2 ldrd / strd if the offset is not multiple of 4.
; rdar://9133587
diff --git a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
index 83c7676e57ef..4567b7f5fe21 100644
--- a/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
+++ b/test/CodeGen/ARM/2011-03-23-PeepholeBug.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 | FileCheck %s
; subs r4, #1
; cmp r4, 0
diff --git a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
index ef33b2f50184..4ddc7284f58e 100644
--- a/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
+++ b/test/CodeGen/ARM/2012-04-24-SplitEHCriticalEdge.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv7-apple-ios -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 < %s
+; RUN: llc -mtriple=thumbv7-apple-ios -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 < %s
; CodeGen SplitCriticalEdge() shouldn't try to break edge to a landing pad.
; rdar://11300144
diff --git a/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
index 76f1523f7790..ce33dcf52ec4 100644
--- a/test/CodeGen/ARM/ARMLoadStoreDBG.mir
+++ b/test/CodeGen/ARM/ARMLoadStoreDBG.mir
@@ -120,19 +120,19 @@ body: |
bb.0.entry:
liveins: $r0, $r1, $r2, $r3, $lr, $r7
- DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
- DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28
- DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28
+ DBG_VALUE $r0, $noreg, !18, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r2, $noreg, !20, !27, debug-location !28
+ DBG_VALUE $r3, $noreg, !21, !27, debug-location !28
t2CMPri $r3, 4, 14, $noreg, implicit-def $cpsr, debug-location !31
t2Bcc %bb.2.if.end, 2, killed $cpsr
bb.1:
liveins: $lr, $r7
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
$r0 = t2MOVi -1, 14, $noreg, $noreg
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
tBX_RET 14, $noreg, implicit $r0, debug-location !34
bb.2.if.end:
@@ -142,12 +142,12 @@ body: |
frame-setup CFI_INSTRUCTION def_cfa_offset 8
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
- DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
- DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28
- DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28
+ DBG_VALUE $r0, $noreg, !18, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r2, $noreg, !20, !27, debug-location !28
+ DBG_VALUE $r3, $noreg, !21, !27, debug-location !28
$r1 = COPY killed $r2, debug-location !32
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
$r2 = COPY killed $r3, debug-location !32
tBL 14, $noreg, @g, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit-def $sp, debug-location !32
$r0 = t2MOVi 0, 14, $noreg, $noreg
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-calls.ll b/test/CodeGen/ARM/CGP/arm-cgp-calls.ll
new file mode 100644
index 000000000000..4c95ade534dc
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-calls.ll
@@ -0,0 +1,230 @@
+; RUN: llc -mtriple=thumbv8 -arm-disable-cgp=false %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv8 -arm-disable-cgp=false %s -o - | FileCheck %s
+
+; Check that the pass doesn't try to promote the immediate parameters.
+; CHECK-LABEL: call_with_imms
+; CHECK-NOT: uxt
+define i8 @call_with_imms(i8* %arg) {
+ %call = tail call arm_aapcs_vfpcc zeroext i8 @dummy2(i8* nonnull %arg, i8 zeroext 0, i8 zeroext 0)
+ %cmp = icmp eq i8 %call, 0
+ %res = select i1 %cmp, i8 %call, i8 1
+ ret i8 %res
+}
+
+; Test that the call result is still extended.
+; CHECK-LABEL: test_call:
+; CHECK: bl
+; CHECK-NEXT: sxtb r1, r0
+define i16 @test_call(i8 zeroext %arg) {
+ %call = call i8 @dummy_i8(i8 %arg)
+ %cmp = icmp ult i8 %call, 128
+ %conv = zext i1 %cmp to i16
+ ret i16 %conv
+}
+
+; CHECK-LABEL: promote_i8_sink_i16_1
+; CHECK: bl dummy_i8
+; CHECK: add{{.*}} r0, #1
+; CHECK-NOT: uxt
+; CHECK: cmp r0
+define i16 @promote_i8_sink_i16_1(i8 zeroext %arg0, i16 zeroext %arg1, i16 zeroext %arg2) {
+ %call = tail call zeroext i8 @dummy_i8(i8 %arg0)
+ %add = add nuw i8 %call, 1
+ %conv = zext i8 %add to i16
+ %cmp = icmp ne i16 %conv, %arg1
+ %sel = select i1 %cmp, i16 %arg1, i16 %arg2
+ %res = tail call zeroext i16 @dummy3(i16 %sel)
+ ret i16 %res
+}
+
+; CHECK-LABEL: promote_i8_sink_i16_2
+; CHECK: bl dummy_i8
+; CHECK: add{{.*}} r0, #1
+; CHECK-NOT: uxt
+; CHECK: cmp r0
+define i16 @promote_i8_sink_i16_2(i8 zeroext %arg0, i8 zeroext %arg1, i16 zeroext %arg2) {
+ %call = tail call zeroext i8 @dummy_i8(i8 %arg0)
+ %add = add nuw i8 %call, 1
+ %cmp = icmp ne i8 %add, %arg1
+ %conv = zext i8 %arg1 to i16
+ %sel = select i1 %cmp, i16 %conv, i16 %arg2
+ %res = tail call zeroext i16 @dummy3(i16 %sel)
+ ret i16 %res
+}
+
+@uc = global i8 42, align 1
+@LL = global i64 0, align 8
+
+; CHECK-LABEL: zext_i64
+; CHECK: ldrb
+; CHECK: strd
+define void @zext_i64() {
+entry:
+ %0 = load i8, i8* @uc, align 1
+ %conv = zext i8 %0 to i64
+ store i64 %conv, i64* @LL, align 8
+ %cmp = icmp eq i8 %0, 42
+ %conv1 = zext i1 %cmp to i32
+ %call = tail call i32 bitcast (i32 (...)* @assert to i32 (i32)*)(i32 %conv1)
+ ret void
+}
+
+@a = global i16* null, align 4
+@b = global i32 0, align 4
+
+; CHECK-LABEL: constexpr
+; CHECK: uxth
+define i32 @constexpr() {
+entry:
+ store i32 ptrtoint (i32* @b to i32), i32* @b, align 4
+ %0 = load i16*, i16** @a, align 4
+ %1 = load i16, i16* %0, align 2
+ %or = or i16 %1, ptrtoint (i32* @b to i16)
+ store i16 %or, i16* %0, align 2
+ %cmp = icmp ne i16 %or, 4
+ %conv3 = zext i1 %cmp to i32
+ %call = tail call i32 bitcast (i32 (...)* @e to i32 (i32)*)(i32 %conv3) #2
+ ret i32 undef
+}
+
+; The call to safe_lshift_func takes two parameters, but they're the same value
+; just one is zext. We do support zext now, so the transformation should
+; trigger and we don't want to see uxtb here.
+; CHECK-LABEL: call_zext_i8_i32
+; CHECK-NOT: uxt
+define fastcc i32 @call_zext_i8_i32(i32 %p_45, i8 zeroext %p_46) {
+for.cond8.preheader:
+ %call217 = call fastcc zeroext i8 @safe_mul_func_uint8_t_u_u(i8 zeroext undef)
+ %tobool219 = icmp eq i8 %call217, 0
+ br i1 %tobool219, label %for.end411, label %for.cond273.preheader
+
+for.cond273.preheader: ; preds = %for.cond8.preheader
+ %call217.lcssa = phi i8 [ %call217, %for.cond8.preheader ]
+ %conv218.le = zext i8 %call217.lcssa to i32
+ %call346 = call fastcc zeroext i8 @safe_lshift_func(i8 zeroext %call217.lcssa, i32 %conv218.le)
+ unreachable
+
+for.end411: ; preds = %for.cond8.preheader
+ %call452 = call fastcc i64 @safe_sub_func_int64_t_s_s(i64 undef, i64 4)
+ unreachable
+}
+
+%struct.anon = type { i32 }
+
+@g_57 = hidden local_unnamed_addr global %struct.anon zeroinitializer, align 4
+@g_893 = hidden local_unnamed_addr global %struct.anon zeroinitializer, align 4
+@g_82 = hidden local_unnamed_addr global i32 0, align 4
+
+; Test that the transform bails on finding %conv4, a trunc
+; CHECK-LABEL: call_return_pointer
+; CHECK: sxth
+; CHECK: uxt
+define hidden i32 @call_return_pointer(i8 zeroext %p_13) local_unnamed_addr #0 {
+entry:
+ %conv1 = zext i8 %p_13 to i16
+ %call = tail call i16** @func_62(i8 zeroext undef, i32 undef, i16 signext %conv1, i32* undef)
+ %0 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @g_893, i32 0, i32 0), align 4
+ %conv2 = trunc i32 %0 to i16
+ br label %for.cond
+
+for.cond: ; preds = %for.cond.backedge, %entry
+ %p_13.addr.0 = phi i8 [ %p_13, %entry ], [ %p_13.addr.0.be, %for.cond.backedge ]
+ %tobool = icmp eq i8 %p_13.addr.0, 0
+ br i1 %tobool, label %for.cond.backedge, label %if.then
+
+for.cond.backedge: ; preds = %for.cond, %if.then
+ %p_13.addr.0.be = phi i8 [ %conv4, %if.then ], [ 0, %for.cond ]
+ br label %for.cond
+
+if.then: ; preds = %for.cond
+ %call3 = tail call fastcc signext i16 @safe_sub_func_int16_t_s_s(i16 signext %conv2)
+ %conv4 = trunc i16 %call3 to i8
+ br label %for.cond.backedge
+}
+
+; Check that d.sroa.0.0.be is promoted and passed directly into the tail call.
+; CHECK-LABEL: check_zext_phi_call_arg
+; CHECK-NOT: uxt
+define i32 @check_zext_phi_call_arg() {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.cond.backedge, %entry
+ %d.sroa.0.0 = phi i16 [ 30, %entry ], [ %d.sroa.0.0.be, %for.cond.backedge ]
+ %tobool = icmp eq i16 %d.sroa.0.0, 0
+ br i1 %tobool, label %for.cond.backedge, label %if.then
+
+for.cond.backedge: ; preds = %for.cond, %if.then
+ %d.sroa.0.0.be = phi i16 [ %call, %if.then ], [ 0, %for.cond ]
+ br label %for.cond
+
+if.then: ; preds = %for.cond
+ %d.sroa.0.0.insert.ext = zext i16 %d.sroa.0.0 to i32
+ %call = tail call zeroext i16 bitcast (i16 (...)* @f to i16 (i32)*)(i32 %d.sroa.0.0.insert.ext) #2
+ br label %for.cond.backedge
+}
+
+%struct.atomic_flag = type { i8 }
+
+; CHECK-LABEL: atomic_flag_test_and_set
+; CHECK-NOT: uxt
+define zeroext i1 @atomic_flag_test_and_set(%struct.atomic_flag* %object) {
+entry:
+ %_Value = getelementptr inbounds %struct.atomic_flag, %struct.atomic_flag* %object, i32 0, i32 0
+ %call = tail call arm_aapcscc zeroext i8 @__atomic_exchange_1(i8* %_Value, i8 zeroext 1, i32 5) #1
+ %0 = and i8 %call, 1
+ %tobool = icmp ne i8 %0, 0
+ ret i1 %tobool
+}
+
+; CHECK-LABEL: i1_zeroext_call
+; CHECK: uxt
+define i1 @i1_zeroext_call(i16* %ts, i32 %a, i16* %b, i8* %c) {
+entry:
+ %0 = load i16, i16* %ts, align 2
+ %conv.i860 = trunc i32 %a to i16
+ store i16 %conv.i860, i16* %b, align 2
+ %call.i848 = call zeroext i1 @i1_zeroext(i8* %c, i32 64, i16 zeroext %conv.i860)
+ br i1 %call.i848, label %if.then223, label %if.else227
+
+if.then223:
+ %cmp235 = icmp eq i16 %0, %conv.i860
+ br label %exit
+
+if.else227:
+ %cmp236 = icmp ult i16 %0, %conv.i860
+ br label %exit
+
+exit:
+ %retval = phi i1 [ %cmp235, %if.then223 ], [ %cmp236, %if.else227 ]
+ ret i1 %retval
+}
+
+; CHECK-LABEL: promote_arg_pass_to_call
+; CHECK-NOT: uxt
+define i16 @promote_arg_pass_to_call(i16 zeroext %arg1, i16 zeroext %arg2) {
+ %conv = add nuw i16 %arg1, 15
+ %mul = mul nuw nsw i16 %conv, 3
+ %cmp = icmp ult i16 %mul, %arg2
+ %trunc = trunc i16 %arg1 to i8
+ %res = call zeroext i16 @dummy4(i1 %cmp, i8 %trunc, i16 %arg1)
+ ret i16 %res
+}
+
+
+declare i32 @assert(...)
+declare i8 @dummy_i8(i8)
+declare i8 @dummy2(i8*, i8, i8)
+declare i16 @dummy3(i16)
+declare i16 @dummy4(i1, i8, i16)
+
+declare dso_local i32 @e(...) local_unnamed_addr #1
+declare dso_local zeroext i16 @f(...) local_unnamed_addr #1
+declare dso_local arm_aapcscc i8 @__atomic_exchange_1(i8*, i8, i32) local_unnamed_addr
+
+declare noalias i16** @func_62(i8 zeroext %p_63, i32 %p_64, i16 signext %p_65, i32* nocapture readnone %p_66)
+declare fastcc signext i16 @safe_sub_func_int16_t_s_s(i16 signext %si2)
+declare dso_local fastcc i64 @safe_sub_func_int64_t_s_s(i64, i64)
+declare dso_local fastcc zeroext i8 @safe_lshift_func(i8 zeroext, i32)
+declare dso_local fastcc zeroext i8 @safe_mul_func_uint8_t_u_u(i8 returned zeroext)
+declare i1 @i1_zeroext(i8*, i32, i16 zeroext)
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-casts.ll b/test/CodeGen/ARM/CGP/arm-cgp-casts.ll
new file mode 100644
index 000000000000..273fea370a1f
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-casts.ll
@@ -0,0 +1,590 @@
+; RUN: llc -mtriple=thumbv8.main -mcpu=cortex-m33 %s -arm-disable-cgp=false -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
+; RUN: llc -mtriple=thumbv7-linux-android %s -arm-disable-cgp=false -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
+; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8 %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -arm-enable-scalar-dsp-imms=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP-IMM
+
+; Transform will fail because the trunc is not a sink.
+; CHECK-COMMON-LABEL: dsp_trunc
+; CHECK-COMMON: add [[ADD:[^ ]+]],
+; CHECK-DSP-NEXT: ldrh r1, [r3]
+; CHECK-DSP-NEXT: ldrh r2, [r2]
+; CHECK-DSP-NEXT: subs r1, r1, [[ADD]]
+; CHECK-DSP-NEXT: add r0, r2
+; CHECK-DSP-NEXT: uxth r3, r1
+; CHECK-DSP-NEXT: uxth r2, r0
+; CHECK-DSP-NEXT: cmp r2, r3
+
+; With DSP-IMM, we could have:
+; movs r1, #0
+; uxth r0, r0
+; usub16 r1, r1, r0
+; ldrh r0, [r2]
+; ldrh r3, [r3]
+; usub16 r0, r0, r1
+; uadd16 r1, r3, r1
+; cmp r0, r1
+define i16 @dsp_trunc(i32 %arg0, i32 %arg1, i16* %gep0, i16* %gep1) {
+entry:
+ %add0 = add i32 %arg0, %arg1
+ %conv0 = trunc i32 %add0 to i16
+ %sub0 = sub i16 0, %conv0
+ %load0 = load i16, i16* %gep0, align 2
+ %load1 = load i16, i16* %gep1, align 2
+ %sub1 = sub i16 %load0, %sub0
+ %add1 = add i16 %load1, %sub0
+ %cmp = icmp ult i16 %sub1, %add1
+ %res = select i1 %cmp, i16 %add1, i16 %sub1
+ ret i16 %res
+}
+
+; CHECK-COMMON-LABEL: trunc_i16_i8
+; CHECK-COMMON: ldrh
+; CHECK-COMMON: uxtb
+; CHECK-COMMON: cmp
+define i8 @trunc_i16_i8(i16* %ptr, i16 zeroext %arg0, i8 zeroext %arg1) {
+entry:
+ %0 = load i16, i16* %ptr
+ %1 = add i16 %0, %arg0
+ %2 = trunc i16 %1 to i8
+ %3 = icmp ugt i8 %2, %arg1
+ %4 = select i1 %3, i8 %2, i8 %arg1
+ ret i8 %4
+}
+
+; The pass performs the transform, but a uxtb will still be inserted to handle
+; the zext to the icmp.
+; CHECK-COMMON-LABEL: icmp_i32_zext:
+; CHECK-COMMON: sub
+; CHECK-COMMON: uxtb
+; CHECK-COMMON: cmp
+define i8 @icmp_i32_zext(i8* %ptr) {
+entry:
+ %gep = getelementptr inbounds i8, i8* %ptr, i32 0
+ %0 = load i8, i8* %gep, align 1
+ %1 = sub nuw nsw i8 %0, 1
+ %conv44 = zext i8 %0 to i32
+ br label %preheader
+
+preheader:
+ br label %body
+
+body:
+ %2 = phi i8 [ %1, %preheader ], [ %3, %if.end ]
+ %si.0274 = phi i32 [ %conv44, %preheader ], [ %inc, %if.end ]
+ %conv51266 = zext i8 %2 to i32
+ %cmp52267 = icmp eq i32 %si.0274, %conv51266
+ br i1 %cmp52267, label %if.end, label %exit
+
+if.end:
+ %inc = add i32 %si.0274, 1
+ %gep1 = getelementptr inbounds i8, i8* %ptr, i32 %inc
+ %3 = load i8, i8* %gep1, align 1
+ br label %body
+
+exit:
+ ret i8 %2
+}
+
+; We don't handle sext
+; CHECK-COMMON-LABEL: icmp_sext_zext_store_i8_i16
+; CHECK-COMMON: ldrb
+; CHECK-COMMON: ldrsh
+define i32 @icmp_sext_zext_store_i8_i16() {
+entry:
+ %0 = load i8, i8* getelementptr inbounds ([16 x i8], [16 x i8]* @d_uch, i32 0, i32 2), align 1
+ %conv = zext i8 %0 to i16
+ store i16 %conv, i16* @sh1, align 2
+ %conv1 = zext i8 %0 to i32
+ %1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @d_sh, i32 0, i32 2), align 2
+ %conv2 = sext i16 %1 to i32
+ %cmp = icmp eq i32 %conv1, %conv2
+ %conv3 = zext i1 %cmp to i32
+ ret i32 %conv3
+}
+
+; CHECK-COMMON-LABEL: or_icmp_ugt:
+; CHECK-COMMON: ldrb
+; CHECK-COMMON: subs.w
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: cmp
+define i1 @or_icmp_ugt(i32 %arg, i8* %ptr) {
+entry:
+ %0 = load i8, i8* %ptr
+ %1 = zext i8 %0 to i32
+ %mul = shl nuw nsw i32 %1, 1
+ %add0 = add nuw nsw i32 %mul, 6
+ %cmp0 = icmp ne i32 %arg, %add0
+ %add1 = add i8 %0, -1
+ %cmp1 = icmp ugt i8 %add1, 3
+ %or = or i1 %cmp0, %cmp1
+ ret i1 %or
+}
+
+; We currently only handle truncs as sinks, so a uxt will still be needed for
+; the icmp ugt instruction.
+; CHECK-COMMON-LABEL: urem_trunc_icmps
+; CHECK-COMMON: cmp
+; CHECK-COMMON: uxt
+; CHECK-COMMON: cmp
+define void @urem_trunc_icmps(i16** %in, i32* %g, i32* %k) {
+entry:
+ %ptr = load i16*, i16** %in, align 4
+ %ld = load i16, i16* %ptr, align 2
+ %cmp.i = icmp eq i16 %ld, 0
+ br i1 %cmp.i, label %exit, label %cond.false.i
+
+cond.false.i:
+ %rem = urem i16 5, %ld
+ %extract.t = trunc i16 %rem to i8
+ br label %body
+
+body:
+ %cond.in.i.off0 = phi i8 [ %extract.t, %cond.false.i ], [ %add, %for.inc ]
+ %cmp = icmp ugt i8 %cond.in.i.off0, 7
+ %conv5 = zext i1 %cmp to i32
+ store i32 %conv5, i32* %g, align 4
+ %.pr = load i32, i32* %k, align 4
+ %tobool13150 = icmp eq i32 %.pr, 0
+ br i1 %tobool13150, label %for.inc, label %exit
+
+for.inc:
+ %add = add nuw i8 %cond.in.i.off0, 1
+ br label %body
+
+exit:
+ ret void
+}
+
+; Check that %exp requires uxth in all cases, and will also be required to
+; promote %1 for the call - unless we can generate a uadd16.
+; CHECK-COMMON-LABEL: zext_load_sink_call:
+; CHECK-COMMON: uxt
+; CHECK-DSP-IMM: uadd16
+; CHECK-COMMON: cmp
+; CHECK-NODSP: uxt
+; CHECK-DSP-IMM-NOT: uxt
+define i32 @zext_load_sink_call(i16* %ptr, i16 %exp) {
+entry:
+ %0 = load i16, i16* %ptr, align 4
+ %1 = add i16 %exp, 3
+ %cmp = icmp eq i16 %0, %exp
+ br i1 %cmp, label %exit, label %if.then
+
+if.then:
+ %conv0 = zext i16 %0 to i32
+ %conv1 = zext i16 %1 to i32
+ %call = tail call arm_aapcs_vfpcc i32 @dummy(i32 %conv0, i32 %conv1)
+ br label %exit
+
+exit:
+ %exitval = phi i32 [ %call, %if.then ], [ 0, %entry ]
+ ret i32 %exitval
+}
+
+; CHECK-COMMON-LABEL: bitcast_i16
+; CHECK-COMMON-NOT: uxt
+define i16 @bitcast_i16(i16 zeroext %arg0, i16 zeroext %arg1) {
+entry:
+ %cast = bitcast i16 12345 to i16
+ %add = add nuw i16 %arg0, 1
+ %cmp = icmp ule i16 %add, %cast
+ %res = select i1 %cmp, i16 %arg1, i16 32657
+ ret i16 %res
+}
+
+; CHECK-COMMON-LABEL: bitcast_i8
+; CHECK-COMMON-NOT: uxt
+define i8 @bitcast_i8(i8 zeroext %arg0, i8 zeroext %arg1) {
+entry:
+ %cast = bitcast i8 127 to i8
+ %mul = shl nuw i8 %arg0, 1
+ %cmp = icmp uge i8 %mul, %arg1
+ %res = select i1 %cmp, i8 %cast, i8 128
+ ret i8 %res
+}
+
+; CHECK-COMMON-LABEL: bitcast_i16_minus
+; CHECK-COMMON-NOT: uxt
+define i16 @bitcast_i16_minus(i16 zeroext %arg0, i16 zeroext %arg1) {
+entry:
+ %cast = bitcast i16 -12345 to i16
+ %xor = xor i16 %arg0, 7
+ %cmp = icmp eq i16 %xor, %arg1
+ %res = select i1 %cmp, i16 %cast, i16 32657
+ ret i16 %res
+}
+
+; CHECK-COMMON-LABEL: bitcast_i8_minus
+; CHECK-COMMON-NOT: uxt
+define i8 @bitcast_i8_minus(i8 zeroext %arg0, i8 zeroext %arg1) {
+entry:
+ %cast = bitcast i8 -127 to i8
+ %and = and i8 %arg0, 3
+ %cmp = icmp ne i8 %and, %arg1
+ %res = select i1 %cmp, i8 %cast, i8 128
+ ret i8 %res
+}
+
+declare i32 @dummy(i32, i32)
+
+@d_uch = hidden local_unnamed_addr global [16 x i8] zeroinitializer, align 1
+@sh1 = hidden local_unnamed_addr global i16 0, align 2
+@d_sh = hidden local_unnamed_addr global [16 x i16] zeroinitializer, align 2
+
+; CHECK-COMMON-LABEL: two_stage_zext_trunc_mix
+; CHECK-COMMON-NOT: uxt
+define i8* @two_stage_zext_trunc_mix(i32* %this, i32 %__pos1, i32 %__n1, i32** %__str, i32 %__pos2, i32 %__n2) {
+entry:
+ %__size_.i.i.i.i = bitcast i32** %__str to i8*
+ %0 = load i8, i8* %__size_.i.i.i.i, align 4
+ %1 = and i8 %0, 1
+ %tobool.i.i.i.i = icmp eq i8 %1, 0
+ %__size_.i5.i.i = getelementptr inbounds i32*, i32** %__str, i32 %__n1
+ %cast = bitcast i32** %__size_.i5.i.i to i32*
+ %2 = load i32, i32* %cast, align 4
+ %3 = lshr i8 %0, 1
+ %4 = zext i8 %3 to i32
+ %cond.i.i = select i1 %tobool.i.i.i.i, i32 %4, i32 %2
+ %__size_.i.i.i.i.i = bitcast i32* %this to i8*
+ %5 = load i8, i8* %__size_.i.i.i.i.i, align 4
+ %6 = and i8 %5, 1
+ %tobool.i.i.i.i.i = icmp eq i8 %6, 0
+ %7 = getelementptr inbounds i8, i8* %__size_.i.i.i.i, i32 %__pos1
+ %8 = getelementptr inbounds i8, i8* %__size_.i.i.i.i, i32 %__pos2
+ %res = select i1 %tobool.i.i.i.i.i, i8* %7, i8* %8
+ ret i8* %res
+}
+
+; CHECK-COMMON-LABEL: search_through_zext_1
+; CHECK-COMMON-NOT: uxt
+define i8 @search_through_zext_1(i8 zeroext %a, i8 zeroext %b, i16 zeroext %c) {
+entry:
+ %add = add nuw i8 %a, %b
+ %conv = zext i8 %add to i16
+ %cmp = icmp ult i16 %conv, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %sub = sub nuw i8 %b, %a
+ %conv2 = zext i8 %sub to i16
+ %cmp2 = icmp ugt i16 %conv2, %c
+ %res = select i1 %cmp2, i8 %a, i8 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %res, %if.then ]
+ ret i8 %retval
+}
+
+; TODO: We should be able to remove the uxtb here. The transform fails because
+; the icmp ugt uses an i32, which is too large... but this doesn't matter
+; because it won't be writing a large value to a register as a result.
+; CHECK-COMMON-LABEL: search_through_zext_2
+; CHECK-COMMON: uxtb
+; CHECK-COMMON: uxtb
+define i8 @search_through_zext_2(i8 zeroext %a, i8 zeroext %b, i16 zeroext %c, i32 %d) {
+entry:
+ %add = add nuw i8 %a, %b
+ %conv = zext i8 %add to i16
+ %cmp = icmp ult i16 %conv, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %sub = sub nuw i8 %b, %a
+ %conv2 = zext i8 %sub to i32
+ %cmp2 = icmp ugt i32 %conv2, %d
+ %res = select i1 %cmp2, i8 %a, i8 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %res, %if.then ]
+ ret i8 %retval
+}
+
+; TODO: We should be able to remove the uxtb here as all the calculations are
+; performed on i8s. The promotion of i8 to i16 and then the later truncation
+; results in the uxtb.
+; CHECK-COMMON-LABEL: search_through_zext_3
+; CHECK-COMMON: uxtb
+; CHECK-COMMON: uxtb
+define i8 @search_through_zext_3(i8 zeroext %a, i8 zeroext %b, i16 zeroext %c, i32 %d) {
+entry:
+ %add = add nuw i8 %a, %b
+ %conv = zext i8 %add to i16
+ %cmp = icmp ult i16 %conv, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %conv to i8
+ %sub = sub nuw i8 %b, %trunc
+ %conv2 = zext i8 %sub to i32
+ %cmp2 = icmp ugt i32 %conv2, %d
+ %res = select i1 %cmp2, i8 %a, i8 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %res, %if.then ]
+ ret i8 %retval
+}
+
+; TODO: We should be able to remove the uxt that gets introduced for %conv2
+; CHECK-COMMON-LABEL: search_through_zext_cmp
+; CHECK-COMMON: uxt
+define i8 @search_through_zext_cmp(i8 zeroext %a, i8 zeroext %b, i16 zeroext %c) {
+entry:
+ %cmp = icmp ne i8 %a, %b
+ %conv = zext i1 %cmp to i16
+ %cmp1 = icmp ult i16 %conv, %c
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then:
+ %sub = sub nuw i8 %b, %a
+ %conv2 = zext i8 %sub to i16
+ %cmp3 = icmp ugt i16 %conv2, %c
+ %res = select i1 %cmp3, i8 %a, i8 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %res, %if.then ]
+ ret i8 %retval
+}
+
+; CHECK-COMMON-LABEL: search_through_zext_load
+; CHECK-COMMON-NOT: uxt
+define i8 @search_through_zext_load(i8* %a, i8 zeroext %b, i16 zeroext %c) {
+entry:
+ %load = load i8, i8* %a
+ %conv = zext i8 %load to i16
+ %cmp1 = icmp ult i16 %conv, %c
+ br i1 %cmp1, label %if.then, label %if.end
+
+if.then:
+ %sub = sub nuw i8 %b, %load
+ %conv2 = zext i8 %sub to i16
+ %cmp3 = icmp ugt i16 %conv2, %c
+ %res = select i1 %cmp3, i8 %load, i8 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %res, %if.then ]
+ ret i8 %retval
+}
+
+; CHECK-COMMON-LABEL: trunc_sink_less_than
+; CHECK-COMMON-NOT: uxth
+; CHECK-COMMON: cmp
+; CHECK-COMMON: uxtb
+define i16 @trunc_sink_less_than_cmp(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i8 zeroext %d) {
+entry:
+ %sub = sub nuw i16 %b, %a
+ %cmp = icmp ult i16 %sub, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %sub to i8
+ %add = add nuw i8 %d, 1
+ %cmp2 = icmp ugt i8 %trunc, %add
+ %res = select i1 %cmp2, i16 %a, i16 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i16 [ 0, %entry ], [ %res, %if.then ]
+ ret i16 %retval
+}
+
+; TODO: We should be able to remove the uxth introduced to handle %sub
+; CHECK-COMMON-LABEL: trunc_sink_less_than_arith
+; CHECK-COMMON: uxth
+; CHECK-COMMON: cmp
+; CHECK-COMMON: uxtb
+define i16 @trunc_sink_less_than_arith(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i8 zeroext %d, i8 zeroext %e) {
+entry:
+ %sub = sub nuw i16 %b, %a
+ %cmp = icmp ult i16 %sub, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %sub to i8
+ %add = add nuw i8 %d, %trunc
+ %cmp2 = icmp ugt i8 %e, %add
+ %res = select i1 %cmp2, i16 %a, i16 %b
+ br label %if.end
+
+if.end:
+ %retval = phi i16 [ 0, %entry ], [ %res, %if.then ]
+ ret i16 %retval
+}
+
+; CHECK-COMMON-LABEL: trunc_sink_less_than_store
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: cmp
+; CHECK-COMMON-NOT: uxt
+define i16 @trunc_sink_less_than_store(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i8 zeroext %d, i8* %e) {
+entry:
+ %sub = sub nuw i16 %b, %a
+ %cmp = icmp ult i16 %sub, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %sub to i8
+ %add = add nuw i8 %d, %trunc
+ store i8 %add, i8* %e
+ br label %if.end
+
+if.end:
+ %retval = phi i16 [ 0, %entry ], [ %sub, %if.then ]
+ ret i16 %retval
+}
+
+; CHECK-COMMON-LABEL: trunc_sink_less_than_ret
+; CHECK-COMMON-NOT: uxt
+define i8 @trunc_sink_less_than_ret(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i8 zeroext %d, i8 zeroext %e) {
+entry:
+ %sub = sub nuw i16 %b, %a
+ %cmp = icmp ult i16 %sub, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %sub to i8
+ %add = add nuw i8 %d, %trunc
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %add, %if.then ]
+ ret i8 %retval
+}
+
+; CHECK-COMMON-LABEL: trunc_sink_less_than_zext_ret
+; CHECK-COMMON-NOT: uxth
+; CHECK-COMMON: sub
+; CHECK-COMMON: uxtb
+define zeroext i8 @trunc_sink_less_than_zext_ret(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i8 zeroext %d, i8 zeroext %e) {
+entry:
+ %sub = sub nuw i16 %b, %a
+ %cmp = icmp ult i16 %sub, %c
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %trunc = trunc i16 %sub to i8
+ %add = add nuw i8 %d, %trunc
+ br label %if.end
+
+if.end:
+ %retval = phi i8 [ 0, %entry ], [ %add, %if.then ]
+ ret i8 %retval
+}
+
+; CHECK-COMMON-LABEL: bitcast_i1
+; CHECK-COMMON-NOT: uxt
+define i32 @bitcast_i1(i16 zeroext %a, i32 %b, i32 %c) {
+entry:
+ %0 = bitcast i1 1 to i1
+ %1 = trunc i16 %a to i1
+ %cmp = icmp eq i1 %1, %0
+ br i1 %cmp, label %if.then, label %exit
+
+if.then:
+ %conv = zext i1 %0 to i16
+ %conv1 = zext i1 %1 to i16
+ %cmp1 = icmp uge i16 %conv, %conv1
+ %select = select i1 %cmp1, i32 %b, i32 %c
+ br label %exit
+
+exit:
+ %retval = phi i32 [ %select, %if.then ], [ 0, %entry ]
+ ret i32 %retval
+}
+
+; CHECK-COMMON-LABEL: search_back_through_trunc
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: cmp
+; CHECK-COMMON: strb
+; CHECK-COMMON: strb
+define void @search_back_through_trunc(i8* %a, i8* %b, i8* %c, i8* %d, i16* %e) {
+entry:
+ %0 = load i8, i8* %a, align 1
+ %conv106 = zext i8 %0 to i16
+ %shl = shl nuw i16 %conv106, 8
+ %1 = load i8, i8* %b, align 1
+ %conv108 = zext i8 %1 to i16
+ %or109 = or i16 %shl, %conv108
+ %2 = load i8, i8* %c, align 1
+ %conv119 = zext i8 %2 to i16
+ %shl120 = shl nuw i16 %conv119, 8
+ %3 = load i8, i8* %d, align 1
+ %conv122 = zext i8 %3 to i16
+ %or123 = or i16 %shl120, %conv122
+ %cmp133 = icmp eq i16 %or109, %or123
+ br i1 %cmp133, label %if.end183, label %if.else136
+
+if.else136:
+ %4 = load i16, i16* %e, align 2
+ %extract.t854 = trunc i16 %4 to i8
+ %extract856 = lshr i16 %4, 8
+ %extract.t857 = trunc i16 %extract856 to i8
+ br label %if.end183
+
+if.end183:
+ %w.0.off0 = phi i8 [ %extract.t854, %if.else136 ], [ %1, %entry ]
+ %w.0.off8 = phi i8 [ %extract.t857, %if.else136 ], [ %2, %entry ]
+ store i8 %w.0.off8, i8* %c, align 1
+ store i8 %w.0.off0, i8* %d, align 1
+ ret void
+}
+
+@c = common dso_local local_unnamed_addr global i16 0, align 2
+@b = common dso_local local_unnamed_addr global i16 0, align 2
+@f = common dso_local local_unnamed_addr global i32 0, align 4
+@e = common dso_local local_unnamed_addr global i8 0, align 1
+@a = common dso_local local_unnamed_addr global i8 0, align 1
+@d = common dso_local local_unnamed_addr global i32 0, align 4
+
+; CHECK-LABEL: and_trunc
+; CHECK: ldrh
+; CHECK: sxth
+; CHECK: uxtb
+define void @and_trunc_two_zext() {
+entry:
+ %0 = load i16, i16* @c, align 2
+ %1 = load i16, i16* @b, align 2
+ %conv = sext i16 %1 to i32
+ store i32 %conv, i32* @f, align 4
+ %2 = trunc i16 %1 to i8
+ %conv1 = and i8 %2, 1
+ store i8 %conv1, i8* @e, align 1
+ %3 = load i8, i8* @a, align 1
+ %narrow = mul nuw i8 %3, %conv1
+ %mul = zext i8 %narrow to i32
+ store i32 %mul, i32* @d, align 4
+ %4 = zext i8 %narrow to i16
+ %conv5 = or i16 %0, %4
+ %tobool = icmp eq i16 %conv5, 0
+ br i1 %tobool, label %if.end, label %for.cond
+
+for.cond:
+ br label %for.cond
+
+if.end:
+ ret void
+}
+
+; CHECK-LABEL: zext_urem_trunc
+; CHECK-NOT: uxt
+define void @zext_urem_trunc() {
+entry:
+ %0 = load i16, i16* @c, align 2
+ %cmp = icmp eq i16 %0, 0
+ %1 = load i8, i8* @e, align 1
+ br i1 %cmp, label %cond.end, label %cond.false
+
+cond.false:
+ %rem.lhs.trunc = zext i8 %1 to i16
+ %rem7 = urem i16 %rem.lhs.trunc, %0
+ %rem.zext = trunc i16 %rem7 to i8
+ br label %cond.end
+
+cond.end:
+ %cond = phi i8 [ %rem.zext, %cond.false ], [ %1, %entry ]
+ store i8 %cond, i8* @a, align 1
+ ret void
+}
diff --git a/test/CodeGen/ARM/arm-cgp-icmps.ll b/test/CodeGen/ARM/CGP/arm-cgp-icmps.ll
index 18df13f732ef..8ff7db51e65f 100644
--- a/test/CodeGen/ARM/arm-cgp-icmps.ll
+++ b/test/CodeGen/ARM/CGP/arm-cgp-icmps.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumbv8.main -mcpu=cortex-m33 %s -arm-disable-cgp=false -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
+; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 %s -arm-disable-cgp=false -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP
; RUN: llc -mtriple=thumbv8 %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -arm-enable-scalar-dsp-imms=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP-IMM
@@ -158,39 +158,6 @@ entry:
ret i32 %res
}
-; CHECK-COMMON-LABEL: dsp_imm2
-; CHECK-COMMON: add r0, r1
-; CHECK-DSP-NEXT: ldrh r1, [r3]
-; CHECK-DSP-NEXT: ldrh r2, [r2]
-; CHECK-DSP-NEXT: subs r1, r1, r0
-; CHECK-DSP-NEXT: add r0, r2
-; CHECK-DSP-NEXT: uxth r3, r1
-; CHECK-DSP-NEXT: uxth r2, r0
-; CHECK-DSP-NEXT: cmp r2, r3
-
-; CHECK-DSP-IMM: movs r1, #0
-; CHECK-DSP-IMM-NEXT: uxth r0, r0
-; CHECK-DSP-IMM-NEXT: usub16 r1, r1, r0
-; CHECK-DSP-IMM-NEXT: ldrh r0, [r2]
-; CHECK-DSP-IMM-NEXT: ldrh r3, [r3]
-; CHECK-DSP-IMM-NEXT: usub16 r0, r0, r1
-; CHECK-DSP-IMM-NEXT: uadd16 r1, r3, r1
-; CHECK-DSP-IMM-NEXT: cmp r0, r1
-
-define i16 @dsp_imm2(i32 %arg0, i32 %arg1, i16* %gep0, i16* %gep1) {
-entry:
- %add0 = add i32 %arg0, %arg1
- %conv0 = trunc i32 %add0 to i16
- %sub0 = sub i16 0, %conv0
- %load0 = load i16, i16* %gep0, align 2
- %load1 = load i16, i16* %gep1, align 2
- %sub1 = sub i16 %load0, %sub0
- %add1 = add i16 %load1, %sub0
- %cmp = icmp ult i16 %sub1, %add1
- %res = select i1 %cmp, i16 %add1, i16 %sub1
- ret i16 %res
-}
-
; CHECK-COMMON-LABEL: dsp_var:
; CHECK-COMMON: eors r1, r0
; CHECK-COMMON: and r2, r0, #7
@@ -267,109 +234,6 @@ entry:
ret i32 %res
}
-; CHECK-COMMON-LABEL: icmp_i32_zext:
-; CHECK-COMMON: ldrb [[LD:r[^ ]+]], [r0]
-; CHECK-COMMON: subs [[SUB:r[^ ]+]], [[LD]], #1
-; CHECK-COMMON-NOT: uxt
-; CHECK-COMMON: cmp [[LD]], [[SUB]]
-; CHECK-COMMON-NOT: uxt
-define i8 @icmp_i32_zext(i8* %ptr) {
-entry:
- %gep = getelementptr inbounds i8, i8* %ptr, i32 0
- %0 = load i8, i8* %gep, align 1
- %1 = sub nuw nsw i8 %0, 1
- %conv44 = zext i8 %0 to i32
- br label %preheader
-
-preheader:
- br label %body
-
-body:
- %2 = phi i8 [ %1, %preheader ], [ %3, %if.end ]
- %si.0274 = phi i32 [ %conv44, %preheader ], [ %inc, %if.end ]
- %conv51266 = zext i8 %2 to i32
- %cmp52267 = icmp eq i32 %si.0274, %conv51266
- br i1 %cmp52267, label %if.end, label %exit
-
-if.end:
- %inc = add i32 %si.0274, 1
- %gep1 = getelementptr inbounds i8, i8* %ptr, i32 %inc
- %3 = load i8, i8* %gep1, align 1
- br label %body
-
-exit:
- ret i8 %2
-}
-
-@d_uch = hidden local_unnamed_addr global [16 x i8] zeroinitializer, align 1
-@sh1 = hidden local_unnamed_addr global i16 0, align 2
-@d_sh = hidden local_unnamed_addr global [16 x i16] zeroinitializer, align 2
-
-; CHECK-COMMON-LABEL: icmp_sext_zext_store_i8_i16
-; CHECK-NODSP: ldrb [[BYTE:r[^ ]+]],
-; CHECK-NODSP: strh [[BYTE]],
-; CHECK-NODSP: ldrsh.w
-define i32 @icmp_sext_zext_store_i8_i16() {
-entry:
- %0 = load i8, i8* getelementptr inbounds ([16 x i8], [16 x i8]* @d_uch, i32 0, i32 2), align 1
- %conv = zext i8 %0 to i16
- store i16 %conv, i16* @sh1, align 2
- %conv1 = zext i8 %0 to i32
- %1 = load i16, i16* getelementptr inbounds ([16 x i16], [16 x i16]* @d_sh, i32 0, i32 2), align 2
- %conv2 = sext i16 %1 to i32
- %cmp = icmp eq i32 %conv1, %conv2
- %conv3 = zext i1 %cmp to i32
- ret i32 %conv3
-}
-
-; CHECK-COMMON-LABEL: or_icmp_ugt:
-; CHECK-COMMON: ldrb [[LD:r[^ ]+]], [r1]
-; CHECK-COMMON: subs [[SUB:r[^ ]+]], #1
-; CHECK-COMMON-NOT: uxtb
-; CHECK-COMMON: cmp [[SUB]], #3
-define i1 @or_icmp_ugt(i32 %arg, i8* %ptr) {
-entry:
- %0 = load i8, i8* %ptr
- %1 = zext i8 %0 to i32
- %mul = shl nuw nsw i32 %1, 1
- %add0 = add nuw nsw i32 %mul, 6
- %cmp0 = icmp ne i32 %arg, %add0
- %add1 = add i8 %0, -1
- %cmp1 = icmp ugt i8 %add1, 3
- %or = or i1 %cmp0, %cmp1
- ret i1 %or
-}
-
-; CHECK-COMMON-LABEL: icmp_switch_trunc:
-; CHECK-COMMON-NOT: uxt
-define i16 @icmp_switch_trunc(i16 zeroext %arg) {
-entry:
- %conv = add nuw i16 %arg, 15
- %mul = mul nuw nsw i16 %conv, 3
- %trunc = trunc i16 %arg to i3
- switch i3 %trunc, label %default [
- i3 0, label %sw.bb
- i3 1, label %sw.bb.i
- ]
-
-sw.bb:
- %cmp0 = icmp ult i16 %mul, 127
- %select = select i1 %cmp0, i16 %mul, i16 127
- br label %exit
-
-sw.bb.i:
- %cmp1 = icmp ugt i16 %mul, 34
- %select.i = select i1 %cmp1, i16 %mul, i16 34
- br label %exit
-
-default:
- br label %exit
-
-exit:
- %res = phi i16 [ %select, %sw.bb ], [ %select.i, %sw.bb.i ], [ %mul, %default ]
- ret i16 %res
-}
-
; CHECK-COMMON-LABEL: icmp_eq_minus_one
; CHECK-COMMON: cmp r0, #255
define i32 @icmp_eq_minus_one(i8* %ptr) {
@@ -392,77 +256,77 @@ define i32 @icmp_not(i16 zeroext %arg0, i16 zeroext %arg1) {
ret i32 %res
}
-; CHECK-COMMON-LABEL: mul_wrap
-; CHECK-COMMON: mul
-; CHECK-COMMON: uxth
-; CHECK-COMMON: cmp
-define i16 @mul_wrap(i16 %arg0, i16 %arg1) {
- %mul = mul i16 %arg0, %arg1
- %cmp = icmp eq i16 %mul, 1
- %res = select i1 %cmp, i16 %arg0, i16 47
- ret i16 %res
+; CHECK-COMMON-LABEL: icmp_i1
+; CHECK-COMMON-NOT: uxt
+define i32 @icmp_i1(i1* %arg0, i1 zeroext %arg1, i32 %a, i32 %b) {
+entry:
+ %load = load i1, i1* %arg0
+ %not = xor i1 %load, 1
+ %cmp = icmp eq i1 %arg1, %not
+ %res = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %res
}
-; CHECK-COMMON-LABEL: shl_wrap
-; CHECK-COMMON: lsl
-; CHECK-COMMON: uxth
+; CHECK-COMMON-LABEL: icmp_i7
+; CHECK-COMMON: ldrb
; CHECK-COMMON: cmp
-define i16 @shl_wrap(i16 %arg0) {
- %mul = shl i16 %arg0, 4
- %cmp = icmp eq i16 %mul, 1
- %res = select i1 %cmp, i16 %arg0, i16 47
- ret i16 %res
+define i32 @icmp_i7(i7* %arg0, i7 zeroext %arg1, i32 %a, i32 %b) {
+entry:
+ %load = load i7, i7* %arg0
+ %add = add nuw i7 %load, 1
+ %cmp = icmp ult i7 %arg1, %add
+ %res = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %res
}
-; CHECK-COMMON-LABEL: add_wrap
-; CHECK-COMMON: add
-; CHECK-COMMON: uxth
-; CHECK-COMMON: cmp
-define i16 @add_wrap(i16 %arg0, i16 %arg1) {
- %add = add i16 %arg0, 128
- %cmp = icmp eq i16 %add, %arg1
- %res = select i1 %cmp, i16 %arg0, i16 1
- ret i16 %res
+; CHECK-COMMON-LABEL: icmp_i15
+; CHECK-COMMON: movw [[MINUS_ONE:r[0-9]+]], #32767
+define i32 @icmp_i15(i15 zeroext %arg0, i15 zeroext %arg1) {
+ %xor = xor i15 %arg0, -1
+ %cmp = icmp eq i15 %xor, %arg1
+ %res = select i1 %cmp, i32 21, i32 42
+ ret i32 %res
}
-; CHECK-COMMON-LABEL: sub_wrap
-; CHECK-COMMON: sub
-; CHECK-COMMON: uxth
-; CHECK-COMMON: cmp
-define i16 @sub_wrap(i16 %arg0, i16 %arg1, i16 %arg2) {
- %sub = sub i16 %arg0, %arg2
- %cmp = icmp eq i16 %sub, %arg1
- %res = select i1 %cmp, i16 %arg0, i16 1
- ret i16 %res
-}
+; CHECK-COMMON-LABEL: icmp_minus_imm
+; CHECK-NODSP: subs [[SUB:r[0-9]+]],
+; CHECK-NODSP: uxtb [[UXT:r[0-9]+]],
+; CHECK-NODSP: cmp [[UXT]], #251
-; CHECK-COMMON-LABEL: urem_trunc_icmps
-; CHECK-COMMON-NOT: uxt
-define void @urem_trunc_icmps(i16** %in, i32* %g, i32* %k) {
-entry:
- %ptr = load i16*, i16** %in, align 4
- %ld = load i16, i16* %ptr, align 2
- %cmp.i = icmp eq i16 %ld, 0
- br i1 %cmp.i, label %exit, label %cond.false.i
+; CHECK-DSP: subs [[SUB:r[0-9]+]],
+; CHECK-DSP: uxtb [[UXT:r[0-9]+]],
+; CHECK-DSP: cmp [[UXT]], #251
-cond.false.i:
- %rem = urem i16 5, %ld
- %extract.t = trunc i16 %rem to i8
- br label %body
+; CHECK-DSP-IMM: ldrb [[A:r[0-9]+]],
+; CHECK-DSP-IMM: movs [[MINUS_7:r[0-9]+]], #249
+; CHECK-DSP-IMM: uadd8 [[RES:r[0-9]+]], [[A]], [[MINUS_7]]
+; CHECK-DSP-IMM: cmp [[RES]], #251
+define i32 @icmp_minus_imm(i8* %a) {
+entry:
+ %0 = load i8, i8* %a, align 1
+ %add.i = add i8 %0, -7
+ %cmp = icmp ugt i8 %add.i, -5
+ %conv1 = zext i1 %cmp to i32
+ ret i32 %conv1
+}
-body:
- %cond.in.i.off0 = phi i8 [ %extract.t, %cond.false.i ], [ %add, %for.inc ]
- %cmp = icmp ugt i8 %cond.in.i.off0, 7
- %conv5 = zext i1 %cmp to i32
- store i32 %conv5, i32* %g, align 4
- %.pr = load i32, i32* %k, align 4
- %tobool13150 = icmp eq i32 %.pr, 0
- br i1 %tobool13150, label %for.inc, label %exit
+; CHECK-COMMON-LABEL: mul_with_neg_imm
+; CHECK-COMMON-NOT: uxtb
+; CHECK-COMMON: and [[BIT0:r[0-9]+]], r0, #1
+; CHECK-COMMON: add.w [[MUL32:r[0-9]+]], [[BIT0]], [[BIT0]], lsl #5
+; CHECK-COMMON: cmp.w r0, [[MUL32]], lsl #2
+define void @mul_with_neg_imm(i32, i32* %b) {
+entry:
+ %1 = trunc i32 %0 to i8
+ %2 = and i8 %1, 1
+ %conv.i = mul nuw i8 %2, -124
+ %tobool = icmp eq i8 %conv.i, 0
+ br i1 %tobool, label %if.end, label %if.then
-for.inc:
- %add = add nuw i8 %cond.in.i.off0, 1
- br label %body
+if.then:
+ store i32 0, i32* %b, align 4
+ br label %if.end
-exit:
+if.end:
ret void
}
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-overflow.ll b/test/CodeGen/ARM/CGP/arm-cgp-overflow.ll
new file mode 100644
index 000000000000..8e10876c0b10
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-overflow.ll
@@ -0,0 +1,232 @@
+; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 %s -arm-disable-cgp=false -o - | FileCheck %s
+
+; CHECK-LABEL: overflow_add
+; CHECK: add
+; CHECK: uxth
+; CHECK: cmp
+define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
+ %add = add i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+; CHECK-LABEL: overflow_sub
+; CHECK: sub
+; CHECK: uxth
+; CHECK: cmp
+define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
+ %add = sub i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+; CHECK-LABEL: overflow_mul
+; CHECK: mul
+; CHECK: uxth
+; CHECK: cmp
+define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
+ %add = mul i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+; CHECK-LABEL: overflow_shl
+; CHECK: lsl
+; CHECK: uxth
+; CHECK: cmp
+define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
+ %add = shl i16 %a, %b
+ %or = or i16 %add, 1
+ %cmp = icmp ugt i16 %or, 1024
+ %res = select i1 %cmp, i16 2, i16 5
+ ret i16 %res
+}
+
+; CHECK-LABEL: overflow_add_no_consts:
+; CHECK: add r0, r1
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], r2
+; CHECK: movhi r0, #8
+define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
+ %add = add i8 %a, %b
+ %cmp = icmp ugt i8 %add, %limit
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: overflow_add_const_limit:
+; CHECK: add r0, r1
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #128
+; CHECK: movhi r0, #8
+define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
+ %add = add i8 %a, %b
+ %cmp = icmp ugt i8 %add, 128
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: overflow_add_positive_const_limit:
+; CHECK: adds r0, #1
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #128
+; CHECK: movhi r0, #8
+define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
+ %add = add i8 %a, 1
+ %cmp = icmp ugt i8 %add, 128
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: unsafe_add_underflow:
+; CHECK: subs r0, #2
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #255
+; CHECK: moveq r0, #8
+define i32 @unsafe_add_underflow(i8 zeroext %a) {
+ %add = add i8 %a, -2
+ %cmp = icmp ugt i8 %add, 254
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: safe_add_underflow:
+; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #1
+; CHECK-NOT: uxtb
+; CHECK: cmp [[MINUS_1]], #254
+; CHECK: movhi r0, #8
+define i32 @safe_add_underflow(i8 zeroext %a) {
+ %add = add i8 %a, -1
+ %cmp = icmp ugt i8 %add, 254
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: safe_add_underflow_neg:
+; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #2
+; CHECK-NOT: uxtb
+; CHECK: cmp [[MINUS_1]], #251
+; CHECK: movlo r0, #8
+define i32 @safe_add_underflow_neg(i8 zeroext %a) {
+ %add = add i8 %a, -2
+ %cmp = icmp ule i8 %add, -6
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: overflow_sub_negative_const_limit:
+; CHECK: adds r0, #1
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #128
+; CHECK: movhi r0, #8
+define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
+ %sub = sub i8 %a, -1
+ %cmp = icmp ugt i8 %sub, 128
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: unsafe_sub_underflow:
+; CHECK: subs r0, #6
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #250
+; CHECK: movhi r0, #8
+define i32 @unsafe_sub_underflow(i8 zeroext %a) {
+ %sub = sub i8 %a, 6
+ %cmp = icmp ugt i8 %sub, 250
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: safe_sub_underflow:
+; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #1
+; CHECK-NOT: uxtb
+; CHECK: cmp [[MINUS_1]], #255
+; CHECK: movlo r0, #8
+define i32 @safe_sub_underflow(i8 zeroext %a) {
+ %sub = sub i8 %a, 1
+ %cmp = icmp ule i8 %sub, 254
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: safe_sub_underflow_neg
+; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #4
+; CHECK-NOT: uxtb
+; CHECK: cmp [[MINUS_1]], #250
+; CHECK: movhi r0, #8
+define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
+ %sub = sub i8 %a, 4
+ %cmp = icmp uge i8 %sub, -5
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: unsafe_sub_underflow_neg
+; CHECK: subs r0, #4
+; CHECK: uxtb [[EXT:r[0-9]+]], r0
+; CHECK: cmp [[EXT]], #253
+; CHECK: movlo r0, #8
+define i32 @unsafe_sub_underflow_neg(i8 zeroext %a) {
+ %sub = sub i8 %a, 4
+ %cmp = icmp ult i8 %sub, -3
+ %res = select i1 %cmp, i32 8, i32 16
+ ret i32 %res
+}
+; CHECK-LABEL: safe_sub_imm_var
+; CHECK: rsb.w [[RSUB:r[0-9]+]], r0, #248
+; CHECK-NOT: uxt
+; CHECK: cmp [[RSUB]], #252
+define i32 @safe_sub_imm_var(i8* %b) {
+entry:
+ %0 = load i8, i8* %b, align 1
+ %sub = sub nuw nsw i8 -8, %0
+ %cmp = icmp ugt i8 %sub, 252
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
+
+; CHECK-LABEL: safe_sub_var_imm
+; CHECK: add.w [[ADD:r[0-9]+]], r0, #8
+; CHECK-NOT: uxt
+; CHECK: cmp [[ADD]], #252
+define i32 @safe_sub_var_imm(i8* %b) {
+entry:
+ %0 = load i8, i8* %b, align 1
+ %sub = sub nuw nsw i8 %0, -8
+ %cmp = icmp ugt i8 %sub, 252
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
+
+; CHECK-LABEL: safe_add_imm_var
+; CHECK: add.w [[ADD:r[0-9]+]], r0, #129
+; CHECK-NOT: uxt
+; CHECK: cmp [[ADD]], #127
+define i32 @safe_add_imm_var(i8* %b) {
+entry:
+ %0 = load i8, i8* %b, align 1
+ %add = add nuw nsw i8 -127, %0
+ %cmp = icmp ugt i8 %add, 127
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
+
+; CHECK-LABEL: safe_add_var_imm
+; CHECK: sub.w [[SUB:r[0-9]+]], r0, #127
+; CHECK-NOT: uxt
+; CHECK: cmp [[SUB]], #127
+define i32 @safe_add_var_imm(i8* %b) {
+entry:
+ %0 = load i8, i8* %b, align 1
+ %add = add nuw nsw i8 %0, -127
+ %cmp = icmp ugt i8 %add, 127
+ %conv4 = zext i1 %cmp to i32
+ ret i32 %conv4
+}
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-phis-ret.ll b/test/CodeGen/ARM/CGP/arm-cgp-phis-ret.ll
new file mode 100644
index 000000000000..e7adc5ae2491
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-phis-ret.ll
@@ -0,0 +1,186 @@
+; RUN: llc -mtriple=thumbv7m -arm-disable-cgp=false %s -o - | FileCheck %s --check-prefix=CHECK-COMMON
+; RUN: llc -mtriple=thumbv8m.main -arm-disable-cgp=false %s -o - | FileCheck %s --check-prefix=CHECK-COMMON
+; RUN: llc -mtriple=thumbv8m.main -arm-disable-cgp=false -arm-enable-scalar-dsp=true -mcpu=cortex-m33 %s -o - | FileCheck %s --check-prefix=CHECK-COMMON
+; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -arm-enable-scalar-dsp-imms=true -o - | FileCheck %s --check-prefix=CHECK-COMMON
+
+; Test that ARMCodeGenPrepare can handle:
+; - loops
+; - call operands
+; - call return values
+; - ret instructions
+; We use nuw on the arithmetic instructions to avoid complications.
+
+; Check that the arguments are extended but then nothing else is.
+; This also ensures that the pass can handle loops.
+; CHECK-COMMON-LABEL: phi_feeding_phi_args
+; CHECK-COMMON: uxtb
+; CHECK-COMMON: uxtb
+; CHECK-COMMON-NOT: uxtb
+define void @phi_feeding_phi_args(i8 %a, i8 %b) {
+entry:
+ %0 = icmp ugt i8 %a, %b
+ br i1 %0, label %preheader, label %empty
+
+empty:
+ br label %preheader
+
+preheader:
+ %1 = phi i8 [ %a, %entry ], [ %b, %empty ]
+ br label %loop
+
+loop:
+ %val = phi i8 [ %1, %preheader ], [ %inc2, %if.end ]
+ %cmp = icmp ult i8 %val, 254
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %inc = sub nuw i8 %val, 2
+ br label %if.end
+
+if.else:
+ %inc1 = shl nuw i8 %val, 1
+ br label %if.end
+
+if.end:
+ %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
+ %cmp1 = icmp eq i8 %inc2, 255
+ br i1 %cmp1, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Same as above, but as the args are zeroext, we shouldn't see any uxts.
+; CHECK-COMMON-LABEL: phi_feeding_phi_zeroext_args
+; CHECK-COMMON-NOT: uxt
+define void @phi_feeding_phi_zeroext_args(i8 zeroext %a, i8 zeroext %b) {
+entry:
+ %0 = icmp ugt i8 %a, %b
+ br i1 %0, label %preheader, label %empty
+
+empty:
+ br label %preheader
+
+preheader:
+ %1 = phi i8 [ %a, %entry ], [ %b, %empty ]
+ br label %loop
+
+loop:
+ %val = phi i8 [ %1, %preheader ], [ %inc2, %if.end ]
+ %cmp = icmp ult i8 %val, 254
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %inc = sub nuw i8 %val, 2
+ br label %if.end
+
+if.else:
+ %inc1 = shl nuw i8 %val, 1
+ br label %if.end
+
+if.end:
+ %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
+ %cmp1 = icmp eq i8 %inc2, 255
+ br i1 %cmp1, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Just check that phis also work with i16s.
+; CHECK-COMMON-LABEL: phi_i16:
+; CHECK-COMMON-NOT: uxt
+define void @phi_i16() {
+entry:
+ br label %loop
+
+loop:
+ %val = phi i16 [ 0, %entry ], [ %inc2, %if.end ]
+ %cmp = icmp ult i16 %val, 128
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %inc = add nuw i16 %val, 2
+ br label %if.end
+
+if.else:
+ %inc1 = add nuw i16 %val, 1
+ br label %if.end
+
+if.end:
+ %inc2 = phi i16 [ %inc, %if.then], [ %inc1, %if.else ]
+ %cmp1 = icmp ult i16 %inc2, 253
+ br i1 %cmp1, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; CHECK-COMMON-LABEL: ret_i8
+; CHECK-COMMON-NOT: uxt
+define i8 @ret_i8() {
+entry:
+ br label %loop
+
+loop:
+ %val = phi i8 [ 0, %entry ], [ %inc2, %if.end ]
+ %cmp = icmp ult i8 %val, 128
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %inc = add nuw i8 %val, 2
+ br label %if.end
+
+if.else:
+ %inc1 = add nuw i8 %val, 1
+ br label %if.end
+
+if.end:
+ %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
+ %cmp1 = icmp ult i8 %inc2, 253
+ br i1 %cmp1, label %exit, label %loop
+
+exit:
+ ret i8 %inc2
+}
+
+; CHECK-COMMON-LABEL: phi_multiple_undefs
+; CHECK-COMMON-NOT: uxt
+define i16 @phi_multiple_undefs(i16 zeroext %arg) {
+entry:
+ br label %loop
+
+loop:
+ %val = phi i16 [ undef, %entry ], [ %inc2, %if.end ]
+ %cmp = icmp ult i16 %val, 128
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %inc = add nuw i16 %val, 2
+ br label %if.end
+
+if.else:
+ %inc1 = add nuw i16 %val, 1
+ br label %if.end
+
+if.end:
+ %inc2 = phi i16 [ %inc, %if.then], [ %inc1, %if.else ]
+ %unrelated = phi i16 [ undef, %if.then ], [ %arg, %if.else ]
+ %cmp1 = icmp ult i16 %inc2, 253
+ br i1 %cmp1, label %loop, label %exit
+
+exit:
+ ret i16 %unrelated
+}
+
+; CHECK-COMMON-LABEL: promote_arg_return
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: strb
+define i16 @promote_arg_return(i16 zeroext %arg1, i16 zeroext %arg2, i8* %res) {
+ %add = add nuw i16 %arg1, 15
+ %mul = mul nuw nsw i16 %add, 3
+ %cmp = icmp ult i16 %mul, %arg2
+ %conv = zext i1 %cmp to i8
+ store i8 %conv, i8* %res
+ ret i16 %arg1
+}
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-pointers.ll b/test/CodeGen/ARM/CGP/arm-cgp-pointers.ll
new file mode 100644
index 000000000000..e7f800232d45
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-pointers.ll
@@ -0,0 +1,135 @@
+; RUN: llc -mtriple=thumbv8 -arm-disable-cgp=false %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv8 -arm-disable-cgp=false %s -o - | FileCheck %s
+
+; CHECK-LABEL: phi_pointers
+; CHECK-NOT: uxt
+define void @phi_pointers(i16* %a, i16* %b, i8 zeroext %M, i8 zeroext %N) {
+entry:
+ %add = add nuw i8 %M, 1
+ %and = and i8 %add, 1
+ %cmp = icmp ugt i8 %add, %N
+ %base = select i1 %cmp, i16* %a, i16* %b
+ %other = select i1 %cmp, i16* %b, i16* %b
+ br label %loop
+
+loop:
+ %ptr = phi i16* [ %base, %entry ], [ %gep, %loop ]
+ %idx = phi i8 [ %and, %entry ], [ %inc, %loop ]
+ %load = load i16, i16* %ptr, align 2
+ %inc = add nuw nsw i8 %idx, 1
+ %gep = getelementptr inbounds i16, i16* %ptr, i8 %inc
+ %cond = icmp eq i16* %gep, %other
+ br i1 %cond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; CHECK-LABEL: phi_pointers_null
+; CHECK-NOT: uxt
+define void @phi_pointers_null(i16* %a, i16* %b, i8 zeroext %M, i8 zeroext %N) {
+entry:
+ %add = add nuw i8 %M, 1
+ %and = and i8 %add, 1
+ %cmp = icmp ugt i8 %add, %N
+ %base = select i1 %cmp, i16* %a, i16* %b
+ %other = select i1 %cmp, i16* %b, i16* %b
+ %cmp.1 = icmp eq i16* %base, %other
+ br i1 %cmp.1, label %fail, label %loop
+
+fail:
+ br label %loop
+
+loop:
+ %ptr = phi i16* [ %base, %entry ], [ null, %fail ], [ %gep, %if.then ]
+ %idx = phi i8 [ %and, %entry ], [ 0, %fail ], [ %inc, %if.then ]
+ %undef = icmp eq i16* %ptr, undef
+ br i1 %undef, label %exit, label %if.then
+
+if.then:
+ %load = load i16, i16* %ptr, align 2
+ %inc = add nuw nsw i8 %idx, 1
+ %gep = getelementptr inbounds i16, i16* %ptr, i8 %inc
+ %cond = icmp eq i16* %gep, %other
+ br i1 %cond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+declare i8 @do_something_with_ptr(i8, i16*)
+
+; CHECK-LABEL: call_pointer
+; CHECK-NOT: uxt
+define i8 @call_pointer(i8 zeroext %x, i8 zeroext %y, i16* %a, i16* %b) {
+ %or = or i8 %x, %y
+ %shr = lshr i8 %or, 1
+ %add = add nuw i8 %shr, 2
+ %cmp = icmp ne i8 %add, 0
+ %ptr = select i1 %cmp, i16* %a, i16* %b
+ %call = tail call zeroext i8 @do_something_with_ptr(i8 %shr, i16* %ptr)
+ ret i8 %call
+}
+
+; CHECK-LABEL: pointer_to_pointer
+; CHECK-NOT: uxt
+define i16 @pointer_to_pointer(i16** %arg, i16 zeroext %limit) {
+entry:
+ %addr = load i16*, i16** %arg
+ %val = load i16, i16* %addr
+ %add = add nuw i16 %val, 7
+ %cmp = icmp ult i16 %add, 256
+ %res = select i1 %cmp, i16 128, i16 255
+ ret i16 %res
+}
+
+; CHECK-LABEL: gep_2d_array
+; CHECK-NOT: uxt
+define i8 @gep_2d_array(i8** %a, i8 zeroext %arg) {
+entry:
+ %arrayidx.us = getelementptr inbounds i8*, i8** %a, i32 0
+ %0 = load i8*, i8** %arrayidx.us, align 4
+ %1 = load i8, i8* %0, align 1
+ %sub = sub nuw i8 %1, 1
+ %cmp = icmp ult i8 %sub, %arg
+ %res = select i1 %cmp, i8 27, i8 54
+ ret i8 %res
+}
+
+; CHECK-LABEL: gep_2d_array_loop
+; CHECK-NOT: uxt
+define void @gep_2d_array_loop(i16** nocapture readonly %a, i16** nocapture readonly %b, i32 %N) {
+entry:
+ %cmp30 = icmp eq i32 %N, 0
+ br i1 %cmp30, label %for.cond.cleanup, label %for.cond1.preheader.us
+
+for.cond1.preheader.us:
+ %y.031.us = phi i32 [ %inc13.us, %for.cond1.for.cond.cleanup3_crit_edge.us ], [ 0, %entry ]
+ br label %for.body4.us
+
+for.body4.us:
+ %x.029.us = phi i32 [ 0, %for.cond1.preheader.us ], [ %inc.us, %for.body4.us ]
+ %arrayidx.us = getelementptr inbounds i16*, i16** %a, i32 %x.029.us
+ %0 = load i16*, i16** %arrayidx.us, align 4
+ %arrayidx5.us = getelementptr inbounds i16, i16* %0, i32 %y.031.us
+ %1 = load i16, i16* %arrayidx5.us, align 2
+ %dec.us = add nuw i16 %1, -1
+ %cmp6.us = icmp ult i16 %dec.us, 16383
+ %shl.us = shl nuw i16 %dec.us, 2
+ %spec.select.us = select i1 %cmp6.us, i16 %shl.us, i16 %dec.us
+ %arrayidx10.us = getelementptr inbounds i16*, i16** %b, i32 %x.029.us
+ %2 = load i16*, i16** %arrayidx10.us, align 4
+ %arrayidx11.us = getelementptr inbounds i16, i16* %2, i32 %y.031.us
+ store i16 %spec.select.us, i16* %arrayidx11.us, align 2
+ %inc.us = add nuw i32 %x.029.us, 1
+ %exitcond = icmp eq i32 %inc.us, %N
+ br i1 %exitcond, label %for.cond1.for.cond.cleanup3_crit_edge.us, label %for.body4.us
+
+for.cond1.for.cond.cleanup3_crit_edge.us:
+ %inc13.us = add nuw i32 %y.031.us, 1
+ %exitcond32 = icmp eq i32 %inc13.us, %N
+ br i1 %exitcond32, label %for.cond.cleanup, label %for.cond1.preheader.us
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-signed-icmps.ll b/test/CodeGen/ARM/CGP/arm-cgp-signed-icmps.ll
new file mode 100644
index 000000000000..98794f500d4c
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-signed-icmps.ll
@@ -0,0 +1,108 @@
+; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-disable-cgp=false -mattr=-use-misched %s -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
+; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP
+; RUN: llc -mtriple=thumbv8 %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -arm-enable-scalar-dsp-imms=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP-IMM
+
+; CHECK-COMMON-LABEL: eq_sgt
+; CHECK-NODSP: add
+; CHECK-NODSP: uxtb
+; CHECK-NODSP: sxtb
+; CHECK-NODSP: cmp
+; CHECK-NODSP: sub
+; CHECK-NODSP: sxtb
+; CHECK-NODSP: cmp
+
+; CHECK-DSP: uadd8
+; CHECK-DSP: sub
+; CHECK-DSP: cmp
+; CHECK-DSP: sxtb
+; CHECK-DSP: sxtb
+; CHECK-DSP: cmp
+
+; CHECK-DSP-IMM: uadd8 [[ADD:r[0-9]+]],
+; CHECK-DSP-IMM: cmp [[ADD]],
+; CHECK-DSP-IMM: subs [[SUB:r[0-9]+]],
+; CHECK-DSP-IMM: sxtb [[SEXT0:r[0-9]+]], [[ADD]]
+; CHECK-DSP-IMM: sxtb [[SEXT1:r[0-9]+]], [[SUB]]
+; CHECK-DSP-IMM: cmp [[SEXT1]], [[SEXT0]]
+define i8 @eq_sgt(i8* %x, i8 *%y, i8 zeroext %z) {
+entry:
+ %load0 = load i8, i8* %x, align 1
+ %load1 = load i8, i8* %y, align 1
+ %add = add i8 %load0, %z
+ %sub = sub i8 %load1, 1
+ %cmp = icmp eq i8 %add, 200
+ %cmp1 = icmp sgt i8 %sub, %add
+ %res0 = select i1 %cmp, i8 35, i8 47
+ %res1 = select i1 %cmp1, i8 %res0, i8 %sub
+ ret i8 %res1
+}
+
+; CHECK-COMMON-LABEL: ugt_slt
+; CHECK-NODSP: sub
+; CHECK-NODSP: sxth
+; CHECK-NODSP: uxth
+; CHECK-NODSP: add
+; CHECK-NODSP: sxth
+; CHECK-NODSP: cmp
+; CHECK-NODSP: cmp
+
+; CHECK-DSP: sub
+; CHECK-DSP: sxth
+; CHECK-DSP: add
+; CHECK-DSP: uxth
+; CHECK-DSP: sxth
+; CHECK-DSP: cmp
+; CHECK-DSP: cmp
+
+; CHECK-DSP-IMM: sxth [[ARG:r[0-9]+]], r2
+; CHECK-DSP-IMM: uadd16 [[ADD:r[0-9]+]],
+; CHECK-DSP-IMM: sxth.w [[SEXT:r[0-9]+]], [[ADD]]
+; CHECK-DSP-IMM: cmp [[SEXT]], [[ARG]]
+; CHECK-DSP-IMM-NOT: uxt
+; CHECK-DSP-IMM: movs [[ONE:r[0-9]+]], #1
+; CHECK-DSP-IMM: usub16 [[SUB:r[0-9]+]], r1, [[ONE]]
+; CHECK-DSP-IMM: cmp [[SUB]], r2
+define i16 @ugt_slt(i16 *%x, i16 zeroext %y, i16 zeroext %z) {
+entry:
+ %load0 = load i16, i16* %x, align 1
+ %add = add i16 %load0, %z
+ %sub = sub i16 %y, 1
+ %cmp = icmp slt i16 %add, %z
+ %cmp1 = icmp ugt i16 %sub, %z
+ %res0 = select i1 %cmp, i16 35, i16 -1
+ %res1 = select i1 %cmp1, i16 %res0, i16 0
+ ret i16 %res1
+}
+
+; CHECK-COMMON-LABEL: urem_trunc_icmps
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: sxtb [[SEXT:r[0-9]+]],
+; CHECK-COMMON: cmp [[SEXT]], #7
+define void @urem_trunc_icmps(i16** %in, i32* %g, i32* %k) {
+entry:
+ %ptr = load i16*, i16** %in, align 4
+ %ld = load i16, i16* %ptr, align 2
+ %cmp.i = icmp eq i16 %ld, 0
+ br i1 %cmp.i, label %exit, label %cond.false.i
+
+cond.false.i:
+ %rem = urem i16 5, %ld
+ %extract.t = trunc i16 %rem to i8
+ br label %body
+
+body:
+ %cond.in.i.off0 = phi i8 [ %extract.t, %cond.false.i ], [ %add, %for.inc ]
+ %cmp = icmp sgt i8 %cond.in.i.off0, 7
+ %conv5 = zext i1 %cmp to i32
+ store i32 %conv5, i32* %g, align 4
+ %.pr = load i32, i32* %k, align 4
+ %tobool13150 = icmp eq i32 %.pr, 0
+ br i1 %tobool13150, label %for.inc, label %exit
+
+for.inc:
+ %add = add nuw i8 %cond.in.i.off0, 1
+ br label %body
+
+exit:
+ ret void
+}
diff --git a/test/CodeGen/ARM/arm-cgp-signed.ll b/test/CodeGen/ARM/CGP/arm-cgp-signed.ll
index 7494b57f4259..7494b57f4259 100644
--- a/test/CodeGen/ARM/arm-cgp-signed.ll
+++ b/test/CodeGen/ARM/CGP/arm-cgp-signed.ll
diff --git a/test/CodeGen/ARM/CGP/arm-cgp-switch.ll b/test/CodeGen/ARM/CGP/arm-cgp-switch.ll
new file mode 100644
index 000000000000..29c35fbc96e0
--- /dev/null
+++ b/test/CodeGen/ARM/CGP/arm-cgp-switch.ll
@@ -0,0 +1,168 @@
+; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7-linux-android %s -arm-disable-cgp=false -o - | FileCheck %s
+
+; CHECK-LABEL: truncate_source_phi_switch
+; CHECK: ldrb
+; CHECK: uxtb
+define void @truncate_source_phi_switch(i8* %memblock, i8* %store, i16 %arg) {
+entry:
+ %pre = load i8, i8* %memblock, align 1
+ %conv = trunc i16 %arg to i8
+ br label %header
+
+header:
+ %phi.0 = phi i8 [ %pre, %entry ], [ %count, %latch ]
+ %phi.1 = phi i8 [ %conv, %entry ], [ %phi.3, %latch ]
+ %phi.2 = phi i8 [ 0, %entry], [ %count, %latch ]
+ switch i8 %phi.0, label %default [
+ i8 43, label %for.inc.i
+ i8 45, label %for.inc.i.i
+ ]
+
+for.inc.i:
+ %xor = xor i8 %phi.1, 1
+ br label %latch
+
+for.inc.i.i:
+ %and = and i8 %phi.1, 3
+ br label %latch
+
+default:
+ %sub = sub i8 %phi.0, 1
+ %cmp2 = icmp ugt i8 %sub, 4
+ br i1 %cmp2, label %latch, label %exit
+
+latch:
+ %phi.3 = phi i8 [ %xor, %for.inc.i ], [ %and, %for.inc.i.i ], [ %phi.2, %default ]
+ %count = add nuw i8 %phi.2, 1
+ store i8 %count, i8* %store, align 1
+ br label %header
+
+exit:
+ ret void
+}
+
+; CHECK-LABEL: icmp_switch_source:
+; CHECK-NOT: uxt
+define i16 @icmp_switch_source(i16 zeroext %arg) {
+entry:
+ %conv = add nuw i16 %arg, 15
+ %mul = mul nuw nsw i16 %conv, 3
+ switch i16 %arg, label %default [
+ i16 0, label %sw.bb
+ i16 1, label %sw.bb.i
+ ]
+
+sw.bb:
+ %cmp0 = icmp ult i16 %mul, 127
+ %select = select i1 %cmp0, i16 %mul, i16 127
+ br label %exit
+
+sw.bb.i:
+ %cmp1 = icmp ugt i16 %mul, 34
+ %select.i = select i1 %cmp1, i16 %mul, i16 34
+ br label %exit
+
+default:
+ br label %exit
+
+exit:
+ %res = phi i16 [ %select, %sw.bb ], [ %select.i, %sw.bb.i ], [ %mul, %default ]
+ ret i16 %res
+}
+
+; CHECK-LABEL: icmp_switch_narrow_source:
+; CHECK-NOT: uxt
+define i16 @icmp_switch_narrow_source(i8 zeroext %arg) {
+entry:
+ %conv = zext i8 %arg to i16
+ %add = add nuw i16 %conv, 15
+ %mul = mul nuw nsw i16 %add, 3
+ switch i8 %arg, label %default [
+ i8 0, label %sw.bb
+ i8 1, label %sw.bb.i
+ ]
+
+sw.bb:
+ %cmp0 = icmp ult i16 %mul, 127
+ %select = select i1 %cmp0, i16 %mul, i16 127
+ br label %exit
+
+sw.bb.i:
+ %cmp1 = icmp ugt i16 %mul, 34
+ %select.i = select i1 %cmp1, i16 %mul, i16 34
+ br label %exit
+
+default:
+ br label %exit
+
+exit:
+ %res = phi i16 [ %select, %sw.bb ], [ %select.i, %sw.bb.i ], [ %mul, %default ]
+ ret i16 %res
+}
+
+; CHECK-LABEL: icmp_switch_trunc:
+; CHECK-NOT: uxt
+define i16 @icmp_switch_trunc(i16 zeroext %arg) {
+entry:
+ %conv = add nuw i16 %arg, 15
+ %mul = mul nuw nsw i16 %conv, 3
+ %trunc = trunc i16 %arg to i3
+ switch i3 %trunc, label %default [
+ i3 0, label %sw.bb
+ i3 1, label %sw.bb.i
+ ]
+
+sw.bb:
+ %cmp0 = icmp ult i16 %mul, 127
+ %select = select i1 %cmp0, i16 %mul, i16 127
+ br label %exit
+
+sw.bb.i:
+ %cmp1 = icmp ugt i16 %mul, 34
+ %select.i = select i1 %cmp1, i16 %mul, i16 34
+ br label %exit
+
+default:
+ br label %exit
+
+exit:
+ %res = phi i16 [ %select, %sw.bb ], [ %select.i, %sw.bb.i ], [ %mul, %default ]
+ ret i16 %res
+}
+
+%class.ae = type { i8 }
+%class.x = type { i8 }
+%class.v = type { %class.q }
+%class.q = type { i16 }
+declare %class.x* @_ZNK2ae2afEv(%class.ae*) local_unnamed_addr
+declare %class.v* @_ZN1x2acEv(%class.x*) local_unnamed_addr
+
+; CHECK-LABEL: trunc_i16_i9_switch
+; CHECK-NOT: uxt
+define i32 @trunc_i16_i9_switch(%class.ae* %this) {
+entry:
+ %call = tail call %class.x* @_ZNK2ae2afEv(%class.ae* %this)
+ %call2 = tail call %class.v* @_ZN1x2acEv(%class.x* %call)
+ %0 = getelementptr inbounds %class.v, %class.v* %call2, i32 0, i32 0, i32 0
+ %1 = load i16, i16* %0, align 2
+ %2 = trunc i16 %1 to i9
+ %trunc = and i9 %2, -64
+ switch i9 %trunc, label %cleanup.fold.split [
+ i9 0, label %cleanup
+ i9 -256, label %if.then7
+ ]
+
+if.then7:
+ %3 = and i16 %1, 7
+ %tobool = icmp eq i16 %3, 0
+ %cond = select i1 %tobool, i32 2, i32 1
+ br label %cleanup
+
+cleanup.fold.split:
+ br label %cleanup
+
+cleanup:
+ %retval.0 = phi i32 [ %cond, %if.then7 ], [ 0, %entry ], [ 2, %cleanup.fold.split ]
+ ret i32 %retval.0
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll b/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
index d667f1756eb0..84602a3c4738 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
@@ -1,13 +1,16 @@
-; RUN: llc -mtriple arm-unknown -mattr=-v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,NOV4T
-; RUN: llc -mtriple arm-unknown -mattr=+v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,V4T
-; RUN: llc -mtriple arm-unknown -mattr=+v5t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,V5T
+; RUN: llc -mtriple arm-unknown -mattr=-v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,NOV4T,ARM
+; RUN: llc -mtriple arm-unknown -mattr=+v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,V4T,ARM
+; RUN: llc -mtriple arm-unknown -mattr=+v5t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,V5T,ARM
+; RUN: llc -mtriple thumb-unknown -mattr=+v6t2 -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,THUMB
define arm_aapcscc void @test_indirect_call(void() *%fptr) {
; CHECK-LABEL: name: test_indirect_call
+; THUMB: %[[FPTR:[0-9]+]]:gpr(p0) = COPY $r0
; V5T: %[[FPTR:[0-9]+]]:gpr(p0) = COPY $r0
; V4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY $r0
; NOV4T: %[[FPTR:[0-9]+]]:tgpr(p0) = COPY $r0
; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
+; THUMB: tBLXr 14, $noreg, %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp
; V5T: BLX %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp
; V4T: BX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp
; NOV4T: BMOVPCRX_CALL %[[FPTR]](p0), csr_aapcs, implicit-def $lr, implicit $sp
@@ -22,7 +25,8 @@ declare arm_aapcscc void @call_target()
define arm_aapcscc void @test_direct_call() {
; CHECK-LABEL: name: test_direct_call
; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
-; CHECK: BL @call_target, csr_aapcs, implicit-def $lr, implicit $sp
+; THUMB: tBL 14, $noreg, @call_target, csr_aapcs, implicit-def $lr, implicit $sp
+; ARM: BL @call_target, csr_aapcs, implicit-def $lr, implicit $sp
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
entry:
notail call arm_aapcscc void @call_target()
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
index b361023067de..d3e9796b72f5 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
@@ -42,6 +42,9 @@
define void @test_vfnmss() #4 { ret void }
+ define void @test_bfc() #2 { ret void }
+ define void @test_no_bfc_bad_mask() #2 { ret void }
+
attributes #0 = { "target-features"="+v6" }
attributes #1 = { "target-features"="-v6" }
attributes #2 = { "target-features"="+v6t2" }
@@ -1142,3 +1145,57 @@ body: |
BX_RET 14, $noreg, implicit $s0
; CHECK: BX_RET 14, $noreg, implicit $s0
...
+---
+name: test_bfc
+# CHECK-LABEL: name: test_bfc
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ %1(s32) = G_CONSTANT i32 -65529 ; 0xFFFF0007
+ %2(s32) = G_AND %0, %1
+ ; CHECK: [[RS:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK: [[RD:%[0-9]+]]:gpr = BFC [[RS]], -65529, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[RD]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_no_bfc_bad_mask
+# CHECK-LABEL: name: test_no_bfc_bad_mask
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ %1(s32) = G_CONSTANT i32 6 ; 0x00000006
+ %2(s32) = G_AND %0, %1
+ ; CHECK: [[RS:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK-NOT: BFC
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[RD]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 4b9812c50d15..f030a5aa01d0 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -64,8 +64,12 @@
define void @test_stores() #0 { ret void }
define void @test_gep() { ret void }
+
+ define void @test_MOVi32imm() #3 { ret void }
+
define void @test_constant_imm() { ret void }
define void @test_constant_cimm() { ret void }
+
define void @test_pointer_constant_unconstrained() { ret void }
define void @test_pointer_constant_constrained() { ret void }
@@ -1481,6 +1485,23 @@ body: |
BX_RET 14, $noreg, implicit $r0
...
---
+name: test_MOVi32imm
+# CHECK-LABEL: name: test_MOVi32imm
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT 65537
+ ; CHECK: %[[C:[0-9]+]]:gpr = MOVi32imm 65537
+
+ $r0 = COPY %0(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
name: test_constant_imm
# CHECK-LABEL: name: test_constant_imm
legalized: true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index 95c454cdc154..175673740cdd 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -440,7 +440,7 @@ define i32 @test_shufflevector_s32_v2s32(i32 %arg) {
; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $r0
; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
+; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C0]](s32)
; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>)
%vec = insertelement <1 x i32> undef, i32 %arg, i32 0
@@ -456,7 +456,7 @@ define i32 @test_shufflevector_v2s32_v3s32(i32 %arg1, i32 %arg2) {
; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
+; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C0]](s32), [[C1]](s32)
; CHECK-DAG: [[V1:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
; CHECK-DAG: [[V2:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<3 x s32>)
@@ -476,7 +476,7 @@ define i32 @test_shufflevector_v2s32_v4s32(i32 %arg1, i32 %arg2) {
; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-; CHECK-DAG: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32)
+; CHECK-DAG: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32)
; CHECK-DAG: [[V1:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
; CHECK-DAG: [[V2:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<4 x s32>)
@@ -499,7 +499,7 @@ define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32
; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK-DAG: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
-; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
+; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C3]](s32)
; CHECK-DAG: [[V1:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32)
; CHECK-DAG: [[V2:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32)
; CHECK-DAG: [[V3:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[V2]], [[ARG3]](s32), [[C2]](s32)
@@ -521,7 +521,7 @@ define i32 @test_constantstruct_v2s32() {
; CHECK-LABEL: name: test_constantstruct_v2s32
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[C3]]
%vec = extractvalue %struct.v2s32 {<2 x i32><i32 1, i32 2>}, 0
@@ -535,7 +535,7 @@ define i32 @test_constantstruct_v2s32_s32_s32() {
; CHECK-LABEL: name: test_constantstruct_v2s32_s32_s32
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-binops.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-binops.mir
new file mode 100644
index 000000000000..51783caee3c6
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-binops.mir
@@ -0,0 +1,561 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+ define void @test_add_s8() { ret void }
+ define void @test_add_s16() { ret void }
+ define void @test_add_s32() { ret void }
+
+ define void @test_sub_s8() { ret void }
+ define void @test_sub_s16() { ret void }
+ define void @test_sub_s32() { ret void }
+
+ define void @test_mul_s8() { ret void }
+ define void @test_mul_s16() { ret void }
+ define void @test_mul_s32() { ret void }
+
+ define void @test_and_s8() { ret void }
+ define void @test_and_s16() { ret void }
+ define void @test_and_s32() { ret void }
+
+ define void @test_or_s8() { ret void }
+ define void @test_or_s16() { ret void }
+ define void @test_or_s32() { ret void }
+
+ define void @test_xor_s8() { ret void }
+ define void @test_xor_s16() { ret void }
+ define void @test_xor_s32() { ret void }
+...
+---
+name: test_add_s8
+# CHECK-LABEL: name: test_add_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_ADD %1, %3
+ ; G_ADD with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_add_s16
+# CHECK-LABEL: name: test_add_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_ADD %1, %3
+ ; G_ADD with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_add_s32
+# CHECK-LABEL: name: test_add_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_ADD %0, %1
+ ; G_ADD with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
+---
+name: test_sub_s8
+# CHECK-LABEL: name: test_sub_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_SUB %1, %3
+ ; G_SUB with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_sub_s16
+# CHECK-LABEL: name: test_sub_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_SUB %1, %3
+ ; G_SUB with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_sub_s32
+# CHECK-LABEL: name: test_sub_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_SUB %0, %1
+ ; G_SUB with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
+---
+name: test_mul_s8
+# CHECK-LABEL: name: test_mul_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_MUL %1, %3
+ ; G_MUL with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_mul_s16
+# CHECK-LABEL: name: test_mul_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_MUL %1, %3
+ ; G_MUL with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_mul_s32
+# CHECK-LABEL: name: test_mul_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_MUL %0, %1
+ ; G_MUL with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
+---
+name: test_and_s8
+# CHECK-LABEL: name: test_and_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_AND %1, %3
+ ; G_AND with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_and_s16
+# CHECK-LABEL: name: test_and_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_AND %1, %3
+ ; G_AND with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_and_s32
+# CHECK-LABEL: name: test_and_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_AND %0, %1
+ ; G_AND with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
+---
+name: test_or_s8
+# CHECK-LABEL: name: test_or_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_OR %1, %3
+ ; G_OR with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_or_s16
+# CHECK-LABEL: name: test_or_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_OR %1, %3
+ ; G_OR with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_or_s32
+# CHECK-LABEL: name: test_or_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_OR %0, %1
+ ; G_OR with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
+---
+name: test_xor_s8
+# CHECK-LABEL: name: test_xor_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0 :: (load 1)
+ %2(p0) = COPY $r0
+ %3(s8) = G_LOAD %2 :: (load 1)
+ %4(s8) = G_XOR %1, %3
+ ; G_XOR with s8 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s8)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_xor_s16
+# CHECK-LABEL: name: test_xor_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(p0) = COPY $r0
+ %3(s16) = G_LOAD %2 :: (load 2)
+ %4(s16) = G_XOR %1, %3
+ ; G_XOR with s16 should widen
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
+ ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
+ %5(s32) = G_SEXT %4(s16)
+ $r0 = COPY %5(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_xor_s32
+# CHECK-LABEL: name: test_xor_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ %2(s32) = G_XOR %0, %1
+ ; G_XOR with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir
new file mode 100644
index 000000000000..c9323eedced4
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-bitcounts.mir
@@ -0,0 +1,177 @@
+# RUN: llc -mtriple arm-linux-gnueabi -mattr=+v5t -run-pass=legalizer %s -o - | FileCheck %s -check-prefixes=CHECK,CLZ
+# RUN: llc -mtriple arm-linux-gnueabi -mattr=-v5t -run-pass=legalizer %s -o - | FileCheck %s -check-prefixes=CHECK,LIBCALLS
+--- |
+ define void @test_ctlz_s32() { ret void }
+ define void @test_ctlz_zero_undef_s32() { ret void }
+
+ ; same as above but with extensions
+ define void @test_ctlz_s16() { ret void }
+ define void @test_ctlz_zero_undef_s8() { ret void }
+...
+---
+name: test_ctlz_s32
+# CHECK-LABEL: name: test_ctlz_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $r0
+ %0(s32) = COPY $r0
+
+ ; CLZ: [[R:%[0-9]+]]:_(s32) = G_CTLZ [[X]]
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: ADJCALLSTACKDOWN
+ ; LIBCALLS: $r0 = COPY [[X]]
+ ; LIBCALLS: BL &__clzsi2, {{.*}}, implicit $r0, implicit-def $r0
+ ; LIBCALLS: [[COUNT:%[0-9]+]]:_(s32) = COPY $r0
+ ; LIBCALLS: ADJCALLSTACKUP
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; LIBCALLS: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; LIBCALLS: [[CMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[X]](s32), [[ZERO]]
+ ; LIBCALLS: [[R:%[0-9]+]]:_(s32) = G_SELECT [[CMP]](s1), [[BITS]], [[COUNT]]
+ ; LIBCALLS-NOT: G_CTLZ
+ %1(s32) = G_CTLZ %0
+
+ ; CHECK: $r0 = COPY [[R]]
+ $r0 = COPY %1(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_ctlz_zero_undef_s32
+# CHECK-LABEL: name: test_ctlz_zero_undef_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $r0
+ %0(s32) = COPY $r0
+
+ ; CLZ: [[R:%[0-9]+]]:_(s32) = G_CTLZ [[X]]
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: ADJCALLSTACKDOWN
+ ; LIBCALLS: $r0 = COPY [[X]]
+ ; LIBCALLS: BL &__clzsi2, {{.*}}, implicit $r0, implicit-def $r0
+ ; LIBCALLS: [[R:%[0-9]+]]:_(s32) = COPY $r0
+ ; LIBCALLS: ADJCALLSTACKUP
+ ; LIBCALLS-NOT: G_CTLZ
+ %1(s32) = G_CTLZ_ZERO_UNDEF %0
+
+ ; CHECK: $r0 = COPY [[R]]
+ $r0 = COPY %1(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_ctlz_s16
+# CHECK-LABEL: name: test_ctlz_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $r0
+ ; CHECK: [[BITMASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; CHECK: [[XAGAIN:%[0-9]+]]:_(s32) = COPY [[X]]
+ ; CHECK: [[X32:%[0-9]+]]:_(s32) = G_AND [[XAGAIN]], [[BITMASK]]
+ %0(s32) = COPY $r0
+ %1(s16) = G_TRUNC %0(s32)
+
+ ; Check that the operation is performed for 32 bits
+ ; CLZ: [[COUNT:%[0-9]+]]:_(s32) = G_CTLZ [[X32]]
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: ADJCALLSTACKDOWN
+ ; LIBCALLS: $r0 = COPY [[X32]]
+ ; LIBCALLS: BL &__clzsi2, {{.*}}, implicit $r0, implicit-def $r0
+ ; LIBCALLS: [[UNDEFCOUNT:%[0-9]+]]:_(s32) = COPY $r0
+ ; LIBCALLS: ADJCALLSTACKUP
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; LIBCALLS: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; LIBCALLS: [[CMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), {{%[0-9]+}}(s32), [[ZERO]]
+ ; LIBCALLS: [[COUNT:%[0-9]+]]:_(s32) = G_SELECT [[CMP]](s1), [[BITS]], [[UNDEFCOUNT]]
+ ; LIBCALLS-NOT: G_CTLZ
+ ; CHECK: [[BITDIFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK: [[R32:%[0-9]+]]:_(s32) = G_SUB [[COUNT]], [[BITDIFF]]
+ %2(s16) = G_CTLZ %1
+
+ ; CHECK: [[BITDIFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK: [[RAGAIN:%[0-9]+]]:_(s32) = COPY [[R32]]
+ ; CHECK: [[SHIFTEDR:%[0-9]+]]:_(s32) = G_SHL [[RAGAIN]], [[BITDIFF]]
+ ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDR]], [[BITDIFF]]
+ ; CHECK: $r0 = COPY [[R]]
+ %3(s32) = G_SEXT %2(s16)
+ $r0 = COPY %3(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_ctlz_zero_undef_s8
+# CHECK-LABEL: name: test_ctlz_zero_undef_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ ; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $r0
+ ; CHECK: [[BITMASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[XAGAIN:%[0-9]+]]:_(s32) = COPY [[X]]
+ ; CHECK: [[X32:%[0-9]+]]:_(s32) = G_AND [[XAGAIN]], [[BITMASK]]
+ %0(s32) = COPY $r0
+ %1(s8) = G_TRUNC %0(s32)
+
+ ; Check that the operation is performed for 32 bits
+ ; CLZ: [[COUNT:%[0-9]+]]:_(s32) = G_CTLZ
+ ; CLZ-NOT: G_CTLZ_ZERO_UNDEF
+ ; LIBCALLS-NOT: G_CTLZ
+ ; LIBCALLS: ADJCALLSTACKDOWN
+ ; LIBCALLS: $r0 = COPY [[X32]]
+ ; LIBCALLS: BL &__clzsi2, {{.*}}, implicit $r0, implicit-def $r0
+ ; LIBCALLS: [[COUNT:%[0-9]+]]:_(s32) = COPY $r0
+ ; LIBCALLS: ADJCALLSTACKUP
+ ; LIBCALLS-NOT: G_CTLZ
+ ; CHECK: [[BITDIFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; CHECK: [[R32:%[0-9]+]]:_(s32) = G_SUB [[COUNT]], [[BITDIFF]]
+ %2(s8) = G_CTLZ_ZERO_UNDEF %1
+
+ ; CHECK: [[BITDIFF:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; CHECK: [[RAGAIN:%[0-9]+]]:_(s32) = COPY [[R32]]
+ ; CHECK: [[SHIFTEDR:%[0-9]+]]:_(s32) = G_SHL [[RAGAIN]], [[BITDIFF]]
+ ; CHECK: [[R:%[0-9]+]]:_(s32) = G_ASHR [[SHIFTEDR]], [[BITDIFF]]
+ ; CHECK: $r0 = COPY [[R]]
+ %3(s32) = G_SEXT %2(s8)
+ $r0 = COPY %3(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-casts.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-casts.mir
new file mode 100644
index 000000000000..c45bc3d5385d
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-casts.mir
@@ -0,0 +1,50 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+ define void @test_inttoptr_s32() { ret void }
+ define void @test_ptrtoint_s32() { ret void }
+...
+---
+name: test_inttoptr_s32
+# CHECK-LABEL: name: test_inttoptr_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ %1(p0) = G_INTTOPTR %0(s32)
+ ; G_INTTOPTR with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(p0) = G_INTTOPTR {{%[0-9]+}}
+ $r0 = COPY %1(p0)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_ptrtoint_s32
+# CHECK-LABEL: name: test_ptrtoint_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ %1(s32) = G_PTRTOINT %0(p0)
+ ; G_PTRTOINT with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_PTRTOINT {{%[0-9]+}}
+ $r0 = COPY %1(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir
new file mode 100644
index 000000000000..d3248fe63a02
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir
@@ -0,0 +1,57 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+ define void @test_constants() { ret void }
+...
+---
+name: test_constants
+# CHECK-LABEL: name: test_constants
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %4(p0) = COPY $r0
+
+ %0(s32) = G_CONSTANT 42
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42
+
+ %1(s16) = G_CONSTANT i16 21
+ G_STORE %1(s16), %4(p0) :: (store 2)
+ ; CHECK-NOT: G_CONSTANT i16
+ ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
+ ; CHECK: {{%[0-9]+}}:_(s16) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i16
+
+ %2(s8) = G_CONSTANT i8 10
+ G_STORE %2(s8), %4(p0) :: (store 1)
+ ; CHECK-NOT: G_CONSTANT i8
+ ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
+ ; CHECK: {{%[0-9]+}}:_(s8) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i8
+
+ %3(s1) = G_CONSTANT i1 1
+ G_STORE %3(s1), %4(p0) :: (store 1)
+ ; CHECK-NOT: G_CONSTANT i1
+ ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; CHECK: {{%[0-9]+}}:_(s1) = G_TRUNC [[EXT]](s32)
+ ; CHECK-NOT: G_CONSTANT i1
+
+ %5(p0) = G_CONSTANT 0
+ G_STORE %5(p0), %4(p0) :: (store 4)
+ ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT 0
+
+ $r0 = COPY %0(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir
new file mode 100644
index 000000000000..672b42671a15
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir
@@ -0,0 +1,79 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+ define void @test_zext_s16() { ret void }
+ define void @test_sext_s8() { ret void }
+ define void @test_anyext_s1() { ret void }
+...
+---
+name: test_zext_s16
+# CHECK-LABEL: name: test_zext_s16
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ %1(s16) = G_LOAD %0 :: (load 2)
+ %2(s32) = G_ZEXT %1
+ ; G_ZEXT with s16 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_ZEXT {{%[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_sext_s8
+# CHECK-LABEL: name: test_sext_s8
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ %1(s8) = G_LOAD %0(p0) :: (load 1)
+ %2(s32) = G_SEXT %1
+ ; G_SEXT with s8 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_SEXT {{%[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_anyext_s1
+# CHECK-LABEL: name: test_anyext_s1
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ %1(s1) = G_LOAD %0(p0) :: (load 1)
+ %2(s32) = G_ANYEXT %1
+ ; G_ANYEXT with s1 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}:_(s32) = G_ANYEXT {{%[0-9]+}}
+ $r0 = COPY %2(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
new file mode 100644
index 000000000000..a955af9a97ff
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir
@@ -0,0 +1,49 @@
+# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple thumb-- -mattr=+v6t2 -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+ define void @test_legal_loads_stores() { ret void }
+...
+---
+name: test_legal_loads_stores
+# CHECK-LABEL: name: test_legal_loads_stores
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+ - { id: 4, class: _ }
+ - { id: 5, class: _ }
+ - { id: 6, class: _ }
+body: |
+ bb.0:
+ liveins: $r0
+
+ ; These are all legal, so we should find them unchanged in the output
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s32), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s16), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s8), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(s1), %0(p0)
+ ; CHECK-DAG: G_STORE {{%[0-9]+}}(p0), %0(p0)
+ ; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_LOAD %0(p0)
+ ; CHECK-DAG: {{%[0-9]+}}:_(s16) = G_LOAD %0(p0)
+ ; CHECK-DAG: {{%[0-9]+}}:_(s8) = G_LOAD %0(p0)
+ ; CHECK-DAG: {{%[0-9]+}}:_(s1) = G_LOAD %0(p0)
+ ; CHECK-DAG: {{%[0-9]+}}:_(p0) = G_LOAD %0(p0)
+ %0(p0) = COPY $r0
+ %2(s32) = G_LOAD %0(p0) :: (load 4)
+ G_STORE %2(s32), %0(p0) :: (store 4)
+ %3(s16) = G_LOAD %0(p0) :: (load 2)
+ G_STORE %3(s16), %0(p0) :: (store 2)
+ %4(s8) = G_LOAD %0(p0) :: (load 1)
+ G_STORE %4(s8), %0(p0) :: (store 1)
+ %5(s1) = G_LOAD %0(p0) :: (load 1)
+ G_STORE %5(s1), %0(p0) :: (store 1)
+ %6(p0) = G_LOAD %0(p0) :: (load 4)
+ G_STORE %6(p0), %0(p0) :: (store 4)
+ BX_RET 14, $noreg
+...
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 4d61593b47a1..bee8b7bbbabd 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -1,45 +1,15 @@
# RUN: llc -mtriple arm-- -run-pass=legalizer %s -o - | FileCheck %s
--- |
- define void @test_sext_s8() { ret void }
- define void @test_zext_s16() { ret void }
-
- define void @test_inttoptr_s32() { ret void }
- define void @test_ptrtoint_s32() { ret void }
-
- define void @test_add_s8() { ret void }
- define void @test_add_s16() { ret void }
- define void @test_add_s32() { ret void }
-
- define void @test_sub_s8() { ret void }
- define void @test_sub_s16() { ret void }
- define void @test_sub_s32() { ret void }
-
- define void @test_mul_s8() { ret void }
- define void @test_mul_s16() { ret void }
- define void @test_mul_s32() { ret void }
-
- define void @test_and_s8() { ret void }
- define void @test_and_s16() { ret void }
- define void @test_and_s32() { ret void }
-
- define void @test_or_s8() { ret void }
- define void @test_or_s16() { ret void }
- define void @test_or_s32() { ret void }
-
- define void @test_xor_s8() { ret void }
- define void @test_xor_s16() { ret void }
- define void @test_xor_s32() { ret void }
-
define void @test_lshr_s32() { ret void }
define void @test_ashr_s32() { ret void }
define void @test_shl_s32() { ret void }
define void @test_load_from_stack() { ret void }
- define void @test_legal_loads_stores() #0 { ret void }
+ define void @test_load_store_64() #0 { ret void }
define void @test_gep() { ret void }
- define void @test_constants() { ret void }
+ define void @test_constants_s64() { ret void }
define void @test_icmp_s8() { ret void }
define void @test_icmp_s16() { ret void }
@@ -61,632 +31,6 @@
attributes #0 = { "target-features"="+vfp2" }
...
---
-name: test_sext_s8
-# CHECK-LABEL: name: test_sext_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0(p0) :: (load 1)
- %2(s32) = G_SEXT %1
- ; G_SEXT with s8 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_SEXT {{%[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_zext_s16
-# CHECK-LABEL: name: test_zext_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(s32) = G_ZEXT %1
- ; G_ZEXT with s16 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_ZEXT {{%[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_inttoptr_s32
-# CHECK-LABEL: name: test_inttoptr_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
-body: |
- bb.0:
- liveins: $r0
-
- %0(s32) = COPY $r0
- %1(p0) = G_INTTOPTR %0(s32)
- ; G_INTTOPTR with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(p0) = G_INTTOPTR {{%[0-9]+}}
- $r0 = COPY %1(p0)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_ptrtoint_s32
-# CHECK-LABEL: name: test_ptrtoint_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
-body: |
- bb.0:
- liveins: $r0
-
- %0(p0) = COPY $r0
- %1(s32) = G_PTRTOINT %0(p0)
- ; G_PTRTOINT with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_PTRTOINT {{%[0-9]+}}
- $r0 = COPY %1(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_add_s8
-# CHECK-LABEL: name: test_add_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_ADD %1, %3
- ; G_ADD with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_ADD {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_add_s16
-# CHECK-LABEL: name: test_add_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_ADD %1, %3
- ; G_ADD with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_ADD {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_add_s32
-# CHECK-LABEL: name: test_add_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_ADD %0, %1
- ; G_ADD with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_ADD {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
-name: test_sub_s8
-# CHECK-LABEL: name: test_sub_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_SUB %1, %3
- ; G_SUB with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_SUB {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_sub_s16
-# CHECK-LABEL: name: test_sub_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_SUB %1, %3
- ; G_SUB with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_SUB {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_sub_s32
-# CHECK-LABEL: name: test_sub_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_SUB %0, %1
- ; G_SUB with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_SUB {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
-name: test_mul_s8
-# CHECK-LABEL: name: test_mul_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_MUL %1, %3
- ; G_MUL with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_MUL {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_mul_s16
-# CHECK-LABEL: name: test_mul_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_MUL %1, %3
- ; G_MUL with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_MUL {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_mul_s32
-# CHECK-LABEL: name: test_mul_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_MUL %0, %1
- ; G_MUL with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_MUL {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
-name: test_and_s8
-# CHECK-LABEL: name: test_and_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_AND %1, %3
- ; G_AND with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_AND {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_and_s16
-# CHECK-LABEL: name: test_and_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_AND %1, %3
- ; G_AND with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_AND {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_and_s32
-# CHECK-LABEL: name: test_and_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_AND %0, %1
- ; G_AND with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_AND {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
-name: test_or_s8
-# CHECK-LABEL: name: test_or_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_OR %1, %3
- ; G_OR with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_OR {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_or_s16
-# CHECK-LABEL: name: test_or_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_OR %1, %3
- ; G_OR with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_OR {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_or_s32
-# CHECK-LABEL: name: test_or_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_OR %0, %1
- ; G_OR with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_OR {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
-name: test_xor_s8
-# CHECK-LABEL: name: test_xor_s8
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s8) = G_LOAD %0 :: (load 1)
- %2(p0) = COPY $r0
- %3(s8) = G_LOAD %2 :: (load 1)
- %4(s8) = G_XOR %1, %3
- ; G_XOR with s8 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s8) = G_XOR {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s8)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_xor_s16
-# CHECK-LABEL: name: test_xor_s16
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
- - { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(p0) = COPY $r0
- %1(s16) = G_LOAD %0 :: (load 2)
- %2(p0) = COPY $r0
- %3(s16) = G_LOAD %2 :: (load 2)
- %4(s16) = G_XOR %1, %3
- ; G_XOR with s16 should widen
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
- ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
- ; CHECK-NOT: {{%[0-9]+}}:_(s16) = G_XOR {{%[0-9]+, %[0-9]+}}
- %5(s32) = G_SEXT %4(s16)
- $r0 = COPY %5(s32)
- BX_RET 14, $noreg, implicit $r0
-...
----
-name: test_xor_s32
-# CHECK-LABEL: name: test_xor_s32
-legalized: false
-# CHECK: legalized: true
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: _ }
- - { id: 1, class: _ }
- - { id: 2, class: _ }
-body: |
- bb.0:
- liveins: $r0, $r1
-
- %0(s32) = COPY $r0
- %1(s32) = COPY $r1
- %2(s32) = G_XOR %0, %1
- ; G_XOR with s32 is legal, so we should find it unchanged in the output
- ; CHECK: {{%[0-9]+}}:_(s32) = G_XOR {{%[0-9]+, %[0-9]+}}
- $r0 = COPY %2(s32)
- BX_RET 14, $noreg, implicit $r0
-
-...
----
name: test_lshr_s32
# CHECK-LABEL: name: test_lshr_s32
legalized: false
@@ -792,8 +136,8 @@ body: |
BX_RET 14, $noreg, implicit $r0
...
---
-name: test_legal_loads_stores
-# CHECK-LABEL: name: test_legal_loads_stores
+name: test_load_store_64
+# CHECK-LABEL: name: test_load_store_64
legalized: false
# CHECK: legalized: true
regBankSelected: false
@@ -811,32 +155,12 @@ body: |
bb.0:
liveins: $r0
- ; These are all legal, so we should find them unchanged in the output
+ ; These are legal, so we should find them unchanged in the output
; CHECK-DAG: G_STORE {{%[0-9]+}}(s64), %0(p0)
- ; CHECK-DAG: G_STORE {{%[0-9]+}}(s32), %0(p0)
- ; CHECK-DAG: G_STORE {{%[0-9]+}}(s16), %0(p0)
- ; CHECK-DAG: G_STORE {{%[0-9]+}}(s8), %0(p0)
- ; CHECK-DAG: G_STORE {{%[0-9]+}}(s1), %0(p0)
- ; CHECK-DAG: G_STORE {{%[0-9]+}}(p0), %0(p0)
; CHECK-DAG: {{%[0-9]+}}:_(s64) = G_LOAD %0(p0)
- ; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_LOAD %0(p0)
- ; CHECK-DAG: {{%[0-9]+}}:_(s16) = G_LOAD %0(p0)
- ; CHECK-DAG: {{%[0-9]+}}:_(s8) = G_LOAD %0(p0)
- ; CHECK-DAG: {{%[0-9]+}}:_(s1) = G_LOAD %0(p0)
- ; CHECK-DAG: {{%[0-9]+}}:_(p0) = G_LOAD %0(p0)
%0(p0) = COPY $r0
%1(s64) = G_LOAD %0(p0) :: (load 8)
G_STORE %1(s64), %0(p0) :: (store 8)
- %2(s32) = G_LOAD %0(p0) :: (load 4)
- G_STORE %2(s32), %0(p0) :: (store 4)
- %3(s16) = G_LOAD %0(p0) :: (load 2)
- G_STORE %3(s16), %0(p0) :: (store 2)
- %4(s8) = G_LOAD %0(p0) :: (load 1)
- G_STORE %4(s8), %0(p0) :: (store 1)
- %5(s1) = G_LOAD %0(p0) :: (load 1)
- G_STORE %5(s1), %0(p0) :: (store 1)
- %6(p0) = G_LOAD %0(p0) :: (load 4)
- G_STORE %6(p0), %0(p0) :: (store 4)
BX_RET 14, $noreg
...
---
@@ -865,8 +189,8 @@ body: |
BX_RET 14, $noreg, implicit $r0
...
---
-name: test_constants
-# CHECK-LABEL: name: test_constants
+name: test_constants_s64
+# CHECK-LABEL: name: test_constants_s64
legalized: false
# CHECK: legalized: true
regBankSelected: false
@@ -877,55 +201,21 @@ registers:
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
- - { id: 4, class: _ }
- - { id: 5, class: _ }
- - { id: 6, class: _ }
- - { id: 7, class: _ }
- - { id: 8, class: _ }
body: |
bb.0:
liveins: $r0
- %4(p0) = COPY $r0
-
- %0(s32) = G_CONSTANT 42
- ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42
-
- %1(s16) = G_CONSTANT i16 21
- G_STORE %1(s16), %4(p0) :: (store 2)
- ; CHECK-NOT: G_CONSTANT i16
- ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
- ; CHECK: {{%[0-9]+}}:_(s16) = G_TRUNC [[EXT]](s32)
- ; CHECK-NOT: G_CONSTANT i16
-
- %2(s8) = G_CONSTANT i8 10
- G_STORE %2(s8), %4(p0) :: (store 1)
- ; CHECK-NOT: G_CONSTANT i8
- ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
- ; CHECK: {{%[0-9]+}}:_(s8) = G_TRUNC [[EXT]](s32)
- ; CHECK-NOT: G_CONSTANT i8
-
- %3(s1) = G_CONSTANT i1 1
- G_STORE %3(s1), %4(p0) :: (store 1)
- ; CHECK-NOT: G_CONSTANT i1
- ; CHECK: [[EXT:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; CHECK: {{%[0-9]+}}:_(s1) = G_TRUNC [[EXT]](s32)
- ; CHECK-NOT: G_CONSTANT i1
-
- %5(p0) = G_CONSTANT 0
- G_STORE %5(p0), %4(p0) :: (store 4)
- ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT 0
+ %0(p0) = COPY $r0
- %6(s64) = G_CONSTANT i64 17179869200 ; = 4 * 2 ^ 32 + 16
- %7(s32), %8(s32) = G_UNMERGE_VALUES %6(s64)
- G_STORE %7(s32), %4(p0) :: (store 4)
- G_STORE %8(s32), %4(p0) :: (store 4)
+ %1(s64) = G_CONSTANT i64 17179869200 ; = 4 * 2 ^ 32 + 16
+ %2(s32), %3(s32) = G_UNMERGE_VALUES %1(s64)
+ G_STORE %2(s32), %0(p0) :: (store 4)
+ G_STORE %3(s32), %0(p0) :: (store 4)
; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 4
; CHECK-DAG: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 16
; CHECK-NOT: G_CONSTANT i64
- $r0 = COPY %0(s32)
- BX_RET 14, $noreg, implicit $r0
+ BX_RET 14, $noreg
...
---
name: test_icmp_s8
diff --git a/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll b/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
index ff307af1e179..3c3da0edd53e 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
@@ -1,6 +1,7 @@
-; RUN: llc -mtriple arm-unknown -mattr=+vfp2,+v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LITTLE
-; RUN: llc -mtriple armeb-unknown -mattr=+vfp2,+v4t -global-isel -global-isel-abort=0 -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=BIG
+; RUN: llc -mtriple arm-unknown -mattr=+vfp2,+v4t -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=ARM -check-prefix=LITTLE
+; RUN: llc -mtriple armeb-unknown -mattr=+vfp2,+v4t -global-isel -global-isel-abort=0 -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=ARM -check-prefix=BIG
; XFAIL: armeb
+; RUN: llc -mtriple thumb-unknown -mattr=+vfp2,+v6t2 -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=LITTLE -check-prefix=THUMB
declare arm_aapcscc i32* @simple_reg_params_target(i32, i32*)
@@ -11,11 +12,13 @@ define arm_aapcscc i32* @test_call_simple_reg_params(i32 *%a, i32 %b) {
; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK-DAG: $r0 = COPY [[BVREG]]
; CHECK-DAG: $r1 = COPY [[AVREG]]
-; CHECK: BL @simple_reg_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0
+; ARM: BL @simple_reg_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0
+; THUMB: tBL 14, $noreg, @simple_reg_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0
; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY $r0
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: $r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, $noreg, implicit $r0
+; ARM: BX_RET 14, $noreg, implicit $r0
+; THUMB: tBX_RET 14, $noreg, implicit $r0
entry:
%r = notail call arm_aapcscc i32 *@simple_reg_params_target(i32 %b, i32 *%a)
ret i32 *%r
@@ -40,11 +43,13 @@ define arm_aapcscc i32* @test_call_simple_stack_params(i32 *%a, i32 %b) {
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: G_STORE [[AVREG]](p0), [[FI2]](p0){{.*}}store 4
-; CHECK: BL @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; ARM: BL @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; THUMB: tBL 14, $noreg, @simple_stack_params_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
; CHECK: [[RVREG:%[0-9]+]]:_(p0) = COPY $r0
; CHECK: ADJCALLSTACKUP 8, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: $r0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, $noreg, implicit $r0
+; ARM: BX_RET 14, $noreg, implicit $r0
+; THUMB: tBX_RET 14, $noreg, implicit $r0
entry:
%r = notail call arm_aapcscc i32 *@simple_stack_params_target(i32 %b, i32 *%a, i32 %b, i32 *%a, i32 %b, i32 *%a)
ret i32 *%r
@@ -94,13 +99,15 @@ define arm_aapcscc signext i16 @test_call_ext_params(i8 %a, i16 %b, i1 %c) {
; CHECK: [[FI5:%[0-9]+]]:_(p0) = G_GEP [[SP5]], [[OFF5]](s32)
; CHECK: [[ZEXTC:%[0-9]+]]:_(s32) = G_ZEXT [[CVREG]]
; CHECK: G_STORE [[ZEXTC]](s32), [[FI5]](p0){{.*}}store 4
-; CHECK: BL @ext_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; ARM: BL @ext_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
+; THUMB: tBL 14, $noreg, @ext_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0
; CHECK: [[R0VREG:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: [[RVREG:%[0-9]+]]:_(s16) = G_TRUNC [[R0VREG]]
; CHECK: ADJCALLSTACKUP 20, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[RExtVREG:%[0-9]+]]:_(s32) = G_SEXT [[RVREG]]
; CHECK: $r0 = COPY [[RExtVREG]]
-; CHECK: BX_RET 14, $noreg, implicit $r0
+; ARM: BX_RET 14, $noreg, implicit $r0
+; THUMB: tBX_RET 14, $noreg, implicit $r0
entry:
%r = notail call arm_aapcscc signext i16 @ext_target(i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i8 signext %a, i8 zeroext %a, i16 signext %b, i16 zeroext %b, i1 zeroext %c)
ret i16 %r
@@ -115,11 +122,13 @@ define arm_aapcs_vfpcc double @test_call_vfpcc_fp_params(double %a, float %b) {
; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK-DAG: $s0 = COPY [[BVREG]]
; CHECK-DAG: $d1 = COPY [[AVREG]]
-; CHECK: BL @vfpcc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $d1, implicit-def $d0
+; ARM: BL @vfpcc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $d1, implicit-def $d0
+; THUMB: tBL 14, $noreg, @vfpcc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $d1, implicit-def $d0
; CHECK: [[RVREG:%[0-9]+]]:_(s64) = COPY $d0
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: $d0 = COPY [[RVREG]]
-; CHECK: BX_RET 14, $noreg, implicit $d0
+; ARM: BX_RET 14, $noreg, implicit $d0
+; THUMB: tBX_RET 14, $noreg, implicit $d0
entry:
%r = notail call arm_aapcs_vfpcc double @vfpcc_fp_target(float %b, double %a)
ret double %r
@@ -149,7 +158,8 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; CHECK: [[OFF2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[FI2:%[0-9]+]]:_(p0) = G_GEP [[SP2]], [[OFF2]](s32)
; CHECK: G_STORE [[AVREG]](s64), [[FI2]](p0){{.*}}store 8
-; CHECK: BL @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; ARM: BL @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; THUMB: tBL 14, $noreg, @aapcscc_fp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; CHECK-DAG: [[R1:%[0-9]+]]:_(s32) = COPY $r0
; CHECK-DAG: [[R2:%[0-9]+]]:_(s32) = COPY $r1
; LITTLE: [[RVREG:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R1]](s32), [[R2]](s32)
@@ -160,7 +170,8 @@ define arm_aapcscc double @test_call_aapcs_fp_params(double %a, float %b) {
; LITTLE-DAG: $r1 = COPY [[R2]]
; BIG-DAG: $r0 = COPY [[R2]]
; BIG-DAG: $r1 = COPY [[R1]]
-; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; ARM: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; THUMB: tBX_RET 14, $noreg, implicit $r0, implicit $r1
entry:
%r = notail call arm_aapcscc double @aapcscc_fp_target(float %b, double %a, float %b, double %a)
ret double %r
@@ -173,11 +184,13 @@ define arm_aapcs_vfpcc float @test_call_different_call_conv(float %x) {
; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0
; CHECK: ADJCALLSTACKDOWN 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: $r0 = COPY [[X]]
-; CHECK: BL @different_call_conv_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit-def $r0
+; ARM: BL @different_call_conv_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit-def $r0
+; THUMB: tBL 14, $noreg, @different_call_conv_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit-def $r0
; CHECK: [[R:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: $s0 = COPY [[R]]
-; CHECK: BX_RET 14, $noreg, implicit $s0
+; ARM: BX_RET 14, $noreg, implicit $s0
+; THUMB: tBX_RET 14, $noreg, implicit $s0
entry:
%r = notail call arm_aapcscc float @different_call_conv_target(float %x)
ret float %r
@@ -200,7 +213,8 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS2]](s64)
; CHECK: $r0 = COPY [[R0]]
; CHECK: $r1 = COPY [[R1]]
-; CHECK: BL @tiny_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+; ARM: BL @tiny_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+; THUMB: tBL 14, $noreg, @tiny_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $r2
@@ -209,18 +223,14 @@ define arm_aapcscc [3 x i32] @test_tiny_int_arrays([2 x i32] %arr) {
; CHECK: [[EXT3:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 0
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 32
; CHECK: [[EXT5:%[0-9]+]]:_(s32) = G_EXTRACT [[RES_ARR]](s96), 64
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
-; CHECK: [[INS3:%[0-9]+]]:_(s96) = G_INSERT [[IMPDEF2]], [[EXT3]](s32), 0
-; CHECK: [[INS4:%[0-9]+]]:_(s96) = G_INSERT [[INS3]], [[EXT4]](s32), 32
-; CHECK: [[INS5:%[0-9]+]]:_(s96) = G_INSERT [[INS4]], [[EXT5]](s32), 64
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS5]](s96)
; FIXME: This doesn't seem correct with regard to the AAPCS docs (which say
; that composite types larger than 4 bytes should be passed through memory),
; but it's what DAGISel does. We should fix it in the common code for both.
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
-; CHECK: $r2 = COPY [[R2]]
-; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1, implicit $r2
+; CHECK: $r0 = COPY [[EXT3]]
+; CHECK: $r1 = COPY [[EXT4]]
+; CHECK: $r2 = COPY [[EXT5]]
+; ARM: BX_RET 14, $noreg, implicit $r0, implicit $r1, implicit $r2
+; THUMB: tBX_RET 14, $noreg, implicit $r0, implicit $r1, implicit $r2
entry:
%r = notail call arm_aapcscc [3 x i32] @tiny_int_arrays_target([2 x i32] %arr)
ret [3 x i32] %r
@@ -254,9 +264,11 @@ define arm_aapcscc void @test_multiple_int_arrays([2 x i32] %arr0, [2 x i32] %ar
; CHECK: $r1 = COPY [[R1]]
; CHECK: $r2 = COPY [[R2]]
; CHECK: $r3 = COPY [[R3]]
-; CHECK: BL @multiple_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
+; ARM: BL @multiple_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
+; THUMB: tBL 14, $noreg, @multiple_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
-; CHECK: BX_RET 14, $noreg
+; ARM: BX_RET 14, $noreg
+; THUMB: tBX_RET 14, $noreg
entry:
notail call arm_aapcscc void @multiple_int_arrays_target([2 x i32] %arr0, [2 x i32] %arr1)
ret void
@@ -298,9 +310,11 @@ define arm_aapcscc void @test_large_int_arrays([20 x i32] %arr) {
; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4
-; CHECK: BL @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
+; ARM: BL @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
+; THUMB: tBL 14, $noreg, @large_int_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3
; CHECK: ADJCALLSTACKUP 64, 0, 14, $noreg, implicit-def $sp, implicit $sp
-; CHECK: BX_RET 14, $noreg
+; ARM: BX_RET 14, $noreg
+; THUMB: tBX_RET 14, $noreg
entry:
notail call arm_aapcscc void @large_int_arrays_target([20 x i32] %arr)
ret void
@@ -347,20 +361,18 @@ define arm_aapcscc [2 x float] @test_fp_arrays_aapcs([3 x double] %arr) {
; CHECK: [[ARR2_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ARR2_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[ARR2_OFFSET]](s32)
; CHECK: G_STORE [[ARR2]](s64), [[ARR2_ADDR]](p0){{.*}}store 8
-; CHECK: BL @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; ARM: BL @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; THUMB: tBL 14, $noreg, @fp_arrays_aapcs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK: [[R_MERGED:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
; CHECK: ADJCALLSTACKUP 8, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s64), 0
; CHECK: [[EXT5:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s64), 32
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS4:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF2]], [[EXT4]](s32), 0
-; CHECK: [[INS5:%[0-9]+]]:_(s64) = G_INSERT [[INS4]], [[EXT5]](s32), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS5]](s64)
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
-; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; CHECK: $r0 = COPY [[EXT4]]
+; CHECK: $r1 = COPY [[EXT5]]
+; ARM: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; THUMB: tBX_RET 14, $noreg, implicit $r0, implicit $r1
entry:
%r = notail call arm_aapcscc [2 x float] @fp_arrays_aapcs_target([3 x double] %arr)
ret [2 x float] %r
@@ -442,7 +454,8 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: [[Z3_OFFSET:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[Z3_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[Z3_OFFSET]](s32)
; CHECK: G_STORE [[Z3]](s64), [[Z3_ADDR]](p0){{.*}}store 8
-; CHECK: BL @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3
+; ARM: BL @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3
+; THUMB: tBL 14, $noreg, @fp_arrays_aapcs_vfp_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit $d2, implicit $s6, implicit $s7, implicit $s8, implicit-def $s0, implicit-def $s1, implicit-def $s2, implicit-def $s3
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $s0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $s1
; CHECK: [[R2:%[0-9]+]]:_(s32) = COPY $s2
@@ -453,17 +466,12 @@ define arm_aapcs_vfpcc [4 x float] @test_fp_arrays_aapcs_vfp([3 x double] %x, [3
; CHECK: [[EXT12:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 32
; CHECK: [[EXT13:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 64
; CHECK: [[EXT14:%[0-9]+]]:_(s32) = G_EXTRACT [[R_MERGED]](s128), 96
-; CHECK: [[IMPDEF4:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-; CHECK: [[INS11:%[0-9]+]]:_(s128) = G_INSERT [[IMPDEF4]], [[EXT11]](s32), 0
-; CHECK: [[INS12:%[0-9]+]]:_(s128) = G_INSERT [[INS11]], [[EXT12]](s32), 32
-; CHECK: [[INS13:%[0-9]+]]:_(s128) = G_INSERT [[INS12]], [[EXT13]](s32), 64
-; CHECK: [[INS14:%[0-9]+]]:_(s128) = G_INSERT [[INS13]], [[EXT14]](s32), 96
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32), [[R2:%[0-9]+]]:_(s32), [[R3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS14]](s128)
-; CHECK: $s0 = COPY [[R0]]
-; CHECK: $s1 = COPY [[R1]]
-; CHECK: $s2 = COPY [[R2]]
-; CHECK: $s3 = COPY [[R3]]
-; CHECK: BX_RET 14, $noreg, implicit $s0, implicit $s1, implicit $s2, implicit $s3
+; CHECK: $s0 = COPY [[EXT11]]
+; CHECK: $s1 = COPY [[EXT12]]
+; CHECK: $s2 = COPY [[EXT13]]
+; CHECK: $s3 = COPY [[EXT14]]
+; ARM: BX_RET 14, $noreg, implicit $s0, implicit $s1, implicit $s2, implicit $s3
+; THUMB: tBX_RET 14, $noreg, implicit $s0, implicit $s1, implicit $s2, implicit $s3
entry:
%r = notail call arm_aapcs_vfpcc [4 x float] @fp_arrays_aapcs_vfp_target([3 x double] %x, [3 x float] %y, [4 x double] %z)
ret [4 x float] %r
@@ -505,20 +513,18 @@ define arm_aapcscc [2 x i32*] @test_tough_arrays([6 x [4 x i32]] %arr) {
; CHECK: [[OFF_LAST_ELEMENT:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[LAST_STACK_ARG_ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF_LAST_ELEMENT]](s32)
; CHECK: G_STORE [[LAST_STACK_ELEMENT]](s32), [[LAST_STACK_ARG_ADDR]]{{.*}}store 4
-; CHECK: BL @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; ARM: BL @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
+; THUMB: tBL 14, $noreg, @tough_arrays_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit $r3, implicit-def $r0, implicit-def $r1
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK: [[RES_ARR:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
; CHECK: ADJCALLSTACKUP 80, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT1:%[0-9]+]]:_(p0) = G_EXTRACT [[RES_ARR]](s64), 0
; CHECK: [[EXT2:%[0-9]+]]:_(p0) = G_EXTRACT [[RES_ARR]](s64), 32
-; CHECK: [[IMPDEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS2:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF]], [[EXT1]](p0), 0
-; CHECK: [[INS3:%[0-9]+]]:_(s64) = G_INSERT [[INS2]], [[EXT2]](p0), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS3]](s64)
-; CHECK: $r0 = COPY [[R0]]
-; CHECK: $r1 = COPY [[R1]]
-; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; CHECK: $r0 = COPY [[EXT1]]
+; CHECK: $r1 = COPY [[EXT2]]
+; ARM: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; THUMB: tBX_RET 14, $noreg, implicit $r0, implicit $r1
entry:
%r = notail call arm_aapcscc [2 x i32*] @tough_arrays_target([6 x [4 x i32]] %arr)
ret [2 x i32*] %r
@@ -541,20 +547,18 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
; CHECK: [[X0:%[0-9]+]]:_(s32), [[X1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS2]](s64)
; CHECK-DAG: $r0 = COPY [[X0]](s32)
; CHECK-DAG: $r1 = COPY [[X1]](s32)
-; CHECK: BL @structs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+; ARM: BL @structs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
+; THUMB: tBL 14, $noreg, @structs_target, csr_aapcs, implicit-def $lr, implicit $sp, implicit $r0, implicit $r1, implicit-def $r0, implicit-def $r1
; CHECK: [[R0:%[0-9]+]]:_(s32) = COPY $r0
; CHECK: [[R1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK: [[R:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[R0]](s32), [[R1]](s32)
; CHECK: ADJCALLSTACKUP 0, 0, 14, $noreg, implicit-def $sp, implicit $sp
; CHECK: [[EXT3:%[0-9]+]]:_(s32) = G_EXTRACT [[R]](s64), 0
; CHECK: [[EXT4:%[0-9]+]]:_(s32) = G_EXTRACT [[R]](s64), 32
-; CHECK: [[IMPDEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-; CHECK: [[INS3:%[0-9]+]]:_(s64) = G_INSERT [[IMPDEF2]], [[EXT3]](s32), 0
-; CHECK: [[INS4:%[0-9]+]]:_(s64) = G_INSERT [[INS3]], [[EXT4]](s32), 32
-; CHECK: [[R0:%[0-9]+]]:_(s32), [[R1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INS4]](s64)
-; CHECK: $r0 = COPY [[R0]](s32)
-; CHECK: $r1 = COPY [[R1]](s32)
-; CHECK: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; CHECK: $r0 = COPY [[EXT3]](s32)
+; CHECK: $r1 = COPY [[EXT4]](s32)
+; ARM: BX_RET 14, $noreg, implicit $r0, implicit $r1
+; THUMB: tBX_RET 14, $noreg, implicit $r0, implicit $r1
%r = notail call arm_aapcscc {i32, i32} @structs_target({i32, i32} %x)
ret {i32, i32} %r
}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index 4634e5a0d9df..281218192a18 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -27,6 +27,8 @@
define void @test_inttoptr_s32() { ret void }
define void @test_ptrtoint_s32() { ret void }
+ define void @test_ctlz_s32() #3 { ret void }
+
@a_global = global float 1.0
define void @test_globals() { ret void }
@@ -83,6 +85,7 @@
attributes #0 = { "target-features"="+vfp2"}
attributes #1 = { "target-features"="+hwdiv-arm" }
attributes #2 = { "target-features"="+vfp4"}
+ attributes #3 = { "target-features"="+v5t"}
...
---
name: test_add_s32
@@ -561,6 +564,25 @@ body: |
BX_RET 14, $noreg, implicit $r0
...
---
+name: test_ctlz_s32
+# CHECK-LABEL: name: test_ctlz_s32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb, preferred-register: '' }
+# CHECK: - { id: 1, class: gprb, preferred-register: '' }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+body: |
+ bb.0:
+ %0(s32) = COPY $r0
+ %1(s32) = G_CTLZ %0(s32)
+ $r0 = COPY %1(s32)
+ BX_RET 14, $noreg, implicit $r0
+...
+---
name: test_globals
# CHECK-LABEL: name: test_globals
legalized: true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir b/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
index fa15f09497e5..dc017cc2654e 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
@@ -1,9 +1,9 @@
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RW-DEFAULT-MOVT,RW-DEFAULT,ROPI-MOVT,ROPI
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RW-DEFAULT-NOMOVT,RW-DEFAULT,ROPI-NOMOVT,ROPI
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=rwpi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-MOVT,RWPI,RO-DEFAULT-MOVT,RO-DEFAULT
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=rwpi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-NOMOVT,RWPI,RO-DEFAULT-NOMOVT,RO-DEFAULT
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi-rwpi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-MOVT,RWPI,ROPI-MOVT,ROPI
-# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi-rwpi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-NOMOVT,RWPI,ROPI-NOMOVT,ROPI
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RW-DEFAULT-MOVT,ROPI-MOVT
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RW-DEFAULT-NOMOVT,ROPI-NOMOVT
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=rwpi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-MOVT,RWPI,RO-DEFAULT-MOVT
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=rwpi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-NOMOVT,RWPI,RO-DEFAULT-NOMOVT
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi-rwpi -mattr=-no-movt,+v8m -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-MOVT,RWPI,ROPI-MOVT
+# RUN: llc -O0 -mtriple arm-linux -relocation-model=ropi-rwpi -mattr=+no-movt -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=CHECK,RWPI-NOMOVT,RWPI,ROPI-NOMOVT
--- |
@internal_global = internal global i32 42
define void @test_internal_global() { ret void }
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir
new file mode 100644
index 000000000000..cf42a815cd4b
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-arithmetic-ops.mir
@@ -0,0 +1,251 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_add_regs() { ret void }
+ define void @test_add_fold_imm() { ret void }
+ define void @test_add_fold_imm12() { ret void }
+ define void @test_add_no_fold_imm() { ret void }
+
+ define void @test_sub_imm_lhs() { ret void }
+ define void @test_sub_imm_rhs() { ret void }
+
+ define void @test_mul() { ret void }
+ define void @test_mla() { ret void }
+...
+---
+name: test_add_regs
+# CHECK-LABEL: name: test_add_regs
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+ %1(s32) = COPY $r1
+ ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = G_ADD %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_add_fold_imm
+# CHECK-LABEL: name: test_add_fold_imm
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ %2(s32) = G_ADD %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDri [[VREGX]], 786444, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_add_fold_imm12
+# CHECK-LABEL: name: test_add_fold_imm12
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 4093
+ %2(s32) = G_ADD %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDri12 [[VREGX]], 4093, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_add_no_fold_imm
+# CHECK-LABEL: name: test_add_no_fold_imm
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 185470479 ; 0x0b0e0e0f
+ ; CHECK: [[VREGY:%[0-9]+]]:rgpr = t2MOVi32imm 185470479
+
+ %2(s32) = G_ADD %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2ADDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_sub_imm_lhs
+# CHECK-LABEL: name: test_sub_imm_lhs
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ %2(s32) = G_SUB %1, %0
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2RSBri [[VREGX]], 786444, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_sub_imm_rhs
+# CHECK-LABEL: name: test_sub_imm_rhs
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:gprnopc = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ %2(s32) = G_SUB %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:gprnopc = t2SUBri [[VREGX]], 786444, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_mul
+# CHECK-LABEL: name: test_mul
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = COPY $r1
+ ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = G_MUL %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MUL [[VREGX]], [[VREGY]], 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_mla
+# CHECK-LABEL: name: test_mla
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = COPY $r1
+ ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = COPY $r2
+ ; CHECK: [[VREGZ:%[0-9]+]]:rgpr = COPY $r2
+
+ %3(s32) = G_MUL %0, %1
+ %4(s32) = G_ADD %3, %2
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MLA [[VREGX]], [[VREGY]], [[VREGZ]], 14, $noreg
+
+ $r0 = COPY %4(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-casts.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-casts.mir
new file mode 100644
index 000000000000..df5417e7a917
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-casts.mir
@@ -0,0 +1,51 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_inttoptr_s32() { ret void }
+ define void @test_ptrtoint_s32() { ret void }
+...
+---
+name: test_inttoptr_s32
+# CHECK-LABEL: name: test_inttoptr_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ %1(p0) = G_INTTOPTR %0(s32)
+ ; CHECK: [[INT:%[0-9]+]]:gpr = COPY $r0
+
+ $r0 = COPY %1(p0)
+ ; CHECK: $r0 = COPY [[INT]]
+
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_ptrtoint_s32
+# CHECK-LABEL: name: test_ptrtoint_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ %1(s32) = G_PTRTOINT %0(p0)
+ ; CHECK: [[PTR:%[0-9]+]]:gpr = COPY $r0
+
+ $r0 = COPY %1(s32)
+ ; CHECK: $r0 = COPY [[PTR]]
+
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-exts.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-exts.mir
new file mode 100644
index 000000000000..365a614d6ec3
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-exts.mir
@@ -0,0 +1,288 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_trunc_and_zext_s1() { ret void }
+ define void @test_trunc_and_sext_s1() { ret void }
+ define void @test_trunc_and_anyext_s1() { ret void }
+
+ define void @test_trunc_and_zext_s8() { ret void }
+ define void @test_trunc_and_sext_s8() { ret void }
+ define void @test_trunc_and_anyext_s8() { ret void }
+
+ define void @test_trunc_and_zext_s16() { ret void }
+ define void @test_trunc_and_sext_s16() { ret void }
+ define void @test_trunc_and_anyext_s16() { ret void }
+...
+---
+name: test_trunc_and_zext_s1
+# CHECK-LABEL: name: test_trunc_and_zext_s1
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s1) = G_TRUNC %0(s32)
+
+ %2(s32) = G_ZEXT %1(s1)
+ ; CHECK: [[RVREG:%[0-9]+]]:rgpr = COPY [[VREG]]
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2ANDri [[RVREG]], 1, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_sext_s1
+# CHECK-LABEL: name: test_trunc_and_sext_s1
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s1) = G_TRUNC %0(s32)
+
+ %2(s32) = G_SEXT %1(s1)
+ ; CHECK: [[RVREG:%[0-9]+]]:rgpr = COPY [[VREG]]
+ ; CHECK: [[VREGAND:%[0-9]+]]:rgpr = t2ANDri [[RVREG]], 1, 14, $noreg, $noreg
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2RSBri [[VREGAND]], 0, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_anyext_s1
+# CHECK-LABEL: name: test_trunc_and_anyext_s1
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s1) = G_TRUNC %0(s32)
+
+ %2(s32) = G_ANYEXT %1(s1)
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREG]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_zext_s8
+# CHECK-LABEL: name: test_trunc_and_zext_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s8) = G_TRUNC %0(s32)
+ ; CHECK: [[VREGTRUNC:%[0-9]+]]:rgpr = COPY [[VREG]]
+
+ %2(s32) = G_ZEXT %1(s8)
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2UXTB [[VREGTRUNC]], 0, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_sext_s8
+# CHECK-LABEL: name: test_trunc_and_sext_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s8) = G_TRUNC %0(s32)
+ ; CHECK: [[VREGTRUNC:%[0-9]+]]:rgpr = COPY [[VREG]]
+
+ %2(s32) = G_SEXT %1(s8)
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2SXTB [[VREGTRUNC]], 0, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_anyext_s8
+# CHECK-LABEL: name: test_trunc_and_anyext_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s8) = G_TRUNC %0(s32)
+
+ %2(s32) = G_ANYEXT %1(s8)
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREG]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_zext_s16
+# CHECK-LABEL: name: test_trunc_and_zext_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s16) = G_TRUNC %0(s32)
+ ; CHECK: [[VREGTRUNC:%[0-9]+]]:rgpr = COPY [[VREG]]
+
+ %2(s32) = G_ZEXT %1(s16)
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2UXTH [[VREGTRUNC]], 0, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_sext_s16
+# CHECK-LABEL: name: test_trunc_and_sext_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s16) = G_TRUNC %0(s32)
+ ; CHECK: [[VREGTRUNC:%[0-9]+]]:rgpr = COPY [[VREG]]
+
+ %2(s32) = G_SEXT %1(s16)
+ ; CHECK: [[VREGEXT:%[0-9]+]]:rgpr = t2SXTH [[VREGTRUNC]], 0, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGEXT]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_trunc_and_anyext_s16
+# CHECK-LABEL: name: test_trunc_and_anyext_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREG:%[0-9]+]]:gpr = COPY $r0
+
+ %1(s16) = G_TRUNC %0(s32)
+
+ %2(s32) = G_ANYEXT %1(s16)
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREG]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir
new file mode 100644
index 000000000000..4979491aca03
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-imm.mir
@@ -0,0 +1,66 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_movi() { ret void }
+ define void @test_movi16() { ret void }
+ define void @test_movi32() { ret void }
+...
+---
+name: test_movi
+# CHECK-LABEL: name: test_movi
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi 786444, 14, $noreg, $noreg
+
+ $r0 = COPY %0(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_movi16
+# CHECK-LABEL: name: test_movi16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT i32 65533
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi16 65533, 14, $noreg
+
+ $r0 = COPY %0(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_movi32
+# CHECK-LABEL: name: test_movi32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+body: |
+ bb.0:
+ %0(s32) = G_CONSTANT i32 185470479 ; 0x0b0e0e0f
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MOVi32imm 185470479
+
+ $r0 = COPY %0(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-load-store.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-load-store.mir
new file mode 100644
index 000000000000..170c26e996cb
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-load-store.mir
@@ -0,0 +1,84 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_s8() { ret void }
+ define void @test_s16() { ret void }
+ define void @test_s32() { ret void }
+...
+---
+name: test_s8
+# CHECK-LABEL: name: test_s8
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0
+
+ %1(s8) = G_LOAD %0(p0) :: (load 1)
+ ; CHECK: %[[V:[0-9]+]]:rgpr = t2LDRBi12 %[[P]], 0, 14, $noreg :: (load 1)
+
+ G_STORE %1(s8), %0(p0) :: (store 1)
+ ; CHECK: t2STRBi12 %[[V]], %[[P]], 0, 14, $noreg :: (store 1)
+
+ BX_RET 14, $noreg
+ ; CHECK: BX_RET 14, $noreg
+...
+---
+name: test_s16
+# CHECK-LABEL: name: test_s16
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0
+
+ %1(s16) = G_LOAD %0(p0) :: (load 2)
+ ; CHECK: %[[V:[0-9]+]]:rgpr = t2LDRHi12 %[[P]], 0, 14, $noreg :: (load 2)
+
+ G_STORE %1(s16), %0(p0) :: (store 2)
+ ; CHECK: t2STRHi12 %[[V]], %[[P]], 0, 14, $noreg :: (store 2)
+
+ BX_RET 14, $noreg
+ ; CHECK: BX_RET 14, $noreg
+...
+---
+name: test_s32
+# CHECK-LABEL: name: test_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(p0) = COPY $r0
+ ; CHECK: %[[P:[0-9]+]]:gpr = COPY $r0
+
+ %1(s32) = G_LOAD %0(p0) :: (load 4)
+ ; CHECK: %[[V:[0-9]+]]:gpr = t2LDRi12 %[[P]], 0, 14, $noreg :: (load 4)
+
+ G_STORE %1(s32), %0(p0) :: (store 4)
+ ; CHECK: t2STRi12 %[[V]], %[[P]], 0, 14, $noreg :: (store 4)
+
+ BX_RET 14, $noreg
+ ; CHECK: BX_RET 14, $noreg
+...
diff --git a/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir b/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir
new file mode 100644
index 000000000000..d63c59942b78
--- /dev/null
+++ b/test/CodeGen/ARM/GlobalISel/thumb-select-logical-ops.mir
@@ -0,0 +1,219 @@
+# RUN: llc -O0 -mtriple thumb-- -mattr=+v6t2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ define void @test_and_regs() { ret void }
+ define void @test_and_imm() { ret void }
+
+ define void @test_bfc() { ret void }
+ define void @test_no_bfc_bad_mask() { ret void }
+
+ define void @test_mvn() { ret void }
+ define void @test_bic() { ret void }
+ define void @test_orn() { ret void }
+...
+---
+name: test_and_regs
+# CHECK-LABEL: name: test_and_regs
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = COPY $r1
+ ; CHECK: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = G_AND %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ANDrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_and_imm
+# CHECK-LABEL: name: test_and_imm
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ %2(s32) = G_AND %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ANDri [[VREGX]], 786444, 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_bfc
+# CHECK-LABEL: name: test_bfc
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 -65529 ; 0xFFFF0007
+ %2(s32) = G_AND %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2BFC [[VREGX]], -65529, 14, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_no_bfc_bad_mask
+# CHECK-LABEL: name: test_no_bfc_bad_mask
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 786444 ; 0x000c000c
+ %2(s32) = G_AND %0, %1
+ ; CHECK-NOT: t2BFC
+
+ $r0 = COPY %2(s32)
+
+ BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_mvn
+# CHECK-LABEL: name: test_mvn
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0
+
+ %0(s32) = COPY $r0
+ ; CHECK: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+
+ %1(s32) = G_CONSTANT i32 -1
+ %2(s32) = G_XOR %0, %1
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2MVNr [[VREGX]], 14, $noreg, $noreg
+
+ $r0 = COPY %2(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_bic
+# CHECK-LABEL: name: test_bic
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ ; CHECK-DAG: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+ ; CHECK-DAG: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = G_CONSTANT i32 -1
+ %3(s32) = G_XOR %1, %2
+
+ %4(s32) = G_AND %0, %3
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2BICrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+ $r0 = COPY %4(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
+---
+name: test_orn
+# CHECK-LABEL: name: test_orn
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+ - { id: 4, class: gprb }
+body: |
+ bb.0:
+ liveins: $r0, $r1
+
+ %0(s32) = COPY $r0
+ %1(s32) = COPY $r1
+ ; CHECK-DAG: [[VREGX:%[0-9]+]]:rgpr = COPY $r0
+ ; CHECK-DAG: [[VREGY:%[0-9]+]]:rgpr = COPY $r1
+
+ %2(s32) = G_CONSTANT i32 -1
+ %3(s32) = G_XOR %1, %2
+
+ %4(s32) = G_OR %0, %3
+ ; CHECK: [[VREGRES:%[0-9]+]]:rgpr = t2ORNrr [[VREGX]], [[VREGY]], 14, $noreg, $noreg
+
+ $r0 = COPY %4(s32)
+ ; CHECK: $r0 = COPY [[VREGRES]]
+
+ BX_RET 14, $noreg, implicit $r0
+ ; CHECK: BX_RET 14, $noreg, implicit $r0
+...
diff --git a/test/CodeGen/ARM/Windows/alloca.ll b/test/CodeGen/ARM/Windows/alloca.ll
index ea893fa9163f..203333e71a8e 100644
--- a/test/CodeGen/ARM/Windows/alloca.ll
+++ b/test/CodeGen/ARM/Windows/alloca.ll
@@ -19,7 +19,7 @@ entry:
; because we do not have the kill flag on R0.
; CHECK: mov.w [[R1:lr]], #7
; CHECK: add.w [[R0:r[0-9]+]], [[R1]], [[R0]], lsl #2
-; CHECK: bic [[R0]], [[R0]], #7
+; CHECK: bic [[R0]], [[R0]], #4
; CHECK: lsrs r4, [[R0]], #2
; CHECK: bl __chkstk
; CHECK: sub.w sp, sp, r4
diff --git a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
index 019298d20808..e75df160e004 100644
--- a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
+++ b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
@@ -19,9 +19,9 @@ entry:
; CHECK-LABEL: isel
; CHECK: push {r4, r5, r6, lr}
-; CHECK: movw r12, #0
-; CHECK: movt r12, #0
-; CHECK: movw r4, #{{\d*}}
+; CHECK-DAG: movw r12, #0
+; CHECK-DAG: movt r12, #0
+; CHECK-DAG: movw r4, #{{\d*}}
; CHECK: blx r12
; CHECK: sub.w sp, sp, r4
diff --git a/test/CodeGen/ARM/Windows/chkstk.ll b/test/CodeGen/ARM/Windows/chkstk.ll
index 330c1f458500..8fd414614598 100644
--- a/test/CodeGen/ARM/Windows/chkstk.ll
+++ b/test/CodeGen/ARM/Windows/chkstk.ll
@@ -16,9 +16,9 @@ entry:
; CHECK-DEFAULT-CODE-MODEL: sub.w sp, sp, r4
; CHECK-LARGE-CODE-MODEL: check_watermark:
-; CHECK-LARGE-CODE-MODEL: movw r12, :lower16:__chkstk
-; CHECK-LARGE-CODE-MODEL: movt r12, :upper16:__chkstk
-; CHECK-LARGE-CODE-MODEL: movw r4, #1024
+; CHECK-LARGE-CODE-MODEL-DAG: movw r12, :lower16:__chkstk
+; CHECK-LARGE-CODE-MODEL-DAG: movt r12, :upper16:__chkstk
+; CHECK-LARGE-CODE-MODEL-DAG: movw r4, #1024
; CHECK-LARGE-CODE-MODEL: blx r12
; CHECK-LARGE-CODE-MODEL: sub.w sp, sp, r4
diff --git a/test/CodeGen/ARM/Windows/frame-register.ll b/test/CodeGen/ARM/Windows/frame-register.ll
index 7ecfc1a71714..6605ffc60f4a 100644
--- a/test/CodeGen/ARM/Windows/frame-register.ll
+++ b/test/CodeGen/ARM/Windows/frame-register.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple thumbv7-windows -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumbv7-windows -frame-pointer=all -filetype asm -o - %s \
; RUN: | FileCheck %s
declare void @callee(i32)
diff --git a/test/CodeGen/ARM/Windows/memset.ll b/test/CodeGen/ARM/Windows/memset.ll
index c9b22f47a152..8cb257c15660 100644
--- a/test/CodeGen/ARM/Windows/memset.ll
+++ b/test/CodeGen/ARM/Windows/memset.ll
@@ -10,9 +10,9 @@ entry:
unreachable
}
-; CHECK: movw r0, :lower16:source
-; CHECK: movt r0, :upper16:source
; CHECK: movs r1, #0
; CHECK: mov.w r2, #512
+; CHECK: movw r0, :lower16:source
+; CHECK: movt r0, :upper16:source
; CHECK: memset
diff --git a/test/CodeGen/ARM/Windows/mingw-refptr.ll b/test/CodeGen/ARM/Windows/mingw-refptr.ll
new file mode 100644
index 000000000000..06b3536b7db9
--- /dev/null
+++ b/test/CodeGen/ARM/Windows/mingw-refptr.ll
@@ -0,0 +1,79 @@
+; RUN: llc < %s -mtriple=thumbv7-w64-mingw32 | FileCheck %s
+
+@var = external local_unnamed_addr global i32, align 4
+@dsolocalvar = external dso_local local_unnamed_addr global i32, align 4
+@localvar = dso_local local_unnamed_addr global i32 0, align 4
+@localcommon = common dso_local local_unnamed_addr global i32 0, align 4
+@extvar = external dllimport local_unnamed_addr global i32, align 4
+
+define dso_local i32 @getVar() {
+; CHECK-LABEL: getVar:
+; CHECK: movw r0, :lower16:.refptr.var
+; CHECK: movt r0, :upper16:.refptr.var
+; CHECK: ldr r0, [r0]
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %0 = load i32, i32* @var, align 4
+ ret i32 %0
+}
+
+define dso_local i32 @getDsoLocalVar() {
+; CHECK-LABEL: getDsoLocalVar:
+; CHECK: movw r0, :lower16:dsolocalvar
+; CHECK: movt r0, :upper16:dsolocalvar
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %0 = load i32, i32* @dsolocalvar, align 4
+ ret i32 %0
+}
+
+define dso_local i32 @getLocalVar() {
+; CHECK-LABEL: getLocalVar:
+; CHECK: movw r0, :lower16:localvar
+; CHECK: movt r0, :upper16:localvar
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %0 = load i32, i32* @localvar, align 4
+ ret i32 %0
+}
+
+define dso_local i32 @getLocalCommon() {
+; CHECK-LABEL: getLocalCommon:
+; CHECK: movw r0, :lower16:localcommon
+; CHECK: movt r0, :upper16:localcommon
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %0 = load i32, i32* @localcommon, align 4
+ ret i32 %0
+}
+
+define dso_local i32 @getExtVar() {
+; CHECK-LABEL: getExtVar:
+; CHECK: movw r0, :lower16:__imp_extvar
+; CHECK: movt r0, :upper16:__imp_extvar
+; CHECK: ldr r0, [r0]
+; CHECK: ldr r0, [r0]
+; CHECK: bx lr
+entry:
+ %0 = load i32, i32* @extvar, align 4
+ ret i32 %0
+}
+
+define dso_local void @callFunc() {
+; CHECK-LABEL: callFunc:
+; CHECK: b otherFunc
+entry:
+ tail call void @otherFunc()
+ ret void
+}
+
+declare dso_local void @otherFunc()
+
+; CHECK: .section .rdata$.refptr.var,"dr",discard,.refptr.var
+; CHECK: .globl .refptr.var
+; CHECK: .refptr.var:
+; CHECK: .long var
diff --git a/test/CodeGen/ARM/Windows/pic.ll b/test/CodeGen/ARM/Windows/pic.ll
index df4c400035a3..958fc26e4f57 100644
--- a/test/CodeGen/ARM/Windows/pic.ll
+++ b/test/CodeGen/ARM/Windows/pic.ll
@@ -18,6 +18,7 @@ entry:
; CHECK-WIN: ldrb r0, [r0]
; CHECK-GNU-LABEL: return_external
-; CHECK-GNU: movw r0, :lower16:external
-; CHECK-GNU: movt r0, :upper16:external
+; CHECK-GNU: movw r0, :lower16:.refptr.external
+; CHECK-GNU: movt r0, :upper16:.refptr.external
+; CHECK-GNU: ldr r0, [r0]
; CHECK-GNU: ldrb r0, [r0]
diff --git a/test/CodeGen/ARM/Windows/vla.ll b/test/CodeGen/ARM/Windows/vla.ll
index 03709758048d..f095197f3d64 100644
--- a/test/CodeGen/ARM/Windows/vla.ll
+++ b/test/CodeGen/ARM/Windows/vla.ll
@@ -14,13 +14,13 @@ entry:
}
; CHECK-SMALL-CODE: adds [[R4:r[0-9]+]], #7
-; CHECK-SMALL-CODE: bic [[R4]], [[R4]], #7
+; CHECK-SMALL-CODE: bic [[R4]], [[R4]], #4
; CHECK-SMALL-CODE: lsrs r4, [[R4]], #2
; CHECK-SMALL-CODE: bl __chkstk
; CHECK-SMALL-CODE: sub.w sp, sp, r4
; CHECK-LARGE-CODE: adds [[R4:r[0-9]+]], #7
-; CHECK-LARGE-CODE: bic [[R4]], [[R4]], #7
+; CHECK-LARGE-CODE: bic [[R4]], [[R4]], #4
; CHECK-LARGE-CODE: lsrs r4, [[R4]], #2
; CHECK-LARGE-CODE: movw [[IP:r[0-9]+]], :lower16:__chkstk
; CHECK-LARGE-CODE: movt [[IP]], :upper16:__chkstk
diff --git a/test/CodeGen/ARM/acle-intrinsics-rot.ll b/test/CodeGen/ARM/acle-intrinsics-rot.ll
new file mode 100644
index 000000000000..fab4d0e765c3
--- /dev/null
+++ b/test/CodeGen/ARM/acle-intrinsics-rot.ll
@@ -0,0 +1,143 @@
+; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7em %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv6 %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv7 %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv8 %s -o - | FileCheck %s
+
+; CHECK-LABEL: sxtb16_ror_8
+; CHECK: sxtb16 r0, r0, ror #8
+define i32 @sxtb16_ror_8(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 8
+ %shl.i = shl i32 %a, 24
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: sxtb16_ror_16
+; CHECK: sxtb16 r0, r0, ror #16
+define i32 @sxtb16_ror_16(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 16
+ %shl.i = shl i32 %a, 16
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: sxtb16_ror_24
+; CHECK: sxtb16 r0, r0, ror #24
+define i32 @sxtb16_ror_24(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 24
+ %shl.i = shl i32 %a, 8
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_8
+; CHECK: uxtb16 r0, r0, ror #8
+define i32 @uxtb16_ror_8(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 8
+ %shl.i = shl i32 %a, 24
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_16
+; CHECK: uxtb16 r0, r0, ror #16
+define i32 @uxtb16_ror_16(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 16
+ %shl.i = shl i32 %a, 16
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtb16_ror_24
+; CHECK: uxtb16 r0, r0, ror #24
+define i32 @uxtb16_ror_24(i32 %a) {
+entry:
+ %shr.i = lshr i32 %a, 24
+ %shl.i = shl i32 %a, 8
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtb16(i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_8
+; CHECK: sxtab16 r0, r0, r1, ror #8
+define i32 @sxtab16_ror_8(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 8
+ %shl.i = shl i32 %b, 24
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_16
+; CHECK: sxtab16 r0, r0, r1, ror #16
+define i32 @sxtab16_ror_16(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 16
+ %shl.i = shl i32 %b, 16
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: sxtab16_ror_24
+; CHECK: sxtab16 r0, r0, r1, ror #24
+define i32 @sxtab16_ror_24(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 24
+ %shl.i = shl i32 %b, 8
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.sxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_8
+; CHECK: uxtab16 r0, r0, r1, ror #8
+define i32 @uxtab16_ror_8(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 8
+ %shl.i = shl i32 %b, 24
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_16
+; CHECK: uxtab16 r0, r0, r1, ror #16
+define i32 @uxtab16_ror_16(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 16
+ %shl.i = shl i32 %b, 16
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+; CHECK-LABEL: uxtab16_ror_24
+; CHECK: uxtab16 r0, r0, r1, ror #24
+define i32 @uxtab16_ror_24(i32 %a, i32 %b) {
+entry:
+ %shr.i = lshr i32 %b, 24
+ %shl.i = shl i32 %b, 8
+ %or.i = or i32 %shl.i, %shr.i
+ %0 = tail call i32 @llvm.arm.uxtab16(i32 %a, i32 %or.i)
+ ret i32 %0
+}
+
+declare i32 @llvm.arm.sxtb16(i32)
+declare i32 @llvm.arm.uxtb16(i32)
+declare i32 @llvm.arm.sxtab16(i32, i32)
+declare i32 @llvm.arm.uxtab16(i32, i32)
+
diff --git a/test/CodeGen/ARM/alloca-align.ll b/test/CodeGen/ARM/alloca-align.ll
index 6186d137ef7f..3326d361c07f 100644
--- a/test/CodeGen/ARM/alloca-align.ll
+++ b/test/CodeGen/ARM/alloca-align.ll
@@ -12,8 +12,7 @@ declare void @bar(i32*, [20000 x i8]* byval)
; And a base pointer getting used.
; CHECK: mov r6, sp
; Which is passed to the call
-; CHECK: add [[REG:r[0-9]+|lr]], r6, #19456
-; CHECK: add r0, [[REG]], #536
+; CHECK: mov r0, r6
; CHECK: bl bar
define void @foo([20000 x i8]* %addr) {
%tmp = alloca [4 x i32], align 32
diff --git a/test/CodeGen/ARM/analyze-branch-bkpt.ll b/test/CodeGen/ARM/analyze-branch-bkpt.ll
new file mode 100644
index 000000000000..cba89fe99870
--- /dev/null
+++ b/test/CodeGen/ARM/analyze-branch-bkpt.ll
@@ -0,0 +1,61 @@
+; RUN: llc -o - %s -mtriple thumbv4-unknown-linux-android | FileCheck --check-prefix=V4 %s
+; RUN: llc -o - %s -mtriple thumbv5-unknown-linux-android | FileCheck --check-prefix=V5 %s
+
+; V4: udf #254
+; V5: bkpt #0
+
+define i1 @a(i32 %b) !dbg !3 {
+ br i1 undef, label %c, label %d, !dbg !4
+
+d: ; preds = %0
+ call void @llvm.debugtrap()
+ br label %ah, !dbg !4
+
+c: ; preds = %0
+ %aj = icmp ne i20 undef, 5
+ br label %ah, !dbg !4
+
+ah: ; preds = %c, %d
+ %ak = phi i1 [ false, %d ], [ %aj, %c ]
+ call void @llvm.dbg.value(metadata i1 %ak, metadata !7, metadata !DIExpression()), !dbg !9
+ switch i32 %b, label %al [
+ i32 0, label %am
+ i32 10, label %an
+ ]
+
+an: ; preds = %ah
+ %ch = select i1 %ak, i32 0, i32 5
+ br label %am, !dbg !10
+
+al: ; preds = %ah
+ br label %am, !dbg !9
+
+am: ; preds = %al, %an, %ah
+ %1 = phi i32 [ 0, %al ], [ %ch, %an ], [ %b, %ah ]
+ unreachable
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #0
+
+; Function Attrs: nounwind
+declare void @llvm.debugtrap() #1
+
+attributes #0 = { nounwind readnone speculatable }
+attributes #1 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !1, isOptimized: false, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "a", directory: "")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(scope: null, isLocal: false, isDefinition: true, isOptimized: false, unit: !0)
+!4 = !DILocation(line: 0, scope: !5, inlinedAt: !6)
+!5 = distinct !DISubprogram(scope: null, isLocal: false, isDefinition: true, isOptimized: false, unit: !0)
+!6 = !DILocation(line: 0, scope: !3)
+!7 = !DILocalVariable(scope: !8)
+!8 = distinct !DISubprogram(scope: null, isLocal: false, isDefinition: true, isOptimized: false, unit: !0)
+!9 = !DILocation(line: 0, scope: !8, inlinedAt: !6)
+!10 = !DILocation(line: 0, scope: !11, inlinedAt: !6)
+!11 = !DILexicalBlock(scope: !8)
diff --git a/test/CodeGen/ARM/and-cmpz.ll b/test/CodeGen/ARM/and-cmpz.ll
index fb5f8fe280f2..b327f04ba06f 100644
--- a/test/CodeGen/ARM/and-cmpz.ll
+++ b/test/CodeGen/ARM/and-cmpz.ll
@@ -89,11 +89,10 @@ false:
}
; CHECK-LABEL: i16_cmpz:
-; T1: movs r2, #127
-; T1-NEXT: lsls r2, r2, #9
-; T1-NEXT: ands r2, r0
-; T1-NEXT: lsrs r0, r2, #9
-; T2: and r0, r0, #65024
+; T1: uxth r0, r0
+; T1-NEXT: lsrs r0, r0, #9
+; T1-NEXT: bne
+; T2: uxth r0, r0
; T2-NEXT: movs r2, #0
; T2-NEXT: cmp.w r2, r0, lsr #9
define void @i16_cmpz(i16 %x, void (i32)* %foo) {
diff --git a/test/CodeGen/ARM/and-load-combine.ll b/test/CodeGen/ARM/and-load-combine.ll
index dac8e113d322..ef6a2ad7603f 100644
--- a/test/CodeGen/ARM/and-load-combine.ll
+++ b/test/CodeGen/ARM/and-load-combine.ll
@@ -4,8 +4,7 @@
; RUN: llc -mtriple=armv6m %s -o - | FileCheck %s --check-prefix=THUMB1
; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefix=THUMB2
-define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
- i16* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a, i16* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_short_short:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -29,8 +28,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: eors r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -51,8 +49,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_short_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -76,8 +73,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: eors r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -99,8 +95,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_int_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -124,8 +119,7 @@ define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: eors r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -146,8 +140,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor16:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
@@ -171,8 +164,7 @@ define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldrh r1, [r1]
; THUMB1-NEXT: eors r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -193,8 +185,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
- i16* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a, i16* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_short_short:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -218,8 +209,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: orrs r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -240,8 +230,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_short_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -265,8 +254,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: orrs r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -288,8 +276,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_int_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -313,8 +300,7 @@ define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: orrs r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -335,8 +321,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or16:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r0, [r0]
@@ -360,8 +345,7 @@ define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrh r0, [r0]
; THUMB1-NEXT: ldrh r1, [r1]
; THUMB1-NEXT: orrs r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -382,8 +366,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
- i16* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a, i16* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_short_short:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r1, [r1]
@@ -407,8 +390,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: ldrb r2, [r0]
; THUMB1-NEXT: ands r2, r1
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r2
+; THUMB1-NEXT: rsbs r0, r2, #0
; THUMB1-NEXT: adcs r0, r2
; THUMB1-NEXT: bx lr
;
@@ -429,8 +411,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_short_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r0, [r0]
@@ -454,8 +435,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
; THUMB1-NEXT: ldrb r0, [r0]
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: ands r1, r0
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -477,8 +457,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_int_int:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r1, [r1]
@@ -502,8 +481,7 @@ define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrb r1, [r1]
; THUMB1-NEXT: ldrb r2, [r0]
; THUMB1-NEXT: ands r2, r1
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r2
+; THUMB1-NEXT: rsbs r0, r2, #0
; THUMB1-NEXT: adcs r0, r2
; THUMB1-NEXT: bx lr
;
@@ -524,8 +502,7 @@ entry:
ret i1 %cmp
}
-define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
- i32* nocapture readonly %b) {
+define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and16:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrh r1, [r1]
@@ -549,8 +526,7 @@ define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
; THUMB1-NEXT: ldrh r1, [r1]
; THUMB1-NEXT: ldrh r2, [r0]
; THUMB1-NEXT: ands r2, r1
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r2
+; THUMB1-NEXT: rsbs r0, r2, #0
; THUMB1-NEXT: adcs r0, r2
; THUMB1-NEXT: bx lr
;
@@ -893,8 +869,7 @@ define arm_aapcscc i1 @test6(i8* %x, i8 %y, i8 %z) {
; THUMB1-NEXT: ands r0, r1
; THUMB1-NEXT: uxtb r1, r2
; THUMB1-NEXT: subs r1, r0, r1
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -941,8 +916,7 @@ define arm_aapcscc i1 @test7(i16* %x, i16 %y, i8 %z) {
; THUMB1-NEXT: ands r0, r1
; THUMB1-NEXT: uxtb r1, r2
; THUMB1-NEXT: subs r1, r0, r1
-; THUMB1-NEXT: movs r0, #0
-; THUMB1-NEXT: subs r0, r0, r1
+; THUMB1-NEXT: rsbs r0, r1, #0
; THUMB1-NEXT: adcs r0, r1
; THUMB1-NEXT: bx lr
;
@@ -1037,6 +1011,7 @@ entry:
ret void
}
+define arm_aapcscc void @test10(i32* nocapture %p) {
; ARM-LABEL: test10:
; ARM: @ %bb.0: @ %entry
; ARM-NEXT: ldrb r1, [r0]
@@ -1065,7 +1040,6 @@ entry:
; THUMB2-NEXT: eor r1, r1, #255
; THUMB2-NEXT: str r1, [r0]
; THUMB2-NEXT: bx lr
-define arm_aapcscc void @test10(i32* nocapture %p) {
entry:
%0 = load i32, i32* %p, align 4
%neg = and i32 %0, 255
@@ -1074,3 +1048,540 @@ entry:
ret void
}
+define arm_aapcscc i32 @test11(i32* nocapture %p) {
+; ARM-LABEL: test11:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #1]
+; ARM-NEXT: lsl r0, r0, #8
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test11:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #2]
+; ARMEB-NEXT: lsl r0, r0, #8
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test11:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #1]
+; THUMB1-NEXT: lsls r0, r0, #8
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test11:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #1]
+; THUMB2-NEXT: lsls r0, r0, #8
+; THUMB2-NEXT: bx lr
+ %1 = load i32, i32* %p, align 4
+ %and = and i32 %1, 65280
+ ret i32 %and
+}
+
+define arm_aapcscc i32 @test12(i32* nocapture %p) {
+; ARM-LABEL: test12:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #2]
+; ARM-NEXT: lsl r0, r0, #16
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test12:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #1]
+; ARMEB-NEXT: lsl r0, r0, #16
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test12:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #2]
+; THUMB1-NEXT: lsls r0, r0, #16
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test12:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #2]
+; THUMB2-NEXT: lsls r0, r0, #16
+; THUMB2-NEXT: bx lr
+ %1 = load i32, i32* %p, align 4
+ %and = and i32 %1, 16711680
+ ret i32 %and
+}
+
+define arm_aapcscc i32 @test13(i32* nocapture %p) {
+; ARM-LABEL: test13:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #3]
+; ARM-NEXT: lsl r0, r0, #24
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test13:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0]
+; ARMEB-NEXT: lsl r0, r0, #24
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test13:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #3]
+; THUMB1-NEXT: lsls r0, r0, #24
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test13:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #3]
+; THUMB2-NEXT: lsls r0, r0, #24
+; THUMB2-NEXT: bx lr
+ %1 = load i32, i32* %p, align 4
+ %and = and i32 %1, 4278190080
+ ret i32 %and
+}
+
+define arm_aapcscc i32 @test14(i32* nocapture %p) {
+; ARM-LABEL: test14:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r0, [r0, #1]
+; ARM-NEXT: lsl r0, r0, #8
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test14:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r0, [r0, #1]
+; ARMEB-NEXT: lsl r0, r0, #8
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test14:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldr r1, [r0]
+; THUMB1-NEXT: ldr r0, .LCPI26_0
+; THUMB1-NEXT: ands r0, r1
+; THUMB1-NEXT: bx lr
+; THUMB1-NEXT: .p2align 2
+; THUMB1-NEXT: @ %bb.1:
+; THUMB1-NEXT: .LCPI26_0:
+; THUMB1-NEXT: .long 16776960 @ 0xffff00
+;
+; THUMB2-LABEL: test14:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh.w r0, [r0, #1]
+; THUMB2-NEXT: lsls r0, r0, #8
+; THUMB2-NEXT: bx lr
+ %1 = load i32, i32* %p, align 4
+ %and = and i32 %1, 16776960
+ ret i32 %and
+}
+
+define arm_aapcscc i32 @test15(i32* nocapture %p) {
+; ARM-LABEL: test15:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r0, [r0, #2]
+; ARM-NEXT: lsl r0, r0, #16
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test15:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r0, [r0]
+; ARMEB-NEXT: lsl r0, r0, #16
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test15:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrh r0, [r0, #2]
+; THUMB1-NEXT: lsls r0, r0, #16
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test15:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh r0, [r0, #2]
+; THUMB2-NEXT: lsls r0, r0, #16
+; THUMB2-NEXT: bx lr
+ %1 = load i32, i32* %p, align 4
+ %and = and i32 %1, 4294901760
+ ret i32 %and
+}
+
+define arm_aapcscc i32 @test16(i64* nocapture %p) {
+; ARM-LABEL: test16:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #1]
+; ARM-NEXT: lsl r0, r0, #8
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test16:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #6]
+; ARMEB-NEXT: lsl r0, r0, #8
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test16:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #1]
+; THUMB1-NEXT: lsls r0, r0, #8
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test16:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #1]
+; THUMB2-NEXT: lsls r0, r0, #8
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 65280
+ %trunc = trunc i64 %and to i32
+ ret i32 %trunc
+}
+
+define arm_aapcscc i32 @test17(i64* nocapture %p) {
+; ARM-LABEL: test17:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #2]
+; ARM-NEXT: lsl r0, r0, #16
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test17:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #5]
+; ARMEB-NEXT: lsl r0, r0, #16
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test17:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #2]
+; THUMB1-NEXT: lsls r0, r0, #16
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test17:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #2]
+; THUMB2-NEXT: lsls r0, r0, #16
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 16711680
+ %trunc = trunc i64 %and to i32
+ ret i32 %trunc
+}
+
+define arm_aapcscc i32 @test18(i64* nocapture %p) {
+; ARM-LABEL: test18:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #3]
+; ARM-NEXT: lsl r0, r0, #24
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test18:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #4]
+; ARMEB-NEXT: lsl r0, r0, #24
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test18:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #3]
+; THUMB1-NEXT: lsls r0, r0, #24
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test18:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #3]
+; THUMB2-NEXT: lsls r0, r0, #24
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 4278190080
+ %trunc = trunc i64 %and to i32
+ ret i32 %trunc
+}
+
+define arm_aapcscc i64 @test19(i64* nocapture %p) {
+; ARM-LABEL: test19:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r1, [r0, #4]
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test19:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #3]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test19:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r1, [r0, #4]
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test19:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r1, [r0, #4]
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 1095216660480
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test20(i64* nocapture %p) {
+; ARM-LABEL: test20:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #5]
+; ARM-NEXT: lsl r1, r0, #8
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test20:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #2]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: lsl r0, r0, #8
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test20:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #5]
+; THUMB1-NEXT: lsls r1, r0, #8
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test20:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #5]
+; THUMB2-NEXT: lsls r1, r0, #8
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 280375465082880
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test21(i64* nocapture %p) {
+; ARM-LABEL: test21:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #6]
+; ARM-NEXT: lsl r1, r0, #16
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test21:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0, #1]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: lsl r0, r0, #16
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test21:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #6]
+; THUMB1-NEXT: lsls r1, r0, #16
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test21:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #6]
+; THUMB2-NEXT: lsls r1, r0, #16
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 71776119061217280
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test22(i64* nocapture %p) {
+; ARM-LABEL: test22:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r0, [r0, #7]
+; ARM-NEXT: lsl r1, r0, #24
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test22:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r0, [r0]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: lsl r0, r0, #24
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test22:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r0, [r0, #7]
+; THUMB1-NEXT: lsls r1, r0, #24
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test22:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r0, [r0, #7]
+; THUMB2-NEXT: lsls r1, r0, #24
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, -72057594037927936
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test23(i64* nocapture %p) {
+; ARM-LABEL: test23:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r1, [r0, #3]
+; ARM-NEXT: lsl r0, r1, #24
+; ARM-NEXT: lsr r1, r1, #8
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test23:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r1, [r0, #3]
+; ARMEB-NEXT: lsr r0, r1, #8
+; ARMEB-NEXT: lsl r1, r1, #24
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test23:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r1, [r0, #3]
+; THUMB1-NEXT: ldrb r0, [r0, #4]
+; THUMB1-NEXT: lsls r0, r0, #8
+; THUMB1-NEXT: adds r1, r0, r1
+; THUMB1-NEXT: lsls r0, r1, #24
+; THUMB1-NEXT: lsrs r1, r1, #8
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test23:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh.w r1, [r0, #3]
+; THUMB2-NEXT: lsls r0, r1, #24
+; THUMB2-NEXT: lsrs r1, r1, #8
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 1099494850560
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test24(i64* nocapture %p) {
+; ARM-LABEL: test24:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r1, [r0, #4]
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test24:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r0, [r0, #2]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test24:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrh r1, [r0, #4]
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test24:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh r1, [r0, #4]
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 281470681743360
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test25(i64* nocapture %p) {
+; ARM-LABEL: test25:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r0, [r0, #5]
+; ARM-NEXT: lsl r1, r0, #8
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test25:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r0, [r0, #1]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: lsl r0, r0, #8
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test25:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r1, [r0, #5]
+; THUMB1-NEXT: ldrb r0, [r0, #6]
+; THUMB1-NEXT: lsls r0, r0, #8
+; THUMB1-NEXT: adds r0, r0, r1
+; THUMB1-NEXT: lsls r1, r0, #8
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test25:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh.w r0, [r0, #5]
+; THUMB2-NEXT: lsls r1, r0, #8
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, 72056494526300160
+ ret i64 %and
+}
+
+define arm_aapcscc i64 @test26(i64* nocapture %p) {
+; ARM-LABEL: test26:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrh r0, [r0, #6]
+; ARM-NEXT: lsl r1, r0, #16
+; ARM-NEXT: mov r0, #0
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test26:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrh r0, [r0]
+; ARMEB-NEXT: mov r1, #0
+; ARMEB-NEXT: lsl r0, r0, #16
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test26:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrh r0, [r0, #6]
+; THUMB1-NEXT: lsls r1, r0, #16
+; THUMB1-NEXT: movs r0, #0
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test26:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrh r0, [r0, #6]
+; THUMB2-NEXT: lsls r1, r0, #16
+; THUMB2-NEXT: movs r0, #0
+; THUMB2-NEXT: bx lr
+ %1 = load i64, i64* %p, align 8
+ %and = and i64 %1, -281474976710656
+ ret i64 %and
+}
+
+; ARM-LABEL: test27:
+; ARM: @ %bb.0:
+; ARM-NEXT: ldrb r1, [r0, #1]
+; ARM-NEXT: lsl r1, r1, #16
+; ARM-NEXT: str r1, [r0]
+; ARM-NEXT: bx lr
+;
+; ARMEB-LABEL: test27:
+; ARMEB: @ %bb.0:
+; ARMEB-NEXT: ldrb r1, [r0, #2]
+; ARMEB-NEXT: lsl r1, r1, #16
+; ARMEB-NEXT: str r1, [r0]
+; ARMEB-NEXT: bx lr
+;
+; THUMB1-LABEL: test27:
+; THUMB1: @ %bb.0:
+; THUMB1-NEXT: ldrb r1, [r0, #1]
+; THUMB1-NEXT: lsls r1, r1, #16
+; THUMB1-NEXT: str r1, [r0]
+; THUMB1-NEXT: bx lr
+;
+; THUMB2-LABEL: test27:
+; THUMB2: @ %bb.0:
+; THUMB2-NEXT: ldrb r1, [r0, #1]
+; THUMB2-NEXT: lsls r1, r1, #16
+; THUMB2-NEXT: str r1, [r0]
+; THUMB2-NEXT: bx lr
+define void @test27(i32* nocapture %ptr) {
+entry:
+ %0 = load i32, i32* %ptr, align 4
+ %and = and i32 %0, 65280
+ %shl = shl i32 %and, 8
+ store i32 %shl, i32* %ptr, align 4
+ ret void
+}
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index c6ca6a624b11..b81cf443e537 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -153,18 +153,17 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) {
; THUMB-NEXT: movs r2, r0
; THUMB-NEXT: movs r0, #1
; THUMB-NEXT: ands r0, r2
-; THUMB-NEXT: subs r2, r0, #1
; THUMB-NEXT: lsls r1, r1, #31
; THUMB-NEXT: beq .LBB2_2
; THUMB-NEXT: @ %bb.1:
-; THUMB-NEXT: movs r0, r2
+; THUMB-NEXT: subs r0, r0, #1
; THUMB-NEXT: .LBB2_2:
; THUMB-NEXT: bx lr
;
; T2-LABEL: test_tst_assessment:
; T2: @ %bb.0:
-; T2-NEXT: lsls r1, r1, #31
; T2-NEXT: and r0, r0, #1
+; T2-NEXT: lsls r1, r1, #31
; T2-NEXT: it ne
; T2-NEXT: subne r0, #1
; T2-NEXT: bx lr
diff --git a/test/CodeGen/ARM/arm-cgp-phis-calls-ret.ll b/test/CodeGen/ARM/arm-cgp-phis-calls-ret.ll
deleted file mode 100644
index 8587a907616d..000000000000
--- a/test/CodeGen/ARM/arm-cgp-phis-calls-ret.ll
+++ /dev/null
@@ -1,392 +0,0 @@
-; RUN: llc -mtriple=thumbv7m -arm-disable-cgp=false %s -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
-; RUN: llc -mtriple=thumbv8m.main -arm-disable-cgp=false %s -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-NODSP
-; RUN: llc -mtriple=thumbv8m.main -arm-disable-cgp=false -arm-enable-scalar-dsp=true -mcpu=cortex-m33 %s -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP
-; RUN: llc -mtriple=thumbv7em %s -arm-disable-cgp=false -arm-enable-scalar-dsp=true -arm-enable-scalar-dsp-imms=true -o - | FileCheck %s --check-prefix=CHECK-COMMON --check-prefix=CHECK-DSP-IMM
-
-; Test that ARMCodeGenPrepare can handle:
-; - loops
-; - call operands
-; - call return values
-; - ret instructions
-; We use nuw on the arithmetic instructions to avoid complications.
-
-; Check that the arguments are extended but then nothing else is.
-; This also ensures that the pass can handle loops.
-; CHECK-COMMON-LABEL: phi_feeding_phi_args
-; CHECK-COMMON: uxtb
-; CHECK-COMMON: uxtb
-; CHECK-NOT: uxtb
-define void @phi_feeding_phi_args(i8 %a, i8 %b) {
-entry:
- %0 = icmp ugt i8 %a, %b
- br i1 %0, label %preheader, label %empty
-
-empty:
- br label %preheader
-
-preheader:
- %1 = phi i8 [ %a, %entry ], [ %b, %empty ]
- br label %loop
-
-loop:
- %val = phi i8 [ %1, %preheader ], [ %inc2, %if.end ]
- %cmp = icmp ult i8 %val, 254
- br i1 %cmp, label %if.then, label %if.else
-
-if.then:
- %inc = sub nuw i8 %val, 2
- br label %if.end
-
-if.else:
- %inc1 = shl nuw i8 %val, 1
- br label %if.end
-
-if.end:
- %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
- %cmp1 = icmp eq i8 %inc2, 255
- br i1 %cmp1, label %exit, label %loop
-
-exit:
- ret void
-}
-
-; Same as above, but as the args are zeroext, we shouldn't see any uxts.
-; CHECK-COMMON-LABEL: phi_feeding_phi_zeroext_args
-; CHECK-COMMON-NOT: uxt
-define void @phi_feeding_phi_zeroext_args(i8 zeroext %a, i8 zeroext %b) {
-entry:
- %0 = icmp ugt i8 %a, %b
- br i1 %0, label %preheader, label %empty
-
-empty:
- br label %preheader
-
-preheader:
- %1 = phi i8 [ %a, %entry ], [ %b, %empty ]
- br label %loop
-
-loop:
- %val = phi i8 [ %1, %preheader ], [ %inc2, %if.end ]
- %cmp = icmp ult i8 %val, 254
- br i1 %cmp, label %if.then, label %if.else
-
-if.then:
- %inc = sub nuw i8 %val, 2
- br label %if.end
-
-if.else:
- %inc1 = shl nuw i8 %val, 1
- br label %if.end
-
-if.end:
- %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
- %cmp1 = icmp eq i8 %inc2, 255
- br i1 %cmp1, label %exit, label %loop
-
-exit:
- ret void
-}
-
-; Just check that phis also work with i16s.
-; CHECK-COMMON-LABEL: phi_i16:
-; CHECK-COMMON-NOT: uxt
-define void @phi_i16() {
-entry:
- br label %loop
-
-loop:
- %val = phi i16 [ 0, %entry ], [ %inc2, %if.end ]
- %cmp = icmp ult i16 %val, 128
- br i1 %cmp, label %if.then, label %if.else
-
-if.then:
- %inc = add nuw i16 %val, 2
- br label %if.end
-
-if.else:
- %inc1 = add nuw i16 %val, 1
- br label %if.end
-
-if.end:
- %inc2 = phi i16 [ %inc, %if.then], [ %inc1, %if.else ]
- %cmp1 = icmp ult i16 %inc2, 253
- br i1 %cmp1, label %loop, label %exit
-
-exit:
- ret void
-}
-
-; CHECK-COMMON-LABEL: phi_feeding_switch
-; CHECK-COMMON: ldrb
-; CHECK-COMMON: uxtb
-; CHECK-COMMON-NOT: uxt
-define void @phi_feeding_switch(i8* %memblock, i8* %store, i16 %arg) {
-entry:
- %pre = load i8, i8* %memblock, align 1
- %conv = trunc i16 %arg to i8
- br label %header
-
-header:
- %phi.0 = phi i8 [ %pre, %entry ], [ %count, %latch ]
- %phi.1 = phi i8 [ %conv, %entry ], [ %phi.3, %latch ]
- %phi.2 = phi i8 [ 0, %entry], [ %count, %latch ]
- switch i8 %phi.0, label %default [
- i8 43, label %for.inc.i
- i8 45, label %for.inc.i.i
- ]
-
-for.inc.i:
- %xor = xor i8 %phi.1, 1
- br label %latch
-
-for.inc.i.i:
- %and = and i8 %phi.1, 3
- br label %latch
-
-default:
- %sub = sub i8 %phi.0, 1
- %cmp2 = icmp ugt i8 %sub, 4
- br i1 %cmp2, label %latch, label %exit
-
-latch:
- %phi.3 = phi i8 [ %xor, %for.inc.i ], [ %and, %for.inc.i.i ], [ %phi.2, %default ]
- %count = add nuw i8 %phi.2, 1
- store i8 %count, i8* %store, align 1
- br label %header
-
-exit:
- ret void
-}
-
-; CHECK-COMMON-LABEL: ret_i8
-; CHECK-COMMON-NOT: uxt
-define i8 @ret_i8() {
-entry:
- br label %loop
-
-loop:
- %val = phi i8 [ 0, %entry ], [ %inc2, %if.end ]
- %cmp = icmp ult i8 %val, 128
- br i1 %cmp, label %if.then, label %if.else
-
-if.then:
- %inc = add nuw i8 %val, 2
- br label %if.end
-
-if.else:
- %inc1 = add nuw i8 %val, 1
- br label %if.end
-
-if.end:
- %inc2 = phi i8 [ %inc, %if.then], [ %inc1, %if.else ]
- %cmp1 = icmp ult i8 %inc2, 253
- br i1 %cmp1, label %exit, label %loop
-
-exit:
- ret i8 %inc2
-}
-
-; Check that %exp requires uxth in all cases, and will also be required to
-; promote %1 for the call - unless we can generate a uadd16.
-; CHECK-COMMON-LABEL: zext_load_sink_call:
-; CHECK-COMMON: uxt
-; CHECK-DSP-IMM: uadd16
-; CHECK-COMMON: cmp
-; CHECK-DSP: uxt
-; CHECK-DSP-IMM-NOT: uxt
-define i32 @zext_load_sink_call(i16* %ptr, i16 %exp) {
-entry:
- %0 = load i16, i16* %ptr, align 4
- %1 = add i16 %exp, 3
- %cmp = icmp eq i16 %0, %exp
- br i1 %cmp, label %exit, label %if.then
-
-if.then:
- %conv0 = zext i16 %0 to i32
- %conv1 = zext i16 %1 to i32
- %call = tail call arm_aapcs_vfpcc i32 @dummy(i32 %conv0, i32 %conv1)
- br label %exit
-
-exit:
- %exitval = phi i32 [ %call, %if.then ], [ 0, %entry ]
- ret i32 %exitval
-}
-
-
-; Check that the pass doesn't try to promote the immediate parameters.
-; CHECK-COMMON-LABEL: call_with_imms
-; CHECK-COMMON-NOT: uxt
-define i8 @call_with_imms(i8* %arg) {
- %call = tail call arm_aapcs_vfpcc zeroext i8 @dummy2(i8* nonnull %arg, i8 zeroext 0, i8 zeroext 0)
- %cmp = icmp eq i8 %call, 0
- %res = select i1 %cmp, i8 %call, i8 1
- ret i8 %res
-}
-
-; Test that the call result is still extended.
-; CHECK-COMMON-LABEL: test_call:
-; CHECK-COMMON: bl
-; CHECK-COMMONNEXT: sxtb r1, r0
-define i16 @test_call(i8 zeroext %arg) {
- %call = call i8 @dummy_i8(i8 %arg)
- %cmp = icmp ult i8 %call, 128
- %conv = zext i1 %cmp to i16
- ret i16 %conv
-}
-
-; Test that the transformation bails when it finds that i16 is larger than i8.
-; TODO: We should be able to remove the uxtb in these cases.
-; CHECK-LABEL: promote_i8_sink_i16_1
-; CHECK-COMMON: bl dummy_i8
-; CHECK-COMMON: adds r0, #1
-; CHECK-COMMON: uxtb r0, r0
-; CHECK-COMMON: cmp r0
-define i16 @promote_i8_sink_i16_1(i8 zeroext %arg0, i16 zeroext %arg1, i16 zeroext %arg2) {
- %call = tail call zeroext i8 @dummy_i8(i8 %arg0)
- %add = add nuw i8 %call, 1
- %conv = zext i8 %add to i16
- %cmp = icmp ne i16 %conv, %arg1
- %sel = select i1 %cmp, i16 %arg1, i16 %arg2
- %res = tail call zeroext i16 @dummy3(i16 %sel)
- ret i16 %res
-}
-
-; CHECK-COMMON-LABEL: promote_i8_sink_i16_2
-; CHECK-COMMON: bl dummy_i8
-; CHECK-COMMON: adds r0, #1
-; CHECK-COMMON: uxtb r0, r0
-; CHECK-COMMON: cmp r0
-define i16 @promote_i8_sink_i16_2(i8 zeroext %arg0, i8 zeroext %arg1, i16 zeroext %arg2) {
- %call = tail call zeroext i8 @dummy_i8(i8 %arg0)
- %add = add nuw i8 %call, 1
- %cmp = icmp ne i8 %add, %arg1
- %conv = zext i8 %arg1 to i16
- %sel = select i1 %cmp, i16 %conv, i16 %arg2
- %res = tail call zeroext i16 @dummy3(i16 %sel)
- ret i16 %res
-}
-
-@uc = global i8 42, align 1
-@LL = global i64 0, align 8
-
-; CHECK-COMMON-LABEL: zext_i64
-; CHECK-COMMON: ldrb
-; CHECK-COMMON: strd
-define void @zext_i64() {
-entry:
- %0 = load i8, i8* @uc, align 1
- %conv = zext i8 %0 to i64
- store i64 %conv, i64* @LL, align 8
- %cmp = icmp eq i8 %0, 42
- %conv1 = zext i1 %cmp to i32
- %call = tail call i32 bitcast (i32 (...)* @assert to i32 (i32)*)(i32 %conv1)
- ret void
-}
-
-@a = global i16* null, align 4
-@b = global i32 0, align 4
-
-; CHECK-COMMON-LABEL: constexpr
-; CHECK-COMMON: uxth
-define i32 @constexpr() {
-entry:
- store i32 ptrtoint (i32* @b to i32), i32* @b, align 4
- %0 = load i16*, i16** @a, align 4
- %1 = load i16, i16* %0, align 2
- %or = or i16 %1, ptrtoint (i32* @b to i16)
- store i16 %or, i16* %0, align 2
- %cmp = icmp ne i16 %or, 4
- %conv3 = zext i1 %cmp to i32
- %call = tail call i32 bitcast (i32 (...)* @e to i32 (i32)*)(i32 %conv3) #2
- ret i32 undef
-}
-
-; Check that d.sroa.0.0.be is promoted passed directly into the tail call.
-; CHECK-COMMON-LABEL: check_zext_phi_call_arg
-; CHECK-COMMON-NOT: uxt
-define i32 @check_zext_phi_call_arg() {
-entry:
- br label %for.cond
-
-for.cond: ; preds = %for.cond.backedge, %entry
- %d.sroa.0.0 = phi i16 [ 30, %entry ], [ %d.sroa.0.0.be, %for.cond.backedge ]
- %tobool = icmp eq i16 %d.sroa.0.0, 0
- br i1 %tobool, label %for.cond.backedge, label %if.then
-
-for.cond.backedge: ; preds = %for.cond, %if.then
- %d.sroa.0.0.be = phi i16 [ %call, %if.then ], [ 0, %for.cond ]
- br label %for.cond
-
-if.then: ; preds = %for.cond
- %d.sroa.0.0.insert.ext = zext i16 %d.sroa.0.0 to i32
- %call = tail call zeroext i16 bitcast (i16 (...)* @f to i16 (i32)*)(i32 %d.sroa.0.0.insert.ext) #2
- br label %for.cond.backedge
-}
-
-
-; The call to safe_lshift_func takes two parameters, but they're the same value just one is zext.
-; CHECK-COMMON-LABEL: call_zext_i8_i32
-define fastcc i32 @call_zext_i8_i32(i32 %p_45, i8 zeroext %p_46) {
-for.cond8.preheader:
- %call217 = call fastcc zeroext i8 @safe_mul_func_uint8_t_u_u(i8 zeroext undef)
- %tobool219 = icmp eq i8 %call217, 0
- br i1 %tobool219, label %for.end411, label %for.cond273.preheader
-
-for.cond273.preheader: ; preds = %for.cond8.preheader
- %call217.lcssa = phi i8 [ %call217, %for.cond8.preheader ]
- %conv218.le = zext i8 %call217.lcssa to i32
- %call346 = call fastcc zeroext i8 @safe_lshift_func(i8 zeroext %call217.lcssa, i32 %conv218.le)
- unreachable
-
-for.end411: ; preds = %for.cond8.preheader
- %call452 = call fastcc i64 @safe_sub_func_int64_t_s_s(i64 undef, i64 4)
- unreachable
-}
-
-%struct.anon = type { i32 }
-
-@g_57 = hidden local_unnamed_addr global %struct.anon zeroinitializer, align 4
-@g_893 = hidden local_unnamed_addr global %struct.anon zeroinitializer, align 4
-@g_82 = hidden local_unnamed_addr global i32 0, align 4
-
-; Test that the transform bails on finding a call which returns a i16**
-; CHECK-COMMON-LABEL: call_return_pointer
-; CHECK-COMMON: sxth
-; CHECK-COMMON-NOT: uxt
-define hidden i32 @call_return_pointer(i8 zeroext %p_13) local_unnamed_addr #0 {
-entry:
- %conv1 = zext i8 %p_13 to i16
- %call = tail call i16** @func_62(i8 zeroext undef, i32 undef, i16 signext %conv1, i32* undef)
- %0 = load i32, i32* getelementptr inbounds (%struct.anon, %struct.anon* @g_893, i32 0, i32 0), align 4
- %conv2 = trunc i32 %0 to i16
- br label %for.cond
-
-for.cond: ; preds = %for.cond.backedge, %entry
- %p_13.addr.0 = phi i8 [ %p_13, %entry ], [ %p_13.addr.0.be, %for.cond.backedge ]
- %tobool = icmp eq i8 %p_13.addr.0, 0
- br i1 %tobool, label %for.cond.backedge, label %if.then
-
-for.cond.backedge: ; preds = %for.cond, %if.then
- %p_13.addr.0.be = phi i8 [ %conv4, %if.then ], [ 0, %for.cond ]
- br label %for.cond
-
-if.then: ; preds = %for.cond
- %call3 = tail call fastcc signext i16 @safe_sub_func_int16_t_s_s(i16 signext %conv2)
- %conv4 = trunc i16 %call3 to i8
- br label %for.cond.backedge
-}
-
-declare noalias i16** @func_62(i8 zeroext %p_63, i32 %p_64, i16 signext %p_65, i32* nocapture readnone %p_66)
-declare fastcc signext i16 @safe_sub_func_int16_t_s_s(i16 signext %si2)
-declare dso_local fastcc i64 @safe_sub_func_int64_t_s_s(i64, i64)
-declare dso_local fastcc zeroext i8 @safe_lshift_func(i8 zeroext, i32)
-declare dso_local fastcc zeroext i8 @safe_mul_func_uint8_t_u_u(i8 returned zeroext)
-
-declare dso_local i32 @e(...) local_unnamed_addr #1
-declare dso_local zeroext i16 @f(...) local_unnamed_addr #1
-
-declare i32 @dummy(i32, i32)
-declare i8 @dummy_i8(i8)
-declare i8 @dummy2(i8*, i8, i8)
-declare i16 @dummy3(i16)
-declare i32 @assert(...)
diff --git a/test/CodeGen/ARM/arm-shrink-wrapping.ll b/test/CodeGen/ARM/arm-shrink-wrapping.ll
index c943f60c56df..bf4f1bd0d0ca 100644
--- a/test/CodeGen/ARM/arm-shrink-wrapping.ll
+++ b/test/CodeGen/ARM/arm-shrink-wrapping.ll
@@ -104,10 +104,10 @@ declare i32 @doSomething(i32, i32*)
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: mov{{(\.w)?}} [[TMP:r[0-9]+]], #1
-; ARM: subs [[IV]], [[IV]], #1
-; THUMB: subs [[IV]], #1
-; ARM-NEXT: add [[SUM]], [[TMP]], [[SUM]]
-; THUMB-NEXT: add [[SUM]], [[TMP]]
+; ARM: add [[SUM]], [[TMP]], [[SUM]]
+; THUMB: add [[SUM]], [[TMP]]
+; ARM-NEXT: subs [[IV]], [[IV]], #1
+; THUMB-NEXT: subs [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
@@ -169,10 +169,10 @@ declare i32 @something(...)
; Next BB.
; CHECK: [[LOOP_LABEL:LBB[0-9_]+]]: @ %for.body
; CHECK: mov{{(\.w)?}} [[TMP:r[0-9]+]], #1
-; ARM: subs [[IV]], [[IV]], #1
-; THUMB: subs [[IV]], #1
; ARM: add [[SUM]], [[TMP]], [[SUM]]
; THUMB: add [[SUM]], [[TMP]]
+; ARM: subs [[IV]], [[IV]], #1
+; THUMB: subs [[IV]], #1
; CHECK-NEXT: bne [[LOOP_LABEL]]
; Next BB.
; CHECK: @ %for.exit
@@ -228,10 +228,10 @@ for.end: ; preds = %for.body
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: mov{{(\.w)?}} [[TMP:r[0-9]+]], #1
-; ARM: subs [[IV]], [[IV]], #1
-; THUMB: subs [[IV]], #1
-; ARM-NEXT: add [[SUM]], [[TMP]], [[SUM]]
-; THUMB-NEXT: add [[SUM]], [[TMP]]
+; ARM: add [[SUM]], [[TMP]], [[SUM]]
+; THUMB: add [[SUM]], [[TMP]]
+; ARM-NEXT: subs [[IV]], [[IV]], #1
+; THUMB-NEXT: subs [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
@@ -307,10 +307,10 @@ declare void @somethingElse(...)
; Next BB.
; CHECK: [[LOOP:LBB[0-9_]+]]: @ %for.body
; CHECK: mov{{(\.w)?}} [[TMP:r[0-9]+]], #1
-; ARM: subs [[IV]], [[IV]], #1
-; THUMB: subs [[IV]], #1
-; ARM-NEXT: add [[SUM]], [[TMP]], [[SUM]]
-; THUMB-NEXT: add [[SUM]], [[TMP]]
+; ARM: add [[SUM]], [[TMP]], [[SUM]]
+; THUMB: add [[SUM]], [[TMP]]
+; ARM-NEXT: subs [[IV]], [[IV]], #1
+; THUMB-NEXT: subs [[IV]], #1
; CHECK-NEXT: bne [[LOOP]]
;
; Next BB.
diff --git a/test/CodeGen/ARM/arm-storebytesmerge.ll b/test/CodeGen/ARM/arm-storebytesmerge.ll
index edc25302f7c5..00c5914b34b8 100644
--- a/test/CodeGen/ARM/arm-storebytesmerge.ll
+++ b/test/CodeGen/ARM/arm-storebytesmerge.ll
@@ -8,101 +8,95 @@ target triple = "thumbv7em-arm-none-eabi"
define arm_aapcs_vfpcc void @test(i8* %v50) #0 {
; CHECK-LABEL: test:
; CHECK: @ %bb.0:
-; CHECK-NEXT: movw r1, #35722
-; CHECK-NEXT: movt r1, #36236
-; CHECK-NEXT: str.w r1, [r0, #394]
-; CHECK-NEXT: movw r1, #36750
-; CHECK-NEXT: movt r1, #37264
-; CHECK-NEXT: str.w r1, [r0, #398]
-; CHECK-NEXT: movw r1, #37778
-; CHECK-NEXT: movt r1, #38292
-; CHECK-NEXT: str.w r1, [r0, #402]
-; CHECK-NEXT: movw r1, #38806
-; CHECK-NEXT: movt r1, #39320
-; CHECK-NEXT: str.w r1, [r0, #406]
-; CHECK-NEXT: movw r1, #39834
-; CHECK-NEXT: strh.w r1, [r0, #410]
-; CHECK-NEXT: movw r1, #40348
-; CHECK-NEXT: movt r1, #40862
-; CHECK-NEXT: str.w r1, [r0, #412]
-; CHECK-NEXT: movw r1, #41376
-; CHECK-NEXT: movt r1, #41890
-; CHECK-NEXT: str.w r1, [r0, #416]
-; CHECK-NEXT: movw r1, #42404
-; CHECK-NEXT: movt r1, #42918
-; CHECK-NEXT: str.w r1, [r0, #420]
-; CHECK-NEXT: movw r1, #43432
-; CHECK-NEXT: movt r1, #43946
-; CHECK-NEXT: str.w r1, [r0, #424]
-; CHECK-NEXT: movw r1, #44460
-; CHECK-NEXT: movt r1, #44974
-; CHECK-NEXT: str.w r1, [r0, #428]
-; CHECK-NEXT: movw r1, #45488
-; CHECK-NEXT: strh.w r1, [r0, #432]
+; CHECK-NEXT: movw r1, #65534
+; CHECK-NEXT: strh.w r1, [r0, #510]
+; CHECK-NEXT: movw r1, #64506
+; CHECK-NEXT: movt r1, #65020
+; CHECK-NEXT: str.w r1, [r0, #506]
+; CHECK-NEXT: movw r1, #63478
+; CHECK-NEXT: movt r1, #63992
+; CHECK-NEXT: str.w r1, [r0, #502]
+; CHECK-NEXT: movw r1, #62450
+; CHECK-NEXT: movt r1, #62964
+; CHECK-NEXT: str.w r1, [r0, #498]
+; CHECK-NEXT: movw r1, #61422
+; CHECK-NEXT: movt r1, #61936
+; CHECK-NEXT: str.w r1, [r0, #494]
+; CHECK-NEXT: movw r1, #60394
+; CHECK-NEXT: movt r1, #60908
+; CHECK-NEXT: str.w r1, [r0, #490]
+; CHECK-NEXT: movw r1, #59366
+; CHECK-NEXT: movt r1, #59880
+; CHECK-NEXT: str.w r1, [r0, #486]
+; CHECK-NEXT: movw r1, #58338
+; CHECK-NEXT: movt r1, #58852
+; CHECK-NEXT: str.w r1, [r0, #482]
+; CHECK-NEXT: movw r1, #57310
+; CHECK-NEXT: movt r1, #57824
+; CHECK-NEXT: str.w r1, [r0, #478]
+; CHECK-NEXT: movw r1, #56282
+; CHECK-NEXT: movt r1, #56796
+; CHECK-NEXT: str.w r1, [r0, #474]
+; CHECK-NEXT: movw r1, #55254
+; CHECK-NEXT: movt r1, #55768
+; CHECK-NEXT: str.w r1, [r0, #470]
+; CHECK-NEXT: movw r1, #54226
+; CHECK-NEXT: movt r1, #54740
+; CHECK-NEXT: str.w r1, [r0, #466]
+; CHECK-NEXT: movw r1, #53198
+; CHECK-NEXT: movt r1, #53712
+; CHECK-NEXT: str.w r1, [r0, #462]
+; CHECK-NEXT: movw r1, #52170
+; CHECK-NEXT: movt r1, #52684
+; CHECK-NEXT: str.w r1, [r0, #458]
+; CHECK-NEXT: movw r1, #51142
+; CHECK-NEXT: movt r1, #51656
+; CHECK-NEXT: str.w r1, [r0, #454]
+; CHECK-NEXT: movw r1, #50114
+; CHECK-NEXT: movt r1, #50628
+; CHECK-NEXT: str.w r1, [r0, #450]
+; CHECK-NEXT: movw r1, #49086
+; CHECK-NEXT: movt r1, #49600
+; CHECK-NEXT: str.w r1, [r0, #446]
+; CHECK-NEXT: movw r1, #48058
+; CHECK-NEXT: movt r1, #48572
+; CHECK-NEXT: str.w r1, [r0, #442]
+; CHECK-NEXT: movw r1, #47030
+; CHECK-NEXT: movt r1, #47544
+; CHECK-NEXT: str.w r1, [r0, #438]
; CHECK-NEXT: movw r1, #46002
; CHECK-NEXT: movt r1, #46516
; CHECK-NEXT: str.w r1, [r0, #434]
-; CHECK-NEXT: movw r1, #47030
-; CHECK-NEXT: strh.w r1, [r0, #438]
-; CHECK-NEXT: movw r1, #47544
-; CHECK-NEXT: movt r1, #48058
-; CHECK-NEXT: str.w r1, [r0, #440]
-; CHECK-NEXT: movw r1, #48572
-; CHECK-NEXT: movt r1, #49086
-; CHECK-NEXT: str.w r1, [r0, #444]
-; CHECK-NEXT: movw r1, #49600
-; CHECK-NEXT: strh.w r1, [r0, #448]
-; CHECK-NEXT: movs r1, #194
-; CHECK-NEXT: strb.w r1, [r0, #450]
-; CHECK-NEXT: movw r1, #50371
-; CHECK-NEXT: movt r1, #50885
-; CHECK-NEXT: str.w r1, [r0, #451]
-; CHECK-NEXT: movw r1, #51399
-; CHECK-NEXT: movt r1, #51913
-; CHECK-NEXT: str.w r1, [r0, #455]
-; CHECK-NEXT: movw r1, #52427
-; CHECK-NEXT: movt r1, #52941
-; CHECK-NEXT: str.w r1, [r0, #459]
-; CHECK-NEXT: movw r1, #53455
-; CHECK-NEXT: movt r1, #53969
-; CHECK-NEXT: str.w r1, [r0, #463]
-; CHECK-NEXT: movw r1, #54483
-; CHECK-NEXT: strh.w r1, [r0, #467]
-; CHECK-NEXT: movw r1, #54997
-; CHECK-NEXT: movt r1, #55511
-; CHECK-NEXT: str.w r1, [r0, #469]
-; CHECK-NEXT: movw r1, #56025
-; CHECK-NEXT: movt r1, #56539
-; CHECK-NEXT: str.w r1, [r0, #473]
-; CHECK-NEXT: movw r1, #57053
-; CHECK-NEXT: movt r1, #57567
-; CHECK-NEXT: str.w r1, [r0, #477]
-; CHECK-NEXT: movw r1, #58081
-; CHECK-NEXT: movt r1, #58595
-; CHECK-NEXT: str.w r1, [r0, #481]
-; CHECK-NEXT: movw r1, #59109
-; CHECK-NEXT: movt r1, #59623
-; CHECK-NEXT: str.w r1, [r0, #485]
-; CHECK-NEXT: movw r1, #60137
-; CHECK-NEXT: strh.w r1, [r0, #489]
-; CHECK-NEXT: movw r1, #60651
-; CHECK-NEXT: movt r1, #61165
-; CHECK-NEXT: str.w r1, [r0, #491]
-; CHECK-NEXT: movw r1, #61679
-; CHECK-NEXT: strh.w r1, [r0, #495]
-; CHECK-NEXT: movw r1, #62193
-; CHECK-NEXT: movt r1, #62707
-; CHECK-NEXT: str.w r1, [r0, #497]
-; CHECK-NEXT: movw r1, #63221
-; CHECK-NEXT: movt r1, #63735
-; CHECK-NEXT: str.w r1, [r0, #501]
-; CHECK-NEXT: movw r1, #64249
-; CHECK-NEXT: strh.w r1, [r0, #505]
-; CHECK-NEXT: movs r1, #251
-; CHECK-NEXT: strb.w r1, [r0, #507]
-; CHECK-NEXT: movw r1, #65020
-; CHECK-NEXT: movt r1, #65534
-; CHECK-NEXT: str.w r1, [r0, #508]
+; CHECK-NEXT: movw r1, #44974
+; CHECK-NEXT: movt r1, #45488
+; CHECK-NEXT: str.w r1, [r0, #430]
+; CHECK-NEXT: movw r1, #43946
+; CHECK-NEXT: movt r1, #44460
+; CHECK-NEXT: str.w r1, [r0, #426]
+; CHECK-NEXT: movw r1, #42918
+; CHECK-NEXT: movt r1, #43432
+; CHECK-NEXT: str.w r1, [r0, #422]
+; CHECK-NEXT: movw r1, #41890
+; CHECK-NEXT: movt r1, #42404
+; CHECK-NEXT: str.w r1, [r0, #418]
+; CHECK-NEXT: movw r1, #40862
+; CHECK-NEXT: movt r1, #41376
+; CHECK-NEXT: str.w r1, [r0, #414]
+; CHECK-NEXT: movw r1, #39834
+; CHECK-NEXT: movt r1, #40348
+; CHECK-NEXT: str.w r1, [r0, #410]
+; CHECK-NEXT: movw r1, #38806
+; CHECK-NEXT: movt r1, #39320
+; CHECK-NEXT: str.w r1, [r0, #406]
+; CHECK-NEXT: movw r1, #37778
+; CHECK-NEXT: movt r1, #38292
+; CHECK-NEXT: str.w r1, [r0, #402]
+; CHECK-NEXT: movw r1, #36750
+; CHECK-NEXT: movt r1, #37264
+; CHECK-NEXT: str.w r1, [r0, #398]
+; CHECK-NEXT: movw r1, #35722
+; CHECK-NEXT: movt r1, #36236
+; CHECK-NEXT: str.w r1, [r0, #394]
; CHECK-NEXT: bx lr
%v190 = getelementptr inbounds i8, i8* %v50, i32 394
store i8 -118, i8* %v190, align 1
diff --git a/test/CodeGen/ARM/armv8.2a-fp16-vector-intrinsics.ll b/test/CodeGen/ARM/armv8.2a-fp16-vector-intrinsics.ll
new file mode 100644
index 000000000000..a9c5838f104a
--- /dev/null
+++ b/test/CodeGen/ARM/armv8.2a-fp16-vector-intrinsics.ll
@@ -0,0 +1,1285 @@
+; RUN: llc -mtriple=arm-eabi -mattr=+v8.2a,+neon,+fullfp16 -float-abi=hard < %s | FileCheck %s
+
+%struct.float16x4x2_t = type { [2 x <4 x half>] }
+%struct.float16x8x2_t = type { [2 x <8 x half>] }
+
+define dso_local <4 x half> @test_vabs_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vabs_f16:
+; CHECK: vabs.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vabs1.i = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
+ ret <4 x half> %vabs1.i
+}
+
+define dso_local <8 x half> @test_vabsq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vabsq_f16:
+; CHECK: vabs.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
+ ret <8 x half> %vabs1.i
+}
+
+define dso_local <4 x i16> @test_vceqz_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vceqz_f16:
+; CHECK: vceq.f16 d0, d0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp oeq <4 x half> %a, zeroinitializer
+ %vceqz.i = sext <4 x i1> %0 to <4 x i16>
+ ret <4 x i16> %vceqz.i
+}
+
+define dso_local <8 x i16> @test_vceqzq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vceqzq_f16:
+; CHECK: vceq.f16 q0, q0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp oeq <8 x half> %a, zeroinitializer
+ %vceqz.i = sext <8 x i1> %0 to <8 x i16>
+ ret <8 x i16> %vceqz.i
+}
+
+define dso_local <4 x i16> @test_vcgez_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcgez_f16:
+; CHECK: vcge.f16 d0, d0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp oge <4 x half> %a, zeroinitializer
+ %vcgez.i = sext <4 x i1> %0 to <4 x i16>
+ ret <4 x i16> %vcgez.i
+}
+
+define dso_local <8 x i16> @test_vcgezq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcgezq_f16:
+; CHECK: vcge.f16 q0, q0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp oge <8 x half> %a, zeroinitializer
+ %vcgez.i = sext <8 x i1> %0 to <8 x i16>
+ ret <8 x i16> %vcgez.i
+}
+
+define dso_local <4 x i16> @test_vcgtz_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcgtz_f16:
+; CHECK: vcgt.f16 d0, d0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp ogt <4 x half> %a, zeroinitializer
+ %vcgtz.i = sext <4 x i1> %0 to <4 x i16>
+ ret <4 x i16> %vcgtz.i
+}
+
+define dso_local <8 x i16> @test_vcgtzq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcgtzq_f16:
+; CHECK: vcgt.f16 q0, q0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp ogt <8 x half> %a, zeroinitializer
+ %vcgtz.i = sext <8 x i1> %0 to <8 x i16>
+ ret <8 x i16> %vcgtz.i
+}
+
+define dso_local <4 x i16> @test_vclez_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vclez_f16:
+; CHECK: vcle.f16 d0, d0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp ole <4 x half> %a, zeroinitializer
+ %vclez.i = sext <4 x i1> %0 to <4 x i16>
+ ret <4 x i16> %vclez.i
+}
+
+define dso_local <8 x i16> @test_vclezq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vclezq_f16:
+; CHECK: vcle.f16 q0, q0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp ole <8 x half> %a, zeroinitializer
+ %vclez.i = sext <8 x i1> %0 to <8 x i16>
+ ret <8 x i16> %vclez.i
+}
+
+define dso_local <4 x i16> @test_vcltz_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcltz_f16:
+; CHECK: vclt.f16 d0, d0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp olt <4 x half> %a, zeroinitializer
+ %vcltz.i = sext <4 x i1> %0 to <4 x i16>
+ ret <4 x i16> %vcltz.i
+}
+
+define dso_local <8 x i16> @test_vcltzq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcltzq_f16:
+; CHECK: vclt.f16 q0, q0, #0
+; CHECK-NEXT: bx lr
+entry:
+ %0 = fcmp olt <8 x half> %a, zeroinitializer
+ %vcltz.i = sext <8 x i1> %0 to <8 x i16>
+ ret <8 x i16> %vcltz.i
+}
+
+define dso_local <4 x half> @test_vcvt_f16_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_vcvt_f16_s16:
+; CHECK: vcvt.f16.s16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = sitofp <4 x i16> %a to <4 x half>
+ ret <4 x half> %vcvt.i
+}
+
+define dso_local <8 x half> @test_vcvtq_f16_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vcvtq_f16_s16:
+; CHECK: vcvt.f16.s16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = sitofp <8 x i16> %a to <8 x half>
+ ret <8 x half> %vcvt.i
+}
+
+define dso_local <4 x half> @test_vcvt_f16_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_vcvt_f16_u16:
+; CHECK: vcvt.f16.u16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = uitofp <4 x i16> %a to <4 x half>
+ ret <4 x half> %vcvt.i
+}
+
+define dso_local <8 x half> @test_vcvtq_f16_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vcvtq_f16_u16:
+; CHECK: vcvt.f16.u16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = uitofp <8 x i16> %a to <8 x half>
+ ret <8 x half> %vcvt.i
+}
+
+define dso_local <4 x i16> @test_vcvt_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvt_s16_f16:
+; CHECK: vcvt.s16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = fptosi <4 x half> %a to <4 x i16>
+ ret <4 x i16> %vcvt.i
+}
+
+define dso_local <8 x i16> @test_vcvtq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtq_s16_f16:
+; CHECK: vcvt.s16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = fptosi <8 x half> %a to <8 x i16>
+ ret <8 x i16> %vcvt.i
+}
+
+define dso_local <4 x i16> @test_vcvt_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvt_u16_f16:
+; CHECK: vcvt.u16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = fptoui <4 x half> %a to <4 x i16>
+ ret <4 x i16> %vcvt.i
+}
+
+define dso_local <8 x i16> @test_vcvtq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtq_u16_f16:
+; CHECK: vcvt.u16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt.i = fptoui <8 x half> %a to <8 x i16>
+ ret <8 x i16> %vcvt.i
+}
+
+define dso_local <4 x i16> @test_vcvta_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvta_s16_f16:
+; CHECK: vcvta.s16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvta_s16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtas.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvta_s16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvta_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvta_u16_f16:
+; CHECK: vcvta.u16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvta_u16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtau.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvta_u16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtaq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtaq_s16_f16:
+; CHECK: vcvta.s16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtaq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtaq_s16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtm_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtm_s16_f16:
+; CHECK: vcvtm.s16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtm_s16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtms.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtm_s16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtmq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_s16_f16:
+; CHECK: vcvtm.s16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtmq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtms.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtmq_s16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtm_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtm_u16_f16:
+; CHECK: vcvtm.u16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtm_u16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtmu.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtm_u16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtmq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_u16_f16:
+; CHECK: vcvtm.u16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtmq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtmu.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtmq_u16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtn_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtn_s16_f16:
+; CHECK: vcvtn.s16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtn_s16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtns.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtn_s16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtnq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtnq_s16_f16:
+; CHECK: vcvtn.s16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtnq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtns.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtnq_s16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtn_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtn_u16_f16:
+; CHECK: vcvtn.u16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtn_u16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtnu.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtn_u16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtnq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtnq_u16_f16:
+; CHECK: vcvtn.u16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtnq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtnu.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtnq_u16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtp_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtp_s16_f16:
+; CHECK: vcvtp.s16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtp_s16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtps.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtp_s16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtpq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtpq_s16_f16:
+; CHECK: vcvtp.s16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtpq_s16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtps.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtpq_s16_v1.i
+}
+
+define dso_local <4 x i16> @test_vcvtp_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvtp_u16_f16:
+; CHECK: vcvtp.u16.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtp_u16_v1.i = tail call <4 x i16> @llvm.arm.neon.vcvtpu.v4i16.v4f16(<4 x half> %a)
+ ret <4 x i16> %vcvtp_u16_v1.i
+}
+
+define dso_local <8 x i16> @test_vcvtpq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtpq_u16_f16:
+; CHECK: vcvtp.u16.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcvtpq_u16_v1.i = tail call <8 x i16> @llvm.arm.neon.vcvtpu.v8i16.v8f16(<8 x half> %a)
+ ret <8 x i16> %vcvtpq_u16_v1.i
+}
+
+define dso_local <4 x half> @test_vneg_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vneg_f16:
+; CHECK: vneg.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %sub.i = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a
+ ret <4 x half> %sub.i
+}
+
+define dso_local <8 x half> @test_vnegq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vnegq_f16:
+; CHECK: vneg.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %sub.i = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %a
+ ret <8 x half> %sub.i
+}
+
+define dso_local <4 x half> @test_vrecpe_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrecpe_f16:
+; CHECK: vrecpe.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrecpe_v1.i = tail call <4 x half> @llvm.arm.neon.vrecpe.v4f16(<4 x half> %a)
+ ret <4 x half> %vrecpe_v1.i
+}
+
+define dso_local <8 x half> @test_vrecpeq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrecpeq_f16:
+; CHECK: vrecpe.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrecpeq_v1.i = tail call <8 x half> @llvm.arm.neon.vrecpe.v8f16(<8 x half> %a)
+ ret <8 x half> %vrecpeq_v1.i
+}
+
+define dso_local <4 x half> @test_vrnd_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrnd_f16:
+; CHECK: vrintz.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrnd_v1.i = tail call <4 x half> @llvm.arm.neon.vrintz.v4f16(<4 x half> %a)
+ ret <4 x half> %vrnd_v1.i
+}
+
+define dso_local <8 x half> @test_vrndq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndq_f16:
+; CHECK: vrintz.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintz.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndq_v1.i
+}
+
+define dso_local <4 x half> @test_vrnda_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrnda_f16:
+; CHECK: vrinta.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrnda_v1.i = tail call <4 x half> @llvm.arm.neon.vrinta.v4f16(<4 x half> %a)
+ ret <4 x half> %vrnda_v1.i
+}
+
+define dso_local <8 x half> @test_vrndaq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndaq_f16:
+; CHECK: vrinta.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndaq_v1.i = tail call <8 x half> @llvm.arm.neon.vrinta.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndaq_v1.i
+}
+
+define dso_local <4 x half> @test_vrndm_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrndm_f16:
+; CHECK: vrintm.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndm_v1.i = tail call <4 x half> @llvm.arm.neon.vrintm.v4f16(<4 x half> %a)
+ ret <4 x half> %vrndm_v1.i
+}
+
+define dso_local <8 x half> @test_vrndmq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndmq_f16:
+; CHECK: vrintm.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndmq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintm.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndmq_v1.i
+}
+
+define dso_local <4 x half> @test_vrndn_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrndn_f16:
+; CHECK: vrintn.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndn_v1.i = tail call <4 x half> @llvm.arm.neon.vrintn.v4f16(<4 x half> %a)
+ ret <4 x half> %vrndn_v1.i
+}
+
+define dso_local <8 x half> @test_vrndnq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndnq_f16:
+; CHECK: vrintn.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndnq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintn.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndnq_v1.i
+}
+
+define dso_local <4 x half> @test_vrndp_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrndp_f16:
+; CHECK: vrintp.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndp_v1.i = tail call <4 x half> @llvm.arm.neon.vrintp.v4f16(<4 x half> %a)
+ ret <4 x half> %vrndp_v1.i
+}
+
+define dso_local <8 x half> @test_vrndpq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndpq_f16:
+; CHECK: vrintp.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndpq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintp.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndpq_v1.i
+}
+
+define dso_local <4 x half> @test_vrndx_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrndx_f16:
+; CHECK: vrintx.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndx_v1.i = tail call <4 x half> @llvm.arm.neon.vrintx.v4f16(<4 x half> %a)
+ ret <4 x half> %vrndx_v1.i
+}
+
+define dso_local <8 x half> @test_vrndxq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrndxq_f16:
+; CHECK: vrintx.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrndxq_v1.i = tail call <8 x half> @llvm.arm.neon.vrintx.v8f16(<8 x half> %a)
+ ret <8 x half> %vrndxq_v1.i
+}
+
+define dso_local <4 x half> @test_vrsqrte_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrsqrte_f16:
+; CHECK: vrsqrte.f16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vrsqrte_v1.i = tail call <4 x half> @llvm.arm.neon.vrsqrte.v4f16(<4 x half> %a)
+ ret <4 x half> %vrsqrte_v1.i
+}
+
+define dso_local <8 x half> @test_vrsqrteq_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrsqrteq_f16:
+; CHECK: vrsqrte.f16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vrsqrteq_v1.i = tail call <8 x half> @llvm.arm.neon.vrsqrte.v8f16(<8 x half> %a)
+ ret <8 x half> %vrsqrteq_v1.i
+}
+
+define dso_local <4 x half> @test_vadd_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vadd_f16:
+; CHECK: vadd.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %add.i = fadd <4 x half> %a, %b
+ ret <4 x half> %add.i
+}
+
+define dso_local <8 x half> @test_vaddq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vaddq_f16:
+; CHECK: vadd.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %add.i = fadd <8 x half> %a, %b
+ ret <8 x half> %add.i
+}
+
+define dso_local <4 x half> @test_vabd_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vabd_f16:
+; CHECK: vabd.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vabd_v2.i = tail call <4 x half> @llvm.arm.neon.vabds.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vabd_v2.i
+}
+
+define dso_local <8 x half> @test_vabdq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vabdq_f16:
+; CHECK: vabd.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vabdq_v2.i = tail call <8 x half> @llvm.arm.neon.vabds.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vabdq_v2.i
+}
+
+define dso_local <4 x i16> @test_vcage_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcage_f16:
+; CHECK: vacge.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vcage_v2.i = tail call <4 x i16> @llvm.arm.neon.vacge.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x i16> %vcage_v2.i
+}
+
+define dso_local <8 x i16> @test_vcageq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcageq_f16:
+; CHECK: vacge.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vcageq_v2.i = tail call <8 x i16> @llvm.arm.neon.vacge.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x i16> %vcageq_v2.i
+}
+
+define dso_local <4 x i16> @test_vcagt_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcagt_f16:
+; CHECK: vacgt.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vcagt_v2.i = tail call <4 x i16> @llvm.arm.neon.vacgt.v4i16.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x i16> %vcagt_v2.i
+}
+
+define dso_local <8 x i16> @test_vcagtq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcagtq_f16:
+; CHECK: vacgt.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vcagtq_v2.i = tail call <8 x i16> @llvm.arm.neon.vacgt.v8i16.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x i16> %vcagtq_v2.i
+}
+
+define dso_local <4 x i16> @test_vcale_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcale_f16:
+; CHECK: vacge.f16 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %vcale_v2.i = tail call <4 x i16> @llvm.arm.neon.vacge.v4i16.v4f16(<4 x half> %b, <4 x half> %a)
+ ret <4 x i16> %vcale_v2.i
+}
+
+define dso_local <8 x i16> @test_vcaleq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcaleq_f16:
+; CHECK: vacge.f16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %vcaleq_v2.i = tail call <8 x i16> @llvm.arm.neon.vacge.v8i16.v8f16(<8 x half> %b, <8 x half> %a)
+ ret <8 x i16> %vcaleq_v2.i
+}
+
+define dso_local <4 x i16> @test_vceq_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vceq_f16:
+; CHECK: vceq.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp oeq <4 x half> %a, %b
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i16>
+ ret <4 x i16> %sext.i
+}
+
+define dso_local <8 x i16> @test_vceqq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vceqq_f16:
+; CHECK: vceq.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp oeq <8 x half> %a, %b
+ %sext.i = sext <8 x i1> %cmp.i to <8 x i16>
+ ret <8 x i16> %sext.i
+}
+
+define dso_local <4 x i16> @test_vcge_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcge_f16:
+; CHECK: vcge.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp oge <4 x half> %a, %b
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i16>
+ ret <4 x i16> %sext.i
+}
+
+define dso_local <8 x i16> @test_vcgeq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcgeq_f16:
+; CHECK: vcge.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp oge <8 x half> %a, %b
+ %sext.i = sext <8 x i1> %cmp.i to <8 x i16>
+ ret <8 x i16> %sext.i
+}
+
+define dso_local <4 x i16> @test_vcgt_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcgt_f16:
+; CHECK: vcgt.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp ogt <4 x half> %a, %b
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i16>
+ ret <4 x i16> %sext.i
+}
+
+define dso_local <8 x i16> @test_vcgtq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcgtq_f16:
+; CHECK: vcgt.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp ogt <8 x half> %a, %b
+ %sext.i = sext <8 x i1> %cmp.i to <8 x i16>
+ ret <8 x i16> %sext.i
+}
+
+define dso_local <4 x i16> @test_vcle_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vcle_f16:
+; CHECK: vcge.f16 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp ole <4 x half> %a, %b
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i16>
+ ret <4 x i16> %sext.i
+}
+
+define dso_local <8 x i16> @test_vcleq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcleq_f16:
+; CHECK: vcge.f16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp ole <8 x half> %a, %b
+ %sext.i = sext <8 x i1> %cmp.i to <8 x i16>
+ ret <8 x i16> %sext.i
+}
+
+define dso_local <4 x i16> @test_vclt_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vclt_f16:
+; CHECK: vcgt.f16 d0, d1, d0
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp olt <4 x half> %a, %b
+ %sext.i = sext <4 x i1> %cmp.i to <4 x i16>
+ ret <4 x i16> %sext.i
+}
+
+define dso_local <8 x i16> @test_vcltq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vcltq_f16:
+; CHECK: vcgt.f16 q0, q1, q0
+; CHECK-NEXT: bx lr
+entry:
+ %cmp.i = fcmp olt <8 x half> %a, %b
+ %sext.i = sext <8 x i1> %cmp.i to <8 x i16>
+ ret <8 x i16> %sext.i
+}
+
+define dso_local <4 x half> @test_vcvt_n_f16_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_vcvt_n_f16_s16:
+; CHECK: vcvt.f16.s16 d0, d0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <4 x half> @llvm.arm.neon.vcvtfxs2fp.v4f16.v4i16(<4 x i16> %a, i32 2)
+ ret <4 x half> %vcvt_n1
+}
+
+declare <4 x half> @llvm.arm.neon.vcvtfxs2fp.v4f16.v4i16(<4 x i16>, i32) #2
+
+define dso_local <8 x half> @test_vcvtq_n_f16_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vcvtq_n_f16_s16:
+; CHECK: vcvt.f16.s16 q0, q0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <8 x half> @llvm.arm.neon.vcvtfxs2fp.v8f16.v8i16(<8 x i16> %a, i32 2)
+ ret <8 x half> %vcvt_n1
+}
+
+declare <8 x half> @llvm.arm.neon.vcvtfxs2fp.v8f16.v8i16(<8 x i16>, i32) #2
+
+define dso_local <4 x half> @test_vcvt_n_f16_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_vcvt_n_f16_u16:
+; CHECK: vcvt.f16.u16 d0, d0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <4 x half> @llvm.arm.neon.vcvtfxu2fp.v4f16.v4i16(<4 x i16> %a, i32 2)
+ ret <4 x half> %vcvt_n1
+}
+
+declare <4 x half> @llvm.arm.neon.vcvtfxu2fp.v4f16.v4i16(<4 x i16>, i32) #2
+
+define dso_local <8 x half> @test_vcvtq_n_f16_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vcvtq_n_f16_u16:
+; CHECK: vcvt.f16.u16 q0, q0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <8 x half> @llvm.arm.neon.vcvtfxu2fp.v8f16.v8i16(<8 x i16> %a, i32 2)
+ ret <8 x half> %vcvt_n1
+}
+
+declare <8 x half> @llvm.arm.neon.vcvtfxu2fp.v8f16.v8i16(<8 x i16>, i32) #2
+
+define dso_local <4 x i16> @test_vcvt_n_s16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvt_n_s16_f16:
+; CHECK: vcvt.s16.f16 d0, d0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <4 x i16> @llvm.arm.neon.vcvtfp2fxs.v4i16.v4f16(<4 x half> %a, i32 2)
+ ret <4 x i16> %vcvt_n1
+}
+
+declare <4 x i16> @llvm.arm.neon.vcvtfp2fxs.v4i16.v4f16(<4 x half>, i32) #2
+
+define dso_local <8 x i16> @test_vcvtq_n_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtq_n_s16_f16:
+; CHECK: vcvt.s16.f16 q0, q0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <8 x i16> @llvm.arm.neon.vcvtfp2fxs.v8i16.v8f16(<8 x half> %a, i32 2)
+ ret <8 x i16> %vcvt_n1
+}
+
+declare <8 x i16> @llvm.arm.neon.vcvtfp2fxs.v8i16.v8f16(<8 x half>, i32) #2
+
+define dso_local <4 x i16> @test_vcvt_n_u16_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vcvt_n_u16_f16:
+; CHECK: vcvt.u16.f16 d0, d0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <4 x i16> @llvm.arm.neon.vcvtfp2fxu.v4i16.v4f16(<4 x half> %a, i32 2)
+ ret <4 x i16> %vcvt_n1
+}
+
+declare <4 x i16> @llvm.arm.neon.vcvtfp2fxu.v4i16.v4f16(<4 x half>, i32) #2
+
+define dso_local <8 x i16> @test_vcvtq_n_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtq_n_u16_f16:
+; CHECK: vcvt.u16.f16 q0, q0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %vcvt_n1 = tail call <8 x i16> @llvm.arm.neon.vcvtfp2fxu.v8i16.v8f16(<8 x half> %a, i32 2)
+ ret <8 x i16> %vcvt_n1
+}
+
+declare <8 x i16> @llvm.arm.neon.vcvtfp2fxu.v8i16.v8f16(<8 x half>, i32) #2
+
+define dso_local <4 x half> @test_vmax_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmax_f16:
+; CHECK: vmax.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vmax_v2.i = tail call <4 x half> @llvm.arm.neon.vmaxs.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vmax_v2.i
+}
+
+define dso_local <8 x half> @test_vmaxq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vmaxq_f16:
+; CHECK: vmax.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vmaxq_v2.i = tail call <8 x half> @llvm.arm.neon.vmaxs.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vmaxq_v2.i
+}
+
+; The following six tests check that the f16 min/max intrinsic families
+; (vmaxnm/vmin/vminnm, D- and Q-register forms) each lower to the single
+; matching NEON instruction followed directly by the return.
+define dso_local <4 x half> @test_vmaxnm_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmaxnm_f16:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vmaxnm_v2.i = tail call <4 x half> @llvm.arm.neon.vmaxnm.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vmaxnm_v2.i
+}
+
+define dso_local <8 x half> @test_vmaxnmq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vmaxnmq_f16:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vmaxnmq_v2.i = tail call <8 x half> @llvm.arm.neon.vmaxnm.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vmaxnmq_v2.i
+}
+
+define dso_local <4 x half> @test_vmin_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmin_f16:
+; CHECK: vmin.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vmin_v2.i = tail call <4 x half> @llvm.arm.neon.vmins.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vmin_v2.i
+}
+
+define dso_local <8 x half> @test_vminq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vminq_f16:
+; CHECK: vmin.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vminq_v2.i = tail call <8 x half> @llvm.arm.neon.vmins.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vminq_v2.i
+}
+
+define dso_local <4 x half> @test_vminnm_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vminnm_f16:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vminnm_v2.i = tail call <4 x half> @llvm.arm.neon.vminnm.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vminnm_v2.i
+}
+
+define dso_local <8 x half> @test_vminnmq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vminnmq_f16:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vminnmq_v2.i = tail call <8 x half> @llvm.arm.neon.vminnm.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vminnmq_v2.i
+}
+
+; Plain f16 vector fmul lowers to VMUL.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vmul_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmul_f16:
+; CHECK: vmul.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %mul.i = fmul <4 x half> %a, %b
+ ret <4 x half> %mul.i
+}
+
+; Quad-register f16 vector fmul lowers to VMUL.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <8 x half> @test_vmulq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vmulq_f16:
+; CHECK: vmul.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %mul.i = fmul <8 x half> %a, %b
+ ret <8 x half> %mul.i
+}
+
+; f16 pairwise add intrinsic lowers to VPADD (D registers only).
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vpadd_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vpadd_f16:
+; CHECK: vpadd.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vpadd_v2.i = tail call <4 x half> @llvm.arm.neon.vpadd.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vpadd_v2.i
+}
+
+; f16 pairwise max intrinsic lowers to VPMAX (D registers only).
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vpmax_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vpmax_f16:
+; CHECK: vpmax.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vpmax_v2.i = tail call <4 x half> @llvm.arm.neon.vpmaxs.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vpmax_v2.i
+}
+
+; f16 pairwise min intrinsic lowers to VPMIN (D registers only).
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vpmin_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vpmin_f16:
+; CHECK: vpmin.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vpmin_v2.i = tail call <4 x half> @llvm.arm.neon.vpmins.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vpmin_v2.i
+}
+
+; f16 reciprocal-step intrinsic lowers to VRECPS.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vrecps_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vrecps_f16:
+; CHECK: vrecps.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vrecps_v2.i = tail call <4 x half> @llvm.arm.neon.vrecps.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vrecps_v2.i
+}
+
+; Quad-register f16 reciprocal-step intrinsic lowers to VRECPS.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <8 x half> @test_vrecpsq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vrecpsq_f16:
+; CHECK: vrecps.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vrecpsq_v2.i = tail call <8 x half> @llvm.arm.neon.vrecps.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vrecpsq_v2.i
+}
+
+; f16 reciprocal-sqrt-step intrinsic lowers to VRSQRTS.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vrsqrts_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vrsqrts_f16:
+; CHECK: vrsqrts.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vrsqrts_v2.i = tail call <4 x half> @llvm.arm.neon.vrsqrts.v4f16(<4 x half> %a, <4 x half> %b)
+ ret <4 x half> %vrsqrts_v2.i
+}
+
+; Quad-register f16 reciprocal-sqrt-step intrinsic lowers to VRSQRTS.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <8 x half> @test_vrsqrtsq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vrsqrtsq_f16:
+; CHECK: vrsqrts.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vrsqrtsq_v2.i = tail call <8 x half> @llvm.arm.neon.vrsqrts.v8f16(<8 x half> %a, <8 x half> %b)
+ ret <8 x half> %vrsqrtsq_v2.i
+}
+
+; f16 vector fsub lowers to VSUB.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vsub_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vsub_f16:
+; CHECK: vsub.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %sub.i = fsub <4 x half> %a, %b
+ ret <4 x half> %sub.i
+}
+
+; Quad-register f16 vector fsub lowers to VSUB.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <8 x half> @test_vsubq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vsubq_f16:
+; CHECK: vsub.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %sub.i = fsub <8 x half> %a, %b
+ ret <8 x half> %sub.i
+}
+
+; Fused multiply-add/subtract and by-lane/by-scalar multiply tests.
+; The vfms variants express fneg as an fsub from -0.0 (0xH8000) so the
+; negation can either fold into the FMA operand or emit a VNEG first.
+define dso_local <4 x half> @test_vfma_f16(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_vfma_f16:
+; CHECK: vfma.f16 d0, d1, d2
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <4 x half> @llvm.fma.v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %a)
+ ret <4 x half> %0
+}
+
+define dso_local <8 x half> @test_vfmaq_f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_vfmaq_f16:
+; CHECK: vfma.f16 q0, q1, q2
+; CHECK-NEXT: bx lr
+entry:
+ %0 = tail call <8 x half> @llvm.fma.v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %a)
+ ret <8 x half> %0
+}
+
+define dso_local <4 x half> @test_vfms_f16(<4 x half> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_vfms_f16:
+; CHECK: vneg.f16 [[D16:d[0-9]+]], d1
+; CHECK-NEXT: vfma.f16 d0, [[D16]], d2
+; CHECK-NEXT: bx lr
+entry:
+ ; (-0.0 - %b) is the canonical fneg of %b
+ %sub.i = fsub <4 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
+ %0 = tail call <4 x half> @llvm.fma.v4f16(<4 x half> %sub.i, <4 x half> %c, <4 x half> %a)
+ ret <4 x half> %0
+}
+
+define dso_local <8 x half> @test_vfmsq_f16(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_vfmsq_f16:
+; CHECK: vneg.f16 [[Q8:q[0-9]+]], q1
+; CHECK-NEXT: vfma.f16 q0, [[Q8]], q2
+; CHECK-NEXT: bx lr
+entry:
+ ; (-0.0 - %b) is the canonical fneg of %b
+ %sub.i = fsub <8 x half> <half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %b
+ %0 = tail call <8 x half> @llvm.fma.v8f16(<8 x half> %sub.i, <8 x half> %c, <8 x half> %a)
+ ret <8 x half> %0
+}
+
+define dso_local <4 x half> @test_vmul_lane_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmul_lane_f16:
+; CHECK: vmul.f16 d0, d0, d1[3]
+; CHECK-NEXT: bx lr
+entry:
+ ; splat of lane 3 of %b folds into the by-lane multiply form
+ %shuffle = shufflevector <4 x half> %b, <4 x half> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ %mul = fmul <4 x half> %shuffle, %a
+ ret <4 x half> %mul
+}
+
+define dso_local <8 x half> @test_vmulq_lane_f16(<8 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vmulq_lane_f16:
+; CHECK: vmul.f16 q0, q0, d2[3]
+; CHECK-NEXT: bx lr
+entry:
+ %shuffle = shufflevector <4 x half> %b, <4 x half> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ %mul = fmul <8 x half> %shuffle, %a
+ ret <8 x half> %mul
+}
+
+define dso_local <4 x half> @test_vmul_n_f16(<4 x half> %a, float %b.coerce) {
+; CHECK-LABEL: test_vmul_n_f16:
+; CHECK: vmul.f16 d0, d0, d1[0]
+; CHECK-NEXT: bx lr
+entry:
+ ; the scalar half arrives coerced through a float argument slot
+ %0 = bitcast float %b.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <4 x half> undef, half %1, i32 0
+ %vecinit4 = shufflevector <4 x half> %vecinit, <4 x half> undef, <4 x i32> zeroinitializer
+ %mul = fmul <4 x half> %vecinit4, %a
+ ret <4 x half> %mul
+}
+
+define dso_local <8 x half> @test_vmulq_n_f16(<8 x half> %a, float %b.coerce) {
+; CHECK-LABEL: test_vmulq_n_f16:
+; CHECK: vmul.f16 q0, q0, d2[0]
+; CHECK-NEXT: bx lr
+entry:
+ ; the scalar half arrives coerced through a float argument slot
+ %0 = bitcast float %b.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <8 x half> undef, half %1, i32 0
+ %vecinit8 = shufflevector <8 x half> %vecinit, <8 x half> undef, <8 x i32> zeroinitializer
+ %mul = fmul <8 x half> %vecinit8, %a
+ ret <8 x half> %mul
+}
+
+; Bitwise select on f16 vectors goes through <8 x i8> bitcasts to VBSL.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <4 x half> @test_vbsl_f16(<4 x i16> %a, <4 x half> %b, <4 x half> %c) {
+; CHECK-LABEL: test_vbsl_f16:
+; CHECK: vbsl d0, d1, d2
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast <4 x i16> %a to <8 x i8>
+ %1 = bitcast <4 x half> %b to <8 x i8>
+ %2 = bitcast <4 x half> %c to <8 x i8>
+ %vbsl_v.i = tail call <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8> %0, <8 x i8> %1, <8 x i8> %2)
+ %3 = bitcast <8 x i8> %vbsl_v.i to <4 x half>
+ ret <4 x half> %3
+}
+
+; Quad-register bitwise select goes through <16 x i8> bitcasts to VBSL.
+; Fix malformed "CHECKLABEL" directive so FileCheck anchors on the function.
+define dso_local <8 x half> @test_vbslq_f16(<8 x i16> %a, <8 x half> %b, <8 x half> %c) {
+; CHECK-LABEL: test_vbslq_f16:
+; CHECK: vbsl q0, q1, q2
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast <8 x i16> %a to <16 x i8>
+ %1 = bitcast <8 x half> %b to <16 x i8>
+ %2 = bitcast <8 x half> %c to <16 x i8>
+ %vbslq_v.i = tail call <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+ %3 = bitcast <16 x i8> %vbslq_v.i to <8 x half>
+ ret <8 x half> %3
+}
+
+; Permutation (zip/uzp/trn) and broadcast (vmov_n/vdup_n) tests. The
+; two-result permutes return an aggregate struct holding both halves of
+; the in-place register-pair operation. The *_n broadcasts receive the
+; scalar half coerced through a float argument slot.
+define dso_local %struct.float16x4x2_t @test_vzip_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vzip_f16:
+; CHECK: vzip.16 d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vzip.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %vzip1.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float16x4x2_t undef, <4 x half> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x4x2_t %.fca.0.0.insert, <4 x half> %vzip1.i, 0, 1
+ ret %struct.float16x4x2_t %.fca.0.1.insert
+}
+
+define dso_local %struct.float16x8x2_t @test_vzipq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vzipq_f16:
+; CHECK: vzip.16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vzip.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %vzip1.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.float16x8x2_t undef, <8 x half> %vzip.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x8x2_t %.fca.0.0.insert, <8 x half> %vzip1.i, 0, 1
+ ret %struct.float16x8x2_t %.fca.0.1.insert
+}
+
+define dso_local %struct.float16x4x2_t @test_vuzp_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vuzp_f16:
+; CHECK: vuzp.16 d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vuzp.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %vuzp1.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float16x4x2_t undef, <4 x half> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x4x2_t %.fca.0.0.insert, <4 x half> %vuzp1.i, 0, 1
+ ret %struct.float16x4x2_t %.fca.0.1.insert
+}
+
+define dso_local %struct.float16x8x2_t @test_vuzpq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vuzpq_f16:
+; CHECK: vuzp.16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vuzp.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %vuzp1.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.float16x8x2_t undef, <8 x half> %vuzp.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x8x2_t %.fca.0.0.insert, <8 x half> %vuzp1.i, 0, 1
+ ret %struct.float16x8x2_t %.fca.0.1.insert
+}
+
+define dso_local %struct.float16x4x2_t @test_vtrn_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vtrn_f16:
+; CHECK: vtrn.16 d0, d1
+; CHECK-NEXT: bx lr
+entry:
+ %vtrn.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %vtrn1.i = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %.fca.0.0.insert = insertvalue %struct.float16x4x2_t undef, <4 x half> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x4x2_t %.fca.0.0.insert, <4 x half> %vtrn1.i, 0, 1
+ ret %struct.float16x4x2_t %.fca.0.1.insert
+}
+
+define dso_local %struct.float16x8x2_t @test_vtrnq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vtrnq_f16:
+; CHECK: vtrn.16 q0, q1
+; CHECK-NEXT: bx lr
+entry:
+ %vtrn.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %vtrn1.i = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %.fca.0.0.insert = insertvalue %struct.float16x8x2_t undef, <8 x half> %vtrn.i, 0, 0
+ %.fca.0.1.insert = insertvalue %struct.float16x8x2_t %.fca.0.0.insert, <8 x half> %vtrn1.i, 0, 1
+ ret %struct.float16x8x2_t %.fca.0.1.insert
+}
+
+define dso_local <4 x half> @test_vmov_n_f16(float %a.coerce) {
+; CHECK-LABEL: test_vmov_n_f16:
+; CHECK: vdup.16 d0, d0[0]
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast float %a.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <4 x half> undef, half %1, i32 0
+ %vecinit4 = shufflevector <4 x half> %vecinit, <4 x half> undef, <4 x i32> zeroinitializer
+ ret <4 x half> %vecinit4
+}
+
+define dso_local <8 x half> @test_vmovq_n_f16(float %a.coerce) {
+; CHECK-LABEL: test_vmovq_n_f16:
+; CHECK: vdup.16 q0, d0[0]
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast float %a.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <8 x half> undef, half %1, i32 0
+ %vecinit8 = shufflevector <8 x half> %vecinit, <8 x half> undef, <8 x i32> zeroinitializer
+ ret <8 x half> %vecinit8
+}
+
+define dso_local <4 x half> @test_vdup_n_f16(float %a.coerce) {
+; CHECK-LABEL: test_vdup_n_f16:
+; CHECK: vdup.16 d0, d0[0]
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast float %a.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <4 x half> undef, half %1, i32 0
+ %vecinit4 = shufflevector <4 x half> %vecinit, <4 x half> undef, <4 x i32> zeroinitializer
+ ret <4 x half> %vecinit4
+}
+
+define dso_local <8 x half> @test_vdupq_n_f16(float %a.coerce) {
+; CHECK-LABEL: test_vdupq_n_f16:
+; CHECK: vdup.16 q0, d0[0]
+; CHECK-NEXT: bx lr
+entry:
+ %0 = bitcast float %a.coerce to i32
+ %tmp.0.extract.trunc = trunc i32 %0 to i16
+ %1 = bitcast i16 %tmp.0.extract.trunc to half
+ %vecinit = insertelement <8 x half> undef, half %1, i32 0
+ %vecinit8 = shufflevector <8 x half> %vecinit, <8 x half> undef, <8 x i32> zeroinitializer
+ ret <8 x half> %vecinit8
+}
+
+; Splat of lane 3 of a <4 x half> lowers to a by-lane VDUP.
+; Was "vdup.32 d0, d0[3]": a D register has only two 32-bit lanes, so
+; lane 3 with size .32 is unencodable; the 16-bit form matches
+; test_vdupq_lane_f16 below.
+define dso_local <4 x half> @test_vdup_lane_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vdup_lane_f16:
+; CHECK: vdup.16 d0, d0[3]
+; CHECK-NEXT: bx lr
+entry:
+ %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x half> %shuffle
+}
+
+; Lane broadcast into a Q register and vector-extract (concatenation
+; window) tests; the shuffle masks below map directly onto VDUP-by-lane
+; and VEXT immediates.
+define dso_local <8 x half> @test_vdupq_lane_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vdupq_lane_f16:
+; CHECK: vdup.16 q0, d0[3]
+; CHECK-NEXT: bx lr
+entry:
+ %shuffle = shufflevector <4 x half> %a, <4 x half> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+ ret <8 x half> %shuffle
+}
+
+define dso_local <4 x half> @test_vext_f16(<4 x half> %a, <4 x half> %b) {
+; CHECK-LABEL: test_vext_f16:
+; CHECK: vext.16 d0, d0, d1, #2
+; CHECK-NEXT: bx lr
+entry:
+ ; elements 2..5 of the concatenation a:b -> VEXT with immediate #2
+ %vext = shufflevector <4 x half> %a, <4 x half> %b, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
+ ret <4 x half> %vext
+}
+
+define dso_local <8 x half> @test_vextq_f16(<8 x half> %a, <8 x half> %b) {
+; CHECK-LABEL: test_vextq_f16:
+; CHECK: vext.16 q0, q0, q1, #5
+; CHECK-NEXT: bx lr
+entry:
+ ; elements 5..12 of the concatenation a:b -> VEXT with immediate #5
+ %vext = shufflevector <8 x half> %a, <8 x half> %b, <8 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
+ ret <8 x half> %vext
+}
+
+; Reverse the four halves within the 64-bit register: VREV64.16.
+; This test previously had no CHECK lines, so it verified nothing.
+define dso_local <4 x half> @test_vrev64_f16(<4 x half> %a) {
+; CHECK-LABEL: test_vrev64_f16:
+; CHECK: vrev64.16 d0, d0
+; CHECK-NEXT: bx lr
+entry:
+ %shuffle.i = shufflevector <4 x half> %a, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x half> %shuffle.i
+}
+
+; Reverse halves within each 64-bit chunk of the Q register: VREV64.16.
+; This test previously had no CHECK lines, so it verified nothing.
+define dso_local <8 x half> @test_vrev64q_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vrev64q_f16:
+; CHECK: vrev64.16 q0, q0
+; CHECK-NEXT: bx lr
+entry:
+ %shuffle.i = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+ ret <8 x half> %shuffle.i
+}
+
+declare <4 x half> @llvm.fabs.v4f16(<4 x half>)
+declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtas.v4i16.v4f16(<4 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtau.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtas.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtms.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtms.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtmu.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtmu.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtns.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtns.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtnu.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtnu.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtps.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtps.v8i16.v8f16(<8 x half>)
+declare <4 x i16> @llvm.arm.neon.vcvtpu.v4i16.v4f16(<4 x half>)
+declare <8 x i16> @llvm.arm.neon.vcvtpu.v8i16.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrecpe.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrecpe.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrintz.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrintz.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrinta.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrinta.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrintm.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrintm.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrintn.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrintn.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrintp.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrintp.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrintx.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrintx.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vrsqrte.v4f16(<4 x half>)
+declare <8 x half> @llvm.arm.neon.vrsqrte.v8f16(<8 x half>)
+declare <4 x half> @llvm.arm.neon.vabds.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vabds.v8f16(<8 x half>, <8 x half>)
+declare <4 x i16> @llvm.arm.neon.vacge.v4i16.v4f16(<4 x half>, <4 x half>)
+declare <8 x i16> @llvm.arm.neon.vacge.v8i16.v8f16(<8 x half>, <8 x half>)
+declare <4 x i16> @llvm.arm.neon.vacgt.v4i16.v4f16(<4 x half>, <4 x half>)
+declare <8 x i16> @llvm.arm.neon.vacgt.v8i16.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vmaxs.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vmaxs.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vmaxnm.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vmaxnm.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vmins.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vmins.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vminnm.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vminnm.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vpadd.v4f16(<4 x half>, <4 x half>)
+declare <4 x half> @llvm.arm.neon.vpmaxs.v4f16(<4 x half>, <4 x half>)
+declare <4 x half> @llvm.arm.neon.vpmins.v4f16(<4 x half>, <4 x half>)
+declare <4 x half> @llvm.arm.neon.vrecps.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vrecps.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.arm.neon.vrsqrts.v4f16(<4 x half>, <4 x half>)
+declare <8 x half> @llvm.arm.neon.vrsqrts.v8f16(<8 x half>, <8 x half>)
+declare <4 x half> @llvm.fma.v4f16(<4 x half>, <4 x half>, <4 x half>)
+declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
+declare <8 x i8> @llvm.arm.neon.vbsl.v8i8(<8 x i8>, <8 x i8>, <8 x i8>)
+declare <16 x i8> @llvm.arm.neon.vbsl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
diff --git a/test/CodeGen/ARM/atomic-cmpxchg.ll b/test/CodeGen/ARM/atomic-cmpxchg.ll
index fd87e50d0b77..b5214f8d67e5 100644
--- a/test/CodeGen/ARM/atomic-cmpxchg.ll
+++ b/test/CodeGen/ARM/atomic-cmpxchg.ll
@@ -24,8 +24,7 @@ entry:
; CHECK-THUMB: bl __sync_val_compare_and_swap_1
; CHECK-THUMB-NOT: mov [[R1:r[0-7]]], r0
; CHECK-THUMB: subs [[R1:r[0-7]]], r0, {{r[0-9]+}}
-; CHECK-THUMB: movs r0, #0
-; CHECK-THUMB: subs r0, r0, [[R1]]
+; CHECK-THUMB: rsbs r0, [[R1]], #0
; CHECK-THUMB: adcs r0, [[R1]]
; CHECK-ARMV6-LABEL: test_cmpxchg_res_i8:
@@ -47,8 +46,7 @@ entry:
; CHECK-THUMBV6-NEXT: bl __sync_val_compare_and_swap_1
; CHECK-THUMBV6-NEXT: uxtb r1, r4
; CHECK-THUMBV6-NEXT: subs [[R1:r[0-7]]], r0, {{r[0-9]+}}
-; CHECK-THUMBV6-NEXT: movs r0, #0
-; CHECK-THUMBV6-NEXT: subs r0, r0, [[R1]]
+; CHECK-THUMBV6-NEXT: rsbs r0, [[R1]], #0
; CHECK-THUMBV6-NEXT: adcs r0, [[R1]]
; CHECK-ARMV7-LABEL: test_cmpxchg_res_i8:
diff --git a/test/CodeGen/ARM/atomic-ops-m33.ll b/test/CodeGen/ARM/atomic-ops-m33.ll
new file mode 100644
index 000000000000..474ad8960cf5
--- /dev/null
+++ b/test/CodeGen/ARM/atomic-ops-m33.ll
@@ -0,0 +1,140 @@
+; RUN: llc -mtriple=thumbv7-none-eabi -mcpu=cortex-m33 -verify-machineinstrs -o - %s | FileCheck %s
+
+; atomicrmw add on cortex-m33 (v8-M mainline): 8/16/32-bit use
+; exclusive-monitor loops whose acquire/release flavour follows the
+; requested ordering (seq_cst -> ldaex*/stlex*, acquire -> ldaex*/strex*,
+; release -> ldrex/stlex); no DMB barriers are emitted. 64-bit has no
+; exclusive pair here and falls back to the libcall.
+define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i8:
+ %old = atomicrmw add i8* @var8, i8 %offset seq_cst
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
+; CHECK: movt r[[ADDR]], :upper16:var8
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i8 %old
+}
+
+define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i16:
+ %old = atomicrmw add i16* @var16, i16 %offset acquire
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
+; CHECK: movt r[[ADDR]], :upper16:var16
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i16 %old
+}
+
+define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i32:
+ %old = atomicrmw add i32* @var32, i32 %offset release
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
+; CHECK: movt r[[ADDR]], :upper16:var32
+
+; CHECK: .LBB{{[0-9]+}}_1:
+; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
+ ; r0 below is a reasonable guess but could change: it certainly comes into the
+ ; function there.
+; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
+; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
+; CHECK-NEXT: cmp [[STATUS]], #0
+; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
+; CHECK-NOT: dmb
+; CHECK-NOT: mcr
+
+; CHECK: mov r0, r[[OLD]]
+ ret i32 %old
+}
+
+define void @test_atomic_load_add_i64(i64 %offset) nounwind {
+; CHECK-LABEL: test_atomic_load_add_i64:
+; CHECK: bl __sync_fetch_and_add_8
+ %old = atomicrmw add i64* @var64, i64 %offset monotonic
+ store i64 %old, i64* @var64
+ ret void
+}
+
+; Atomic loads/stores on cortex-m33: 8/16/32-bit map to the single
+; load-acquire (lda*) / store-release (stl*) instructions; 64-bit falls
+; back to __atomic_load/__atomic_store libcalls. NOTE(review): some
+; functions named "acquire"/"release" use seq_cst ordering — both map to
+; the same instruction here, but the names slightly overstate precision.
+define i8 @test_load_acquire_i8(i8* %ptr) {
+; CHECK-LABEL: test_load_acquire_i8:
+; CHECK: ldab r0, [r0]
+ %val = load atomic i8, i8* %ptr seq_cst, align 1
+ ret i8 %val
+}
+
+define i16 @test_load_acquire_i16(i16* %ptr) {
+; CHECK-LABEL: test_load_acquire_i16:
+; CHECK: ldah r0, [r0]
+ %val = load atomic i16, i16* %ptr acquire, align 2
+ ret i16 %val
+}
+
+define i32 @test_load_acquire_i32(i32* %ptr) {
+; CHECK-LABEL: test_load_acquire_i32:
+; CHECK: lda r0, [r0]
+ %val = load atomic i32, i32* %ptr acquire, align 4
+ ret i32 %val
+}
+
+define i64 @test_load_acquire_i64(i64* %ptr) {
+; CHECK-LABEL: test_load_acquire_i64:
+; CHECK: bl __atomic_load
+ %val = load atomic i64, i64* %ptr acquire, align 4
+ ret i64 %val
+}
+
+define void @test_store_release_i8(i8 %val, i8* %ptr) {
+; CHECK-LABEL: test_store_release_i8:
+; CHECK: stlb r0, [r1]
+ store atomic i8 %val, i8* %ptr seq_cst, align 1
+ ret void
+}
+
+define void @test_store_release_i16(i16 %val, i16* %ptr) {
+; CHECK-LABEL: test_store_release_i16:
+; CHECK: stlh r0, [r1]
+ store atomic i16 %val, i16* %ptr release, align 2
+ ret void
+}
+
+define void @test_store_release_i32(i32 %val, i32* %ptr) {
+; CHECK-LABEL: test_store_release_i32:
+; CHECK: stl r0, [r1]
+ store atomic i32 %val, i32* %ptr seq_cst, align 4
+ ret void
+}
+
+define void @test_store_release_i64(i64 %val, i64* %ptr) {
+; CHECK-LABEL: test_store_release_i64:
+; CHECK: bl __atomic_store
+ store atomic i64 %val, i64* %ptr seq_cst, align 4
+ ret void
+}
+
+
+@var8 = global i8 0
+@var16 = global i16 0
+@var32 = global i32 0
+@var64 = global i64 0
diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll
index 3d0c9419e8fe..fefe0c8fd231 100644
--- a/test/CodeGen/ARM/build-attributes.ll
+++ b/test/CodeGen/ARM/build-attributes.ll
@@ -3,23 +3,23 @@
; RUN: llc < %s -mtriple=thumbv5-linux-gnueabi -mcpu=xscale -mattr=+strict-align | FileCheck %s --check-prefix=XSCALE
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6-FAST
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align | FileCheck %s --check-prefix=V6M
-; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
+; RUN: llc < %s -mtriple=thumbv6sm-linux-gnueabi -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V6M-FAST
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align | FileCheck %s --check-prefix=ARM1156T2F-S
-; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST
+; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=ARM1156T2F-S-FAST
; RUN: llc < %s -mtriple=armv6-linux-gnueabi -mcpu=arm1156t2f-s -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefix=V7M
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7M-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=V7
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V7-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi | FileCheck %s --check-prefix=V8
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=V8-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi | FileCheck %s --check-prefix=Vt8
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
@@ -31,35 +31,35 @@
; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi | FileCheck %s --check-prefix=V8MMAINLINE
; RUN: llc < %s -mtriple=thumbv8m.main-linux-gnueabi -mattr=+dsp | FileCheck %s --check-prefix=V8MMAINLINE_DSP
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,+d16 | FileCheck %s --check-prefix=CORTEX-A5-NONEON
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A5-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-HARD-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-SOFT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A9-HARD
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A9-HARD-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A12-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s --check-prefix=CORTEX-A17-DEFAULT
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A17-NOFPU
-; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s --check-prefix=DENORMAL-IEEE
@@ -74,87 +74,87 @@
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 | FileCheck %s --check-prefix=CORTEX-M0
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0-FAST
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus | FileCheck %s --check-prefix=CORTEX-M0PLUS
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M0PLUS-FAST
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m0plus -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 | FileCheck %s --check-prefix=CORTEX-M1
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M1-FAST
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=cortex-m1 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align | FileCheck %s --check-prefix=SC000
-; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST
+; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC000-FAST
; RUN: llc < %s -mtriple=thumbv6m-linux-gnueabi -mcpu=sc000 -mattr=+strict-align -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 | FileCheck %s --check-prefix=CORTEX-M3
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M3-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 | FileCheck %s --check-prefix=SC300
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=SC300-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=sc300 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-M4-SOFT
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-SOFT-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD
-; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST
; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT
-; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE
-; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
+; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE
; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 | FileCheck %s --check-prefix=CORTEX-M33
-; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M33-FAST
; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m33 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4 | FileCheck %s --check-prefix=CORTEX-R4
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r4f | FileCheck %s --check-prefix=CORTEX-R4F
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 | FileCheck %s --check-prefix=CORTEX-R5
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R5-FAST
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 | FileCheck %s --check-prefix=CORTEX-R7
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R7-FAST
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 | FileCheck %s --check-prefix=CORTEX-R8
-; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST
+; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-R8-FAST
; RUN: llc < %s -mtriple=armv7r-linux-gnueabi -mcpu=cortex-r8 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 | FileCheck %s --check-prefix=CORTEX-A32
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A32-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a32 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 | FileCheck %s --check-prefix=CORTEX-A35
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A35-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a35 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 | FileCheck %s --check-prefix=CORTEX-A53
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A53-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a53 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 | FileCheck %s --check-prefix=CORTEX-A57
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A57-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a57 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 | FileCheck %s --check-prefix=CORTEX-A72
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A72-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a72 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=cortex-a73 | FileCheck %s --check-prefix=CORTEX-A73
; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 | FileCheck %s --check-prefix=EXYNOS-M1
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m1 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 | FileCheck %s --check-prefix=EXYNOS-M2
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m2 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 | FileCheck %s --check-prefix=EXYNOS-M3
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m3 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 | FileCheck %s --check-prefix=EXYNOS-M4
-; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
+; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=EXYNOS-M1-FAST
; RUN: llc < %s -mtriple=armv8-linux-gnueabi -mcpu=exynos-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST
+; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=GENERIC-ARMV8_1-A-FAST
; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING
-; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -disable-fp-elim -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST
+; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST
; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,+d16,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4
; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC
; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER
@@ -1623,7 +1623,7 @@
; EXYNOS-M4: .eabi_attribute 8, 1
; EXYNOS-M4: .eabi_attribute 9, 2
; EXYNOS-M4: .fpu crypto-neon-fp-armv8
-; EXYNOS-M4: .eabi_attribute 12, 3
+; EXYNOS-M4: .eabi_attribute 12, 4
; EXYNOS-M4-NOT: .eabi_attribute 27
; EXYNOS-M4: .eabi_attribute 36, 1
; EXYNOS-M4: .eabi_attribute 42, 1
diff --git a/test/CodeGen/ARM/cbz-implicit-it-range.ll b/test/CodeGen/ARM/cbz-implicit-it-range.ll
new file mode 100644
index 000000000000..b553a9ad6dfc
--- /dev/null
+++ b/test/CodeGen/ARM/cbz-implicit-it-range.ll
@@ -0,0 +1,47 @@
+;RUN: llc -O2 -mtriple=thumbv7a-linux-gnueabihf -arm-implicit-it=always %s -o - | FileCheck %s
+;RUN: llc -O2 -mtriple=thumbv7a-linux-gnueabihf -no-integrated-as %s -o - | FileCheck %s
+
+; Check that we do not produce a CBZ instruction to jump over the inline
+; assembly as the distance is too far if the implicit IT instructions are
+; added.
+
+define void @f0(i32 %p1, i32 %p2, i32 %p3) nounwind {
+entry:
+ %cmp = icmp eq i32 %p1, 0
+ br i1 %cmp, label %if.else, label %if.then
+
+if.then:
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ tail call void asm sideeffect "movseq r0, #0\0A", ""()
+ br label %if.end
+
+if.else:
+ tail call void asm sideeffect "nop\0A", ""()
+ br label %if.end
+
+if.end:
+ ret void
+}
+; CHECK-LABEL: f0:
+; CHECK: beq .LBB0_{{[0-9]+}}
+
diff --git a/test/CodeGen/ARM/clz.ll b/test/CodeGen/ARM/clz.ll
index 68e8c7cef1bc..0f49fbba1184 100644
--- a/test/CodeGen/ARM/clz.ll
+++ b/test/CodeGen/ARM/clz.ll
@@ -1,10 +1,12 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+v5t %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v5t %s -o - | FileCheck %s -check-prefixes=CHECK,INLINE
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefixes=CHECK,LIBCALL
declare i32 @llvm.ctlz.i32(i32, i1)
define i32 @test(i32 %x) {
-; CHECK: test
-; CHECK: clz r0, r0
+; CHECK-LABEL: test
+; INLINE: clz r0, r0
+; LIBCALL: b __clzsi2
%tmp.1 = call i32 @llvm.ctlz.i32( i32 %x, i1 true )
ret i32 %tmp.1
}
diff --git a/test/CodeGen/ARM/cmn.ll b/test/CodeGen/ARM/cmn.ll
index 9321527a9e25..fbcee5196b63 100644
--- a/test/CodeGen/ARM/cmn.ll
+++ b/test/CodeGen/ARM/cmn.ll
@@ -15,16 +15,15 @@ define i32 @compare_i_gt(i32 %a) {
;
; T1-LABEL: compare_i_gt:
; T1: @ %bb.0: @ %entry
-; T1-NEXT: mov r1, r0
-; T1-NEXT: movs r0, #77
-; T1-NEXT: mvns r3, r0
-; T1-NEXT: movs r0, #42
-; T1-NEXT: movs r2, #24
-; T1-NEXT: cmp r1, r3
+; T1-NEXT: movs r1, #77
+; T1-NEXT: mvns r1, r1
+; T1-NEXT: cmp r0, r1
; T1-NEXT: bgt .LBB0_2
; T1-NEXT: @ %bb.1: @ %entry
-; T1-NEXT: mov r0, r2
-; T1-NEXT: .LBB0_2: @ %entry
+; T1-NEXT: movs r0, #24
+; T1-NEXT: bx lr
+; T1-NEXT: .LBB0_2:
+; T1-NEXT: movs r0, #42
; T1-NEXT: bx lr
entry:
%cmp = icmp sgt i32 %a, -78
@@ -44,14 +43,13 @@ define i32 @compare_r_eq(i32 %a, i32 %b) {
;
; T1-LABEL: compare_r_eq:
; T1: @ %bb.0: @ %entry
-; T1-NEXT: mov r2, r0
-; T1-NEXT: movs r0, #42
-; T1-NEXT: movs r3, #24
-; T1-NEXT: cmn r2, r1
+; T1-NEXT: cmn r0, r1
; T1-NEXT: beq .LBB1_2
; T1-NEXT: @ %bb.1: @ %entry
-; T1-NEXT: mov r0, r3
-; T1-NEXT: .LBB1_2: @ %entry
+; T1-NEXT: movs r0, #24
+; T1-NEXT: bx lr
+; T1-NEXT: .LBB1_2:
+; T1-NEXT: movs r0, #42
; T1-NEXT: bx lr
entry:
%sub = sub nsw i32 0, %b
diff --git a/test/CodeGen/ARM/cmp.ll b/test/CodeGen/ARM/cmp.ll
index 5c1630912579..2e6b20cce732 100644
--- a/test/CodeGen/ARM/cmp.ll
+++ b/test/CodeGen/ARM/cmp.ll
@@ -39,15 +39,11 @@ define i1 @f6(i32 %a, i32 %b) {
define i1 @f7(i32 %a, i32 %b) {
; CHECK-LABEL: f7:
-; CHECK: sub r2, r0, r1, lsr #6
-; CHECK: cmp r0, r1, lsr #6
-; CHECK: movwne r2, #1
-; CHECK: mov r0, r2
-; CHECK-T2: sub.w r2, r0, r1, lsr #6
-; CHECK-T2: cmp.w r0, r1, lsr #6
+; CHECK: subs r0, r0, r1, lsr #6
+; CHECK: movwne r0, #1
+; CHECK-T2: subs.w r0, r0, r1, lsr #6
; CHECK-T2: it ne
-; CHECK-T2: movne r2, #1
-; CHECK-T2: mov r0, r2
+; CHECK-T2: movne r0, #1
%tmp = lshr i32 %b, 6
%tmp1 = icmp ne i32 %a, %tmp
ret i1 %tmp1
@@ -68,15 +64,11 @@ define i1 @f8(i32 %a, i32 %b) {
define i1 @f9(i32 %a) {
; CHECK-LABEL: f9:
-; CHECK: sub r1, r0, r0, ror #8
-; CHECK: cmp r0, r0, ror #8
-; CHECK: movwne r1, #1
-; CHECK: mov r0, r1
-; CHECK-T2: sub.w r1, r0, r0, ror #8
-; CHECK-T2: cmp.w r0, r0, ror #8
+; CHECK: subs r0, r0, r0, ror #8
+; CHECK: movwne r0, #1
+; CHECK-T2: subs.w r0, r0, r0, ror #8
; CHECK-T2: it ne
-; CHECK-T2: movne r1, #1
-; CHECK-T2: mov r0, r1
+; CHECK-T2: movne r0, #1
%l8 = shl i32 %a, 24
%r8 = lshr i32 %a, 8
%tmp = or i32 %l8, %r8
diff --git a/test/CodeGen/ARM/cmpxchg.mir b/test/CodeGen/ARM/cmpxchg.mir
new file mode 100644
index 000000000000..6ae7e6372493
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg.mir
@@ -0,0 +1,24 @@
+# RUN: llc -o - %s -mtriple=armv7-unknown-linux-gnu -verify-machineinstrs -run-pass=arm-pseudo | FileCheck %s
+---
+# CHECK-LABEL: name: func
+name: func
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $r0_r1, $r4_r5, $r3, $lr
+ dead early-clobber renamable $r0_r1, dead early-clobber renamable $r2 = CMP_SWAP_64 killed renamable $r3, killed renamable $r4_r5, renamable $r4_r5 :: (volatile load store monotonic monotonic 8)
+ ; CHECK: bb.0:
+ ; CHECK: liveins: $r0_r1, $r4_r5, $r3, $lr
+ ; CHECK: bb.1:
+  ; CHECK: liveins: $r4_r5, $r3
+ ; CHECK: $r0_r1 = LDREXD $r3, 14, $noreg
+ ; CHECK: CMPrr killed $r0, $r4, 14, $noreg, implicit-def $cpsr
+ ; CHECK: CMPrr killed $r1, $r5, 0, killed $cpsr, implicit-def $cpsr
+ ; CHECK: Bcc %bb.3, 1, killed $cpsr
+ ; CHECK: bb.2:
+ ; CHECK: liveins: $r4_r5, $r3
+ ; CHECK: early-clobber $r2 = STREXD $r4_r5, $r3, 14, $noreg
+ ; CHECK: CMPri killed $r2, 0, 14, $noreg, implicit-def $cpsr
+ ; CHECK: Bcc %bb.1, 1, killed $cpsr
+ ; CHECK: bb.3:
+...
diff --git a/test/CodeGen/ARM/codemodel.ll b/test/CodeGen/ARM/codemodel.ll
new file mode 100644
index 000000000000..ec9982faba12
--- /dev/null
+++ b/test/CodeGen/ARM/codemodel.ll
@@ -0,0 +1,9 @@
+; RUN: not llc -verify-machineinstrs -o - -mtriple=arm-none-eabi -code-model=tiny < %s 2>&1 | FileCheck %s --check-prefix=TINY
+; RUN: not llc -verify-machineinstrs -o - -mtriple=arm-none-eabi -code-model=kernel < %s 2>&1 | FileCheck %s --check-prefix=KERNEL
+
+; TINY: Target does not support the tiny CodeModel
+; KERNEL: Target does not support the kernel CodeModel
+
+define void @foo() {
+ ret void
+}
diff --git a/test/CodeGen/ARM/constant-island-movwt.mir b/test/CodeGen/ARM/constant-island-movwt.mir
new file mode 100644
index 000000000000..6f955af4e88b
--- /dev/null
+++ b/test/CodeGen/ARM/constant-island-movwt.mir
@@ -0,0 +1,902 @@
+# RUN: llc -mtriple=thumbv7-win32-gnu -run-pass=arm-cp-islands -o - %s | FileCheck %s
+
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "<stdin>"
+ target datalayout = "e-m:w-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv7-unknown-windows-gnu"
+
+ %struct.A = type { [201 x i8*] }
+
+ @.str.17 = private unnamed_addr constant [10 x i8] c"__ashlhi3\00", align 1
+ @.str.18 = private unnamed_addr constant [10 x i8] c"__ashlsi3\00", align 1
+ @.str.19 = private unnamed_addr constant [10 x i8] c"__ashldi3\00", align 1
+ @.str.20 = private unnamed_addr constant [10 x i8] c"__ashlti3\00", align 1
+ @.str.21 = private unnamed_addr constant [10 x i8] c"__lshrhi3\00", align 1
+ @.str.22 = private unnamed_addr constant [10 x i8] c"__lshrsi3\00", align 1
+ @.str.23 = private unnamed_addr constant [10 x i8] c"__lshrdi3\00", align 1
+ @.str.24 = private unnamed_addr constant [10 x i8] c"__lshrti3\00", align 1
+ @.str.25 = private unnamed_addr constant [10 x i8] c"__ashrhi3\00", align 1
+ @.str.26 = private unnamed_addr constant [10 x i8] c"__ashrsi3\00", align 1
+ @.str.27 = private unnamed_addr constant [10 x i8] c"__ashrdi3\00", align 1
+ @.str.28 = private unnamed_addr constant [10 x i8] c"__ashrti3\00", align 1
+ @.str.29 = private unnamed_addr constant [9 x i8] c"__mulqi3\00", align 1
+ @.str.30 = private unnamed_addr constant [9 x i8] c"__mulhi3\00", align 1
+ @.str.31 = private unnamed_addr constant [9 x i8] c"__mulsi3\00", align 1
+ @.str.32 = private unnamed_addr constant [9 x i8] c"__muldi3\00", align 1
+ @.str.33 = private unnamed_addr constant [9 x i8] c"__multi3\00", align 1
+ @.str.34 = private unnamed_addr constant [10 x i8] c"__mulosi4\00", align 1
+ @.str.35 = private unnamed_addr constant [10 x i8] c"__mulodi4\00", align 1
+ @.str.36 = private unnamed_addr constant [10 x i8] c"__muloti4\00", align 1
+ @.str.37 = private unnamed_addr constant [9 x i8] c"__divqi3\00", align 1
+ @.str.38 = private unnamed_addr constant [9 x i8] c"__divhi3\00", align 1
+ @.str.39 = private unnamed_addr constant [9 x i8] c"__divsi3\00", align 1
+ @.str.40 = private unnamed_addr constant [9 x i8] c"__divdi3\00", align 1
+ @.str.41 = private unnamed_addr constant [9 x i8] c"__divti3\00", align 1
+ @.str.42 = private unnamed_addr constant [10 x i8] c"__udivqi3\00", align 1
+ @.str.43 = private unnamed_addr constant [10 x i8] c"__udivhi3\00", align 1
+ @.str.44 = private unnamed_addr constant [10 x i8] c"__udivsi3\00", align 1
+ @.str.45 = private unnamed_addr constant [10 x i8] c"__udivdi3\00", align 1
+ @.str.46 = private unnamed_addr constant [10 x i8] c"__udivti3\00", align 1
+ @.str.47 = private unnamed_addr constant [9 x i8] c"__modqi3\00", align 1
+ @.str.48 = private unnamed_addr constant [9 x i8] c"__modhi3\00", align 1
+ @.str.49 = private unnamed_addr constant [9 x i8] c"__modsi3\00", align 1
+ @.str.50 = private unnamed_addr constant [9 x i8] c"__moddi3\00", align 1
+ @.str.51 = private unnamed_addr constant [9 x i8] c"__modti3\00", align 1
+ @.str.52 = private unnamed_addr constant [10 x i8] c"__umodqi3\00", align 1
+ @.str.53 = private unnamed_addr constant [10 x i8] c"__umodhi3\00", align 1
+ @.str.54 = private unnamed_addr constant [10 x i8] c"__umodsi3\00", align 1
+ @.str.55 = private unnamed_addr constant [10 x i8] c"__umoddi3\00", align 1
+ @.str.56 = private unnamed_addr constant [10 x i8] c"__umodti3\00", align 1
+ @.str.57 = private unnamed_addr constant [9 x i8] c"__negsi2\00", align 1
+ @.str.58 = private unnamed_addr constant [9 x i8] c"__negdi2\00", align 1
+ @.str.59 = private unnamed_addr constant [9 x i8] c"__addsf3\00", align 1
+ @.str.60 = private unnamed_addr constant [9 x i8] c"__adddf3\00", align 1
+ @.str.61 = private unnamed_addr constant [9 x i8] c"__addxf3\00", align 1
+ @.str.62 = private unnamed_addr constant [9 x i8] c"__addtf3\00", align 1
+ @.str.63 = private unnamed_addr constant [11 x i8] c"__gcc_qadd\00", align 1
+ @.str.64 = private unnamed_addr constant [9 x i8] c"__subsf3\00", align 1
+ @.str.65 = private unnamed_addr constant [9 x i8] c"__subdf3\00", align 1
+ @.str.66 = private unnamed_addr constant [9 x i8] c"__subxf3\00", align 1
+ @.str.67 = private unnamed_addr constant [9 x i8] c"__subtf3\00", align 1
+ @.str.68 = private unnamed_addr constant [11 x i8] c"__gcc_qsub\00", align 1
+ @.str.69 = private unnamed_addr constant [9 x i8] c"__mulsf3\00", align 1
+ @.str.70 = private unnamed_addr constant [9 x i8] c"__muldf3\00", align 1
+ @.str.71 = private unnamed_addr constant [9 x i8] c"__mulxf3\00", align 1
+ @.str.72 = private unnamed_addr constant [9 x i8] c"__multf3\00", align 1
+ @.str.73 = private unnamed_addr constant [11 x i8] c"__gcc_qmul\00", align 1
+ @.str.74 = private unnamed_addr constant [9 x i8] c"__divsf3\00", align 1
+ @.str.75 = private unnamed_addr constant [9 x i8] c"__divdf3\00", align 1
+ @.str.76 = private unnamed_addr constant [9 x i8] c"__divxf3\00", align 1
+ @.str.77 = private unnamed_addr constant [9 x i8] c"__divtf3\00", align 1
+ @.str.78 = private unnamed_addr constant [11 x i8] c"__gcc_qdiv\00", align 1
+ @.str.79 = private unnamed_addr constant [6 x i8] c"fmodf\00", align 1
+ @.str.80 = private unnamed_addr constant [5 x i8] c"fmod\00", align 1
+ @.str.81 = private unnamed_addr constant [6 x i8] c"fmodl\00", align 1
+ @.str.82 = private unnamed_addr constant [5 x i8] c"fmaf\00", align 1
+ @.str.83 = private unnamed_addr constant [4 x i8] c"fma\00", align 1
+ @.str.84 = private unnamed_addr constant [5 x i8] c"fmal\00", align 1
+ @.str.85 = private unnamed_addr constant [10 x i8] c"__powisf2\00", align 1
+ @.str.86 = private unnamed_addr constant [10 x i8] c"__powidf2\00", align 1
+ @.str.87 = private unnamed_addr constant [10 x i8] c"__powixf2\00", align 1
+ @.str.88 = private unnamed_addr constant [10 x i8] c"__powitf2\00", align 1
+ @.str.89 = private unnamed_addr constant [6 x i8] c"sqrtf\00", align 1
+ @.str.90 = private unnamed_addr constant [5 x i8] c"sqrt\00", align 1
+ @.str.91 = private unnamed_addr constant [6 x i8] c"sqrtl\00", align 1
+ @.str.92 = private unnamed_addr constant [5 x i8] c"logf\00", align 1
+ @.str.93 = private unnamed_addr constant [4 x i8] c"log\00", align 1
+ @.str.94 = private unnamed_addr constant [5 x i8] c"logl\00", align 1
+ @.str.95 = private unnamed_addr constant [14 x i8] c"__logf_finite\00", align 1
+ @.str.96 = private unnamed_addr constant [13 x i8] c"__log_finite\00", align 1
+ @.str.97 = private unnamed_addr constant [14 x i8] c"__logl_finite\00", align 1
+ @.str.98 = private unnamed_addr constant [6 x i8] c"log2f\00", align 1
+ @.str.99 = private unnamed_addr constant [5 x i8] c"log2\00", align 1
+ @.str.100 = private unnamed_addr constant [6 x i8] c"log2l\00", align 1
+ @.str.101 = private unnamed_addr constant [15 x i8] c"__log2f_finite\00", align 1
+ @.str.102 = private unnamed_addr constant [14 x i8] c"__log2_finite\00", align 1
+ @.str.103 = private unnamed_addr constant [15 x i8] c"__log2l_finite\00", align 1
+ @.str.104 = private unnamed_addr constant [7 x i8] c"log10f\00", align 1
+ @.str.105 = private unnamed_addr constant [6 x i8] c"log10\00", align 1
+ @.str.106 = private unnamed_addr constant [7 x i8] c"log10l\00", align 1
+ @.str.107 = private unnamed_addr constant [16 x i8] c"__log10f_finite\00", align 1
+ @.str.108 = private unnamed_addr constant [15 x i8] c"__log10_finite\00", align 1
+ @.str.109 = private unnamed_addr constant [16 x i8] c"__log10l_finite\00", align 1
+ @.str.110 = private unnamed_addr constant [5 x i8] c"expf\00", align 1
+ @.str.111 = private unnamed_addr constant [4 x i8] c"exp\00", align 1
+ @.str.112 = private unnamed_addr constant [5 x i8] c"expl\00", align 1
+ @.str.113 = private unnamed_addr constant [14 x i8] c"__expf_finite\00", align 1
+ @.str.114 = private unnamed_addr constant [13 x i8] c"__exp_finite\00", align 1
+ @.str.115 = private unnamed_addr constant [14 x i8] c"__expl_finite\00", align 1
+ @.str.116 = private unnamed_addr constant [6 x i8] c"exp2f\00", align 1
+ @.str.117 = private unnamed_addr constant [5 x i8] c"exp2\00", align 1
+ @.str.118 = private unnamed_addr constant [6 x i8] c"exp2l\00", align 1
+ @.str.119 = private unnamed_addr constant [15 x i8] c"__exp2f_finite\00", align 1
+ @.str.120 = private unnamed_addr constant [14 x i8] c"__exp2_finite\00", align 1
+ @.str.121 = private unnamed_addr constant [15 x i8] c"__exp2l_finite\00", align 1
+ @.str.122 = private unnamed_addr constant [5 x i8] c"sinf\00", align 1
+ @.str.123 = private unnamed_addr constant [4 x i8] c"sin\00", align 1
+ @.str.124 = private unnamed_addr constant [5 x i8] c"sinl\00", align 1
+ @.str.125 = private unnamed_addr constant [5 x i8] c"cosf\00", align 1
+ @.str.126 = private unnamed_addr constant [4 x i8] c"cos\00", align 1
+ @.str.127 = private unnamed_addr constant [5 x i8] c"cosl\00", align 1
+ @.str.128 = private unnamed_addr constant [5 x i8] c"powf\00", align 1
+ @.str.129 = private unnamed_addr constant [4 x i8] c"pow\00", align 1
+ @.str.130 = private unnamed_addr constant [5 x i8] c"powl\00", align 1
+ @.str.131 = private unnamed_addr constant [14 x i8] c"__powf_finite\00", align 1
+ @.str.132 = private unnamed_addr constant [13 x i8] c"__pow_finite\00", align 1
+ @.str.133 = private unnamed_addr constant [14 x i8] c"__powl_finite\00", align 1
+ @.str.134 = private unnamed_addr constant [6 x i8] c"ceilf\00", align 1
+ @.str.135 = private unnamed_addr constant [5 x i8] c"ceil\00", align 1
+ @.str.136 = private unnamed_addr constant [6 x i8] c"ceill\00", align 1
+ @.str.137 = private unnamed_addr constant [7 x i8] c"truncf\00", align 1
+ @.str.138 = private unnamed_addr constant [6 x i8] c"trunc\00", align 1
+ @.str.139 = private unnamed_addr constant [7 x i8] c"truncl\00", align 1
+ @.str.140 = private unnamed_addr constant [6 x i8] c"rintf\00", align 1
+ @.str.141 = private unnamed_addr constant [5 x i8] c"rint\00", align 1
+ @.str.142 = private unnamed_addr constant [6 x i8] c"rintl\00", align 1
+ @.str.143 = private unnamed_addr constant [11 x i8] c"nearbyintf\00", align 1
+ @.str.144 = private unnamed_addr constant [10 x i8] c"nearbyint\00", align 1
+ @.str.145 = private unnamed_addr constant [11 x i8] c"nearbyintl\00", align 1
+ @.str.146 = private unnamed_addr constant [7 x i8] c"roundf\00", align 1
+ @.str.147 = private unnamed_addr constant [6 x i8] c"round\00", align 1
+ @.str.148 = private unnamed_addr constant [7 x i8] c"roundl\00", align 1
+ @.str.149 = private unnamed_addr constant [7 x i8] c"floorf\00", align 1
+ @.str.150 = private unnamed_addr constant [6 x i8] c"floor\00", align 1
+ @.str.151 = private unnamed_addr constant [7 x i8] c"floorl\00", align 1
+ @.str.152 = private unnamed_addr constant [10 x i8] c"copysignf\00", align 1
+ @.str.153 = private unnamed_addr constant [9 x i8] c"copysign\00", align 1
+
+ ; Function Attrs: nounwind
+ define arm_aapcs_vfpcc void @func(%struct.A* %obj) #0 {
+ entry:
+ %arrayidx.i1 = bitcast %struct.A* %obj to i8**
+ %0 = bitcast i8** %arrayidx.i1 to <4 x i8*>*
+ store <4 x i8*> <i8* null, i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.18, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.19, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.20, i32 0, i32 0)>, <4 x i8*>* %0
+ %arrayidx.i62 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 4
+ %1 = bitcast i8** %arrayidx.i62 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.21, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.22, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.23, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.24, i32 0, i32 0)>, <4 x i8*>* %1
+ %arrayidx.i523 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 8
+ %2 = bitcast i8** %arrayidx.i523 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.25, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.26, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.27, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.28, i32 0, i32 0)>, <4 x i8*>* %2
+ %arrayidx.i519 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 12
+ %3 = bitcast i8** %arrayidx.i519 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.29, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.30, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.31, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.32, i32 0, i32 0)>, <4 x i8*>* %3
+ %arrayidx.i515 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 16
+ %4 = bitcast i8** %arrayidx.i515 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %4, i8 0, i64 40, i1 false)
+ %arrayidx.i511 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 20
+ %5 = bitcast i8** %arrayidx.i511 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.37, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.38, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.39, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.40, i32 0, i32 0)>, <4 x i8*>* %5
+ %arrayidx.i507 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 24
+ %6 = bitcast i8** %arrayidx.i507 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.41, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.42, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.43, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.44, i32 0, i32 0)>, <4 x i8*>* %6
+ %arrayidx.i503 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 28
+ %7 = bitcast i8** %arrayidx.i503 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.45, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.46, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.47, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.48, i32 0, i32 0)>, <4 x i8*>* %7
+ %arrayidx.i499 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 32
+ %8 = bitcast i8** %arrayidx.i499 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.49, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.50, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.51, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.52, i32 0, i32 0)>, <4 x i8*>* %8
+ %arrayidx.i495 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 36
+ %9 = bitcast i8** %arrayidx.i495 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.53, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.54, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.55, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.56, i32 0, i32 0)>, <4 x i8*>* %9
+ %arrayidx.i491 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 40
+ %arrayidx.i481 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 50
+ %10 = bitcast i8** %arrayidx.i491 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %10, i8 0, i64 40, i1 false)
+ %11 = bitcast i8** %arrayidx.i481 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.57, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.58, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.59, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.60, i32 0, i32 0)>, <4 x i8*>* %11
+ %arrayidx.i477 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 54
+ %12 = bitcast i8** %arrayidx.i477 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.61, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.62, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.63, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.64, i32 0, i32 0)>, <4 x i8*>* %12
+ %arrayidx.i473 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 58
+ %13 = bitcast i8** %arrayidx.i473 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.65, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.66, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.67, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.68, i32 0, i32 0)>, <4 x i8*>* %13
+ %arrayidx.i469 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 62
+ %14 = bitcast i8** %arrayidx.i469 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.69, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.70, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.71, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.72, i32 0, i32 0)>, <4 x i8*>* %14
+ %arrayidx.i465 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 66
+ %15 = bitcast i8** %arrayidx.i465 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.73, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.74, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.75, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.76, i32 0, i32 0)>, <4 x i8*>* %15
+ %arrayidx.i461 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 70
+ %16 = bitcast i8** %arrayidx.i461 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %16, i8 0, i64 40, i1 false)
+ %arrayidx.i457 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 74
+ %17 = bitcast i8** %arrayidx.i457 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.81, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.81, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.81, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.82, i32 0, i32 0)>, <4 x i8*>* %17
+ %arrayidx.i453 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 78
+ %18 = bitcast i8** %arrayidx.i453 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.83, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.84, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.84, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.84, i32 0, i32 0)>, <4 x i8*>* %18
+ %arrayidx.i449 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 82
+ %arrayidx.i445 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 86
+ %19 = bitcast i8** %arrayidx.i445 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.88, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.89, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.90, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.91, i32 0, i32 0)>, <4 x i8*>* %19
+ %arrayidx.i441 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 90
+ %20 = bitcast i8** %arrayidx.i441 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.91, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.91, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.92, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.93, i32 0, i32 0)>, <4 x i8*>* %20
+ %arrayidx.i437 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 94
+ %21 = bitcast i8** %arrayidx.i437 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %21, i8 0, i64 28, i1 false)
+ %arrayidx.i433 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 98
+ %22 = bitcast i8** %arrayidx.i433 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.96, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.97, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.97, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.97, i32 0, i32 0)>, <4 x i8*>* %22
+ %arrayidx.i429 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 102
+ %23 = bitcast i8** %arrayidx.i429 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.98, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.99, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.100, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.100, i32 0, i32 0)>, <4 x i8*>* %23
+ %arrayidx.i425 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 106
+ %24 = bitcast i8** %arrayidx.i425 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %24, i8 0, i64 28, i1 false)
+ %arrayidx.i421 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 110
+ %25 = bitcast i8** %arrayidx.i421 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.103, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.103, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.104, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.105, i32 0, i32 0)>, <4 x i8*>* %25
+ %arrayidx.i417 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 114
+ %26 = bitcast i8** %arrayidx.i417 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.106, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.106, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.106, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str.107, i32 0, i32 0)>, <4 x i8*>* %26
+ %arrayidx.i413 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 118
+ %27 = bitcast i8** %arrayidx.i413 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.108, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str.109, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str.109, i32 0, i32 0), i8* getelementptr inbounds ([16 x i8], [16 x i8]* @.str.109, i32 0, i32 0)>, <4 x i8*>* %27
+ %arrayidx.i409 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 122
+ %28 = bitcast i8** %arrayidx.i409 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.110, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.111, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.112, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.112, i32 0, i32 0)>, <4 x i8*>* %28
+ %arrayidx.i405 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 126
+ %29 = bitcast i8** %arrayidx.i405 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.112, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.113, i32 0, i32 0), i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.114, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.115, i32 0, i32 0)>, <4 x i8*>* %29
+ %arrayidx.i401 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 130
+ %30 = bitcast i8** %arrayidx.i401 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.115, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.115, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.116, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.117, i32 0, i32 0)>, <4 x i8*>* %30
+ %arrayidx.i397 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 134
+ %31 = bitcast i8** %arrayidx.i397 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.118, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.118, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.118, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.119, i32 0, i32 0)>, <4 x i8*>* %31
+ %arrayidx.i393 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 138
+ %32 = bitcast i8** %arrayidx.i393 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.120, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.121, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.121, i32 0, i32 0), i8* getelementptr inbounds ([15 x i8], [15 x i8]* @.str.121, i32 0, i32 0)>, <4 x i8*>* %32
+ %arrayidx.i389 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 142
+ %33 = bitcast i8** %arrayidx.i389 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.122, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.123, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.124, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.124, i32 0, i32 0)>, <4 x i8*>* %33
+ %arrayidx.i385 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 146
+ %34 = bitcast i8** %arrayidx.i385 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.124, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.125, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.126, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.127, i32 0, i32 0)>, <4 x i8*>* %34
+ %arrayidx.i381 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 150
+ store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.127, i32 0, i32 0), i8** %arrayidx.i381
+ %arrayidx.i380 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 151
+ store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.127, i32 0, i32 0), i8** %arrayidx.i380
+ %arrayidx.i379 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 152
+ %arrayidx.i375 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 156
+ %arrayidx.i374 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 157
+ %arrayidx.i373 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 158
+ %arrayidx.i372 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 159
+ %35 = bitcast i8** %arrayidx.i379 to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 4 %35, i8 0, i64 28, i1 false)
+ store i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.128, i32 0, i32 0), i8** %arrayidx.i372
+ %arrayidx.i371 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 160
+ store i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.129, i32 0, i32 0), i8** %arrayidx.i371
+ %arrayidx.i370 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 161
+ %36 = bitcast i8** %arrayidx.i370 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.130, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.130, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.130, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.131, i32 0, i32 0)>, <4 x i8*>* %36
+ %arrayidx.i366 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 165
+ %37 = bitcast i8** %arrayidx.i366 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([13 x i8], [13 x i8]* @.str.132, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.133, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.133, i32 0, i32 0), i8* getelementptr inbounds ([14 x i8], [14 x i8]* @.str.133, i32 0, i32 0)>, <4 x i8*>* %37
+ %arrayidx.i362 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 169
+ %38 = bitcast i8** %arrayidx.i362 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.134, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.135, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.136, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.136, i32 0, i32 0)>, <4 x i8*>* %38
+ %arrayidx.i358 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 173
+ %39 = bitcast i8** %arrayidx.i358 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.136, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.137, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.138, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.139, i32 0, i32 0)>, <4 x i8*>* %39
+ %arrayidx.i354 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 177
+ %40 = bitcast i8** %arrayidx.i354 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.139, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.139, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.140, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.141, i32 0, i32 0)>, <4 x i8*>* %40
+ %arrayidx.i350 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 181
+ %41 = bitcast i8** %arrayidx.i350 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.142, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.142, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.142, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.143, i32 0, i32 0)>, <4 x i8*>* %41
+ %arrayidx.i346 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 185
+ %42 = bitcast i8** %arrayidx.i346 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.144, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.145, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.145, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str.145, i32 0, i32 0)>, <4 x i8*>* %42
+ %arrayidx.i342 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 189
+ %43 = bitcast i8** %arrayidx.i342 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.146, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.147, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.148, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.148, i32 0, i32 0)>, <4 x i8*>* %43
+ %arrayidx.i338 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 193
+ %44 = bitcast i8** %arrayidx.i338 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.148, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.149, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str.150, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.151, i32 0, i32 0)>, <4 x i8*>* %44
+ %arrayidx.i334 = getelementptr inbounds %struct.A, %struct.A* %obj, i32 0, i32 0, i32 197
+ %45 = bitcast i8** %arrayidx.i334 to <4 x i8*>*
+ store <4 x i8*> <i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.151, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.151, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @.str.152, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @.str.153, i32 0, i32 0)>, <4 x i8*>* %45
+ ret void
+ }
+
+ ; Function Attrs: argmemonly nounwind
+ declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #2
+
+ attributes #0 = { nounwind "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+strict-align,+thumb-mode,+vfp3" }
+ attributes #1 = { argmemonly nounwind }
+ attributes #2 = { nounwind }
+
+...
+---
+name: func
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+registers:
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 56
+ offsetAdjustment: 0
+ maxAlignment: 8
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$lr', callee-saved-restored: false,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$r11', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 2, name: '', type: spill-slot, offset: -12, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$r7', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 3, name: '', type: spill-slot, offset: -16, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$r6', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 4, name: '', type: spill-slot, offset: -20, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$r5', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 5, name: '', type: spill-slot, offset: -24, size: 4, alignment: 4,
+ stack-id: 0, callee-saved-register: '$r4', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 6, name: '', type: spill-slot, offset: -32, size: 8, alignment: 8,
+ stack-id: 0, callee-saved-register: '$d11', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 7, name: '', type: spill-slot, offset: -40, size: 8, alignment: 8,
+ stack-id: 0, callee-saved-register: '$d10', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 8, name: '', type: spill-slot, offset: -48, size: 8, alignment: 8,
+ stack-id: 0, callee-saved-register: '$d9', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 9, name: '', type: spill-slot, offset: -56, size: 8, alignment: 8,
+ stack-id: 0, callee-saved-register: '$d8', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+constants:
+ - id: 0
+ value: 'float 0.000000e+00'
+ alignment: 4
+ isTargetSpecific: false
+body: |
+ bb.0.entry:
+ liveins: $r0, $r4, $r5, $r6, $r7, $r11, $lr, $d8, $d9, $d10, $d11
+
+ $sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r4, killed $r5, killed $r6, killed $r7, killed $r11, killed $lr
+ frame-setup CFI_INSTRUCTION def_cfa_offset 24
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r11, -8
+ frame-setup CFI_INSTRUCTION offset $r7, -12
+ frame-setup CFI_INSTRUCTION offset $r6, -16
+ frame-setup CFI_INSTRUCTION offset $r5, -20
+ frame-setup CFI_INSTRUCTION offset $r4, -24
+ $sp = frame-setup VSTMDDB_UPD $sp, 14, $noreg, killed $d8, killed $d9, killed $d10, killed $d11
+ frame-setup CFI_INSTRUCTION def_cfa_offset 56
+ frame-setup CFI_INSTRUCTION offset $d11, -32
+ frame-setup CFI_INSTRUCTION offset $d10, -40
+ frame-setup CFI_INSTRUCTION offset $d9, -48
+ frame-setup CFI_INSTRUCTION offset $d8, -56
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.18, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.18, 14, $noreg
+ renamable $r4, dead $cpsr = tMOVi8 100, 14, $noreg
+ renamable $r6 = t2ADDri renamable $r0, 520, 14, $noreg, $noreg
+ $d2 = VSETLNi32 undef $d2, killed $r1, 1, 14, $noreg, implicit-def $q1, implicit-def $s5
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.71, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.71, 14, $noreg
+ renamable $d25 = VSETLNi32 undef renamable $d25, killed renamable $r1, 0, 14, $noreg, implicit-def $q12
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.73, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.73, 14, $noreg
+ renamable $d26 = VSETLNi32 undef renamable $d26, killed renamable $r1, 0, 14, $noreg, implicit-def $q13
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.75, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.75, 14, $noreg
+ renamable $d27 = VSETLNi32 undef renamable $d27, killed renamable $r1, 0, 14, $noreg, implicit killed $q13, implicit-def $q13
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.19, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.19, 14, $noreg
+ renamable $s4 = VLDRS %const.0, 0, 14, $noreg, implicit killed $q1, implicit-def $q1 :: (load 4 from constant-pool)
+ renamable $d3 = VSETLNi32 undef renamable $d3, killed renamable $r1, 0, 14, $noreg, implicit killed $q1, implicit-def $q1
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.61, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.61, 14, $noreg
+ renamable $d20 = VSETLNi32 undef renamable $d20, killed renamable $r1, 0, 14, $noreg, implicit-def $q10
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.63, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.63, 14, $noreg
+ renamable $d21 = VSETLNi32 undef renamable $d21, killed renamable $r1, 0, 14, $noreg, implicit killed $q10, implicit-def $q10
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.122, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.122, 14, $noreg
+ $r12 = t2MOVi16 target-flags(arm-lo16) @.str.112, 14, $noreg
+ $r12 = t2MOVTi16 $r12, target-flags(arm-hi16) @.str.112, 14, $noreg
+ renamable $d16 = VSETLNi32 undef renamable $d16, killed renamable $r1, 0, 14, $noreg, implicit-def $q8
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.114, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.114, 14, $noreg
+ renamable $d18 = VSETLNi32 undef renamable $d18, renamable $r12, 0, 14, $noreg, implicit-def $q9
+ renamable $d19 = VSETLNi32 undef renamable $d19, killed renamable $r1, 0, 14, $noreg, implicit killed $q9, implicit-def $q9
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.57, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.57, 14, $noreg
+ renamable $d28 = VSETLNi32 undef renamable $d28, killed renamable $r1, 0, 14, $noreg, implicit-def $q14
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.53, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.53, 14, $noreg
+ renamable $d22 = VSETLNi32 undef renamable $d22, killed renamable $r1, 0, 14, $noreg, implicit-def $q11
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.49, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.49, 14, $noreg
+ renamable $d30 = VSETLNi32 undef renamable $d30, killed renamable $r1, 0, 14, $noreg, implicit-def $q15
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.45, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.45, 14, $noreg
+ renamable $d0 = VSETLNi32 undef renamable $d0, killed renamable $r1, 0, 14, $noreg, implicit-def $q0
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.37, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.37, 14, $noreg
+ renamable $d8 = VSETLNi32 undef renamable $d8, killed renamable $r1, 0, 14, $noreg, implicit-def $q4
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.25, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.25, 14, $noreg
+ renamable $d4 = VSETLNi32 undef renamable $d4, killed renamable $r1, 0, 14, $noreg, implicit-def $q2
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.21, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.21, 14, $noreg
+ renamable $d6 = VSETLNi32 undef renamable $d6, killed renamable $r1, 0, 14, $noreg, implicit-def $q3
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.27, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.27, 14, $noreg
+ renamable $d5 = VSETLNi32 undef renamable $d5, killed renamable $r1, 0, 14, $noreg, implicit killed $q2, implicit-def $q2
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.23, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.23, 14, $noreg
+ renamable $d7 = VSETLNi32 undef renamable $d7, killed renamable $r1, 0, 14, $noreg, implicit killed $q3, implicit-def $q3
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.28, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.28, 14, $noreg
+ renamable $d5 = VSETLNi32 killed renamable $d5, killed renamable $r1, 1, 14, $noreg, implicit $q2, implicit-def $q2
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.24, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.24, 14, $noreg
+ renamable $d7 = VSETLNi32 killed renamable $d7, killed renamable $r1, 1, 14, $noreg, implicit $q3, implicit-def $q3
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.22, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.22, 14, $noreg
+ renamable $d6 = VSETLNi32 killed renamable $d6, killed renamable $r1, 1, 14, $noreg, implicit $q3, implicit-def $q3
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.26, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.26, 14, $noreg
+ renamable $d4 = VSETLNi32 killed renamable $d4, killed renamable $r1, 1, 14, $noreg, implicit $q2, implicit-def $q2
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.29, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.29, 14, $noreg
+ renamable $d10 = VSETLNi32 undef renamable $d10, killed renamable $r1, 0, 14, $noreg, implicit-def $q5
+ renamable $r1 = t2ADDri renamable $r0, 16, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q3, 14, $noreg :: (store 16 into %ir.1, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.39, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.39, 14, $noreg
+ renamable $d9 = VSETLNi32 undef renamable $d9, killed renamable $r1, 0, 14, $noreg, implicit killed $q4, implicit-def $q4
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.69, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.69, 14, $noreg
+ renamable $d24 = VSETLNi32 undef renamable $d24, killed renamable $r1, 0, 14, $noreg, implicit killed $q12, implicit-def $q12
+ renamable $r1 = t2ADDri renamable $r0, 32, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q2, 14, $noreg :: (store 16 into %ir.2, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.31, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.31, 14, $noreg
+ renamable $d11 = VSETLNi32 undef renamable $d11, killed renamable $r1, 0, 14, $noreg, implicit killed $q5, implicit-def $q5
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.40, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.40, 14, $noreg
+ renamable $d9 = VSETLNi32 killed renamable $d9, killed renamable $r1, 1, 14, $noreg, implicit $q4, implicit-def $q4
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.32, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.32, 14, $noreg
+ renamable $d11 = VSETLNi32 killed renamable $d11, killed renamable $r1, 1, 14, $noreg, implicit $q5, implicit-def $q5
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.30, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.30, 14, $noreg
+ renamable $d10 = VSETLNi32 killed renamable $d10, killed renamable $r1, 1, 14, $noreg, implicit $q5, implicit-def $q5
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.38, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.38, 14, $noreg
+ renamable $d8 = VSETLNi32 killed renamable $d8, killed renamable $r1, 1, 14, $noreg, implicit $q4, implicit-def $q4
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.67, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.67, 14, $noreg
+ renamable $d5 = VSETLNi32 undef renamable $d5, killed renamable $r1, 0, 14, $noreg, implicit-def $q2
+ renamable $r1 = t2ADDri renamable $r0, 48, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q5, 14, $noreg :: (store 16 into %ir.3, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.51, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.51, 14, $noreg
+ renamable $d31 = VSETLNi32 undef renamable $d31, killed renamable $r1, 0, 14, $noreg, implicit killed $q15, implicit-def $q15
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.43, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.43, 14, $noreg
+ renamable $d7 = VSETLNi32 undef renamable $d7, killed renamable $r1, 0, 14, $noreg, implicit-def $q3
+ renamable $r1 = t2ADDri renamable $r0, 80, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q4, 14, $noreg :: (store 16 into %ir.5, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.47, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.47, 14, $noreg
+ renamable $d1 = VSETLNi32 undef renamable $d1, killed renamable $r1, 0, 14, $noreg, implicit killed $q0, implicit-def $q0
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.52, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.52, 14, $noreg
+ renamable $d31 = VSETLNi32 killed renamable $d31, killed renamable $r1, 1, 14, $noreg, implicit $q15, implicit-def $q15
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.48, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.48, 14, $noreg
+ renamable $d1 = VSETLNi32 killed renamable $d1, killed renamable $r1, 1, 14, $noreg, implicit $q0, implicit-def $q0
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.46, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.46, 14, $noreg
+ renamable $d0 = VSETLNi32 killed renamable $d0, killed renamable $r1, 1, 14, $noreg, implicit $q0, implicit-def $q0
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.50, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.50, 14, $noreg
+ renamable $d30 = VSETLNi32 killed renamable $d30, killed renamable $r1, 1, 14, $noreg, implicit $q15, implicit-def $q15
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.41, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.41, 14, $noreg
+ renamable $d6 = VSETLNi32 undef renamable $d6, killed renamable $r1, 0, 14, $noreg, implicit killed $q3, implicit-def $q3
+ renamable $r1 = t2ADDri renamable $r0, 112, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q0, 14, $noreg :: (store 16 into %ir.7, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.59, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.59, 14, $noreg
+ renamable $d29 = VSETLNi32 undef renamable $d29, killed renamable $r1, 0, 14, $noreg, implicit killed $q14, implicit-def $q14
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.65, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.65, 14, $noreg
+ renamable $d4 = VSETLNi32 undef renamable $d4, killed renamable $r1, 0, 14, $noreg, implicit killed $q2, implicit-def $q2
+ renamable $r1 = t2ADDri renamable $r0, 128, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q15, 14, $noreg :: (store 16 into %ir.8, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.55, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.55, 14, $noreg
+ renamable $d23 = VSETLNi32 undef renamable $d23, killed renamable $r1, 0, 14, $noreg, implicit killed $q11, implicit-def $q11
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.60, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.60, 14, $noreg
+ renamable $d29 = VSETLNi32 killed renamable $d29, killed renamable $r1, 1, 14, $noreg, implicit $q14, implicit-def $q14
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.56, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.56, 14, $noreg
+ renamable $d23 = VSETLNi32 killed renamable $d23, killed renamable $r1, 1, 14, $noreg, implicit $q11, implicit-def $q11
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.54, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.54, 14, $noreg
+ renamable $d22 = VSETLNi32 killed renamable $d22, killed renamable $r1, 1, 14, $noreg, implicit $q11, implicit-def $q11
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.58, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.58, 14, $noreg
+ renamable $d28 = VSETLNi32 killed renamable $d28, killed renamable $r1, 1, 14, $noreg, implicit $q14, implicit-def $q14
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.104, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.104, 14, $noreg
+ renamable $d31 = VSETLNi32 undef renamable $d31, killed renamable $r1, 0, 14, $noreg, implicit-def $q15
+ renamable $r1 = t2ADDri renamable $r0, 144, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q11, 14, $noreg :: (store 16 into %ir.9, align 8)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.126, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.126, 14, $noreg
+ renamable $d23 = VSETLNi32 undef renamable $d23, killed renamable $r1, 0, 14, $noreg, implicit-def $q11
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.98, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.98, 14, $noreg
+ renamable $d0 = VSETLNi32 undef renamable $d0, killed renamable $r1, 0, 14, $noreg, implicit-def $q0
+ renamable $r1 = t2ADDri renamable $r0, 200, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q14, 14, $noreg :: (store 16 into %ir.11, align 8)
+ $lr = t2MOVi16 target-flags(arm-lo16) @.str.124, 14, $noreg
+ $lr = t2MOVTi16 $lr, target-flags(arm-hi16) @.str.124, 14, $noreg
+ $r2 = t2MOVi16 target-flags(arm-lo16) @.str.127, 14, $noreg
+ $r2 = t2MOVTi16 $r2, target-flags(arm-hi16) @.str.127, 14, $noreg
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.125, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.125, 14, $noreg
+ $r7 = t2MOVi16 target-flags(arm-lo16) @.str.115, 14, $noreg
+ $r7 = t2MOVTi16 $r7, target-flags(arm-hi16) @.str.115, 14, $noreg
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.113, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.113, 14, $noreg
+ renamable $d22 = VSETLNi32 undef renamable $d22, renamable $lr, 0, 14, $noreg, implicit killed $q11, implicit-def $q11
+ renamable $d19 = VSETLNi32 killed renamable $d19, renamable $r7, 1, 14, $noreg, implicit $q9, implicit-def $q9
+ renamable $d23 = VSETLNi32 killed renamable $d23, renamable $r2, 1, 14, $noreg, implicit $q11, implicit-def $q11
+ renamable $d18 = VSETLNi32 killed renamable $d18, killed renamable $r3, 1, 14, $noreg, implicit $q9, implicit-def $q9
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.123, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.123, 14, $noreg
+ renamable $d16 = VSETLNi32 killed renamable $d16, killed renamable $r3, 1, 14, $noreg, implicit $q8, implicit-def $q8
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.64, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.64, 14, $noreg
+ renamable $d21 = VSETLNi32 killed renamable $d21, killed renamable $r3, 1, 14, $noreg, implicit $q10, implicit-def $q10
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.62, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.62, 14, $noreg
+ renamable $d22 = VSETLNi32 killed renamable $d22, killed renamable $r1, 1, 14, $noreg, implicit $q11, implicit-def $q11
+ renamable $r1 = t2ADDri renamable $r0, 456, 14, $noreg, $noreg
+ renamable $d20 = VSETLNi32 killed renamable $d20, killed renamable $r3, 1, 14, $noreg, implicit $q10, implicit-def $q10
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.20, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.20, 14, $noreg
+ renamable $d3 = VSETLNi32 killed renamable $d3, killed renamable $r3, 1, 14, $noreg, implicit $q1, implicit-def $q1
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.121, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.121, 14, $noreg
+ renamable $q14 = VDUP32q killed renamable $r3, 14, $noreg
+ renamable $r3 = t2ADDri renamable $r0, 216, 14, $noreg, $noreg
+ VST1q64 killed $r3, 0, killed $q10, 14, $noreg :: (store 16 into %ir.12, align 8)
+ $r3 = tMOVr $r0, 14, $noreg
+ renamable $r3 = VST1q32wb_register killed $r3, 0, killed $r4, killed $q1, 14, $noreg :: (store 16 into %ir.0, align 8)
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.120, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.120, 14, $noreg
+ renamable $q10 = VMOVv4i32 0, 14, $noreg
+ renamable $d28 = VSETLNi32 killed renamable $d28, killed renamable $r4, 0, 14, $noreg, implicit $q14, implicit-def $q14
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.76, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.76, 14, $noreg
+ renamable $d27 = VSETLNi32 killed renamable $d27, killed renamable $r4, 1, 14, $noreg, implicit $q13, implicit-def $q13
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.74, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.74, 14, $noreg
+ renamable $d17 = VDUP32d killed renamable $lr, 14, $noreg, implicit killed $q8, implicit-def $q8
+ renamable $lr = t2ADDri renamable $r0, 788, 14, $noreg, $noreg
+ renamable $d26 = VSETLNi32 killed renamable $d26, killed renamable $r4, 1, 14, $noreg, implicit $q13, implicit-def $q13
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.72, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.72, 14, $noreg
+ renamable $d25 = VSETLNi32 killed renamable $d25, killed renamable $r4, 1, 14, $noreg, implicit $q12, implicit-def $q12
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.70, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.70, 14, $noreg
+ renamable $d24 = VSETLNi32 killed renamable $d24, killed renamable $r4, 1, 14, $noreg, implicit $q12, implicit-def $q12
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.68, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.68, 14, $noreg
+ renamable $d5 = VSETLNi32 killed renamable $d5, killed renamable $r4, 1, 14, $noreg, implicit $q2, implicit-def $q2
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.44, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.44, 14, $noreg
+ renamable $d7 = VSETLNi32 killed renamable $d7, killed renamable $r4, 1, 14, $noreg, implicit $q3, implicit-def $q3
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.42, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.42, 14, $noreg
+ renamable $d6 = VSETLNi32 killed renamable $d6, killed renamable $r4, 1, 14, $noreg, implicit $q3, implicit-def $q3
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.66, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.66, 14, $noreg
+ renamable $d4 = VSETLNi32 killed renamable $d4, killed renamable $r4, 1, 14, $noreg, implicit $q2, implicit-def $q2
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.88, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.88, 14, $noreg
+ renamable $d8 = VSETLNi32 undef renamable $d8, killed renamable $r4, 0, 14, $noreg, implicit-def $q4
+ renamable $r4, dead $cpsr = tMOVi8 0, 14, $noreg
+ tSTRi renamable $r4, killed renamable $r3, 0, 14, $noreg :: (store 4 into %ir.4 + 36)
+ $r3 = tMOVr $r0, 14, $noreg
+ t2STRDi8 $r4, $r4, $r0, 192, 14, $noreg
+ early-clobber renamable $r3 = t2STR_PRE renamable $r4, killed renamable $r3, 96, 14, $noreg :: (store 4 into %ir.4 + 32)
+ VST1q64 killed $r3, 0, killed $q3, 14, $noreg :: (store 16 into %ir.6, align 8)
+ renamable $r3 = t2ADDri renamable $r0, 64, 14, $noreg, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.81, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.81, 14, $noreg
+ VST1q32 killed $r3, 0, $q10, 14, $noreg :: (store 16 into %ir.4, align 4)
+ renamable $r3 = t2ADDri renamable $r0, 176, 14, $noreg, $noreg
+ VST1q32 killed $r3, 0, $q10, 14, $noreg :: (store 16 into %ir.10 + 16, align 4)
+ renamable $r3 = t2ADDri renamable $r0, 160, 14, $noreg, $noreg
+ renamable $q1 = VDUP32q killed renamable $r5, 14, $noreg
+ renamable $r5 = t2ADDri renamable $r0, 248, 14, $noreg, $noreg
+ VST1q32 killed $r3, 0, $q10, 14, $noreg :: (store 16 into %ir.10, align 4)
+ renamable $r3 = t2ADDri renamable $r0, 232, 14, $noreg, $noreg
+ VST1q64 killed $r3, 0, killed $q2, 14, $noreg :: (store 16 into %ir.13, align 8)
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.82, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.82, 14, $noreg
+ VST1q64 killed $r5, 0, killed $q12, 14, $noreg :: (store 16 into %ir.14, align 8)
+ renamable $d3 = VSETLNi32 killed renamable $d3, killed renamable $r3, 1, 14, $noreg, implicit $q1, implicit-def $q1
+ renamable $r3 = t2ADDri renamable $r0, 264, 14, $noreg, $noreg
+ VST1q64 killed $r3, 0, killed $q13, 14, $noreg :: (store 16 into %ir.15, align 8)
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.91, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.91, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.90, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.90, 14, $noreg
+ renamable $d9 = VSETLNi32 undef renamable $d9, killed renamable $r5, 0, 14, $noreg, implicit killed $q4, implicit-def $q4
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.118, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.118, 14, $noreg
+ renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.84, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.84, 14, $noreg
+ renamable $q13 = VDUP32q killed renamable $r5, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.92, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.92, 14, $noreg
+ t2STRDi8 killed $r4, $r4, $r0, 312, 14, $noreg
+ renamable $r4 = t2ADDri renamable $r0, 296, 14, $noreg, $noreg
+ renamable $d5 = VSETLNi32 undef renamable $d5, killed renamable $r5, 0, 14, $noreg, implicit-def $q2
+ renamable $r5 = t2ADDri renamable $r0, 280, 14, $noreg, $noreg
+ VST1q32 $r4, 0, $q10, 14, $noreg :: (store 16 into %ir.16 + 16, align 4)
+ VST1q32 killed $r5, 0, $q10, 14, $noreg :: (store 16 into %ir.16, align 4)
+ VST1q64 killed $r4, 0, killed $q1, 14, $noreg :: (store 16 into %ir.17, align 8)
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.83, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.83, 14, $noreg
+ renamable $d9 = VSETLNi32 killed renamable $d9, renamable $r3, 1, 14, $noreg, implicit $q4, implicit-def $q4
+ renamable $r4 = t2ADDri renamable $r0, 312, 14, $noreg, $noreg
+ renamable $d26 = VSETLNi32 killed renamable $d26, killed renamable $r5, 0, 14, $noreg, implicit $q13, implicit-def $q13
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.89, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.89, 14, $noreg
+ renamable $d8 = VSETLNi32 killed renamable $d8, killed renamable $r5, 1, 14, $noreg, implicit $q4, implicit-def $q4
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.106, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.106, 14, $noreg
+ VST1q64 killed $r4, 0, killed $q13, 14, $noreg :: (store 16 into %ir.18, align 8)
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.119, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.119, 14, $noreg
+ renamable $q1 = VDUP32q killed renamable $r5, 14, $noreg
+ renamable $r5 = t2ADDri renamable $r0, 436, 14, $noreg, $noreg
+ renamable $d25 = VSETLNi32 killed renamable $d25, killed renamable $r4, 1, 14, $noreg, implicit $q12, implicit-def $q12
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.116, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.116, 14, $noreg
+ renamable $d27 = VSETLNi32 undef renamable $d27, killed renamable $r4, 0, 14, $noreg, implicit-def $q13
+ renamable $r4 = t2ADDri renamable $r0, 344, 14, $noreg, $noreg
+ VST1q64 killed $r4, 0, killed $q4, 14, $noreg :: (store 16 into %ir.19, align 8)
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.107, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.107, 14, $noreg
+ renamable $d3 = VSETLNi32 killed renamable $d3, killed renamable $r4, 1, 14, $noreg, implicit $q1, implicit-def $q1
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.105, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.105, 14, $noreg
+ renamable $d31 = VSETLNi32 killed renamable $d31, killed renamable $r4, 1, 14, $noreg, implicit $q15, implicit-def $q15
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.99, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.99, 14, $noreg
+ renamable $d0 = VSETLNi32 killed renamable $d0, killed renamable $r4, 1, 14, $noreg, implicit $q0, implicit-def $q0
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.93, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.93, 14, $noreg
+ renamable $d5 = VSETLNi32 killed renamable $d5, killed renamable $r4, 1, 14, $noreg, implicit $q2, implicit-def $q2
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.117, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.117, 14, $noreg
+ renamable $d27 = VSETLNi32 killed renamable $d27, killed renamable $r4, 1, 14, $noreg, implicit $q13, implicit-def $q13
+ renamable $r4 = t2ADDri renamable $r0, 392, 14, $noreg, $noreg
+ renamable $d4 = VDUP32d killed renamable $r3, 14, $noreg, implicit killed $q2, implicit-def $q2
+ renamable $r3 = t2ADDri renamable $r0, 360, 14, $noreg, $noreg
+ renamable $d26 = VDUP32d killed renamable $r7, 14, $noreg, implicit killed $q13, implicit-def $q13
+ renamable $r7 = t2ADDri renamable $r0, 504, 14, $noreg, $noreg
+ VST1q64 killed $r3, 0, killed $q2, 14, $noreg :: (store 16 into %ir.20, align 8)
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.97, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.97, 14, $noreg
+ renamable $q2 = VDUP32q killed renamable $r3, 14, $noreg
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.96, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.96, 14, $noreg
+ renamable $d4 = VSETLNi32 killed renamable $d4, killed renamable $r3, 0, 14, $noreg, implicit $q2, implicit-def $q2
+ renamable $r3 = t2ADDri renamable $r0, 388, 14, $noreg, $noreg
+ VST1q32 killed $r3, 0, $q10, 14, $noreg :: (store 16 into %ir.21 + 12, align 4)
+ renamable $r3 = t2ADDri renamable $r0, 376, 14, $noreg, $noreg
+ VST1q32 killed $r3, 0, $q10, 14, $noreg :: (store 16 into %ir.21, align 4)
+ renamable $r3 = t2ADDri renamable $r0, 584, 14, $noreg, $noreg
+ VST1q64 killed $r4, 0, killed $q2, 14, $noreg :: (store 16 into %ir.22, align 8)
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.100, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.100, 14, $noreg
+ renamable $d1 = VDUP32d killed renamable $r4, 14, $noreg, implicit killed $q0, implicit-def $q0
+ renamable $r4 = t2ADDri renamable $r0, 408, 14, $noreg, $noreg
+ VST1q64 killed $r4, 0, killed $q0, 14, $noreg :: (store 16 into %ir.23, align 8)
+ renamable $r4 = t2ADDri renamable $r0, 440, 14, $noreg, $noreg
+ VST1q32 killed $r5, 0, $q10, 14, $noreg :: (store 16 into %ir.24 + 12, align 4)
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.103, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.103, 14, $noreg
+ renamable $d30 = VDUP32d killed renamable $r5, 14, $noreg, implicit killed $q15, implicit-def $q15
+ renamable $r5 = t2ADDri renamable $r0, 424, 14, $noreg, $noreg
+ VST1q32 killed $r5, 0, $q10, 14, $noreg :: (store 16 into %ir.24, align 4)
+ VST1q64 killed $r4, 0, killed $q15, 14, $noreg :: (store 16 into %ir.25, align 8)
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.111, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.111, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.110, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.110, 14, $noreg
+ renamable $d30 = VSETLNi32 undef renamable $d30, killed renamable $r5, 0, 14, $noreg, implicit-def $q15
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.109, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.109, 14, $noreg
+ renamable $q0 = VDUP32q killed renamable $r5, 14, $noreg
+ renamable $r5 = t2ADDri renamable $r0, 488, 14, $noreg, $noreg
+ renamable $d30 = VSETLNi32 killed renamable $d30, killed renamable $r4, 1, 14, $noreg, implicit $q15, implicit-def $q15
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.108, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.108, 14, $noreg
+ renamable $d0 = VSETLNi32 killed renamable $d0, killed renamable $r4, 0, 14, $noreg, implicit $q0, implicit-def $q0
+ renamable $r4 = t2ADDri renamable $r0, 568, 14, $noreg, $noreg
+ renamable $d31 = VDUP32d killed renamable $r12, 14, $noreg, implicit killed $q15, implicit-def $q15
+ VST1q64 killed $r1, 0, killed $q1, 14, $noreg :: (store 16 into %ir.26, align 8)
+ renamable $r1 = t2ADDri renamable $r0, 472, 14, $noreg, $noreg
+ VST1q64 killed $r1, 0, killed $q0, 14, $noreg :: (store 16 into %ir.27, align 8)
+ renamable $r1 = t2ADDri renamable $r0, 552, 14, $noreg, $noreg
+ VST1q64 killed $r5, 0, killed $q15, 14, $noreg :: (store 16 into %ir.28, align 8)
+ renamable $r5 = t2ADDri renamable $r0, 536, 14, $noreg, $noreg
+ VST1q64 killed $r7, 0, killed $q9, 14, $noreg :: (store 16 into %ir.29, align 8)
+ renamable $r7 = t2ADDri renamable $r0, 660, 14, $noreg, $noreg
+ VST1q64 killed $r6, 0, killed $q13, 14, $noreg :: (store 16 into %ir.30, align 8)
+ VST1q64 killed $r5, 0, killed $q12, 14, $noreg :: (store 16 into %ir.31, align 8)
+ VST1q64 killed $r1, 0, killed $q14, 14, $noreg :: (store 16 into %ir.32, align 8)
+ renamable $r1 = t2ADDri renamable $r0, 608, 14, $noreg, $noreg
+ VST1q64 killed $r4, 0, killed $q8, 14, $noreg :: (store 16 into %ir.33, align 8)
+ VST1q64 killed $r3, 0, killed $q11, 14, $noreg :: (store 16 into %ir.34, align 8)
+ t2STRDi8 killed $r2, $r2, $r0, 600, 14, $noreg
+ VST1q32 killed $r1, 0, $q10, 14, $noreg :: (store 16 into %ir.35, align 4)
+ $r12 = t2MOVi16 target-flags(arm-lo16) @.str.139, 14, $noreg
+ $r12 = t2MOVTi16 $r12, target-flags(arm-hi16) @.str.139, 14, $noreg
+ $r2 = t2MOVi16 target-flags(arm-lo16) @.str.151, 14, $noreg
+ $r2 = t2MOVTi16 $r2, target-flags(arm-hi16) @.str.151, 14, $noreg
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.134, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.134, 14, $noreg
+ renamable $r1 = t2ADDri renamable $r0, 620, 14, $noreg, $noreg
+ renamable $d24 = VSETLNi32 undef renamable $d24, killed renamable $r3, 0, 14, $noreg, implicit-def $q12
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.140, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.140, 14, $noreg
+ renamable $d23 = VSETLNi32 undef renamable $d23, killed renamable $r3, 0, 14, $noreg, implicit-def $q11
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.146, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.146, 14, $noreg
+ renamable $d18 = VSETLNi32 undef renamable $d18, killed renamable $r3, 0, 14, $noreg, implicit-def $q9
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.152, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.152, 14, $noreg
+ renamable $d17 = VSETLNi32 undef renamable $d17, killed renamable $r3, 0, 14, $noreg, implicit-def $q8
+ $r3 = t2MOVi16 target-flags(arm-lo16) @.str.136, 14, $noreg
+ $r3 = t2MOVTi16 $r3, target-flags(arm-hi16) @.str.136, 14, $noreg
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.138, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.138, 14, $noreg
+ renamable $d28 = VSETLNi32 undef renamable $d28, renamable $r3, 0, 14, $noreg, implicit-def $q14
+ renamable $d29 = VSETLNi32 undef renamable $d29, killed renamable $r4, 0, 14, $noreg, implicit killed $q14, implicit-def $q14
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.148, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.148, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.150, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.150, 14, $noreg
+ renamable $d26 = VSETLNi32 undef renamable $d26, renamable $r4, 0, 14, $noreg, implicit-def $q13
+ renamable $d29 = VSETLNi32 killed renamable $d29, renamable $r12, 1, 14, $noreg, implicit $q14, implicit-def $q14
+ renamable $d27 = VSETLNi32 undef renamable $d27, killed renamable $r5, 0, 14, $noreg, implicit killed $q13, implicit-def $q13
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.130, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.130, 14, $noreg
+ renamable $q1 = VDUP32q killed renamable $r5, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.142, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.142, 14, $noreg
+ renamable $d27 = VSETLNi32 killed renamable $d27, renamable $r2, 1, 14, $noreg, implicit $q13, implicit-def $q13
+ renamable $q15 = VDUP32q killed renamable $r5, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.149, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.149, 14, $noreg
+ renamable $d26 = VSETLNi32 killed renamable $d26, killed renamable $r5, 1, 14, $noreg, implicit $q13, implicit-def $q13
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.145, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.145, 14, $noreg
+ renamable $q0 = VDUP32q killed renamable $r5, 14, $noreg
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.137, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.137, 14, $noreg
+ renamable $d28 = VSETLNi32 killed renamable $d28, killed renamable $r5, 1, 14, $noreg, implicit $q14, implicit-def $q14
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.153, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.153, 14, $noreg
+ renamable $d17 = VSETLNi32 killed renamable $d17, killed renamable $r5, 1, 14, $noreg, implicit $q8, implicit-def $q8
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.147, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.147, 14, $noreg
+ renamable $d18 = VSETLNi32 killed renamable $d18, killed renamable $r5, 1, 14, $noreg, implicit $q9, implicit-def $q9
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.141, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.141, 14, $noreg
+ renamable $d23 = VSETLNi32 killed renamable $d23, killed renamable $r5, 1, 14, $noreg, implicit $q11, implicit-def $q11
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.135, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.135, 14, $noreg
+ renamable $d24 = VSETLNi32 killed renamable $d24, killed renamable $r5, 1, 14, $noreg, implicit $q12, implicit-def $q12
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.144, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.144, 14, $noreg
+ renamable $d0 = VSETLNi32 killed renamable $d0, killed renamable $r5, 0, 14, $noreg, implicit $q0, implicit-def $q0
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.143, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.143, 14, $noreg
+ renamable $d31 = VSETLNi32 killed renamable $d31, killed renamable $r5, 1, 14, $noreg, implicit $q15, implicit-def $q15
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.131, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.131, 14, $noreg
+ renamable $d3 = VSETLNi32 killed renamable $d3, killed renamable $r5, 1, 14, $noreg, implicit $q1, implicit-def $q1
+ $r5 = t2MOVi16 target-flags(arm-lo16) @.str.133, 14, $noreg
+ $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.133, 14, $noreg
+ renamable $q2 = VDUP32q killed renamable $r5, 14, $noreg
+ renamable $r5 = t2ADDri renamable $r0, 756, 14, $noreg, $noreg
+ VST1q32 killed $r1, 0, killed $q10, 14, $noreg :: (store 16 into %ir.35 + 12, align 4)
+ $r1 = t2MOVi16 target-flags(arm-lo16) @.str.132, 14, $noreg
+ $r1 = t2MOVTi16 $r1, target-flags(arm-hi16) @.str.132, 14, $noreg
+ renamable $d19 = VDUP32d killed renamable $r4, 14, $noreg, implicit killed $q9, implicit-def $q9
+ $r4 = t2MOVi16 target-flags(arm-lo16) @.str.128, 14, $noreg
+ $r4 = t2MOVTi16 $r4, target-flags(arm-hi16) @.str.128, 14, $noreg
+ renamable $d4 = VSETLNi32 killed renamable $d4, killed renamable $r1, 0, 14, $noreg, implicit $q2, implicit-def $q2
+ $r6 = t2MOVi16 target-flags(arm-lo16) @.str.129, 14, $noreg
+ $r6 = t2MOVTi16 $r6, target-flags(arm-hi16) @.str.129, 14, $noreg
+ renamable $r1 = t2ADDri renamable $r0, 772, 14, $noreg, $noreg
+ renamable $d16 = VDUP32d killed renamable $r2, 14, $noreg, implicit killed $q8, implicit-def $q8
+ renamable $r2 = t2ADDri renamable $r0, 740, 14, $noreg, $noreg
+ renamable $d22 = VDUP32d killed renamable $r12, 14, $noreg, implicit killed $q11, implicit-def $q11
+ renamable $d25 = VDUP32d killed renamable $r3, 14, $noreg, implicit killed $q12, implicit-def $q12
+ t2STRDi8 killed $r4, killed $r6, $r0, 636, 14, $noreg
+ renamable $r4 = t2ADDri renamable $r0, 644, 14, $noreg, $noreg
+ renamable $r6 = t2ADDri renamable $r0, 692, 14, $noreg, $noreg
+ VST1q64 killed $r4, 0, killed $q1, 14, $noreg :: (store 16 into %ir.36, align 8)
+ renamable $r4 = t2ADDri renamable $r0, 724, 14, $noreg, $noreg
+ VST1q64 killed $r7, 0, killed $q2, 14, $noreg :: (store 16 into %ir.37, align 8)
+ renamable $r7 = t2ADDri renamable $r0, 708, 14, $noreg, $noreg
+ renamable $r0 = t2ADDri killed renamable $r0, 676, 14, $noreg, $noreg
+ VST1q64 killed $r0, 0, killed $q12, 14, $noreg :: (store 16 into %ir.38, align 8)
+ VST1q64 killed $r6, 0, killed $q14, 14, $noreg :: (store 16 into %ir.39, align 8)
+ VST1q64 killed $r7, 0, killed $q11, 14, $noreg :: (store 16 into %ir.40, align 8)
+ VST1q64 killed $r4, 0, killed $q15, 14, $noreg :: (store 16 into %ir.41, align 8)
+ VST1q64 killed $r2, 0, killed $q0, 14, $noreg :: (store 16 into %ir.42, align 8)
+ VST1q64 killed $r5, 0, killed $q9, 14, $noreg :: (store 16 into %ir.43, align 8)
+ VST1q64 killed $r1, 0, killed $q13, 14, $noreg :: (store 16 into %ir.44, align 8)
+ VST1q64 killed $lr, 0, killed $q8, 14, $noreg :: (store 16 into %ir.45, align 8)
+ $sp = VLDMDIA_UPD $sp, 14, $noreg, def $d8, def $d9, def $d10, def $d11
+ $sp = t2LDMIA_RET $sp, 14, $noreg, def $r4, def $r5, def $r6, def $r7, def $r11, def $pc
+
+...
+
+# Check that the constant island isn't emitted in the middle of the movw+movt
+# pair. On windows, the IMAGE_REL_ARM_MOV32T relocation on the movw instruction
+# covers both movw and movt, so we can't allow anything to be inserted between
+# them.
+#
+# CHECK: $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.118, 14, $noreg
+# CHECK-NEXT: renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg
+# CHECK-NEXT: t2B %bb.2, 14, $noreg
+# CHECK-NEXT: {{^ $}}
+# CHECK-NEXT: bb.1 (align 2):
+# CHECK-NEXT: successors:{{ }}
+# CHECK-NEXT: {{^ $}}
+# CHECK-NEXT: CONSTPOOL_ENTRY 1, %const.0, 4
+# CHECK-NEXT: {{^ $}}
+# CHECK-NEXT: bb.2.entry (align 1):
+# CHECK-NEXT: $r5 = t2MOVi16 target-flags(arm-lo16) @.str.84, 14, $noreg
+# CHECK-NEXT: $r5 = t2MOVTi16 $r5, target-flags(arm-hi16) @.str.84, 14, $noreg
diff --git a/test/CodeGen/ARM/constantpool-promote-dbg.ll b/test/CodeGen/ARM/constantpool-promote-dbg.ll
index bd5cb9ae0600..261131032d18 100644
--- a/test/CodeGen/ARM/constantpool-promote-dbg.ll
+++ b/test/CodeGen/ARM/constantpool-promote-dbg.ll
@@ -6,14 +6,14 @@ target triple = "thumbv7m--linux-gnu"
@.str = private unnamed_addr constant [4 x i8] c"abc\00", align 1
; CHECK-LABEL: fn1
-; CHECK: .str:
+; CHECK: .long .L.str
define arm_aapcscc i8* @fn1() local_unnamed_addr #0 !dbg !8 {
entry:
ret i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), !dbg !14
}
; CHECK-LABEL: fn2
-; CHECK-NOT: .str:
+; CHECK: .long .L.str
define arm_aapcscc i8* @fn2() local_unnamed_addr #0 !dbg !15 {
entry:
ret i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 1), !dbg !16
@@ -25,14 +25,14 @@ attributes #0 = { minsize norecurse nounwind optsize readnone "disable-tail-call
!llvm.module.flags = !{!3, !4, !5, !6}
!llvm.ident = !{!7}
-!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0 (http://llvm.org/git/clang.git 075a2bc2570dfcbb6d6aed6c836e4c62b37afea6)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.9.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
!1 = !DIFile(filename: "/Users/jammol01/Code/test.c", directory: "/Users/jammol01/Code/llvm-git/build")
!2 = !{}
!3 = !{i32 2, !"Dwarf Version", i32 4}
!4 = !{i32 2, !"Debug Info Version", i32 3}
!5 = !{i32 1, !"wchar_size", i32 4}
!6 = !{i32 1, !"min_enum_size", i32 4}
-!7 = !{!"clang version 3.9.0 (http://llvm.org/git/clang.git 075a2bc2570dfcbb6d6aed6c836e4c62b37afea6)"}
+!7 = !{!"clang version 3.9.0"}
!8 = distinct !DISubprogram(name: "fn1", scope: !1, file: !1, line: 1, type: !9, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
!9 = !DISubroutineType(types: !10)
!10 = !{!11}
diff --git a/test/CodeGen/ARM/constantpool-promote.ll b/test/CodeGen/ARM/constantpool-promote.ll
index ccd86257dd36..ac16e600c145 100644
--- a/test/CodeGen/ARM/constantpool-promote.ll
+++ b/test/CodeGen/ARM/constantpool-promote.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=static -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
-; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=pic -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=static -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM,CHECK-STATIC
+; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=pic -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM,CHECK-PIC
; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=ropi -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
; RUN: llc -mtriple armv7--linux-gnueabihf -relocation-model=rwpi -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7ARM
; RUN: llc -mtriple thumbv7--linux-gnueabihf -relocation-model=static -arm-promote-constant < %s | FileCheck %s --check-prefixes=CHECK,CHECK-V7,CHECK-V7THUMB
@@ -16,12 +16,15 @@
@.str2 = private unnamed_addr constant [27 x i8] c"this string is just right!\00", align 1
@.str3 = private unnamed_addr constant [26 x i8] c"this string is used twice\00", align 1
@.str4 = private unnamed_addr constant [29 x i8] c"same string in two functions\00", align 1
+@.str5 = private unnamed_addr constant [2 x i8] c"s\00", align 1
@.arr1 = private unnamed_addr constant [2 x i16] [i16 3, i16 4], align 2
@.arr2 = private unnamed_addr constant [2 x i16] [i16 7, i16 8], align 2
@.arr3 = private unnamed_addr constant [2 x i16*] [i16* null, i16* null], align 4
@.ptr = private unnamed_addr constant [2 x i16*] [i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr2, i32 0, i32 0), i16* null], align 2
@.arr4 = private unnamed_addr constant [2 x i16] [i16 3, i16 4], align 16
+@.arr5 = private unnamed_addr constant [2 x i16] [i16 3, i16 4], align 2
@.zerosize = private unnamed_addr constant [0 x i16] zeroinitializer, align 4
+@implicit_alignment_vector = private unnamed_addr constant <4 x i32> <i32 1, i32 2, i32 3, i32 4>
; CHECK-LABEL: @test1
; CHECK: adr r0, [[x:.*]]
@@ -75,20 +78,14 @@ define void @test5b() #0 {
}
; CHECK-LABEL: @test6a
-; CHECK: adr r0, [[x:.*]]
-; CHECK: [[x]]:
-; CHECK: .short 3
-; CHECK: .short 4
+; CHECK: L.arr1
define void @test6a() #0 {
tail call void @c(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0)) #2
ret void
}
; CHECK-LABEL: @test6b
-; CHECK: adr r0, [[x:.*]]
-; CHECK: [[x]]:
-; CHECK: .short 3
-; CHECK: .short 4
+; CHECK: L.arr1
define void @test6b() #0 {
tail call void @c(i16* getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0)) #2
ret void
@@ -102,9 +99,9 @@ define void @test7() #0 {
ret void
}
-; This shouldn't be promoted, because the array contains pointers.
+; This can be promoted; it contains pointers, but they don't need relocations.
; CHECK-LABEL: @test8
-; CHECK-NOT: .zero
+; CHECK: .zero
; CHECK: .fnend
define void @test8() #0 {
%a = load i16*, i16** getelementptr inbounds ([2 x i16*], [2 x i16*]* @.arr3, i32 0, i32 0)
@@ -112,6 +109,17 @@ define void @test8() #0 {
ret void
}
+; This can't be promoted in PIC mode because it contains pointers to other globals.
+; CHECK-LABEL: @test8a
+; CHECK-STATIC: .long .L.arr2
+; CHECK-PIC: .long .L.ptr
+; CHECK: .fnend
+define void @test8a() #0 {
+ %a = load i16*, i16** getelementptr inbounds ([2 x i16*], [2 x i16*]* @.ptr, i32 0, i32 0)
+ tail call void @c(i16* %a) #2
+ ret void
+}
+
@fn1.a = private unnamed_addr constant [4 x i16] [i16 4, i16 0, i16 0, i16 0], align 2
@fn2.a = private unnamed_addr constant [8 x i8] [i8 4, i8 0, i8 0, i8 0, i8 23, i8 0, i8 6, i8 0], align 1
@@ -156,7 +164,7 @@ define void @pr32130() #0 {
; CHECK-V7: [[x]]:
; CHECK-V7: .asciz "s\000\000"
define void @test10(i8* %a) local_unnamed_addr #0 {
- call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 %a, i8* align 1 getelementptr inbounds ([2 x i8], [2 x i8]* @.str, i32 0, i32 0), i32 1, i1 false)
+ call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 %a, i8* align 1 getelementptr inbounds ([2 x i8], [2 x i8]* @.str5, i32 0, i32 0), i32 1, i1 false)
ret void
}
@@ -174,13 +182,23 @@ define void @test10(i8* %a) local_unnamed_addr #0 {
; CHECK-V7ARM: .short 3
; CHECK-V7ARM: .short 4
define void @test11(i16* %a) local_unnamed_addr #0 {
- call void @llvm.memmove.p0i16.p0i16.i32(i16* align 2 %a, i16* align 2 getelementptr inbounds ([2 x i16], [2 x i16]* @.arr1, i32 0, i32 0), i32 2, i1 false)
+ call void @llvm.memmove.p0i16.p0i16.i32(i16* align 2 %a, i16* align 2 getelementptr inbounds ([2 x i16], [2 x i16]* @.arr5, i32 0, i32 0), i32 2, i1 false)
+ ret void
+}
+
+; Promotion only works with globals with alignment 4 or less; a vector has
+; implicit alignment 16.
+; CHECK-LABEL: @test12
+; CHECK-NOT: adr
+define void @test12() local_unnamed_addr #0 {
+ call void @d(<4 x i32>* @implicit_alignment_vector)
ret void
}
declare void @b(i8*) #1
declare void @c(i16*) #1
+declare void @d(<4 x i32>*) #1
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i1) local_unnamed_addr
declare void @llvm.memmove.p0i16.p0i16.i32(i16*, i16*, i32, i1) local_unnamed_addr
diff --git a/test/CodeGen/ARM/copy-by-struct-i32.ll b/test/CodeGen/ARM/copy-by-struct-i32.ll
new file mode 100644
index 000000000000..a57506872f9e
--- /dev/null
+++ b/test/CodeGen/ARM/copy-by-struct-i32.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=armv7-unknown-linux < %s -stop-before=expand-isel-pseudos | FileCheck --check-prefix=BEFORE-EXPAND %s
+; RUN: llc -mtriple=armv7-unknown-linux < %s | FileCheck --check-prefix=ASSEMBLY %s
+
+; Check COPY_STRUCT_BYVAL_I32 has CPSR as operand.
+; BEFORE-EXPAND: COPY_STRUCT_BYVAL_I32 {{.*}} implicit-def dead $cpsr
+; BEFORE-EXPAND: COPY_STRUCT_BYVAL_I32 {{.*}} implicit-def dead $cpsr
+
+%struct.anon = type { i32, i32, i32, i32, i32, i32, i32, %struct.f, i32, i64, i32 }
+%struct.f = type { i32, i32, i32, i32, i32 }
+
+define arm_aapcscc void @s(i64* %q, %struct.anon* %p) {
+; ASSEMBLY-LABEL: s:
+; ASSEMBLY: @ %bb.0: @ %entry
+; ASSEMBLY-NEXT: push {r4, r5, r11, lr}
+; ASSEMBLY-NEXT: sub sp, sp, #136
+; ASSEMBLY-NEXT: ldrd r4, r5, [r0]
+; ASSEMBLY-NEXT: add lr, sp, #56
+; ASSEMBLY-NEXT: ldm r1, {r0, r12}
+; ASSEMBLY-NEXT: subs r4, r4, #1
+; ASSEMBLY-NEXT: sbc r5, r5, #0
+; ASSEMBLY-NEXT: ldr r2, [r1, #8]
+; ASSEMBLY-NEXT: ldr r3, [r1, #12]
+; ASSEMBLY-NEXT: str r5, [sp, #132]
+; ASSEMBLY-NEXT: add r5, r1, #16
+; ASSEMBLY-NEXT: str r4, [sp, #128]
+; ASSEMBLY-NEXT: mov r4, sp
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r5]!
+; ASSEMBLY-NEXT: vst1.32 {d16}, [r4]!
+; ASSEMBLY-NEXT: movw r4, #72
+; ASSEMBLY-NEXT: .LBB0_1: @ %entry
+; ASSEMBLY-NEXT: @ =>This Inner Loop Header: Depth=1
+; ASSEMBLY-NEXT: vld1.32 {d16}, [r1]!
+; ASSEMBLY-NEXT: subs r4, r4, #8
+; ASSEMBLY-NEXT: vst1.32 {d16}, [lr]!
+; ASSEMBLY-NEXT: bne .LBB0_1
+; ASSEMBLY-NEXT: @ %bb.2: @ %entry
+; ASSEMBLY-NEXT: mov r1, r12
+; ASSEMBLY-NEXT: bl r
+; ASSEMBLY-NEXT: add sp, sp, #136
+; ASSEMBLY-NEXT: pop {r4, r5, r11, pc}
+entry:
+ %0 = load i64, i64* %q, align 8
+ %sub = add nsw i64 %0, -1
+ tail call arm_aapcscc void bitcast (void (...)* @r to void (%struct.anon*, %struct.anon*, i64)*)(%struct.anon* byval nonnull align 8 %p, %struct.anon* byval nonnull align 8 %p, i64 %sub)
+ ret void
+}
+
+declare arm_aapcscc void @r(...)
diff --git a/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
index 0ae2d5f6f2f2..2c0aa98eae03 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-ldm-wrback.ll
@@ -18,9 +18,9 @@
; CHECK-NEXT: Data
; CHECK-SAME: Latency=3
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=3
+; CHECK-SAME: Latency=0
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=4
+; CHECK-SAME: Latency=0
define i32 @bar(i32 %a1, i32 %b1, i32 %c1) minsize optsize {
%1 = load i32, i32* @a, align 4
%2 = load i32, i32* @b, align 4
diff --git a/test/CodeGen/ARM/cortex-a57-misched-ldm.ll b/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
index bc7a14b1028e..02d1c2f55f99 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-ldm.ll
@@ -11,7 +11,7 @@
; CHECK: Data
; CHECK-SAME: Latency=3
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=3
+; CHECK-SAME: Latency=0
define i32 @foo(i32* %a) nounwind optsize {
entry:
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll b/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
index b5edcc304229..1baf472ca49d 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vldm-wrback.ll
@@ -20,9 +20,9 @@
; CHECK-NEXT: Data
; CHECK-SAME: Latency=5
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=5
+; CHECK-SAME: Latency=0
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=6
+; CHECK-SAME: Latency=0
define i32 @bar(i32* %iptr) minsize optsize {
%1 = load double, double* @a, align 8
%2 = load double, double* @b, align 8
diff --git a/test/CodeGen/ARM/cortex-a57-misched-vldm.ll b/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
index 12c7b3270c3b..8da133e806ef 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-vldm.ll
@@ -11,9 +11,9 @@
; CHECK: Data
; CHECK-SAME: Latency=5
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=5
+; CHECK-SAME: Latency=0
; CHECK-NEXT: Data
-; CHECK-SAME: Latency=6
+; CHECK-SAME: Latency=0
define double @foo(double* %a) nounwind optsize {
entry:
diff --git a/test/CodeGen/ARM/crash-O0.ll b/test/CodeGen/ARM/crash-O0.ll
index bfbab8a99336..29110fcf3bd2 100644
--- a/test/CodeGen/ARM/crash-O0.ll
+++ b/test/CodeGen/ARM/crash-O0.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O0 -relocation-model=pic -disable-fp-elim -no-integrated-as
+; RUN: llc < %s -O0 -relocation-model=pic -frame-pointer=all -no-integrated-as
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "armv6-apple-darwin10"
diff --git a/test/CodeGen/ARM/crash-greedy-v6.ll b/test/CodeGen/ARM/crash-greedy-v6.ll
index 287c081ac5ec..d3c5057e3821 100644
--- a/test/CodeGen/ARM/crash-greedy-v6.ll
+++ b/test/CodeGen/ARM/crash-greedy-v6.ll
@@ -1,5 +1,5 @@
-; RUN: llc -disable-fp-elim -relocation-model=pic < %s
-; RUN: llc -disable-fp-elim -relocation-model=pic -O0 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
+; RUN: llc -frame-pointer=all -relocation-model=pic < %s
+; RUN: llc -frame-pointer=all -relocation-model=pic -O0 -pre-RA-sched=source < %s | FileCheck %s --check-prefix=SOURCE-SCHED
target triple = "armv6-apple-ios"
; Reduced from 177.mesa. This test causes a live range split before an LDR_POST instruction.
diff --git a/test/CodeGen/ARM/crash-greedy.ll b/test/CodeGen/ARM/crash-greedy.ll
index 5320a163c0b8..444505f8786a 100644
--- a/test/CodeGen/ARM/crash-greedy.ll
+++ b/test/CodeGen/ARM/crash-greedy.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -regalloc=greedy -mcpu=cortex-a8 -relocation-model=pic -disable-fp-elim -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -regalloc=greedy -mcpu=cortex-a8 -relocation-model=pic -frame-pointer=all -verify-machineinstrs | FileCheck %s
;
; ARM tests that crash or fail with the greedy register allocator.
diff --git a/test/CodeGen/ARM/cttz_vector.ll b/test/CodeGen/ARM/cttz_vector.ll
index bed644980415..f27c1e4b4173 100644
--- a/test/CodeGen/ARM/cttz_vector.ll
+++ b/test/CodeGen/ARM/cttz_vector.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple armv7-linux-gnueabihf -mattr=+neon | FileCheck %s
; This test checks the @llvm.cttz.* intrinsics for vectors.
@@ -23,7 +24,14 @@ declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>, i1)
;------------------------------------------------------------------------------
define void @test_v1i8(<1 x i8>* %p) {
-; CHECK-LABEL: test_v1i8
+; CHECK-LABEL: test_v1i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrb r1, [r0]
+; CHECK-NEXT: orr r1, r1, #256
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: strb r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i8>, <1 x i8>* %p
%tmp = call <1 x i8> @llvm.cttz.v1i8(<1 x i8> %a, i1 false)
store <1 x i8> %tmp, <1 x i8>* %p
@@ -32,6 +40,21 @@ define void @test_v1i8(<1 x i8>* %p) {
define void @test_v2i8(<2 x i8>* %p) {
; CHECK-LABEL: test_v2i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.16 {d16[0]}, [r0:16]
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vorr.i32 d16, #0x100
+; CHECK-NEXT: vneg.s32 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vmov.i32 d17, #0x1f
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vsub.i32 d16, d17, d16
+; CHECK-NEXT: vmov.32 r1, d16[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: strb r1, [r0, #1]
+; CHECK-NEXT: strb r2, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i8>, <2 x i8>* %p
%tmp = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %a, i1 false)
store <2 x i8> %tmp, <2 x i8>* %p
@@ -40,6 +63,19 @@ define void @test_v2i8(<2 x i8>* %p) {
define void @test_v4i8(<4 x i8>* %p) {
; CHECK-LABEL: test_v4i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmov.i16 d19, #0x1
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vorr.i16 d16, #0x100
+; CHECK-NEXT: vneg.s16 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vsub.i16 d16, d16, d19
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vuzp.8 d16, d17
+; CHECK-NEXT: vst1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: bx lr
%a = load <4 x i8>, <4 x i8>* %p
%tmp = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %a, i1 false)
store <4 x i8> %tmp, <4 x i8>* %p
@@ -48,13 +84,15 @@ define void @test_v4i8(<4 x i8>* %p) {
define void @test_v8i8(<8 x i8>* %p) {
; CHECK-LABEL: test_v8i8:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i8 [[D2:d[0-9]+]], #0x1
-; CHECK: vneg.s8 [[D3:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D3]]
-; CHECK: vsub.i8 [[D1]], [[D1]], [[D2]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.i8 d18, #0x1
+; CHECK-NEXT: vneg.s8 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vsub.i8 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <8 x i8>, <8 x i8>* %p
%tmp = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %a, i1 false)
store <8 x i8> %tmp, <8 x i8>* %p
@@ -63,13 +101,15 @@ define void @test_v8i8(<8 x i8>* %p) {
define void @test_v16i8(<16 x i8>* %p) {
; CHECK-LABEL: test_v16i8:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i8 [[Q2:q[0-9]+]], #0x1
-; CHECK: vneg.s8 [[Q3:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vsub.i8 [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmov.i8 q10, #0x1
+; CHECK-NEXT: vneg.s8 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vsub.i8 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <16 x i8>, <16 x i8>* %p
%tmp = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 false)
store <16 x i8> %tmp, <16 x i8>* %p
@@ -78,6 +118,13 @@ define void @test_v16i8(<16 x i8>* %p) {
define void @test_v1i16(<1 x i16>* %p) {
; CHECK-LABEL: test_v1i16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrh r1, [r0]
+; CHECK-NEXT: orr r1, r1, #65536
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: strh r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i16>, <1 x i16>* %p
%tmp = call <1 x i16> @llvm.cttz.v1i16(<1 x i16> %a, i1 false)
store <1 x i16> %tmp, <1 x i16>* %p
@@ -86,6 +133,18 @@ define void @test_v1i16(<1 x i16>* %p) {
define void @test_v2i16(<2 x i16>* %p) {
; CHECK-LABEL: test_v2i16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vorr.i32 d16, #0x10000
+; CHECK-NEXT: vneg.s32 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vmov.i32 d17, #0x1f
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vsub.i32 d16, d17, d16
+; CHECK-NEXT: vuzp.16 d16, d17
+; CHECK-NEXT: vst1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: bx lr
%a = load <2 x i16>, <2 x i16>* %p
%tmp = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %a, i1 false)
store <2 x i16> %tmp, <2 x i16>* %p
@@ -94,14 +153,16 @@ define void @test_v2i16(<2 x i16>* %p) {
define void @test_v4i16(<4 x i16>* %p) {
; CHECK-LABEL: test_v4i16:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i16 [[D2:d[0-9]+]], #0x1
-; CHECK: vneg.s16 [[D3:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D3]]
-; CHECK: vsub.i16 [[D1]], [[D1]], [[D2]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.i16 d18, #0x1
+; CHECK-NEXT: vneg.s16 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vsub.i16 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <4 x i16>, <4 x i16>* %p
%tmp = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %a, i1 false)
store <4 x i16> %tmp, <4 x i16>* %p
@@ -110,14 +171,16 @@ define void @test_v4i16(<4 x i16>* %p) {
define void @test_v8i16(<8 x i16>* %p) {
; CHECK-LABEL: test_v8i16:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i16 [[Q2:q[0-9]+]], #0x1
-; CHECK: vneg.s16 [[Q3:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vsub.i16 [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmov.i16 q10, #0x1
+; CHECK-NEXT: vneg.s16 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vsub.i16 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <8 x i16>, <8 x i16>* %p
%tmp = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 false)
store <8 x i16> %tmp, <8 x i16>* %p
@@ -126,6 +189,12 @@ define void @test_v8i16(<8 x i16>* %p) {
define void @test_v1i32(<1 x i32>* %p) {
; CHECK-LABEL: test_v1i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldr r1, [r0]
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: str r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i32>, <1 x i32>* %p
%tmp = call <1 x i32> @llvm.cttz.v1i32(<1 x i32> %a, i1 false)
store <1 x i32> %tmp, <1 x i32>* %p
@@ -134,15 +203,17 @@ define void @test_v1i32(<1 x i32>* %p) {
define void @test_v2i32(<2 x i32>* %p) {
; CHECK-LABEL: test_v2i32:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x1
-; CHECK: vneg.s32 [[D3:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D3]]
-; CHECK: vsub.i32 [[D1]], [[D1]], [[D2]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vpaddl.u16 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.i32 d18, #0x1
+; CHECK-NEXT: vneg.s32 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vsub.i32 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vpaddl.u16 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i32>, <2 x i32>* %p
%tmp = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 false)
store <2 x i32> %tmp, <2 x i32>* %p
@@ -151,15 +222,17 @@ define void @test_v2i32(<2 x i32>* %p) {
define void @test_v4i32(<4 x i32>* %p) {
; CHECK-LABEL: test_v4i32:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x1
-; CHECK: vneg.s32 [[Q3:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vsub.i32 [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u16 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmov.i32 q10, #0x1
+; CHECK-NEXT: vneg.s32 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vsub.i32 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <4 x i32>, <4 x i32>* %p
%tmp = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 false)
store <4 x i32> %tmp, <4 x i32>* %p
@@ -168,17 +241,19 @@ define void @test_v4i32(<4 x i32>* %p) {
define void @test_v1i64(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64:
-; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
-; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D2]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
-; CHECK: vcnt.8 [[D2]], [[D2]]
-; CHECK: vpaddl.u8 [[D2]], [[D2]]
-; CHECK: vpaddl.u16 [[D2]], [[D2]]
-; CHECK: vpaddl.u32 [[D2]], [[D2]]
-; CHECK: vstr [[D2]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i32 d16, #0x0
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmov.i64 d18, #0xffffffffffffffff
+; CHECK-NEXT: vsub.i64 d16, d16, d17
+; CHECK-NEXT: vand d16, d17, d16
+; CHECK-NEXT: vadd.i64 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vpaddl.u16 d16, d16
+; CHECK-NEXT: vpaddl.u32 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 false)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -187,17 +262,19 @@ define void @test_v1i64(<1 x i64>* %p) {
define void @test_v2i64(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64:
-; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
-; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
-; CHECK: vcnt.8 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i32 q8, #0x0
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmov.i64 q10, #0xffffffffffffffff
+; CHECK-NEXT: vsub.i64 q8, q8, q9
+; CHECK-NEXT: vand q8, q9, q8
+; CHECK-NEXT: vadd.i64 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vpaddl.u32 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 false)
store <2 x i64> %tmp, <2 x i64>* %p
@@ -207,7 +284,13 @@ define void @test_v2i64(<2 x i64>* %p) {
;------------------------------------------------------------------------------
define void @test_v1i8_zero_undef(<1 x i8>* %p) {
-; CHECK-LABEL: test_v1i8_zero_undef
+; CHECK-LABEL: test_v1i8_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrb r1, [r0]
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: strb r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i8>, <1 x i8>* %p
%tmp = call <1 x i8> @llvm.cttz.v1i8(<1 x i8> %a, i1 true)
store <1 x i8> %tmp, <1 x i8>* %p
@@ -216,6 +299,20 @@ define void @test_v1i8_zero_undef(<1 x i8>* %p) {
define void @test_v2i8_zero_undef(<2 x i8>* %p) {
; CHECK-LABEL: test_v2i8_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.16 {d16[0]}, [r0:16]
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vneg.s32 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vmov.i32 d17, #0x1f
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vsub.i32 d16, d17, d16
+; CHECK-NEXT: vmov.32 r1, d16[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: strb r1, [r0, #1]
+; CHECK-NEXT: strb r2, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i8>, <2 x i8>* %p
%tmp = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %a, i1 true)
store <2 x i8> %tmp, <2 x i8>* %p
@@ -224,6 +321,17 @@ define void @test_v2i8_zero_undef(<2 x i8>* %p) {
define void @test_v4i8_zero_undef(<4 x i8>* %p) {
; CHECK-LABEL: test_v4i8_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vneg.s16 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vmov.i16 d17, #0xf
+; CHECK-NEXT: vclz.i16 d16, d16
+; CHECK-NEXT: vsub.i16 d16, d17, d16
+; CHECK-NEXT: vuzp.8 d16, d17
+; CHECK-NEXT: vst1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: bx lr
%a = load <4 x i8>, <4 x i8>* %p
%tmp = call <4 x i8> @llvm.cttz.v4i8(<4 x i8> %a, i1 true)
store <4 x i8> %tmp, <4 x i8>* %p
@@ -232,13 +340,15 @@ define void @test_v4i8_zero_undef(<4 x i8>* %p) {
define void @test_v8i8_zero_undef(<8 x i8>* %p) {
; CHECK-LABEL: test_v8i8_zero_undef:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i8 [[D2:d[0-9]+]], #0x1
-; CHECK: vneg.s8 [[D3:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D3]]
-; CHECK: vsub.i8 [[D1]], [[D1]], [[D2]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.i8 d18, #0x1
+; CHECK-NEXT: vneg.s8 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vsub.i8 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <8 x i8>, <8 x i8>* %p
%tmp = call <8 x i8> @llvm.cttz.v8i8(<8 x i8> %a, i1 true)
store <8 x i8> %tmp, <8 x i8>* %p
@@ -247,13 +357,15 @@ define void @test_v8i8_zero_undef(<8 x i8>* %p) {
define void @test_v16i8_zero_undef(<16 x i8>* %p) {
; CHECK-LABEL: test_v16i8_zero_undef:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i8 [[Q2:q[0-9]+]], #0x1
-; CHECK: vneg.s8 [[Q3:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vsub.i8 [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vmov.i8 q10, #0x1
+; CHECK-NEXT: vneg.s8 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vsub.i8 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <16 x i8>, <16 x i8>* %p
%tmp = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 true)
store <16 x i8> %tmp, <16 x i8>* %p
@@ -262,6 +374,12 @@ define void @test_v16i8_zero_undef(<16 x i8>* %p) {
define void @test_v1i16_zero_undef(<1 x i16>* %p) {
; CHECK-LABEL: test_v1i16_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrh r1, [r0]
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: strh r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i16>, <1 x i16>* %p
%tmp = call <1 x i16> @llvm.cttz.v1i16(<1 x i16> %a, i1 true)
store <1 x i16> %tmp, <1 x i16>* %p
@@ -270,6 +388,17 @@ define void @test_v1i16_zero_undef(<1 x i16>* %p) {
define void @test_v2i16_zero_undef(<2 x i16>* %p) {
; CHECK-LABEL: test_v2i16_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vneg.s32 d18, d16
+; CHECK-NEXT: vand d16, d16, d18
+; CHECK-NEXT: vmov.i32 d17, #0x1f
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vsub.i32 d16, d17, d16
+; CHECK-NEXT: vuzp.16 d16, d17
+; CHECK-NEXT: vst1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: bx lr
%a = load <2 x i16>, <2 x i16>* %p
%tmp = call <2 x i16> @llvm.cttz.v2i16(<2 x i16> %a, i1 true)
store <2 x i16> %tmp, <2 x i16>* %p
@@ -278,13 +407,15 @@ define void @test_v2i16_zero_undef(<2 x i16>* %p) {
define void @test_v4i16_zero_undef(<4 x i16>* %p) {
; CHECK-LABEL: test_v4i16_zero_undef:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vneg.s16 [[D2:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vmov.i16 [[D3:d[0-9]+]], #0xf
-; CHECK: vclz.i16 [[D1]], [[D1]]
-; CHECK: vsub.i16 [[D1]], [[D3]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vneg.s16 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vmov.i16 d17, #0xf
+; CHECK-NEXT: vclz.i16 d16, d16
+; CHECK-NEXT: vsub.i16 d16, d17, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <4 x i16>, <4 x i16>* %p
%tmp = call <4 x i16> @llvm.cttz.v4i16(<4 x i16> %a, i1 true)
store <4 x i16> %tmp, <4 x i16>* %p
@@ -293,13 +424,15 @@ define void @test_v4i16_zero_undef(<4 x i16>* %p) {
define void @test_v8i16_zero_undef(<8 x i16>* %p) {
; CHECK-LABEL: test_v8i16_zero_undef:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vneg.s16 [[Q2:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vmov.i16 [[Q3:q[0-9]+]], #0xf
-; CHECK: vclz.i16 [[Q1]], [[Q1]]
-; CHECK: vsub.i16 [[Q1]], [[Q3]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vneg.s16 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vmov.i16 q9, #0xf
+; CHECK-NEXT: vclz.i16 q8, q8
+; CHECK-NEXT: vsub.i16 q8, q9, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <8 x i16>, <8 x i16>* %p
%tmp = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 true)
store <8 x i16> %tmp, <8 x i16>* %p
@@ -308,6 +441,12 @@ define void @test_v8i16_zero_undef(<8 x i16>* %p) {
define void @test_v1i32_zero_undef(<1 x i32>* %p) {
; CHECK-LABEL: test_v1i32_zero_undef:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldr r1, [r0]
+; CHECK-NEXT: rbit r1, r1
+; CHECK-NEXT: clz r1, r1
+; CHECK-NEXT: str r1, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i32>, <1 x i32>* %p
%tmp = call <1 x i32> @llvm.cttz.v1i32(<1 x i32> %a, i1 true)
store <1 x i32> %tmp, <1 x i32>* %p
@@ -316,13 +455,15 @@ define void @test_v1i32_zero_undef(<1 x i32>* %p) {
define void @test_v2i32_zero_undef(<2 x i32>* %p) {
; CHECK-LABEL: test_v2i32_zero_undef:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vneg.s32 [[D2:d[0-9]+]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vmov.i32 [[D3:d[0-9]+]], #0x1f
-; CHECK: vclz.i32 [[D1]], [[D1]]
-; CHECK: vsub.i32 [[D1]], [[D3]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vneg.s32 d17, d16
+; CHECK-NEXT: vand d16, d16, d17
+; CHECK-NEXT: vmov.i32 d17, #0x1f
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vsub.i32 d16, d17, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i32>, <2 x i32>* %p
%tmp = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> %a, i1 true)
store <2 x i32> %tmp, <2 x i32>* %p
@@ -331,13 +472,15 @@ define void @test_v2i32_zero_undef(<2 x i32>* %p) {
define void @test_v4i32_zero_undef(<4 x i32>* %p) {
; CHECK-LABEL: test_v4i32_zero_undef:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vneg.s32 [[Q2:q[0-9]+]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vmov.i32 [[Q3:q[0-9]+]], #0x1f
-; CHECK: vclz.i32 [[Q1]], [[Q1]]
-; CHECK: vsub.i32 [[Q1]], [[Q3]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vneg.s32 q9, q8
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vmov.i32 q9, #0x1f
+; CHECK-NEXT: vclz.i32 q8, q8
+; CHECK-NEXT: vsub.i32 q8, q9, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <4 x i32>, <4 x i32>* %p
%tmp = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 true)
store <4 x i32> %tmp, <4 x i32>* %p
@@ -346,17 +489,19 @@ define void @test_v4i32_zero_undef(<4 x i32>* %p) {
define void @test_v1i64_zero_undef(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64_zero_undef:
-; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
-; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
-; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D2]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
-; CHECK: vcnt.8 [[D2]], [[D2]]
-; CHECK: vpaddl.u8 [[D2]], [[D2]]
-; CHECK: vpaddl.u16 [[D2]], [[D2]]
-; CHECK: vpaddl.u32 [[D2]], [[D2]]
-; CHECK: vstr [[D2]], [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i32 d16, #0x0
+; CHECK-NEXT: vldr d17, [r0]
+; CHECK-NEXT: vmov.i64 d18, #0xffffffffffffffff
+; CHECK-NEXT: vsub.i64 d16, d16, d17
+; CHECK-NEXT: vand d16, d17, d16
+; CHECK-NEXT: vadd.i64 d16, d16, d18
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vpaddl.u16 d16, d16
+; CHECK-NEXT: vpaddl.u32 d16, d16
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: bx lr
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 true)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -365,17 +510,19 @@ define void @test_v1i64_zero_undef(<1 x i64>* %p) {
define void @test_v2i64_zero_undef(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64_zero_undef:
-; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
-; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
-; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
-; CHECK: vcnt.8 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
-; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
-; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i32 q8, #0x0
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmov.i64 q10, #0xffffffffffffffff
+; CHECK-NEXT: vsub.i64 q8, q8, q9
+; CHECK-NEXT: vand q8, q9, q8
+; CHECK-NEXT: vadd.i64 q8, q8, q10
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vpaddl.u32 q8, q8
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0]
+; CHECK-NEXT: bx lr
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
store <2 x i64> %tmp, <2 x i64>* %p
diff --git a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
index 86291aaeaa64..de24681470d1 100644
--- a/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ b/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -1,50 +1,52 @@
; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
-; CHECK-LABEL: f:
define float @f(<4 x i16>* nocapture %in) {
- ; CHECK: vld1
- ; CHECK: vmovl.u16
- ; CHECK-NOT: vand
+; CHECK-LABEL: f:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.16 {d16}, [r0:64]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vcvt.f32.u32 q0, q8
+; CHECK-NEXT: vadd.f32 s4, s0, s1
+; CHECK-NEXT: vadd.f32 s0, s4, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
%1 = load <4 x i16>, <4 x i16>* %in
- ; CHECK: vcvt.f32.u32
%2 = uitofp <4 x i16> %1 to <4 x float>
%3 = extractelement <4 x float> %2, i32 0
%4 = extractelement <4 x float> %2, i32 1
%5 = extractelement <4 x float> %2, i32 2
- ; CHECK: vadd.f32
%6 = fadd float %3, %4
%7 = fadd float %6, %5
ret float %7
}
-; CHECK-LABEL: g:
define float @g(<4 x i16>* nocapture %in) {
- ; CHECK: vldr
+; CHECK-LABEL: g:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.u16 r0, d16[0]
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vcvt.f32.u32 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
%1 = load <4 x i16>, <4 x i16>* %in
-
- ; For now we're generating a vmov.16 and a uxth instruction.
- ; The uxth is redundant, and we should be able to extend without
- ; having to generate cross-domain copies. Once we can do this
- ; we should modify the checks below.
-
- ; CHECK: uxth
%2 = extractelement <4 x i16> %1, i32 0
- ; CHECK: vcvt.f32.u32
%3 = uitofp i16 %2 to float
ret float %3
}
; Make sure we generate zext from <4 x i8> to <4 x 32>.
-
-; CHECK-LABEL: h:
-; CHECK: vld1.32
-; CHECK: vmovl.u8 q8, d16
-; CHECK: vmovl.u16 q8, d16
-; CHECK: vmov r0, r1, d16
-; CHECK: vmov r2, r3, d17
define <4 x i32> @h(<4 x i8> *%in) {
+; CHECK-LABEL: h:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: bx lr
%1 = load <4 x i8>, <4 x i8>* %in, align 4
%2 = extractelement <4 x i8> %1, i32 0
%3 = zext i8 %2 to i32
@@ -60,3 +62,79 @@ define <4 x i32> @h(<4 x i8> *%in) {
%13 = insertelement <4 x i32> %10, i32 %12, i32 3
ret <4 x i32> %13
}
+
+define float @i(<4 x i16>* nocapture %in) {
+ ; FIXME: The vmov.u + sxt can convert to a vmov.s
+; CHECK-LABEL: i:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.u16 r0, d16[0]
+; CHECK-NEXT: sxth r0, r0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %1 = load <4 x i16>, <4 x i16>* %in
+ %2 = extractelement <4 x i16> %1, i32 0
+ %3 = sitofp i16 %2 to float
+ ret float %3
+}
+
+define float @j(<8 x i8>* nocapture %in) {
+; CHECK-LABEL: j:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.u8 r0, d16[7]
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vcvt.f32.u32 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %1 = load <8 x i8>, <8 x i8>* %in
+ %2 = extractelement <8 x i8> %1, i32 7
+ %3 = uitofp i8 %2 to float
+ ret float %3
+}
+
+define float @k(<8 x i8>* nocapture %in) {
+; FIXME: The vmov.u + sxt can convert to a vmov.s
+; CHECK-LABEL: k:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vmov.u8 r0, d16[7]
+; CHECK-NEXT: sxtb r0, r0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %1 = load <8 x i8>, <8 x i8>* %in
+ %2 = extractelement <8 x i8> %1, i32 7
+ %3 = sitofp i8 %2 to float
+ ret float %3
+}
+
+define float @KnownUpperZero(<4 x i16> %v) {
+; FIXME: uxtb are not required
+; CHECK-LABEL: KnownUpperZero:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i16 d16, #0x3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vand d16, d17, d16
+; CHECK-NEXT: vmov.u16 r0, d16[0]
+; CHECK-NEXT: vmov.u16 r1, d16[3]
+; CHECK-NEXT: uxtb r0, r0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: uxtb r0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vcvt.f32.s32 s2, s2
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
+ %1 = and <4 x i16> %v, <i16 3,i16 3,i16 3,i16 3>
+ %2 = extractelement <4 x i16> %1, i32 3
+ %3 = extractelement <4 x i16> %1, i32 0
+ %sinf1 = sitofp i16 %2 to float
+ %sinf2 = sitofp i16 %3 to float
+ %sum = fadd float %sinf1, %sinf2
+ ret float %sum
+}
diff --git a/test/CodeGen/ARM/dbg-range-extension.mir b/test/CodeGen/ARM/dbg-range-extension.mir
index 0dd9ed2b207f..0a48ba83c09f 100644
--- a/test/CodeGen/ARM/dbg-range-extension.mir
+++ b/test/CodeGen/ARM/dbg-range-extension.mir
@@ -23,37 +23,37 @@
# CHECK: [[VAR_I:![0-9]+]] = !DILocalVariable(name: "i",
# CHECK: bb.0.entry
-# CHECK: DBG_VALUE debug-use $r0, debug-use $noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_A:\$r[0-9]+]], debug-use $noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_B:\$r[0-9]+]], debug-use $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE $r0, $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_A:\$r[0-9]+]], $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_B:\$r[0-9]+]], $noreg, [[VAR_B]]
# CHECK: bb.1.if.then
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_C:\$r[0-9]+]], debug-use $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE [[REG_B]], $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE [[REG_A]], $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_C:\$r[0-9]+]], $noreg, [[VAR_C]]
# CHECK: DBG_VALUE 1, 0, [[VAR_I]]
# CHECK: bb.2.for.body
-# CHECK: DBG_VALUE debug-use [[REG_I:\$r[0-9]+]], debug-use $noreg, [[VAR_I]]
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use $noreg, [[VAR_I]]
+# CHECK: DBG_VALUE [[REG_I:\$r[0-9]+]], $noreg, [[VAR_I]]
+# CHECK: DBG_VALUE [[REG_C]], $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE [[REG_B]], $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE [[REG_A]], $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_I]], $noreg, [[VAR_I]]
# CHECK: bb.3.for.cond
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
-# CHECK: DBG_VALUE debug-use [[REG_I]], debug-use $noreg, [[VAR_I]]
+# CHECK: DBG_VALUE [[REG_C]], $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE [[REG_B]], $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE [[REG_A]], $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_I]], $noreg, [[VAR_I]]
# CHECK: bb.4.for.cond.cleanup
-# CHECK: DBG_VALUE debug-use [[REG_C]], debug-use $noreg, [[VAR_C]]
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_C]], $noreg, [[VAR_C]]
+# CHECK: DBG_VALUE [[REG_B]], $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE [[REG_A]], $noreg, [[VAR_A]]
# CHECK: bb.5.if.end
-# CHECK: DBG_VALUE debug-use [[REG_B]], debug-use $noreg, [[VAR_B]]
-# CHECK: DBG_VALUE debug-use [[REG_A]], debug-use $noreg, [[VAR_A]]
+# CHECK: DBG_VALUE [[REG_B]], $noreg, [[VAR_B]]
+# CHECK: DBG_VALUE [[REG_A]], $noreg, [[VAR_A]]
--- |
; ModuleID = '/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll'
source_filename = "/data/kwalker/work/OpenSource-llvm/llvm/test/CodeGen/ARM/dbg-range-extension.ll"
@@ -122,7 +122,7 @@
!4 = !{i32 2, !"Debug Info Version", i32 3}
!5 = !{i32 1, !"wchar_size", i32 4}
!6 = !{i32 1, !"min_enum_size", i32 4}
- !7 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git b8f10df3679b36f51e1de7c4351b82d297825089) (http://llvm.org/git/llvm.git c2a5d16d1e3b8c49f5bbb1ff87a76ac4f88edb89)"}
+ !7 = !{!"clang version 4.0.0"}
!8 = distinct !DISubprogram(name: "func", scope: !1, file: !1, line: 2, type: !9, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
!9 = !DISubroutineType(types: !10)
!10 = !{null, !11}
@@ -219,14 +219,14 @@ body: |
frame-setup CFI_INSTRUCTION offset $r6, -16
frame-setup CFI_INSTRUCTION offset $r5, -20
frame-setup CFI_INSTRUCTION offset $r4, -24
- DBG_VALUE debug-use $r0, debug-use $noreg, !13, !20, debug-location !21
+ DBG_VALUE $r0, $noreg, !13, !20, debug-location !21
$r4 = MOVr killed $r0, 14, $noreg, $noreg
- DBG_VALUE debug-use $r4, debug-use $noreg, !13, !20, debug-location !21
+ DBG_VALUE $r4, $noreg, !13, !20, debug-location !21
$r0 = MOVi 10, 14, $noreg, _, debug-location !22
$r1 = MOVi 11, 14, $noreg, _, debug-location !22
BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def $r0, debug-location !22
$r5 = MOVr killed $r0, 14, $noreg, _, debug-location !22
- DBG_VALUE debug-use $r5, debug-use $noreg, !14, !20, debug-location !23
+ DBG_VALUE $r5, $noreg, !14, !20, debug-location !23
CMPri $r4, 0, 14, $noreg, implicit-def $cpsr, debug-location !25
Bcc %bb.5.if.end, 0, killed $cpsr
@@ -237,7 +237,7 @@ body: |
$r1 = MOVi 13, 14, $noreg, _, debug-location !26
BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def $r0, debug-location !26
$r6 = MOVr killed $r0, 14, $noreg, _, debug-location !26
- DBG_VALUE debug-use $r6, debug-use $noreg, !15, !20, debug-location !27
+ DBG_VALUE $r6, $noreg, !15, !20, debug-location !27
$r7 = MOVi 1, 14, $noreg, $noreg
DBG_VALUE 1, 0, !18, !20, debug-location !28
B %bb.3.for.cond
@@ -249,12 +249,12 @@ body: |
$r0 = MOVr $r7, 14, $noreg, _, debug-location !36
BL @func2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit-def $sp, implicit-def dead $r0, debug-location !36
$r7 = ADDri killed $r7, 1, 14, $noreg, _, debug-location !38
- DBG_VALUE debug-use $r7, debug-use $noreg, !18, !20, debug-location !28
+ DBG_VALUE $r7, $noreg, !18, !20, debug-location !28
bb.3.for.cond:
liveins: $r4, $r5, $r6, $r7
- DBG_VALUE debug-use $r7, debug-use $noreg, !18, !20, debug-location !28
+ DBG_VALUE $r7, $noreg, !18, !20, debug-location !28
CMPrr $r7, $r4, 14, $noreg, implicit-def $cpsr, debug-location !33
Bcc %bb.2.for.body, 11, killed $cpsr, debug-location !33
diff --git a/test/CodeGen/ARM/debug-frame-large-stack.ll b/test/CodeGen/ARM/debug-frame-large-stack.ll
index b81672553d83..e5d24253b988 100644
--- a/test/CodeGen/ARM/debug-frame-large-stack.ll
+++ b/test/CodeGen/ARM/debug-frame-large-stack.ll
@@ -1,4 +1,4 @@
-; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi -disable-fp-elim| FileCheck %s --check-prefix=CHECK-ARM
+; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi -frame-pointer=all| FileCheck %s --check-prefix=CHECK-ARM
; RUN: llc -filetype=asm -o - < %s -mtriple arm-arm-netbsd-eabi | FileCheck %s --check-prefix=CHECK-ARM-FP-ELIM
define void @test1() {
diff --git a/test/CodeGen/ARM/debug-frame-no-debug.ll b/test/CodeGen/ARM/debug-frame-no-debug.ll
index 8a07f261f41b..8f3965a32e62 100644
--- a/test/CodeGen/ARM/debug-frame-no-debug.ll
+++ b/test/CodeGen/ARM/debug-frame-no-debug.ll
@@ -8,7 +8,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple thumb-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP
;-------------------------------------------------------------------------------
diff --git a/test/CodeGen/ARM/debug-frame-vararg.ll b/test/CodeGen/ARM/debug-frame-vararg.ll
index e675647e26ce..c9dcc0b468d9 100644
--- a/test/CodeGen/ARM/debug-frame-vararg.ll
+++ b/test/CodeGen/ARM/debug-frame-vararg.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-FP
-; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-FP-ELIM
+; RUN: llc -mtriple arm-unknown-linux-gnueabi -filetype asm -o - %s -frame-pointer=all | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s | FileCheck %s --check-prefix=CHECK-THUMB-FP
-; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
+; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -frame-pointer=all | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
; Tests that the initial space allocated to the varargs on the stack is
; taken into account in the .cfi_ directives.
diff --git a/test/CodeGen/ARM/debug-frame.ll b/test/CodeGen/ARM/debug-frame.ll
index f0333634cb5e..6efe58afb38a 100644
--- a/test/CodeGen/ARM/debug-frame.ll
+++ b/test/CodeGen/ARM/debug-frame.ll
@@ -4,18 +4,18 @@
; are properly generated or not.
; We have to check several cases:
-; (1) arm with -disable-fp-elim
-; (2) arm without -disable-fp-elim
-; (3) armv7 with -disable-fp-elim
-; (4) armv7 without -disable-fp-elim
-; (5) thumb with -disable-fp-elim
-; (6) thumb without -disable-fp-elim
-; (7) thumbv7 with -disable-fp-elim
-; (8) thumbv7 without -disable-fp-elim
+; (1) arm with -frame-pointer=all
+; (2) arm without -frame-pointer=all
+; (3) armv7 with -frame-pointer=all
+; (4) armv7 without -frame-pointer=all
+; (5) thumb with -frame-pointer=all
+; (6) thumb without -frame-pointer=all
+; (7) thumbv7 with -frame-pointer=all
+; (8) thumbv7 without -frame-pointer=all
; (9) thumbv7 with -no-integrated-as
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
@@ -23,7 +23,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
@@ -31,7 +31,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
; RUN: llc -mtriple thumbv5-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP
; RUN: llc -mtriple thumbv5-unknown-linux-gnueabi \
@@ -39,7 +39,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM
; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP
; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
@@ -47,7 +47,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-ELIM
; RUN: llc -mtriple thumbv7-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -no-integrated-as -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -no-integrated-as -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-THUMB-V7-FP-NOIAS
;-------------------------------------------------------------------------------
diff --git a/test/CodeGen/ARM/debug-info-qreg.ll b/test/CodeGen/ARM/debug-info-qreg.ll
index fa4d79c604af..67628dd2a3a2 100644
--- a/test/CodeGen/ARM/debug-info-qreg.ll
+++ b/test/CodeGen/ARM/debug-info-qreg.ll
@@ -17,12 +17,12 @@ target triple = "thumbv7-apple-macosx10.6.7"
declare <4 x float> @test0001(float) nounwind readnone ssp
-define i32 @main(i32 %argc, i8** nocapture %argv, <4 x float> %x) nounwind ssp !dbg !10 {
+define i32 @main(i32 %argc, i8** nocapture %argv, <4 x float> %x, <4 x float> %y) nounwind ssp !dbg !10 {
entry:
br label %for.body9
for.body9: ; preds = %for.body9, %entry
- %add19 = fadd <4 x float> %x, <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 1.000000e+00>, !dbg !39
+ %add19 = fadd <4 x float> %x, %y, !dbg !39
br i1 undef, label %for.end54, label %for.body9, !dbg !44
for.end54: ; preds = %for.body9
diff --git a/test/CodeGen/ARM/debugtrap.ll b/test/CodeGen/ARM/debugtrap.ll
index 5064a4ec2ca9..88ca81c4f2cf 100644
--- a/test/CodeGen/ARM/debugtrap.ll
+++ b/test/CodeGen/ARM/debugtrap.ll
@@ -1,7 +1,10 @@
; This test ensures the @llvm.debugtrap() call is not removed when generating
; the 'pop' instruction to restore the callee saved registers on ARM.
-; RUN: llc < %s -mtriple=armv7 -O0 -filetype=asm | FileCheck %s
+; RUN: llc < %s -mtriple=armv4 -O0 -filetype=asm | FileCheck --check-prefixes=CHECK,V4 %s
+; RUN: llc < %s -mtriple=armv5 -O0 -filetype=asm | FileCheck --check-prefixes=CHECK,V5 %s
+; RUN: llc < %s -mtriple=thumbv4 -O0 -filetype=asm | FileCheck --check-prefixes=CHECK,V4 %s
+; RUN: llc < %s -mtriple=thumbv5 -O0 -filetype=asm | FileCheck --check-prefixes=CHECK,V5 %s
declare void @llvm.debugtrap() nounwind
declare void @foo() nounwind
@@ -9,8 +12,9 @@ declare void @foo() nounwind
define void @test() nounwind {
entry:
; CHECK: bl foo
+ ; V4-NEXT: udf #254
+ ; V5-NEXT: bkpt #0
; CHECK-NEXT: pop
- ; CHECK-NEXT: .inst 0xe7ffdefe
call void @foo()
call void @llvm.debugtrap()
ret void
diff --git a/test/CodeGen/ARM/demanded-bits-and.ll b/test/CodeGen/ARM/demanded-bits-and.ll
new file mode 100644
index 000000000000..42b6ca5e6447
--- /dev/null
+++ b/test/CodeGen/ARM/demanded-bits-and.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi < %s | FileCheck %s
+
+; Make sure this doesn't hang, and there are no unnecessary
+; "and" instructions.
+
+define dso_local void @f(i16* %p) {
+; CHECK-LABEL: f:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .LBB0_1: @ %bb
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldrh r1, [r0]
+; CHECK-NEXT: and r2, r1, #255
+; CHECK-NEXT: add r3, r2, r1, lsr #8
+; CHECK-NEXT: add r2, r3, r2
+; CHECK-NEXT: add r1, r2, r1, lsr #8
+; CHECK-NEXT: add r1, r1, #2
+; CHECK-NEXT: lsr r1, r1, #2
+; CHECK-NEXT: strh r1, [r0]
+; CHECK-NEXT: b .LBB0_1
+entry:
+ br label %bb
+
+bb:
+ %_p_scalar_ = load i16, i16* %p, align 2
+ %p_and = and i16 %_p_scalar_, 255
+ %p_ = lshr i16 %_p_scalar_, 8
+ %p_add = add nuw nsw i16 %p_, 2
+ %p_add14 = add nuw nsw i16 %p_add, %p_and
+ %p_add18 = add nuw nsw i16 %p_add14, %p_and
+ %p_add19 = add nuw nsw i16 %p_add18, %p_
+ %p_200 = lshr i16 %p_add19, 2
+ store i16 %p_200, i16* %p, align 2
+ br label %bb
+}
diff --git a/test/CodeGen/ARM/disable-fp-elim.ll b/test/CodeGen/ARM/disable-fp-elim.ll
index dafeda2ac762..ddbe36597e17 100644
--- a/test/CodeGen/ARM/disable-fp-elim.ll
+++ b/test/CodeGen/ARM/disable-fp-elim.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim=false -O1 | FileCheck %s --check-prefix=ENABLE-FP-ELIM
-; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -disable-fp-elim=false -O0 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=all -O1 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=none -O1 | FileCheck %s --check-prefix=ENABLE-FP-ELIM
+; RUN: llc < %s -mtriple armv7-none-linux-gnueabi -frame-pointer=none -O0 | FileCheck %s --check-prefix=DISABLE-FP-ELIM
-; Check that command line option "-disable-fp-elim" overrides function attribute
+; Check that command line option "-frame-pointer=all" overrides function attribute
; "no-frame-pointer-elim". Also, check frame pointer elimination is disabled
; when fast-isel is used.
diff --git a/test/CodeGen/ARM/ehabi-unwind.ll b/test/CodeGen/ARM/ehabi-unwind.ll
index a86f340d74e6..57d3eda921c1 100644
--- a/test/CodeGen/ARM/ehabi-unwind.ll
+++ b/test/CodeGen/ARM/ehabi-unwind.ll
@@ -1,6 +1,6 @@
; Test that the EHABI unwind instruction generator does not encounter any
; unfamiliar instructions.
-; RUN: llc < %s -mtriple=thumbv7 -disable-fp-elim
+; RUN: llc < %s -mtriple=thumbv7 -frame-pointer=all
; RUN: llc < %s -mtriple=thumbv7
define void @_Z1fv() nounwind {
diff --git a/test/CodeGen/ARM/ehabi.ll b/test/CodeGen/ARM/ehabi.ll
index f5a433b14bf1..5c4a2b620a1f 100644
--- a/test/CodeGen/ARM/ehabi.ll
+++ b/test/CodeGen/ARM/ehabi.ll
@@ -13,13 +13,13 @@
; nounwind function attribute.
; We have to check several cases:
-; (1) arm with -disable-fp-elim
-; (2) arm without -disable-fp-elim
-; (3) armv7 with -disable-fp-elim
-; (4) armv7 without -disable-fp-elim
+; (1) arm with -frame-pointer=all
+; (2) arm without -frame-pointer=all
+; (3) armv7 with -frame-pointer=all
+; (4) armv7 without -frame-pointer=all
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP
; RUN: llc -mtriple arm-unknown-linux-gnueabi \
@@ -27,7 +27,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
; RUN: llc -mtriple armv7-unknown-linux-gnueabi \
@@ -35,7 +35,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
; RUN: llc -mtriple arm-unknown-linux-musleabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP
; RUN: llc -mtriple arm-unknown-linux-musleabi \
@@ -43,7 +43,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple armv7-unknown-linux-musleabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
; RUN: llc -mtriple armv7-unknown-linux-musleabi \
@@ -51,7 +51,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
; RUN: llc -mtriple arm-unknown-linux-androideabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-FP
; RUN: llc -mtriple arm-unknown-linux-androideabi \
@@ -59,7 +59,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-FP-ELIM
; RUN: llc -mtriple armv7-unknown-linux-androideabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP
; RUN: llc -mtriple armv7-unknown-linux-androideabi \
@@ -67,7 +67,7 @@
; RUN: | FileCheck %s --check-prefix=CHECK-V7-FP-ELIM
; RUN: llc -mtriple arm-unknown-netbsd-eabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=DWARF-FP
; RUN: llc -mtriple arm-unknown-netbsd-eabi \
@@ -75,7 +75,7 @@
; RUN: | FileCheck %s --check-prefix=DWARF-FP-ELIM
; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
-; RUN: -disable-fp-elim -filetype=asm -o - %s \
+; RUN: -frame-pointer=all -filetype=asm -o - %s \
; RUN: | FileCheck %s --check-prefix=DWARF-V7-FP
; RUN: llc -mtriple armv7-unknown-netbsd-eabi \
diff --git a/test/CodeGen/ARM/emutls_generic.ll b/test/CodeGen/ARM/emutls_generic.ll
index 41a46b41b77e..8bf0ab301244 100644
--- a/test/CodeGen/ARM/emutls_generic.ll
+++ b/test/CodeGen/ARM/emutls_generic.ll
@@ -78,8 +78,9 @@ entry:
; ARM_32-NEXT: .long 0
; WIN-LABEL: get_external_x:
-; WIN: movw r0, :lower16:__emutls_v.external_x
-; WIN: movt r0, :upper16:__emutls_v.external_x
+; WIN: movw r0, :lower16:.refptr.__emutls_v.external_x
+; WIN: movt r0, :upper16:.refptr.__emutls_v.external_x
+; WIN: ldr r0, [r0]
; WIN: bl __emutls_get_address
; WIN-LABEL: get_external_y:
; WIN: movw r0, :lower16:__emutls_v.external_y
diff --git a/test/CodeGen/ARM/execute-only-big-stack-frame.ll b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
index 24c6a06d6af1..5e4e718020e7 100644
--- a/test/CodeGen/ARM/execute-only-big-stack-frame.ll
+++ b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
@@ -2,6 +2,8 @@
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-MOVW-MOVT-ADD %s
+; RUN: llc < %s -mtriple=thumbv8m.base -mcpu=cortex-m23 -mattr=+execute-only -O0 %s -o - \
+; RUN: | FileCheck --check-prefix=CHECK-MOVW-MOVT-ADD %s
; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
diff --git a/test/CodeGen/ARM/execute-only-section.ll b/test/CodeGen/ARM/execute-only-section.ll
index a3313d8c2f73..8c7eb0c43767 100644
--- a/test/CodeGen/ARM/execute-only-section.ll
+++ b/test/CodeGen/ARM/execute-only-section.ll
@@ -1,5 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7m -mattr=+execute-only %s -o - | FileCheck %s
; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8m.base -mcpu=cortex-m23 -mattr=+execute-only %s -o - | FileCheck %s
; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only %s -o - | FileCheck %s
; CHECK: .section .text,"axy",%progbits,unique,0
diff --git a/test/CodeGen/ARM/execute-only.ll b/test/CodeGen/ARM/execute-only.ll
index b5d9d89f1f75..169f44c7ffa6 100644
--- a/test/CodeGen/ARM/execute-only.ll
+++ b/test/CodeGen/ARM/execute-only.ll
@@ -1,7 +1,11 @@
; RUN: llc -mtriple=thumbv8m.base-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
+; RUN: llc -mtriple=thumbv8m.base-eabi -mcpu=cortex-m23 -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
; RUN: llc -mtriple=thumbv7m-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
; RUN: llc -mtriple=thumbv8m.main-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
+; CHECK-NOT: {{^ *}}.text{{$}}
+; CHECK: .section .text,"axy",%progbits,unique,0
+
@var = global i32 0
define i32 @global() minsize {
diff --git a/test/CodeGen/ARM/fast-isel-align.ll b/test/CodeGen/ARM/fast-isel-align.ll
index 71cd73a4a25d..9dab0abedb64 100644
--- a/test/CodeGen/ARM/fast-isel-align.ll
+++ b/test/CodeGen/ARM/fast-isel-align.ll
@@ -26,12 +26,12 @@
define void @unaligned_store(float %x, float %y) nounwind {
entry:
; ARM: @unaligned_store
-; ARM: vmov r1, s0
-; ARM: str r1, [r0]
+; ARM: vmov [[REG:r[0-9]+]], s0
+; ARM: str [[REG]], [{{r[0-9]+}}]
; THUMB: @unaligned_store
-; THUMB: vmov r1, s0
-; THUMB: str r1, [r0]
+; THUMB: vmov [[REG:r[0-9]+]], s0
+; THUMB: str [[REG]], [{{r[0-9]+}}]
%add = fadd float %x, %y
%0 = load %struct.anon*, %struct.anon** @a, align 4
diff --git a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
index ca512970c9cf..f49c907c4145 100644
--- a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
+++ b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
@@ -82,7 +82,8 @@ entry:
; ARM: t9
%add.ptr = getelementptr inbounds i16, i16* %a, i64 -8
store i16 0, i16* %add.ptr, align 2
-; ARM: strh r1, [r0, #-16]
+; ARM: movw [[REG0:r[0-9]+]], #0
+; ARM: strh [[REG0]], [{{r[0-9]+}}, #-16]
ret void
}
@@ -93,9 +94,10 @@ entry:
; ARM: t10
%add.ptr = getelementptr inbounds i16, i16* %a, i64 -128
store i16 0, i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #255
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: strh r{{[1-9]}}, [r0]
+; ARM: mvn r1, #255
+; ARM: add [[REG0:r[0-9]+]], r0, r1
+; ARM: movw [[REG1:r[0-9]+]], #0
+; ARM: strh [[REG1]], {{\[}}[[REG0]]]
ret void
}
@@ -104,7 +106,8 @@ entry:
; ARM: t11
%add.ptr = getelementptr inbounds i16, i16* %a, i64 8
store i16 0, i16* %add.ptr, align 2
-; ARM: strh r{{[1-9]}}, [r0, #16]
+; ARM: movw [[REG1:r[0-9]+]], #0
+; ARM: strh [[REG1]], [{{r[0-9]+}}, #16]
ret void
}
@@ -115,8 +118,9 @@ entry:
; ARM: t12
%add.ptr = getelementptr inbounds i16, i16* %a, i64 128
store i16 0, i16* %add.ptr, align 2
-; ARM: add r0, r0, #256
-; ARM: strh r{{[1-9]}}, [r0]
+; ARM: add [[REG0:r[0-9]+]], r0, #256
+; ARM: movw [[REG1:r[0-9]+]], #0
+; ARM: strh [[REG1]], {{\[}}[[REG0]]]
ret void
}
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index 502285e85dfd..3661beab5c02 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -149,21 +149,21 @@ define void @test4() {
; THUMB: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
; THUMB: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
-; THUMB: ldr r0, [r0]
-; THUMB: ldr r1, [r0]
-; THUMB: adds r1, #1
-; THUMB: str r1, [r0]
+; THUMB: ldr [[REG:r[0-9]+]], [r0]
+; THUMB: ldr [[REG1:r[0-9]+]], {{\[}}[[REG]]]
+; THUMB: adds [[REG1]], #1
+; THUMB: str [[REG1]], {{\[}}[[REG]]]
; ARM-MACHO: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr r0, .LCPI)}}
; ARM-MACHO: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
-; ARM-MACHO: ldr r0, [r0]
+; ARM-MACHO: ldr [[REG:r[0-9]+]], [r0]
-; ARM-ELF: movw r0, :lower16:test4g
-; ARM-ELF: movt r0, :upper16:test4g
+; ARM-ELF: movw [[REG:r[0-9]+]], :lower16:test4g
+; ARM-ELF: movt [[REG]], :upper16:test4g
-; ARM: ldr r1, [r0]
-; ARM: add r1, r1, #1
-; ARM: str r1, [r0]
+; ARM: ldr [[REG1:r[0-9]+]], {{\[}}[[REG]]]
+; ARM: add [[REG2:r[0-9]+]], [[REG1]], #1
+; ARM: str [[REG2]], {{\[}}[[REG]]]
}
; ARM: @urem_fold
diff --git a/test/CodeGen/ARM/float-helpers.s b/test/CodeGen/ARM/float-helpers.s
index 0a9f2490d20d..42ab56084d45 100644
--- a/test/CodeGen/ARM/float-helpers.s
+++ b/test/CodeGen/ARM/float-helpers.s
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -asm-verbose=false -mattr=-vfp2 -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-SOFT
-; RUN: llc -asm-verbose=false -mattr=-vfp2 -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-SOFT
-; RUN: llc -asm-verbose=false -mattr=+vfp3 -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-SOFTFP
-; RUN: llc -asm-verbose=false -mattr=+vfp3 -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-SOFTFP
-; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP
-; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP
-; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY
-; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-ALL -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY
+; RUN: llc -asm-verbose=false -mattr=-vfp2 -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-SOFT
+; RUN: llc -asm-verbose=false -mattr=-vfp2 -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-SOFT
+; RUN: llc -asm-verbose=false -mattr=+vfp3 -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-SOFTFP
+; RUN: llc -asm-verbose=false -mattr=+vfp3 -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-SOFTFP
+; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP
+; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP
+; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY
+; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY
; The Runtime ABI for the ARM Architecture IHI0043 section 4.1.2 The
; floating-point helper functions to always use the base AAPCS (soft-float)
diff --git a/test/CodeGen/ARM/fmacs.ll b/test/CodeGen/ARM/fmacs.ll
index aa492708c0b6..140ab933d0cd 100644
--- a/test/CodeGen/ARM/fmacs.ll
+++ b/test/CodeGen/ARM/fmacs.ll
@@ -3,6 +3,8 @@
; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-a9 -float-abi=hard %s -o - | FileCheck %s -check-prefix=HARD
+; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard %s -o - | FileCheck %s -check-prefix=VMLA
+; RUN: llc -mtriple=arm-linux-gnueabi -mcpu=cortex-m33 -float-abi=hard %s -o - | FileCheck %s -check-prefix=VMLA
define float @t1(float %acc, float %a, float %b) {
entry:
@@ -15,6 +17,22 @@ entry:
; A8-LABEL: t1:
; A8: vmul.f32
; A8: vadd.f32
+
+; VMLA-LABEL: t1:
+; VMLA: vmul.f32
+; VMLA-NEXT: vadd.f32
+
+ %0 = fmul float %a, %b
+ %1 = fadd float %acc, %0
+ ret float %1
+}
+
+define float @vmla_minsize(float %acc, float %a, float %b) #0 {
+entry:
+; VMLA-LABEL: vmla_minsize:
+; VMLA: vmla.f32 s0, s1, s2
+; VMLA-NEXT: bx lr
+
%0 = fmul float %a, %b
%1 = fadd float %acc, %0
ret float %1
@@ -102,3 +120,5 @@ entry:
%3 = fadd float %1, %2
ret float %3
}
+
+attributes #0 = { minsize nounwind optsize }
diff --git a/test/CodeGen/ARM/fold-sext-sextload.ll b/test/CodeGen/ARM/fold-sext-sextload.ll
index 484e93f59d48..96e2e78a47d4 100644
--- a/test/CodeGen/ARM/fold-sext-sextload.ll
+++ b/test/CodeGen/ARM/fold-sext-sextload.ll
@@ -1,15 +1,14 @@
; RUN: llc -mtriple armv7 %s -stop-before=livedebugvalues -o - | FileCheck %s
-define <4 x i8> @i(<4 x i8>*) !dbg !8 {
- %2 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
+define <4 x i8> @i(<4 x i8>*, <4 x i8>) !dbg !8 {
+ %3 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
; CHECK: $[[reg:.*]] = VLD1LNd32 {{.*}} debug-location !14 :: (load 4 from %ir.0)
- ; CHECK-NEXT: VMOVLsv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
- ; CHECK-NEXT: VMOVLsv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
-
- %3 = sdiv <4 x i8> zeroinitializer, %2, !dbg !15
- call void @llvm.dbg.value(metadata <4 x i8> %2, metadata !11, metadata !DIExpression()), !dbg !14
- call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !13, metadata !DIExpression()), !dbg !15
- ret <4 x i8> %3, !dbg !16
+ ; CHECK: VMOVLsv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
+ ; CHECK: VMOVLsv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
+ %4 = sdiv <4 x i8> %1, %3, !dbg !15
+ call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !11, metadata !DIExpression()), !dbg !14
+ call void @llvm.dbg.value(metadata <4 x i8> %4, metadata !13, metadata !DIExpression()), !dbg !15
+ ret <4 x i8> %4, !dbg !16
}
declare void @llvm.dbg.value(metadata, metadata, metadata)
diff --git a/test/CodeGen/ARM/fold-stack-adjust.ll b/test/CodeGen/ARM/fold-stack-adjust.ll
index eb32ee54c095..6b86c6a9d230 100644
--- a/test/CodeGen/ARM/fold-stack-adjust.ll
+++ b/test/CodeGen/ARM/fold-stack-adjust.ll
@@ -1,9 +1,9 @@
; Disable shrink-wrapping on the first test otherwise we wouldn't
; exerce the path for PR18136.
; RUN: llc -mtriple=thumbv7-apple-none-macho < %s -enable-shrink-wrap=false | FileCheck %s
-; RUN: llc -mtriple=thumbv6m-apple-none-macho -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-T1
-; RUN: llc -mtriple=thumbv7-apple-darwin-ios -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-IOS
-; RUN: llc -mtriple=thumbv7--linux-gnueabi -disable-fp-elim < %s | FileCheck %s --check-prefix=CHECK-LINUX
+; RUN: llc -mtriple=thumbv6m-apple-none-macho -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-T1
+; RUN: llc -mtriple=thumbv7-apple-darwin-ios -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-IOS
+; RUN: llc -mtriple=thumbv7--linux-gnueabi -frame-pointer=all < %s | FileCheck %s --check-prefix=CHECK-LINUX
declare void @bar(i8*)
diff --git a/test/CodeGen/ARM/fold-zext-zextload.ll b/test/CodeGen/ARM/fold-zext-zextload.ll
index 3ff0dd885a80..25e226fda664 100644
--- a/test/CodeGen/ARM/fold-zext-zextload.ll
+++ b/test/CodeGen/ARM/fold-zext-zextload.ll
@@ -1,15 +1,14 @@
; RUN: llc -mtriple armv7 %s -stop-before=livedebugvalues -o - | FileCheck %s
-define <4 x i8> @i(<4 x i8>*) !dbg !8 {
- %2 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
+define <4 x i8> @i(<4 x i8>*, <4 x i8>) !dbg !8 {
+ %3 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
; CHECK: $[[reg:.*]] = VLD1LNd32 {{.*}} debug-location !14 :: (load 4 from %ir.0)
; CHECK-NEXT: VMOVLuv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
; CHECK-NEXT: VMOVLuv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
-
- %3 = udiv <4 x i8> zeroinitializer, %2, !dbg !15
- call void @llvm.dbg.value(metadata <4 x i8> %2, metadata !11, metadata !DIExpression()), !dbg !14
- call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !13, metadata !DIExpression()), !dbg !15
- ret <4 x i8> %3, !dbg !16
+ %4 = udiv <4 x i8> %1, %3, !dbg !15
+ call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !11, metadata !DIExpression()), !dbg !14
+ call void @llvm.dbg.value(metadata <4 x i8> %4, metadata !13, metadata !DIExpression()), !dbg !15
+ ret <4 x i8> %4, !dbg !16
}
declare void @llvm.dbg.value(metadata, metadata, metadata)
diff --git a/test/CodeGen/ARM/fp16-instructions.ll b/test/CodeGen/ARM/fp16-instructions.ll
index eb5ec5eb87d9..670fcf58b1ed 100644
--- a/test/CodeGen/ARM/fp16-instructions.ll
+++ b/test/CodeGen/ARM/fp16-instructions.ll
@@ -935,9 +935,9 @@ entry:
; CHECK-SOFTFP-FP16-T32: vmov [[S6:s[0-9]]], r0
; CHECK-SOFTFP-FP16-T32: vldr s0, .LCP{{.*}}
; CHECK-SOFTFP-FP16-T32: vcvtb.f32.f16 [[S6]], [[S6]]
-; CHECK-SOFTFP-FP16-T32: vmov.f32 [[S2:s[0-9]]], #-2.000000e+00
-; CHECK-SOFTFP-FP16-T32: vcmp.f32 [[S6]], s0
; CHECK-SOFTFP-FP16-T32: vldr [[S4:s[0-9]]], .LCPI{{.*}}
+; CHECK-SOFTFP-FP16-T32: vcmp.f32 [[S6]], s0
+; CHECK-SOFTFP-FP16-T32: vmov.f32 [[S2:s[0-9]]], #-2.000000e+00
; CHECK-SOFTFP-FP16-T32: vmrs APSR_nzcv, fpscr
; CHECK-SOFTFP-FP16-T32: it eq
; CHECK-SOFTFP-FP16-T32: vmoveq.f32 [[S4]], [[S2]]
@@ -1043,7 +1043,7 @@ entry:
; CHECK-SPILL-RELOAD-LABEL: fn1:
; CHECK-SPILL-RELOAD: vstr.16 s0, [sp, #{{.}}] @ 2-byte Spill
-; CHECK-SPILL-RELOAD-NEXT: bl fn2
+; CHECK-SPILL-RELOAD: bl fn2
; CHECK-SPILL-RELOAD-NEXT: vldr.16 s0, [sp, #{{.}}] @ 2-byte Reload
}
diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll
index dae9ef2ea83a..d7eaddc9e408 100644
--- a/test/CodeGen/ARM/fp16-promote.ll
+++ b/test/CodeGen/ARM/fp16-promote.ll
@@ -644,7 +644,7 @@ define void @test_maxnum(half* %p, half* %q) #0 {
ret void
}
-; CHECK-ALL-LABEL: test_minnan:
+; CHECK-ALL-LABEL: test_minimum:
; CHECK-FP16: vmov.f32 s0, #1.000000e+00
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
@@ -654,7 +654,7 @@ define void @test_maxnum(half* %p, half* %q) #0 {
; CHECK-NOVFP: bl __aeabi_fcmpge
; CHECK-FP16: vcvtb.f16.f32
; CHECK-LIBCALL: bl __aeabi_f2h
-define void @test_minnan(half* %p) #0 {
+define void @test_minimum(half* %p) #0 {
%a = load half, half* %p, align 2
%c = fcmp ult half %a, 1.0
%r = select i1 %c, half %a, half 1.0
@@ -662,7 +662,7 @@ define void @test_minnan(half* %p) #0 {
ret void
}
-; CHECK-ALL-LABEL: test_maxnan:
+; CHECK-ALL-LABEL: test_maximum:
; CHECK-FP16: vmov.f32 s0, #1.000000e+00
; CHECK-FP16: vcvtb.f32.f16
; CHECK-LIBCALL: bl __aeabi_h2f
@@ -672,7 +672,7 @@ define void @test_minnan(half* %p) #0 {
; CHECK-NOVFP: bl __aeabi_fcmple
; CHECK-FP16: vcvtb.f16.f32
; CHECK-LIBCALL: bl __aeabi_f2h
-define void @test_maxnan(half* %p) #0 {
+define void @test_maximum(half* %p) #0 {
%a = load half, half* %p, align 2
%c = fcmp ugt half %a, 1.0
%r = select i1 %c, half %a, half 1.0
diff --git a/test/CodeGen/ARM/fp16-vld.ll b/test/CodeGen/ARM/fp16-vld.ll
new file mode 100644
index 000000000000..5052b99e6c9d
--- /dev/null
+++ b/test/CodeGen/ARM/fp16-vld.ll
@@ -0,0 +1,48 @@
+; RUN: llc -asm-verbose=false < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8.2a-arm-unknown-eabihf"
+
+define dso_local void @vec8(half* nocapture readonly %V, i32 %N) local_unnamed_addr #0 {
+; CHECK: .LBB0_1:
+; CHECK-NEXT: vld1.16 {d16, d17}, [r0]!
+; CHECK-NEXT: subs r1, r1, #8
+; CHECK-NEXT: bne .LBB0_1
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds half, half* %V, i32 %index
+ %1 = bitcast half* %0 to <8 x half>*
+ %wide.load = load volatile <8 x half>, <8 x half>* %1, align 2
+ %index.next = add i32 %index, 8
+ %cmp = icmp eq i32 %index.next, %N
+ br i1 %cmp, label %byeblock, label %vector.body
+
+byeblock:
+ ret void
+}
+
+define dso_local void @vec4(half* nocapture readonly %V, i32 %N) local_unnamed_addr #0 {
+; CHECK: .LBB1_1:
+; CHECK-NEXT: vld1.16 {d16}, [r0]!
+; CHECK-NEXT: subs r1, r1, #4
+; CHECK-NEXT: bne .LBB1_1
+entry:
+ br label %vector.body
+
+vector.body:
+ %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds half, half* %V, i32 %index
+ %1 = bitcast half* %0 to <4 x half>*
+ %wide.load = load volatile <4 x half>, <4 x half>* %1, align 2
+ %index.next = add i32 %index, 4
+ %cmp = icmp eq i32 %index.next, %N
+ br i1 %cmp, label %byeblock, label %vector.body
+
+byeblock:
+ ret void
+}
+
+attributes #0 = { norecurse nounwind readonly "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "target-cpu"="generic" "target-features"="+armv8.2-a,+fullfp16,+strict-align,-thumb-mode" "unsafe-fp-math"="true" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll b/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll
new file mode 100644
index 000000000000..6a61bb594b43
--- /dev/null
+++ b/test/CodeGen/ARM/fp16-vminmaxnm-vector.ll
@@ -0,0 +1,302 @@
+; RUN: llc < %s -mtriple=arm-eabi -mattr=+v8.2a,+neon,+fullfp16 -float-abi=hard | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7a -mattr=+v8.2a,+neon,+fullfp16 -float-abi=hard | FileCheck %s
+
+; 4-element vector
+
+; Ordered
+
+define <4 x half> @test1(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test1:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ogt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test2(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test2:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ogt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test3(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test3:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast oge <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test4(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test4:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast oge <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test5(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test5:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast olt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test6(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test6:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast olt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test7(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test7:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ole <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test8(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test8:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ole <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+; Unordered
+
+define <4 x half> @test11(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test11:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ugt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test12(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test12:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ugt <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test13(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test13:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast uge <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test14(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test14:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast uge <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test15(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test15:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ult <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test16(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test16:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ult <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test17(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test17:
+; CHECK: vminnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ule <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %A, <4 x half> %B
+ ret <4 x half> %tmp4
+}
+
+define <4 x half> @test18(<4 x half> %A, <4 x half> %B) {
+; CHECK-LABEL: test18:
+; CHECK: vmaxnm.f16 d0, d0, d1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ule <4 x half> %A, %B
+ %tmp4 = select <4 x i1> %tmp3, <4 x half> %B, <4 x half> %A
+ ret <4 x half> %tmp4
+}
+
+; 8-element vector
+
+; Ordered
+
+define <8 x half> @test201(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test201:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ogt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test202(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test202:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ogt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test203(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test203:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast oge <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test204(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test204:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast oge <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test205(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test205:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast olt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test206(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test206:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast olt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test207(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test207:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ole <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test208(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test208:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ole <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+; Unordered
+
+define <8 x half> @test209(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test209:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ugt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test210(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test210:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ugt <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test211(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test211:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast uge <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test214(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test214:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast uge <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test215(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test215:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ult <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test216(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test216:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ult <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test217(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test217:
+; CHECK: vminnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ule <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %A, <8 x half> %B
+ ret <8 x half> %tmp4
+}
+
+define <8 x half> @test218(<8 x half> %A, <8 x half> %B) {
+; CHECK-LABEL: test218:
+; CHECK: vmaxnm.f16 q0, q0, q1
+; CHECK-NEXT: bx lr
+ %tmp3 = fcmp fast ule <8 x half> %A, %B
+ %tmp4 = select <8 x i1> %tmp3, <8 x half> %B, <8 x half> %A
+ ret <8 x half> %tmp4
+}
diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll
index eadf9afd4764..8d740d88fc4a 100644
--- a/test/CodeGen/ARM/fpconv.ll
+++ b/test/CodeGen/ARM/fpconv.ll
@@ -1,9 +1,13 @@
; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefix=CHECK-VFP
; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s
+; RUN: llc -mtriple=armv8r-none-none-eabi %s -o - | FileCheck %s --check-prefix=CHECK-VFP
+; RUN: llc -mtriple=armv8r-none-none-eabi -mattr=+fp-only-sp %s -o - | FileCheck %s --check-prefix=CHECK-VFP-SP
define float @f1(double %x) {
;CHECK-VFP-LABEL: f1:
;CHECK-VFP: vcvt.f32.f64
+;CHECK-VFP-SP-LABEL: f1:
+;CHECK-VFP-SP: bl __aeabi_d2f
;CHECK-LABEL: f1:
;CHECK: truncdfsf2
entry:
@@ -14,6 +18,8 @@ entry:
define double @f2(float %x) {
;CHECK-VFP-LABEL: f2:
;CHECK-VFP: vcvt.f64.f32
+;CHECK-VFP-SP-LABEL: f2:
+;CHECK-VFP-SP: bl __aeabi_f2d
;CHECK-LABEL: f2:
;CHECK: extendsfdf2
entry:
@@ -24,6 +30,8 @@ entry:
define i32 @f3(float %x) {
;CHECK-VFP-LABEL: f3:
;CHECK-VFP: vcvt.s32.f32
+;CHECK-VFP-SP-LABEL: f3:
+;CHECK-VFP-SP: vcvt.s32.f32
;CHECK-LABEL: f3:
;CHECK: fixsfsi
entry:
@@ -34,6 +42,8 @@ entry:
define i32 @f4(float %x) {
;CHECK-VFP-LABEL: f4:
;CHECK-VFP: vcvt.u32.f32
+;CHECK-VFP-SP-LABEL: f4:
+;CHECK-VFP-SP: vcvt.u32.f32
;CHECK-LABEL: f4:
;CHECK: fixunssfsi
entry:
@@ -44,6 +54,8 @@ entry:
define i32 @f5(double %x) {
;CHECK-VFP-LABEL: f5:
;CHECK-VFP: vcvt.s32.f64
+;CHECK-VFP-SP-LABEL: f5:
+;CHECK-VFP-SP: bl __aeabi_d2iz
;CHECK-LABEL: f5:
;CHECK: fixdfsi
entry:
@@ -54,6 +66,8 @@ entry:
define i32 @f6(double %x) {
;CHECK-VFP-LABEL: f6:
;CHECK-VFP: vcvt.u32.f64
+;CHECK-VFP-SP-LABEL: f6:
+;CHECK-VFP-SP: bl __aeabi_d2uiz
;CHECK-LABEL: f6:
;CHECK: fixunsdfsi
entry:
@@ -64,6 +78,8 @@ entry:
define float @f7(i32 %a) {
;CHECK-VFP-LABEL: f7:
;CHECK-VFP: vcvt.f32.s32
+;CHECK-VFP-SP-LABEL: f7:
+;CHECK-VFP-SP: vcvt.f32.s32
;CHECK-LABEL: f7:
;CHECK: floatsisf
entry:
@@ -74,6 +90,8 @@ entry:
define double @f8(i32 %a) {
;CHECK-VFP-LABEL: f8:
;CHECK-VFP: vcvt.f64.s32
+;CHECK-VFP-SP-LABEL: f8:
+;CHECK-VFP-SP: bl __aeabi_i2d
;CHECK-LABEL: f8:
;CHECK: floatsidf
entry:
@@ -84,6 +102,8 @@ entry:
define float @f9(i32 %a) {
;CHECK-VFP-LABEL: f9:
;CHECK-VFP: vcvt.f32.u32
+;CHECK-VFP-SP-LABEL: f9:
+;CHECK-VFP-SP: vcvt.f32.u32
;CHECK-LABEL: f9:
;CHECK: floatunsisf
entry:
@@ -94,6 +114,8 @@ entry:
define double @f10(i32 %a) {
;CHECK-VFP-LABEL: f10:
;CHECK-VFP: vcvt.f64.u32
+;CHECK-VFP-SP-LABEL: f10:
+;CHECK-VFP-SP: bl __aeabi_ui2d
;CHECK-LABEL: f10:
;CHECK: floatunsidf
entry:
diff --git a/test/CodeGen/ARM/frame-register.ll b/test/CodeGen/ARM/frame-register.ll
index 0cc5005ec488..c008b21a2909 100644
--- a/test/CodeGen/ARM/frame-register.ll
+++ b/test/CodeGen/ARM/frame-register.ll
@@ -1,13 +1,13 @@
-; RUN: llc -mtriple arm-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple arm-eabi -frame-pointer=all -filetype asm -o - %s \
; RUN: | FileCheck -check-prefix CHECK-ARM %s
-; RUN: llc -mtriple thumb-eabi -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumb-eabi -frame-pointer=all -filetype asm -o - %s \
; RUN: | FileCheck -check-prefix CHECK-THUMB %s
-; RUN: llc -mtriple arm-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple arm-darwin -frame-pointer=all -filetype asm -o - %s \
; RUN: | FileCheck -check-prefix CHECK-DARWIN-ARM %s
-; RUN: llc -mtriple thumb-darwin -disable-fp-elim -filetype asm -o - %s \
+; RUN: llc -mtriple thumb-darwin -frame-pointer=all -filetype asm -o - %s \
; RUN: | FileCheck -check-prefix CHECK-DARWIN-THUMB %s
declare void @callee(i32)
diff --git a/test/CodeGen/ARM/fusedMAC.ll b/test/CodeGen/ARM/fusedMAC.ll
index 6f6cdc11491e..6b922895b006 100644
--- a/test/CodeGen/ARM/fusedMAC.ll
+++ b/test/CodeGen/ARM/fusedMAC.ll
@@ -1,4 +1,8 @@
; RUN: llc < %s -mtriple=armv7-eabi -mattr=+neon,+vfp4 -fp-contract=fast | FileCheck %s
+; RUN: llc < %s -mtriple=arm-arm-eabi -mcpu=cortex-m7 -fp-contract=fast | FileCheck %s
+; RUN: llc < %s -mtriple=arm-arm-eabi -mcpu=cortex-m4 -fp-contract=fast | FileCheck %s -check-prefix=DONT-FUSE
+; RUN: llc < %s -mtriple=arm-arm-eabi -mcpu=cortex-m33 -fp-contract=fast | FileCheck %s -check-prefix=DONT-FUSE
+
; Check generated fused MAC and MLS.
define double @fusedMACTest1(double %d1, double %d2, double %d3) {
@@ -12,6 +16,11 @@ define double @fusedMACTest1(double %d1, double %d2, double %d3) {
define float @fusedMACTest2(float %f1, float %f2, float %f3) {
;CHECK-LABEL: fusedMACTest2:
;CHECK: vfma.f32
+
+;DONT-FUSE-LABEL: fusedMACTest2:
+;DONT-FUSE: vmul.f32
+;DONT-FUSE-NEXT: vadd.f32
+
%1 = fmul float %f1, %f2
%2 = fadd float %1, %f3
ret float %2
diff --git a/test/CodeGen/ARM/global-merge-external-2.ll b/test/CodeGen/ARM/global-merge-external-2.ll
new file mode 100644
index 000000000000..0abca99e6b82
--- /dev/null
+++ b/test/CodeGen/ARM/global-merge-external-2.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=arm-eabi -arm-global-merge | FileCheck %s --check-prefixes=CHECK,CHECK-MERGE
+; RUN: llc < %s -mtriple=arm-eabi -arm-global-merge -global-merge-on-external=true | FileCheck %s --check-prefixes=CHECK,CHECK-MERGE
+; RUN: llc < %s -mtriple=arm-eabi -arm-global-merge -global-merge-on-external=false | FileCheck %s --check-prefixes=CHECK,CHECK-NO-MERGE
+; RUN: llc < %s -mtriple=arm-macho -arm-global-merge | FileCheck %s --check-prefixes=CHECK,CHECK-NO-MERGE
+; RUN: llc < %s -mtriple=arm-eabi -arm-global-merge -relocation-model=pic | FileCheck %s --check-prefixes=CHECK,CHECK-NO-MERGE
+; RUN: llc < %s -mtriple=thumbv7-win32 -arm-global-merge | FileCheck %s --check-prefixes=CHECK-WIN32
+
+@x = global i32 0, align 4
+@y = global i32 0, align 4
+@z = internal global i32 1, align 4
+
+define void @f1(i32 %a1, i32 %a2) {
+;CHECK: f1:
+;CHECK: ldr {{r[0-9]+}}, [[LABEL1:\.?LCPI[0-9]+_[0-9]]]
+;CHECK: [[LABEL1]]:
+;CHECK-MERGE: .long .L_MergedGlobals
+;CHECK-NO-MERGE: .long {{_?x}}
+;CHECK-WIN32: f1:
+;CHECK-WIN32: movw [[REG1:r[0-9]+]], :lower16:.L_MergedGlobals
+;CHECK-WIN32: movt [[REG1]], :upper16:.L_MergedGlobals
+ store i32 %a1, i32* @x, align 4
+ store i32 %a2, i32* @y, align 4
+ ret void
+}
+
+define void @g1(i32 %a1, i32 %a2) {
+;CHECK: g1:
+;CHECK: ldr {{r[0-9]+}}, [[LABEL2:\.?LCPI[0-9]+_[0-9]]]
+;CHECK: ldr {{r[0-9]+}}, [[LABEL3:\.?LCPI[0-9]+_[0-9]]]
+;CHECK: [[LABEL2]]:
+;CHECK-MERGE: .long {{_?z}}
+;CHECK: [[LABEL3]]:
+;CHECK-MERGE: .long .L_MergedGlobals
+;CHECK-NO-MERGE: .long {{_?y}}
+;CHECK-WIN32: g1:
+;CHECK-WIN32: movw [[REG2:r[0-9]+]], :lower16:z
+;CHECK-WIN32: movt [[REG2]], :upper16:z
+;CHECK-WIN32: movw [[REG3:r[0-9]+]], :lower16:.L_MergedGlobals
+;CHECK-WIN32: movt [[REG3]], :upper16:.L_MergedGlobals
+ store i32 %a1, i32* @y, align 4
+ store i32 %a2, i32* @z, align 4
+ ret void
+}
+
+;CHECK-NO-MERGE-NOT: .globl .L_MergedGlobals
+
+;CHECK-MERGE: .type .L_MergedGlobals,%object
+;CHECK-MERGE: .local .L_MergedGlobals
+;CHECK-MERGE: .comm .L_MergedGlobals,8,4
+;CHECK-WIN32: .lcomm .L_MergedGlobals,8,4
+
+;CHECK-MERGE: .globl x
+;CHECK-MERGE: .set x, .L_MergedGlobals
+;CHECK-MERGE: .size x, 4
+;CHECK-MERGE: .globl y
+;CHECK-MERGE: .set y, .L_MergedGlobals+4
+;CHECK-MERGE: .size y, 4
+;CHECK-MERGE-NOT: .set z, .L_MergedGlobals+8
+
+
+;CHECK-WIN32: .globl x
+;CHECK-WIN32: .set x, .L_MergedGlobals
+;CHECK-WIN32: .globl y
+;CHECK-WIN32: .set y, .L_MergedGlobals+4
+;CHECK-WIN32-NOT: .set z, .L_MergedGlobals+8
diff --git a/test/CodeGen/ARM/half.ll b/test/CodeGen/ARM/half.ll
index ad039b9d6865..a334adc37916 100644
--- a/test/CodeGen/ARM/half.ll
+++ b/test/CodeGen/ARM/half.ll
@@ -1,6 +1,8 @@
; RUN: llc < %s -mtriple=thumbv7-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-OLD
; RUN: llc < %s -mtriple=thumbv7s-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-F16
; RUN: llc < %s -mtriple=thumbv8-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
+; RUN: llc < %s -mtriple=armv8r-none-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
+; RUN: llc < %s -mtriple=armv8r-none-none-eabi -mattr=+fp-only-sp | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8-SP
define void @test_load_store(half* %in, half* %out) {
; CHECK-LABEL: test_load_store:
@@ -33,6 +35,7 @@ define float @test_extend32(half* %addr) {
; CHECK-OLD: b.w ___extendhfsf2
; CHECK-F16: vcvtb.f32.f16
; CHECK-V8: vcvtb.f32.f16
+; CHECK-V8-SP: vcvtb.f32.f16
%val16 = load half, half* %addr
%val32 = fpext half %val16 to float
ret float %val32
@@ -46,6 +49,8 @@ define double @test_extend64(half* %addr) {
; CHECK-F16: vcvtb.f32.f16
; CHECK-F16: vcvt.f64.f32
; CHECK-V8: vcvtb.f64.f16
+; CHECK-V8-SP: vcvtb.f32.f16
+; CHECK-V8-SP: bl __aeabi_f2d
%val16 = load half, half* %addr
%val32 = fpext half %val16 to double
ret double %val32
@@ -57,6 +62,7 @@ define void @test_trunc32(float %in, half* %addr) {
; CHECK-OLD: bl ___truncsfhf2
; CHECK-F16: vcvtb.f16.f32
; CHECK-V8: vcvtb.f16.f32
+; CHECK-V8-SP: vcvtb.f16.f32
%val16 = fptrunc float %in to half
store half %val16, half* %addr
ret void
@@ -68,6 +74,7 @@ define void @test_trunc64(double %in, half* %addr) {
; CHECK-OLD: bl ___truncdfhf2
; CHECK-F16: bl ___truncdfhf2
; CHECK-V8: vcvtb.f16.f64
+; CHECK-V8-SP: bl __aeabi_d2h
%val16 = fptrunc double %in to half
store half %val16, half* %addr
ret void
diff --git a/test/CodeGen/ARM/hello.ll b/test/CodeGen/ARM/hello.ll
index 264105994935..bdeb41decc84 100644
--- a/test/CodeGen/ARM/hello.ll
+++ b/test/CodeGen/ARM/hello.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=arm-eabi %s -o /dev/null
; RUN: llc -mtriple=armv6-linux-gnueabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=armv6-linux-gnu --disable-fp-elim %s -o - \
+; RUN: llc -mtriple=armv6-linux-gnu --frame-pointer=all %s -o - \
; RUN: | FileCheck %s -check-prefix CHECK-FP-ELIM
; RUN: llc -mtriple=armv6-apple-ios %s -o - \
diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
index 6f1e18ffdfca..93ba3fbc8530 100644
--- a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -30,10 +30,8 @@ define void @i24_and_or(i24* %a) {
; LE-LABEL: i24_and_or:
; LE: @ %bb.0:
; LE-NEXT: ldrh r1, [r0]
-; LE-NEXT: mov r2, #16256
-; LE-NEXT: orr r2, r2, #49152
; LE-NEXT: orr r1, r1, #384
-; LE-NEXT: and r1, r1, r2
+; LE-NEXT: bic r1, r1, #127
; LE-NEXT: strh r1, [r0]
; LE-NEXT: mov pc, lr
;
diff --git a/test/CodeGen/ARM/inline-asm-clobber.ll b/test/CodeGen/ARM/inline-asm-clobber.ll
new file mode 100644
index 000000000000..cb2069c20bf6
--- /dev/null
+++ b/test/CodeGen/ARM/inline-asm-clobber.ll
@@ -0,0 +1,27 @@
+; RUN: llc <%s -mtriple=arm-none-eabi 2>&1 | FileCheck %s -check-prefix=CHECK
+
+; RUN: llc <%s -mtriple=arm-none-eabi -relocation-model=rwpi 2>&1 \
+; RUN: | FileCheck %s -check-prefix=RWPI
+
+; RUN: llc <%s -mtriple=arm-none-eabi --frame-pointer=all 2>&1 \
+; RUN: | FileCheck %s -check-prefix=NO_FP_ELIM
+
+; CHECK: warning: inline asm clobber list contains reserved registers: SP, PC
+; CHECK: warning: inline asm clobber list contains reserved registers: R11
+; RWPI: warning: inline asm clobber list contains reserved registers: R9, SP, PC
+; RWPI: warning: inline asm clobber list contains reserved registers: R11
+; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11, SP, PC
+; NO_FP_ELIM: warning: inline asm clobber list contains reserved registers: R11
+
+define void @foo() nounwind {
+ call void asm sideeffect "mov r7, #1",
+ "~{r9},~{r11},~{r12},~{lr},~{sp},~{pc},~{r10}"()
+ ret void
+}
+
+define i32 @bar(i32 %i) {
+ %vla = alloca i32, i32 %i, align 4
+ tail call void asm sideeffect "mov r7, #1", "~{r11}"()
+ %1 = load volatile i32, i32* %vla, align 4
+ ret i32 %1
+}
diff --git a/test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll b/test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll
deleted file mode 100644
index 45bdb124e032..000000000000
--- a/test/CodeGen/ARM/inline-asm-operand-implicit-cast.ll
+++ /dev/null
@@ -1,122 +0,0 @@
-; RUN: llc -mtriple armv7-arm-linux-gnueabihf -O2 -mcpu=cortex-a7 < %s | FileCheck %s
-
-; Check support for returning a float in GPR with soft float ABI
-define arm_aapcscc float @zerobits_float_soft() #0 {
-; CHECK-LABEL: zerobits_float_soft
-; CHECK: mov r0, #0
- %1 = tail call float asm "mov ${0}, #0", "=&r"()
- ret float %1
-}
-
-; Check support for returning a double in GPR with soft float ABI
-define arm_aapcscc double @zerobits_double_soft() #0 {
-; CHECK-LABEL: zerobits_double_soft
-; CHECK: mov r0, #0
-; CHECK-NEXT: mov r1, #0
- %1 = tail call double asm "mov ${0:Q}, #0\0Amov ${0:R}, #0", "=&r"()
- ret double %1
-}
-
-; Check support for returning a float in GPR with matching float input with
-; soft float ABI
-define arm_aapcscc float @flt_gpr_matching_in_op_soft(float %f) #0 {
-; CHECK-LABEL: flt_gpr_matching_in_op_soft
-; CHECK: mov r0, r0
- %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
- ret float %1
-}
-
-; Check support for returning a double in GPR with matching double input with
-; soft float ABI
-define arm_aapcscc double @dbl_gpr_matching_in_op_soft(double %d) #0 {
-; CHECK-LABEL: dbl_gpr_matching_in_op_soft
-; CHECK: mov r1, r0
- %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
- ret double %1
-}
-
-; Check support for returning a float in specific GPR with matching float input
-; with soft float ABI
-define arm_aapcscc float @flt_gpr_matching_spec_reg_in_op_soft(float %f) #0 {
-; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_soft
-; CHECK: mov r3, r3
- %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
- ret float %1
-}
-
-; Check support for returning a double in specific GPR with matching double
-; input with soft float ABI
-define arm_aapcscc double @dbl_gpr_matching_spec_reg_in_op_soft(double %d) #0 {
-; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_soft
-; CHECK: mov r3, r2
- %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
- ret double %1
-}
-
-attributes #0 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="true" }
-
-
-; Check support for returning a float in GPR with hard float ABI
-define float @zerobits_float_hard() #1 {
-; CHECK-LABEL: zerobits_float_hard
-; CHECK: mov r0, #0
-; CHECK: vmov s0, r0
- %1 = tail call float asm "mov ${0}, #0", "=&r"()
- ret float %1
-}
-
-; Check support for returning a double in GPR with hard float ABI
-define double @zerobits_double_hard() #1 {
-; CHECK-LABEL: zerobits_double_hard
-; CHECK: mov r0, #0
-; CHECK-NEXT: mov r1, #0
-; CHECK: vmov d0, r0, r1
- %1 = tail call double asm "mov ${0:Q}, #0\0Amov ${0:R}, #0", "=&r"()
- ret double %1
-}
-
-; Check support for returning a float in GPR with matching float input with
-; hard float ABI
-define float @flt_gpr_matching_in_op_hard(float %f) #1 {
-; CHECK-LABEL: flt_gpr_matching_in_op_hard
-; CHECK: vmov r0, s0
-; CHECK: mov r0, r0
-; CHECK: vmov s0, r0
- %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
- ret float %1
-}
-
-; Check support for returning a double in GPR with matching double input with
-; hard float ABI
-define double @dbl_gpr_matching_in_op_hard(double %d) #1 {
-; CHECK-LABEL: dbl_gpr_matching_in_op_hard
-; CHECK: vmov r0, r1, d0
-; CHECK: mov r1, r0
-; CHECK: vmov d0, r0, r1
- %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
- ret double %1
-}
-
-; Check support for returning a float in specific GPR with matching float
-; input with hard float ABI
-define float @flt_gpr_matching_spec_reg_in_op_hard(float %f) #1 {
-; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_hard
-; CHECK: vmov r3, s0
-; CHECK: mov r3, r3
-; CHECK: vmov s0, r3
- %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
- ret float %1
-}
-
-; Check support for returning a double in specific GPR with matching double
-; input with hard float ABI
-define double @dbl_gpr_matching_spec_reg_in_op_hard(double %d) #1 {
-; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_hard
-; CHECK: vmov r2, r3, d0
-; CHECK: mov r3, r2
-; CHECK: vmov d0, r2, r3
- %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
- ret double %1
-}
-
-attributes #1 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/inlineasm-X-allocation.ll b/test/CodeGen/ARM/inlineasm-X-allocation.ll
index e88d668f5ccf..b2cb932f9055 100644
--- a/test/CodeGen/ARM/inlineasm-X-allocation.ll
+++ b/test/CodeGen/ARM/inlineasm-X-allocation.ll
@@ -1,21 +1,20 @@
-; RUN: llc -mtriple=armv7-none-eabi -mattr=-neon,-vfpv2 %s -o - | FileCheck %s -check-prefix=novfp
-; RUN: llc -mtriple=armv7-none-eabi -mattr=+neon %s -float-abi=hard -o - | FileCheck %s -check-prefix=vfp
+; RUN: llc -mtriple=armv7-none-eabi -mattr=-neon,-vfp2 %s -o - | FileCheck %s -check-prefixes=COMMON,NOVFP
+; RUN: llc -mtriple=armv7-none-eabi -mattr=+neon %s -float-abi=hard -o - | FileCheck %s -check-prefixes=COMMON,VFP
-; vfp-LABEL: f1
-; vfp-CHECK: vadd.f32 s0, s0, s0
+; The intent here is to test "X", which says that any operand whatsoever is allowed.
+; Using this mechanism, we want to test toggling allocating GPR or SPR registers
+; depending on whether the float registers are available. Thus, the mnemonic is
+; totally irrelevant here, which is why we use FOO and also comment it out using "@"
+; to avoid assembler errors.
-; In the novfp case, the compiler is forced to assign a core register.
-; Although this register class can't be used with the vadd.f32 instruction,
-; the compiler behaved as expected since it is allowed to emit anything.
+; Note that this kind of IR can be generated by a function such as:
+; void f1(float f) {asm volatile ("@FOO $0, $0" : : "X" (f));}
-; novfp-LABEL: f1
-; novfp-CHECK: vadd.f32 r0, r0, r0
-
-; This can be generated by a function such as:
-; void f1(float f) {asm volatile ("add.f32 $0, $0, $0" : : "X" (f));}
-
-define arm_aapcs_vfpcc void @f1(float %f) {
+define arm_aapcs_vfpcc void @func(float %f) {
+; COMMON-LABEL: func
+; NOVFP: FOO r0, r0
+; VFP: FOO s0, s0
entry:
- call void asm sideeffect "vadd.f32 $0, $0, $0", "X" (float %f) nounwind
+ call void asm sideeffect "@FOO $0, $0", "X" (float %f) nounwind
ret void
}
diff --git a/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll b/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll
new file mode 100644
index 000000000000..7b98f0f0de31
--- /dev/null
+++ b/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll
@@ -0,0 +1,307 @@
+; RUN: llc -mtriple armv7-arm-linux-gnueabihf -O2 -mcpu=cortex-a7 < %s | FileCheck %s
+
+%struct.twofloat = type { float, float }
+%struct.twodouble = type { double, double }
+
+; Check support for returning a float in GPR with soft float ABI
+define arm_aapcscc float @zerobits_float_soft() #0 {
+; CHECK-LABEL: zerobits_float_soft
+; CHECK: mov r0, #0
+ %1 = tail call float asm "mov ${0}, #0", "=&r"()
+ ret float %1
+}
+
+; Check support for returning a double in GPR with soft float ABI
+define arm_aapcscc double @zerobits_double_soft() #0 {
+; CHECK-LABEL: zerobits_double_soft
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+ %1 = tail call double asm "mov ${0:Q}, #0\0Amov ${0:R}, #0", "=&r"()
+ ret double %1
+}
+
+; Check support for returning a float in GPR with matching float input with
+; soft float ABI
+define arm_aapcscc float @flt_gpr_matching_in_op_soft(float %f) #0 {
+; CHECK-LABEL: flt_gpr_matching_in_op_soft
+; CHECK: mov r0, r0
+ %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
+ ret float %1
+}
+
+; Check support for returning a double in GPR with matching double input with
+; soft float ABI
+define arm_aapcscc double @dbl_gpr_matching_in_op_soft(double %d) #0 {
+; CHECK-LABEL: dbl_gpr_matching_in_op_soft
+; CHECK: mov r1, r0
+ %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
+ ret double %1
+}
+
+; Check support for returning a float in specific GPR with matching float input
+; with soft float ABI
+define arm_aapcscc float @flt_gpr_matching_spec_reg_in_op_soft(float %f) #0 {
+; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r3
+ %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
+ ret float %1
+}
+
+; Check support for returning a double in specific GPR with matching double
+; input with soft float ABI
+define arm_aapcscc double @dbl_gpr_matching_spec_reg_in_op_soft(double %d) #0 {
+; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r2
+ %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
+ ret double %1
+}
+
+; Check support for returning several float in GPR
+define arm_aapcscc float @zerobits_float_convoluted_soft() #0 {
+; CHECK-LABEL: zerobits_float_convoluted_soft
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+ %1 = call { float, float } asm "mov $0, #0; mov $1, #0", "=r,=r"()
+ %asmresult = extractvalue { float, float } %1, 0
+ %asmresult1 = extractvalue { float, float } %1, 1
+ %add = fadd float %asmresult, %asmresult1
+ ret float %add
+}
+
+; Check support for returning several double in GPR
+define double @zerobits_double_convoluted_soft() #0 {
+; CHECK-LABEL: zerobits_double_convoluted_soft
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: mov r3, #0
+ %1 = call { double, double } asm "mov ${0:Q}, #0; mov ${0:R}, #0; mov ${1:Q}, #0; mov ${1:R}, #0", "=r,=r"()
+ %asmresult = extractvalue { double, double } %1, 0
+ %asmresult1 = extractvalue { double, double } %1, 1
+ %add = fadd double %asmresult, %asmresult1
+ ret double %add
+}
+
+; Check support for returning several floats in GPRs with matching float inputs
+; with soft float ABI
+define arm_aapcscc float @flt_gprs_matching_in_op_soft(float %f1, float %f2) #0 {
+; CHECK-LABEL: flt_gprs_matching_in_op_soft
+; CHECK: mov r0, r0
+; CHECK-NEXT: mov r1, r1
+ %1 = call { float, float } asm "mov $0, $2; mov $1, $3", "=&r,=&r,0,1"(float %f1, float %f2)
+ %asmresult1 = extractvalue { float, float } %1, 0
+ %asmresult2 = extractvalue { float, float } %1, 1
+ %add = fadd float %asmresult1, %asmresult2
+ ret float %add
+}
+
+; Check support for returning several double in GPRs with matching double input
+; with soft float ABI
+define arm_aapcscc double @dbl_gprs_matching_in_op_soft(double %d1, double %d2) #0 {
+; CHECK-LABEL: dbl_gprs_matching_in_op_soft
+; CHECK: mov r1, r0
+; CHECK-NEXT: mov r3, r2
+ %1 = call { double, double } asm "mov ${0:R}, ${2:Q}; mov ${1:R}, ${3:Q}", "=&r,=&r,0,1"(double %d1, double %d2)
+ %asmresult1 = extractvalue { double, double } %1, 0
+ %asmresult2 = extractvalue { double, double } %1, 1
+ %add = fadd double %asmresult1, %asmresult2
+ ret double %add
+}
+
+; Check support for returning several float in specific GPRs with matching
+; float input with soft float ABI
+define arm_aapcscc float @flt_gprs_matching_spec_reg_in_op_soft(float %f1, float %f2) #0 {
+; CHECK-LABEL: flt_gprs_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r3
+; CHECK-NEXT: mov r4, r4
+ %1 = call { float, float } asm "mov $0, $2; mov $1, $3", "=&{r3},=&{r4},0,1"(float %f1, float %f2)
+ %asmresult1 = extractvalue { float, float } %1, 0
+ %asmresult2 = extractvalue { float, float } %1, 1
+ %add = fadd float %asmresult1, %asmresult2
+ ret float %add
+}
+
+; Check support for returning several double in specific GPRs with matching
+; double input with soft float ABI
+define arm_aapcscc double @dbl_gprs_matching_spec_reg_in_op_soft(double %d1, double %d2) #0 {
+; CHECK-LABEL: dbl_gprs_matching_spec_reg_in_op_soft
+; CHECK: mov r3, r2
+; CHECK-NEXT: mov r5, r4
+ %1 = call { double, double } asm "mov ${0:R}, ${2:Q}; mov ${1:R}, ${3:Q}", "=&{r2},=&{r4},0,1"(double %d1, double %d2)
+ %asmresult1 = extractvalue { double, double } %1, 0
+ %asmresult2 = extractvalue { double, double } %1, 1
+ %add = fadd double %asmresult1, %asmresult2
+ ret double %add
+}
+
+attributes #0 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="true" }
+
+
+; Check support for returning a float in GPR with hard float ABI
+define float @zerobits_float_hard() #1 {
+; CHECK-LABEL: zerobits_float_hard
+; CHECK: mov r0, #0
+; CHECK: vmov s0, r0
+ %1 = tail call float asm "mov ${0}, #0", "=&r"()
+ ret float %1
+}
+
+; Check support for returning a double in GPR with hard float ABI
+define double @zerobits_double_hard() #1 {
+; CHECK-LABEL: zerobits_double_hard
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK: vmov d0, r0, r1
+ %1 = tail call double asm "mov ${0:Q}, #0\0Amov ${0:R}, #0", "=&r"()
+ ret double %1
+}
+
+; Check support for returning a float in GPR with matching float input with
+; hard float ABI
+define float @flt_gpr_matching_in_op_hard(float %f) #1 {
+; CHECK-LABEL: flt_gpr_matching_in_op_hard
+; CHECK: vmov r0, s0
+; CHECK: mov r0, r0
+; CHECK: vmov s0, r0
+ %1 = call float asm "mov $0, $1", "=&r,0"(float %f)
+ ret float %1
+}
+
+; Check support for returning a double in GPR with matching double input with
+; hard float ABI
+define double @dbl_gpr_matching_in_op_hard(double %d) #1 {
+; CHECK-LABEL: dbl_gpr_matching_in_op_hard
+; CHECK: vmov r0, r1, d0
+; CHECK: mov r1, r0
+; CHECK: vmov d0, r0, r1
+ %1 = call double asm "mov ${0:R}, ${1:Q}", "=&r,0"(double %d)
+ ret double %1
+}
+
+; Check support for returning a float in specific GPR with matching float
+; input with hard float ABI
+define float @flt_gpr_matching_spec_reg_in_op_hard(float %f) #1 {
+; CHECK-LABEL: flt_gpr_matching_spec_reg_in_op_hard
+; CHECK: vmov r3, s0
+; CHECK: mov r3, r3
+; CHECK: vmov s0, r3
+ %1 = call float asm "mov $0, $1", "=&{r3},0"(float %f)
+ ret float %1
+}
+
+; Check support for returning a double in specific GPR with matching double
+; input with hard float ABI
+define double @dbl_gpr_matching_spec_reg_in_op_hard(double %d) #1 {
+; CHECK-LABEL: dbl_gpr_matching_spec_reg_in_op_hard
+; CHECK: vmov r2, r3, d0
+; CHECK: mov r3, r2
+; CHECK: vmov d0, r2, r3
+ %1 = call double asm "mov ${0:R}, ${1:Q}", "=&{r2},0"(double %d)
+ ret double %1
+}
+
+; Check support for returning several float in GPR
+define %struct.twofloat @zerobits_float_convoluted_hard() #1 {
+; CHECK-LABEL: zerobits_float_convoluted_hard
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK: vmov s0, r0
+; CHECK-NEXT: vmov s1, r1
+ %1 = call { float, float } asm "mov $0, #0; mov $1, #0", "=r,=r"()
+ %asmresult1 = extractvalue { float, float } %1, 0
+ %asmresult2 = extractvalue { float, float } %1, 1
+ %partialres = insertvalue %struct.twofloat undef, float %asmresult1, 0
+ %res = insertvalue %struct.twofloat %partialres, float %asmresult2, 1
+ ret %struct.twofloat %res
+}
+
+; Check support for returning several double in GPR
+define %struct.twodouble @zerobits_double_convoluted_hard() #1 {
+; CHECK-LABEL: zerobits_double_convoluted_hard
+; CHECK: mov r0, #0
+; CHECK-NEXT: mov r1, #0
+; CHECK-NEXT: mov r2, #0
+; CHECK-NEXT: mov r3, #0
+; CHECK: vmov d0, r0, r1
+; CHECK-NEXT: vmov d1, r2, r3
+ %1 = call { double, double } asm "mov ${0:Q}, #0; mov ${0:R}, #0; mov ${1:Q}, #0; mov ${1:R}, #0", "=r,=r"()
+ %asmresult1 = extractvalue { double, double } %1, 0
+ %asmresult2 = extractvalue { double, double } %1, 1
+ %partialres = insertvalue %struct.twodouble undef, double %asmresult1, 0
+ %res = insertvalue %struct.twodouble %partialres, double %asmresult2, 1
+ ret %struct.twodouble %res
+}
+
+; Check support for returning several floats in GPRs with matching float inputs
+; with hard float ABI
+define %struct.twofloat @flt_gprs_matching_in_op_hard(float %f1, float %f2) #1 {
+; CHECK-LABEL: flt_gprs_matching_in_op_hard
+; CHECK: vmov r0, s0
+; CHECK-NEXT: vmov r1, s1
+; CHECK: mov r0, r0
+; CHECK-NEXT: mov r1, r1
+; CHECK: vmov s0, r0
+; CHECK-NEXT: vmov s1, r1
+ %1 = call { float, float } asm "mov $0, $2; mov $1, $3", "=&r,=&r,0,1"(float %f1, float %f2)
+ %asmresult1 = extractvalue { float, float } %1, 0
+ %asmresult2 = extractvalue { float, float } %1, 1
+ %partialres = insertvalue %struct.twofloat undef, float %asmresult1, 0
+ %res = insertvalue %struct.twofloat %partialres, float %asmresult2, 1
+ ret %struct.twofloat %res
+}
+
+; Check support for returning several double in GPRs with matching double input
+; with hard float ABI
+define %struct.twodouble @dbl_gprs_matching_in_op_hard(double %d1, double %d2) #1 {
+; CHECK-LABEL: dbl_gprs_matching_in_op_hard
+; CHECK: vmov r0, r1, d0
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK: mov r1, r0
+; CHECK-NEXT: mov r3, r2
+; CHECK: vmov d0, r0, r1
+; CHECK-NEXT: vmov d1, r2, r3
+ %1 = call { double, double } asm "mov ${0:R}, ${2:Q}; mov ${1:R}, ${3:Q}", "=&r,=&r,0,1"(double %d1, double %d2)
+ %asmresult1 = extractvalue { double, double } %1, 0
+ %asmresult2 = extractvalue { double, double } %1, 1
+ %partialres = insertvalue %struct.twodouble undef, double %asmresult1, 0
+ %res = insertvalue %struct.twodouble %partialres, double %asmresult2, 1
+ ret %struct.twodouble %res
+}
+
+; Check support for returning several float in specific GPRs with matching
+; float input with hard float ABI
+define %struct.twofloat @flt_gprs_matching_spec_reg_in_op_hard(float %f1, float %f2) #1 {
+; CHECK-LABEL: flt_gprs_matching_spec_reg_in_op_hard
+; CHECK: vmov r3, s0
+; CHECK-NEXT: vmov r4, s1
+; CHECK: mov r3, r3
+; CHECK-NEXT: mov r4, r4
+; CHECK: vmov s0, r3
+; CHECK-NEXT: vmov s1, r4
+ %1 = call { float, float } asm "mov $0, $2; mov $1, $3", "=&{r3},=&{r4},0,1"(float %f1, float %f2)
+ %asmresult1 = extractvalue { float, float } %1, 0
+ %asmresult2 = extractvalue { float, float } %1, 1
+ %partialres = insertvalue %struct.twofloat undef, float %asmresult1, 0
+ %res = insertvalue %struct.twofloat %partialres, float %asmresult2, 1
+ ret %struct.twofloat %res
+}
+
+; Check support for returning several double in specific GPRs with matching
+; double input with hard float ABI
+define %struct.twodouble @dbl_gprs_matching_spec_reg_in_op_hard(double %d1, double %d2) #1 {
+; CHECK-LABEL: dbl_gprs_matching_spec_reg_in_op_hard
+; CHECK: vmov r2, r3, d0
+; CHECK-NEXT: vmov r4, r5, d1
+; CHECK: mov r3, r2
+; CHECK-NEXT: mov r5, r4
+; CHECK: vmov d0, r2, r3
+; CHECK-NEXT: vmov d1, r4, r5
+ %1 = call { double, double } asm "mov ${0:R}, ${2:Q}; mov ${1:R}, ${3:Q}", "=&{r2},=&{r4},0,1"(double %d1, double %d2)
+ %asmresult1 = extractvalue { double, double } %1, 0
+ %asmresult2 = extractvalue { double, double } %1, 1
+ %partialres = insertvalue %struct.twodouble undef, double %asmresult1, 0
+ %res = insertvalue %struct.twodouble %partialres, double %asmresult2, 1
+ ret %struct.twodouble %res
+}
+
+attributes #1 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="false" }
diff --git a/test/CodeGen/ARM/intrinsics-overflow.ll b/test/CodeGen/ARM/intrinsics-overflow.ll
index 835be7e949d3..d4c20dfacce6 100644
--- a/test/CodeGen/ARM/intrinsics-overflow.ll
+++ b/test/CodeGen/ARM/intrinsics-overflow.ll
@@ -38,14 +38,9 @@ define i32 @sadd_overflow(i32 %a, i32 %b) #0 {
; ARM: movvc r[[R0]], #0
; ARM: mov pc, lr
- ; THUMBV6: mov r[[R2:[0-9]+]], r[[R0:[0-9]+]]
- ; THUMBV6: adds r[[R3:[0-9]+]], r[[R0]], r[[R1:[0-9]+]]
- ; THUMBV6: movs r[[R0]], #0
- ; THUMBV6: movs r[[R1]], #1
- ; THUMBV6: cmp r[[R3]], r[[R2]]
- ; THUMBV6: bvc .L[[LABEL:.*]]
- ; THUMBV6: mov r[[R0]], r[[R1]]
- ; THUMBV6: .L[[LABEL]]:
+ ; THUMBV6: adds r1, r0, r1
+ ; THUMBV6: cmp r1, r0
+ ; THUMBV6: bvc .LBB1_2
; THUMBV7: adds r[[R2:[0-9]+]], r[[R0]], r[[R1:[0-9]+]]
; THUMBV7: mov.w r[[R0:[0-9]+]], #1
@@ -94,12 +89,8 @@ define i32 @ssub_overflow(i32 %a, i32 %b) #0 {
; ARM: cmp r[[R0]], r[[R1]]
; ARM: movvc r[[R2]], #0
- ; THUMBV6: movs r[[R0]], #0
- ; THUMBV6: movs r[[R3:[0-9]+]], #1
- ; THUMBV6: cmp r[[R2]], r[[R1:[0-9]+]]
- ; THUMBV6: bvc .L[[LABEL:.*]]
- ; THUMBV6: mov r[[R0]], r[[R3]]
- ; THUMBV6: .L[[LABEL]]:
+ ; THUMBV6: cmp r0, r1
+ ; THUMBV6: bvc .LBB3_2
; THUMBV7: movs r[[R2:[0-9]+]], #1
; THUMBV7: cmp r[[R0:[0-9]+]], r[[R1:[0-9]+]]
diff --git a/test/CodeGen/ARM/invalid-target.ll b/test/CodeGen/ARM/invalid-target.ll
index bb0ada4c2fdc..d20bc738db29 100644
--- a/test/CodeGen/ARM/invalid-target.ll
+++ b/test/CodeGen/ARM/invalid-target.ll
@@ -22,11 +22,11 @@
; RUN: not llc -mtriple aarch64invalid-linux-gnu %s -o - 2>&1 | \
; RUN: FileCheck %s --check-prefix=AARCH64INVALID
-; ARMVINVALID: error: unable to get target for 'armvinvalid--linux-gnueabi'
-; ARMEBVINVALID: error: unable to get target for 'armebvinvalid--linux-gnueabi'
-; THUMBVINVALID: error: unable to get target for 'thumbvinvalid--linux-gnueabi'
-; THUMBEBVINVALID: error: unable to get target for 'thumbebvinvalid--linux-gnueabi'
-; THUMBV2: error: unable to get target for 'thumbv2--linux-gnueabi'
-; THUMBV3: error: unable to get target for 'thumbv3--linux-gnueabi'
-; ARM64INVALID: error: unable to get target for 'arm64invalid--linux-gnu'
-; AARCH64INVALID: error: unable to get target for 'aarch64invalid--linux-gnu'
+; ARMVINVALID: error: unable to get target for 'armvinvalid-unknown-linux-gnueabi'
+; ARMEBVINVALID: error: unable to get target for 'armebvinvalid-unknown-linux-gnueabi'
+; THUMBVINVALID: error: unable to get target for 'thumbvinvalid-unknown-linux-gnueabi'
+; THUMBEBVINVALID: error: unable to get target for 'thumbebvinvalid-unknown-linux-gnueabi'
+; THUMBV2: error: unable to get target for 'thumbv2-unknown-linux-gnueabi'
+; THUMBV3: error: unable to get target for 'thumbv3-unknown-linux-gnueabi'
+; ARM64INVALID: error: unable to get target for 'arm64invalid-unknown-linux-gnu'
+; AARCH64INVALID: error: unable to get target for 'aarch64invalid-unknown-linux-gnu'
diff --git a/test/CodeGen/ARM/ldrcppic.ll b/test/CodeGen/ARM/ldrcppic.ll
new file mode 100644
index 000000000000..c7727290d842
--- /dev/null
+++ b/test/CodeGen/ARM/ldrcppic.ll
@@ -0,0 +1,56 @@
+; The failure is caused by ARM LDRcp/PICADD pairs. In PIC mode, the constant pool
+; needs a label to do address computation. This label is emitted when the backend
+; emits PICADD. When the target becomes dead, PICADD will be deleted. Without this
+; patch, LDRcp is dead but not deleted. This leaves a dead constant pool entry
+; referencing a non-existent label, causing an error in the MC object emission pass.
+
+; RUN: llc -relocation-model=pic -mcpu=cortex-a53 %s -filetype=obj -o - | llvm-nm - | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv8-unknown-linux-android"
+
+@_ZN15UsecaseSelector25AllowedImplDefinedFormatsE = external dso_local unnamed_addr constant <{ i32, i32, i32, i32, [12 x i32] }>, align 4
+
+; Function Attrs: noinline nounwind optnone sspstrong uwtable
+define dso_local fastcc void @_ZN15UsecaseSelector26IsAllowedImplDefinedFormatE15ChiBufferFormatj() unnamed_addr #1 align 2 {
+ br label %1
+
+; <label>:1: ; preds = %13, %0
+ %2 = icmp ult i32 undef, 4
+ br i1 %2, label %3, label %14
+
+; <label>:3: ; preds = %1
+ br i1 undef, label %4, label %13
+
+; <label>:4: ; preds = %3
+ %5 = getelementptr inbounds [16 x i32], [16 x i32]* bitcast (<{ i32, i32, i32, i32, [12 x i32] }>* @_ZN15UsecaseSelector25AllowedImplDefinedFormatsE to [16 x i32]*), i32 0, i32 undef
+ %6 = load i32, i32* %5, align 4
+ %7 = icmp eq i32 10, %6
+ br i1 %7, label %9, label %8
+
+; <label>:8: ; preds = %4
+ br i1 undef, label %9, label %12
+
+; <label>:9: ; preds = %8, %4
+ br i1 undef, label %10, label %13
+
+; <label>:10: ; preds = %9
+ br i1 undef, label %11, label %13
+
+; <label>:11: ; preds = %10
+ br label %14
+
+; <label>:12: ; preds = %8
+ br label %14
+
+; <label>:13: ; preds = %10, %9, %3
+ br label %1
+
+; <label>:14: ; preds = %12, %11, %1
+ ret void
+}
+
+attributes #1 = { noinline optnone }
+
+; CHECK: _ZN15UsecaseSelector26IsAllowedImplDefinedFormatE15ChiBufferFormatj
+
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index 7cb3b407bf16..4cdafa72f62e 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -3,7 +3,7 @@
; rdar://6949835
; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s -check-prefix=BASIC -check-prefix=CHECK -check-prefix=NORMAL
; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 -regalloc=greedy | FileCheck %s -check-prefix=GREEDY -check-prefix=CHECK -check-prefix=NORMAL
-; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=swift | FileCheck %s -check-prefix=SWIFT -check-prefix=CHECK -check-prefix=NORMAL
+; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=swift | FileCheck %s -check-prefix=CHECK -check-prefix=NORMAL
; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-assume-misaligned-load-store | FileCheck %s -check-prefix=CHECK -check-prefix=CONSERVATIVE
diff --git a/test/CodeGen/ARM/ldrex-frame-size.ll b/test/CodeGen/ARM/ldrex-frame-size.ll
new file mode 100644
index 000000000000..595540578a00
--- /dev/null
+++ b/test/CodeGen/ARM/ldrex-frame-size.ll
@@ -0,0 +1,36 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -o - %s | FileCheck %s
+
+; This alloca is just large enough that FrameLowering decides it needs a frame
+; to guarantee access, based on the range of ldrex.
+
+; The actual alloca size is a bit of black magic, unfortunately: the real
+; maximum accessible is 1020, but FrameLowering adds 16 bytes to its estimated
+; stack size as a safety margin, so the alloca is not actually what the limit gets
+; compared to. The important point is that we don't go up to ~4096, which is the
+; default with no strange instructions.
+define void @test_large_frame() {
+; CHECK-LABEL: test_large_frame:
+; CHECK: push
+; CHECK: sub.w sp, sp, #1004
+
+ %ptr = alloca i32, i32 251
+
+ %addr = getelementptr i32, i32* %ptr, i32 1
+ call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+ ret void
+}
+
+; This alloca is just on the other side of the limit, so no frame is needed
+define void @test_small_frame() {
+; CHECK-LABEL: test_small_frame:
+; CHECK-NOT: push
+; CHECK: sub.w sp, sp, #1000
+
+ %ptr = alloca i32, i32 250
+
+ %addr = getelementptr i32, i32* %ptr, i32 1
+ call i32 @llvm.arm.ldrex.p0i32(i32* %addr)
+ ret void
+}
+
+declare i32 @llvm.arm.ldrex.p0i32(i32*)
diff --git a/test/CodeGen/ARM/ldstrex-m.ll b/test/CodeGen/ARM/ldstrex-m.ll
index 5b717f7f1ae9..713fb9e3df01 100644
--- a/test/CodeGen/ARM/ldstrex-m.ll
+++ b/test/CodeGen/ARM/ldstrex-m.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.main-none-eabi | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7m-none-eabi -mcpu=cortex-m4 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V7
+; RUN: llc < %s -mtriple=thumbv8m.main-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
+; RUN: llc < %s -mtriple=thumbv8m.base-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8
; CHECK-LABEL: f0:
; CHECK-NOT: ldrexd
@@ -28,7 +28,8 @@ entry:
}
; CHECK-LABEL: f3:
-; CHECK: ldr
+; CHECK-V7: ldr
+; CHECK-V8: lda
define i32 @f3(i32* %p) nounwind readonly {
entry:
%0 = load atomic i32, i32* %p seq_cst, align 4
@@ -36,7 +37,8 @@ entry:
}
; CHECK-LABEL: f4:
-; CHECK: ldrb
+; CHECK-V7: ldrb
+; CHECK-V8: ldab
define i8 @f4(i8* %p) nounwind readonly {
entry:
%0 = load atomic i8, i8* %p seq_cst, align 4
@@ -44,7 +46,8 @@ entry:
}
; CHECK-LABEL: f5:
-; CHECK: str
+; CHECK-V7: str
+; CHECK-V8: stl
define void @f5(i32* %p) nounwind readonly {
entry:
store atomic i32 0, i32* %p seq_cst, align 4
@@ -52,8 +55,10 @@ entry:
}
; CHECK-LABEL: f6:
-; CHECK: ldrex
-; CHECK: strex
+; CHECK-V7: ldrex
+; CHECK-V7: strex
+; CHECK-V8: ldaex
+; CHECK-V8: stlex
define i32 @f6(i32* %p) nounwind readonly {
entry:
%0 = atomicrmw add i32* %p, i32 1 seq_cst
diff --git a/test/CodeGen/ARM/ldstrex.ll b/test/CodeGen/ARM/ldstrex.ll
index 59349f72a8fe..73afa0e27469 100644
--- a/test/CodeGen/ARM/ldstrex.ll
+++ b/test/CodeGen/ARM/ldstrex.ll
@@ -142,6 +142,91 @@ define void @excl_addrmode() {
ret void
}
+define void @test_excl_addrmode_folded() {
+; CHECK-LABEL: test_excl_addrmode_folded:
+ %local = alloca i8, i32 4096
+
+ %local.0 = getelementptr i8, i8* %local, i32 4
+ %local32.0 = bitcast i8* %local.0 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #4]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #4]
+
+ %local.1 = getelementptr i8, i8* %local, i32 1020
+ %local32.1 = bitcast i8* %local.1 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.1)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.1)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #1020]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #1020]
+
+ ret void
+}
+
+define void @test_excl_addrmode_range() {
+; CHECK-LABEL: test_excl_addrmode_range:
+ %local = alloca i8, i32 4096
+
+ %local.0 = getelementptr i8, i8* %local, i32 1024
+ %local32.0 = bitcast i8* %local.0 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[TMP:[0-9]+]], sp
+; CHECK-T2ADDRMODE: add.w r[[ADDR:[0-9]+]], r[[TMP]], #1024
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ ret void
+}
+
+define void @test_excl_addrmode_align() {
+; CHECK-LABEL: test_excl_addrmode_align:
+ %local = alloca i8, i32 4096
+
+ %local.0 = getelementptr i8, i8* %local, i32 2
+ %local32.0 = bitcast i8* %local.0 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[ADDR:[0-9]+]], sp
+; CHECK-T2ADDRMODE: adds r[[ADDR:[0-9]+]], #2
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ ret void
+}
+
+define void @test_excl_addrmode_sign() {
+; CHECK-LABEL: test_excl_addrmode_sign:
+ %local = alloca i8, i32 4096
+
+ %local.0 = getelementptr i8, i8* %local, i32 -4
+ %local32.0 = bitcast i8* %local.0 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: mov r[[ADDR:[0-9]+]], sp
+; CHECK-T2ADDRMODE: subs r[[ADDR:[0-9]+]], #4
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [r[[ADDR]]]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
+
+ ret void
+}
+
+define void @test_excl_addrmode_combination() {
+; CHECK-LABEL: test_excl_addrmode_combination:
+ %local = alloca i8, i32 4096
+ %unused = alloca i8, i32 64
+
+ %local.0 = getelementptr i8, i8* %local, i32 4
+ %local32.0 = bitcast i8* %local.0 to i32*
+ call i32 @llvm.arm.ldrex.p0i32(i32* %local32.0)
+ call i32 @llvm.arm.strex.p0i32(i32 0, i32* %local32.0)
+; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [sp, #68]
+; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [sp, #68]
+
+ ret void
+}
+
+
; LLVM should know, even across basic blocks, that ldrex is setting the high
; bits of its i32 to 0. There should be no zero-extend operation.
define zeroext i8 @test_cross_block_zext_i8(i1 %tst, i8* %addr) {
diff --git a/test/CodeGen/ARM/load_store_opt_clobber_cpsr.mir b/test/CodeGen/ARM/load_store_opt_clobber_cpsr.mir
new file mode 100644
index 000000000000..7a4db88479ba
--- /dev/null
+++ b/test/CodeGen/ARM/load_store_opt_clobber_cpsr.mir
@@ -0,0 +1,33 @@
+# RUN: llc -mtriple=thumbv6m--eabi -verify-machineinstrs -run-pass=arm-ldst-opt %s -o - | FileCheck %s
+
+# Make sure bb.0 isn't transformed: it would incorrectly clobber CPSR.
+#
+# Make sure bb.1 is transformed, so the test doesn't accidentally break.
+
+# CHECK-LABEL: bb.0:
+# CHECK: renamable $r0 = tLDRi renamable $r4, 0, 14, $noreg :: (load 4)
+# CHECK: renamable $r1 = tLDRi renamable $r4, 1, 14, $noreg :: (load 4)
+
+# CHECK-LABEL: bb.1:
+# CHECK: $r4 = tLDMIA_UPD $r4, 14, $noreg, def $r0, def $r1
+# CHECK: $r4, dead $cpsr = tSUBi8 $r4, 8, 14, $noreg
+
+name: foo
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $r2, $r4
+ renamable $r0 = tLDRi renamable $r2, 4, 14, $noreg :: (load 4)
+ dead renamable $r0, $cpsr = tADDi3 killed renamable $r0, 1, 14, $noreg
+ renamable $r0 = tLDRi renamable $r4, 0, 14, $noreg :: (load 4)
+ renamable $r1 = tLDRi renamable $r4, 1, 14, $noreg :: (load 4)
+ tBcc %bb.1, 0, killed $cpsr
+ bb.1:
+ liveins: $r2, $r4
+ renamable $r0 = tLDRi renamable $r2, 4, 14, $noreg :: (load 4)
+ dead renamable $r0, $cpsr = tADDi3 killed renamable $r0, 1, 14, $noreg
+ renamable $r0 = tLDRi renamable $r4, 0, 14, $noreg :: (load 4)
+ renamable $r1 = tLDRi renamable $r4, 1, 14, $noreg :: (load 4)
+ bb.2:
+ liveins: $r4
+ TRAP
diff --git a/test/CodeGen/ARM/load_store_opt_reg_limit.mir b/test/CodeGen/ARM/load_store_opt_reg_limit.mir
new file mode 100644
index 000000000000..11d13399cec9
--- /dev/null
+++ b/test/CodeGen/ARM/load_store_opt_reg_limit.mir
@@ -0,0 +1,40 @@
+# RUN: llc -mtriple=thumbv7--linux-android -verify-machineinstrs -run-pass=arm-ldst-opt %s -o - | FileCheck %s --check-prefix=CHECK-MERGE
+#CHECK-MERGE: foo
+name: foo
+# CHECK-MERGE: VSTMDIA $r4, 14, $noreg, $d15, $d16, $d17, $d18, $d19, $d20, $d21, $d22, $d23, $d24, $d25, $d26, $d27, $d28, $d29, $d30
+# CHECK-MERGE-NEXT: VSTRD $d31, $r4, 32, 14, $noreg :: (store 8)
+# CHECK-MERGE: VSTMDIA killed $r0, 14, $noreg, $d4, $d5, $d6, $d7, $d8, $d9, $d10, $d11, $d12, $d13, $d14
+body: |
+ bb.0:
+ VSTRD $d15, $r4, 0, 14, $noreg :: (store 8)
+ VSTRD $d16, $r4, 2, 14, $noreg :: (store 8)
+ VSTRD $d17, $r4, 4, 14, $noreg :: (store 8)
+ VSTRD $d18, $r4, 6, 14, $noreg :: (store 8)
+ VSTRD $d19, $r4, 8, 14, $noreg :: (store 8)
+ VSTRD $d20, $r4, 10, 14, $noreg :: (store 8)
+ VSTRD $d21, $r4, 12, 14, $noreg :: (store 8)
+ VSTRD $d22, $r4, 14, 14, $noreg :: (store 8)
+ VSTRD $d23, $r4, 16, 14, $noreg :: (store 8)
+ VSTRD $d24, $r4, 18, 14, $noreg :: (store 8)
+ VSTRD $d25, $r4, 20, 14, $noreg :: (store 8)
+ VSTRD $d26, $r4, 22, 14, $noreg :: (store 8)
+ VSTRD $d27, $r4, 24, 14, $noreg :: (store 8)
+ VSTRD $d28, $r4, 26, 14, $noreg :: (store 8)
+ VSTRD $d29, $r4, 28, 14, $noreg :: (store 8)
+ VSTRD $d30, $r4, 30, 14, $noreg :: (store 8)
+ VSTRD $d31, $r4, 32, 14, $noreg :: (store 8)
+ VSTRD $d0, $r4, 34, 14, $noreg :: (store 8)
+ VSTRD $d1, $r4, 36, 14, $noreg :: (store 8)
+ VSTRD $d3, $r4, 38, 14, $noreg :: (store 8)
+ VSTRD $d2, $r4, 40, 14, $noreg :: (store 8)
+ VSTRD $d4, $r4, 42, 14, $noreg :: (store 8)
+ VSTRD $d5, $r4, 44, 14, $noreg :: (store 8)
+ VSTRD $d6, $r4, 46, 14, $noreg :: (store 8)
+ VSTRD $d7, $r4, 48, 14, $noreg :: (store 8)
+ VSTRD $d8, $r4, 50, 14, $noreg :: (store 8)
+ VSTRD $d9, $r4, 52, 14, $noreg :: (store 8)
+ VSTRD $d10, $r4, 54, 14, $noreg :: (store 8)
+ VSTRD $d11, $r4, 56, 14, $noreg :: (store 8)
+ VSTRD $d12, $r4, 58, 14, $noreg :: (store 8)
+ VSTRD $d13, $r4, 60, 14, $noreg :: (store 8)
+ VSTRD $d14, $r4, 62, 14, $noreg :: (store 8)
diff --git a/test/CodeGen/ARM/loop-align-cortex-m.ll b/test/CodeGen/ARM/loop-align-cortex-m.ll
new file mode 100644
index 000000000000..1b41c1b6c3f1
--- /dev/null
+++ b/test/CodeGen/ARM/loop-align-cortex-m.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mtriple=thumbv7m-none-eabi %s -mcpu=cortex-m3 -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7m-none-eabi %s -mcpu=cortex-m4 -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv7m-none-eabi %s -mcpu=cortex-m33 -o - | FileCheck %s
+
+define void @test_loop_alignment(i32* %in, i32* %out) optsize {
+; CHECK-LABEL: test_loop_alignment:
+; CHECK: movs {{r[0-9]+}}, #0
+; CHECK: .p2align 2
+
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %in.addr = getelementptr inbounds i32, i32* %in, i32 %i
+ %lhs = load i32, i32* %in.addr, align 4
+ %res = mul nsw i32 %lhs, 5
+ %out.addr = getelementptr inbounds i32, i32* %out, i32 %i
+ store i32 %res, i32* %out.addr, align 4
+ %i.next = add i32 %i, 1
+ %done = icmp eq i32 %i.next, 1024
+ br i1 %done, label %end, label %loop
+
+end:
+ ret void
+}
+
+define void @test_loop_alignment_minsize(i32* %in, i32* %out) minsize {
+; CHECK-LABEL: test_loop_alignment_minsize:
+; CHECK: movs {{r[0-9]+}}, #0
+; CHECK-NOT: .p2align
+
+entry:
+ br label %loop
+
+loop:
+ %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+ %in.addr = getelementptr inbounds i32, i32* %in, i32 %i
+ %lhs = load i32, i32* %in.addr, align 4
+ %res = mul nsw i32 %lhs, 5
+ %out.addr = getelementptr inbounds i32, i32* %out, i32 %i
+ store i32 %res, i32* %out.addr, align 4
+ %i.next = add i32 %i, 1
+ %done = icmp eq i32 %i.next, 1024
+ br i1 %done, label %end, label %loop
+
+end:
+ ret void
+}
diff --git a/test/CodeGen/ARM/lowerMUL-newload.ll b/test/CodeGen/ARM/lowerMUL-newload.ll
index 93d765cba116..1d483c96f7ec 100644
--- a/test/CodeGen/ARM/lowerMUL-newload.ll
+++ b/test/CodeGen/ARM/lowerMUL-newload.ll
@@ -1,25 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mcpu=krait | FileCheck %s
define void @func1(i16* %a, i16* %b, i16* %c) {
+; CHECK-LABEL: func1:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: add r3, r1, #16
+; CHECK-NEXT: vldr d18, [r2, #16]
+; CHECK-NEXT: vld1.16 {d16}, [r3:64]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vaddw.s16 q10, q8, d18
+; CHECK-NEXT: vmovn.i32 d19, q10
+; CHECK-NEXT: vldr d20, [r0, #16]
+; CHECK-NEXT: vstr d19, [r0, #16]
+; CHECK-NEXT: vldr d19, [r2, #16]
+; CHECK-NEXT: vmull.s16 q11, d18, d19
+; CHECK-NEXT: vmovl.s16 q9, d19
+; CHECK-NEXT: vmla.i32 q11, q8, q9
+; CHECK-NEXT: vmovn.i32 d16, q11
+; CHECK-NEXT: vstr d16, [r1, #16]
+; CHECK-NEXT: vldr d16, [r2, #16]
+; CHECK-NEXT: vmlal.s16 q11, d16, d20
+; CHECK-NEXT: vmovn.i32 d16, q11
+; CHECK-NEXT: vstr d16, [r0, #16]
+; CHECK-NEXT: bx lr
entry:
; The test case trying to vectorize the pseudo code below.
; a[i] = b[i] + c[i];
; b[i] = a[i] * c[i];
; a[i] = b[i] + a[i] * c[i];
-;
; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i]" is
; scheduled before the first vector store to "a[i] = b[i] + c[i]".
; Checking that there is no vector load a[i] scheduled between the vector
; stores to a[i], otherwise the load of a[i] will be polluted by the first
; vector store to a[i].
-;
; This test case check that the chain information is updated during
; lowerMUL for the new created Load SDNode.
-; CHECK: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
-; CHECK-NOT: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
%scevgep0 = getelementptr i16, i16* %a, i32 8
%vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
@@ -57,26 +73,41 @@ entry:
}
define void @func2(i16* %a, i16* %b, i16* %c) {
+; CHECK-LABEL: func2:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: add r3, r1, #16
+; CHECK-NEXT: vldr d18, [r2, #16]
+; CHECK-NEXT: vld1.16 {d16}, [r3:64]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vaddw.s16 q10, q8, d18
+; CHECK-NEXT: vmovn.i32 d19, q10
+; CHECK-NEXT: vldr d20, [r0, #16]
+; CHECK-NEXT: vstr d19, [r0, #16]
+; CHECK-NEXT: vldr d19, [r2, #16]
+; CHECK-NEXT: vmull.s16 q11, d18, d19
+; CHECK-NEXT: vmovl.s16 q9, d19
+; CHECK-NEXT: vmla.i32 q11, q8, q9
+; CHECK-NEXT: vmovn.i32 d16, q11
+; CHECK-NEXT: vstr d16, [r1, #16]
+; CHECK-NEXT: vldr d16, [r2, #16]
+; CHECK-NEXT: vmlal.s16 q11, d16, d20
+; CHECK-NEXT: vaddw.s16 q8, q11, d20
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vstr d16, [r0, #16]
+; CHECK-NEXT: bx lr
entry:
; The test case trying to vectorize the pseudo code below.
; a[i] = b[i] + c[i];
; b[i] = a[i] * c[i];
; a[i] = b[i] + a[i] * c[i] + a[i];
-;
; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
; is scheduled before the first vector store to "a[i] = b[i] + c[i]".
; Checking that there is no vector load a[i] scheduled between the first
; vector store to a[i] and the vector add of a[i], otherwise the load of
; a[i] will be polluted by the first vector store to a[i].
-;
; This test case check that both the chain and value of the new created
; Load SDNode are updated during lowerMUL.
-; CHECK: vldr {{.*}} [r0, #16]
-; CHECK: vstr {{.*}} [r0, #16]
-; CHECK-NOT: vldr {{.*}} [r0, #16]
-; CHECK: vaddw.s16
-; CHECK: vstr {{.*}} [r0, #16]
%scevgep0 = getelementptr i16, i16* %a, i32 8
%vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
diff --git a/test/CodeGen/ARM/machine-licm.ll b/test/CodeGen/ARM/machine-licm.ll
index 9ed1a57616c9..1cf291be6621 100644
--- a/test/CodeGen/ARM/machine-licm.ll
+++ b/test/CodeGen/ARM/machine-licm.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=THUMB
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s -check-prefix=ARM
-; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -disable-fp-elim -mattr=+v6t2 | FileCheck %s -check-prefix=MOVT
+; RUN: llc < %s -mtriple=thumb-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s -check-prefix=THUMB
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s -check-prefix=ARM
+; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=pic -frame-pointer=all -mattr=+v6t2 | FileCheck %s -check-prefix=MOVT
; rdar://7353541
; rdar://7354376
; rdar://8887598
diff --git a/test/CodeGen/ARM/macho-frame-offset.ll b/test/CodeGen/ARM/macho-frame-offset.ll
index f3dacf66b6c3..b61a7d706f05 100644
--- a/test/CodeGen/ARM/macho-frame-offset.ll
+++ b/test/CodeGen/ARM/macho-frame-offset.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple thumbv7m-apple-macho -disable-fp-elim -o - %s | FileCheck %s
+; RUN: llc -mtriple thumbv7m-apple-macho -frame-pointer=all -o - %s | FileCheck %s
define void @func() {
; CHECK-LABEL: func:
diff --git a/test/CodeGen/ARM/memcpy-inline.ll b/test/CodeGen/ARM/memcpy-inline.ll
index c410403a9f11..8dab9b67d559 100644
--- a/test/CodeGen/ARM/memcpy-inline.ll
+++ b/test/CodeGen/ARM/memcpy-inline.ll
@@ -86,10 +86,9 @@ entry:
define void @t5(i8* nocapture %C) nounwind {
entry:
; CHECK-LABEL: t5:
-; CHECK: movs [[REG5:r[0-9]+]], #0
-; CHECK: strb [[REG5]], [r0, #6]
-; CHECK: movw [[REG6:r[0-9]+]], #21587
-; CHECK: strh [[REG6]], [r0, #4]
+; CHECK: movw [[REG5:r[0-9]+]], #21337
+; CHECK: movt [[REG5]], #84
+; CHECK: str.w [[REG5]], [r0, #3]
; CHECK: movw [[REG7:r[0-9]+]], #18500
; CHECK: movt [[REG7:r[0-9]+]], #22866
; CHECK: str [[REG7]]
diff --git a/test/CodeGen/ARM/memcpy-ldm-stm.ll b/test/CodeGen/ARM/memcpy-ldm-stm.ll
index 314f559e357a..4009de5bd46a 100644
--- a/test/CodeGen/ARM/memcpy-ldm-stm.ll
+++ b/test/CodeGen/ARM/memcpy-ldm-stm.ll
@@ -34,14 +34,16 @@ entry:
; CHECK-LABEL: t2:
; CHECKV6: ldr [[LB:r[0-7]]],
; CHECKV6-NEXT: ldr [[SB:r[0-7]]],
+; CHECKV6-NEXT: ldm{{(\.w)?}} [[LB]]!,
+; CHECKV6-NEXT: stm{{(\.w)?}} [[SB]]!,
+; CHECKV6-NEXT: ldrh{{(\.w)?}} {{.*}}, {{\[}}[[LB]]]
+; CHECKV6-NEXT: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #2]
+; CHECKV6-NEXT: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #2]
+; CHECKV6-NEXT: strh{{(\.w)?}} {{.*}}, {{\[}}[[SB]]]
; CHECKV7: movt [[LB:[rl0-9]+]], :upper16:d
; CHECKV7-NEXT: movt [[SB:[rl0-9]+]], :upper16:s
-; CHECK-NEXT: ldm{{(\.w)?}} [[LB]]!,
-; CHECK-NEXT: stm{{(\.w)?}} [[SB]]!,
-; CHECK-NEXT: ldrh{{(\.w)?}} {{.*}}, {{\[}}[[LB]]]
-; CHECK-NEXT: ldrb{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #2]
-; CHECK-NEXT: strb{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #2]
-; CHECK-NEXT: strh{{(\.w)?}} {{.*}}, {{\[}}[[SB]]]
+; CHECKV7: ldr{{(\.w)?}} {{.*}}, {{\[}}[[LB]], #11]
+; CHECKV7-NEXT: str{{(\.w)?}} {{.*}}, {{\[}}[[SB]], #11]
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast ([64 x i32]* @s to i8*), i8* align 4 bitcast ([64 x i32]* @d to i8*), i32 15, i1 false)
ret void
}
diff --git a/test/CodeGen/ARM/memfunc.ll b/test/CodeGen/ARM/memfunc.ll
index b415ff7b7f41..6c0668a53e82 100644
--- a/test/CodeGen/ARM/memfunc.ll
+++ b/test/CodeGen/ARM/memfunc.ll
@@ -388,6 +388,7 @@ entry:
@arr7 = external global [7 x i8], align 1
@arr8 = internal global [128 x i8] undef
@arr9 = weak_odr global [128 x i8] undef
+@arr10 = dso_local global [8 x i8] c"\01\02\03\04\05\06\07\08", align 1
define void @f9(i8* %dest, i32 %n) "no-frame-pointer-elim"="true" {
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr1, i32 0, i32 0), i32 %n, i1 false)
@@ -399,7 +400,7 @@ entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr7, i32 0, i32 0), i32 %n, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr8, i32 0, i32 0), i32 %n, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr9, i32 0, i32 0), i32 %n, i1 false)
-
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr10, i32 0, i32 0), i32 %n, i1 false)
unreachable
}
@@ -427,6 +428,11 @@ entry:
; CHECK-GNUEABI: arr8,128,16
; CHECK: .p2align 4
; CHECK: arr9:
+; CHECK-IOS: .p2align 3
+; CHECK-DARWIN: .p2align 2
+; CHECK-EABI: .p2align 2
+; CHECK-GNUEABI: .p2align 2
+; CHECK: arr10:
; CHECK-NOT: arr7:
diff --git a/test/CodeGen/ARM/misched-fusion-aes.ll b/test/CodeGen/ARM/misched-fusion-aes.ll
index 483f26cc8e00..b6ca49646f83 100644
--- a/test/CodeGen/ARM/misched-fusion-aes.ll
+++ b/test/CodeGen/ARM/misched-fusion-aes.ll
@@ -72,20 +72,27 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-LABEL: aesea:
; CHECK: aese.8 [[QA:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QA]]
+
; CHECK: aese.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QB]]
-; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
+
; CHECK: aese.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QC]]
+
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QD]]
+
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QE]]
-; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
+
; CHECK: aese.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QF]]
+
; CHECK: aese.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QG]]
+
; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QH:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QH]]
@@ -160,14 +167,14 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QA]]
; CHECK: aesd.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QB]]
-; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QC]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QD]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QE]]
-; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QF]]
; CHECK: aesd.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
diff --git a/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 78e2ab035b98..04a6f6b00051 100644
--- a/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -92,7 +92,7 @@
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr, %20:rgpr, 14, $noreg
+# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12:rgpr, %12:rgpr, %19:rgpr(tied-def 0), %20:rgpr(tied-def 1), 14, $noreg
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
diff --git a/test/CodeGen/ARM/misched-int-basic.mir b/test/CodeGen/ARM/misched-int-basic.mir
index ec607d1d8f8f..41a92831af8d 100644
--- a/test/CodeGen/ARM/misched-int-basic.mir
+++ b/test/CodeGen/ARM/misched-int-basic.mir
@@ -58,7 +58,7 @@
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6:gprnopc, %6:gprnopc, %13:gpr, %14:gprnopc, 14, $noreg, $noreg
+# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6:gprnopc, %6:gprnopc, %13:gpr(tied-def 0), %14:gprnopc(tied-def 1), 14, $noreg, $noreg
# CHECK_SWIFT: Latency : 7
# CHECK_A9: Latency : 3
# CHECK_R52: Latency : 4
diff --git a/test/CodeGen/ARM/none-macho.ll b/test/CodeGen/ARM/none-macho.ll
index fee459f4f5e1..057da9496145 100644
--- a/test/CodeGen/ARM/none-macho.ll
+++ b/test/CodeGen/ARM/none-macho.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumbv7m-none-macho %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NON-FAST
-; RUN: llc -mtriple=thumbv7m-none-macho -O0 %s -o - -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc -mtriple=thumbv7m-none-macho %s -o - -relocation-model=pic -frame-pointer=all | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NON-FAST
+; RUN: llc -mtriple=thumbv7m-none-macho -O0 %s -o - -relocation-model=pic -frame-pointer=all | FileCheck %s
; RUN: llc -mtriple=thumbv7m-none-macho -filetype=obj %s -o /dev/null
@var = external global i32
diff --git a/test/CodeGen/ARM/nonreserved-callframe-with-basereg.mir b/test/CodeGen/ARM/nonreserved-callframe-with-basereg.mir
new file mode 100644
index 000000000000..a262594473ff
--- /dev/null
+++ b/test/CodeGen/ARM/nonreserved-callframe-with-basereg.mir
@@ -0,0 +1,54 @@
+# RUN: llc -run-pass=prologepilog %s -o - | FileCheck %s
+
+# Make sure we use the correct offset for stack accesses using the base pointer
+# within call frame blocks. Key points of test:
+# + A large SP in ADJCALLSTACKDOWN forces each call to get its own adjustment.
+# + An over-aligned stack variable means that we must use r6 rather than fp
+# to access this variables.
+#
+# Under these circumstances, the ADJCALLSTACKDOWN must not apply to r6 offsets.
+
+--- |
+ ; ModuleID = 'simple.ll'
+ source_filename = "simple.ll"
+ target datalayout = "e-m:o-p:32:32-i64:64-a:0:32-n32-S128"
+ target triple = "thumbv7k-apple-ios"
+
+ declare void @bar([4 x i32], i32)
+
+ define void @foo(i32 %n) {
+ ret void
+ }
+
+...
+---
+name: foo
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+frameInfo:
+ adjustsStack: true
+ hasCalls: true
+ maxCallFrameSize: 2276
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: 0, alignment: 32, size: 4 }
+constants: []
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $r0
+
+ ; CHECK: t2STRi12 killed $r0, $r6, [[OFFSET:[0-9]+]]
+ t2STRi12 killed $r0, %stack.0, 0, 14, $noreg :: (store 4 into %stack.0)
+
+ ADJCALLSTACKDOWN 2276, 0, 14, $noreg, implicit-def dead $sp, implicit $sp
+
+ ; CHECK: renamable $r0 = t2LDRi12 $r6, [[OFFSET]]
+ renamable $r0 = t2LDRi12 %stack.0, 0, 14, $noreg, :: (load 4 from %stack.0)
+ renamable $r1 = IMPLICIT_DEF
+ renamable $r2 = IMPLICIT_DEF
+ renamable $r3 = IMPLICIT_DEF
+ tBL 14, $noreg, @bar, csr_ios, implicit-def dead $lr, implicit $sp, implicit killed $r0, implicit killed $r1, implicit killed $r2, implicit killed $r3, implicit-def $sp
+
+ ADJCALLSTACKUP 2276, 0, 14, $noreg, implicit-def dead $sp, implicit $sp
+ tBX_RET 14, $noreg
+
+...
diff --git a/test/CodeGen/ARM/popcnt.ll b/test/CodeGen/ARM/popcnt.ll
index fd61811f49cf..e3ce5cd1ff99 100644
--- a/test/CodeGen/ARM/popcnt.ll
+++ b/test/CodeGen/ARM/popcnt.ll
@@ -1,17 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
; Implement ctpop with vcnt
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vcnt8:
-;CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-LABEL: vcnt8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctpop.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp2
}
define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: vcntQ8:
-;CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
+; CHECK-LABEL: vcntQ8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp2
@@ -19,11 +29,12 @@ define <16 x i8> @vcntQ8(<16 x i8>* %A) nounwind {
define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
; CHECK-LABEL: vcnt16:
-; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vuzp.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp2
@@ -31,11 +42,13 @@ define <4 x i16> @vcnt16(<4 x i16>* %A) nounwind {
define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
; CHECK-LABEL: vcntQ16:
-; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vuzp.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp2
@@ -43,14 +56,13 @@ define <8 x i16> @vcntQ16(<8 x i16>* %A) nounwind {
define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
; CHECK-LABEL: vcnt32:
-; CHECK: vcnt.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vrev16.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vadd.i8 {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vuzp.8 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
-; CHECK: vrev32.16 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vuzp.16 {{d[0-9]+}}, {{d[0-9]+}}
-; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vpaddl.u16 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp2
@@ -58,14 +70,14 @@ define <2 x i32> @vcnt32(<2 x i32>* %A) nounwind {
define <4 x i32> @vcntQ32(<4 x i32>* %A) nounwind {
; CHECK-LABEL: vcntQ32:
-; CHECK: vcnt.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vrev16.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vadd.i8 {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vuzp.8 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vmovl.u8 {{q[0-9]+}}, {{d[0-9]+}}
-; CHECK: vrev32.16 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vuzp.16 {{q[0-9]+}}, {{q[0-9]+}}
-; CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp2
@@ -73,6 +85,14 @@ define <4 x i32> @vcntQ32(<4 x i32>* %A) nounwind {
define <1 x i64> @vcnt64(<1 x i64>* %A) nounwind {
; CHECK-LABEL: vcnt64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcnt.8 d16, d16
+; CHECK-NEXT: vpaddl.u8 d16, d16
+; CHECK-NEXT: vpaddl.u16 d16, d16
+; CHECK-NEXT: vpaddl.u32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %tmp1)
ret <1 x i64> %tmp2
@@ -80,6 +100,15 @@ define <1 x i64> @vcnt64(<1 x i64>* %A) nounwind {
define <2 x i64> @vcntQ64(<2 x i64>* %A) nounwind {
; CHECK-LABEL: vcntQ64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcnt.8 q8, q8
+; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vpaddl.u32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp1)
ret <2 x i64> %tmp2
@@ -95,48 +124,75 @@ declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>) nounwind readnone
declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
define <8 x i8> @vclz8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vclz8:
-;CHECK: vclz.i8 {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-LABEL: vclz8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vclz.i8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> %tmp1, i1 0)
ret <8 x i8> %tmp2
}
define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vclz16:
-;CHECK: vclz.i16 {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-LABEL: vclz16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vclz.i16 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %tmp1, i1 0)
ret <4 x i16> %tmp2
}
define <2 x i32> @vclz32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vclz32:
-;CHECK: vclz.i32 {{d[0-9]+}}, {{d[0-9]+}}
+; CHECK-LABEL: vclz32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vclz.i32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %tmp1, i1 0)
ret <2 x i32> %tmp2
}
define <16 x i8> @vclzQ8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: vclzQ8:
-;CHECK: vclz.i8 {{q[0-9]+}}, {{q[0-9]+}}
+; CHECK-LABEL: vclzQ8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vclz.i8 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %tmp1, i1 0)
ret <16 x i8> %tmp2
}
define <8 x i16> @vclzQ16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: vclzQ16:
-;CHECK: vclz.i16 {{q[0-9]+}}, {{q[0-9]+}}
+; CHECK-LABEL: vclzQ16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vclz.i16 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %tmp1, i1 0)
ret <8 x i16> %tmp2
}
define <4 x i32> @vclzQ32(<4 x i32>* %A) nounwind {
-;CHECK-LABEL: vclzQ32:
-;CHECK: vclz.i32 {{q[0-9]+}}, {{q[0-9]+}}
+; CHECK-LABEL: vclzQ32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vclz.i32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %tmp1, i1 0)
ret <4 x i32> %tmp2
@@ -151,48 +207,75 @@ declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone
define <8 x i8> @vclss8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vclss8:
-;CHECK: vcls.s8
+; CHECK-LABEL: vclss8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcls.s8 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcls.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp2
}
define <4 x i16> @vclss16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vclss16:
-;CHECK: vcls.s16
+; CHECK-LABEL: vclss16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcls.s16 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vcls.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp2
}
define <2 x i32> @vclss32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vclss32:
-;CHECK: vcls.s32
+; CHECK-LABEL: vclss32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vcls.s32 d16, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vcls.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp2
}
define <16 x i8> @vclsQs8(<16 x i8>* %A) nounwind {
-;CHECK-LABEL: vclsQs8:
-;CHECK: vcls.s8
+; CHECK-LABEL: vclsQs8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcls.s8 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vcls.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp2
}
define <8 x i16> @vclsQs16(<8 x i16>* %A) nounwind {
-;CHECK-LABEL: vclsQs16:
-;CHECK: vcls.s16
+; CHECK-LABEL: vclsQs16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcls.s16 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vcls.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp2
}
define <4 x i32> @vclsQs32(<4 x i32>* %A) nounwind {
-;CHECK-LABEL: vclsQs32:
-;CHECK: vcls.s32
+; CHECK-LABEL: vclsQs32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vcls.s32 q8, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vcls.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp2
diff --git a/test/CodeGen/ARM/pow.ll b/test/CodeGen/ARM/pow.ll
new file mode 100644
index 000000000000..2b3df92aab5b
--- /dev/null
+++ b/test/CodeGen/ARM/pow.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi | FileCheck %s --check-prefixes=ANY,SOFTFLOAT
+; RUN: llc < %s -mtriple=thumbv8-linux-gnueabihf -mattr=neon | FileCheck %s --check-prefixes=ANY,HARDFLOAT
+
+declare float @llvm.pow.f32(float, float)
+declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
+
+declare double @llvm.pow.f64(double, double)
+declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
+
+define float @pow_f32_one_fourth_fmf(float %x) nounwind {
+; ANY-LABEL: pow_f32_one_fourth_fmf:
+; SOFTFLOAT: bl powf
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+ %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 2.5e-01)
+ ret float %r
+}
+
+define double @pow_f64_one_fourth_fmf(double %x) nounwind {
+; ANY-LABEL: pow_f64_one_fourth_fmf:
+; SOFTFLOAT: bl pow
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+ %r = call nsz ninf afn double @llvm.pow.f64(double %x, double 2.5e-01)
+ ret double %r
+}
+
+define <4 x float> @pow_v4f32_one_fourth_fmf(<4 x float> %x) nounwind {
+; ANY-LABEL: pow_v4f32_one_fourth_fmf:
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; SOFTFLOAT: bl powf
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+; HARDFLOAT: vsqrt.f32
+ %r = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @pow_v2f64_one_fourth_fmf(<2 x double> %x) nounwind {
+; ANY-LABEL: pow_v2f64_one_fourth_fmf:
+; SOFTFLOAT: bl pow
+; SOFTFLOAT: bl pow
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+; HARDFLOAT: vsqrt.f64
+ %r = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
+ ret <2 x double> %r
+}
+
+define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
+; ANY-LABEL: pow_f32_one_fourth_not_enough_fmf:
+; SOFTFLOAT: bl powf
+; HARDFLOAT: b powf
+ %r = call afn ninf float @llvm.pow.f32(float %x, float 2.5e-01)
+ ret float %r
+}
+
+define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
+; ANY-LABEL: pow_f64_one_fourth_not_enough_fmf:
+; SOFTFLOAT: bl pow
+; HARDFLOAT: b pow
+ %r = call nsz ninf double @llvm.pow.f64(double %x, double 2.5e-01)
+ ret double %r
+}
+
+define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind {
+; ANY-LABEL: pow_v4f32_one_fourth_not_enough_fmf:
+; ANY: bl powf
+; ANY: bl powf
+; ANY: bl powf
+; ANY: bl powf
+ %r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
+ ret <4 x float> %r
+}
+
+define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind {
+; ANY-LABEL: pow_v2f64_one_fourth_not_enough_fmf:
+; ANY: bl pow
+; ANY: bl pow
+ %r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
+ ret <2 x double> %r
+}
+
diff --git a/test/CodeGen/ARM/pr36577.ll b/test/CodeGen/ARM/pr36577.ll
index 2309ce21292a..11805cb5d84e 100644
--- a/test/CodeGen/ARM/pr36577.ll
+++ b/test/CodeGen/ARM/pr36577.ll
@@ -9,12 +9,11 @@
; CHECK-LABEL: pr36577
; CHECK: ldrh r0, [r0]
-; CHECK: bic r0, r1, r0, lsr #5
-; CHECK: mvn r1, #7
-; CHECK: orr r0, r0, r1
+; CHECK: mvn r0, r0, lsr #7
+; CHECK: orr r0, r1, r0, lsl #2
; CHECK-T2: ldrh r0, [r0]
-; CHECK-T2: bic.w r0, r1, r0, lsr #5
-; CHECK-T2: orn r0, r0, #7
+; CHECK-T2: mvn.w r0, r0, lsr #7
+; CHECK-T2: orr.w r0, r1, r0, lsl #2
define dso_local arm_aapcscc i32** @pr36577() {
entry:
%0 = load i16, i16* @a, align 2
diff --git a/test/CodeGen/ARM/pr39060.ll b/test/CodeGen/ARM/pr39060.ll
new file mode 100644
index 000000000000..dfaabfa374a5
--- /dev/null
+++ b/test/CodeGen/ARM/pr39060.ll
@@ -0,0 +1,33 @@
+; RUN: llc -mtriple=armv7a-linux-androideabi %s -o - | FileCheck %s
+
+@a = local_unnamed_addr global i16 -1, align 2
+@b = local_unnamed_addr global i16 0, align 2
+
+; CHECK-LABEL: pr39060:
+; CHECK: ldrh
+; CHECK: ldrh
+; CHECK: sub
+; CHECK: uxth
+define void @pr39060() local_unnamed_addr #0 {
+entry:
+ %0 = load i16, i16* @a, align 2
+ %1 = load i16, i16* @b, align 2
+ %sub = add i16 %1, -1
+ %cmp = icmp eq i16 %0, %sub
+ br i1 %cmp, label %if.else, label %if.then
+
+if.then:
+ tail call void bitcast (void (...)* @f to void ()*)() #2
+ br label %if.end
+
+if.else:
+ tail call void bitcast (void (...)* @g to void ()*)() #2
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+declare void @f(...) local_unnamed_addr #1
+
+declare void @g(...) local_unnamed_addr #1
diff --git a/test/CodeGen/ARM/pr39571.ll b/test/CodeGen/ARM/pr39571.ll
new file mode 100644
index 000000000000..fbc910a557af
--- /dev/null
+++ b/test/CodeGen/ARM/pr39571.ll
@@ -0,0 +1,33 @@
+; RUN: llc < %s -mtriple armv4t-unknown-linux-gnueabi -mattr=+strict-align
+
+; Avoid crash from forwarding indexed-loads back to store.
+%struct.anon = type { %struct.ma*, %struct.mb }
+%struct.ma = type { i8 }
+%struct.mb = type { i8, i8 }
+%struct.anon.0 = type { %struct.anon.1 }
+%struct.anon.1 = type { %struct.ds }
+%struct.ds = type <{ i8, %union.ie }>
+%union.ie = type { %struct.ib }
+%struct.ib = type { i8, i8, i16 }
+
+@a = common dso_local local_unnamed_addr global %struct.anon* null, align 4
+@b = common dso_local local_unnamed_addr global %struct.anon.0 zeroinitializer, align 1
+
+; Function Attrs: norecurse nounwind
+define dso_local void @func() local_unnamed_addr {
+entry:
+ %0 = load %struct.anon*, %struct.anon** @a, align 4
+ %ad = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 0
+ %1 = load %struct.ma*, %struct.ma** %ad, align 4
+ %c.sroa.0.0..sroa_idx = getelementptr inbounds %struct.ma, %struct.ma* %1, i32 0, i32 0
+ %c.sroa.0.0.copyload = load i8, i8* %c.sroa.0.0..sroa_idx, align 1
+ %cb = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 1
+ %band = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 1, i32 1
+ store i8 %c.sroa.0.0.copyload, i8* %band, align 4
+ store i8 6, i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0), align 1
+ store i8 2, i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1), align 1
+ %2 = bitcast %struct.mb* %cb to i32*
+ %3 = load i32, i32* bitcast (i8* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @b, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0) to i32*), align 1
+ store i32 %3, i32* %2, align 1
+ ret void
+}
diff --git a/test/CodeGen/ARM/print-registers.ll b/test/CodeGen/ARM/print-registers.ll
new file mode 100644
index 000000000000..a95a71db6d38
--- /dev/null
+++ b/test/CodeGen/ARM/print-registers.ll
@@ -0,0 +1,10 @@
+; RUN: llc -mtriple=armeb-arm-none-eabi < %s -o -| FileCheck %s -check-prefixes=CHECK-BE
+; RUN: llc -mtriple=arm-arm-none-eabi < %s -o -| FileCheck %s -check-prefixes=CHECK-LE
+
+define dso_local void @_Z3fooi(i32 %a) local_unnamed_addr #0 {
+entry:
+; CHECK-BE: @ plain: [[LOW_REG:r[0-9]+]] Q: [[HIGH_REG:r[0-9]+]] R: [[LOW_REG]] H: [[HIGH_REG]]
+; CHECK-LE: @ plain: [[LOW_REG:r[0-9]+]] Q: [[LOW_REG]] R: [[HIGH_REG:r[0-9]+]] H: [[HIGH_REG]]
+ tail call void asm sideeffect "// plain: $0 Q: ${0:Q} R: ${0:R} H: ${0:H}", "r"(i64 1) #1
+ ret void
+}
diff --git a/test/CodeGen/ARM/readonly-aliases.ll b/test/CodeGen/ARM/readonly-aliases.ll
new file mode 100644
index 000000000000..c90650d3a81d
--- /dev/null
+++ b/test/CodeGen/ARM/readonly-aliases.ll
@@ -0,0 +1,17 @@
+; RUN: llc -mtriple thumbv7-unknown-linux-android -filetype asm -o - %s | FileCheck %s
+
+@a = protected constant <{ i32, i32 }> <{ i32 0, i32 0 }>
+@b = protected alias i32, getelementptr(i32, i32* getelementptr inbounds (<{ i32, i32 }>, <{ i32, i32 }>* @a, i32 0, i32 1), i32 -1)
+
+declare void @f(i32*)
+
+define void @g() {
+entry:
+ call void @f(i32* @b)
+ ret void
+}
+
+; CHECK-LABEL: g:
+; CHECK: movw [[REGISTER:r[0-9]+]], :lower16:b
+; CHECK: movt [[REGISTER]], :upper16:b
+
diff --git a/test/CodeGen/ARM/sched-it-debug-nodes.mir b/test/CodeGen/ARM/sched-it-debug-nodes.mir
index 8d0688ef01d3..ec42e7df3b2f 100644
--- a/test/CodeGen/ARM/sched-it-debug-nodes.mir
+++ b/test/CodeGen/ARM/sched-it-debug-nodes.mir
@@ -33,7 +33,7 @@
; hopefully, triggering an assert).
; CHECK: BUNDLE implicit-def dead $itstate{{.*}} {
- ; CHECK: DBG_VALUE debug-use $r1, debug-use $noreg, !"u"
+ ; CHECK: DBG_VALUE $r1, $noreg, !"u"
; CHECK-NOT: DBG_VALUE killed $r1, $noreg, !"u"
declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1
@@ -131,23 +131,23 @@ body: |
bb.0.entry:
liveins: $r0, $r1, $r2, $r3, $lr, $r7
- DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
- DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28
- DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28
+ DBG_VALUE $r0, $noreg, !18, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r2, $noreg, !20, !27, debug-location !28
+ DBG_VALUE $r3, $noreg, !21, !27, debug-location !28
t2CMPri $r3, 4, 14, $noreg, implicit-def $cpsr, debug-location !31
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
$r0 = t2MOVi -1, 3, $cpsr, $noreg, implicit undef $r0
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
tBX_RET 3, $cpsr, implicit $r0, debug-location !34
$sp = frame-setup t2STMDB_UPD $sp, 14, $noreg, killed $r7, killed $lr
frame-setup CFI_INSTRUCTION def_cfa_offset 8
frame-setup CFI_INSTRUCTION offset $lr, -4
frame-setup CFI_INSTRUCTION offset $r7, -8
- DBG_VALUE debug-use $r0, debug-use $noreg, !18, !27, debug-location !28
- DBG_VALUE debug-use $r1, debug-use $noreg, !19, !27, debug-location !28
- DBG_VALUE debug-use $r2, debug-use $noreg, !20, !27, debug-location !28
- DBG_VALUE debug-use $r3, debug-use $noreg, !21, !27, debug-location !28
+ DBG_VALUE $r0, $noreg, !18, !27, debug-location !28
+ DBG_VALUE $r1, $noreg, !19, !27, debug-location !28
+ DBG_VALUE $r2, $noreg, !20, !27, debug-location !28
+ DBG_VALUE $r3, $noreg, !21, !27, debug-location !28
$r1 = tMOVr killed $r2, 14, $noreg, debug-location !32
$r2 = tMOVr killed $r3, 14, $noreg, debug-location !32
tBL 14, $noreg, @g, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit $r1, implicit $r2, implicit-def $sp, debug-location !32
diff --git a/test/CodeGen/ARM/sdiv-pow2-arm-size.ll b/test/CodeGen/ARM/sdiv-pow2-arm-size.ll
new file mode 100644
index 000000000000..a9eda31e729e
--- /dev/null
+++ b/test/CodeGen/ARM/sdiv-pow2-arm-size.ll
@@ -0,0 +1,79 @@
+; RUN: llc -mtriple=armv7a -mattr=+hwdiv-arm %s -o - | FileCheck %s --check-prefixes=CHECK,DIV
+; RUN: llc -mtriple=armv7a -mattr=-hwdiv-arm %s -o - | FileCheck %s --check-prefixes=CHECK,NODIV
+
+; Check SREM
+define dso_local i32 @test_rem(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: test_rem
+; CHECK: asr r1, r0, #31
+; CHECK-NEXT: add r1, r0, r1, lsr #30
+; CHECK-NEXT: bic r1, r1, #3
+; CHECK-NEXT: sub r0, r0, r1
+
+entry:
+ %div = srem i32 %F, 4
+ ret i32 %div
+}
+
+; Try an i16 sdiv, with a small immediate.
+define dso_local signext i16 @f0(i16 signext %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f0
+
+; DIV: mov r1, #2
+; DIV-NEXT: sdiv r0, r0, r1
+; DIV-NEXT: sxth r0, r0
+; DIV-NEXT: bx lr
+
+; NODIV: uxth r1, r0
+; NODIV-NEXT: add r0, r0, r1, lsr #15
+; NODIV-NEXT: sxth r0, r0
+; NODIV-NEXT: asr r0, r0, #1
+; NODIV-NEXT: bx lr
+
+entry:
+ %0 = sdiv i16 %F, 2
+ ret i16 %0
+}
+
+; Try an i32 sdiv, with a small immediate.
+define dso_local i32 @f1(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f1
+
+; DIV: mov r1, #4
+; DIV-NEXT: sdiv r0, r0, r1
+; DIV-NEXT: bx lr
+
+; NODIV: asr r1, r0, #31
+; NODIV-NEXT: add r0, r0, r1, lsr #30
+; NODIV-NEXT: asr r0, r0, #2
+; NODIV-NEXT: bx lr
+
+entry:
+ %div = sdiv i32 %F, 4
+ ret i32 %div
+}
+
+; Try a large power of 2 immediate, which should also be materialised with 1
+; move immediate instruction.
+define dso_local i32 @f2(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f2
+; DIV: mov r1, #131072
+; DIV-NEXT: sdiv r0, r0, r1
+; DIV-NEXT: bx lr
+entry:
+ %div = sdiv i32 %F, 131072
+ ret i32 %div
+}
+
+; MinSize not set, so should expand to the faster but longer sequence.
+define dso_local i32 @f3(i32 %F) {
+; CHECK-LABEL: f3
+; CHECK: asr r1, r0, #31
+; CHECK-NEXT: add r0, r0, r1, lsr #30
+; CHECK-NEXT: asr r0, r0, #2
+; CHECK-NEXT: bx lr
+entry:
+ %div = sdiv i32 %F, 4
+ ret i32 %div
+}
+
+attributes #0 = { minsize norecurse nounwind optsize readnone }
diff --git a/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll b/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
new file mode 100644
index 000000000000..4b0419577cdf
--- /dev/null
+++ b/test/CodeGen/ARM/sdiv-pow2-thumb-size.ll
@@ -0,0 +1,105 @@
+; RUN: llc -mtriple=thumbv8 %s -o - | FileCheck %s --check-prefixes=CHECK,T2
+; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefixes=CHECK,T2
+; RUN: llc -mtriple=thumbv8m.base %s -o - | FileCheck %s --check-prefixes=CHECK,T1
+; RUN: llc -mtriple=thumbv7em %s -o - | FileCheck %s --check-prefixes=CHECK,T2
+; RUN: llc -mtriple=thumbv6m %s -o - | FileCheck %s --check-prefixes=V6M
+
+; Armv6m targets don't have a sdiv instruction, so sdiv should not appear at
+; all in the output:
+
+; V6M: .file {{.*}}
+; V6M-NOT: sdiv
+; V6M-NOT: idiv
+
+; Test sdiv i16
+define dso_local signext i16 @f0(i16 signext %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f0
+; CHECK: movs r1, #2
+; CHECK-NEXT: sdiv r0, r0, r1
+; CHECK-NEXT: sxth r0, r0
+; CHECK-NEXT: bx lr
+
+entry:
+ %0 = sdiv i16 %F, 2
+ ret i16 %0
+}
+
+; Same as above, but now with i32
+define dso_local i32 @f1(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f1
+; CHECK: movs r1, #4
+; CHECK-NEXT: sdiv r0, r0, r1
+; CHECK-NEXT: bx lr
+
+entry:
+ %div = sdiv i32 %F, 4
+ ret i32 %div
+}
+
+; The immediate is not a power of 2, so we expect a sdiv.
+define dso_local i32 @f2(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f2
+; CHECK: movs r1, #5
+; CHECK-NEXT: sdiv r0, r0, r1
+; CHECK-NEXT: bx lr
+
+entry:
+ %div = sdiv i32 %F, 5
+ ret i32 %div
+}
+
+; Try a larger power of 2 immediate: immediates larger than
+; 128 don't give any code size savings.
+define dso_local i32 @f3(i32 %F) local_unnamed_addr #0 {
+; CHECK-LABEL: f3
+; CHECK-NOT: sdiv
+entry:
+ %div = sdiv i32 %F, 256
+ ret i32 %div
+}
+
+attributes #0 = { minsize norecurse nounwind optsize readnone }
+
+
+; These functions don't have the minsize attribute set, so should not lower
+; the sdiv to sdiv, but to the faster instruction sequence.
+
+define dso_local signext i16 @f4(i16 signext %F) {
+; T2-LABEL: f4
+; T2: uxth r1, r0
+; T2-NEXT: add.w r0, r0, r1, lsr #15
+; T2-NEXT: sxth r0, r0
+; T2-NEXT: asrs r0, r0, #1
+; T2-NEXT: bx lr
+
+; T1-LABEL: f4
+; T1: uxth r1, r0
+; T1-NEXT: lsrs r1, r1, #15
+; T1-NEXT: adds r0, r0, r1
+; T1-NEXT: sxth r0, r0
+; T1-NEXT: asrs r0, r0, #1
+; T1-NEXT: bx lr
+
+entry:
+ %0 = sdiv i16 %F, 2
+ ret i16 %0
+}
+
+define dso_local i32 @f5(i32 %F) {
+; T2-LABEL: f5
+; T2: asrs r1, r0, #31
+; T2-NEXT: add.w r0, r0, r1, lsr #30
+; T2-NEXT: asrs r0, r0, #2
+; T2-NEXT: bx lr
+
+; T1-LABEL: f5
+; T1: asrs r1, r0, #31
+; T1-NEXT: lsrs r1, r1, #30
+; T1-NEXT: adds r0, r0, r1
+; T1-NEXT: asrs r0, r0, #2
+; T1-NEXT: bx lr
+
+entry:
+ %div = sdiv i32 %F, 4
+ ret i32 %div
+}
diff --git a/test/CodeGen/ARM/select-imm.ll b/test/CodeGen/ARM/select-imm.ll
index c0cebf833a06..1e27024e7c79 100644
--- a/test/CodeGen/ARM/select-imm.ll
+++ b/test/CodeGen/ARM/select-imm.ll
@@ -24,12 +24,8 @@ entry:
; ARMT2: movwgt [[R]], #123
; THUMB1-LABEL: t1:
-; THUMB1: mov r1, r0
-; THUMB1: movs r2, #255
-; THUMB1: adds r2, #102
-; THUMB1: movs r0, #123
-; THUMB1: cmp r1, #1
-; THUMB1: bgt
+; THUMB1: cmp r0, #1
+; THUMB1: bgt .LBB0_2
; THUMB2-LABEL: t1:
; THUMB2: movw [[R:r[0-1]]], #357
@@ -75,8 +71,7 @@ entry:
; ARMT2: lsr r0, r0, #5
; THUMB1-LABEL: t3:
-; THUMB1: movs r1, #0
-; THUMB1: subs r1, r1, r0
+; THUMB1: rsbs r1, r0, #0
; THUMB1: adcs r0, r1
; THUMB2-LABEL: t3:
@@ -120,8 +115,7 @@ entry:
; THUMB1-LABEL: t5:
; THUMB1-NOT: bne
-; THUMB1: movs r0, #0
-; THUMB1: subs r0, r0, r1
+; THUMB1: rsbs r0, r1, #0
; THUMB1: adcs r0, r1
; THUMB2-LABEL: t5:
@@ -144,7 +138,7 @@ entry:
; THUMB1-LABEL: t6:
; THUMB1: cmp r{{[0-9]+}}, #0
-; THUMB1: bne
+; THUMB1: beq
; THUMB2-LABEL: t6:
; THUMB2-NOT: mov
@@ -200,8 +194,7 @@ entry:
; THUMB1: bl t7
; THUMB1: mov r1, r0
; THUMB1: subs r2, r4, #5
-; THUMB1: movs r0, #0
-; THUMB1: subs r0, r0, r2
+; THUMB1: rsbs r0, r2, #0
; THUMB1: adcs r0, r2
; THUMB2-LABEL: t8:
@@ -306,8 +299,7 @@ entry:
; ARMT2: lsr r0, r0, #5
; THUMB1-LABEL: t10:
-; THUMB1: movs r0, #0
-; THUMB1: subs r0, r0, r1
+; THUMB1: rsbs r0, r1, #0
; THUMB1: adcs r0, r1
; THUMB2-LABEL: t10:
diff --git a/test/CodeGen/ARM/select.ll b/test/CodeGen/ARM/select.ll
index e9394a720738..54bc1b9a9925 100644
--- a/test/CodeGen/ARM/select.ll
+++ b/test/CodeGen/ARM/select.ll
@@ -80,8 +80,8 @@ define double @f7(double %a, double %b) {
; block generated, odds are good that we have close to the ideal code for this:
;
; CHECK-NEON-LABEL: f8:
-; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
; CHECK-NEON: adr [[R2:r[0-9]+]], LCPI7_0
+; CHECK-NEON: movw [[R3:r[0-9]+]], #1123
; CHECK-NEON-NEXT: cmp r0, [[R3]]
; CHECK-NEON-NEXT: it eq
; CHECK-NEON-NEXT: addeq{{.*}} [[R2]], #4
@@ -142,3 +142,14 @@ define float @f12(i32 %a, i32 %b) nounwind uwtable readnone ssp {
ret float %2
}
+; CHECK-LABEL: test_overflow_recombine:
+define i1 @test_overflow_recombine(i32 %in) {
+; CHECK: smull [[LO:r[0-9]+]], [[HI:r[0-9]+]]
+; CHECK: subs [[ZERO:r[0-9]+]], [[HI]], [[LO]], asr #31
+; CHECK: movne [[ZERO]], #1
+ %prod = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 0, i32 %in)
+ %overflow = extractvalue { i32, i1 } %prod, 1
+ ret i1 %overflow
+}
+
+declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll
index 7cce0b082037..81b0db3b3136 100644
--- a/test/CodeGen/ARM/select_const.ll
+++ b/test/CodeGen/ARM/select_const.ll
@@ -314,9 +314,8 @@ define i64 @opaque_constant2(i1 %cond, i64 %x) {
; CHECK-NEXT: mov r1, #1
; CHECK-NEXT: tst r0, #1
; CHECK-NEXT: orr r1, r1, #65536
-; CHECK-NEXT: mov r0, r1
-; CHECK-NEXT: moveq r0, #23
-; CHECK-NEXT: and r0, r0, r1
+; CHECK-NEXT: moveq r1, #23
+; CHECK-NEXT: bic r0, r1, #22
; CHECK-NEXT: mov r1, #0
; CHECK-NEXT: mov pc, lr
%sel = select i1 %cond, i64 65537, i64 23
diff --git a/test/CodeGen/ARM/setcc-logic.ll b/test/CodeGen/ARM/setcc-logic.ll
index 2c2792ed9868..cf482f39f2b5 100644
--- a/test/CodeGen/ARM/setcc-logic.ll
+++ b/test/CodeGen/ARM/setcc-logic.ll
@@ -61,9 +61,8 @@ define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32>
; CHECK-NEXT: vceq.i32 q8, q9, q8
; CHECK-NEXT: vld1.64 {d22, d23}, [r0]
; CHECK-NEXT: vceq.i32 q9, q11, q10
+; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmovn.i32 d17, q9
-; CHECK-NEXT: vand d16, d16, d17
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: pop {r11, pc}
%cmp1 = icmp eq <4 x i32> %a, %b
diff --git a/test/CodeGen/ARM/shuffle.ll b/test/CodeGen/ARM/shuffle.ll
index 7d6be4f5e6c3..17ec7c51daf4 100644
--- a/test/CodeGen/ARM/shuffle.ll
+++ b/test/CodeGen/ARM/shuffle.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -frame-pointer=all | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin"
diff --git a/test/CodeGen/ARM/sincos.ll b/test/CodeGen/ARM/sincos.ll
index 42a834d24b3e..dc8fdf69ca61 100644
--- a/test/CodeGen/ARM/sincos.ll
+++ b/test/CodeGen/ARM/sincos.ll
@@ -3,6 +3,8 @@
; RUN: llc < %s -mtriple=armv7-linux-gnu -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU
; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 \
; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=SINCOS-GNU
+; RUN: llc < %s -mtriple=armv7-linux-android -mcpu=cortex-a8 | FileCheck %s --check-prefix=NOOPT-ANDROID
+; RUN: llc < %s -mtriple=armv7-linux-android9 -mcpu=cortex-a8 | FileCheck %s --check-prefix=SINCOS-GNU
; Combine sin / cos into a single call unless they may write errno (as
; captured by readnone attrbiute, controlled by clang -fmath-errno
@@ -21,6 +23,10 @@ entry:
; NOOPT: bl _sinf
; NOOPT: bl _cosf
+; NOOPT-ANDROID-LABEL: test1:
+; NOOPT-ANDROID: bl sinf
+; NOOPT-ANDROID: bl cosf
+
%call = tail call float @sinf(float %x) readnone
%call1 = tail call float @cosf(float %x) readnone
%add = fadd float %call, %call1
@@ -41,6 +47,10 @@ entry:
; NOOPT: bl _sinf
; NOOPT: bl _cosf
+; NOOPT-ANDROID-LABEL: test1_errno:
+; NOOPT-ANDROID: bl sinf
+; NOOPT-ANDROID: bl cosf
+
%call = tail call float @sinf(float %x)
%call1 = tail call float @cosf(float %x)
%add = fadd float %call, %call1
@@ -59,6 +69,10 @@ entry:
; NOOPT: bl _sin
; NOOPT: bl _cos
+; NOOPT-ANDROID-LABEL: test2:
+; NOOPT-ANDROID: bl sin
+; NOOPT-ANDROID: bl cos
+
%call = tail call double @sin(double %x) readnone
%call1 = tail call double @cos(double %x) readnone
%add = fadd double %call, %call1
@@ -79,6 +93,10 @@ entry:
; NOOPT: bl _sin
; NOOPT: bl _cos
+; NOOPT-ANDROID-LABEL: test2_errno:
+; NOOPT-ANDROID: bl sin
+; NOOPT-ANDROID: bl cos
+
%call = tail call double @sin(double %x)
%call1 = tail call double @cos(double %x)
%add = fadd double %call, %call1
diff --git a/test/CodeGen/ARM/smlad0.ll b/test/CodeGen/ARM/smlad0.ll
index b9278b4c22b1..477f5659c162 100644
--- a/test/CodeGen/ARM/smlad0.ll
+++ b/test/CodeGen/ARM/smlad0.ll
@@ -130,3 +130,83 @@ for.body:
%cmp = icmp slt i32 %add29, %arg
br i1 %cmp, label %for.body, label %for.cond.cleanup
}
+
+define i32 @one_zext(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+; CHECK-LABEL: @one_zext
+; CHECK-NOT: call i32 @llvm.arm.smlad
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+ ret i32 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i32
+ %conv4 = zext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i32
+ %conv8 = zext i16 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %add10 = add i32 %mul, %mac1.026
+ %add11 = add i32 %mul9, %add10
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define i32 @two_zext(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+; CHECK-LABEL: @two_zext
+; CHECK-NOT: call i32 @llvm.arm.smlad
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.body ]
+ ret i32 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i32 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = zext i16 %2 to i32
+ %conv4 = zext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = zext i16 %3 to i32
+ %conv8 = zext i16 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %add10 = add i32 %mul, %mac1.026
+ %add11 = add i32 %mul9, %add10
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
diff --git a/test/CodeGen/ARM/smladx-1.ll b/test/CodeGen/ARM/smladx-1.ll
new file mode 100644
index 000000000000..d5e9a0622ca8
--- /dev/null
+++ b/test/CodeGen/ARM/smladx-1.ll
@@ -0,0 +1,240 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define i32 @smladx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smladx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i32 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN23]], i32 [[IN12]], i32 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i32 @llvm.arm.smladx(i32 [[IN21]], i32 [[IN10]], i32 [[ACC1]])
+; CHECK-NOT: call i32 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i32 @llvm.arm.smlad
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i32 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i32 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+ %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sum.010.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sub.epil = sub i32 %j, %i.011.epil
+ %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+ %2 = load i16, i16* %arrayidx.epil, align 2
+ %conv.epil = sext i16 %2 to i32
+ %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+ %3 = load i16, i16* %arrayidx1.epil, align 2
+ %conv2.epil = sext i16 %3 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %add.epil = add nsw i32 %mul.epil, %sum.010.epil
+ %inc.epil = add nuw i32 %i.011.epil, 1
+ %epil.iter.sub = add i32 %epil.iter, -1
+ %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i32 %sum.0.lcssa
+
+for.body:
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i32 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+ %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+ %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+ %In2 = load i16, i16* %pIn2.0, align 2
+ %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+ %In1 = load i16, i16* %pIn1.0, align 2
+ %inc = or i32 %i.011, 1
+ %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+ %In2.1 = load i16, i16* %pIn2.1, align 2
+ %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+ %In1.1 = load i16, i16* %pIn1.1, align 2
+ %inc.1 = or i32 %i.011, 2
+ %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+ %In2.2 = load i16, i16* %pIn2.2, align 2
+ %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+ %In1.2 = load i16, i16* %pIn1.2, align 2
+ %inc.2 = or i32 %i.011, 3
+ %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+ %In2.3 = load i16, i16* %pIn2.3, align 2
+ %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+ %In1.3 = load i16, i16* %pIn1.3, align 2
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %mul = mul nsw i32 %sextIn1, %sextIn2
+ %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+ %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+ %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+ %add = add nsw i32 %mul, %sum.010
+ %add.1 = add nsw i32 %mul.1, %add
+ %add.2 = add nsw i32 %mul.2, %add.1
+ %add.3 = add nsw i32 %mul.3, %add.2
+ %inc.3 = add i32 %i.011, 4
+ %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+ %niter.nsub.3 = add i32 %niter, -4
+ %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i32 @smladx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smladx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i32 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i32 @llvm.arm.smladx(i32 [[IN2_2]], i32 [[IN1]], i32 [[ACC0]])
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i32 @llvm.arm.smladx(i32 [[IN2]], i32 [[IN1_2]], i32 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i32 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i32 @llvm.arm.smlad
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+ %2 = add i32 %j, -1
+ %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i32 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i32 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+ %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+ %3 = sub i32 %j, %i.011.unr
+ %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+ %4 = sub i32 0, %xtraiter
+ br label %for.body.epil
+
+for.body.epil:
+ %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+ %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+ %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+ %sum.010.epil = phi i32 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+ %5 = load i16, i16* %lsr.iv3, align 2
+ %conv.epil = sext i16 %5 to i32
+ %6 = load i16, i16* %lsr.iv, align 2
+ %conv2.epil = sext i16 %6 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %add.epil = add nsw i32 %mul.epil, %sum.010.epil
+ %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+ %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+ %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+ %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i32 %sum.0.lcssa
+
+for.body:
+ %pin2 = phi i16* [ %pin2_sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+ %pin1 = phi i16* [ %pin1_add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i32 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %pin2_add1 = getelementptr i16, i16* %pin2, i32 1
+ %In2 = load i16, i16* %pin2_add1, align 2
+ %pin1_sub2 = getelementptr i16, i16* %pin1, i32 -2
+ %In1 = load i16, i16* %pin1_sub2, align 2
+ %In2.1 = load i16, i16* %pin2, align 2
+ %pin1_sub1 = getelementptr i16, i16* %pin1, i32 -1
+ %In1.1 = load i16, i16* %pin1_sub1, align 2
+ %pin2_sub1 = getelementptr i16, i16* %pin2, i32 -1
+ %In2.2 = load i16, i16* %pin2_sub1, align 2
+ %In1.2 = load i16, i16* %pin1, align 2
+ %pin2_sub2 = getelementptr i16, i16* %pin2, i32 -2
+ %In2.3 = load i16, i16* %pin2_sub2, align 2
+ %pin1_add1 = getelementptr i16, i16* %pin1, i32 1
+ %In1.3 = load i16, i16* %pin1_add1, align 2
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %mul = mul nsw i32 %sextIn2, %sextIn1
+ %add = add nsw i32 %mul, %sum.010
+ %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+ %add.1 = add nsw i32 %mul.1, %add
+ %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+ %add.2 = add nsw i32 %mul.2, %add.1
+ %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+ %add.3 = add nsw i32 %mul.3, %add.2
+ %inc.3 = add i32 %i.011, 4
+ %pin1_add4 = getelementptr i16, i16* %pin1, i32 4
+ %pin2_sub4 = getelementptr i16, i16* %pin2, i32 -4
+ %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
diff --git a/test/CodeGen/ARM/smlald0.ll b/test/CodeGen/ARM/smlald0.ll
new file mode 100644
index 000000000000..97177366d566
--- /dev/null
+++ b/test/CodeGen/ARM/smlald0.ll
@@ -0,0 +1,173 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+;
+; The Cortex-M0 does not support unaligned accesses:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; Check DSP extension:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define dso_local i64 @OneReduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @OneReduction
+; CHECK: %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK: [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK: [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK: [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK: [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+; One reduction statement here:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i64
+ %conv4 = sext i16 %0 to i64
+ %mul = mul nsw i64 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i64
+ %conv8 = sext i16 %1 to i64
+ %mul9 = mul nsw i64 %conv7, %conv8
+ %add10 = add i64 %mul, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+ %add11 = add i64 %mul9, %add10
+
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define dso_local arm_aapcs_vfpcc i64 @TwoReductions(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @TwoReductions
+;
+; CHECK: %mac1{{\.}}058 = phi i64 [ [[V10:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: %mac2{{\.}}057 = phi i64 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: [[V10]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac1{{\.}}058)
+; CHECK: [[V17]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac2{{\.}}057)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+ %cmp55 = icmp sgt i32 %arg, 0
+ br i1 %cmp55, label %for.body.preheader, label %for.cond.cleanup
+
+for.cond.cleanup:
+ %mac2.0.lcssa = phi i64 [ 0, %entry ], [ %add28, %for.body ]
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add16, %for.body ]
+ %add30 = add nsw i64 %mac1.0.lcssa, %mac2.0.lcssa
+ ret i64 %add30
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+; And two reduction statements here:
+ %mac1.058 = phi i64 [ %add16, %for.body ], [ 0, %for.body.preheader ]
+ %mac2.057 = phi i64 [ %add28, %for.body ], [ 0, %for.body.preheader ]
+
+ %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.056
+ %0 = load i16, i16* %arrayidx, align 2
+ %add1 = or i32 %i.056, 1
+ %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
+ %1 = load i16, i16* %arrayidx2, align 2
+ %add3 = or i32 %i.056, 2
+ %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
+ %2 = load i16, i16* %arrayidx4, align 2
+
+ %add5 = or i32 %i.056, 3
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
+ %3 = load i16, i16* %arrayidx6, align 2
+ %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.056
+ %4 = load i16, i16* %arrayidx8, align 2
+ %conv = sext i16 %4 to i64
+ %conv9 = sext i16 %0 to i64
+ %mul = mul nsw i64 %conv, %conv9
+ %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
+ %5 = load i16, i16* %arrayidx11, align 2
+ %conv12 = sext i16 %5 to i64
+ %conv13 = sext i16 %1 to i64
+ %mul14 = mul nsw i64 %conv12, %conv13
+ %add15 = add i64 %mul, %mac1.058
+ %add16 = add i64 %add15, %mul14
+ %arrayidx18 = getelementptr inbounds i16, i16* %arg2, i32 %add3
+ %6 = load i16, i16* %arrayidx18, align 2
+ %conv19 = sext i16 %6 to i64
+ %conv20 = sext i16 %2 to i64
+ %mul21 = mul nsw i64 %conv19, %conv20
+ %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+ %7 = load i16, i16* %arrayidx23, align 2
+ %conv24 = sext i16 %7 to i64
+ %conv25 = sext i16 %3 to i64
+ %mul26 = mul nsw i64 %conv24, %conv25
+ %add27 = add i64 %mul21, %mac2.057
+ %add28 = add i64 %add27, %mul26
+ %add29 = add nuw nsw i32 %i.056, 4
+ %cmp = icmp slt i32 %add29, %arg
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+define i64 @reduction_zext(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+; CHECK-LABEL: @reduction_zext
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-NOT: call i32 @llvm.arm.smlad
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i64
+ %conv4 = zext i16 %0 to i64
+ %mul = mul nsw i64 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i64
+ %conv8 = zext i16 %1 to i64
+ %mul9 = mul nsw i64 %conv7, %conv8
+ %add10 = add i64 %mul, %mac1.026
+ %add11 = add i64 %mul9, %add10
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
diff --git a/test/CodeGen/ARM/smlald1.ll b/test/CodeGen/ARM/smlald1.ll
new file mode 100644
index 000000000000..61435e976742
--- /dev/null
+++ b/test/CodeGen/ARM/smlald1.ll
@@ -0,0 +1,94 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+
+; CHECK-LABEL: @test1
+; CHECK: %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK: [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK: [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK: [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK: [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+
+define dso_local i64 @test1(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i64
+ %conv4 = sext i16 %0 to i64
+ %mul = mul nsw i64 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i64
+ %conv8 = sext i16 %1 to i64
+ %mul9 = mul nsw i64 %conv7, %conv8
+ %add10 = add i64 %mul, %mac1.026
+
+; And here the Add is the LHS, the Mul the RHS
+ %add11 = add i64 %add10, %mul9
+
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+; Here we have i8 loads, which we do want to support, but don't handle yet.
+;
+; CHECK-LABEL: @test2
+; CHECK-NOT: call i64 @llvm.arm.smlad
+;
+define dso_local i64 @test2(i32 %arg, i32* nocapture readnone %arg1, i8* nocapture readonly %arg2, i8* nocapture readonly %arg3) {
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i8, i8* %arg3, align 2
+ %.pre27 = load i8, i8* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i8, i8* %arg3, i32 %i.025
+ %0 = load i8, i8* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i8, i8* %arg3, i32 %add
+ %1 = load i8, i8* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i8, i8* %arg2, i32 %i.025
+ %2 = load i8, i8* %arrayidx3, align 2
+ %conv = sext i8 %2 to i64
+ %conv4 = sext i8 %0 to i64
+ %mul = mul nsw i64 %conv, %conv4
+ %arrayidx6 = getelementptr inbounds i8, i8* %arg2, i32 %add
+ %3 = load i8, i8* %arrayidx6, align 2
+ %conv7 = sext i8 %3 to i64
+ %conv8 = sext i8 %1 to i64
+ %mul9 = mul nsw i64 %conv7, %conv8
+ %add10 = add i64 %mul, %mac1.026
+ %add11 = add i64 %add10, %mul9
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
diff --git a/test/CodeGen/ARM/smlald2.ll b/test/CodeGen/ARM/smlald2.ll
new file mode 100644
index 000000000000..517a9456c0ec
--- /dev/null
+++ b/test/CodeGen/ARM/smlald2.ll
@@ -0,0 +1,224 @@
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 < %s -arm-parallel-dsp -S | FileCheck %s
+;
+; The Cortex-M0 does not support unaligned accesses:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+;
+; Check DSP extension:
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define dso_local i64 @OneReduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @OneReduction
+; CHECK: %mac1{{\.}}026 = phi i64 [ [[V8:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: [[V4:%[0-9]+]] = bitcast i16* %arrayidx3 to i32*
+; CHECK: [[V5:%[0-9]+]] = load i32, i32* [[V4]], align 2
+; CHECK: [[V6:%[0-9]+]] = bitcast i16* %arrayidx to i32*
+; CHECK: [[V7:%[0-9]+]] = load i32, i32* [[V6]], align 2
+; CHECK: [[V8]] = call i64 @llvm.arm.smlald(i32 [[V5]], i32 [[V7]], i64 %mac1{{\.}}026)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+; One reduction statement here:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i32
+ %conv4 = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %sext0 = sext i32 %mul to i64
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i32
+ %conv8 = sext i16 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %sext1 = sext i32 %mul9 to i64
+ %add10 = add i64 %sext0, %mac1.026
+
+; Here the Mul is the LHS, and the Add the RHS.
+ %add11 = add i64 %sext1, %add10
+
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define dso_local arm_aapcs_vfpcc i64 @TwoReductions(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+;
+; CHECK-LABEL: @TwoReductions
+;
+; CHECK: %mac1{{\.}}058 = phi i64 [ [[V10:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: %mac2{{\.}}057 = phi i64 [ [[V17:%[0-9]+]], %for.body ], [ 0, %for.body.preheader ]
+; CHECK: [[V10]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac1{{\.}}058)
+; CHECK: [[V17]] = call i64 @llvm.arm.smlald(i32 %{{.*}}, i32 %{{.*}}, i64 %mac2{{\.}}057)
+; CHECK-NOT: call i64 @llvm.arm.smlald
+;
+entry:
+ %cmp55 = icmp sgt i32 %arg, 0
+ br i1 %cmp55, label %for.body.preheader, label %for.cond.cleanup
+
+for.cond.cleanup:
+ %mac2.0.lcssa = phi i64 [ 0, %entry ], [ %add28, %for.body ]
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add16, %for.body ]
+ %add30 = add nsw i64 %mac1.0.lcssa, %mac2.0.lcssa
+ ret i64 %add30
+
+for.body.preheader:
+ br label %for.body
+
+for.body:
+; And two reduction statements here:
+ %mac1.058 = phi i64 [ %add16, %for.body ], [ 0, %for.body.preheader ]
+ %mac2.057 = phi i64 [ %add28, %for.body ], [ 0, %for.body.preheader ]
+
+ %i.056 = phi i32 [ %add29, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.056
+ %0 = load i16, i16* %arrayidx, align 2
+ %add1 = or i32 %i.056, 1
+ %arrayidx2 = getelementptr inbounds i16, i16* %arg3, i32 %add1
+ %1 = load i16, i16* %arrayidx2, align 2
+ %add3 = or i32 %i.056, 2
+ %arrayidx4 = getelementptr inbounds i16, i16* %arg3, i32 %add3
+ %2 = load i16, i16* %arrayidx4, align 2
+
+ %add5 = or i32 %i.056, 3
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg3, i32 %add5
+ %3 = load i16, i16* %arrayidx6, align 2
+ %arrayidx8 = getelementptr inbounds i16, i16* %arg2, i32 %i.056
+ %4 = load i16, i16* %arrayidx8, align 2
+ %conv = sext i16 %4 to i32
+ %conv9 = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv9
+ %sext0 = sext i32 %mul to i64
+ %arrayidx11 = getelementptr inbounds i16, i16* %arg2, i32 %add1
+ %5 = load i16, i16* %arrayidx11, align 2
+ %conv12 = sext i16 %5 to i32
+ %conv13 = sext i16 %1 to i32
+ %mul14 = mul nsw i32 %conv12, %conv13
+ %sext1 = sext i32 %mul14 to i64
+ %add15 = add i64 %sext0, %mac1.058
+ %add16 = add i64 %add15, %sext1
+ %arrayidx18 = getelementptr inbounds i16, i16* %arg2, i32 %add3
+ %6 = load i16, i16* %arrayidx18, align 2
+ %conv19 = sext i16 %6 to i32
+ %conv20 = sext i16 %2 to i32
+ %mul21 = mul nsw i32 %conv19, %conv20
+ %sext2 = sext i32 %mul21 to i64
+ %arrayidx23 = getelementptr inbounds i16, i16* %arg2, i32 %add5
+ %7 = load i16, i16* %arrayidx23, align 2
+ %conv24 = sext i16 %7 to i32
+ %conv25 = sext i16 %3 to i32
+ %mul26 = mul nsw i32 %conv24, %conv25
+ %sext3 = sext i32 %mul26 to i64
+ %add27 = add i64 %sext2, %mac2.057
+ %add28 = add i64 %add27, %sext3
+ %add29 = add nuw nsw i32 %i.056, 4
+ %cmp = icmp slt i32 %add29, %arg
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+define i64 @zext_mul_reduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+; CHECK-LABEL: @zext_mul_reduction
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-NOT: call i32 @llvm.arm.smlad
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = zext i16 %2 to i32
+ %conv4 = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %sext0 = sext i32 %mul to i64
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = zext i16 %3 to i32
+ %conv8 = sext i16 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %sext1 = sext i32 %mul9 to i64
+ %add10 = add i64 %sext0, %mac1.026
+ %add11 = add i64 %sext1, %add10
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
+
+define i64 @zext_add_reduction(i32 %arg, i32* nocapture readnone %arg1, i16* nocapture readonly %arg2, i16* nocapture readonly %arg3) {
+; CHECK-LABEL: @zext_add_reduction
+; CHECK-NOT: call i64 @llvm.arm.smlald
+; CHECK-NOT: call i32 @llvm.arm.smlad
+entry:
+ %cmp24 = icmp sgt i32 %arg, 0
+ br i1 %cmp24, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %.pre = load i16, i16* %arg3, align 2
+ %.pre27 = load i16, i16* %arg2, align 2
+ br label %for.body
+
+for.cond.cleanup:
+ %mac1.0.lcssa = phi i64 [ 0, %entry ], [ %add11, %for.body ]
+ ret i64 %mac1.0.lcssa
+
+for.body:
+ %mac1.026 = phi i64 [ %add11, %for.body ], [ 0, %for.body.preheader ]
+ %i.025 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i16, i16* %arg3, i32 %i.025
+ %0 = load i16, i16* %arrayidx, align 2
+ %add = add nuw nsw i32 %i.025, 1
+ %arrayidx1 = getelementptr inbounds i16, i16* %arg3, i32 %add
+ %1 = load i16, i16* %arrayidx1, align 2
+ %arrayidx3 = getelementptr inbounds i16, i16* %arg2, i32 %i.025
+ %2 = load i16, i16* %arrayidx3, align 2
+ %conv = sext i16 %2 to i32
+ %conv4 = sext i16 %0 to i32
+ %mul = mul nsw i32 %conv, %conv4
+ %sext0 = zext i32 %mul to i64
+ %arrayidx6 = getelementptr inbounds i16, i16* %arg2, i32 %add
+ %3 = load i16, i16* %arrayidx6, align 2
+ %conv7 = sext i16 %3 to i32
+ %conv8 = sext i16 %1 to i32
+ %mul9 = mul nsw i32 %conv7, %conv8
+ %sext1 = zext i32 %mul9 to i64
+ %add10 = add i64 %sext0, %mac1.026
+ %add11 = add i64 %sext1, %add10
+ %exitcond = icmp ne i32 %add, %arg
+ br i1 %exitcond, label %for.body, label %for.cond.cleanup
+}
diff --git a/test/CodeGen/ARM/smlaldx-1.ll b/test/CodeGen/ARM/smlaldx-1.ll
new file mode 100644
index 000000000000..e615f209f57a
--- /dev/null
+++ b/test/CodeGen/ARM/smlaldx-1.ll
@@ -0,0 +1,249 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define i64 @smlaldx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smlaldx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN23]], i32 [[IN12]], i64 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN21]], i32 [[IN10]], i64 [[ACC1]])
+; CHECK-NOT: call i64 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+ %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sub.epil = sub i32 %j, %i.011.epil
+ %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+ %2 = load i16, i16* %arrayidx.epil, align 2
+ %conv.epil = sext i16 %2 to i32
+ %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+ %3 = load i16, i16* %arrayidx1.epil, align 2
+ %conv2.epil = sext i16 %3 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %sext.mul.epil = sext i32 %mul.epil to i64
+ %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+ %inc.epil = add nuw i32 %i.011.epil, 1
+ %epil.iter.sub = add i32 %epil.iter, -1
+ %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i64 %sum.0.lcssa
+
+for.body:
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+ %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+ %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+ %In2 = load i16, i16* %pIn2.0, align 2
+ %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+ %In1 = load i16, i16* %pIn1.0, align 2
+ %inc = or i32 %i.011, 1
+ %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+ %In2.1 = load i16, i16* %pIn2.1, align 2
+ %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+ %In1.1 = load i16, i16* %pIn1.1, align 2
+ %inc.1 = or i32 %i.011, 2
+ %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+ %In2.2 = load i16, i16* %pIn2.2, align 2
+ %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+ %In1.2 = load i16, i16* %pIn1.2, align 2
+ %inc.2 = or i32 %i.011, 3
+ %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+ %In2.3 = load i16, i16* %pIn2.3, align 2
+ %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+ %In1.3 = load i16, i16* %pIn1.3, align 2
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %mul = mul nsw i32 %sextIn1, %sextIn2
+ %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+ %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+ %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+ %sext.mul = sext i32 %mul to i64
+ %sext.mul.1 = sext i32 %mul.1 to i64
+ %sext.mul.2 = sext i32 %mul.2 to i64
+ %sext.mul.3 = sext i32 %mul.3 to i64
+ %add = add nsw i64 %sext.mul, %sum.010
+ %add.1 = add nsw i64 %sext.mul.1, %add
+ %add.2 = add nsw i64 %sext.mul.2, %add.1
+ %add.3 = add nsw i64 %sext.mul.3, %add.2
+ %inc.3 = add i32 %i.011, 4
+ %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+ %niter.nsub.3 = add i32 %niter, -4
+ %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i64 @smlaldx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+ %2 = add i32 %j, -1
+ %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+ %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+ %3 = sub i32 %j, %i.011.unr
+ %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+ %4 = sub i32 0, %xtraiter
+ br label %for.body.epil
+
+for.body.epil:
+ %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+ %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+ %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+ %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+ %5 = load i16, i16* %lsr.iv3, align 2
+ %conv.epil = sext i16 %5 to i32
+ %6 = load i16, i16* %lsr.iv, align 2
+ %conv2.epil = sext i16 %6 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %sext.mul.epil = sext i32 %mul.epil to i64
+ %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+ %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+ %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+ %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+ %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i64 %sum.0.lcssa
+
+; CHECK-LABEL: smlaldx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN2_2]], i32 [[IN1]], i64 [[ACC0]])
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN2]], i32 [[IN1_2]], i64 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i64 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
+
+for.body:
+ %pin2 = phi i16* [ %pin2.sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+ %pin1 = phi i16* [ %pin1.add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %pin2.add1 = getelementptr i16, i16* %pin2, i32 1
+ %In2 = load i16, i16* %pin2.add1, align 2
+ %pin1.sub2 = getelementptr i16, i16* %pin1, i32 -2
+ %In1 = load i16, i16* %pin1.sub2, align 2
+ %In2.1 = load i16, i16* %pin2, align 2
+ %pin1.sub1 = getelementptr i16, i16* %pin1, i32 -1
+ %In1.1 = load i16, i16* %pin1.sub1, align 2
+ %pin2.sub1 = getelementptr i16, i16* %pin2, i32 -1
+ %In2.2 = load i16, i16* %pin2.sub1, align 2
+ %In1.2 = load i16, i16* %pin1, align 2
+ %pin2.sub2 = getelementptr i16, i16* %pin2, i32 -2
+ %In2.3 = load i16, i16* %pin2.sub2, align 2
+ %pin1.add1 = getelementptr i16, i16* %pin1, i32 1
+ %In1.3 = load i16, i16* %pin1.add1, align 2
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %mul = mul nsw i32 %sextIn2, %sextIn1
+ %sext.mul = sext i32 %mul to i64
+ %add = add nsw i64 %sext.mul, %sum.010
+ %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+ %sext.mul.1 = sext i32 %mul.1 to i64
+ %add.1 = add nsw i64 %sext.mul.1, %add
+ %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+ %sext.mul.2 = sext i32 %mul.2 to i64
+ %add.2 = add nsw i64 %sext.mul.2, %add.1
+ %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+ %sext.mul.3 = sext i32 %mul.3 to i64
+ %add.3 = add nsw i64 %sext.mul.3, %add.2
+ %inc.3 = add i32 %i.011, 4
+ %pin1.add4 = getelementptr i16, i16* %pin1, i32 4
+ %pin2.sub4 = getelementptr i16, i16* %pin2, i32 -4
+ %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
diff --git a/test/CodeGen/ARM/smlaldx-2.ll b/test/CodeGen/ARM/smlaldx-2.ll
new file mode 100644
index 000000000000..a4b5a272dc60
--- /dev/null
+++ b/test/CodeGen/ARM/smlaldx-2.ll
@@ -0,0 +1,248 @@
+; RUN: opt -mtriple=thumbv8m.main -mcpu=cortex-m33 -arm-parallel-dsp %s -S -o - | FileCheck %s
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m0 < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+; RUN: opt -mtriple=arm-arm-eabi -mcpu=cortex-m33 -mattr=-dsp < %s -arm-parallel-dsp -S | FileCheck %s --check-prefix=CHECK-UNSUPPORTED
+
+define i64 @smlaldx(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+; CHECK-LABEL: smlaldx
+; CHECK: = phi i32 [ 0, %for.body.preheader.new ],
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN23:%[^ ]+]] = bitcast i16* %pIn2.3 to i32*
+; CHECK: [[IN23:%[^ ]+]] = load i32, i32* [[PIN23]], align 2
+; CHECK: [[PIN12:%[^ ]+]] = bitcast i16* %pIn1.2 to i32*
+; CHECK: [[IN12:%[^ ]+]] = load i32, i32* [[PIN12]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN23]], i32 [[IN12]], i64 [[ACC0]])
+; CHECK: [[PIN21:%[^ ]+]] = bitcast i16* %pIn2.1 to i32*
+; CHECK: [[IN21:%[^ ]+]] = load i32, i32* [[PIN21]], align 2
+; CHECK: [[PIN10:%[^ ]+]] = bitcast i16* %pIn1.0 to i32*
+; CHECK: [[IN10:%[^ ]+]] = load i32, i32* [[PIN10]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN21]], i32 [[IN10]], i64 [[ACC1]])
+; CHECK-NOT: call i64 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil
+
+for.body.epil:
+ %i.011.epil = phi i32 [ %inc.epil, %for.body.epil ], [ %i.011.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %epil.iter = phi i32 [ %epil.iter.sub, %for.body.epil ], [ %xtraiter, %for.cond.cleanup.loopexit.unr-lcssa ]
+ %sub.epil = sub i32 %j, %i.011.epil
+ %arrayidx.epil = getelementptr inbounds i16, i16* %pIn2, i32 %sub.epil
+ %2 = load i16, i16* %arrayidx.epil, align 2
+ %conv.epil = sext i16 %2 to i32
+ %arrayidx1.epil = getelementptr inbounds i16, i16* %pIn1, i32 %i.011.epil
+ %3 = load i16, i16* %arrayidx1.epil, align 2
+ %conv2.epil = sext i16 %3 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %sext.mul.epil = sext i32 %mul.epil to i64
+ %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+ %inc.epil = add nuw i32 %i.011.epil, 1
+ %epil.iter.sub = add i32 %epil.iter, -1
+ %epil.iter.cmp = icmp eq i32 %epil.iter.sub, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i64 %sum.0.lcssa
+
+for.body:
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %niter = phi i32 [ %unroll_iter, %for.body.preheader.new ], [ %niter.nsub.3, %for.body ]
+ %pIn2Base = phi i16* [ %pIn2, %for.body.preheader.new ], [ %pIn2.4, %for.body ]
+ %pIn2.0 = getelementptr inbounds i16, i16* %pIn2Base, i32 0
+ %In2 = load i16, i16* %pIn2.0, align 2
+ %pIn1.0 = getelementptr inbounds i16, i16* %pIn1, i32 %i.011
+ %In1 = load i16, i16* %pIn1.0, align 2
+ %inc = or i32 %i.011, 1
+ %pIn2.1 = getelementptr inbounds i16, i16* %pIn2Base, i32 -1
+ %In2.1 = load i16, i16* %pIn2.1, align 2
+ %pIn1.1 = getelementptr inbounds i16, i16* %pIn1, i32 %inc
+ %In1.1 = load i16, i16* %pIn1.1, align 2
+ %inc.1 = or i32 %i.011, 2
+ %pIn2.2 = getelementptr inbounds i16, i16* %pIn2Base, i32 -2
+ %In2.2 = load i16, i16* %pIn2.2, align 2
+ %pIn1.2 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.1
+ %In1.2 = load i16, i16* %pIn1.2, align 2
+ %inc.2 = or i32 %i.011, 3
+ %pIn2.3 = getelementptr inbounds i16, i16* %pIn2Base, i32 -3
+ %In2.3 = load i16, i16* %pIn2.3, align 2
+ %pIn1.3 = getelementptr inbounds i16, i16* %pIn1, i32 %inc.2
+ %In1.3 = load i16, i16* %pIn1.3, align 2
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %mul = mul nsw i32 %sextIn1, %sextIn2
+ %mul.1 = mul nsw i32 %sextIn1.1, %sextIn2.1
+ %mul.2 = mul nsw i32 %sextIn1.2, %sextIn2.2
+ %mul.3 = mul nsw i32 %sextIn1.3, %sextIn2.3
+ %sext.mul = sext i32 %mul to i64
+ %sext.mul.1 = sext i32 %mul.1 to i64
+ %sext.mul.2 = sext i32 %mul.2 to i64
+ %sext.mul.3 = sext i32 %mul.3 to i64
+ %add = add nsw i64 %sum.010, %sext.mul
+ %add.1 = add nsw i64 %sext.mul.1, %add
+ %add.2 = add nsw i64 %add.1, %sext.mul.2
+ %add.3 = add nsw i64 %sext.mul.3, %add.2
+ %inc.3 = add i32 %i.011, 4
+ %pIn2.4 = getelementptr inbounds i16, i16* %pIn2Base, i32 -4
+ %niter.nsub.3 = add i32 %niter, -4
+ %niter.ncmp.3 = icmp eq i32 %niter.nsub.3, 0
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
+
+define i64 @smlaldx_swap(i16* nocapture readonly %pIn1, i16* nocapture readonly %pIn2, i32 %j, i32 %limit) {
+
+entry:
+ %cmp9 = icmp eq i32 %limit, 0
+ br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:
+ %0 = add i32 %limit, -1
+ %xtraiter = and i32 %limit, 3
+ %1 = icmp ult i32 %0, 3
+ br i1 %1, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body.preheader.new
+
+for.body.preheader.new:
+ %unroll_iter = sub i32 %limit, %xtraiter
+ %scevgep6 = getelementptr i16, i16* %pIn1, i32 2
+ %2 = add i32 %j, -1
+ %scevgep11 = getelementptr i16, i16* %pIn2, i32 %2
+ br label %for.body
+
+for.cond.cleanup.loopexit.unr-lcssa:
+ %add.lcssa.ph = phi i64 [ undef, %for.body.preheader ], [ %add.3, %for.body ]
+ %i.011.unr = phi i32 [ 0, %for.body.preheader ], [ %inc.3, %for.body ]
+ %sum.010.unr = phi i64 [ 0, %for.body.preheader ], [ %add.3, %for.body ]
+ %lcmp.mod = icmp eq i32 %xtraiter, 0
+ br i1 %lcmp.mod, label %for.cond.cleanup, label %for.body.epil.preheader
+
+for.body.epil.preheader:
+ %scevgep = getelementptr i16, i16* %pIn1, i32 %i.011.unr
+ %3 = sub i32 %j, %i.011.unr
+ %scevgep2 = getelementptr i16, i16* %pIn2, i32 %3
+ %4 = sub i32 0, %xtraiter
+ br label %for.body.epil
+
+for.body.epil:
+ %lsr.iv5 = phi i32 [ %4, %for.body.epil.preheader ], [ %lsr.iv.next, %for.body.epil ]
+ %lsr.iv3 = phi i16* [ %scevgep2, %for.body.epil.preheader ], [ %scevgep4, %for.body.epil ]
+ %lsr.iv = phi i16* [ %scevgep, %for.body.epil.preheader ], [ %scevgep1, %for.body.epil ]
+ %sum.010.epil = phi i64 [ %add.epil, %for.body.epil ], [ %sum.010.unr, %for.body.epil.preheader ]
+ %5 = load i16, i16* %lsr.iv3, align 2
+ %conv.epil = sext i16 %5 to i32
+ %6 = load i16, i16* %lsr.iv, align 2
+ %conv2.epil = sext i16 %6 to i32
+ %mul.epil = mul nsw i32 %conv2.epil, %conv.epil
+ %sext.mul.epil = sext i32 %mul.epil to i64
+ %add.epil = add nsw i64 %sext.mul.epil, %sum.010.epil
+ %scevgep1 = getelementptr i16, i16* %lsr.iv, i32 1
+ %scevgep4 = getelementptr i16, i16* %lsr.iv3, i32 -1
+ %lsr.iv.next = add nsw i32 %lsr.iv5, 1
+ %epil.iter.cmp = icmp eq i32 %lsr.iv.next, 0
+ br i1 %epil.iter.cmp, label %for.cond.cleanup, label %for.body.epil
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i64 [ 0, %entry ], [ %add.lcssa.ph, %for.cond.cleanup.loopexit.unr-lcssa ], [ %add.epil, %for.body.epil ]
+ ret i64 %sum.0.lcssa
+
+; CHECK-LABEL: smlaldx_swap
+; CHECK: for.body.preheader.new:
+; CHECK: [[PIN1Base:[^ ]+]] = getelementptr i16, i16* %pIn1
+; CHECK: [[PIN2Base:[^ ]+]] = getelementptr i16, i16* %pIn2
+
+; CHECK: for.body:
+; CHECK: [[PIN2:%[^ ]+]] = phi i16* [ [[PIN2_NEXT:%[^ ]+]], %for.body ], [ [[PIN2Base]], %for.body.preheader.new ]
+; CHECK: [[PIN1:%[^ ]+]] = phi i16* [ [[PIN1_NEXT:%[^ ]+]], %for.body ], [ [[PIN1Base]], %for.body.preheader.new ]
+; CHECK: [[IV:%[^ ]+]] = phi i32
+; CHECK: [[ACC0:%[^ ]+]] = phi i64 [ 0, %for.body.preheader.new ], [ [[ACC2:%[^ ]+]], %for.body ]
+; CHECK: [[PIN1_2:%[^ ]+]] = getelementptr i16, i16* [[PIN1]], i32 -2
+; CHECK: [[PIN2_2:%[^ ]+]] = getelementptr i16, i16* [[PIN2]], i32 -2
+
+; CHECK: [[PIN2_CAST:%[^ ]+]] = bitcast i16* [[PIN2]] to i32*
+; CHECK: [[IN2:%[^ ]+]] = load i32, i32* [[PIN2_CAST]], align 2
+; CHECK: [[PIN1_2_CAST:%[^ ]+]] = bitcast i16* [[PIN1_2]] to i32*
+; CHECK: [[IN1_2:%[^ ]+]] = load i32, i32* [[PIN1_2_CAST]], align 2
+; CHECK: [[ACC1:%[^ ]+]] = call i64 @llvm.arm.smlaldx(i32 [[IN2]], i32 [[IN1_2]], i64 [[ACC0]])
+
+; CHECK: [[PIN1_CAST:%[^ ]+]] = bitcast i16* [[PIN1]] to i32*
+; CHECK: [[IN1:%[^ ]+]] = load i32, i32* [[PIN1_CAST]], align 2
+; CHECK: [[PIN2_2_CAST:%[^ ]+]] = bitcast i16* [[PIN2_2]] to i32*
+; CHECK: [[IN2_2:%[^ ]+]] = load i32, i32* [[PIN2_2_CAST]], align 2
+; CHECK: [[ACC2]] = call i64 @llvm.arm.smlaldx(i32 [[IN1]], i32 [[IN2_2]], i64 [[ACC1]])
+
+; CHECK: [[PIN1_NEXT]] = getelementptr i16, i16* [[PIN1]], i32 4
+; CHECK: [[PIN2_NEXT]] = getelementptr i16, i16* [[PIN2]], i32 -4
+
+; CHECK-NOT: call i64 @llvm.arm.smlad
+; CHECK-UNSUPPORTED-NOT: call i64 @llvm.arm.smlad
+
+for.body:
+ %pin2 = phi i16* [ %pin2.sub4, %for.body ], [ %scevgep11, %for.body.preheader.new ]
+ %pin1 = phi i16* [ %pin1.add4, %for.body ], [ %scevgep6, %for.body.preheader.new ]
+ %i.011 = phi i32 [ 0, %for.body.preheader.new ], [ %inc.3, %for.body ]
+ %sum.010 = phi i64 [ 0, %for.body.preheader.new ], [ %add.3, %for.body ]
+ %pin2.add1 = getelementptr i16, i16* %pin2, i32 1
+ %In2 = load i16, i16* %pin2.add1, align 2
+ %pin1.sub2 = getelementptr i16, i16* %pin1, i32 -2
+ %In1 = load i16, i16* %pin1.sub2, align 2
+ %In2.1 = load i16, i16* %pin2, align 2
+ %pin1.sub1 = getelementptr i16, i16* %pin1, i32 -1
+ %In1.1 = load i16, i16* %pin1.sub1, align 2
+ %pin2.sub1 = getelementptr i16, i16* %pin2, i32 -1
+ %In2.2 = load i16, i16* %pin2.sub1, align 2
+ %In1.2 = load i16, i16* %pin1, align 2
+ %pin2.sub2 = getelementptr i16, i16* %pin2, i32 -2
+ %In2.3 = load i16, i16* %pin2.sub2, align 2
+ %pin1.add1 = getelementptr i16, i16* %pin1, i32 1
+ %In1.3 = load i16, i16* %pin1.add1, align 2
+ %sextIn2 = sext i16 %In2 to i32
+ %sextIn1 = sext i16 %In1 to i32
+ %sextIn2.1 = sext i16 %In2.1 to i32
+ %sextIn1.1 = sext i16 %In1.1 to i32
+ %sextIn2.2 = sext i16 %In2.2 to i32
+ %sextIn1.2 = sext i16 %In1.2 to i32
+ %sextIn2.3 = sext i16 %In2.3 to i32
+ %sextIn1.3 = sext i16 %In1.3 to i32
+ %mul = mul nsw i32 %sextIn2, %sextIn1
+ %sext.mul = sext i32 %mul to i64
+ %add = add nsw i64 %sext.mul, %sum.010
+ %mul.1 = mul nsw i32 %sextIn2.1, %sextIn1.1
+ %sext.mul.1 = sext i32 %mul.1 to i64
+ %add.1 = add nsw i64 %sext.mul.1, %add
+ %mul.2 = mul nsw i32 %sextIn2.2, %sextIn1.2
+ %sext.mul.2 = sext i32 %mul.2 to i64
+ %add.2 = add nsw i64 %add.1, %sext.mul.2
+ %mul.3 = mul nsw i32 %sextIn2.3, %sextIn1.3
+ %sext.mul.3 = sext i32 %mul.3 to i64
+ %add.3 = add nsw i64 %add.2, %sext.mul.3
+ %inc.3 = add i32 %i.011, 4
+ %pin1.add4 = getelementptr i16, i16* %pin1, i32 4
+ %pin2.sub4 = getelementptr i16, i16* %pin2, i32 -4
+ %niter.ncmp.3 = icmp eq i32 %unroll_iter, %inc.3
+ br i1 %niter.ncmp.3, label %for.cond.cleanup.loopexit.unr-lcssa, label %for.body
+}
diff --git a/test/CodeGen/ARM/smml.ll b/test/CodeGen/ARM/smml.ll
index ba996e5ddd88..af34000b2a0b 100644
--- a/test/CodeGen/ARM/smml.ll
+++ b/test/CodeGen/ARM/smml.ll
@@ -6,7 +6,7 @@
; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V4
-; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6T2
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
define i32 @Test0(i32 %a, i32 %b, i32 %c) nounwind readnone ssp {
entry:
@@ -44,7 +44,7 @@ declare void @opaque(i32)
define void @test_used_flags(i32 %in1, i32 %in2) {
; CHECK-LABEL: test_used_flags:
; CHECK-THUMB: movs r2, #0
-; CHECK-THUMB: subs r0, r2, r0
+; CHECK-THUMB: rsbs r0, r0, #0
; CHECK-THUMB: sbcs r2, r1
; CHECK-THUMB: bge
; CHECK-V6: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
diff --git a/test/CodeGen/ARM/smul.ll b/test/CodeGen/ARM/smul.ll
index 2b7be41ddb24..7091f8d19148 100644
--- a/test/CodeGen/ARM/smul.ll
+++ b/test/CodeGen/ARM/smul.ll
@@ -1,14 +1,14 @@
-; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o /dev/null
+; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o - | FileCheck %s --check-prefix=DISABLED
; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; RUN: llc -mtriple=thumb--none-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
; RUN: llc -mtriple=thumbv6t2-none-eabi %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumbv6-none-eabi %s -o - | FileCheck %s -check-prefix=CHECK-THUMBV6
+; RUN: llc -mtriple=thumbv6-none-eabi %s -o - | FileCheck %s -check-prefix=DISABLED
define i32 @f1(i16 %x, i32 %y) {
; CHECK-LABEL: f1:
; CHECK-NOT: sxth
; CHECK: {{smulbt r0, r0, r1|smultb r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smulbt|smultb}}
+; DISABLED-NOT: {{smulbt|smultb}}
%tmp1 = sext i16 %x to i32
%tmp2 = ashr i32 %y, 16
%tmp3 = mul i32 %tmp2, %tmp1
@@ -18,7 +18,7 @@ define i32 @f1(i16 %x, i32 %y) {
define i32 @f2(i32 %x, i32 %y) {
; CHECK-LABEL: f2:
; CHECK: smultt
-; CHECK-THUMBV6-NOT: smultt
+; DISABLED-NOT: smultt
%tmp1 = ashr i32 %x, 16
%tmp3 = ashr i32 %y, 16
%tmp4 = mul i32 %tmp3, %tmp1
@@ -29,7 +29,7 @@ define i32 @f3(i32 %a, i16 %x, i32 %y) {
; CHECK-LABEL: f3:
; CHECK-NOT: sxth
; CHECK: {{smlabt r0, r1, r2, r0|smlatb r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlabt|smlatb}}
+; DISABLED-NOT: {{smlabt|smlatb}}
%tmp = sext i16 %x to i32
%tmp2 = ashr i32 %y, 16
%tmp3 = mul i32 %tmp2, %tmp
@@ -40,7 +40,7 @@ define i32 @f3(i32 %a, i16 %x, i32 %y) {
define i32 @f4(i32 %a, i32 %x, i32 %y) {
; CHECK-LABEL: f4:
; CHECK: smlatt
-; CHECK-THUMBV6-NOT: smlatt
+; DISABLED-NOT: smlatt
%tmp1 = ashr i32 %x, 16
%tmp3 = ashr i32 %y, 16
%tmp4 = mul i32 %tmp3, %tmp1
@@ -52,7 +52,7 @@ define i32 @f5(i32 %a, i16 %x, i16 %y) {
; CHECK-LABEL: f5:
; CHECK-NOT: sxth
; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
%tmp1 = sext i16 %x to i32
%tmp3 = sext i16 %y to i32
%tmp4 = mul i32 %tmp3, %tmp1
@@ -64,7 +64,7 @@ define i32 @f6(i32 %a, i32 %x, i16 %y) {
; CHECK-LABEL: f6:
; CHECK-NOT: sxth
; CHECK: {{smlatb r0, r1, r2, r0|smlabt r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlatb|smlabt}}
+; DISABLED-NOT: {{smlatb|smlabt}}
%tmp1 = sext i16 %y to i32
%tmp2 = ashr i32 %x, 16
%tmp3 = mul i32 %tmp2, %tmp1
@@ -75,7 +75,7 @@ define i32 @f6(i32 %a, i32 %x, i16 %y) {
define i32 @f7(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: f7:
; CHECK: smlawb r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
%shl = shl i32 %b, 16
%shr = ashr exact i32 %shl, 16
%conv = sext i32 %a to i64
@@ -91,7 +91,7 @@ define i32 @f8(i32 %a, i16 signext %b, i32 %c) {
; CHECK-LABEL: f8:
; CHECK-NOT: sxth
; CHECK: smlawb r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
%conv = sext i32 %a to i64
%conv1 = sext i16 %b to i64
%mul = mul nsw i64 %conv1, %conv
@@ -104,7 +104,7 @@ define i32 @f8(i32 %a, i16 signext %b, i32 %c) {
define i32 @f9(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: f9:
; CHECK: smlawt r0, r0, r1, r2
-; CHECK-THUMBV6-NOT: smlawt
+; DISABLED-NOT: smlawt
%conv = sext i32 %a to i64
%shr = ashr i32 %b, 16
%conv1 = sext i32 %shr to i64
@@ -118,7 +118,7 @@ define i32 @f9(i32 %a, i32 %b, i32 %c) {
define i32 @f10(i32 %a, i32 %b) {
; CHECK-LABEL: f10:
; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
%shl = shl i32 %b, 16
%shr = ashr exact i32 %shl, 16
%conv = sext i32 %a to i64
@@ -133,7 +133,7 @@ define i32 @f11(i32 %a, i16 signext %b) {
; CHECK-LABEL: f11:
; CHECK-NOT: sxth
; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
%conv = sext i32 %a to i64
%conv1 = sext i16 %b to i64
%mul = mul nsw i64 %conv1, %conv
@@ -145,7 +145,7 @@ define i32 @f11(i32 %a, i16 signext %b) {
define i32 @f12(i32 %a, i32 %b) {
; CHECK-LABEL: f12:
; CHECK: smulwt r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwt
+; DISABLED-NOT: smulwt
%conv = sext i32 %a to i64
%shr = ashr i32 %b, 16
%conv1 = sext i32 %shr to i64
@@ -159,7 +159,7 @@ define i32 @f13(i32 %x, i16 %y) {
; CHECK-LABEL: f13:
; CHECK-NOT: sxth
; CHECK: {{smultb r0, r0, r1|smulbt r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smultb|smulbt}}
+; DISABLED-NOT: {{smultb|smulbt}}
%tmp1 = sext i16 %y to i32
%tmp2 = ashr i32 %x, 16
%tmp3 = mul i32 %tmp2, %tmp1
@@ -169,11 +169,11 @@ define i32 @f13(i32 %x, i16 %y) {
define i32 @f14(i32 %x, i32 %y) {
; CHECK-LABEL: f14:
; CHECK-NOT: sxth
-; CHECK: {{smultb r0, r0, r1|smulbt r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smultb|smulbt}}
- %tmp1 = shl i32 %y, 16
+; CHECK: {{smultb r0, r1, r0|smulbt r0, r0, r1}}
+; DISABLED-NOT: {{smultb|smulbt}}
+ %tmp1 = shl i32 %x, 16
%tmp2 = ashr i32 %tmp1, 16
- %tmp3 = ashr i32 %x, 16
+ %tmp3 = ashr i32 %y, 16
%tmp4 = mul i32 %tmp3, %tmp2
ret i32 %tmp4
}
@@ -182,7 +182,7 @@ define i32 @f15(i32 %x, i32 %y) {
; CHECK-LABEL: f15:
; CHECK-NOT: sxth
; CHECK: {{smulbt r0, r0, r1|smultb r0, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smulbt|smultb}}
+; DISABLED-NOT: {{smulbt|smultb}}
%tmp1 = shl i32 %x, 16
%tmp2 = ashr i32 %tmp1, 16
%tmp3 = ashr i32 %y, 16
@@ -194,7 +194,7 @@ define i32 @f16(i16 %x, i16 %y) {
; CHECK-LABEL: f16:
; CHECK-NOT: sxth
; CHECK: smulbb
-; CHECK-THUMBV6-NOT: smulbb
+; DISABLED-NOT: smulbb
%tmp1 = sext i16 %x to i32
%tmp2 = sext i16 %x to i32
%tmp3 = mul i32 %tmp1, %tmp2
@@ -203,8 +203,9 @@ define i32 @f16(i16 %x, i16 %y) {
define i32 @f17(i32 %x, i32 %y) {
; CHECK-LABEL: f17:
+; CHECK-NOT: sxth
; CHECK: smulbb
-; CHECK-THUMBV6-NOT: smulbb
+; DISABLED-NOT: smulbb
%tmp1 = shl i32 %x, 16
%tmp2 = shl i32 %y, 16
%tmp3 = ashr i32 %tmp1, 16
@@ -215,8 +216,9 @@ define i32 @f17(i32 %x, i32 %y) {
define i32 @f18(i32 %a, i32 %x, i32 %y) {
; CHECK-LABEL: f18:
+; CHECK-NOT: sxth
; CHECK: {{smlabt r0, r1, r2, r0|smlatb r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlabt|smlatb}}
+; DISABLED-NOT: {{smlabt|smlatb}}
%tmp0 = shl i32 %x, 16
%tmp1 = ashr i32 %tmp0, 16
%tmp2 = ashr i32 %y, 16
@@ -227,20 +229,21 @@ define i32 @f18(i32 %a, i32 %x, i32 %y) {
define i32 @f19(i32 %a, i32 %x, i32 %y) {
; CHECK-LABEL: f19:
-; CHECK: {{smlatb r0, r1, r2, r0|smlabt r0, r2, r1, r0}}
-; CHECK-THUMBV6-NOT: {{smlatb|smlabt}}
- %tmp0 = shl i32 %y, 16
+; CHECK: {{smlatb r0, r2, r1, r0|smlabt r0, r1, r2, r0}}
+; DISABLED-NOT: {{smlatb|smlabt}}
+ %tmp0 = shl i32 %x, 16
%tmp1 = ashr i32 %tmp0, 16
- %tmp2 = ashr i32 %x, 16
- %tmp3 = mul i32 %tmp2, %tmp1
+ %tmp2 = ashr i32 %y, 16
+ %tmp3 = mul i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %a
ret i32 %tmp5
}
define i32 @f20(i32 %a, i32 %x, i32 %y) {
; CHECK-LABEL: f20:
+; CHECK-NOT: sxth
; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
%tmp1 = shl i32 %x, 16
%tmp2 = ashr i32 %tmp1, 16
%tmp3 = shl i32 %y, 16
@@ -254,7 +257,7 @@ define i32 @f21(i32 %a, i32 %x, i16 %y) {
; CHECK-LABEL: f21
; CHECK-NOT: sxth
; CHECK: smlabb
-; CHECK-THUMBV6-NOT: smlabb
+; DISABLED-NOT: smlabb
%tmp1 = shl i32 %x, 16
%tmp2 = ashr i32 %tmp1, 16
%tmp3 = sext i16 %y to i32
@@ -263,12 +266,26 @@ define i32 @f21(i32 %a, i32 %x, i16 %y) {
ret i32 %tmp5
}
+define i32 @f21_b(i32 %a, i32 %x, i16 %y) {
+; CHECK-LABEL: f21_b
+; CHECK-NOT: sxth
+; CHECK: smlabb
+; DISABLED-NOT: smlabb
+ %tmp1 = shl i32 %x, 16
+ %tmp2 = ashr i32 %tmp1, 16
+ %tmp3 = sext i16 %y to i32
+ %tmp4 = mul i32 %tmp3, %tmp2
+ %tmp5 = add i32 %a, %tmp4
+ ret i32 %tmp5
+}
+
@global_b = external global i16, align 2
define i32 @f22(i32 %a) {
; CHECK-LABEL: f22:
+; CHECK-NOT: sxth
; CHECK: smulwb r0, r0, r1
-; CHECK-THUMBV6-NOT: smulwb
+; DISABLED-NOT: smulwb
%b = load i16, i16* @global_b, align 2
%sext = sext i16 %b to i64
%conv = sext i32 %a to i64
@@ -280,8 +297,9 @@ define i32 @f22(i32 %a) {
define i32 @f23(i32 %a, i32 %c) {
; CHECK-LABEL: f23:
+; CHECK-NOT: sxth
; CHECK: smlawb r0, r0, r2, r1
-; CHECK-THUMBV6-NOT: smlawb
+; DISABLED-NOT: smlawb
%b = load i16, i16* @global_b, align 2
%sext = sext i16 %b to i64
%conv = sext i32 %a to i64
@@ -291,3 +309,102 @@ define i32 @f23(i32 %a, i32 %c) {
%add = add nsw i32 %conv5, %c
ret i32 %add
}
+
+; CHECK-LABEL: f24
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f24(i16* %a, i32* %b, i32* %c) {
+ %ld.0 = load i16, i16* %a, align 2
+ %ld.1 = load i32, i32* %b, align 4
+ %conv.0 = sext i16 %ld.0 to i32
+ %shift = shl i32 %ld.1, 16
+ %conv.1 = ashr i32 %shift, 16
+ %mul.0 = mul i32 %conv.0, %conv.1
+ store i32 %ld.1, i32* %c
+ ret i32 %mul.0
+}
+
+; CHECK-LABEL: f25
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f25(i16* %a, i32 %b, i32* %c) {
+ %ld.0 = load i16, i16* %a, align 2
+ %conv.0 = sext i16 %ld.0 to i32
+ %shift = shl i32 %b, 16
+ %conv.1 = ashr i32 %shift, 16
+ %mul.0 = mul i32 %conv.0, %conv.1
+ store i32 %b, i32* %c
+ ret i32 %mul.0
+}
+
+; CHECK-LABEL: f25_b
+; CHECK-NOT: sxth
+; CHECK: smulbb
+define i32 @f25_b(i16* %a, i32 %b, i32* %c) {
+ %ld.0 = load i16, i16* %a, align 2
+ %conv.0 = sext i16 %ld.0 to i32
+ %shift = shl i32 %b, 16
+ %conv.1 = ashr i32 %shift, 16
+ %mul.0 = mul i32 %conv.1, %conv.0
+ store i32 %b, i32* %c
+ ret i32 %mul.0
+}
+
+; CHECK-LABEL: f26
+; CHECK-NOT: sxth
+; CHECK: {{smulbt | smultb}}
+define i32 @f26(i16* %a, i32 %b, i32* %c) {
+ %ld.0 = load i16, i16* %a, align 2
+ %conv.0 = sext i16 %ld.0 to i32
+ %conv.1 = ashr i32 %b, 16
+ %mul.0 = mul i32 %conv.0, %conv.1
+ store i32 %b, i32* %c
+ ret i32 %mul.0
+}
+
+; CHECK-LABEL: f26_b
+; CHECK-NOT: sxth
+; CHECK: {{smulbt | smultb}}
+define i32 @f26_b(i16* %a, i32 %b, i32* %c) {
+ %ld.0 = load i16, i16* %a, align 2
+ %conv.0 = sext i16 %ld.0 to i32
+ %conv.1 = ashr i32 %b, 16
+ %mul.0 = mul i32 %conv.1, %conv.0
+ store i32 %b, i32* %c
+ ret i32 %mul.0
+}
+
+; CHECK-LABEL: f27
+; CHECK-NOT: sxth
+; CHECK: smulbb
+; CHECK: {{smlabt | smlatb}}
+define i32 @f27(i16* %a, i32* %b) {
+ %ld.0 = load i16, i16* %a, align 2
+ %ld.1 = load i32, i32* %b, align 4
+ %conv.0 = sext i16 %ld.0 to i32
+ %shift = shl i32 %ld.1, 16
+ %conv.1 = ashr i32 %shift, 16
+ %conv.2 = ashr i32 %ld.1, 16
+ %mul.0 = mul i32 %conv.0, %conv.1
+ %mul.1 = mul i32 %conv.0, %conv.2
+ %add = add i32 %mul.0, %mul.1
+ ret i32 %add
+}
+
+; CHECK-LABEL: f27_b
+; CHECK-NOT: sxth
+; CHECK: smulbb
+; CHECK: {{smlabt | smlatb}}
+define i32 @f27_b(i16* %a, i32* %b) {
+ %ld.0 = load i16, i16* %a, align 2
+ %ld.1 = load i32, i32* %b, align 4
+ %conv.0 = sext i16 %ld.0 to i32
+ %shift = shl i32 %ld.1, 16
+ %conv.1 = ashr i32 %shift, 16
+ %conv.2 = ashr i32 %ld.1, 16
+ %mul.0 = mul i32 %conv.0, %conv.1
+ %mul.1 = mul i32 %conv.2, %conv.0
+ %add = add i32 %mul.0, %mul.1
+ ret i32 %add
+}
+
diff --git a/test/CodeGen/ARM/softfp-fabs-fneg.ll b/test/CodeGen/ARM/softfp-fabs-fneg.ll
index b7c684d35b57..9777995fe64e 100644
--- a/test/CodeGen/ARM/softfp-fabs-fneg.ll
+++ b/test/CodeGen/ARM/softfp-fabs-fneg.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=armv7 < %s | FileCheck %s --check-prefix=CHECK-ARM --check-prefix=CHECK
-; RUN: llc -mtriple=thumbv7 < %s | FileCheck %s --check-prefix=CHECK-THUMB --check-prefix=CHECK
+; RUN: llc -mtriple=armv7 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv7 < %s | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--"
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
index b087fa9489f4..feb0189be9ed 100644
--- a/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
+; RUN: llc < %s -frame-pointer=all -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
; This test is fairly fragile. The goal is to ensure that "large" stack
; objects are allocated closest to the stack protector (i.e., farthest away
; from the Stack Pointer.) In standard SSP mode this means that large (>=
diff --git a/test/CodeGen/ARM/sub-cmp-peephole.ll b/test/CodeGen/ARM/sub-cmp-peephole.ll
index 45964e534114..9720df795eb6 100644
--- a/test/CodeGen/ARM/sub-cmp-peephole.ll
+++ b/test/CodeGen/ARM/sub-cmp-peephole.ll
@@ -75,7 +75,7 @@ if.else:
; CHECK: cmp
define i32 @bc_raise(i1 %cond) nounwind ssp {
entry:
- %val.2.i = select i1 %cond, i32 0, i32 undef
+ %val.2.i = select i1 %cond, i32 0, i32 1
%sub.i = sub nsw i32 0, %val.2.i
%retval.0.i = select i1 %cond, i32 %val.2.i, i32 %sub.i
%cmp1 = icmp eq i32 %retval.0.i, 0
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
index 1b406103d118..6166a947fad8 100644
--- a/test/CodeGen/ARM/subreg-remat.ll
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s
+; RUN: llc < %s -relocation-model=pic -frame-pointer=all -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s
target triple = "thumbv7-apple-ios"
; <rdar://problem/10032939>
;
diff --git a/test/CodeGen/ARM/tail-call.ll b/test/CodeGen/ARM/tail-call.ll
index 15ce4d791e8f..abf0c988c9ec 100644
--- a/test/CodeGen/ARM/tail-call.ll
+++ b/test/CodeGen/ARM/tail-call.ll
@@ -98,3 +98,15 @@ entry:
%call = tail call i32 (i32, ...) @variadic(i32 %y, i64 %z, i64 %z)
ret void
}
+
+; Check that NonNull attributes don't inhibit tailcalls.
+
+declare nonnull i8* @nonnull_callee(i8* %p, i32 %val)
+define i8* @nonnull_caller(i8* %p, i32 %val) {
+; CHECK-LABEL: nonnull_caller:
+; CHECK-TAIL: b nonnull_callee
+; CHECK-NO-TAIL: bl nonnull_callee
+entry:
+ %call = tail call i8* @nonnull_callee(i8* %p, i32 %val)
+ ret i8* %call
+}
diff --git a/test/CodeGen/ARM/tls-models.ll b/test/CodeGen/ARM/tls-models.ll
index 0a82f4d6c8f2..33c85299bdcc 100644
--- a/test/CodeGen/ARM/tls-models.ll
+++ b/test/CodeGen/ARM/tls-models.ll
@@ -3,9 +3,9 @@
; RUN: llc -mtriple=arm-linux-gnueabi -relocation-model=pic < %s \
; RUN: | FileCheck -check-prefix=CHECK-PIC -check-prefix=COMMON %s
; RUN: llc -emulated-tls -mtriple=arm-linux-gnueabi < %s \
-; RUN: | FileCheck -check-prefix=EMUNONPIC -check-prefix=EMU -check-prefix=COMMON %s
+; RUN: | FileCheck -check-prefix=EMU -check-prefix=COMMON %s
; RUN: llc -emulated-tls -mtriple=arm-linux-gnueabi -relocation-model=pic < %s \
-; RUN: | FileCheck -check-prefix=EMUPIC -check-prefix=EMU -check-prefix=COMMON %s
+; RUN: | FileCheck -check-prefix=EMU -check-prefix=COMMON %s
@external_gd = external thread_local global i32
diff --git a/test/CodeGen/ARM/trap.ll b/test/CodeGen/ARM/trap.ll
index 585218cf337c..c45f7133febe 100644
--- a/test/CodeGen/ARM/trap.ll
+++ b/test/CodeGen/ARM/trap.ll
@@ -59,25 +59,25 @@ entry:
define void @t2() nounwind {
entry:
; DARWIN-LABEL: t2:
-; DARWIN: trap
+; DARWIN: udf #254
; FUNC-LABEL: t2:
; FUNC: bl __trap
; NACL-LABEL: t2:
-; NACL: .inst 0xe7fedef0
+; NACL: bkpt #0
; ARM-LABEL: t2:
-; ARM: .inst 0xe7ffdefe
+; ARM: bkpt #0
; THUMB-LABEL: t2:
-; THUMB: .inst.n 0xdefe
+; THUMB: bkpt #0
-; ENCODING-NACL: f0 de fe e7 trap
+; ENCODING-NACL: 70 00 20 e1 bkpt #0
-; ENCODING-ARM: fe de ff e7 trap
+; ENCODING-ARM: 70 00 20 e1 bkpt #0
-; ENCODING-THUMB: fe de trap
+; ENCODING-THUMB: 00 be bkpt #0
call void @llvm.debugtrap()
unreachable
diff --git a/test/CodeGen/ARM/twoaddrinstr.ll b/test/CodeGen/ARM/twoaddrinstr.ll
index f0a95c833c6b..e8c52e1b58df 100644
--- a/test/CodeGen/ARM/twoaddrinstr.ll
+++ b/test/CodeGen/ARM/twoaddrinstr.ll
@@ -4,8 +4,8 @@
define void @PR13378() nounwind {
; This was orriginally a crasher trying to schedule the instructions.
; CHECK-LABEL: PR13378:
-; CHECK: vld1.32
-; CHECK-NEXT: vmov.i32
+; CHECK: vmov.i32
+; CHECK-NEXT: vld1.32
; CHECK-NEXT: vst1.32
; CHECK-NEXT: vst1.32
; CHECK-NEXT: vmov.f32
diff --git a/test/CodeGen/ARM/umulo-128-legalisation-lowering.ll b/test/CodeGen/ARM/umulo-128-legalisation-lowering.ll
new file mode 100644
index 000000000000..c237a396bf91
--- /dev/null
+++ b/test/CodeGen/ARM/umulo-128-legalisation-lowering.ll
@@ -0,0 +1,210 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=armv6-unknown-linux-gnu | FileCheck %s --check-prefixes=ARMV6
+; RUN: llc < %s -mtriple=armv7-unknown-linux-gnu | FileCheck %s --check-prefixes=ARMV7
+
+define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
+; ARMV6-LABEL: muloti_test:
+; ARMV6: @ %bb.0: @ %start
+; ARMV6-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV6-NEXT: sub sp, sp, #28
+; ARMV6-NEXT: mov r9, #0
+; ARMV6-NEXT: mov r11, r0
+; ARMV6-NEXT: ldr r7, [sp, #76]
+; ARMV6-NEXT: mov r5, r3
+; ARMV6-NEXT: ldr r10, [sp, #72]
+; ARMV6-NEXT: mov r1, r3
+; ARMV6-NEXT: mov r6, r2
+; ARMV6-NEXT: mov r0, r2
+; ARMV6-NEXT: mov r2, #0
+; ARMV6-NEXT: mov r3, #0
+; ARMV6-NEXT: str r9, [sp, #12]
+; ARMV6-NEXT: str r9, [sp, #8]
+; ARMV6-NEXT: str r7, [sp, #4]
+; ARMV6-NEXT: str r10, [sp]
+; ARMV6-NEXT: bl __multi3
+; ARMV6-NEXT: str r3, [sp, #20] @ 4-byte Spill
+; ARMV6-NEXT: str r2, [sp, #16] @ 4-byte Spill
+; ARMV6-NEXT: stm r11, {r0, r1}
+; ARMV6-NEXT: ldr r0, [sp, #84]
+; ARMV6-NEXT: ldr r3, [sp, #80]
+; ARMV6-NEXT: ldr r8, [sp, #64]
+; ARMV6-NEXT: umull r4, r0, r0, r6
+; ARMV6-NEXT: umull r2, r1, r5, r3
+; ARMV6-NEXT: add r2, r4, r2
+; ARMV6-NEXT: umull lr, r4, r3, r6
+; ARMV6-NEXT: umull r3, r6, r7, r8
+; ARMV6-NEXT: adds r12, r4, r2
+; ARMV6-NEXT: adc r2, r9, #0
+; ARMV6-NEXT: str r2, [sp, #24] @ 4-byte Spill
+; ARMV6-NEXT: ldr r2, [sp, #68]
+; ARMV6-NEXT: umull r4, r2, r2, r10
+; ARMV6-NEXT: add r3, r4, r3
+; ARMV6-NEXT: umull r4, r10, r8, r10
+; ARMV6-NEXT: adds r3, r10, r3
+; ARMV6-NEXT: adc r10, r9, #0
+; ARMV6-NEXT: adds r4, r4, lr
+; ARMV6-NEXT: adc r12, r3, r12
+; ARMV6-NEXT: ldr r3, [sp, #16] @ 4-byte Reload
+; ARMV6-NEXT: adds r4, r3, r4
+; ARMV6-NEXT: str r4, [r11, #8]
+; ARMV6-NEXT: ldr r4, [sp, #20] @ 4-byte Reload
+; ARMV6-NEXT: adcs r3, r4, r12
+; ARMV6-NEXT: str r3, [r11, #12]
+; ARMV6-NEXT: ldr r3, [sp, #84]
+; ARMV6-NEXT: adc r12, r9, #0
+; ARMV6-NEXT: cmp r5, #0
+; ARMV6-NEXT: movne r5, #1
+; ARMV6-NEXT: cmp r3, #0
+; ARMV6-NEXT: mov r4, r3
+; ARMV6-NEXT: movne r4, #1
+; ARMV6-NEXT: cmp r0, #0
+; ARMV6-NEXT: movne r0, #1
+; ARMV6-NEXT: cmp r1, #0
+; ARMV6-NEXT: and r5, r4, r5
+; ARMV6-NEXT: movne r1, #1
+; ARMV6-NEXT: orr r0, r5, r0
+; ARMV6-NEXT: ldr r5, [sp, #68]
+; ARMV6-NEXT: orr r0, r0, r1
+; ARMV6-NEXT: ldr r1, [sp, #24] @ 4-byte Reload
+; ARMV6-NEXT: cmp r7, #0
+; ARMV6-NEXT: orr r0, r0, r1
+; ARMV6-NEXT: movne r7, #1
+; ARMV6-NEXT: cmp r5, #0
+; ARMV6-NEXT: mov r1, r5
+; ARMV6-NEXT: movne r1, #1
+; ARMV6-NEXT: cmp r2, #0
+; ARMV6-NEXT: movne r2, #1
+; ARMV6-NEXT: and r1, r1, r7
+; ARMV6-NEXT: orr r1, r1, r2
+; ARMV6-NEXT: ldr r2, [sp, #80]
+; ARMV6-NEXT: cmp r6, #0
+; ARMV6-NEXT: movne r6, #1
+; ARMV6-NEXT: orrs r2, r2, r3
+; ARMV6-NEXT: orr r1, r1, r6
+; ARMV6-NEXT: movne r2, #1
+; ARMV6-NEXT: orrs r7, r8, r5
+; ARMV6-NEXT: orr r1, r1, r10
+; ARMV6-NEXT: movne r7, #1
+; ARMV6-NEXT: and r2, r7, r2
+; ARMV6-NEXT: orr r1, r2, r1
+; ARMV6-NEXT: orr r0, r1, r0
+; ARMV6-NEXT: orr r0, r0, r12
+; ARMV6-NEXT: and r0, r0, #1
+; ARMV6-NEXT: strb r0, [r11, #16]
+; ARMV6-NEXT: add sp, sp, #28
+; ARMV6-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+;
+; ARMV7-LABEL: muloti_test:
+; ARMV7: @ %bb.0: @ %start
+; ARMV7-NEXT: push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+; ARMV7-NEXT: sub sp, sp, #44
+; ARMV7-NEXT: str r0, [sp, #40] @ 4-byte Spill
+; ARMV7-NEXT: mov r0, #0
+; ARMV7-NEXT: ldr r8, [sp, #88]
+; ARMV7-NEXT: mov r5, r3
+; ARMV7-NEXT: ldr r7, [sp, #92]
+; ARMV7-NEXT: mov r1, r3
+; ARMV7-NEXT: mov r6, r2
+; ARMV7-NEXT: str r0, [sp, #8]
+; ARMV7-NEXT: str r0, [sp, #12]
+; ARMV7-NEXT: mov r0, r2
+; ARMV7-NEXT: mov r2, #0
+; ARMV7-NEXT: mov r3, #0
+; ARMV7-NEXT: str r8, [sp]
+; ARMV7-NEXT: str r7, [sp, #4]
+; ARMV7-NEXT: bl __multi3
+; ARMV7-NEXT: str r1, [sp, #28] @ 4-byte Spill
+; ARMV7-NEXT: ldr r1, [sp, #80]
+; ARMV7-NEXT: str r2, [sp, #24] @ 4-byte Spill
+; ARMV7-NEXT: str r3, [sp, #20] @ 4-byte Spill
+; ARMV7-NEXT: umull r2, r9, r7, r1
+; ARMV7-NEXT: str r0, [sp, #32] @ 4-byte Spill
+; ARMV7-NEXT: ldr r4, [sp, #84]
+; ARMV7-NEXT: ldr r0, [sp, #96]
+; ARMV7-NEXT: umull r1, r3, r1, r8
+; ARMV7-NEXT: umull r12, r10, r4, r8
+; ARMV7-NEXT: str r1, [sp, #16] @ 4-byte Spill
+; ARMV7-NEXT: umull lr, r1, r5, r0
+; ARMV7-NEXT: add r2, r12, r2
+; ARMV7-NEXT: umull r11, r8, r0, r6
+; ARMV7-NEXT: ldr r0, [sp, #100]
+; ARMV7-NEXT: adds r2, r3, r2
+; ARMV7-NEXT: mov r12, #0
+; ARMV7-NEXT: umull r6, r0, r0, r6
+; ARMV7-NEXT: adc r3, r12, #0
+; ARMV7-NEXT: str r3, [sp, #36] @ 4-byte Spill
+; ARMV7-NEXT: add r3, r6, lr
+; ARMV7-NEXT: ldr r6, [sp, #16] @ 4-byte Reload
+; ARMV7-NEXT: adds r3, r8, r3
+; ARMV7-NEXT: adc lr, r12, #0
+; ARMV7-NEXT: adds r6, r6, r11
+; ARMV7-NEXT: adc r2, r2, r3
+; ARMV7-NEXT: ldr r3, [sp, #24] @ 4-byte Reload
+; ARMV7-NEXT: mov r12, #0
+; ARMV7-NEXT: adds r3, r3, r6
+; ARMV7-NEXT: ldr r6, [sp, #20] @ 4-byte Reload
+; ARMV7-NEXT: adcs r8, r6, r2
+; ARMV7-NEXT: ldr r6, [sp, #40] @ 4-byte Reload
+; ARMV7-NEXT: ldr r2, [sp, #32] @ 4-byte Reload
+; ARMV7-NEXT: str r2, [r6]
+; ARMV7-NEXT: ldr r2, [sp, #28] @ 4-byte Reload
+; ARMV7-NEXT: stmib r6, {r2, r3, r8}
+; ARMV7-NEXT: adc r8, r12, #0
+; ARMV7-NEXT: cmp r5, #0
+; ARMV7-NEXT: ldr r2, [sp, #100]
+; ARMV7-NEXT: movwne r5, #1
+; ARMV7-NEXT: cmp r2, #0
+; ARMV7-NEXT: mov r3, r2
+; ARMV7-NEXT: movwne r3, #1
+; ARMV7-NEXT: cmp r0, #0
+; ARMV7-NEXT: movwne r0, #1
+; ARMV7-NEXT: cmp r1, #0
+; ARMV7-NEXT: and r3, r3, r5
+; ARMV7-NEXT: movwne r1, #1
+; ARMV7-NEXT: orr r0, r3, r0
+; ARMV7-NEXT: cmp r7, #0
+; ARMV7-NEXT: orr r0, r0, r1
+; ARMV7-NEXT: ldr r1, [sp, #80]
+; ARMV7-NEXT: movwne r7, #1
+; ARMV7-NEXT: cmp r4, #0
+; ARMV7-NEXT: orr r1, r1, r4
+; ARMV7-NEXT: movwne r4, #1
+; ARMV7-NEXT: cmp r10, #0
+; ARMV7-NEXT: and r3, r4, r7
+; ARMV7-NEXT: movwne r10, #1
+; ARMV7-NEXT: cmp r9, #0
+; ARMV7-NEXT: orr r3, r3, r10
+; ARMV7-NEXT: ldr r7, [sp, #36] @ 4-byte Reload
+; ARMV7-NEXT: movwne r9, #1
+; ARMV7-NEXT: orr r3, r3, r9
+; ARMV7-NEXT: orr r3, r3, r7
+; ARMV7-NEXT: ldr r7, [sp, #96]
+; ARMV7-NEXT: orr r0, r0, lr
+; ARMV7-NEXT: orrs r7, r7, r2
+; ARMV7-NEXT: movwne r7, #1
+; ARMV7-NEXT: cmp r1, #0
+; ARMV7-NEXT: movwne r1, #1
+; ARMV7-NEXT: and r1, r1, r7
+; ARMV7-NEXT: orr r1, r1, r3
+; ARMV7-NEXT: orr r0, r1, r0
+; ARMV7-NEXT: orr r0, r0, r8
+; ARMV7-NEXT: and r0, r0, #1
+; ARMV7-NEXT: strb r0, [r6, #16]
+; ARMV7-NEXT: add sp, sp, #44
+; ARMV7-NEXT: pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
+start:
+ %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2
+ %1 = extractvalue { i128, i1 } %0, 0
+ %2 = extractvalue { i128, i1 } %0, 1
+ %3 = zext i1 %2 to i8
+ %4 = insertvalue { i128, i8 } undef, i128 %1, 0
+ %5 = insertvalue { i128, i8 } %4, i8 %3, 1
+ ret { i128, i8 } %5
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128) #1
+
+attributes #0 = { nounwind readnone uwtable }
+attributes #1 = { nounwind readnone speculatable }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/ARM/umulo-32.ll b/test/CodeGen/ARM/umulo-32.ll
index 1c8357314c28..cfd132aebcc0 100644
--- a/test/CodeGen/ARM/umulo-32.ll
+++ b/test/CodeGen/ARM/umulo-32.ll
@@ -2,12 +2,12 @@
%umul.ty = type { i32, i1 }
-define i32 @test1(i32 %a) nounwind {
+define i32 @test1(i32 %a, i1 %x) nounwind {
; CHECK: test1:
; CHECK: muldi3
%tmp0 = tail call %umul.ty @llvm.umul.with.overflow.i32(i32 %a, i32 37)
%tmp1 = extractvalue %umul.ty %tmp0, 0
- %tmp2 = select i1 undef, i32 -1, i32 %tmp1
+ %tmp2 = select i1 %x, i32 -1, i32 %tmp1
ret i32 %tmp2
}
diff --git a/test/CodeGen/ARM/umulo-64-legalisation-lowering.ll b/test/CodeGen/ARM/umulo-64-legalisation-lowering.ll
new file mode 100644
index 000000000000..ddf033b19b94
--- /dev/null
+++ b/test/CodeGen/ARM/umulo-64-legalisation-lowering.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=armv6-unknown-linux-gnu | FileCheck %s --check-prefixes=ARMV6
+; RUN: llc < %s -mtriple=armv7-unknown-linux-gnu | FileCheck %s --check-prefixes=ARMV7
+
+define { i64, i8 } @mulodi_test(i64 %l, i64 %r) unnamed_addr #0 {
+; ARMV6-LABEL: mulodi_test:
+; ARMV6: @ %bb.0: @ %start
+; ARMV6-NEXT: push {r4, r5, r6, lr}
+; ARMV6-NEXT: umull r12, lr, r3, r0
+; ARMV6-NEXT: mov r6, #0
+; ARMV6-NEXT: umull r4, r5, r1, r2
+; ARMV6-NEXT: umull r0, r2, r0, r2
+; ARMV6-NEXT: add r4, r4, r12
+; ARMV6-NEXT: adds r12, r2, r4
+; ARMV6-NEXT: adc r2, r6, #0
+; ARMV6-NEXT: cmp r3, #0
+; ARMV6-NEXT: movne r3, #1
+; ARMV6-NEXT: cmp r1, #0
+; ARMV6-NEXT: movne r1, #1
+; ARMV6-NEXT: cmp r5, #0
+; ARMV6-NEXT: and r1, r1, r3
+; ARMV6-NEXT: movne r5, #1
+; ARMV6-NEXT: cmp lr, #0
+; ARMV6-NEXT: orr r1, r1, r5
+; ARMV6-NEXT: movne lr, #1
+; ARMV6-NEXT: orr r1, r1, lr
+; ARMV6-NEXT: orr r2, r1, r2
+; ARMV6-NEXT: mov r1, r12
+; ARMV6-NEXT: pop {r4, r5, r6, pc}
+;
+; ARMV7-LABEL: mulodi_test:
+; ARMV7: @ %bb.0: @ %start
+; ARMV7-NEXT: push {r4, r5, r11, lr}
+; ARMV7-NEXT: umull r12, lr, r1, r2
+; ARMV7-NEXT: cmp r3, #0
+; ARMV7-NEXT: umull r4, r5, r3, r0
+; ARMV7-NEXT: movwne r3, #1
+; ARMV7-NEXT: cmp r1, #0
+; ARMV7-NEXT: movwne r1, #1
+; ARMV7-NEXT: umull r0, r2, r0, r2
+; ARMV7-NEXT: cmp lr, #0
+; ARMV7-NEXT: and r1, r1, r3
+; ARMV7-NEXT: movwne lr, #1
+; ARMV7-NEXT: cmp r5, #0
+; ARMV7-NEXT: orr r1, r1, lr
+; ARMV7-NEXT: movwne r5, #1
+; ARMV7-NEXT: orr r3, r1, r5
+; ARMV7-NEXT: add r1, r12, r4
+; ARMV7-NEXT: mov r5, #0
+; ARMV7-NEXT: adds r1, r2, r1
+; ARMV7-NEXT: adc r2, r5, #0
+; ARMV7-NEXT: orr r2, r3, r2
+; ARMV7-NEXT: pop {r4, r5, r11, pc}
+start:
+ %0 = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %l, i64 %r) #2
+ %1 = extractvalue { i64, i1 } %0, 0
+ %2 = extractvalue { i64, i1 } %0, 1
+ %3 = zext i1 %2 to i8
+ %4 = insertvalue { i64, i8 } undef, i64 %1, 0
+ %5 = insertvalue { i64, i8 } %4, i8 %3, 1
+ ret { i64, i8 } %5
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { nounwind readnone uwtable }
+attributes #1 = { nounwind readnone speculatable }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/ARM/unwind-fp.ll b/test/CodeGen/ARM/unwind-fp.ll
new file mode 100644
index 000000000000..e655bbbb6591
--- /dev/null
+++ b/test/CodeGen/ARM/unwind-fp.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -mtriple=armv7a-arm-none-eabi | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "armv7-arm-none-eabi"
+
+define void @foo() minsize {
+entry:
+ ; CHECK: .vsave {[[SAVE_REG:d[0-9]+]]}
+ ; CHECK-NEXT: .pad #8
+ ; CHECK-NEXT: vpush {[[PAD_REG:d[0-9]+]], [[SAVE_REG]]}
+ ; CHECK: vpop {[[PAD_REG]], [[SAVE_REG]]}
+ %a = alloca i32, align 4
+ call void asm sideeffect "", "r,~{d8}"(i32* %a)
+ ret void
+}
diff --git a/test/CodeGen/ARM/v8m-tail-call.ll b/test/CodeGen/ARM/v8m-tail-call.ll
index f4d3a454c704..7ee80d4b9b96 100644
--- a/test/CodeGen/ARM/v8m-tail-call.ll
+++ b/test/CodeGen/ARM/v8m-tail-call.ll
@@ -1,34 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc %s -o - -mtriple=thumbv8m.base | FileCheck %s
declare i32 @g(...)
declare i32 @h0(i32, i32, i32, i32)
define hidden i32 @f0() {
+; CHECK-LABEL: f0:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: bl g
+; CHECK-NEXT: movs r1, #1
+; CHECK-NEXT: movs r2, #2
+; CHECK-NEXT: movs r3, #3
+; CHECK-NEXT: ldr r7, [sp, #4]
+; CHECK-NEXT: mov lr, r7
+; CHECK-NEXT: pop {r7}
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: b h0
%1 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
%2 = tail call i32 @h0(i32 %1, i32 1, i32 2, i32 3)
ret i32 %2
-; CHECK-LABEL: f0
-; CHECK: ldr [[POP:r[4567]]], [sp, #4]
-; CHECK-NEXT: mov lr, [[POP]]
-; CHECK-NEXT: pop {{.*}}[[POP]]
-; CHECK-NEXT: add sp, #4
-; CHECK-NEXT: b h0
}
declare i32 @h1(i32)
define hidden i32 @f1() {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: bl g
+; CHECK-NEXT: pop {r7}
+; CHECK-NEXT: pop {r1}
+; CHECK-NEXT: mov lr, r1
+; CHECK-NEXT: b h1
%1 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
%2 = tail call i32 @h1(i32 %1)
ret i32 %2
-; CHECK-LABEL: f1
-; CHECK: pop {r7}
-; CHECK: pop {r1}
-; CHECK: mov lr, r1
-; CHECK: b h1
}
declare i32 @h2(i32, i32, i32, i32, i32)
define hidden i32 @f2(i32, i32, i32, i32, i32) {
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: mov r4, r3
+; CHECK-NEXT: mov r5, r2
+; CHECK-NEXT: mov r6, r1
+; CHECK-NEXT: bl g
+; CHECK-NEXT: cbz r0, .LBB2_2
+; CHECK-NEXT: @ %bb.1:
+; CHECK-NEXT: mov r1, r6
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: mov r3, r4
+; CHECK-NEXT: ldr r4, [sp, #12]
+; CHECK-NEXT: mov lr, r4
+; CHECK-NEXT: pop {r4, r5, r6}
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: b h2
+; CHECK-NEXT: .LBB2_2:
+; CHECK-NEXT: movs r0, #0
+; CHECK-NEXT: mvns r0, r0
+; CHECK-NEXT: pop {r4, r5, r6, pc}
%6 = tail call i32 bitcast (i32 (...)* @g to i32 ()*)()
%7 = icmp eq i32 %6, 0
br i1 %7, label %10, label %8
@@ -38,12 +69,6 @@ define hidden i32 @f2(i32, i32, i32, i32, i32) {
%11 = phi i32 [ %9, %8 ], [ -1, %5 ]
ret i32 %11
-; CHECK-LABEL: f2
-; CHECK: ldr [[POP:r[4567]]], [sp, #12]
-; CHECK-NEXT: mov lr, [[POP]]
-; CHECK-NEXT: pop {{.*}}[[POP]]
-; CHECK-NEXT: add sp, #4
-; CHECK-NEXT: b h2
}
; Make sure that tail calls to function pointers that require r0-r3 for argument
@@ -51,7 +76,17 @@ define hidden i32 @f2(i32, i32, i32, i32, i32) {
@fnptr = global i32 (i32, i32, i32, i32)* null
define i32 @test3() {
; CHECK-LABEL: test3:
-; CHECK: blx {{r[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: movw r0, :lower16:fnptr
+; CHECK-NEXT: movt r0, :upper16:fnptr
+; CHECK-NEXT: ldr r4, [r0]
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r1, #2
+; CHECK-NEXT: movs r2, #3
+; CHECK-NEXT: movs r3, #4
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, pc}
%1 = load i32 (i32, i32, i32, i32)*, i32 (i32, i32, i32, i32)** @fnptr
%2 = tail call i32 %1(i32 1, i32 2, i32 3, i32 4)
ret i32 %2
@@ -60,7 +95,17 @@ define i32 @test3() {
@fnptr2 = global i32 (i32, i32, i64)* null
define i32 @test4() {
; CHECK-LABEL: test4:
-; CHECK: blx {{r[0-9]+}}
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: movw r0, :lower16:fnptr2
+; CHECK-NEXT: movt r0, :upper16:fnptr2
+; CHECK-NEXT: ldr r4, [r0]
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r1, #2
+; CHECK-NEXT: movs r2, #3
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, pc}
%1 = load i32 (i32, i32, i64)*, i32 (i32, i32, i64)** @fnptr2
%2 = tail call i32 %1(i32 1, i32 2, i64 3)
ret i32 %2
@@ -72,9 +117,13 @@ define i32 @test4() {
@fnptr3 = global i32 (i32, i32)* null
define i32 @test5() {
; CHECK-LABEL: test5:
-; CHECK: ldr [[REG:r[0-9]+]]
-; CHECK: bx [[REG]]
-; CHECK-NOT: blx [[REG]]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: movw r0, :lower16:fnptr3
+; CHECK-NEXT: movt r0, :upper16:fnptr3
+; CHECK-NEXT: ldr r2, [r0]
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r1, #2
+; CHECK-NEXT: bx r2
%1 = load i32 (i32, i32)*, i32 (i32, i32)** @fnptr3
%2 = tail call i32 %1(i32 1, i32 2)
ret i32 %2
@@ -84,9 +133,14 @@ define i32 @test5() {
@fnptr4 = global i32 (i32, i64)* null
define i32 @test6() {
; CHECK-LABEL: test6:
-; CHECK: ldr [[REG:r[0-9]+]]
-; CHECK: bx [[REG]]
-; CHECK-NOT: blx [[REG]]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: movw r0, :lower16:fnptr4
+; CHECK-NEXT: movt r0, :upper16:fnptr4
+; CHECK-NEXT: ldr r1, [r0]
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r2, #2
+; CHECK-NEXT: movs r3, #0
+; CHECK-NEXT: bx r1
%1 = load i32 (i32, i64)*, i32 (i32, i64)** @fnptr4
%2 = tail call i32 %1(i32 1, i64 2)
ret i32 %2
@@ -96,8 +150,12 @@ define i32 @test6() {
; tail-call optimized.
define i32 @test7() {
; CHECK-LABEL: test7:
-; CHECK: b bar
-; CHECK-NOT: bl bar
+; CHECK: @ %bb.0:
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r1, #2
+; CHECK-NEXT: movs r2, #3
+; CHECK-NEXT: movs r3, #4
+; CHECK-NEXT: b bar
%tail = tail call i32 @bar(i32 1, i32 2, i32 3, i32 4)
ret i32 %tail
}
@@ -109,6 +167,33 @@ declare i32 @bar(i32, i32, i32, i32)
%struct.S = type { i32 }
define void @test8(i32 (i32, i32, i32)* nocapture %fn, i32 %x) local_unnamed_addr {
+; CHECK-LABEL: test8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-NEXT: sub sp, #4
+; CHECK-NEXT: mov r4, r1
+; CHECK-NEXT: str r0, [sp] @ 4-byte Spill
+; CHECK-NEXT: bl test8_u
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: ldr r6, [r0]
+; CHECK-NEXT: movs r7, #0
+; CHECK-NEXT: mov r0, r7
+; CHECK-NEXT: bl test8_h
+; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r2, r7
+; CHECK-NEXT: bl test8_g
+; CHECK-NEXT: str r4, [r5]
+; CHECK-NEXT: movs r0, #1
+; CHECK-NEXT: movs r1, #2
+; CHECK-NEXT: movs r2, #3
+; CHECK-NEXT: ldr r3, [sp] @ 4-byte Reload
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: ldr r4, [sp, #16]
+; CHECK-NEXT: mov lr, r4
+; CHECK-NEXT: pop {r4, r5, r6, r7}
+; CHECK-NEXT: add sp, #4
+; CHECK-NEXT: bx r3
entry:
%call = tail call %struct.S* bitcast (%struct.S* (...)* @test8_u to %struct.S* ()*)()
%a = getelementptr inbounds %struct.S, %struct.S* %call, i32 0, i32 0
@@ -125,6 +210,28 @@ declare %struct.S* @test8_u(...)
declare i32 @test8_g(i32, i32, i32)
declare i32 @test8_h(i32)
-; CHECK: str r0, [sp] @ 4-byte Spill
-; CHECK: ldr r3, [sp] @ 4-byte Reload
-; CHECK: bx r3
+
+; Check that we don't introduce an unnecessary spill of lr.
+declare i32 @h9(i32, i32, i32, i32)
+define i32 @test9(i32* %x, i32* %y, i32* %z, i32* %a) {
+; CHECK-LABEL: test9:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r7}
+; CHECK-NEXT: ldr r4, [r3]
+; CHECK-NEXT: ldr r3, [r3, #4]
+; CHECK-NEXT: adds r3, r4, r3
+; CHECK-NEXT: ldr r1, [r1]
+; CHECK-NEXT: ldr r0, [r0]
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: pop {r4, r7}
+; CHECK-NEXT: b h9
+ %zz = load i32, i32* %z
+ %xx = load i32, i32* %x
+ %yy = load i32, i32* %y
+ %aa1 = load i32, i32* %a
+ %a2 = getelementptr i32, i32* %a, i32 1
+ %aa2 = load i32, i32* %a2
+ %aa = add i32 %aa1, %aa2
+ %r = tail call i32 @h9(i32 %xx, i32 %yy, i32 %zz, i32 %aa)
+ ret i32 %r
+}
diff --git a/test/CodeGen/ARM/vcombine.ll b/test/CodeGen/ARM/vcombine.ll
index c08ed81d042a..de234b6879ee 100644
--- a/test/CodeGen/ARM/vcombine.ll
+++ b/test/CodeGen/ARM/vcombine.ll
@@ -39,8 +39,8 @@ define <4 x i32> @vcombine32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-DAG: vldr [[LD0:d[0-9]+]], [r0]
; CHECK-DAG: vldr [[LD1:d[0-9]+]], [r1]
-; CHECK-LE: vmov r0, r1, [[LD0]]
; CHECK-LE: vmov r2, r3, [[LD1]]
+; CHECK-LE: vmov r0, r1, [[LD0]]
; CHECK-BE: vmov r1, r0, d16
; CHECK-BE: vmov r3, r2, d17
@@ -56,8 +56,8 @@ define <4 x float> @vcombinefloat(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-DAG: vldr [[LD0:d[0-9]+]], [r0]
; CHECK-DAG: vldr [[LD1:d[0-9]+]], [r1]
-; CHECK-LE: vmov r0, r1, [[LD0]]
; CHECK-LE: vmov r2, r3, [[LD1]]
+; CHECK-LE: vmov r0, r1, [[LD0]]
; CHECK-BE: vmov r1, r0, d16
; CHECK-BE: vmov r3, r2, d17
@@ -72,11 +72,11 @@ define <2 x i64> @vcombine64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
; CHECK-DAG: vldr [[LD0:d[0-9]+]], [r0]
; CHECK-DAG: vldr [[LD1:d[0-9]+]], [r1]
-; CHECK-LE: vmov r0, r1, [[LD0]]
; CHECK-LE: vmov r2, r3, [[LD1]]
+; CHECK-LE: vmov r0, r1, [[LD0]]
-; CHECK-BE: vmov r1, r0, [[LD0]]
; CHECK-BE: vmov r3, r2, [[LD1]]
+; CHECK-BE: vmov r1, r0, [[LD0]]
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = load <1 x i64>, <1 x i64>* %B
%tmp3 = shufflevector <1 x i64> %tmp1, <1 x i64> %tmp2, <2 x i32> <i32 0, i32 1>
diff --git a/test/CodeGen/ARM/vcvt.ll b/test/CodeGen/ARM/vcvt.ll
index 7052607bf80f..f16c8dc3a151 100644
--- a/test/CodeGen/ARM/vcvt.ll
+++ b/test/CodeGen/ARM/vcvt.ll
@@ -293,14 +293,14 @@ define <4 x i16> @fix_double_to_i16(<4 x double> %in) {
; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
; CHECK-NEXT: vmov d19, r2, r3
; CHECK-NEXT: vadd.f64 d18, d18, d18
-; CHECK-NEXT: vcvt.u32.f64 s0, d18
+; CHECK-NEXT: vcvt.s32.f64 s0, d18
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vadd.f64 d20, d16, d16
; CHECK-NEXT: vadd.f64 d19, d19, d19
; CHECK-NEXT: vadd.f64 d16, d17, d17
-; CHECK-NEXT: vcvt.u32.f64 s2, d20
-; CHECK-NEXT: vcvt.u32.f64 s4, d19
-; CHECK-NEXT: vcvt.u32.f64 s6, d16
+; CHECK-NEXT: vcvt.s32.f64 s2, d20
+; CHECK-NEXT: vcvt.s32.f64 s4, d19
+; CHECK-NEXT: vcvt.s32.f64 s6, d16
; CHECK-NEXT: vmov.32 d16[0], r0
; CHECK-NEXT: vmov r0, s2
; CHECK-NEXT: vmov.32 d17[0], r0
@@ -308,7 +308,7 @@ define <4 x i16> @fix_double_to_i16(<4 x double> %in) {
; CHECK-NEXT: vmov.32 d16[1], r0
; CHECK-NEXT: vmov r0, s6
; CHECK-NEXT: vmov.32 d17[1], r0
-; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vuzp.16 d16, d17
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
diff --git a/test/CodeGen/ARM/vdup.ll b/test/CodeGen/ARM/vdup.ll
index b7693c797635..5127dab26564 100644
--- a/test/CodeGen/ARM/vdup.ll
+++ b/test/CodeGen/ARM/vdup.ll
@@ -1,9 +1,12 @@
-; RUN: llc -mtriple=arm-eabi -float-abi=soft -mattr=+neon -verify-machineinstrs %s -o - \
-; RUN: | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=arm-eabi -float-abi=soft -mattr=+neon -verify-machineinstrs | FileCheck %s
define <8 x i8> @v_dup8(i8 %A) nounwind {
-;CHECK-LABEL: v_dup8:
-;CHECK: vdup.8
+; CHECK-LABEL: v_dup8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.8 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <8 x i8> zeroinitializer, i8 %A, i32 0
%tmp2 = insertelement <8 x i8> %tmp1, i8 %A, i32 1
%tmp3 = insertelement <8 x i8> %tmp2, i8 %A, i32 2
@@ -16,8 +19,11 @@ define <8 x i8> @v_dup8(i8 %A) nounwind {
}
define <4 x i16> @v_dup16(i16 %A) nounwind {
-;CHECK-LABEL: v_dup16:
-;CHECK: vdup.16
+; CHECK-LABEL: v_dup16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.16 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x i16> zeroinitializer, i16 %A, i32 0
%tmp2 = insertelement <4 x i16> %tmp1, i16 %A, i32 1
%tmp3 = insertelement <4 x i16> %tmp2, i16 %A, i32 2
@@ -26,24 +32,34 @@ define <4 x i16> @v_dup16(i16 %A) nounwind {
}
define <2 x i32> @v_dup32(i32 %A) nounwind {
-;CHECK-LABEL: v_dup32:
-;CHECK: vdup.32
+; CHECK-LABEL: v_dup32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <2 x i32> zeroinitializer, i32 %A, i32 0
%tmp2 = insertelement <2 x i32> %tmp1, i32 %A, i32 1
ret <2 x i32> %tmp2
}
define <2 x float> @v_dupfloat(float %A) nounwind {
-;CHECK-LABEL: v_dupfloat:
-;CHECK: vdup.32
+; CHECK-LABEL: v_dupfloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <2 x float> zeroinitializer, float %A, i32 0
%tmp2 = insertelement <2 x float> %tmp1, float %A, i32 1
ret <2 x float> %tmp2
}
define <16 x i8> @v_dupQ8(i8 %A) nounwind {
-;CHECK-LABEL: v_dupQ8:
-;CHECK: vdup.8
+; CHECK-LABEL: v_dupQ8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.8 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <16 x i8> zeroinitializer, i8 %A, i32 0
%tmp2 = insertelement <16 x i8> %tmp1, i8 %A, i32 1
%tmp3 = insertelement <16 x i8> %tmp2, i8 %A, i32 2
@@ -64,8 +80,12 @@ define <16 x i8> @v_dupQ8(i8 %A) nounwind {
}
define <8 x i16> @v_dupQ16(i16 %A) nounwind {
-;CHECK-LABEL: v_dupQ16:
-;CHECK: vdup.16
+; CHECK-LABEL: v_dupQ16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.16 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <8 x i16> zeroinitializer, i16 %A, i32 0
%tmp2 = insertelement <8 x i16> %tmp1, i16 %A, i32 1
%tmp3 = insertelement <8 x i16> %tmp2, i16 %A, i32 2
@@ -78,8 +98,12 @@ define <8 x i16> @v_dupQ16(i16 %A) nounwind {
}
define <4 x i32> @v_dupQ32(i32 %A) nounwind {
-;CHECK-LABEL: v_dupQ32:
-;CHECK: vdup.32
+; CHECK-LABEL: v_dupQ32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %A, i32 0
%tmp2 = insertelement <4 x i32> %tmp1, i32 %A, i32 1
%tmp3 = insertelement <4 x i32> %tmp2, i32 %A, i32 2
@@ -88,8 +112,12 @@ define <4 x i32> @v_dupQ32(i32 %A) nounwind {
}
define <4 x float> @v_dupQfloat(float %A) nounwind {
-;CHECK-LABEL: v_dupQfloat:
-;CHECK: vdup.32
+; CHECK-LABEL: v_dupQfloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x float> zeroinitializer, float %A, i32 0
%tmp2 = insertelement <4 x float> %tmp1, float %A, i32 1
%tmp3 = insertelement <4 x float> %tmp2, float %A, i32 2
@@ -100,163 +128,248 @@ define <4 x float> @v_dupQfloat(float %A) nounwind {
; Check to make sure it works with shuffles, too.
define <8 x i8> @v_shuffledup8(i8 %A) nounwind {
-;CHECK-LABEL: v_shuffledup8:
-;CHECK: vdup.8
+; CHECK-LABEL: v_shuffledup8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.8 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <8 x i8> undef, i8 %A, i32 0
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
ret <8 x i8> %tmp2
}
define <4 x i16> @v_shuffledup16(i16 %A) nounwind {
-;CHECK-LABEL: v_shuffledup16:
-;CHECK: vdup.16
+; CHECK-LABEL: v_shuffledup16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.16 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x i16> undef, i16 %A, i32 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
ret <4 x i16> %tmp2
}
define <2 x i32> @v_shuffledup32(i32 %A) nounwind {
-;CHECK-LABEL: v_shuffledup32:
-;CHECK: vdup.32
+; CHECK-LABEL: v_shuffledup32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <2 x i32> undef, i32 %A, i32 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
ret <2 x i32> %tmp2
}
define <2 x float> @v_shuffledupfloat(float %A) nounwind {
-;CHECK-LABEL: v_shuffledupfloat:
-;CHECK: vdup.32
+; CHECK-LABEL: v_shuffledupfloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 d16, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <2 x float> undef, float %A, i32 0
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
ret <2 x float> %tmp2
}
define <16 x i8> @v_shuffledupQ8(i8 %A) nounwind {
-;CHECK-LABEL: v_shuffledupQ8:
-;CHECK: vdup.8
+; CHECK-LABEL: v_shuffledupQ8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.8 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <16 x i8> undef, i8 %A, i32 0
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %tmp2
}
define <8 x i16> @v_shuffledupQ16(i16 %A) nounwind {
-;CHECK-LABEL: v_shuffledupQ16:
-;CHECK: vdup.16
+; CHECK-LABEL: v_shuffledupQ16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.16 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <8 x i16> undef, i16 %A, i32 0
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %tmp2
}
define <4 x i32> @v_shuffledupQ32(i32 %A) nounwind {
-;CHECK-LABEL: v_shuffledupQ32:
-;CHECK: vdup.32
+; CHECK-LABEL: v_shuffledupQ32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x i32> undef, i32 %A, i32 0
%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %tmp2
}
define <4 x float> @v_shuffledupQfloat(float %A) nounwind {
-;CHECK-LABEL: v_shuffledupQfloat:
-;CHECK: vdup.32
+; CHECK-LABEL: v_shuffledupQfloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q8, r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = insertelement <4 x float> undef, float %A, i32 0
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %tmp2
}
define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vduplane8:
-;CHECK: vdup.8
+; CHECK-LABEL: vduplane8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.8 d16, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <8 x i8> %tmp2
}
define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vduplane16:
-;CHECK: vdup.16
+; CHECK-LABEL: vduplane16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.16 d16, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x i16> %tmp2
}
define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vduplane32:
-;CHECK: vdup.32
+; CHECK-LABEL: vduplane32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 d16, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
ret <2 x i32> %tmp2
}
define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
-;CHECK-LABEL: vduplanefloat:
-;CHECK: vdup.32
+; CHECK-LABEL: vduplanefloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 d16, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
ret <2 x float> %tmp2
}
define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
-;CHECK-LABEL: vduplaneQ8:
-;CHECK: vdup.8
+; CHECK-LABEL: vduplaneQ8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.8 q8, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <16 x i8> %tmp2
}
define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
-;CHECK-LABEL: vduplaneQ16:
-;CHECK: vdup.16
+; CHECK-LABEL: vduplaneQ16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.16 q8, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <8 x i16> %tmp2
}
define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
-;CHECK-LABEL: vduplaneQ32:
-;CHECK: vdup.32
+; CHECK-LABEL: vduplaneQ32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 q8, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x i32> %tmp2
}
define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
-;CHECK-LABEL: vduplaneQfloat:
-;CHECK: vdup.32
+; CHECK-LABEL: vduplaneQfloat:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 q8, d16[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x float> %tmp2
}
define <2 x i64> @foo(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+; CHECK-LABEL: foo:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov pc, lr
entry:
%0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
ret <2 x i64> %0
}
define <2 x i64> @bar(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+; CHECK-LABEL: bar:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: mov pc, lr
entry:
%0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
ret <2 x i64> %0
}
define <2 x double> @baz(<2 x double> %arg0_int64x1_t) nounwind readnone {
+; CHECK-LABEL: baz:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r1, r3
+; CHECK-NEXT: mov pc, lr
entry:
%0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 1, i32 1>
ret <2 x double> %0
}
define <2 x double> @qux(<2 x double> %arg0_int64x1_t) nounwind readnone {
+; CHECK-LABEL: qux:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: mov r3, r1
+; CHECK-NEXT: mov pc, lr
entry:
%0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 0, i32 0>
ret <2 x double> %0
}
; Radar 7373643
-;CHECK-LABEL: redundantVdup:
-;CHECK: vmov.i8
-;CHECK-NOT: vdup.8
-;CHECK: vstr
define void @redundantVdup(<8 x i8>* %ptr) nounwind {
+; CHECK-LABEL: redundantVdup:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov.i8 d16, #0x80
+; CHECK-NEXT: vstr d16, [r0]
+; CHECK-NEXT: mov pc, lr
%1 = insertelement <8 x i8> undef, i8 -128, i32 0
%2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
store <8 x i8> %2, <8 x i8>* %ptr, align 8
@@ -264,8 +377,13 @@ define void @redundantVdup(<8 x i8>* %ptr) nounwind {
}
define <4 x i32> @tdupi(i32 %x, i32 %y) {
-;CHECK-LABEL: tdupi:
-;CHECK: vdup.32
+; CHECK-LABEL: tdupi:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q8, r0
+; CHECK-NEXT: vmov.32 d17[1], r1
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%1 = insertelement <4 x i32> undef, i32 %x, i32 0
%2 = insertelement <4 x i32> %1, i32 %x, i32 1
%3 = insertelement <4 x i32> %2, i32 %x, i32 2
@@ -274,8 +392,13 @@ define <4 x i32> @tdupi(i32 %x, i32 %y) {
}
define <4 x float> @tdupf(float %x, float %y) {
-;CHECK-LABEL: tdupf:
-;CHECK: vdup.32
+; CHECK-LABEL: tdupf:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vdup.32 q0, r0
+; CHECK-NEXT: vmov s3, r1
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: mov pc, lr
%1 = insertelement <4 x float> undef, float %x, i32 0
%2 = insertelement <4 x float> %1, float %x, i32 1
%3 = insertelement <4 x float> %2, float %x, i32 2
@@ -286,9 +409,15 @@ define <4 x float> @tdupf(float %x, float %y) {
; This test checks that when splatting an element from a vector into another,
; the value isn't moved out to GPRs first.
define <4 x i32> @tduplane(<4 x i32> %invec) {
-;CHECK-LABEL: tduplane:
-;CHECK-NOT: vmov {{.*}}, d16[1]
-;CHECK: vdup.32 {{.*}}, d16[1]
+; CHECK-LABEL: tduplane:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: mov r0, #255
+; CHECK-NEXT: vdup.32 q8, d16[1]
+; CHECK-NEXT: vmov.32 d17[1], r0
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%in = extractelement <4 x i32> %invec, i32 1
%1 = insertelement <4 x i32> undef, i32 %in, i32 0
%2 = insertelement <4 x i32> %1, i32 %in, i32 1
@@ -298,8 +427,13 @@ define <4 x i32> @tduplane(<4 x i32> %invec) {
}
define <2 x float> @check_f32(<4 x float> %v) nounwind {
-;CHECK-LABEL: check_f32:
-;CHECK: vdup.32 {{.*}}, d{{..}}[1]
+; CHECK-LABEL: check_f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vdup.32 d16, d17[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%x = extractelement <4 x float> %v, i32 3
%1 = insertelement <2 x float> undef, float %x, i32 0
%2 = insertelement <2 x float> %1, float %x, i32 1
@@ -307,8 +441,13 @@ define <2 x float> @check_f32(<4 x float> %v) nounwind {
}
define <2 x i32> @check_i32(<4 x i32> %v) nounwind {
-;CHECK-LABEL: check_i32:
-;CHECK: vdup.32 {{.*}}, d{{..}}[1]
+; CHECK-LABEL: check_i32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vdup.32 d16, d17[1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%x = extractelement <4 x i32> %v, i32 3
%1 = insertelement <2 x i32> undef, i32 %x, i32 0
%2 = insertelement <2 x i32> %1, i32 %x, i32 1
@@ -316,8 +455,13 @@ define <2 x i32> @check_i32(<4 x i32> %v) nounwind {
}
define <4 x i16> @check_i16(<8 x i16> %v) nounwind {
-;CHECK-LABEL: check_i16:
-;CHECK: vdup.16 {{.*}}, d{{..}}[3]
+; CHECK-LABEL: check_i16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vdup.16 d16, d16[3]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%x = extractelement <8 x i16> %v, i32 3
%1 = insertelement <4 x i16> undef, i16 %x, i32 0
%2 = insertelement <4 x i16> %1, i16 %x, i32 1
@@ -325,8 +469,13 @@ define <4 x i16> @check_i16(<8 x i16> %v) nounwind {
}
define <8 x i8> @check_i8(<16 x i8> %v) nounwind {
-;CHECK-LABEL: check_i8:
-;CHECK: vdup.8 {{.*}}, d{{..}}[3]
+; CHECK-LABEL: check_i8:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vdup.8 d16, d16[3]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%x = extractelement <16 x i8> %v, i32 3
%1 = insertelement <8 x i8> undef, i8 %x, i32 0
%2 = insertelement <8 x i8> %1, i8 %x, i32 1
@@ -336,8 +485,17 @@ define <8 x i8> @check_i8(<16 x i8> %v) nounwind {
; Check that an SPR splat produces a vdup.
define <2 x float> @check_spr_splat2(<2 x float> %p, i16 %q) {
-;CHECK-LABEL: check_spr_splat2:
-;CHECK: vdup.32 d
+; CHECK-LABEL: check_spr_splat2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: lsl r2, r2, #16
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: asr r2, r2, #16
+; CHECK-NEXT: vmov s0, r2
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vdup.32 d17, d0[0]
+; CHECK-NEXT: vsub.f32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
%conv = sitofp i16 %q to float
%splat.splatinsert = insertelement <2 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <2 x float> %splat.splatinsert, <2 x float> undef, <2 x i32> zeroinitializer
@@ -346,8 +504,18 @@ define <2 x float> @check_spr_splat2(<2 x float> %p, i16 %q) {
}
define <4 x float> @check_spr_splat4(<4 x float> %p, i16 %q) {
-;CHECK-LABEL: check_spr_splat4:
-;CHECK: vld1.16
+; CHECK-LABEL: check_spr_splat4:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrsh r12, [sp]
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov s0, r12
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vdup.32 q9, d0[0]
+; CHECK-NEXT: vsub.f32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%conv = sitofp i16 %q to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
@@ -356,8 +524,18 @@ define <4 x float> @check_spr_splat4(<4 x float> %p, i16 %q) {
}
; Same codegen as above test; scalar is splatted using vld1, so shuffle index is irrelevant.
define <4 x float> @check_spr_splat4_lane1(<4 x float> %p, i16 %q) {
-;CHECK-LABEL: check_spr_splat4_lane1:
-;CHECK: vld1.16
+; CHECK-LABEL: check_spr_splat4_lane1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: ldrsh r12, [sp]
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov s0, r12
+; CHECK-NEXT: vcvt.f32.s32 s0, s0
+; CHECK-NEXT: vdup.32 q9, d0[0]
+; CHECK-NEXT: vsub.f32 q8, q9, q8
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
%conv = sitofp i16 %q to float
%splat.splatinsert = insertelement <4 x float> undef, float %conv, i32 1
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
@@ -370,12 +548,25 @@ define <4 x float> @check_spr_splat4_lane1(<4 x float> %p, i16 %q) {
define <8 x i8> @check_i8_varidx(<16 x i8> %v, i32 %idx) {
; CHECK-LABEL: check_i8_varidx:
-; CHECK: mov r[[FP:[0-9]+]], sp
-; CHECK: ldr r[[IDX:[0-9]+]], [r[[FP]], #4]
-; CHECK: mov r[[SPCOPY:[0-9]+]], sp
-; CHECK: and r[[MASKED_IDX:[0-9]+]], r[[IDX]], #15
-; CHECK: vst1.64 {d{{.*}}, d{{.*}}}, [r[[SPCOPY]]:128], r[[MASKED_IDX]]
-; CHECK: vld1.8 {d{{.*}}[]}, [r[[SPCOPY]]]
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11}
+; CHECK-NEXT: push {r11}
+; CHECK-NEXT: .setfp r11, sp
+; CHECK-NEXT: mov r11, sp
+; CHECK-NEXT: .pad #28
+; CHECK-NEXT: sub sp, sp, #28
+; CHECK-NEXT: bic sp, sp, #15
+; CHECK-NEXT: ldr r12, [r11, #4]
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: mov r1, sp
+; CHECK-NEXT: and r0, r12, #15
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1:128], r0
+; CHECK-NEXT: vld1.8 {d16[]}, [r1]
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov sp, r11
+; CHECK-NEXT: pop {r11}
+; CHECK-NEXT: mov pc, lr
%x = extractelement <16 x i8> %v, i32 %idx
%1 = insertelement <8 x i8> undef, i8 %x, i32 0
%2 = insertelement <8 x i8> %1, i8 %x, i32 1
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 8623d2c164ba..ba045a8c5303 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -237,17 +237,10 @@ entry:
; illegal type to a legal type.
define <2 x i8> @test_truncate(<2 x i128> %in) {
; CHECK-LABEL: test_truncate:
-; REG2 Should map on the same Q register as REG1, i.e., REG2 = REG1 - 1, but we
-; cannot express that.
-; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG:d[0-9]+]][0], r0
; CHECK-NEXT: mov [[BASE:r[0-9]+]], sp
-; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
-; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
-; CHECK-NEXT: vmov.32 [[REG2]][1], r1
-; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
-; The Q register used here should match floor(REG1/2), but we cannot express that.
-; CHECK-NEXT: vmovn.i64 [[RES:d[0-9]+]], q{{[0-9]+}}
-; CHECK-NEXT: vmov r0, r1, [[RES]]
+; CHECK-NEXT: vld1.32 {[[REG]][1]}, {{\[}}[[BASE]]:32]
+; CHECK-NEXT: vmov r0, r1, [[REG]]
entry:
%res = trunc <2 x i128> %in to <2 x i8>
ret <2 x i8> %res
diff --git a/test/CodeGen/ARM/vector-extend-narrow.ll b/test/CodeGen/ARM/vector-extend-narrow.ll
index d054bfda615e..1aaffcc302d2 100644
--- a/test/CodeGen/ARM/vector-extend-narrow.ll
+++ b/test/CodeGen/ARM/vector-extend-narrow.ll
@@ -48,7 +48,7 @@ define <4 x i8> @h(<4 x float> %v) {
}
; CHECK-LABEL: i:
-define <4 x i8> @i(<4 x i8>* %x) {
+define <4 x i8> @i(<4 x i8>* %x, <4 x i8> %y) {
; Note: vld1 here is reasonably important. Mixing VFP and NEON
; instructions is bad on some cores
; CHECK: vld1
@@ -59,7 +59,7 @@ define <4 x i8> @i(<4 x i8>* %x) {
; CHECK: vmul
; CHECK: vmovn
%1 = load <4 x i8>, <4 x i8>* %x, align 4
- %2 = sdiv <4 x i8> zeroinitializer, %1
+ %2 = sdiv <4 x i8> %y, %1
ret <4 x i8> %2
}
; CHECK-LABEL: j:
diff --git a/test/CodeGen/ARM/vfcmp.ll b/test/CodeGen/ARM/vfcmp.ll
index 8673b7d639d6..79b23a535344 100644
--- a/test/CodeGen/ARM/vfcmp.ll
+++ b/test/CodeGen/ARM/vfcmp.ll
@@ -7,33 +7,33 @@ define <2 x i32> @vcunef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcunef32:
;CHECK: vceq.f32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp une <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp une <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; olt is implemented with VCGT
define <2 x i32> @vcoltf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcoltf32:
;CHECK: vcgt.f32
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp olt <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp olt <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ole is implemented with VCGE
define <2 x i32> @vcolef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcolef32:
;CHECK: vcge.f32
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ole <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ole <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; uge is implemented with VCGT/VMVN
@@ -41,11 +41,11 @@ define <2 x i32> @vcugef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcugef32:
;CHECK: vcgt.f32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp uge <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp uge <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ule is implemented with VCGT/VMVN
@@ -53,11 +53,11 @@ define <2 x i32> @vculef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vculef32:
;CHECK: vcgt.f32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ule <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ule <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ugt is implemented with VCGE/VMVN
@@ -65,11 +65,11 @@ define <2 x i32> @vcugtf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcugtf32:
;CHECK: vcge.f32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ugt <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ugt <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ult is implemented with VCGE/VMVN
@@ -77,11 +77,11 @@ define <2 x i32> @vcultf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: vcultf32:
;CHECK: vcge.f32
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ult <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ult <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ueq is implemented with VCGT/VCGT/VORR/VMVN
@@ -91,11 +91,11 @@ define <2 x i32> @vcueqf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ueq <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ueq <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; one is implemented with VCGT/VCGT/VORR
@@ -104,11 +104,11 @@ define <2 x i32> @vconef32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vcgt.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp one <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp one <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; uno is implemented with VCGT/VCGE/VORR/VMVN
@@ -118,11 +118,11 @@ define <2 x i32> @vcunof32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
;CHECK-NEXT: vmvn
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp uno <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp uno <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
; ord is implemented with VCGT/VCGE/VORR
@@ -131,9 +131,9 @@ define <2 x i32> @vcordf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vcge.f32
;CHECK-NEXT: vcgt.f32
;CHECK-NEXT: vorr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = fcmp ord <2 x float> %tmp1, %tmp2
- %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
- ret <2 x i32> %tmp4
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = fcmp ord <2 x float> %tmp1, %tmp2
+ %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
+ ret <2 x i32> %tmp4
}
diff --git a/test/CodeGen/ARM/vfp-reg-stride.ll b/test/CodeGen/ARM/vfp-reg-stride.ll
index c5339db68e30..0028b3d1a418 100644
--- a/test/CodeGen/ARM/vfp-reg-stride.ll
+++ b/test/CodeGen/ARM/vfp-reg-stride.ll
@@ -1,42 +1,45 @@
-; RUN: llc -mcpu=swift -mtriple=thumbv7s-apple-ios -o - %s | FileCheck %s --check-prefix=CHECK-STRIDE4
-; RUN: llc -mcpu=swift -mtriple=thumbv7k-apple-watchos -o - %s | FileCheck %s --check-prefix=CHECK-STRIDE4-WATCH
-; RUN: llc -mcpu=cortex-a57 -mtriple=thumbv7-linux-gnueabihf -o - %s | FileCheck %s --check-prefix=CHECK-GENERIC
+; RUN: llc -mcpu=swift -mtriple=thumbv7s-apple-ios -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-STRIDE4
+; RUN: llc -mcpu=swift -mtriple=thumbv7k-apple-watchos -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-STRIDE4-WATCH
+; RUN: llc -mcpu=cortex-a57 -mtriple=thumbv7-linux-gnueabihf -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-GENERIC
+; RUN: llc -mattr=wide-stride-vfp -mtriple=thumbv7-linux-gnueabihf -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-GENERIC4
+; CHECK-LABEL: test_reg_stride:
define void @test_reg_stride(double %a, double %b) {
-; CHECK-STRIDE4-LABEL: test_reg_stride:
; CHECK-STRIDE4-DAG: vmov d16, r
; CHECK-STRIDE4-DAG: vmov d18, r
-; CHECK-STRIDE4-WATCH-LABEL: test_reg_stride:
; CHECK-STRIDE4-WATCH-DAG: vmov.f64 d16, d
; CHECK-STRIDE4-WATCH-DAG: vmov.f64 d18, d
-; CHECK-GENERIC-LABEL: test_reg_stride:
; CHECK-GENERIC-DAG: vmov.f64 d16, {{d[01]}}
; CHECK-GENERIC-DAG: vmov.f64 d17, {{d[01]}}
+; CHECK-GENERIC4-DAG: vmov.f64 d16, {{d[01]}}
+; CHECK-GENERIC4-DAG: vmov.f64 d18, {{d[01]}}
+
call void asm "", "~{r0},~{r1},~{d0},~{d1}"()
call arm_aapcs_vfpcc void @eat_doubles(double %a, double %b)
ret void
}
+; CHECK-LABEL: test_stride_minsize:
define void @test_stride_minsize(float %a, float %b) minsize {
-; CHECK-STRIDE4-LABEL: test_stride_minsize:
; CHECK-STRIDE4: vmov d2, {{r[01]}}
; CHECK-STRIDE4: vmov d3, {{r[01]}}
-; CHECK-STRIDE4-WATCH-LABEL: test_stride_minsize:
; CHECK-STRIDE4-WATCH-DAG: vmov.f32 s4, {{s[01]}}
; CHECK-STRIDE4-WATCH-DAG: vmov.f32 s8, {{s[01]}}
-; CHECK-GENERIC-LABEL: test_stride_minsize:
; CHECK-GENERIC-DAG: vmov.f32 s4, {{s[01]}}
; CHECK-GENERIC-DAG: vmov.f32 s6, {{s[01]}}
+
+; CHECK-GENERIC4-DAG: vmov.f32 s4, {{s[01]}}
+; CHECK-GENERIC4-DAG: vmov.f32 s6, {{s[01]}}
+
call void asm "", "~{r0},~{r1},~{s0},~{s1},~{d0},~{d1}"()
call arm_aapcs_vfpcc void @eat_floats(float %a, float %b)
ret void
}
-
declare arm_aapcs_vfpcc void @eat_doubles(double, double)
declare arm_aapcs_vfpcc void @eat_floats(float, float)
diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll
index 12cb504eda79..6b200176e1fe 100644
--- a/test/CodeGen/ARM/vtrn.ll
+++ b/test/CodeGen/ARM/vtrn.ll
@@ -9,12 +9,12 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
}
define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -26,10 +26,10 @@ define <16 x i8> @vtrni8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- ret <16 x i8> %tmp3
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <16 x i8> %tmp3
}
define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -41,12 +41,12 @@ define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: vadd.i16 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i16>, <4 x i16>* %A
- %tmp2 = load <4 x i16>, <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i16> %tmp3, %tmp4
- ret <4 x i16> %tmp5
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
}
define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
@@ -58,10 +58,10 @@ define <8 x i16> @vtrni16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i16>, <4 x i16>* %A
- %tmp2 = load <4 x i16>, <4 x i16>* %B
- %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x i16> %tmp3
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x i16> %tmp3
}
define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -73,12 +73,12 @@ define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-NEXT: vmul.i32 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x i32>, <2 x i32>* %A
- %tmp2 = load <2 x i32>, <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = mul <2 x i32> %tmp3, %tmp4
- ret <2 x i32> %tmp5
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = mul <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
}
define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
@@ -90,10 +90,10 @@ define <4 x i32> @vtrni32_Qres(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x i32>, <2 x i32>* %A
- %tmp2 = load <2 x i32>, <2 x i32>* %B
- %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x i32> %tmp3
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x i32> %tmp3
}
define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
@@ -105,12 +105,12 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-NEXT: vadd.f32 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
- %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
- %tmp5 = fadd <2 x float> %tmp3, %tmp4
- ret <2 x float> %tmp5
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = fadd <2 x float> %tmp3, %tmp4
+ ret <2 x float> %tmp5
}
define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
@@ -122,10 +122,10 @@ define <4 x float> @vtrnf_Qres(<2 x float>* %A, <2 x float>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <2 x float>, <2 x float>* %A
- %tmp2 = load <2 x float>, <2 x float>* %B
- %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x float> %tmp3
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x float> %tmp3
}
define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
@@ -138,12 +138,12 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
- %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- %tmp5 = add <16 x i8> %tmp3, %tmp4
- ret <16 x i8> %tmp5
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
}
define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
@@ -155,10 +155,10 @@ define <32 x i8> @vtrnQi8_QQres(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-NEXT: vst1.8 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <16 x i8>, <16 x i8>* %A
- %tmp2 = load <16 x i8>, <16 x i8>* %B
- %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <32 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
- ret <32 x i8> %tmp3
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <32 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30, i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ ret <32 x i8> %tmp3
}
define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -171,12 +171,12 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
}
define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -188,10 +188,10 @@ define <16 x i16> @vtrnQi16_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vst1.16 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- ret <16 x i16> %tmp3
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ ret <16 x i16> %tmp3
}
define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
@@ -204,12 +204,12 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <4 x i32>, <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = add <4 x i32> %tmp3, %tmp4
- ret <4 x i32> %tmp5
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
}
define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
@@ -221,10 +221,10 @@ define <8 x i32> @vtrnQi32_QQres(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-NEXT: vst1.32 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <4 x i32>, <4 x i32>* %B
- %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x i32> %tmp3
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x i32> %tmp3
}
define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
@@ -237,12 +237,12 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x float>, <4 x float>* %A
- %tmp2 = load <4 x float>, <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
- %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
- %tmp5 = fadd <4 x float> %tmp3, %tmp4
- ret <4 x float> %tmp5
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = fadd <4 x float> %tmp3, %tmp4
+ ret <4 x float> %tmp5
}
define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
@@ -254,10 +254,10 @@ define <8 x float> @vtrnQf_QQres(<4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-NEXT: vst1.32 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <4 x float>, <4 x float>* %A
- %tmp2 = load <4 x float>, <4 x float>* %B
- %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
- ret <8 x float> %tmp3
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
+ %tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <8 x i32> <i32 0, i32 4, i32 2, i32 6, i32 1, i32 5, i32 3, i32 7>
+ ret <8 x float> %tmp3
}
@@ -270,12 +270,12 @@ define <8 x i8> @vtrni8_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vadd.i8 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
- %tmp5 = add <8 x i8> %tmp3, %tmp4
- ret <8 x i8> %tmp5
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
}
define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -287,10 +287,10 @@ define <16 x i8> @vtrni8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, [[LDR0]]
; CHECK-NEXT: vmov r2, r3, [[LDR1]]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i8>, <8 x i8>* %A
- %tmp2 = load <8 x i8>, <8 x i8>* %B
- %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
- ret <16 x i8> %tmp3
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <16 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14, i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
+ ret <16 x i8> %tmp3
}
define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -303,12 +303,12 @@ define <8 x i16> @vtrnQi16_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
- %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
- %tmp5 = add <8 x i16> %tmp3, %tmp4
- ret <8 x i16> %tmp5
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
}
define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
@@ -320,10 +320,10 @@ define <16 x i16> @vtrnQi16_undef_QQres(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-NEXT: vst1.16 {d18, d19}, [r0:128]!
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: mov pc, lr
- %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <8 x i16>, <8 x i16>* %B
- %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14, i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
- ret <16 x i16> %tmp3
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <16 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14, i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
+ ret <16 x i16> %tmp3
}
define <8 x i16> @vtrn_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 96cafdec7bf1..281fe2537a47 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -324,23 +324,23 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8.
; CHECK-LABEL: cmpsel_trunc:
; CHECK: @ %bb.0:
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: add r12, sp, #48
-; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
-; CHECK-NEXT: add r12, sp, #32
-; CHECK-NEXT: vcgt.u32 q8, q10, q8
-; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
-; CHECK-NEXT: vcgt.u32 q9, q10, q9
-; CHECK-NEXT: vmov d20, r2, r3
-; CHECK-NEXT: vmovn.i32 d17, q8
-; CHECK-NEXT: vmovn.i32 d16, q9
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vmovn.i16 d16, q8
-; CHECK-NEXT: vbsl d16, d18, d20
-; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: add r12, sp, #48
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: add r12, sp, #32
+; CHECK-NEXT: vcgt.u32 q8, q10, q8
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: vcgt.u32 q9, q10, q9
+; CHECK-NEXT: vmov d20, r2, r3
+; CHECK-NEXT: vmovn.i32 d17, q8
+; CHECK-NEXT: vmovn.i32 d16, q9
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmovn.i16 d16, q8
+; CHECK-NEXT: vbsl d16, d18, d20
+; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%c = icmp ult <8 x i32> %cmp0, %cmp1
%res = select <8 x i1> %c, <8 x i8> %in0, <8 x i8> %in1
@@ -353,28 +353,28 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
-; CHECK-NEXT: add r12, sp, #8
-; CHECK-NEXT: add lr, sp, #24
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: ldr r12, [sp, #40]
-; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
-; CHECK-NEXT: vmov.i8 d19, #0x7
-; CHECK-NEXT: vmovl.u8 q10, d18
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vneg.s8 d17, d19
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vuzp.8 d16, d20
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vbsl d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: ldr r12, [sp, #40]
+; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovl.u8 q10, d18
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vuzp.8 d16, d20
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: pop {r11, lr}
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -389,22 +389,22 @@ define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmov.i8 d18, #0x7
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vuzp.8 d16, d17
-; CHECK-NEXT: vneg.s8 d17, d18
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vbsl d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmov.i8 d18, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vuzp.8 d16, d17
+; CHECK-NEXT: vneg.s8 d17, d18
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -417,23 +417,23 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1
define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left:
; CHECK: @ %bb.0:
-; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: add r12, sp, #16
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vldr d18, .LCPI22_0
-; CHECK-NEXT: vmov.i8 d19, #0x7
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vtbl.8 d16, {d16}, d18
-; CHECK-NEXT: vneg.s8 d17, d19
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vbsl d16, d17, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vldr d18, .LCPI22_0
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vtbl.8 d16, {d16}, d18
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI22_0:
@@ -459,55 +459,55 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
; CHECK-LABEL: vuzp_wide_type:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r4, lr}
-; CHECK-NEXT: push {r4, lr}
-; CHECK-NEXT: add r12, sp, #32
-; CHECK-NEXT: add lr, sp, #48
-; CHECK-NEXT: vld1.32 {d17[0]}, [r12:32]
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: vld1.32 {d16[0]}, [r12:32]
-; CHECK-NEXT: add r12, sp, #56
-; CHECK-NEXT: vld1.32 {d19[0]}, [r12:32]
-; CHECK-NEXT: ldr r12, [sp, #68]
-; CHECK-NEXT: vld1.32 {d18[0]}, [lr:32]
-; CHECK-NEXT: add lr, sp, #40
-; CHECK-NEXT: vld1.32 {d20[0]}, [lr:32]
-; CHECK-NEXT: ldr r4, [r12]
-; CHECK-NEXT: vmov.32 d23[0], r4
-; CHECK-NEXT: add r4, sp, #64
-; CHECK-NEXT: vld1.32 {d24[0]}, [r4:32]
-; CHECK-NEXT: add r4, sp, #36
-; CHECK-NEXT: vld1.32 {d17[1]}, [r4:32]
-; CHECK-NEXT: add r4, sp, #28
-; CHECK-NEXT: vcgt.u32 q10, q12, q10
-; CHECK-NEXT: vmov.u8 lr, d23[3]
-; CHECK-NEXT: vld1.32 {d16[1]}, [r4:32]
-; CHECK-NEXT: add r4, sp, #60
-; CHECK-NEXT: vld1.32 {d19[1]}, [r4:32]
-; CHECK-NEXT: add r4, sp, #52
-; CHECK-NEXT: vld1.32 {d18[1]}, [r4:32]
-; CHECK-NEXT: add r4, r12, #4
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmovn.i32 d19, q10
-; CHECK-NEXT: vldr d20, .LCPI23_0
-; CHECK-NEXT: vmovn.i32 d18, q8
-; CHECK-NEXT: vmovn.i16 d22, q9
-; CHECK-NEXT: vmov.i8 q9, #0x7
-; CHECK-NEXT: vmov.8 d17[0], lr
-; CHECK-NEXT: vneg.s8 q9, q9
-; CHECK-NEXT: vtbl.8 d16, {d22, d23}, d20
-; CHECK-NEXT: vld1.8 {d17[1]}, [r4]
-; CHECK-NEXT: add r4, sp, #8
-; CHECK-NEXT: vshl.i8 q8, q8, #7
-; CHECK-NEXT: vld1.64 {d20, d21}, [r4]
-; CHECK-NEXT: vshl.s8 q8, q8, q9
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vbsl q8, q9, q10
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: pop {r4, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: add r12, sp, #32
+; CHECK-NEXT: add lr, sp, #48
+; CHECK-NEXT: vld1.32 {d17[0]}, [r12:32]
+; CHECK-NEXT: add r12, sp, #24
+; CHECK-NEXT: vld1.32 {d16[0]}, [r12:32]
+; CHECK-NEXT: add r12, sp, #56
+; CHECK-NEXT: vld1.32 {d19[0]}, [r12:32]
+; CHECK-NEXT: vld1.32 {d18[0]}, [lr:32]
+; CHECK-NEXT: add lr, sp, #40
+; CHECK-NEXT: vld1.32 {d20[0]}, [lr:32]
+; CHECK-NEXT: ldr r12, [sp, #68]
+; CHECK-NEXT: ldr r4, [r12]
+; CHECK-NEXT: vmov.32 d23[0], r4
+; CHECK-NEXT: add r4, sp, #64
+; CHECK-NEXT: vld1.32 {d24[0]}, [r4:32]
+; CHECK-NEXT: add r4, sp, #36
+; CHECK-NEXT: vcgt.u32 q10, q12, q10
+; CHECK-NEXT: vld1.32 {d17[1]}, [r4:32]
+; CHECK-NEXT: add r4, sp, #28
+; CHECK-NEXT: vld1.32 {d16[1]}, [r4:32]
+; CHECK-NEXT: add r4, sp, #60
+; CHECK-NEXT: vld1.32 {d19[1]}, [r4:32]
+; CHECK-NEXT: add r4, sp, #52
+; CHECK-NEXT: vld1.32 {d18[1]}, [r4:32]
+; CHECK-NEXT: add r4, r12, #4
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmovn.i32 d19, q10
+; CHECK-NEXT: vmov.u8 lr, d23[3]
+; CHECK-NEXT: vldr d20, .LCPI23_0
+; CHECK-NEXT: vmovn.i32 d18, q8
+; CHECK-NEXT: vmovn.i16 d22, q9
+; CHECK-NEXT: vmov.i8 q9, #0x7
+; CHECK-NEXT: vneg.s8 q9, q9
+; CHECK-NEXT: vmov.8 d17[0], lr
+; CHECK-NEXT: vtbl.8 d16, {d22, d23}, d20
+; CHECK-NEXT: vld1.8 {d17[1]}, [r4]
+; CHECK-NEXT: add r4, sp, #8
+; CHECK-NEXT: vshl.i8 q8, q8, #7
+; CHECK-NEXT: vld1.64 {d20, d21}, [r4]
+; CHECK-NEXT: vshl.s8 q8, q8, q9
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vbsl q8, q9, q10
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: pop {r4, lr}
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI23_0:
diff --git a/test/CodeGen/ARM/wide-compares.ll b/test/CodeGen/ARM/wide-compares.ll
index 9b22f5fedfeb..6584f0c7616c 100644
--- a/test/CodeGen/ARM/wide-compares.ll
+++ b/test/CodeGen/ARM/wide-compares.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=armv7-unknown-linux < %s | FileCheck --check-prefix=CHECK-ARM %s
-; RUN: llc -mtriple=thumbv6-unknown-linux < %s | FileCheck --check-prefix=CHECK-THUMB1 %s
-; RUN: llc -mtriple=thumbv7-unknown-linux < %s | FileCheck --check-prefix=CHECK-THUMB2 %s
+; RUN: llc -mtriple=armv7-unknown-linux < %s -verify-machineinstrs | FileCheck --check-prefix=CHECK-ARM %s
+; RUN: llc -mtriple=thumb-eabi < %s -verify-machineinstrs | FileCheck --check-prefix=CHECK-THUMB1-NOMOV %s
+; RUN: llc -mtriple=thumbv6-unknown-linux < %s -verify-machineinstrs | FileCheck --check-prefix=CHECK-THUMB1 %s
+; RUN: llc -mtriple=thumbv7-unknown-linux < %s -verify-machineinstrs | FileCheck --check-prefix=CHECK-THUMB2 %s
define i32 @test_slt1(i64 %a, i64 %b) {
; CHECK-ARM-LABEL: test_slt1:
@@ -13,6 +14,18 @@ define i32 @test_slt1(i64 %a, i64 %b) {
; CHECK-ARM-NEXT: mov r0, r12
; CHECK-ARM-NEXT: bx lr
;
+; CHECK-THUMB1-NOMOV-LABEL: test_slt1:
+; CHECK-THUMB1-NOMOV: @ %bb.0: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: subs r0, r0, r2
+; CHECK-THUMB1-NOMOV-NEXT: sbcs r1, r3
+; CHECK-THUMB1-NOMOV-NEXT: bge .LBB0_2
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.1: @ %bb1
+; CHECK-THUMB1-NOMOV-NEXT: movs r0, #1
+; CHECK-THUMB1-NOMOV-NEXT: bx lr
+; CHECK-THUMB1-NOMOV-NEXT: .LBB0_2: @ %bb2
+; CHECK-THUMB1-NOMOV-NEXT: movs r0, #2
+; CHECK-THUMB1-NOMOV-NEXT: bx lr
+;
; CHECK-THUMB1-LABEL: test_slt1:
; CHECK-THUMB1: @ %bb.0: @ %entry
; CHECK-THUMB1-NEXT: subs r0, r0, r2
@@ -57,6 +70,23 @@ define void @test_slt2(i64 %a, i64 %b) {
; CHECK-ARM-NEXT: bl g
; CHECK-ARM-NEXT: pop {r11, pc}
;
+; CHECK-THUMB1-NOMOV-LABEL: test_slt2:
+; CHECK-THUMB1-NOMOV: @ %bb.0: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: .save {r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: push {r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: subs r0, r0, r2
+; CHECK-THUMB1-NOMOV-NEXT: sbcs r1, r3
+; CHECK-THUMB1-NOMOV-NEXT: bge .LBB1_2
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.1: @ %bb1
+; CHECK-THUMB1-NOMOV-NEXT: bl f
+; CHECK-THUMB1-NOMOV-NEXT: b .LBB1_3
+; CHECK-THUMB1-NOMOV-NEXT: .LBB1_2: @ %bb2
+; CHECK-THUMB1-NOMOV-NEXT: bl g
+; CHECK-THUMB1-NOMOV-NEXT: .LBB1_3: @ %bb1
+; CHECK-THUMB1-NOMOV-NEXT: pop {r7}
+; CHECK-THUMB1-NOMOV-NEXT: pop {r0}
+; CHECK-THUMB1-NOMOV-NEXT: bx r0
+;
; CHECK-THUMB1-LABEL: test_slt2:
; CHECK-THUMB1: @ %bb.0: @ %entry
; CHECK-THUMB1-NEXT: push {r7, lr}
@@ -95,3 +125,193 @@ bb2:
declare void @f()
declare void @g()
+
+define i64 @test_slt_select(i64 %c, i64 %d, i64 %a, i64 %b) {
+; CHECK-ARM-LABEL: test_slt_select:
+; CHECK-ARM: @ %bb.0: @ %entry
+; CHECK-ARM-NEXT: push {r4, r5, r6, r7, r11, lr}
+; CHECK-ARM-NEXT: ldr r12, [sp, #32]
+; CHECK-ARM-NEXT: mov r6, #0
+; CHECK-ARM-NEXT: ldr lr, [sp, #24]
+; CHECK-ARM-NEXT: ldr r7, [sp, #36]
+; CHECK-ARM-NEXT: ldr r5, [sp, #28]
+; CHECK-ARM-NEXT: subs r4, lr, r12
+; CHECK-ARM-NEXT: sbcs r7, r5, r7
+; CHECK-ARM-NEXT: movwlo r6, #1
+; CHECK-ARM-NEXT: cmp r6, #0
+; CHECK-ARM-NEXT: moveq r0, r2
+; CHECK-ARM-NEXT: moveq r1, r3
+; CHECK-ARM-NEXT: pop {r4, r5, r6, r7, r11, pc}
+;
+; CHECK-THUMB1-NOMOV-LABEL: test_slt_select:
+; CHECK-THUMB1-NOMOV: @ %bb.0: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: .save {r4, r5, r6, r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: .pad #4
+; CHECK-THUMB1-NOMOV-NEXT: sub sp, #4
+; CHECK-THUMB1-NOMOV-NEXT: ldr r4, [sp, #36]
+; CHECK-THUMB1-NOMOV-NEXT: ldr r5, [sp, #28]
+; CHECK-THUMB1-NOMOV-NEXT: ldr r6, [sp, #32]
+; CHECK-THUMB1-NOMOV-NEXT: ldr r7, [sp, #24]
+; CHECK-THUMB1-NOMOV-NEXT: subs r6, r7, r6
+; CHECK-THUMB1-NOMOV-NEXT: sbcs r5, r4
+; CHECK-THUMB1-NOMOV-NEXT: blo .LBB2_2
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.1: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: movs r4, #0
+; CHECK-THUMB1-NOMOV-NEXT: cmp r4, #0
+; CHECK-THUMB1-NOMOV-NEXT: beq .LBB2_3
+; CHECK-THUMB1-NOMOV-NEXT: b .LBB2_4
+; CHECK-THUMB1-NOMOV-NEXT: .LBB2_2:
+; CHECK-THUMB1-NOMOV-NEXT: movs r4, #1
+; CHECK-THUMB1-NOMOV-NEXT: cmp r4, #0
+; CHECK-THUMB1-NOMOV-NEXT: bne .LBB2_4
+; CHECK-THUMB1-NOMOV-NEXT: .LBB2_3: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: movs r0, r2
+; CHECK-THUMB1-NOMOV-NEXT: .LBB2_4: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: cmp r4, #0
+; CHECK-THUMB1-NOMOV-NEXT: bne .LBB2_6
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.5: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: movs r1, r3
+; CHECK-THUMB1-NOMOV-NEXT: .LBB2_6: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: add sp, #4
+; CHECK-THUMB1-NOMOV-NEXT: pop {r4, r5, r6, r7}
+; CHECK-THUMB1-NOMOV-NEXT: pop {r2}
+; CHECK-THUMB1-NOMOV-NEXT: bx r2
+;
+; CHECK-THUMB1-LABEL: test_slt_select:
+; CHECK-THUMB1: @ %bb.0: @ %entry
+; CHECK-THUMB1-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB1-NEXT: sub sp, #4
+; CHECK-THUMB1-NEXT: ldr r4, [sp, #36]
+; CHECK-THUMB1-NEXT: ldr r5, [sp, #28]
+; CHECK-THUMB1-NEXT: ldr r6, [sp, #32]
+; CHECK-THUMB1-NEXT: ldr r7, [sp, #24]
+; CHECK-THUMB1-NEXT: subs r6, r7, r6
+; CHECK-THUMB1-NEXT: sbcs r5, r4
+; CHECK-THUMB1-NEXT: blo .LBB2_2
+; CHECK-THUMB1-NEXT: @ %bb.1: @ %entry
+; CHECK-THUMB1-NEXT: movs r4, #0
+; CHECK-THUMB1-NEXT: cmp r4, #0
+; CHECK-THUMB1-NEXT: beq .LBB2_3
+; CHECK-THUMB1-NEXT: b .LBB2_4
+; CHECK-THUMB1-NEXT: .LBB2_2:
+; CHECK-THUMB1-NEXT: movs r4, #1
+; CHECK-THUMB1-NEXT: cmp r4, #0
+; CHECK-THUMB1-NEXT: bne .LBB2_4
+; CHECK-THUMB1-NEXT: .LBB2_3: @ %entry
+; CHECK-THUMB1-NEXT: mov r0, r2
+; CHECK-THUMB1-NEXT: .LBB2_4: @ %entry
+; CHECK-THUMB1-NEXT: cmp r4, #0
+; CHECK-THUMB1-NEXT: beq .LBB2_6
+; CHECK-THUMB1-NEXT: @ %bb.5: @ %entry
+; CHECK-THUMB1-NEXT: add sp, #4
+; CHECK-THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+; CHECK-THUMB1-NEXT: .LBB2_6: @ %entry
+; CHECK-THUMB1-NEXT: mov r1, r3
+; CHECK-THUMB1-NEXT: add sp, #4
+; CHECK-THUMB1-NEXT: pop {r4, r5, r6, r7, pc}
+;
+; CHECK-THUMB2-LABEL: test_slt_select:
+; CHECK-THUMB2: @ %bb.0: @ %entry
+; CHECK-THUMB2-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-THUMB2-NEXT: sub sp, #4
+; CHECK-THUMB2-NEXT: ldrd r12, r7, [sp, #32]
+; CHECK-THUMB2-NEXT: movs r6, #0
+; CHECK-THUMB2-NEXT: ldrd lr, r5, [sp, #24]
+; CHECK-THUMB2-NEXT: subs.w r4, lr, r12
+; CHECK-THUMB2-NEXT: sbcs.w r7, r5, r7
+; CHECK-THUMB2-NEXT: it lo
+; CHECK-THUMB2-NEXT: movlo r6, #1
+; CHECK-THUMB2-NEXT: cmp r6, #0
+; CHECK-THUMB2-NEXT: itt eq
+; CHECK-THUMB2-NEXT: moveq r0, r2
+; CHECK-THUMB2-NEXT: moveq r1, r3
+; CHECK-THUMB2-NEXT: add sp, #4
+; CHECK-THUMB2-NEXT: pop {r4, r5, r6, r7, pc}
+entry:
+ %cmp = icmp ult i64 %a, %b
+ %r1 = select i1 %cmp, i64 %c, i64 %d
+ ret i64 %r1
+}
+
+define {i32, i32} @test_slt_not(i32 %c, i32 %d, i64 %a, i64 %b) {
+; CHECK-ARM-LABEL: test_slt_not:
+; CHECK-ARM: @ %bb.0: @ %entry
+; CHECK-ARM-NEXT: ldr r12, [sp]
+; CHECK-ARM-NEXT: mov r1, #0
+; CHECK-ARM-NEXT: ldr r0, [sp, #4]
+; CHECK-ARM-NEXT: subs r2, r2, r12
+; CHECK-ARM-NEXT: sbcs r0, r3, r0
+; CHECK-ARM-NEXT: mov r0, #0
+; CHECK-ARM-NEXT: movwge r1, #1
+; CHECK-ARM-NEXT: movwlt r0, #1
+; CHECK-ARM-NEXT: bx lr
+;
+; CHECK-THUMB1-NOMOV-LABEL: test_slt_not:
+; CHECK-THUMB1-NOMOV: @ %bb.0: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: .save {r4, r5, r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: push {r4, r5, r7, lr}
+; CHECK-THUMB1-NOMOV-NEXT: movs r1, #1
+; CHECK-THUMB1-NOMOV-NEXT: movs r4, #0
+; CHECK-THUMB1-NOMOV-NEXT: ldr r0, [sp, #20]
+; CHECK-THUMB1-NOMOV-NEXT: ldr r5, [sp, #16]
+; CHECK-THUMB1-NOMOV-NEXT: subs r2, r2, r5
+; CHECK-THUMB1-NOMOV-NEXT: sbcs r3, r0
+; CHECK-THUMB1-NOMOV-NEXT: push {r1}
+; CHECK-THUMB1-NOMOV-NEXT: pop {r0}
+; CHECK-THUMB1-NOMOV-NEXT: blt .LBB3_2
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.1: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: push {r4}
+; CHECK-THUMB1-NOMOV-NEXT: pop {r0}
+; CHECK-THUMB1-NOMOV-NEXT: .LBB3_2: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: bge .LBB3_4
+; CHECK-THUMB1-NOMOV-NEXT: @ %bb.3: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: movs r1, r4
+; CHECK-THUMB1-NOMOV-NEXT: .LBB3_4: @ %entry
+; CHECK-THUMB1-NOMOV-NEXT: pop {r4, r5, r7}
+; CHECK-THUMB1-NOMOV-NEXT: pop {r2}
+; CHECK-THUMB1-NOMOV-NEXT: bx r2
+;
+; CHECK-THUMB1-LABEL: test_slt_not:
+; CHECK-THUMB1: @ %bb.0: @ %entry
+; CHECK-THUMB1-NEXT: push {r4, r5, r7, lr}
+; CHECK-THUMB1-NEXT: movs r1, #1
+; CHECK-THUMB1-NEXT: movs r4, #0
+; CHECK-THUMB1-NEXT: ldr r0, [sp, #20]
+; CHECK-THUMB1-NEXT: ldr r5, [sp, #16]
+; CHECK-THUMB1-NEXT: subs r2, r2, r5
+; CHECK-THUMB1-NEXT: sbcs r3, r0
+; CHECK-THUMB1-NEXT: mov r0, r1
+; CHECK-THUMB1-NEXT: bge .LBB3_3
+; CHECK-THUMB1-NEXT: @ %bb.1: @ %entry
+; CHECK-THUMB1-NEXT: blt .LBB3_4
+; CHECK-THUMB1-NEXT: .LBB3_2: @ %entry
+; CHECK-THUMB1-NEXT: pop {r4, r5, r7, pc}
+; CHECK-THUMB1-NEXT: .LBB3_3: @ %entry
+; CHECK-THUMB1-NEXT: mov r0, r4
+; CHECK-THUMB1-NEXT: bge .LBB3_2
+; CHECK-THUMB1-NEXT: .LBB3_4: @ %entry
+; CHECK-THUMB1-NEXT: mov r1, r4
+; CHECK-THUMB1-NEXT: pop {r4, r5, r7, pc}
+;
+; CHECK-THUMB2-LABEL: test_slt_not:
+; CHECK-THUMB2: @ %bb.0: @ %entry
+; CHECK-THUMB2-NEXT: ldr.w r12, [sp]
+; CHECK-THUMB2-NEXT: movs r1, #0
+; CHECK-THUMB2-NEXT: ldr r0, [sp, #4]
+; CHECK-THUMB2-NEXT: subs.w r2, r2, r12
+; CHECK-THUMB2-NEXT: sbcs.w r0, r3, r0
+; CHECK-THUMB2-NEXT: mov.w r0, #0
+; CHECK-THUMB2-NEXT: ite lt
+; CHECK-THUMB2-NEXT: movlt r0, #1
+; CHECK-THUMB2-NEXT: movge r1, #1
+; CHECK-THUMB2-NEXT: bx lr
+entry:
+ %cmp = icmp slt i64 %a, %b
+ %not = xor i1 %cmp, true
+ %r1 = zext i1 %cmp to i32
+ %r2 = zext i1 %not to i32
+ %z = insertvalue { i32, i32 } undef, i32 %r1, 0
+ %z2 = insertvalue { i32, i32 } %z, i32 %r2, 1
+ ret { i32, i32 } %z2
+}