author    Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-04-16 16:01:22 +0000
commit    71d5a2540a98c81f5bcaeb48805e0e2881f530ef (patch)
tree      5343938942df402b49ec7300a1c25a2d4ccd5821 /test/Transforms/LoopStrengthReduce
parent    31bbf64f3a4974a2d6c8b3b27ad2f519caf74057 (diff)
Diffstat (limited to 'test/Transforms/LoopStrengthReduce')
-rw-r--r--  test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll                                    | 87
-rw-r--r--  test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll |  8
-rw-r--r--  test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll                 | 31
-rw-r--r--  test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll                 |  8
-rw-r--r--  test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll                 |  2
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll                                   |  1
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/canonical.ll                                     | 65
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll                      | 48
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll                                   |  2
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll                                   | 52
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll                                   | 58
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/nested-loop.ll                                   | 65
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll                                 | 97
-rw-r--r--  test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll                              | 54
14 files changed, 512 insertions, 66 deletions
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
new file mode 100644
index 000000000000..054c61d18795
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/atomics.ll
@@ -0,0 +1,87 @@
+; RUN: opt -S -mtriple=amdgcn-- -mcpu=bonaire -loop-reduce < %s | FileCheck -check-prefix=OPT %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; Make sure the pointer / address space of an atomicrmw is considered.
+
+; OPT-LABEL: @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(
+
+; OPT-NOT: getelementptr
+
+; OPT: .lr.ph:
+; OPT: %lsr.iv2 = phi i32 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv2, i32 16383
+; OPT: %tmp4 = atomicrmw add i32 addrspace(3)* %scevgep4, i32 undef seq_cst
+; OPT: %tmp7 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 undef seq_cst
+; OPT: %0 = atomicrmw add i32 addrspace(3)* %lsr.iv1, i32 %tmp8 seq_cst
+; OPT: br i1 %exitcond
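+;
+; A note on the magic constant (an inference from the companion test
+; different-addrspace-addressing-mode-loops.ll, which uses a 65535-byte i8
+; offset for addrspace(3)): 16383 i32 elements is 65532 bytes, still within
+; the maximum local-memory offset, so LSR can keep the offset folded into a
+; single getelementptr instead of materializing another induction variable.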
+define amdgpu_kernel void @test_local_atomicrmw_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+bb:
+ %tmp = icmp sgt i32 %n, 0
+ br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
+
+.lr.ph.preheader: ; preds = %bb
+ br label %.lr.ph
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %bb
+ ret void
+
+.lr.ph: ; preds = %.lr.ph, %.lr.ph.preheader
+ %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %tmp1 = add nuw nsw i32 %indvars.iv, 16383
+ %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
+ %tmp4 = atomicrmw add i32 addrspace(3)* %tmp3, i32 undef seq_cst
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
+ %tmp7 = atomicrmw add i32 addrspace(3)* %tmp6, i32 undef seq_cst
+ %tmp8 = add nsw i32 %tmp7, %tmp4
+ atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+ %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
+ %exitcond = icmp eq i32 %indvars.iv.next, %n
+ br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
+}
+
+; OPT-LABEL: @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(
+; OPT-NOT: getelementptr
+
+; OPT: .lr.ph:
+; OPT: %lsr.iv2 = phi i32 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
+; OPT: %lsr.iv1 = phi i32 addrspace(3)* [ %scevgep, %.lr.ph ], [ %arg0, %.lr.ph.preheader ]
+; OPT: %lsr.iv = phi i32 [ %lsr.iv.next, %.lr.ph ], [ %n, %.lr.ph.preheader ]
+; OPT: %scevgep4 = getelementptr i32, i32 addrspace(3)* %lsr.iv2, i32 16383
+; OPT: %tmp4 = cmpxchg i32 addrspace(3)* %scevgep4, i32 undef, i32 undef seq_cst monotonic
+define amdgpu_kernel void @test_local_cmpxchg_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(3)* noalias nocapture %arg0, i32 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+bb:
+ %tmp = icmp sgt i32 %n, 0
+ br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
+
+.lr.ph.preheader: ; preds = %bb
+ br label %.lr.ph
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %bb
+ ret void
+
+.lr.ph: ; preds = %.lr.ph, %.lr.ph.preheader
+ %indvars.iv = phi i32 [ %indvars.iv.next, %.lr.ph ], [ 0, %.lr.ph.preheader ]
+ %tmp1 = add nuw nsw i32 %indvars.iv, 16383
+ %tmp3 = getelementptr inbounds i32, i32 addrspace(3)* %arg1, i32 %tmp1
+ %tmp4 = cmpxchg i32 addrspace(3)* %tmp3, i32 undef, i32 undef seq_cst monotonic
+ %tmp4.0 = extractvalue { i32, i1 } %tmp4, 0
+ %tmp6 = getelementptr inbounds i32, i32 addrspace(3)* %arg0, i32 %indvars.iv
+ %tmp7 = cmpxchg i32 addrspace(3)* %tmp6, i32 undef, i32 undef seq_cst monotonic
+ %tmp7.0 = extractvalue { i32, i1 } %tmp7, 0
+ %tmp8 = add nsw i32 %tmp7.0, %tmp4.0
+ atomicrmw add i32 addrspace(3)* %tmp6, i32 %tmp8 seq_cst
+ %indvars.iv.next = add nuw nsw i32 %indvars.iv, 1
+ %exitcond = icmp eq i32 %indvars.iv.next, %n
+ br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
+}
+
+attributes #0 = { nounwind }
\ No newline at end of file
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index bf61112a3c3e..c5ea1b915d91 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; OPT: %lsr.iv2 = phi i8 addrspace(1)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv2, i64 4095
; OPT: load i8, i8 addrspace(1)* %scevgep4, align 1
-define void @test_global_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -48,7 +48,7 @@ bb:
; OPT: {{^}}.lr.ph:
; OPT: %lsr.iv3 = phi i8 addrspace(1)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(1)* %lsr.iv3, i64 1
-define void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_global_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(1)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -83,7 +83,7 @@ bb:
; OPT: %lsr.iv2 = phi i8 addrspace(3)* [ %scevgep3, %.lr.ph ], [ %arg1, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv2, i32 65535
; OPT: %tmp4 = load i8, i8 addrspace(3)* %scevgep4, align 1
-define void @test_local_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
@@ -122,7 +122,7 @@ bb:
; OPT: {{^}}.lr.ph:
; OPT: %lsr.iv3 = phi i8 addrspace(3)* [ %scevgep4, %.lr.ph ], [ %scevgep2, %.lr.ph.preheader ]
; OPT: %scevgep4 = getelementptr i8, i8 addrspace(3)* %lsr.iv3, i32 1
-define void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
+define amdgpu_kernel void @test_local_addressing_loop_uniform_index_max_offset_p1_i32(i32 addrspace(1)* noalias nocapture %arg0, i8 addrspace(3)* noalias nocapture readonly %arg1, i32 %n) #0 {
bb:
%tmp = icmp sgt i32 %n, 0
br i1 %tmp, label %.lr.ph.preheader, label %._crit_edge
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll
new file mode 100644
index 000000000000..02c3c05e7945
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-crash.ll
@@ -0,0 +1,31 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn--"
+
+; We need to compile this for a target where we have different address spaces,
+; and where pointers in those address spaces have different sizes.
+; E.g. for amdgcn-- pointers in address space 0 are 32 bits and pointers in
+; address space 1 are 64 bits.
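+; (This follows from the datalayout above: "p:32:32" gives 32-bit addrspace(0)
+; pointers, while "p1:64:64" gives 64-bit addrspace(1) pointers.)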
+
+; We shouldn't crash. Check that we get a loop with the two stores.
+;CHECK-LABEL: foo:
+;CHECK: [[LOOP_LABEL:BB[0-9]+_[0-9]+]]:
+;CHECK: buffer_store_dword
+;CHECK: buffer_store_dword
+;CHECK: s_branch [[LOOP_LABEL]]
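+;
+; (The loop is intentionally infinite, hence the unconditional s_branch back
+; to the loop header.)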
+
+define amdgpu_kernel void @foo() {
+entry:
+ br label %loop
+
+loop:
+ %idx0 = phi i32 [ %next_idx0, %loop ], [ 0, %entry ]
+ %0 = getelementptr inbounds i32, i32* null, i32 %idx0
+ %1 = getelementptr inbounds i32, i32 addrspace(1)* null, i32 %idx0
+ store i32 1, i32* %0
+ store i32 7, i32 addrspace(1)* %1
+ %next_idx0 = add nuw nsw i32 %idx0, 1
+ br label %loop
+}
+
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
index 8c83df5843d2..67b1926bdf27 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/lsr-postinc-pos-addrspace.ll
@@ -16,7 +16,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: bb:
; CHECK: inttoptr i32 %lsr.iv.next2 to i8 addrspace(3)*
; CHECK: %c1 = icmp ne i8 addrspace(3)*
-define void @local_cmp_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @local_cmp_user(i32 %arg0) nounwind {
entry:
br label %bb11
@@ -47,7 +47,7 @@ bb13:
; CHECK: bb:
; CHECK: inttoptr i64 %lsr.iv.next2 to i8 addrspace(1)*
; CHECK: icmp ne i8 addrspace(1)* %t
-define void @global_cmp_user(i64 %arg0) nounwind {
+define amdgpu_kernel void @global_cmp_user(i64 %arg0) nounwind {
entry:
br label %bb11
@@ -78,7 +78,7 @@ bb13:
; CHECK: bb:
; CHECK: %idxprom = sext i32 %lsr.iv1 to i64
; CHECK: getelementptr i8, i8 addrspace(1)* %t, i64 %idxprom
-define void @global_gep_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @global_gep_user(i32 %arg0) nounwind {
entry:
br label %bb11
@@ -108,7 +108,7 @@ bb13:
; CHECK: bb
; CHECK: %p = getelementptr i8, i8 addrspace(1)* %t, i64 %ii.ext
-define void @global_sext_scale_user(i32 %arg0) nounwind {
+define amdgpu_kernel void @global_sext_scale_user(i32 %arg0) nounwind {
entry:
br label %bb11
diff --git a/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll b/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
index b3b696d42c59..9eba0c3051dc 100644
--- a/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
+++ b/test/Transforms/LoopStrengthReduce/AMDGPU/preserve-addrspace-assert.ll
@@ -14,7 +14,7 @@ target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:
; CHECK: %scevgep = getelementptr i32, i32 addrspace(3)* %tmp1, i32 4
; CHECK:%tmp14 = load i32, i32 addrspace(3)* %scevgep
-define void @lsr_crash_preserve_addrspace_unknown_type() #0 {
+define amdgpu_kernel void @lsr_crash_preserve_addrspace_unknown_type() #0 {
bb:
br label %bb1
diff --git a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index 788842101080..a9d1e8758766 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -1,5 +1,4 @@
; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 %s -o - | FileCheck %s -check-prefix=A9
-; RUN: llc -O3 -mtriple=thumb-eabi -mcpu=cortex-a9 -addr-sink-using-gep=1 %s -o - | FileCheck %s -check-prefix=A9
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/canonical.ll b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
new file mode 100644
index 000000000000..2dafbb408aad
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
@@ -0,0 +1,65 @@
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -loop-reduce -S < %s | FileCheck %s
+; Check that LSR formula canonicalization puts loop-invariant regs before the
+; induction variable of the current loop, so that expressions involving
+; loop-invariant regs can be hoisted out of the current loop.
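+;
+; For example, the load address below is (%maxarray + %t1) + %indvars.iv, where
+; %t1 = %indvars.iv28 + %t0 is invariant in the inner loop; canonicalization
+; keeps the invariant part (%maxarray + %t1) grouped so it can be computed once
+; in for.body3.lr.ph, leaving a single getelementptr by the IV in the loop body.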
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @foo(i32 %size, i32 %nsteps, i8* nocapture %maxarray, i8* nocapture readnone %buffer, i32 %init) local_unnamed_addr #0 {
+entry:
+ %cmp25 = icmp sgt i32 %nsteps, 0
+ br i1 %cmp25, label %for.cond1.preheader.lr.ph, label %for.end12
+
+for.cond1.preheader.lr.ph: ; preds = %entry
+ %cmp223 = icmp sgt i32 %size, 1
+ %t0 = sext i32 %init to i64
+ %wide.trip.count = zext i32 %size to i64
+ %wide.trip.count31 = zext i32 %nsteps to i64
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc10, %for.cond1.preheader.lr.ph
+ %indvars.iv28 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next29, %for.inc10 ]
+ br i1 %cmp223, label %for.body3.lr.ph, label %for.inc10
+
+for.body3.lr.ph: ; preds = %for.cond1.preheader
+ %t1 = add nsw i64 %indvars.iv28, %t0
+ %t2 = trunc i64 %indvars.iv28 to i8
+ br label %for.body3
+
+; Make sure loop-invariant items are grouped together so that the load address
+; can be represented by a single getelementptr.
+; CHECK-LABEL: for.body3:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ 1, %for.body3.lr.ph ], [ {{.*}}, %for.body3 ]
+; CHECK-NOT: = phi i64
+; CHECK-NEXT: [[LOADADDR:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK-NEXT: = load i8, i8* [[LOADADDR]], align 1
+; CHECK: br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
+ %t5 = trunc i64 %indvars.iv to i8
+ %t3 = add nsw i64 %t1, %indvars.iv
+ %arrayidx = getelementptr inbounds i8, i8* %maxarray, i64 %t3
+ %t4 = load i8, i8* %arrayidx, align 1
+ %add5 = add i8 %t4, %t5
+ %add6 = add i8 %add5, %t2
+ %arrayidx9 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ store i8 %add6, i8* %arrayidx9, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.inc10.loopexit: ; preds = %for.body3
+ br label %for.inc10
+
+for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader
+ %indvars.iv.next29 = add nuw nsw i64 %indvars.iv28, 1
+ %exitcond32 = icmp eq i64 %indvars.iv.next29, %wide.trip.count31
+ br i1 %exitcond32, label %for.end12.loopexit, label %for.cond1.preheader
+
+for.end12.loopexit: ; preds = %for.inc10
+ br label %for.end12
+
+for.end12: ; preds = %for.end12.loopexit, %entry
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
new file mode 100644
index 000000000000..3adb8bcf514d
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
@@ -0,0 +1,48 @@
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
+
+target triple = "x86_64-unknown-unknown"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
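+; The point of the test, as the CHECK lines below spell out: the -1 offset in
+; %r0 must be applied after the multiplication by %r3; folding it into the
+; IV's start value would incorrectly scale the offset by %r3 as well.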
+define void @incorrect_offset_scaling(i64, i64*) {
+top:
+ br label %L
+
+L: ; preds = %idxend.10, %idxend, %L2, %top
+ br i1 undef, label %L, label %L1
+
+L1: ; preds = %L1.preheader, %L2
+ %r13 = phi i64 [ %r1, %L2 ], [ 1, %L ]
+; CHECK: %lsr.iv = phi i64 [ 0, %L{{[^ ]+}} ], [ %lsr.iv.next, %L2 ]
+; CHECK-NOT: %lsr.iv = phi i64 [ -1, %L{{[^ ]+}} ], [ %lsr.iv.next, %L2 ]
+; CHECK: br
+ %r0 = add i64 %r13, -1
+ br label %idxend.8
+
+L2: ; preds = %idxend.8
+ %r1 = add i64 %r13, 1
+ br i1 undef, label %L, label %L1
+
+if6: ; preds = %idxend.8
+ %r2 = add i64 %0, -1
+ %r3 = load i64, i64* %1, align 8
+; CHECK-NOT: %r2
+; CHECK: %r3 = load i64
+ br label %ib
+
+idxend.8: ; preds = %L1
+ br i1 undef, label %if6, label %L2
+
+ib: ; preds = %if6
+ %r4 = mul i64 %r3, %r0
+ %r5 = add i64 %r2, %r4
+ %r6 = icmp ult i64 %r5, undef
+; CHECK: [[MUL1:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD1:%[0-9]+]] = add i64 [[MUL1]], -1
+; CHECK: add i64 %{{.}}, [[ADD1]]
+; CHECK: %r6
+ %r7 = getelementptr i64, i64* undef, i64 %r5
+ store i64 1, i64* %r7, align 8
+; CHECK: [[MUL2:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD2:%[0-9]+]] = add i64 [[MUL2]], -1
+ br label %L
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index ab7d4f1baa81..fb63b66137f3 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -1,7 +1,5 @@
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
new file mode 100644
index 000000000000..4888536bdf81
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that LSR turns the compare against a static trip count into a compare with 0.
+
+; BOTH: for.body:
+; INSN: icmp eq i64 %lsr.iv.next, 0
+; REGS: icmp eq i64 %indvars.iv.next, 1024
+
+; The LLC run checks that LSR optimizes away the compare for a static trip count.
+; That means that instead of creating the following:
+; movl %ecx, (%rdx,%rax,4)
+; incq %rax
+; cmpq $1024, %rax
+; LSR should optimize out cmp:
+; movl %ecx, 4096(%rdx,%rax)
+; addq $4, %rax
+; or
+; movl %ecx, 4096(%rdx,%rax,4)
+; incq %rax
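+; In both forms the induction variable counts up to 0, so the flags set by
+; addq/incq feed the final jne directly and no separate cmp is needed.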
+
+; CHECK: LBB0_1:
+; CHECK-NEXT: movl 4096(%{{.+}},[[REG:%[0-9a-z]+]]
+; CHECK-NEXT: addl 4096(%{{.+}},[[REG]]
+; CHECK-NEXT: movl %{{.+}}, 4096(%{{.+}},[[REG]]
+; CHECK-NOT: cmp
+; CHECK: jne
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
new file mode 100644
index 000000000000..3273cb4e6b5b
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that LSR prefers fewer instructions over fewer registers.
+; For x86, LSR should prefer a complicated addressing mode over new LSR
+; induction variables.
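+;
+; Concretely: with -lsr-insns-cost LSR keeps the two loads and the store
+; indexed by the one shared IV (the three getelementptrs below), while the
+; register-oriented default builds three separate pointer IVs, each bumped by 1.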
+
+; BOTH: for.body:
+; INSN: getelementptr i32, i32* %x, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %y, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %q, i64 %indvars.iv
+; REGS: %lsr.iv4 = phi
+; REGS: %lsr.iv2 = phi
+; REGS: %lsr.iv1 = phi
+; REGS: getelementptr i32, i32* %lsr.iv1, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv2, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv4, i64 1
+
+; The LLC run checks that LSR prefers fewer instructions over fewer registers.
+; LSR should prefer a complicated addressing mode over additional add instructions.
+
+; CHECK: LBB0_2:
+; CHECK-NEXT: movl (%r{{.+}},
+; CHECK-NEXT: addl (%r{{.+}},
+; CHECK-NEXT: movl %e{{.+}}, (%r{{.+}},
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q, i32 %n) {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
new file mode 100644
index 000000000000..b563eb3ad994
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
@@ -0,0 +1,65 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; Check that when an outer-loop induction variable is used inside an inner-loop
+; induction value expression, LSR can still choose a single induction variable
+; for the inner loop and share it across multiple induction value expressions.
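+;
+; In for.body2 below, the three access indexes are %indvars.iv,
+; %indvars.iv + %sub2 and %indvars.iv + %t2; the latter two differ from the
+; shared IV only by amounts that are invariant in the inner loop, so one IV
+; plus loop-invariant bases suffices.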
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i32 %size, i32 %nsteps, i32 %hsize, i32* %lined, i8* %maxarray) {
+entry:
+ %cmp215 = icmp sgt i32 %size, 1
+ %t0 = zext i32 %size to i64
+ %t1 = sext i32 %nsteps to i64
+ %sub2 = sub i64 %t0, 2
+ br label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv2 = phi i64 [ %indvars.iv.next3, %for.inc ], [ 0, %entry ]
+ %t2 = mul nsw i64 %indvars.iv2, %t0
+ br i1 %cmp215, label %for.body2.preheader, label %for.inc
+
+for.body2.preheader: ; preds = %for.body
+ br label %for.body2
+
+; Check that LSR generates only one induction variable for for.body2 and that
+; the induction variable is shared by the multiple array accesses.
+; CHECK: for.body2:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ %lsr.iv.next, %for.body2 ], [ 0, %for.body2.preheader ]
+; CHECK-NOT: = phi i64 [ {{.*}}, %for.body2 ], [ {{.*}}, %for.body2.preheader ]
+; CHECK: [[SCEVGEP1:%[^,]+]] = getelementptr i8, i8* %maxarray, i64 [[LSR]]
+; CHECK: [[SCEVGEP2:%[^,]+]] = getelementptr i8, i8* [[SCEVGEP1]], i64 1
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP2]], align 1
+; CHECK: [[SCEVGEP3:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP3]], align 1
+; CHECK: [[SCEVGEP4:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: store i8 {{.*}}, i8* [[SCEVGEP4]], align 1
+; CHECK: br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.body2: ; preds = %for.body2.preheader, %for.body2
+ %indvars.iv = phi i64 [ 1, %for.body2.preheader ], [ %indvars.iv.next, %for.body2 ]
+ %arrayidx1 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ %v1 = load i8, i8* %arrayidx1, align 1
+ %idx2 = add nsw i64 %indvars.iv, %sub2
+ %arrayidx2 = getelementptr inbounds i8, i8* %maxarray, i64 %idx2
+ %v2 = load i8, i8* %arrayidx2, align 1
+ %tmpv = xor i8 %v1, %v2
+ %t4 = add nsw i64 %t2, %indvars.iv
+ %add.ptr = getelementptr inbounds i8, i8* %maxarray, i64 %t4
+ store i8 %tmpv, i8* %add.ptr, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %wide.trip.count = zext i32 %size to i64
+ %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.inc.loopexit: ; preds = %for.body2
+ br label %for.inc
+
+for.inc: ; preds = %for.inc.loopexit, %for.body
+ %indvars.iv.next3 = add nuw nsw i64 %indvars.iv2, 1
+ %cmp = icmp slt i64 %indvars.iv.next3, %t1
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.inc
+ ret void
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
new file mode 100644
index 000000000000..a69d6adc0f03
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
@@ -0,0 +1,97 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; It is harmful to allow an LSR formula to contain a SCEVAddRecExpr register
+; from a sibling of the current loop: when one loop is LSR-optimized, such
+; formulae can insert lsr.iv PHIs into the sibling loops, which sometimes
+; leads to many extra lsr.ivs being inserted.
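+;
+; In @foo below, each %j PHI starts at the final count of the previous loop
+; (%inc, %inc4, %inc10, ...), so its SCEV is an add recurrence of a sibling
+; loop; accepting such registers in formulae would make LSR plant extra
+; lsr.iv PHIs back in those earlier loops.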
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@cond = common local_unnamed_addr global i64 0, align 8
+
+; Check that no extra lsr.iv is generated in @foo.
+; CHECK-LABEL: @foo(
+; CHECK-NOT: lsr.iv{{[0-9]+}} =
+;
+define void @foo(i64 %N) local_unnamed_addr {
+entry:
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ %i.0 = phi i64 [ 0, %entry ], [ %inc, %do.body ]
+ tail call void @goo(i64 %i.0, i64 %i.0)
+ %inc = add nuw nsw i64 %i.0, 1
+ %t0 = load i64, i64* @cond, align 8
+ %tobool = icmp eq i64 %t0, 0
+ br i1 %tobool, label %do.body2.preheader, label %do.body
+
+do.body2.preheader: ; preds = %do.body
+ br label %do.body2
+
+do.body2: ; preds = %do.body2.preheader, %do.body2
+ %i.1 = phi i64 [ %inc3, %do.body2 ], [ 0, %do.body2.preheader ]
+ %j.1 = phi i64 [ %inc4, %do.body2 ], [ %inc, %do.body2.preheader ]
+ tail call void @goo(i64 %i.1, i64 %j.1)
+ %inc3 = add nuw nsw i64 %i.1, 1
+ %inc4 = add nsw i64 %j.1, 1
+ %t1 = load i64, i64* @cond, align 8
+ %tobool6 = icmp eq i64 %t1, 0
+ br i1 %tobool6, label %do.body8.preheader, label %do.body2
+
+do.body8.preheader: ; preds = %do.body2
+ br label %do.body8
+
+do.body8: ; preds = %do.body8.preheader, %do.body8
+ %i.2 = phi i64 [ %inc9, %do.body8 ], [ 0, %do.body8.preheader ]
+ %j.2 = phi i64 [ %inc10, %do.body8 ], [ %inc4, %do.body8.preheader ]
+ tail call void @goo(i64 %i.2, i64 %j.2)
+ %inc9 = add nuw nsw i64 %i.2, 1
+ %inc10 = add nsw i64 %j.2, 1
+ %t2 = load i64, i64* @cond, align 8
+ %tobool12 = icmp eq i64 %t2, 0
+ br i1 %tobool12, label %do.body14.preheader, label %do.body8
+
+do.body14.preheader: ; preds = %do.body8
+ br label %do.body14
+
+do.body14: ; preds = %do.body14.preheader, %do.body14
+ %i.3 = phi i64 [ %inc15, %do.body14 ], [ 0, %do.body14.preheader ]
+ %j.3 = phi i64 [ %inc16, %do.body14 ], [ %inc10, %do.body14.preheader ]
+ tail call void @goo(i64 %i.3, i64 %j.3)
+ %inc15 = add nuw nsw i64 %i.3, 1
+ %inc16 = add nsw i64 %j.3, 1
+ %t3 = load i64, i64* @cond, align 8
+ %tobool18 = icmp eq i64 %t3, 0
+ br i1 %tobool18, label %do.body20.preheader, label %do.body14
+
+do.body20.preheader: ; preds = %do.body14
+ br label %do.body20
+
+do.body20: ; preds = %do.body20.preheader, %do.body20
+ %i.4 = phi i64 [ %inc21, %do.body20 ], [ 0, %do.body20.preheader ]
+ %j.4 = phi i64 [ %inc22, %do.body20 ], [ %inc16, %do.body20.preheader ]
+ tail call void @goo(i64 %i.4, i64 %j.4)
+ %inc21 = add nuw nsw i64 %i.4, 1
+ %inc22 = add nsw i64 %j.4, 1
+ %t4 = load i64, i64* @cond, align 8
+ %tobool24 = icmp eq i64 %t4, 0
+ br i1 %tobool24, label %do.body26.preheader, label %do.body20
+
+do.body26.preheader: ; preds = %do.body20
+ br label %do.body26
+
+do.body26: ; preds = %do.body26.preheader, %do.body26
+ %i.5 = phi i64 [ %inc27, %do.body26 ], [ 0, %do.body26.preheader ]
+ %j.5 = phi i64 [ %inc28, %do.body26 ], [ %inc22, %do.body26.preheader ]
+ tail call void @goo(i64 %i.5, i64 %j.5)
+ %inc27 = add nuw nsw i64 %i.5, 1
+ %inc28 = add nsw i64 %j.5, 1
+ %t5 = load i64, i64* @cond, align 8
+ %tobool30 = icmp eq i64 %t5, 0
+ br i1 %tobool30, label %do.end31, label %do.body26
+
+do.end31: ; preds = %do.body26
+ ret void
+}
+
+declare void @goo(i64, i64) local_unnamed_addr
+
diff --git a/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll b/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
deleted file mode 100644
index 09f0e1aa2a09..000000000000
--- a/test/Transforms/LoopStrengthReduce/quadradic-exit-value.ll
+++ /dev/null
@@ -1,54 +0,0 @@
-; RUN: opt < %s -analyze -iv-users | FileCheck %s
-; RUN: opt -passes='function(require<scalar-evolution>,loop(print<ivusers>))' -S < %s 2>&1| FileCheck %s
-
-; Provide legal integer types.
-target datalayout = "n8:16:32:64"
-
-; The value of %r is dependent on a polynomial iteration expression.
-;
-; CHECK-LABEL: IV Users for loop %foo.loop
-; CHECK: {1,+,3,+,2}<%foo.loop>
-define i64 @foo(i64 %n) {
-entry:
- br label %foo.loop
-
-foo.loop:
- %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %foo.loop ]
- %indvar.next = add i64 %indvar, 1
- %c = icmp eq i64 %indvar.next, %n
- br i1 %c, label %exit, label %foo.loop
-
-exit:
- %r = mul i64 %indvar.next, %indvar.next
- ret i64 %r
-}
-
-; PR15470: LSR miscompile. The test2 function should return '1'.
-;
-; SCEV does not know how to denormalize chained recurrences, so make
-; sure they aren't marked as post-inc users.
-;
-; CHECK-LABEL: IV Users for loop %test2.loop
-; CHECK: %sext.us = {0,+,(16777216 + (-16777216 * %sub.us))<nuw><nsw>,+,33554432}<%test2.loop> in %f = ashr i32 %sext.us, 24
-define i32 @test2() {
-entry:
- br label %test2.loop
-
-test2.loop:
- %inc1115.us = phi i32 [ 0, %entry ], [ %inc11.us, %test2.loop ]
- %inc11.us = add nsw i32 %inc1115.us, 1
- %cmp.us = icmp slt i32 %inc11.us, 2
- br i1 %cmp.us, label %test2.loop, label %for.end
-
-for.end:
- %tobool.us = icmp eq i32 %inc1115.us, 0
- %sub.us = select i1 %tobool.us, i32 0, i32 0
- %mul.us = shl i32 %inc1115.us, 24
- %sub.cond.us = sub nsw i32 %inc1115.us, %sub.us
- %sext.us = mul i32 %mul.us, %sub.cond.us
- %f = ashr i32 %sext.us, 24
- br label %exit
-
-exit:
- ret i32 %f
-}