Diffstat (limited to 'test/Analysis')
-rw-r--r--  test/Analysis/BasicAA/modref.ll                                 |  45
-rw-r--r--  test/Analysis/CostModel/X86/testshiftashr.ll                    |  56
-rw-r--r--  test/Analysis/CostModel/X86/testshiftlshr.ll                    |  24
-rw-r--r--  test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll         | 236
-rw-r--r--  test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll |  42
-rw-r--r--  test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll    |   2
-rw-r--r--  test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll        |   6
7 files changed, 365 insertions(+), 46 deletions(-)
diff --git a/test/Analysis/BasicAA/modref.ll b/test/Analysis/BasicAA/modref.ll
index e124d6cbe20f..3084f809c370 100644
--- a/test/Analysis/BasicAA/modref.ll
+++ b/test/Analysis/BasicAA/modref.ll
@@ -145,6 +145,51 @@ entry:
; CHECK: load i32, i32*
}
+;; Check that AA correctly handles functions marked with the argmemonly
+;; attribute.
+declare i32 @func_argmemonly(i32 * %P) argmemonly
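+;; (An argmemonly function may read or write memory only through its
+;; pointer arguments.)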
+
+;; Cannot remove the redundant load; the function may write to %P.
+; CHECK-LABEL: @test8(
+define i32 @test8(i32 *%P) {
+ %V1 = load i32, i32* %P
+ call i32 @func_argmemonly(i32* %P)
+ %V2 = load i32, i32* %P
+ %Diff = sub i32 %V1, %V2
+ ret i32 %Diff
+ ; CHECK: load
+ ; CHECK: load
+ ; CHECK: sub
+ ; CHECK: ret i32 %Diff
+}
+
+;; In this case the load can be removed; the function clobbers only %P2.
+; CHECK-LABEL: @test9(
+define i32 @test9(i32* %P, i32* noalias %P2) {
+ %V1 = load i32, i32* %P
+ call i32 @func_argmemonly(i32* %P2)
+ %V2 = load i32, i32* %P
+ %Diff = sub i32 %V1, %V2
+ ret i32 %Diff
+ ; CHECK-NOT: load
+ ; CHECK: ret i32 0
+}
+
+;; In this case the load can *not* be removed. The function clobbers only
+;; %P2, but %P2 may alias %P.
+; CHECK-LABEL: @test10(
+define i32 @test10(i32* %P, i32* %P2) {
+ %V1 = load i32, i32* %P
+ call i32 @func_argmemonly(i32* %P2)
+ %V2 = load i32, i32* %P
+ %Diff = sub i32 %V1, %V2
+ ret i32 %Diff
+ ; CHECK: load
+ ; CHECK: load
+ ; CHECK: sub
+ ; CHECK: ret i32 %Diff
+}
+
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
declare void @llvm.memset.p0i8.i8(i8* nocapture, i8, i8, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i8(i8* nocapture, i8* nocapture, i8, i32, i1) nounwind
diff --git a/test/Analysis/CostModel/X86/testshiftashr.ll b/test/Analysis/CostModel/X86/testshiftashr.ll
index ced2ffed4552..da4e7d466e2b 100644
--- a/test/Analysis/CostModel/X86/testshiftashr.ll
+++ b/test/Analysis/CostModel/X86/testshiftashr.ll
@@ -17,9 +17,9 @@ entry:
define %shifttype4i16 @shift4i16(%shifttype4i16 %a, %shifttype4i16 %b) {
entry:
; SSE2: shift4i16
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i16
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i16 %a , %b
ret %shifttype4i16 %0
@@ -77,9 +77,9 @@ entry:
define %shifttype4i32 @shift4i32(%shifttype4i32 %a, %shifttype4i32 %b) {
entry:
; SSE2: shift4i32
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i32 %a , %b
ret %shifttype4i32 %0
@@ -89,9 +89,9 @@ entry:
define %shifttype8i32 @shift8i32(%shifttype8i32 %a, %shifttype8i32 %b) {
entry:
; SSE2: shift8i32
- ; SSE2: cost of 80 {{.*}} ashr
+ ; SSE2: cost of 32 {{.*}} ashr
; SSE2-CODEGEN: shift8i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype8i32 %a , %b
ret %shifttype8i32 %0
@@ -101,9 +101,9 @@ entry:
define %shifttype16i32 @shift16i32(%shifttype16i32 %a, %shifttype16i32 %b) {
entry:
; SSE2: shift16i32
- ; SSE2: cost of 160 {{.*}} ashr
+ ; SSE2: cost of 64 {{.*}} ashr
; SSE2-CODEGEN: shift16i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype16i32 %a , %b
ret %shifttype16i32 %0
@@ -113,9 +113,9 @@ entry:
define %shifttype32i32 @shift32i32(%shifttype32i32 %a, %shifttype32i32 %b) {
entry:
; SSE2: shift32i32
- ; SSE2: cost of 320 {{.*}} ashr
+ ; SSE2: cost of 128 {{.*}} ashr
; SSE2-CODEGEN: shift32i32
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype32i32 %a , %b
ret %shifttype32i32 %0
@@ -197,9 +197,9 @@ entry:
define %shifttype4i8 @shift4i8(%shifttype4i8 %a, %shifttype4i8 %b) {
entry:
; SSE2: shift4i8
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift4i8
- ; SSE2-CODEGEN: sarl %cl
+ ; SSE2-CODEGEN: psrad
%0 = ashr %shifttype4i8 %a , %b
ret %shifttype4i8 %0
@@ -247,9 +247,9 @@ entry:
define %shifttypec @shift2i16const(%shifttypec %a, %shifttypec %b) {
entry:
; SSE2: shift2i16const
- ; SSE2: cost of 20 {{.*}} ashr
+ ; SSE2: cost of 4 {{.*}} ashr
; SSE2-CODEGEN: shift2i16const
- ; SSE2-CODEGEN: sarq $
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec %a , <i16 3, i16 3>
ret %shifttypec %0
@@ -320,9 +320,9 @@ entry:
define %shifttypec2i32 @shift2i32c(%shifttypec2i32 %a, %shifttypec2i32 %b) {
entry:
; SSE2: shift2i32c
- ; SSE2: cost of 20 {{.*}} ashr
+ ; SSE2: cost of 4 {{.*}} ashr
; SSE2-CODEGEN: shift2i32c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec2i32 %a , <i32 3, i32 3>
ret %shifttypec2i32 %0
@@ -391,9 +391,9 @@ entry:
define %shifttypec2i64 @shift2i64c(%shifttypec2i64 %a, %shifttypec2i64 %b) {
entry:
; SSE2: shift2i64c
- ; SSE2: cost of 20 {{.*}} ashr
+ ; SSE2: cost of 4 {{.*}} ashr
; SSE2-CODEGEN: shift2i64c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec2i64 %a , <i64 3, i64 3>
ret %shifttypec2i64 %0
@@ -403,9 +403,9 @@ entry:
define %shifttypec4i64 @shift4i64c(%shifttypec4i64 %a, %shifttypec4i64 %b) {
entry:
; SSE2: shift4i64c
- ; SSE2: cost of 40 {{.*}} ashr
+ ; SSE2: cost of 8 {{.*}} ashr
; SSE2-CODEGEN: shift4i64c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec4i64 %a , <i64 3, i64 3, i64 3, i64 3>
ret %shifttypec4i64 %0
@@ -415,9 +415,9 @@ entry:
define %shifttypec8i64 @shift8i64c(%shifttypec8i64 %a, %shifttypec8i64 %b) {
entry:
; SSE2: shift8i64c
- ; SSE2: cost of 80 {{.*}} ashr
+ ; SSE2: cost of 16 {{.*}} ashr
; SSE2-CODEGEN: shift8i64c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec8i64 %a , <i64 3, i64 3, i64 3, i64 3,
i64 3, i64 3, i64 3, i64 3>
@@ -428,9 +428,9 @@ entry:
define %shifttypec16i64 @shift16i64c(%shifttypec16i64 %a, %shifttypec16i64 %b) {
entry:
; SSE2: shift16i64c
- ; SSE2: cost of 160 {{.*}} ashr
+ ; SSE2: cost of 32 {{.*}} ashr
; SSE2-CODEGEN: shift16i64c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec16i64 %a , <i64 3, i64 3, i64 3, i64 3,
i64 3, i64 3, i64 3, i64 3,
@@ -443,9 +443,9 @@ entry:
define %shifttypec32i64 @shift32i64c(%shifttypec32i64 %a, %shifttypec32i64 %b) {
entry:
; SSE2: shift32i64c
- ; SSE2: cost of 320 {{.*}} ashr
+ ; SSE2: cost of 64 {{.*}} ashr
; SSE2-CODEGEN: shift32i64c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec32i64 %a ,<i64 3, i64 3, i64 3, i64 3,
i64 3, i64 3, i64 3, i64 3,
@@ -462,9 +462,9 @@ entry:
define %shifttypec2i8 @shift2i8c(%shifttypec2i8 %a, %shifttypec2i8 %b) {
entry:
; SSE2: shift2i8c
- ; SSE2: cost of 20 {{.*}} ashr
+ ; SSE2: cost of 4 {{.*}} ashr
; SSE2-CODEGEN: shift2i8c
- ; SSE2-CODEGEN: sarq $3
+ ; SSE2-CODEGEN: psrad $3
%0 = ashr %shifttypec2i8 %a , <i8 3, i8 3>
ret %shifttypec2i8 %0
diff --git a/test/Analysis/CostModel/X86/testshiftlshr.ll b/test/Analysis/CostModel/X86/testshiftlshr.ll
index 0bc60eacac9a..5775a42d08ad 100644
--- a/test/Analysis/CostModel/X86/testshiftlshr.ll
+++ b/test/Analysis/CostModel/X86/testshiftlshr.ll
@@ -17,9 +17,9 @@ entry:
define %shifttype4i16 @shift4i16(%shifttype4i16 %a, %shifttype4i16 %b) {
entry:
; SSE2: shift4i16
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i16
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i16 %a , %b
ret %shifttype4i16 %0
@@ -77,9 +77,9 @@ entry:
define %shifttype4i32 @shift4i32(%shifttype4i32 %a, %shifttype4i32 %b) {
entry:
; SSE2: shift4i32
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i32 %a , %b
ret %shifttype4i32 %0
@@ -89,9 +89,9 @@ entry:
define %shifttype8i32 @shift8i32(%shifttype8i32 %a, %shifttype8i32 %b) {
entry:
; SSE2: shift8i32
- ; SSE2: cost of 80 {{.*}} lshr
+ ; SSE2: cost of 32 {{.*}} lshr
; SSE2-CODEGEN: shift8i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype8i32 %a , %b
ret %shifttype8i32 %0
@@ -101,9 +101,9 @@ entry:
define %shifttype16i32 @shift16i32(%shifttype16i32 %a, %shifttype16i32 %b) {
entry:
; SSE2: shift16i32
- ; SSE2: cost of 160 {{.*}} lshr
+ ; SSE2: cost of 64 {{.*}} lshr
; SSE2-CODEGEN: shift16i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype16i32 %a , %b
ret %shifttype16i32 %0
@@ -113,9 +113,9 @@ entry:
define %shifttype32i32 @shift32i32(%shifttype32i32 %a, %shifttype32i32 %b) {
entry:
; SSE2: shift32i32
- ; SSE2: cost of 320 {{.*}} lshr
+ ; SSE2: cost of 128 {{.*}} lshr
; SSE2-CODEGEN: shift32i32
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype32i32 %a , %b
ret %shifttype32i32 %0
@@ -197,9 +197,9 @@ entry:
define %shifttype4i8 @shift4i8(%shifttype4i8 %a, %shifttype4i8 %b) {
entry:
; SSE2: shift4i8
- ; SSE2: cost of 40 {{.*}} lshr
+ ; SSE2: cost of 16 {{.*}} lshr
; SSE2-CODEGEN: shift4i8
- ; SSE2-CODEGEN: shrl %cl
+ ; SSE2-CODEGEN: psrld
%0 = lshr %shifttype4i8 %a , %b
ret %shifttype4i8 %0
diff --git a/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll b/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
index f9871c643c9d..50b37a031a60 100644
--- a/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
+++ b/test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
@@ -1,19 +1,20 @@
; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
-; 3 reads and 3 writes should need 12 memchecks
-
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64--linux-gnueabi"
+; 3 reads and 3 writes should need 12 memchecks: every pair of pointers in
+; which at least one is a write must be checked, giving 3 write-write pairs
+; plus 3*3 write-read pairs.
+; CHECK: function 'testf':
; CHECK: Memory dependences are safe with run-time checks
-; Memory dependecies have labels starting from 0, so in
+
+; Memory dependencies have labels starting from 0, so in
; order to verify that we have n checks, we look for
; (n-1): and not n:.
; CHECK: Run-time memory checks:
-; CHECK-NEXT: 0:
-; CHECK: 11:
-; CHECK-NOT: 12:
+; CHECK-NEXT: Check 0:
+; CHECK: Check 11:
+; CHECK-NOT: Check 12:
define void @testf(i16* %a,
i16* %b,
@@ -56,3 +57,226 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret void
}
+
+; The following tests (testg and testh) check that we can group
+; memory checks of accesses which differ by a constant value.
+; Both tests are based on the following C code:
+;
+; void testh(short *a, short *b, short *c) {
+;   for (unsigned long ind = 0; ind < 20; ++ind) {
+; c[2 * ind] = a[ind] * a[ind + 1];
+; c[2 * ind + 1] = a[ind] * a[ind + 1] * b[ind];
+; }
+; }
+;
+; It is sufficient to check the intervals
+; [a, a + 21], [b, b + 20] against [c, c + 41].
+
+; 3 reads and 2 writes - two of the reads can be merged,
+; and the writes can be merged as well. This gives us a
+; total of 2 memory checks.
+
+; CHECK: function 'testg':
+
+; CHECK: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: Against group 1:
+; CHECK-NEXT: %arrayidxA1 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
+; CHECK-NEXT: Check 1:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: Against group 2:
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group 0:
+; CHECK-NEXT: (Low: %c High: (78 + %c))
+; CHECK-NEXT: Member: {(2 + %c),+,4}
+; CHECK-NEXT: Member: {%c,+,4}
+; CHECK-NEXT: Group 1:
+; CHECK-NEXT: (Low: %a High: (40 + %a))
+; CHECK-NEXT: Member: {(2 + %a),+,2}
+; CHECK-NEXT: Member: {%a,+,2}
+; CHECK-NEXT: Group 2:
+; CHECK-NEXT: (Low: %b High: (38 + %b))
+; CHECK-NEXT: Member: {%b,+,2}
+
+define void @testg(i16* %a,
+ i16* %b,
+ i16* %c) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+ %store_ind = phi i64 [ 0, %entry ], [ %store_ind_next, %for.body ]
+
+ %add = add nuw nsw i64 %ind, 1
+ %store_ind_inc = add nuw nsw i64 %store_ind, 1
+ %store_ind_next = add nuw nsw i64 %store_ind_inc, 1
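+ ; %store_ind advances by 2 per iteration, so the two stores to %c hit
+ ; indices 2*ind and 2*ind+1.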
+
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %ind
+ %loadA = load i16, i16* %arrayidxA, align 2
+
+ %arrayidxA1 = getelementptr inbounds i16, i16* %a, i64 %add
+ %loadA1 = load i16, i16* %arrayidxA1, align 2
+
+ %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %ind
+ %loadB = load i16, i16* %arrayidxB, align 2
+
+ %mul = mul i16 %loadA, %loadA1
+ %mul1 = mul i16 %mul, %loadB
+
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+ store i16 %mul1, i16* %arrayidxC, align 2
+
+ %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+ store i16 %mul, i16* %arrayidxC1, align 2
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; 3 reads and 2 writes - the writes can be merged into a single
+; group, but the GEPs used for the reads are not marked as inbounds.
+; We can still merge the reads because the accesses have a unit
+; stride, so the GEPs cannot overflow.
+
+; CHECK: function 'testh':
+; CHECK: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: Against group 1:
+; CHECK-NEXT: %arrayidxA1 = getelementptr i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxA = getelementptr i16, i16* %a, i64 %ind
+; CHECK-NEXT: Check 1:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+; CHECK-NEXT: Against group 2:
+; CHECK-NEXT: %arrayidxB = getelementptr i16, i16* %b, i64 %ind
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group 0:
+; CHECK-NEXT: (Low: %c High: (78 + %c))
+; CHECK-NEXT: Member: {(2 + %c),+,4}
+; CHECK-NEXT: Member: {%c,+,4}
+; CHECK-NEXT: Group 1:
+; CHECK-NEXT: (Low: %a High: (40 + %a))
+; CHECK-NEXT: Member: {(2 + %a),+,2}
+; CHECK-NEXT: Member: {%a,+,2}
+; CHECK-NEXT: Group 2:
+; CHECK-NEXT: (Low: %b High: (38 + %b))
+; CHECK-NEXT: Member: {%b,+,2}
+
+define void @testh(i16* %a,
+ i16* %b,
+ i16* %c) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+ %store_ind = phi i64 [ 0, %entry ], [ %store_ind_next, %for.body ]
+
+ %add = add nuw nsw i64 %ind, 1
+ %store_ind_inc = add nuw nsw i64 %store_ind, 1
+ %store_ind_next = add nuw nsw i64 %store_ind_inc, 1
+
+ %arrayidxA = getelementptr i16, i16* %a, i64 %ind
+ %loadA = load i16, i16* %arrayidxA, align 2
+
+ %arrayidxA1 = getelementptr i16, i16* %a, i64 %add
+ %loadA1 = load i16, i16* %arrayidxA1, align 2
+
+ %arrayidxB = getelementptr i16, i16* %b, i64 %ind
+ %loadB = load i16, i16* %arrayidxB, align 2
+
+ %mul = mul i16 %loadA, %loadA1
+ %mul1 = mul i16 %mul, %loadB
+
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %store_ind
+ store i16 %mul1, i16* %arrayidxC, align 2
+
+ %arrayidxC1 = getelementptr inbounds i16, i16* %c, i64 %store_ind_inc
+ store i16 %mul, i16* %arrayidxC1, align 2
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+; Don't merge pointers if there is some other check which could be falsely
+; invalidated. For example, in the following loop:
+;
+; for (i = 0; i < 5000; ++i)
+; a[i + offset] = a[i] + a[i + 10000]
+;
+; we should not merge the intervals associated with the reads, (0, 5000) and
+; (10000, 15000), into (0, 15000), as this would potentially fail the check
+; against the interval associated with the write.
+
+; CHECK: function 'testi':
+; CHECK: Run-time memory checks:
+; CHECK-NEXT: Check 0:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
+; CHECK-NEXT: Against group 1:
+; CHECK-NEXT: %arrayidxA1 = getelementptr i16, i16* %a, i64 %ind
+; CHECK-NEXT: Check 1:
+; CHECK-NEXT: Comparing group 0:
+; CHECK-NEXT: %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
+; CHECK-NEXT: Against group 2:
+; CHECK-NEXT: %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
+; CHECK-NEXT: Grouped accesses:
+; CHECK-NEXT: Group 0:
+; CHECK-NEXT: (Low: ((2 * %offset) + %a) High: (9998 + (2 * %offset) + %a))
+; CHECK-NEXT: Member: {((2 * %offset) + %a),+,2}<nsw><%for.body>
+; CHECK-NEXT: Group 1:
+; CHECK-NEXT: (Low: %a High: (9998 + %a))
+; CHECK-NEXT: Member: {%a,+,2}<%for.body>
+; CHECK-NEXT: Group 2:
+; CHECK-NEXT: (Low: (20000 + %a) High: (29998 + %a))
+; CHECK-NEXT: Member: {(20000 + %a),+,2}<%for.body>
+
+define void @testi(i16* %a,
+ i64 %offset) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+ %store_ind = phi i64 [ %offset, %entry ], [ %store_ind_inc, %for.body ]
+
+ %add = add nuw nsw i64 %ind, 1
+ %store_ind_inc = add nuw nsw i64 %store_ind, 1
+
+ %arrayidxA1 = getelementptr i16, i16* %a, i64 %ind
+ %ind2 = add nuw nsw i64 %ind, 10000
+ %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
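+ ; The loads read a[ind] and a[ind + 10000]; the store below writes to
+ ; a[ind + offset] for an %offset that is unknown at compile time.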
+
+ %loadA1 = load i16, i16* %arrayidxA1, align 2
+ %loadA2 = load i16, i16* %arrayidxA2, align 2
+
+ %addres = add i16 %loadA1, %loadA2
+
+ %storeidx = getelementptr inbounds i16, i16* %a, i64 %store_ind
+ store i16 %addres, i16* %storeidx, align 2
+
+ %exitcond = icmp eq i64 %add, 5000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll b/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
new file mode 100644
index 000000000000..d05849e2be2d
--- /dev/null
+++ b/test/Analysis/LoopAccessAnalysis/pointer-with-unknown-bounds.ll
@@ -0,0 +1,42 @@
+; RUN: opt -loop-accesses -analyze < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+; We shouldn't quit the analysis if we encounter a pointer without known
+; bounds *unless* we actually need to emit a memcheck for it. (We only
+; compute bounds for SCEVAddRecs, so A[i*i] is deemed not to have known
+; bounds.)
+;
+; for (i = 0; i < 20; ++i)
+; A[i*i] *= 2;
+
+; CHECK: for.body:
+; CHECK: Report: unsafe dependent memory operations in loop
+; CHECK-NOT: Report: cannot identify array bounds
+; CHECK: Interesting Dependences:
+; CHECK: Unknown:
+; CHECK: %loadA = load i16, i16* %arrayidxA, align 2 ->
+; CHECK: store i16 %mul, i16* %arrayidxA, align 2
+
+define void @f(i16* %a) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %access_ind = mul i64 %ind, %ind
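+ ; The index ind*ind is not an affine (SCEVAddRec) expression, so no
+ ; bounds can be computed for this access.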
+
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %access_ind
+ %loadA = load i16, i16* %arrayidxA, align 2
+
+ %mul = mul i16 %loadA, 2
+
+ store i16 %mul, i16* %arrayidxA, align 2
+
+ %add = add nuw nsw i64 %ind, 1
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll b/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
index 64f7729fa18a..e7305173dd95 100644
--- a/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
+++ b/test/Analysis/LoopAccessAnalysis/resort-to-memchecks-only.ll
@@ -15,7 +15,9 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK-NEXT: Interesting Dependences:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: 0:
+; CHECK-NEXT: Comparing group
; CHECK-NEXT: %arrayidxA2 = getelementptr inbounds i16, i16* %a, i64 %idx
+; CHECK-NEXT: Against group
; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %indvar
@B = common global i16* null, align 8
diff --git a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll b/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
index ce8b86ba2c51..237cbc8b9873 100644
--- a/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
+++ b/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
@@ -14,10 +14,16 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK-NEXT: store i16 %mul1, i16* %arrayidxA_plus_2, align 2
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
+; CHECK-NEXT: Comparing group
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: Against group
; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
; CHECK-NEXT: 1:
+; CHECK-NEXT: Comparing group
+; CHECK-NEXT: %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: Against group
; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
@B = common global i16* null, align 8