summaryrefslogtreecommitdiff
path: root/test/Transforms/LoopDistribute
diff options
context:
space:
mode:
Diffstat (limited to 'test/Transforms/LoopDistribute')
-rw-r--r--test/Transforms/LoopDistribute/basic-with-memchecks.ll112
-rw-r--r--test/Transforms/LoopDistribute/basic.ll83
-rw-r--r--test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll59
-rw-r--r--test/Transforms/LoopDistribute/no-if-convert.ll95
-rw-r--r--test/Transforms/LoopDistribute/outside-use.ll69
-rw-r--r--test/Transforms/LoopDistribute/program-order.ll65
6 files changed, 483 insertions, 0 deletions
diff --git a/test/Transforms/LoopDistribute/basic-with-memchecks.ll b/test/Transforms/LoopDistribute/basic-with-memchecks.ll
new file mode 100644
index 0000000000000..4c1c1b822964a
--- /dev/null
+++ b/test/Transforms/LoopDistribute/basic-with-memchecks.ll
@@ -0,0 +1,112 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S \
+; RUN: < %s | FileCheck %s
+
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 \
+; RUN: -verify-loop-info -verify-dom-info -S < %s | \
+; RUN: FileCheck --check-prefix=VECTORIZE %s
+
+; The memcheck version of basic.ll. We should distribute and vectorize the
+; second part of this loop with 5 memchecks (A+1 x {C, D, E} + C x {A, B})
+;
+; for (i = 0; i < n; i++) {
+; A[i + 1] = A[i] * B[i];
+; -------------------------------
+; C[i] = D[i] * E[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+@B = common global i32* null, align 8
+@A = common global i32* null, align 8
+@C = common global i32* null, align 8
+@D = common global i32* null, align 8
+@E = common global i32* null, align 8
+
+define void @f() {
+entry:
+ %a = load i32*, i32** @A, align 8
+ %b = load i32*, i32** @B, align 8
+ %c = load i32*, i32** @C, align 8
+ %d = load i32*, i32** @D, align 8
+ %e = load i32*, i32** @E, align 8
+ br label %for.body
+
+; We have two compares for each array overlap check which is a total of 10
+; compares.
+;
+; CHECK: for.body.ldist.memcheck:
+; CHECK: = icmp
+; CHECK: = icmp
+
+; CHECK: = icmp
+; CHECK: = icmp
+
+; CHECK: = icmp
+; CHECK: = icmp
+
+; CHECK: = icmp
+; CHECK: = icmp
+
+; CHECK: = icmp
+; CHECK: = icmp
+
+; CHECK-NOT: = icmp
+; CHECK: br i1 %memcheck.conflict, label %for.body.ph.ldist.nondist, label %for.body.ph.ldist1
+
+; The non-distributed loop that the memchecks fall back on.
+
+; CHECK: for.body.ph.ldist.nondist:
+; CHECK: br label %for.body.ldist.nondist
+; CHECK: for.body.ldist.nondist:
+; CHECK: br i1 %exitcond.ldist.nondist, label %for.end, label %for.body.ldist.nondist
+
+; Verify the two distributed loops.
+
+; CHECK: for.body.ph.ldist1:
+; CHECK: br label %for.body.ldist1
+; CHECK: for.body.ldist1:
+; CHECK: %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK: br i1 %exitcond.ldist1, label %for.body.ph, label %for.body.ldist1
+
+; CHECK: for.body.ph:
+; CHECK: br label %for.body
+; CHECK: for.body:
+; CHECK: %mulC = mul i32 %loadD, %loadE
+; CHECK: for.end:
+
+
+; VECTORIZE: mul <4 x i32>
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %loadD = load i32, i32* %arrayidxD, align 4
+
+ %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %loadE = load i32, i32* %arrayidxE, align 4
+
+ %mulC = mul i32 %loadD, %loadE
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ store i32 %mulC, i32* %arrayidxC, align 4
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Transforms/LoopDistribute/basic.ll b/test/Transforms/LoopDistribute/basic.ll
new file mode 100644
index 0000000000000..f19fa8bb1b14b
--- /dev/null
+++ b/test/Transforms/LoopDistribute/basic.ll
@@ -0,0 +1,83 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S \
+; RUN: < %s | FileCheck %s
+
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info \
+; RUN: -loop-accesses -analyze < %s | FileCheck %s --check-prefix=ANALYSIS
+
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 -S \
+; RUN: < %s | FileCheck %s --check-prefix=VECTORIZE
+
+; We should distribute this loop into a safe (2nd statement) and unsafe loop
+; (1st statement):
+; for (i = 0; i < n; i++) {
+; A[i + 1] = A[i] * B[i];
+; =======================
+; C[i] = D[i] * E[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+ i32* noalias %b,
+ i32* noalias %c,
+ i32* noalias %d,
+ i32* noalias %e) {
+entry:
+ br label %for.body
+
+; Verify the two distributed loops.
+
+; CHECK: entry.split.ldist1:
+; CHECK: br label %for.body.ldist1
+; CHECK: for.body.ldist1:
+; CHECK: %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK: br i1 %exitcond.ldist1, label %entry.split, label %for.body.ldist1
+
+; CHECK: entry.split:
+; CHECK: br label %for.body
+; CHECK: for.body:
+; CHECK: %mulC = mul i32 %loadD, %loadE
+; CHECK: for.end:
+
+
+; ANALYSIS: for.body:
+; ANALYSIS-NEXT: Memory dependences are safe{{$}}
+; ANALYSIS: for.body.ldist1:
+; ANALYSIS-NEXT: Report: unsafe dependent memory operations in loop
+
+
+; VECTORIZE: mul <4 x i32>
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %loadD = load i32, i32* %arrayidxD, align 4
+
+ %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %loadE = load i32, i32* %arrayidxE, align 4
+
+ %mulC = mul i32 %loadD, %loadE
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ store i32 %mulC, i32* %arrayidxC, align 4
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll b/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll
new file mode 100644
index 0000000000000..c81ddf59fa7c1
--- /dev/null
+++ b/test/Transforms/LoopDistribute/crash-in-memcheck-generation.ll
@@ -0,0 +1,59 @@
+; RUN: opt -basicaa -loop-distribute -loop-vectorize -force-vector-width=4 \
+; RUN: -verify-loop-info -verify-dom-info -S < %s | FileCheck %s
+
+; If only A and B can alias here, we don't need memchecks to distribute since
+; A and B are in the same partition. This used to cause a crash in the
+; memcheck generation.
+;
+; for (i = 0; i < n; i++) {
+; A[i + 1] = A[i] * B[i];
+; ------------------------------
+; C[i] = D[i] * E[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* %a,
+ i32* %b,
+ i32* noalias %c,
+ i32* noalias %d,
+ i32* noalias %e) {
+entry:
+ br label %for.body
+
+; CHECK-NOT: memcheck:
+; CHECK: mul <4 x i32>
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %loadD = load i32, i32* %arrayidxD, align 4
+
+ %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %loadE = load i32, i32* %arrayidxE, align 4
+
+ %mulC = mul i32 %loadD, %loadE
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ store i32 %mulC, i32* %arrayidxC, align 4
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Transforms/LoopDistribute/no-if-convert.ll b/test/Transforms/LoopDistribute/no-if-convert.ll
new file mode 100644
index 0000000000000..fcd8b65731ccb
--- /dev/null
+++ b/test/Transforms/LoopDistribute/no-if-convert.ll
@@ -0,0 +1,95 @@
+; RUN: opt -basicaa -loop-distribute -verify-loop-info -verify-dom-info -S < %s \
+; RUN: | FileCheck %s
+
+; We should distribute this loop along === but not along ---. The last
+; partition won't be vectorized due to conditional stores so it's better to
+; keep it with the second partition which has a dependence cycle.
+
+;
+; for (i = 0; i < n; i++) {
+; C[i] = D[i] * E[i];
+;=============================
+; A[i + 1] = A[i] * B[i];
+;-----------------------------
+; if (F[i])
+; G[i] = H[i] * J[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+ i32* noalias %b,
+ i32* noalias %c,
+ i32* noalias %d,
+ i32* noalias %e,
+ i32* noalias %g,
+ i32* noalias %h,
+ i32* noalias %j,
+ i64 %x) {
+entry:
+ br label %for.body
+
+; Ensure that we have only two partitions, the first with one multiplication
+; and the second with two.
+
+; CHECK: for.body.ldist1:
+; CHECK: %mulC.ldist1 = mul i32 %loadD.ldist1, %loadE.ldist1
+; CHECK: br i1 %exitcond.ldist1, label %entry.split, label %for.body.ldist1
+; CHECK: entry.split:
+; CHECK: br label %for.body
+; CHECK: for.body:
+; CHECK: %mulA = mul i32 %loadB, %loadA
+; CHECK: %mulG = mul i32 %loadH, %loadJ
+; CHECK: for.end:
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %if.end ]
+
+ %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %loadD = load i32, i32* %arrayidxD, align 4
+
+ %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %loadE = load i32, i32* %arrayidxE, align 4
+
+ %mulC = mul i32 %loadD, %loadE
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ store i32 %mulC, i32* %arrayidxC, align 4
+
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %if.cond = icmp eq i64 %ind, %x
+ br i1 %if.cond, label %if.then, label %if.end
+
+if.then:
+ %arrayidxH = getelementptr inbounds i32, i32* %h, i64 %ind
+ %loadH = load i32, i32* %arrayidxH, align 4
+
+ %arrayidxJ = getelementptr inbounds i32, i32* %j, i64 %ind
+ %loadJ = load i32, i32* %arrayidxJ, align 4
+
+ %mulG = mul i32 %loadH, %loadJ
+
+ %arrayidxG = getelementptr inbounds i32, i32* %g, i64 %ind
+ store i32 %mulG, i32* %arrayidxG, align 4
+ br label %if.end
+
+if.end:
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Transforms/LoopDistribute/outside-use.ll b/test/Transforms/LoopDistribute/outside-use.ll
new file mode 100644
index 0000000000000..546050d5b0463
--- /dev/null
+++ b/test/Transforms/LoopDistribute/outside-use.ll
@@ -0,0 +1,69 @@
+; RUN: opt -loop-distribute -verify-loop-info -verify-dom-info -S < %s \
+; RUN: | FileCheck %s
+
+; Check that definitions used outside the loop are handled correctly: (1) they
+; are not dropped (2) when versioning the loop, a phi is added to merge the
+; value from the non-distributed loop and the distributed loop.
+;
+; for (i = 0; i < n; i++) {
+; A[i + 1] = A[i] * B[i];
+; ==========================
+; sum += C[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+@B = common global i32* null, align 8
+@A = common global i32* null, align 8
+@C = common global i32* null, align 8
+@D = common global i32* null, align 8
+@E = common global i32* null, align 8
+@SUM = common global i32 0, align 8
+
+define void @f() {
+entry:
+ %a = load i32*, i32** @A, align 8
+ %b = load i32*, i32** @B, align 8
+ %c = load i32*, i32** @C, align 8
+ %d = load i32*, i32** @D, align 8
+ %e = load i32*, i32** @E, align 8
+
+ br label %for.body
+
+; CHECK: for.body.ldist1:
+; CHECK: %mulA.ldist1 = mul i32 %loadB.ldist1, %loadA.ldist1
+; CHECK: for.body.ph:
+; CHECK: for.body:
+; CHECK: %sum_add = add nuw nsw i32 %sum, %loadC
+; CHECK: for.end:
+; CHECK: %sum_add.ldist = phi i32 [ %sum_add, %for.body ], [ %sum_add.ldist.nondist, %for.body.ldist.nondist ]
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+ %sum = phi i32 [ 0, %entry ], [ %sum_add, %for.body ]
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+ %loadC = load i32, i32* %arrayidxC, align 4
+
+ %sum_add = add nuw nsw i32 %sum, %loadC
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ store i32 %sum_add, i32* @SUM, align 4
+ ret void
+}
diff --git a/test/Transforms/LoopDistribute/program-order.ll b/test/Transforms/LoopDistribute/program-order.ll
new file mode 100644
index 0000000000000..b534b79af71a4
--- /dev/null
+++ b/test/Transforms/LoopDistribute/program-order.ll
@@ -0,0 +1,65 @@
+; RUN: opt -loop-distribute -S -verify-loop-info -verify-dom-info < %s \
+; RUN: | FileCheck %s
+
+; Distributing this loop to avoid the dependence cycle would require us to
+; reorder S1 and S2 to form the two partitions: {S2} | {S1, S3}. The analysis
+; provided by LoopAccessAnalysis does not allow us to reorder memory
+; operations so make sure we bail on this loop.
+;
+; for (i = 0; i < n; i++) {
+; S1: d = D[i];
+; S2: A[i + 1] = A[i] * B[i];
+; S3: C[i] = d * E[i];
+; }
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.10.0"
+
+define void @f(i32* noalias %a,
+ i32* noalias %b,
+ i32* noalias %c,
+ i32* noalias %d,
+ i32* noalias %e) {
+entry:
+ br label %for.body
+
+; CHECK: entry:
+; CHECK: br label %for.body
+; CHECK: for.body:
+; CHECK: br i1 %exitcond, label %for.end, label %for.body
+; CHECK: for.end:
+; CHECK: ret void
+
+for.body: ; preds = %for.body, %entry
+ %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
+
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %ind
+ %loadA = load i32, i32* %arrayidxA, align 4
+
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %ind
+ %loadB = load i32, i32* %arrayidxB, align 4
+
+ %mulA = mul i32 %loadB, %loadA
+
+ %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %ind
+ %loadD = load i32, i32* %arrayidxD, align 4
+
+ %add = add nuw nsw i64 %ind, 1
+ %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
+ store i32 %mulA, i32* %arrayidxA_plus_4, align 4
+
+ %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %ind
+
+ %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %ind
+ %loadE = load i32, i32* %arrayidxE, align 4
+
+ %mulC = mul i32 %loadD, %loadE
+
+ store i32 %mulC, i32* %arrayidxC, align 4
+
+ %exitcond = icmp eq i64 %add, 20
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}