Diffstat (limited to 'test/Transforms/LoopStrengthReduce/X86')
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/canonical.ll                |  65
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll |  48
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll              |   2
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll              |  52
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll              |  58
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/nested-loop.ll              |  65
-rw-r--r--  test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll            |  97
7 files changed, 385 insertions(+), 2 deletions(-)
diff --git a/test/Transforms/LoopStrengthReduce/X86/canonical.ll b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
new file mode 100644
index 0000000000000..2dafbb408aad4
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/canonical.ll
@@ -0,0 +1,65 @@
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -loop-reduce -S < %s | FileCheck %s
+; Check that LSR formula canonicalization puts loop-invariant regs before the
+; induction variable of the current loop, so that exprs involving loop-invariant
+; regs can be promoted outside of the current loop.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @foo(i32 %size, i32 %nsteps, i8* nocapture %maxarray, i8* nocapture readnone %buffer, i32 %init) local_unnamed_addr #0 {
+entry:
+ %cmp25 = icmp sgt i32 %nsteps, 0
+ br i1 %cmp25, label %for.cond1.preheader.lr.ph, label %for.end12
+
+for.cond1.preheader.lr.ph: ; preds = %entry
+ %cmp223 = icmp sgt i32 %size, 1
+ %t0 = sext i32 %init to i64
+ %wide.trip.count = zext i32 %size to i64
+ %wide.trip.count31 = zext i32 %nsteps to i64
+ br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.inc10, %for.cond1.preheader.lr.ph
+ %indvars.iv28 = phi i64 [ 0, %for.cond1.preheader.lr.ph ], [ %indvars.iv.next29, %for.inc10 ]
+ br i1 %cmp223, label %for.body3.lr.ph, label %for.inc10
+
+for.body3.lr.ph: ; preds = %for.cond1.preheader
+ %t1 = add nsw i64 %indvars.iv28, %t0
+ %t2 = trunc i64 %indvars.iv28 to i8
+ br label %for.body3
+
+; Make sure loop-invariant items are grouped together so that the load address
+; can be represented in one getelementptr.
+; CHECK-LABEL: for.body3:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ 1, %for.body3.lr.ph ], [ {{.*}}, %for.body3 ]
+; CHECK-NOT: = phi i64
+; CHECK-NEXT: [[LOADADDR:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK-NEXT: = load i8, i8* [[LOADADDR]], align 1
+; CHECK: br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.body3: ; preds = %for.body3, %for.body3.lr.ph
+ %indvars.iv = phi i64 [ 1, %for.body3.lr.ph ], [ %indvars.iv.next, %for.body3 ]
+ %t5 = trunc i64 %indvars.iv to i8
+ %t3 = add nsw i64 %t1, %indvars.iv
+ %arrayidx = getelementptr inbounds i8, i8* %maxarray, i64 %t3
+ %t4 = load i8, i8* %arrayidx, align 1
+ %add5 = add i8 %t4, %t5
+ %add6 = add i8 %add5, %t2
+ %arrayidx9 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ store i8 %add6, i8* %arrayidx9, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.inc10.loopexit, label %for.body3
+
+for.inc10.loopexit: ; preds = %for.body3
+ br label %for.inc10
+
+for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader
+ %indvars.iv.next29 = add nuw nsw i64 %indvars.iv28, 1
+ %exitcond32 = icmp eq i64 %indvars.iv.next29, %wide.trip.count31
+ br i1 %exitcond32, label %for.end12.loopexit, label %for.cond1.preheader
+
+for.end12.loopexit: ; preds = %for.inc10
+ br label %for.end12
+
+for.end12: ; preds = %for.end12.loopexit, %entry
+ ret void
+}
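
For reference, a minimal hand-written sketch of the shape the CHECK lines in canonical.ll look for (not part of the patch; the function and value names are made up): the loop-invariant part of the address is folded into a single getelementptr that can be hoisted out of the loop, and each access in the body then needs only one getelementptr indexed by the shared induction variable.

define i8 @sketch(i8* %p, i64 %inv, i64 %n) {
entry:
  %base = getelementptr i8, i8* %p, i64 %inv      ; loop-invariant part, computed once
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %acc = phi i8 [ 0, %entry ], [ %acc.next, %loop ]
  %addr = getelementptr i8, i8* %base, i64 %iv    ; one GEP off the IV per access
  %v = load i8, i8* %addr, align 1
  %acc.next = add i8 %acc, %v
  %iv.next = add nuw nsw i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret i8 %acc.next
}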
diff --git a/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
new file mode 100644
index 0000000000000..3adb8bcf514da
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/incorrect-offset-scaling.ll
@@ -0,0 +1,48 @@
+; RUN: opt -S -loop-reduce < %s | FileCheck %s
+
+target triple = "x86_64-unknown-unknown"
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @incorrect_offset_scaling(i64, i64*) {
+top:
+ br label %L
+
+L: ; preds = %idxend.10, %idxend, %L2, %top
+ br i1 undef, label %L, label %L1
+
+L1: ; preds = %L1.preheader, %L2
+ %r13 = phi i64 [ %r1, %L2 ], [ 1, %L ]
+; CHECK: %lsr.iv = phi i64 [ 0, %L{{[^ ]+}} ], [ %lsr.iv.next, %L2 ]
+; CHECK-NOT: %lsr.iv = phi i64 [ -1, %L{{[^ ]+}} ], [ %lsr.iv.next, %L2 ]
+; CHECK: br
+ %r0 = add i64 %r13, -1
+ br label %idxend.8
+
+L2: ; preds = %idxend.8
+ %r1 = add i64 %r13, 1
+ br i1 undef, label %L, label %L1
+
+if6: ; preds = %idxend.8
+ %r2 = add i64 %0, -1
+ %r3 = load i64, i64* %1, align 8
+; CHECK-NOT: %r2
+; CHECK: %r3 = load i64
+ br label %ib
+
+idxend.8: ; preds = %L1
+ br i1 undef, label %if6, label %L2
+
+ib: ; preds = %if6
+ %r4 = mul i64 %r3, %r0
+ %r5 = add i64 %r2, %r4
+ %r6 = icmp ult i64 %r5, undef
+; CHECK: [[MUL1:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD1:%[0-9]+]] = add i64 [[MUL1]], -1
+; CHECK: add i64 %{{.}}, [[ADD1]]
+; CHECK: %r6
+ %r7 = getelementptr i64, i64* undef, i64 %r5
+ store i64 1, i64* %r7, align 8
+; CHECK: [[MUL2:%[0-9]+]] = mul i64 %lsr.iv, %r3
+; CHECK: [[ADD2:%[0-9]+]] = add i64 [[MUL2]], -1
+ br label %L
+}
diff --git a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index ab7d4f1baa816..fb63b66137f37 100644
--- a/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -1,7 +1,5 @@
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
-; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32
; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
new file mode 100644
index 0000000000000..4888536bdf819
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-1.ll
@@ -0,0 +1,52 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that LSR turns the compare against a static counter into a compare with 0.
+
+; BOTH: for.body:
+; INSN: icmp eq i64 %lsr.iv.next, 0
+; REGS: icmp eq i64 %indvars.iv.next, 1024
+
+; The LLC run checks that LSR optimizes the compare against a static counter.
+; That means that instead of creating the following:
+; movl %ecx, (%rdx,%rax,4)
+; incq %rax
+; cmpq $1024, %rax
+; LSR should optimize out the cmp:
+; movl %ecx, 4096(%rdx,%rax)
+; addq $4, %rax
+; or
+; movl %ecx, 4096(%rdx,%rax,4)
+; incq %rax
+
+; CHECK: LBB0_1:
+; CHECK-NEXT: movl 4096(%{{.+}},[[REG:%[0-9a-z]+]]
+; CHECK-NEXT: addl 4096(%{{.+}},[[REG]]
+; CHECK-NEXT: movl %{{.+}}, 4096(%{{.+}},[[REG]]
+; CHECK-NOT: cmp
+; CHECK: jne
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q) {
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
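
A hypothetical IR-level sketch of the rewrite the INSN prefix expects (not taken from the patch; the store of 0 merely stands in for the real loop body): the counter is biased so that the exit test becomes a compare with 0, which the backend can fold into the flags already set by the increment, eliminating the cmp.

define void @sketch(i32* %q) {
entry:
  %end = getelementptr i32, i32* %q, i64 1024        ; &q[1024], loop-invariant
  br label %loop

loop:
  ; biased counter: runs from -1024 up to 0 so the exit compare is against 0
  %lsr.iv = phi i64 [ -1024, %entry ], [ %lsr.iv.next, %loop ]
  %addr = getelementptr i32, i32* %end, i64 %lsr.iv  ; still addresses q[0..1023]
  store i32 0, i32* %addr, align 4
  %lsr.iv.next = add nsw i64 %lsr.iv, 1
  %done = icmp eq i64 %lsr.iv.next, 0                ; compare with 0, no separate cmp needed
  br i1 %done, label %exit, label %loop

exit:
  ret void
}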
diff --git a/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
new file mode 100644
index 0000000000000..3273cb4e6b5bc
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/lsr-insns-2.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -lsr-insns-cost -S | FileCheck %s -check-prefix=BOTH -check-prefix=INSN
+; RUN: opt < %s -loop-reduce -mtriple=x86_64 -S | FileCheck %s -check-prefix=BOTH -check-prefix=REGS
+; RUN: llc < %s -O2 -march=x86-64 -lsr-insns-cost -asm-verbose=0 | FileCheck %s
+
+; The OPT runs check that LSR prefers saving instructions over saving registers.
+; For x86, LSR should prefer complicated addressing modes over new lsr induction
+; variables.
+
+; BOTH: for.body:
+; INSN: getelementptr i32, i32* %x, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %y, i64 %indvars.iv
+; INSN: getelementptr i32, i32* %q, i64 %indvars.iv
+; REGS: %lsr.iv4 = phi
+; REGS: %lsr.iv2 = phi
+; REGS: %lsr.iv1 = phi
+; REGS: getelementptr i32, i32* %lsr.iv1, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv2, i64 1
+; REGS: getelementptr i32, i32* %lsr.iv4, i64 1
+
+; The LLC run checks that LSR prefers saving instructions over saving registers.
+; LSR should prefer complicated addressing modes over additional add instructions.
+
+; CHECK: LBB0_2:
+; CHECK-NEXT: movl (%r{{.+}},
+; CHECK-NEXT: addl (%r{{.+}},
+; CHECK-NEXT: movl %e{{.+}}, (%r{{.+}},
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Function Attrs: norecurse nounwind uwtable
+define void @foo(i32* nocapture readonly %x, i32* nocapture readonly %y, i32* nocapture %q, i32 %n) {
+entry:
+ %cmp10 = icmp sgt i32 %n, 0
+ br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body, %for.body.preheader
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
+ %tmp = load i32, i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32* %y, i64 %indvars.iv
+ %tmp1 = load i32, i32* %arrayidx2, align 4
+ %add = add nsw i32 %tmp1, %tmp
+ %arrayidx4 = getelementptr inbounds i32, i32* %q, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
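
For contrast, a hand-written sketch of the other output shape discussed above (the REGS form; the names and the %n trip count are made up, not taken from the patch): without -lsr-insns-cost, LSR may materialize one pointer induction variable per array and bump each of them every iteration, instead of keeping a single counter with indexed addressing as in the unmodified loop body.

define void @sketch_regs(i32* %x, i32* %y, i32* %q, i64 %n) {
entry:
  br label %loop

loop:
  ; three pointer induction variables, one per array
  %xp = phi i32* [ %x, %entry ], [ %xp.next, %loop ]
  %yp = phi i32* [ %y, %entry ], [ %yp.next, %loop ]
  %qp = phi i32* [ %q, %entry ], [ %qp.next, %loop ]
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %a = load i32, i32* %xp, align 4
  %b = load i32, i32* %yp, align 4
  %s = add i32 %a, %b
  store i32 %s, i32* %qp, align 4
  ; each pointer is advanced by one element every iteration
  %xp.next = getelementptr i32, i32* %xp, i64 1
  %yp.next = getelementptr i32, i32* %yp, i64 1
  %qp.next = getelementptr i32, i32* %qp, i64 1
  %iv.next = add nuw nsw i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}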
diff --git a/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
new file mode 100644
index 0000000000000..b563eb3ad9940
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/nested-loop.ll
@@ -0,0 +1,65 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; Check that when an outer-loop induction variable is used inside an inner-loop
+; induction value expr, LSR can still choose a single induction variable
+; for the inner loop and share it among multiple induction value exprs.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @foo(i32 %size, i32 %nsteps, i32 %hsize, i32* %lined, i8* %maxarray) {
+entry:
+ %cmp215 = icmp sgt i32 %size, 1
+ %t0 = zext i32 %size to i64
+ %t1 = sext i32 %nsteps to i64
+ %sub2 = sub i64 %t0, 2
+ br label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv2 = phi i64 [ %indvars.iv.next3, %for.inc ], [ 0, %entry ]
+ %t2 = mul nsw i64 %indvars.iv2, %t0
+ br i1 %cmp215, label %for.body2.preheader, label %for.inc
+
+for.body2.preheader: ; preds = %for.body
+ br label %for.body2
+
+; Check that LSR generates only one induction variable for for.body2 and that the
+; induction variable is shared by multiple array accesses.
+; CHECK: for.body2:
+; CHECK-NEXT: [[LSR:%[^,]+]] = phi i64 [ %lsr.iv.next, %for.body2 ], [ 0, %for.body2.preheader ]
+; CHECK-NOT: = phi i64 [ {{.*}}, %for.body2 ], [ {{.*}}, %for.body2.preheader ]
+; CHECK: [[SCEVGEP1:%[^,]+]] = getelementptr i8, i8* %maxarray, i64 [[LSR]]
+; CHECK: [[SCEVGEP2:%[^,]+]] = getelementptr i8, i8* [[SCEVGEP1]], i64 1
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP2]], align 1
+; CHECK: [[SCEVGEP3:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: {{.*}} = load i8, i8* [[SCEVGEP3]], align 1
+; CHECK: [[SCEVGEP4:%[^,]+]] = getelementptr i8, i8* {{.*}}, i64 [[LSR]]
+; CHECK: store i8 {{.*}}, i8* [[SCEVGEP4]], align 1
+; CHECK: br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.body2: ; preds = %for.body2.preheader, %for.body2
+ %indvars.iv = phi i64 [ 1, %for.body2.preheader ], [ %indvars.iv.next, %for.body2 ]
+ %arrayidx1 = getelementptr inbounds i8, i8* %maxarray, i64 %indvars.iv
+ %v1 = load i8, i8* %arrayidx1, align 1
+ %idx2 = add nsw i64 %indvars.iv, %sub2
+ %arrayidx2 = getelementptr inbounds i8, i8* %maxarray, i64 %idx2
+ %v2 = load i8, i8* %arrayidx2, align 1
+ %tmpv = xor i8 %v1, %v2
+ %t4 = add nsw i64 %t2, %indvars.iv
+ %add.ptr = getelementptr inbounds i8, i8* %maxarray, i64 %t4
+ store i8 %tmpv, i8* %add.ptr, align 1
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %wide.trip.count = zext i32 %size to i64
+ %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond, label %for.body2, label %for.inc.loopexit
+
+for.inc.loopexit: ; preds = %for.body2
+ br label %for.inc
+
+for.inc: ; preds = %for.inc.loopexit, %for.body
+ %indvars.iv.next3 = add nuw nsw i64 %indvars.iv2, 1
+ %cmp = icmp slt i64 %indvars.iv.next3, %t1
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.inc
+ ret void
+}
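
A cut-down, hypothetical sketch of the property those CHECK lines assert (only the inner loop is shown; %outer and %stride stand in for the outer-loop values): all three accesses are addressed off the same lsr.iv, with the outer-loop contribution folded into inner-loop-invariant base pointers.

define void @sketch(i8* %p, i64 %outer, i64 %stride, i64 %n) {
entry:
  %base2 = getelementptr i8, i8* %p, i64 %stride   ; invariant in the inner loop
  %t = mul i64 %outer, %stride                     ; outer-loop contribution
  %base3 = getelementptr i8, i8* %p, i64 %t        ; also inner-loop invariant
  br label %inner

inner:
  %lsr.iv = phi i64 [ 0, %entry ], [ %lsr.iv.next, %inner ]
  %a1 = getelementptr i8, i8* %p, i64 %lsr.iv      ; shared IV, access 1
  %a2 = getelementptr i8, i8* %base2, i64 %lsr.iv  ; shared IV, access 2
  %a3 = getelementptr i8, i8* %base3, i64 %lsr.iv  ; shared IV, access 3
  %v1 = load i8, i8* %a1, align 1
  %v2 = load i8, i8* %a2, align 1
  %v = xor i8 %v1, %v2
  store i8 %v, i8* %a3, align 1
  %lsr.iv.next = add nuw nsw i64 %lsr.iv, 1
  %done = icmp eq i64 %lsr.iv.next, %n
  br i1 %done, label %exit, label %inner

exit:
  ret void
}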
diff --git a/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
new file mode 100644
index 0000000000000..a69d6adc0f038
--- /dev/null
+++ b/test/Transforms/LoopStrengthReduce/X86/sibling-loops.ll
@@ -0,0 +1,97 @@
+; RUN: opt -loop-reduce -S < %s | FileCheck %s
+; It is harmful to allow an LSR formula to contain a SCEVAddRecExpr Reg from a
+; sibling of the current loop. When one loop is LSR-optimized, such a formula
+; can insert lsr.iv phis into other sibling loops, which sometimes leads to
+; many extra lsr.iv being inserted across the loops.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@cond = common local_unnamed_addr global i64 0, align 8
+
+; Check that no extra lsr.iv is generated in foo.
+; CHECK-LABEL: @foo(
+; CHECK-NOT: lsr.iv{{[0-9]+}} =
+;
+define void @foo(i64 %N) local_unnamed_addr {
+entry:
+ br label %do.body
+
+do.body: ; preds = %do.body, %entry
+ %i.0 = phi i64 [ 0, %entry ], [ %inc, %do.body ]
+ tail call void @goo(i64 %i.0, i64 %i.0)
+ %inc = add nuw nsw i64 %i.0, 1
+ %t0 = load i64, i64* @cond, align 8
+ %tobool = icmp eq i64 %t0, 0
+ br i1 %tobool, label %do.body2.preheader, label %do.body
+
+do.body2.preheader: ; preds = %do.body
+ br label %do.body2
+
+do.body2: ; preds = %do.body2.preheader, %do.body2
+ %i.1 = phi i64 [ %inc3, %do.body2 ], [ 0, %do.body2.preheader ]
+ %j.1 = phi i64 [ %inc4, %do.body2 ], [ %inc, %do.body2.preheader ]
+ tail call void @goo(i64 %i.1, i64 %j.1)
+ %inc3 = add nuw nsw i64 %i.1, 1
+ %inc4 = add nsw i64 %j.1, 1
+ %t1 = load i64, i64* @cond, align 8
+ %tobool6 = icmp eq i64 %t1, 0
+ br i1 %tobool6, label %do.body8.preheader, label %do.body2
+
+do.body8.preheader: ; preds = %do.body2
+ br label %do.body8
+
+do.body8: ; preds = %do.body8.preheader, %do.body8
+ %i.2 = phi i64 [ %inc9, %do.body8 ], [ 0, %do.body8.preheader ]
+ %j.2 = phi i64 [ %inc10, %do.body8 ], [ %inc4, %do.body8.preheader ]
+ tail call void @goo(i64 %i.2, i64 %j.2)
+ %inc9 = add nuw nsw i64 %i.2, 1
+ %inc10 = add nsw i64 %j.2, 1
+ %t2 = load i64, i64* @cond, align 8
+ %tobool12 = icmp eq i64 %t2, 0
+ br i1 %tobool12, label %do.body14.preheader, label %do.body8
+
+do.body14.preheader: ; preds = %do.body8
+ br label %do.body14
+
+do.body14: ; preds = %do.body14.preheader, %do.body14
+ %i.3 = phi i64 [ %inc15, %do.body14 ], [ 0, %do.body14.preheader ]
+ %j.3 = phi i64 [ %inc16, %do.body14 ], [ %inc10, %do.body14.preheader ]
+ tail call void @goo(i64 %i.3, i64 %j.3)
+ %inc15 = add nuw nsw i64 %i.3, 1
+ %inc16 = add nsw i64 %j.3, 1
+ %t3 = load i64, i64* @cond, align 8
+ %tobool18 = icmp eq i64 %t3, 0
+ br i1 %tobool18, label %do.body20.preheader, label %do.body14
+
+do.body20.preheader: ; preds = %do.body14
+ br label %do.body20
+
+do.body20: ; preds = %do.body20.preheader, %do.body20
+ %i.4 = phi i64 [ %inc21, %do.body20 ], [ 0, %do.body20.preheader ]
+ %j.4 = phi i64 [ %inc22, %do.body20 ], [ %inc16, %do.body20.preheader ]
+ tail call void @goo(i64 %i.4, i64 %j.4)
+ %inc21 = add nuw nsw i64 %i.4, 1
+ %inc22 = add nsw i64 %j.4, 1
+ %t4 = load i64, i64* @cond, align 8
+ %tobool24 = icmp eq i64 %t4, 0
+ br i1 %tobool24, label %do.body26.preheader, label %do.body20
+
+do.body26.preheader: ; preds = %do.body20
+ br label %do.body26
+
+do.body26: ; preds = %do.body26.preheader, %do.body26
+ %i.5 = phi i64 [ %inc27, %do.body26 ], [ 0, %do.body26.preheader ]
+ %j.5 = phi i64 [ %inc28, %do.body26 ], [ %inc22, %do.body26.preheader ]
+ tail call void @goo(i64 %i.5, i64 %j.5)
+ %inc27 = add nuw nsw i64 %i.5, 1
+ %inc28 = add nsw i64 %j.5, 1
+ %t5 = load i64, i64* @cond, align 8
+ %tobool30 = icmp eq i64 %t5, 0
+ br i1 %tobool30, label %do.end31, label %do.body26
+
+do.end31: ; preds = %do.body26
+ ret void
+}
+
+declare void @goo(i64, i64) local_unnamed_addr
+
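
Finally, a hand-written sketch of the shape the CHECK-NOT in sibling-loops.ll guards against (not produced by the patch; @use and the starting constant 7 are arbitrary): an extra lsr.iv is threaded through one loop only because a formula chosen for its sibling loop referenced that loop's AddRec.

define void @sketch(i64 %n) {
entry:
  br label %loop1

loop1:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop1 ]
  ; extra IV carried through loop1; its only real consumer is the sibling loop
  %lsr.iv1 = phi i64 [ 7, %entry ], [ %lsr.iv1.next, %loop1 ]
  %i.next = add nuw nsw i64 %i, 1
  %lsr.iv1.next = add i64 %lsr.iv1, 1
  %c1 = icmp eq i64 %i.next, %n
  br i1 %c1, label %loop2, label %loop1

loop2:
  %j = phi i64 [ %lsr.iv1.next, %loop1 ], [ %j.next, %loop2 ]  ; uses the extra IV
  call void @use(i64 %j)
  %j.next = add i64 %j, 1
  %c2 = icmp eq i64 %j.next, %n
  br i1 %c2, label %exit, label %loop2

exit:
  ret void
}

declare void @use(i64)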