| author | Dimitry Andric <dim@FreeBSD.org> | 2017-05-29 16:25:25 +0000 |
| committer | Dimitry Andric <dim@FreeBSD.org> | 2017-05-29 16:25:25 +0000 |
| commit | ab44ce3d598882e51a25eb82eb7ae6308de85ae6 (patch) | |
| tree | 568d786a59d49bef961dcb9bd09d422701b9da5b /test/Analysis | |
| parent | b5630dbadf9a2a06754194387d6b0fd9962a67f1 (diff) | |
Diffstat (limited to 'test/Analysis')
| -rw-r--r-- | test/Analysis/CostModel/AArch64/falkor.ll | 26 |
| -rw-r--r-- | test/Analysis/Delinearization/constant_functions_multi_dim.ll | 80 |
| -rw-r--r-- | test/Analysis/IVUsers/quadradic-exit-value.ll | 36 |
| -rw-r--r-- | test/Analysis/ScalarEvolution/different-loops-recs.ll | 64 |
4 files changed, 176 insertions, 30 deletions
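
A note for reading the CHECK lines in the diff below (a reader's gloss, not part of the commit): ScalarEvolution prints an add recurrence as {S,+,X}<%L>, meaning the expression starts at S and advances by X on each iteration of loop %L. The updated tests distinguish two prints of the same arithmetic:

    {(%a + %v1),+,1}<%loop1>     value on iteration i is (%a + %v1) + i; %v1 is folded into the recurrence start
    ({%a,+,1}<%loop1> + %v1)     value on iteration i is (%a + i) + %v1; %v1 stays outside as a SCEVUnknown

Folding the unknown into the start is only valid when it is known to be defined before the loop, which is exactly what the new test1/test2 comments in quadradic-exit-value.ll spell out.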
diff --git a/test/Analysis/CostModel/AArch64/falkor.ll b/test/Analysis/CostModel/AArch64/falkor.ll
deleted file mode 100644
index e9563191f077b..0000000000000
--- a/test/Analysis/CostModel/AArch64/falkor.ll
+++ /dev/null
@@ -1,26 +0,0 @@
-; RUN: opt < %s -cost-model -analyze -mcpu=falkor | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
-target triple = "aarch64--linux-gnu"
-
-; CHECK-LABEL: vectorInstrCost
-define void @vectorInstrCost() {
-
-  ; Vector extracts - extracting the first element should have a zero cost;
-  ; all other elements should have a cost of two.
-  ;
-  ; CHECK: cost of 0 {{.*}} extractelement <2 x i64> undef, i32 0
-  ; CHECK: cost of 2 {{.*}} extractelement <2 x i64> undef, i32 1
-  %t1 = extractelement <2 x i64> undef, i32 0
-  %t2 = extractelement <2 x i64> undef, i32 1
-
-  ; Vector inserts - inserting the first element should have a zero cost; all
-  ; other elements should have a cost of two.
-  ;
-  ; CHECK: cost of 0 {{.*}} insertelement <2 x i64> undef, i64 undef, i32 0
-  ; CHECK: cost of 2 {{.*}} insertelement <2 x i64> undef, i64 undef, i32 1
-  %t3 = insertelement <2 x i64> undef, i64 undef, i32 0
-  %t4 = insertelement <2 x i64> undef, i64 undef, i32 1
-
-  ret void
-}
diff --git a/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/test/Analysis/Delinearization/constant_functions_multi_dim.ll
new file mode 100644
index 0000000000000..b44b900d3f522
--- /dev/null
+++ b/test/Analysis/Delinearization/constant_functions_multi_dim.ll
@@ -0,0 +1,80 @@
+; RUN: opt -delinearize -analyze < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; CHECK: Inst: %tmp = load float, float* %arrayidx, align 4
+; CHECK-NEXT: In Loop with Header: for.inc
+; CHECK-NEXT: AccessFunction: {(4 * %N * %call),+,4}<nsw><%for.inc>
+; CHECK-NEXT: Base offset: %A
+; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[%call][{0,+,1}<nuw><nsw><%for.inc>]
+
+; CHECK: Inst: %tmp5 = load float, float* %arrayidx4, align 4
+; CHECK-NEXT: In Loop with Header: for.inc
+; CHECK-NEXT: AccessFunction: {(4 * %call1),+,(4 * %N)}<nsw><%for.inc>
+; CHECK-NEXT: Base offset: %B
+; CHECK-NEXT: ArrayDecl[UnknownSize][%N] with elements of 4 bytes.
+; CHECK-NEXT: ArrayRef[{0,+,1}<nuw><nsw><%for.inc>][%call1]
+
+; Function Attrs: noinline nounwind uwtable
+define void @mat_mul(float* %C, float* %A, float* %B, i64 %N) #0 !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+entry:
+  br label %entry.split
+
+entry.split:                                      ; preds = %entry
+  %call = tail call i64 @_Z13get_global_idj(i32 0) #3
+  %call1 = tail call i64 @_Z13get_global_idj(i32 1) #3
+  %cmp1 = icmp sgt i64 %N, 0
+  %mul = mul nsw i64 %call, %N
+  br i1 %cmp1, label %for.inc.lr.ph, label %for.end
+
+for.inc.lr.ph:                                    ; preds = %entry.split
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.inc.lr.ph, %for.inc
+  %acc.03 = phi float [ 0.000000e+00, %for.inc.lr.ph ], [ %tmp6, %for.inc ]
+  %m.02 = phi i64 [ 0, %for.inc.lr.ph ], [ %inc, %for.inc ]
+  %add = add nsw i64 %m.02, %mul
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %add
+  %tmp = load float, float* %arrayidx, align 4
+  %mul2 = mul nsw i64 %m.02, %N
+  %add3 = add nsw i64 %mul2, %call1
+  %arrayidx4 = getelementptr inbounds float, float* %B, i64 %add3
+  %tmp5 = load float, float* %arrayidx4, align 4
+  %tmp6 = tail call float @llvm.fmuladd.f32(float %tmp, float %tmp5, float %acc.03)
+  %inc = add nuw nsw i64 %m.02, 1
+  %exitcond = icmp ne i64 %inc, %N
+  br i1 %exitcond, label %for.inc, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:                       ; preds = %for.inc
+  %.lcssa = phi float [ %tmp6, %for.inc ]
+  br label %for.end
+
+for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry.split
+  %acc.0.lcssa = phi float [ %.lcssa, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry.split ]
+  %add7 = add nsw i64 %mul, %call1
+  %arrayidx8 = getelementptr inbounds float, float* %C, i64 %add7
+  store float %acc.0.lcssa, float* %arrayidx8, align 4
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @_Z13get_global_idj(i32) #1
+
+; Function Attrs: nounwind readnone speculatable
+declare float @llvm.fmuladd.f32(float, float, float) #2
+
+attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone speculatable }
+attributes #3 = { nounwind readnone }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{!"clang version 5.0.0 (trunk 303846) (llvm/trunk 303834)"}
+!2 = !{i32 1, i32 1, i32 1, i32 0}
+!3 = !{!"none", !"none", !"none", !"none"}
+!4 = !{!"float*", !"float*", !"float*", !"long"}
+!5 = !{!"", !"", !"", !""}
diff --git a/test/Analysis/IVUsers/quadradic-exit-value.ll b/test/Analysis/IVUsers/quadradic-exit-value.ll
index 6d4f1b039b481..afc2151982183 100644
--- a/test/Analysis/IVUsers/quadradic-exit-value.ll
+++ b/test/Analysis/IVUsers/quadradic-exit-value.ll
@@ -30,13 +30,47 @@ exit:
   ret i64 %r
 }
 
+; PR15470: LSR miscompile. The test1 function should return '1'.
+; It is valid to fold SCEVUnknown into the recurrence because it
+; was defined before the loop.
+;
+; SCEV does not know how to denormalize chained recurrences, so make
+; sure they aren't marked as post-inc users.
+;
+; CHECK-LABEL: IV Users for loop %test1.loop
+; CHECK-NO-LCSSA: %sext.us = {0,+,(16777216 + (-16777216 * %sub.us))<nuw><nsw>,+,33554432}<%test1.loop> (post-inc with loop %test1.loop) in %f = ashr i32 %sext.us, 24
+define i32 @test1(i1 %cond) {
+entry:
+  %sub.us = select i1 %cond, i32 0, i32 0
+  br label %test1.loop
+
+test1.loop:
+  %inc1115.us = phi i32 [ 0, %entry ], [ %inc11.us, %test1.loop ]
+  %inc11.us = add nsw i32 %inc1115.us, 1
+  %cmp.us = icmp slt i32 %inc11.us, 2
+  br i1 %cmp.us, label %test1.loop, label %for.end
+
+for.end:
+  %tobool.us = icmp eq i32 %inc1115.us, 0
+  %mul.us = shl i32 %inc1115.us, 24
+  %sub.cond.us = sub nsw i32 %inc1115.us, %sub.us
+  %sext.us = mul i32 %mul.us, %sub.cond.us
+  %f = ashr i32 %sext.us, 24
+  br label %exit
+
+exit:
+  ret i32 %f
+}
+
 ; PR15470: LSR miscompile. The test2 function should return '1'.
+; It is illegal to fold SCEVUnknown (sext.us) into the recurrence
+; because it is defined after the loop where this recurrence belongs.
 ;
 ; SCEV does not know how to denormalize chained recurrences, so make
 ; sure they aren't marked as post-inc users.
 ;
 ; CHECK-LABEL: IV Users for loop %test2.loop
-; CHECK-NO-LCSSA: %sext.us = {0,+,(16777216 + (-16777216 * %sub.us))<nuw><nsw>,+,33554432}<%test2.loop> (post-inc with loop %test2.loop) in %f = ashr i32 %sext.us, 24
+; CHECK-NO-LCSSA: %sub.cond.us = ((-1 * %sub.us)<nsw> + {0,+,1}<nuw><nsw><%test2.loop>) (post-inc with loop %test2.loop) in %sext.us = mul i32 %mul.us, %sub.cond.us
 define i32 @test2() {
 entry:
   br label %test2.loop
diff --git a/test/Analysis/ScalarEvolution/different-loops-recs.ll b/test/Analysis/ScalarEvolution/different-loops-recs.ll
index ad3d1e0bd1100..6b88f09e936fb 100644
--- a/test/Analysis/ScalarEvolution/different-loops-recs.ll
+++ b/test/Analysis/ScalarEvolution/different-loops-recs.ll
@@ -220,7 +220,8 @@ exit:
 
 ; Mix of previous use cases that demonstrates %s3 can be incorrectly treated as
 ; a recurrence of loop1 because of operands order if we pick recurrencies in an
-; incorrect order.
+; incorrect order. It also shows that we cannot safely fold v1 (SCEVUnknown)
+; because we cannot prove for sure that it doesn't use Phis of loop 2.
 
 define void @test_03(i32 %a, i32 %b, i32 %c, i32* %p) {
 
@@ -228,9 +229,9 @@ define void @test_03(i32 %a, i32 %b, i32 %c, i32* %p) {
 ; CHECK: %v1 = load i32, i32* %p
 ; CHECK-NEXT: --> %v1
 ; CHECK: %s1 = add i32 %phi1, %v1
-; CHECK-NEXT: --> {(%a + %v1),+,1}<%loop1>
+; CHECK-NEXT: --> ({%a,+,1}<%loop1> + %v1)
 ; CHECK: %s2 = add i32 %s1, %b
-; CHECK-NEXT: --> {(%a + %b + %v1),+,1}<%loop1>
+; CHECK-NEXT: --> ({(%a + %b),+,1}<%loop1> + %v1)
 ; CHECK: %s3 = add i32 %s2, %phi2
 ; CHECK-NEXT: --> ({{{{}}((2 * %a) + %b),+,1}<%loop1>,+,2}<%loop2> + %v1)
 
@@ -452,3 +453,60 @@ exit:
   %s6 = add i32 %phi3, %phi2
   ret void
 }
+
+; Make sure that a complicated Phi does not get folded with rec's start value
+; of a loop which is above.
+define void @test_08() {
+
+; CHECK-LABEL: Classifying expressions for: @test_08
+; CHECK: %tmp11 = add i64 %iv.2.2, %iv.2.1
+; CHECK-NEXT: --> ({0,+,-1}<nsw><%loop_2> + %iv.2.1)
+; CHECK: %tmp12 = trunc i64 %tmp11 to i32
+; CHECK-NEXT: --> (trunc i64 ({0,+,-1}<nsw><%loop_2> + %iv.2.1) to i32)
+; CHECK: %tmp14 = mul i32 %tmp12, %tmp7
+; CHECK-NEXT: --> ((trunc i64 ({0,+,-1}<nsw><%loop_2> + %iv.2.1) to i32) * {-1,+,-1}<%loop_1>)
+; CHECK: %tmp16 = mul i64 %iv.2.1, %iv.1.1
+; CHECK-NEXT: --> ({2,+,1}<nuw><nsw><%loop_1> * %iv.2.1)
+
+entry:
+  br label %loop_1
+
+loop_1:
+  %iv.1.1 = phi i64 [ 2, %entry ], [ %iv.1.1.next, %loop_1_back_branch ]
+  %iv.1.2 = phi i32 [ -1, %entry ], [ %iv.1.2.next, %loop_1_back_branch ]
+  br label %loop_1_exit
+
+dead:
+  br label %loop_1_exit
+
+loop_1_exit:
+  %tmp5 = icmp sgt i64 %iv.1.1, 2
+  br i1 %tmp5, label %loop_2_preheader, label %loop_1_back_branch
+
+loop_1_back_branch:
+  %iv.1.1.next = add nuw nsw i64 %iv.1.1, 1
+  %iv.1.2.next = add nsw i32 %iv.1.2, 1
+  br label %loop_1
+
+loop_2_preheader:
+  %tmp6 = sub i64 1, %iv.1.1
+  %tmp7 = trunc i64 %tmp6 to i32
+  br label %loop_2
+
+loop_2:
+  %iv.2.1 = phi i64 [ 0, %loop_2_preheader ], [ %tmp16, %loop_2 ]
+  %iv.2.2 = phi i64 [ 0, %loop_2_preheader ], [ %iv.2.2.next, %loop_2 ]
+  %iv.2.3 = phi i64 [ 2, %loop_2_preheader ], [ %iv.2.3.next, %loop_2 ]
+  %tmp11 = add i64 %iv.2.2, %iv.2.1
+  %tmp12 = trunc i64 %tmp11 to i32
+  %tmp14 = mul i32 %tmp12, %tmp7
+  %tmp16 = mul i64 %iv.2.1, %iv.1.1
+  %iv.2.3.next = add nuw nsw i64 %iv.2.3, 1
+  %iv.2.2.next = add nsw i64 %iv.2.2, -1
+  %tmp17 = icmp slt i64 %iv.2.3.next, %iv.1.1
+  br i1 %tmp17, label %loop_2, label %exit
+
+exit:
+  %tmp10 = add i32 %iv.1.2, 3
+  ret void
+}
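
The RUN lines embedded in the tests above are the authoritative way to invoke them; the commands below are just a convenience sketch and assume a built LLVM tree with opt, FileCheck and llvm-lit reachable on PATH (the relative paths from the LLVM source root are an assumption, not part of the commit):

    # Replay the new Delinearization test exactly as its RUN line does.
    opt -delinearize -analyze < test/Analysis/Delinearization/constant_functions_multi_dim.ll \
      | FileCheck test/Analysis/Delinearization/constant_functions_multi_dim.ll

    # Or let lit drive everything under the affected directory.
    llvm-lit -v test/Analysis

Either form fails with a FileCheck error if the printed SCEV or delinearization output stops matching the CHECK lines added in this import.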