author    Dimitry Andric <dim@FreeBSD.org>  2017-05-16 19:46:52 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-05-16 19:46:52 +0000
commit    6b3f41ed88e8e440e11a4fbf20b6600529f80049 (patch)
tree      928b056f24a634d628c80238dbbf10d41b1a71d5 /test/Transforms/LoopVectorize
parent    c46e6a5940c50058e00c0c5f9123fd82e338d29a (diff)
Diffstat (limited to 'test/Transforms/LoopVectorize')
-rw-r--r--  test/Transforms/LoopVectorize/X86/svml-calls-finite.ll  | 187
-rw-r--r--  test/Transforms/LoopVectorize/induction.ll              |  45
-rw-r--r--  test/Transforms/LoopVectorize/pr32859.ll                |  30
3 files changed, 262 insertions, 0 deletions
diff --git a/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll b/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll
new file mode 100644
index 000000000000..5a4bfe5e6bdd
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/svml-calls-finite.ll
@@ -0,0 +1,187 @@
+; RUN: opt -vector-library=SVML -loop-vectorize -S < %s | FileCheck %s
+
+; Test to verify that when math headers are built with
+; __FINITE_MATH_ONLY__ enabled, so that calls use the __<func>_finite
+; versions, the vectorizer can map these calls to the SVML vector versions.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare float @__expf_finite(float) #0
+
+; CHECK-LABEL: @exp_f32
+; CHECK: <4 x float> @__svml_expf4
+; CHECK: ret
+define void @exp_f32(float* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__expf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %call, float* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !1
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!1 = distinct !{!1, !2, !3}
+!2 = !{!"llvm.loop.vectorize.width", i32 4}
+!3 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__exp_finite(double) #0
+
+; CHECK-LABEL: @exp_f64
+; CHECK: <4 x double> @__svml_exp4
+; CHECK: ret
+define void @exp_f64(double* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__exp_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %call, double* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !11
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!11 = distinct !{!11, !12, !13}
+!12 = !{!"llvm.loop.vectorize.width", i32 4}
+!13 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+
+
+declare float @__logf_finite(float) #0
+
+; CHECK-LABEL: @log_f32
+; CHECK: <4 x float> @__svml_logf4
+; CHECK: ret
+define void @log_f32(float* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %call = tail call fast float @__logf_finite(float %conv)
+ %arrayidx = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %call, float* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !21
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!21 = distinct !{!21, !22, !23}
+!22 = !{!"llvm.loop.vectorize.width", i32 4}
+!23 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__log_finite(double) #0
+
+; CHECK-LABEL: @log_f64
+; CHECK: <4 x double> @__svml_log4
+; CHECK: ret
+define void @log_f64(double* nocapture %varray) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %call = tail call fast double @__log_finite(double %conv)
+ %arrayidx = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %call, double* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !31
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!31 = distinct !{!31, !32, !33}
+!32 = !{!"llvm.loop.vectorize.width", i32 4}
+!33 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare float @__powf_finite(float, float) #0
+
+; CHECK-LABEL: @pow_f32
+; CHECK: <4 x float> @__svml_powf4
+; CHECK: ret
+define void @pow_f32(float* nocapture %varray, float* nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to float
+ %arrayidx = getelementptr inbounds float, float* %exp, i64 %indvars.iv
+ %tmp1 = load float, float* %arrayidx, align 4
+ %tmp2 = tail call fast float @__powf_finite(float %conv, float %tmp1)
+ %arrayidx2 = getelementptr inbounds float, float* %varray, i64 %indvars.iv
+ store float %tmp2, float* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !41
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!41 = distinct !{!41, !42, !43}
+!42 = !{!"llvm.loop.vectorize.width", i32 4}
+!43 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+
+declare double @__pow_finite(double, double) #0
+
+; CHECK-LABEL: @pow_f64
+; CHECK: <4 x double> @__svml_pow4
+; CHECK: ret
+define void @pow_f64(double* nocapture %varray, double* nocapture readonly %exp) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %tmp = trunc i64 %indvars.iv to i32
+ %conv = sitofp i32 %tmp to double
+ %arrayidx = getelementptr inbounds double, double* %exp, i64 %indvars.iv
+ %tmp1 = load double, double* %arrayidx, align 4
+ %tmp2 = tail call fast double @__pow_finite(double %conv, double %tmp1)
+ %arrayidx2 = getelementptr inbounds double, double* %varray, i64 %indvars.iv
+ store double %tmp2, double* %arrayidx2, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1000
+ br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !51
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+!51 = distinct !{!51, !52, !53}
+!52 = !{!"llvm.loop.vectorize.width", i32 4}
+!53 = !{!"llvm.loop.vectorize.enable", i1 true}
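
All six tests in this new file follow the same pattern: a scalar __<func>_finite call inside a 1000-iteration loop, loop metadata forcing a vector width of 4, and CHECK lines verifying that the call is widened to the corresponding __svml_* library routine. As a rough sketch of what one widened loop iteration computes for the f32 exp case (only the callee name and vector width come from the test; the function, its parameters, and the SSA names are illustrative):

declare <4 x float> @__svml_expf4(<4 x float>)

; One SVML call now covers four scalar @__expf_finite calls per iteration.
define void @vector_body_sketch(float* %varray, i64 %index, <4 x float> %vec.conv) {
entry:
  %wide.call = call fast <4 x float> @__svml_expf4(<4 x float> %vec.conv)
  %ptr = getelementptr inbounds float, float* %varray, i64 %index
  %vptr = bitcast float* %ptr to <4 x float>*
  store <4 x float> %wide.call, <4 x float>* %vptr, align 4
  ret void
}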
diff --git a/test/Transforms/LoopVectorize/induction.ll b/test/Transforms/LoopVectorize/induction.ll
index 6507166dd1f2..7e9e6b1cdc8e 100644
--- a/test/Transforms/LoopVectorize/induction.ll
+++ b/test/Transforms/LoopVectorize/induction.ll
@@ -849,3 +849,48 @@ for.end:
%tmp7 = phi i32 [ %tmp6, %for.inc ]
ret i32 %tmp7
}
+
+; Ensure that the shufflevector for the first-order recurrence is inserted
+; correctly after all the phis. These new phis correspond to new IVs
+; generated by optimizing non-free truncs of IVs into IVs of their own.
+define i64 @trunc_with_first_order_recurrence() {
+; CHECK-LABEL: trunc_with_first_order_recurrence
+; CHECK-LABEL: vector.body:
+; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT: %vec.phi = phi <2 x i64>
+; CHECK-NEXT: %vec.ind = phi <2 x i64> [ <i64 1, i64 2>, %vector.ph ], [ %vec.ind.next, %vector.body ]
+; CHECK-NEXT: %vec.ind2 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next3, %vector.body ]
+; CHECK-NEXT: %vector.recur = phi <2 x i32> [ <i32 undef, i32 42>, %vector.ph ], [ %vec.ind5, %vector.body ]
+; CHECK-NEXT: %vec.ind5 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next6, %vector.body ]
+; CHECK-NEXT: %vec.ind7 = phi <2 x i32> [ <i32 1, i32 2>, %vector.ph ], [ %vec.ind.next8, %vector.body ]
+; CHECK-NEXT: shufflevector <2 x i32> %vector.recur, <2 x i32> %vec.ind5, <2 x i32> <i32 1, i32 2>
+entry:
+ br label %loop
+
+exit: ; preds = %loop
+ %.lcssa = phi i64 [ %c23, %loop ]
+ ret i64 %.lcssa
+
+loop: ; preds = %loop, %entry
+ %c5 = phi i64 [ %c23, %loop ], [ 0, %entry ]
+ %indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 1, %entry ]
+ %x = phi i32 [ %c24, %loop ], [ 1, %entry ]
+ %y = phi i32 [ %c6, %loop ], [ 42, %entry ]
+ %c6 = trunc i64 %indvars.iv to i32
+ %c8 = mul i32 %x, %c6
+ %c9 = add i32 %c8, 42
+ %c10 = add i32 %y, %c6
+ %c11 = add i32 %c10, %c9
+ %c12 = sext i32 %c11 to i64
+ %c13 = add i64 %c5, %c12
+ %indvars.iv.tr = trunc i64 %indvars.iv to i32
+ %c14 = shl i32 %indvars.iv.tr, 1
+ %c15 = add i32 %c9, %c14
+ %c16 = sext i32 %c15 to i64
+ %c23 = add i64 %c13, %c16
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %c24 = add nuw nsw i32 %x, 1
+ %exitcond.i = icmp eq i64 %indvars.iv.next, 114
+ br i1 %exitcond.i, label %exit, label %loop
+
+}
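
The last CHECK line above is the one this test exists for: with VF=2, %y in each lane must see the %c6 value from the previous scalar iteration, so the vectorizer splices the previous vector iteration's values (%vector.recur) together with the current ones (%vec.ind5). A minimal standalone sketch of that splice, using the same mask as the CHECK line (the function wrapper is illustrative):

; Mask element 1 picks the last lane of %vector.recur (the prior vector
; iteration's final %c6); mask element 2 picks the first lane of %vec.ind5
; (the current iteration's first %c6), giving the two %y lane values.
define <2 x i32> @recurrence_splice(<2 x i32> %vector.recur, <2 x i32> %vec.ind5) {
entry:
  %y = shufflevector <2 x i32> %vector.recur, <2 x i32> %vec.ind5, <2 x i32> <i32 1, i32 2>
  ret <2 x i32> %y
}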
diff --git a/test/Transforms/LoopVectorize/pr32859.ll b/test/Transforms/LoopVectorize/pr32859.ll
new file mode 100644
index 000000000000..31cb84699f71
--- /dev/null
+++ b/test/Transforms/LoopVectorize/pr32859.ll
@@ -0,0 +1,30 @@
+; RUN: opt < %s -loop-vectorize -S | FileCheck %s
+
+; Because of LCSSA form we could have 'phi i32 [ loop-invariant, %for.inc.2.i ]',
+; but the IR Verifier requires a PHI to have one entry for each predecessor of
+; its parent basic block. The original PR14725 solution for the issue just
+; added 'undef' for the extra predecessor BB, which is not correct. We copy
+; the real value from the other predecessor instead of introducing 'undef'.
+
+; CHECK-LABEL: for.cond.preheader:
+; CHECK: %e.0.ph = phi i32 [ 0, %if.end.2.i ], [ 0, %middle.block ]
+
+; Function Attrs: nounwind uwtable
+define void @main() #0 {
+entry:
+ br label %for.cond1.preheader.i
+
+for.cond1.preheader.i: ; preds = %if.end.2.i, %entry
+ %c.06.i = phi i32 [ 0, %entry ], [ %inc5.i, %if.end.2.i ]
+ %tobool.i = icmp ne i32 undef, 0
+ br label %if.end.2.i
+
+if.end.2.i: ; preds = %for.cond1.preheader.i
+ %inc5.i = add nsw i32 %c.06.i, 1
+ %cmp.i = icmp slt i32 %inc5.i, 16
+ br i1 %cmp.i, label %for.cond1.preheader.i, label %for.cond.preheader
+
+for.cond.preheader: ; preds = %if.end.2.i
+ %e.0.ph = phi i32 [ 0, %if.end.2.i ]
+ unreachable
+}
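
The CHECK above is about the second incoming entry: once the vectorizer adds %middle.block as an extra predecessor of %for.cond.preheader, the verifier's one-entry-per-predecessor rule applies, and the fix duplicates the existing incoming value rather than adding 'undef'. A minimal standalone sketch of the rule (block names and values here are illustrative, not taken from the pass output):

define i32 @phi_per_predecessor(i1 %took.vector.path) {
entry:
  br i1 %took.vector.path, label %middle.block, label %scalar.path

middle.block:                       ; new edge added by the vectorizer
  br label %merge

scalar.path:                        ; original edge
  br label %merge

merge:                              ; two predecessors, so two PHI entries
  %e.0.ph = phi i32 [ 0, %scalar.path ], [ 0, %middle.block ]
  ret i32 %e.0.ph
}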