author    Dimitry Andric <dim@FreeBSD.org>    2013-06-10 20:36:52 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2013-06-10 20:36:52 +0000
commit    59d6cff90eecf31cb3dd860c4e786674cfdd42eb (patch)
tree      909310b2e05119d1d6efda049977042abbb58bb1 /test/Transforms
parent    4a16efa3e43e35f0cc9efe3a67f620f0017c3d36 (diff)
Diffstat (limited to 'test/Transforms')
-rw-r--r--  test/Transforms/BBVectorize/X86/loop1.ll | 10
-rw-r--r--  test/Transforms/BBVectorize/X86/simple.ll | 23
-rw-r--r--  test/Transforms/ConstantMerge/merge-both.ll | 3
-rw-r--r--  test/Transforms/DeadArgElim/dbginfo.ll | 2
-rw-r--r--  test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll | 10
-rw-r--r--  test/Transforms/GCOVProfiling/linkagename.ll | 2
-rw-r--r--  test/Transforms/GVN/unreachable_block_infinite_loop.ll | 14
-rw-r--r--  test/Transforms/GlobalDCE/complex-constantexpr.ll | 97
-rw-r--r--  test/Transforms/GlobalDCE/indirectbr.ll | 18
-rw-r--r--  test/Transforms/GlobalOpt/alias-used.ll | 42
-rw-r--r--  test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll | 16
-rw-r--r--  test/Transforms/InstCombine/add4.ll | 58
-rw-r--r--  test/Transforms/InstCombine/and-fcmp.ll | 21
-rw-r--r--  test/Transforms/InstCombine/apint-shift-simplify.ll | 15
-rw-r--r--  test/Transforms/InstCombine/debuginfo.ll | 19
-rw-r--r--  test/Transforms/InstCombine/fprintf-1.ll | 9
-rw-r--r--  test/Transforms/InstCombine/icmp.ll | 90
-rw-r--r--  test/Transforms/InstCombine/load-cmp.ll | 4
-rw-r--r--  test/Transforms/InstCombine/objsize.ll | 108
-rw-r--r--  test/Transforms/InstCombine/or.ll | 6
-rw-r--r--  test/Transforms/InstCombine/select.ll | 122
-rw-r--r--  test/Transforms/InstCombine/sub-xor.ll | 10
-rw-r--r--  test/Transforms/InstCombine/vec_demanded_elts.ll | 2
-rw-r--r--  test/Transforms/InstCombine/vec_extract_2elts.ll | 12
-rw-r--r--  test/Transforms/InstCombine/vec_extract_var_elt.ll | 18
-rw-r--r--  test/Transforms/InstCombine/vec_phi_extract.ll | 27
-rw-r--r--  test/Transforms/InstCombine/vec_shuffle.ll | 43
-rw-r--r--  test/Transforms/InstSimplify/2013-04-19-ConstantFoldingCrash.ll | 9
-rw-r--r--  test/Transforms/InstSimplify/floating-point-arithmetic.ll | 4
-rw-r--r--  test/Transforms/JumpThreading/2011-04-14-InfLoop.ll | 6
-rw-r--r--  test/Transforms/LoopRotate/simplifylatch.ll | 39
-rw-r--r--  test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll | 14
-rw-r--r--  test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll | 15
-rw-r--r--  test/Transforms/LoopUnroll/scevunroll.ll | 50
-rw-r--r--  test/Transforms/LoopUnroll/unloop.ll | 34
-rw-r--r--  test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll | 6
-rw-r--r--  test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/12-12-11-if-conv.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/2012-10-22-isconsec.ll | 7
-rw-r--r--  test/Transforms/LoopVectorize/X86/constant-vector-operand.ll | 2
-rw-r--r--  test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll | 56
-rw-r--r--  test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll | 13
-rw-r--r--  test/Transforms/LoopVectorize/X86/parallel-loops.ll | 33
-rw-r--r--  test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll | 29
-rw-r--r--  test/Transforms/LoopVectorize/bsd_regex.ll | 38
-rw-r--r--  test/Transforms/LoopVectorize/bzip_reverse_loops.ll | 13
-rw-r--r--  test/Transforms/LoopVectorize/calloc.ll | 7
-rw-r--r--  test/Transforms/LoopVectorize/dbg.value.ll | 9
-rw-r--r--  test/Transforms/LoopVectorize/float-reduction.ll | 6
-rw-r--r--  test/Transforms/LoopVectorize/i8-induction.ll | 10
-rw-r--r--  test/Transforms/LoopVectorize/if-conversion-nest.ll | 48
-rw-r--r--  test/Transforms/LoopVectorize/intrinsic.ll | 162
-rw-r--r--  test/Transforms/LoopVectorize/lcssa-crash.ll | 11
-rw-r--r--  test/Transforms/LoopVectorize/minmax_reduction.ll | 885
-rw-r--r--  test/Transforms/LoopVectorize/no_idiv_reduction.ll | 24
-rw-r--r--  test/Transforms/LoopVectorize/no_outside_user.ll | 41
-rw-r--r--  test/Transforms/LoopVectorize/phi-hang.ll | 18
-rw-r--r--  test/Transforms/LoopVectorize/reverse_induction.ll | 79
-rw-r--r--  test/Transforms/LoopVectorize/runtime-check-readonly.ll | 36
-rw-r--r--  test/Transforms/LoopVectorize/runtime-check.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/runtime-limit.ll | 84
-rw-r--r--  test/Transforms/LoopVectorize/start-non-zero.ll | 8
-rw-r--r--  test/Transforms/LoopVectorize/struct_access.ll | 6
-rw-r--r--  test/Transforms/LoopVectorize/value-ptr-bug.ll | 50
-rw-r--r--  test/Transforms/LoopVectorize/vectorize-once.ll | 7
-rw-r--r--  test/Transforms/MergeFunc/crash.ll | 46
-rw-r--r--  test/Transforms/MergeFunc/inttoptr.ll | 55
-rw-r--r--  test/Transforms/MergeFunc/vector.ll | 9
-rw-r--r--  test/Transforms/ObjCARC/apelim.ll | 4
-rw-r--r--  test/Transforms/ObjCARC/arc-annotations.ll | 242
-rw-r--r--  test/Transforms/ObjCARC/basic.ll | 942
-rw-r--r--  test/Transforms/ObjCARC/cfg-hazards.ll | 36
-rw-r--r--  test/Transforms/ObjCARC/contract-marker.ll | 2
-rw-r--r--  test/Transforms/ObjCARC/contract-storestrong.ll | 3
-rw-r--r--  test/Transforms/ObjCARC/contract-testcases.ll | 2
-rw-r--r--  test/Transforms/ObjCARC/contract.ll | 56
-rw-r--r--  test/Transforms/ObjCARC/expand.ll | 76
-rw-r--r--  test/Transforms/ObjCARC/gvn.ll | 3
-rw-r--r--  test/Transforms/ObjCARC/intrinsic-use-isolated.ll (renamed from test/Transforms/ObjCARC/clang-arc-used-intrinsic-removed-if-isolated.ll) | 0
-rw-r--r--  test/Transforms/ObjCARC/intrinsic-use.ll | 53
-rw-r--r--  test/Transforms/ObjCARC/invoke.ll | 4
-rw-r--r--  test/Transforms/ObjCARC/move-and-merge-autorelease.ll | 2
-rw-r--r--  test/Transforms/ObjCARC/retain-block-escape-analysis.ll | 96
-rw-r--r--  test/Transforms/ObjCARC/rv.ll | 50
-rw-r--r--  test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll | 65
-rw-r--r--  test/Transforms/Reassociate/pr12245.ll | 44
-rw-r--r--  test/Transforms/Reassociate/xor_reassoc.ll | 27
-rw-r--r--  test/Transforms/SLPVectorizer/X86/barriercall.ll | 32
-rw-r--r--  test/Transforms/SLPVectorizer/X86/cast.ll | 38
-rw-r--r--  test/Transforms/SLPVectorizer/X86/compare-reduce.ll | 53
-rw-r--r--  test/Transforms/SLPVectorizer/X86/diamond.ll | 78
-rw-r--r--  test/Transforms/SLPVectorizer/X86/flag.ll | 51
-rw-r--r--  test/Transforms/SLPVectorizer/X86/hoist.ll | 59
-rw-r--r--  test/Transforms/SLPVectorizer/X86/lit.local.cfg | 6
-rw-r--r--  test/Transforms/SLPVectorizer/X86/loopinvariant.ll | 69
-rw-r--r--  test/Transforms/SLPVectorizer/X86/multi_user.ll | 47
-rw-r--r--  test/Transforms/SLPVectorizer/X86/reduction.ll | 47
-rw-r--r--  test/Transforms/SLPVectorizer/X86/reduction2.ll | 32
-rw-r--r--  test/Transforms/SLPVectorizer/X86/saxpy.ll | 45
-rw-r--r--  test/Transforms/SLPVectorizer/X86/simple-loop.ll | 100
-rw-r--r--  test/Transforms/SLPVectorizer/X86/simplebb.ll | 25
-rw-r--r--  test/Transforms/SLPVectorizer/X86/vector.ll | 14
-rw-r--r--  test/Transforms/SLPVectorizer/lit.local.cfg | 1
-rw-r--r--  test/Transforms/SROA/basictest.ll | 74
-rw-r--r--  test/Transforms/SROA/vector-promotion.ll | 28
-rw-r--r--  test/Transforms/ScalarRepl/dynamic-vector-gep.ll | 167
-rw-r--r--  test/Transforms/SimplifyCFG/2003-08-17-BranchFold.ll | 4
-rw-r--r--  test/Transforms/SimplifyCFG/2003-08-17-BranchFoldOrdering.ll | 5
-rw-r--r--  test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch-dbg.ll | 5
-rw-r--r--  test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch.ll | 5
-rw-r--r--  test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll | 7
-rw-r--r--  test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll | 5
-rw-r--r--  test/Transforms/SimplifyCFG/2007-11-22-InvokeNoUnwind.ll | 4
-rw-r--r--  test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll | 3
-rw-r--r--  test/Transforms/SimplifyCFG/speculate-store.ll | 108
-rw-r--r--  test/Transforms/SimplifyCFG/switch-to-icmp.ll | 18
117 files changed, 4590 insertions, 1049 deletions
diff --git a/test/Transforms/BBVectorize/X86/loop1.ll b/test/Transforms/BBVectorize/X86/loop1.ll
index 493f23b098539..bbf565d1cc7f0 100644
--- a/test/Transforms/BBVectorize/X86/loop1.ll
+++ b/test/Transforms/BBVectorize/X86/loop1.ll
@@ -34,7 +34,15 @@ for.body: ; preds = %for.body, %entry
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 10
br i1 %exitcond, label %for.end, label %for.body
-; CHECK-NOT: <2 x double>
+; CHECK: insertelement
+; CHECK-NEXT: insertelement
+; CHECK-NEXT: fadd <2 x double>
+; CHECK-NEXT: insertelement
+; CHECK-NEXT: insertelement
+; CHECK-NEXT: fadd <2 x double>
+; CHECK-NEXT: insertelement
+; CHECK-NEXT: fmul <2 x double>
+
; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
diff --git a/test/Transforms/BBVectorize/X86/simple.ll b/test/Transforms/BBVectorize/X86/simple.ll
index 0113e38bb1c91..8abfa5f8bd29a 100644
--- a/test/Transforms/BBVectorize/X86/simple.ll
+++ b/test/Transforms/BBVectorize/X86/simple.ll
@@ -12,7 +12,11 @@ define double @test1(double %A1, double %A2, double %B1, double %B2) {
%R = fmul double %Z1, %Z2
ret double %R
; CHECK: @test1
-; CHECK-NOT: fmul <2 x double>
+; CHECK: fsub <2 x double>
+; CHECK: fmul <2 x double>
+; CHECK: fadd <2 x double>
+; CHECK: extract
+; CHECK: extract
; CHECK: ret double %R
}
@@ -63,7 +67,12 @@ define double @test2(double %A1, double %A2, double %B1, double %B2) {
%R = fmul double %Z1, %Z2
ret double %R
; CHECK: @test2
-; CHECK-NOT: fmul <2 x double>
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: fsub <2 x double>
+; CHECK: fmul <2 x double>
; CHECK: ret double %R
}
@@ -80,7 +89,15 @@ define double @test4(double %A1, double %A2, double %B1, double %B2) {
%R = fmul double %Z1, %Z2
ret double %R
; CHECK: @test4
-; CHECK-NOT: fmul <2 x double>
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: fsub <2 x double>
+; CHECK: fmul <2 x double>
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: fadd <2 x double>
; CHECK: ret double %R
}
diff --git a/test/Transforms/ConstantMerge/merge-both.ll b/test/Transforms/ConstantMerge/merge-both.ll
index b00345557c83a..316267648f1f7 100644
--- a/test/Transforms/ConstantMerge/merge-both.ll
+++ b/test/Transforms/ConstantMerge/merge-both.ll
@@ -26,6 +26,9 @@ declare void @helper([16 x i8]*)
; CHECK-NEXT: @var6 = private constant [16 x i8] c"foo1bar2foo3bar\00", align 16
; CHECK-NEXT: @var8 = private constant [16 x i8] c"foo1bar2foo3bar\00"
+@var4a = alias %struct.foobar* @var4
+@llvm.used = appending global [1 x %struct.foobar*] [%struct.foobar* @var4a], section "llvm.metadata"
+
define i32 @main() {
entry:
call void @zed(%struct.foobar* @var1, %struct.foobar* @var2)
diff --git a/test/Transforms/DeadArgElim/dbginfo.ll b/test/Transforms/DeadArgElim/dbginfo.ll
index 24448b7009ed5..d53c19c8ef587 100644
--- a/test/Transforms/DeadArgElim/dbginfo.ll
+++ b/test/Transforms/DeadArgElim/dbginfo.ll
@@ -36,7 +36,7 @@ entry:
!llvm.dbg.cu = !{!0}
-!0 = metadata !{i32 786449, i32 4, metadata !6, metadata !"clang version 3.2 (trunk 165305)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/samsonov/tmp/clang-di/test.cc] [DW_LANG_C_plus_plus]
+!0 = metadata !{i32 786449, i32 4, metadata !6, metadata !"clang version 3.2 (trunk 165305)", i1 false, metadata !"", i32 0, metadata !1, metadata !1, metadata !3, metadata !1, metadata !1, metadata !""} ; [ DW_TAG_compile_unit ] [/home/samsonov/tmp/clang-di/test.cc] [DW_LANG_C_plus_plus]
!1 = metadata !{i32 0}
!3 = metadata !{metadata !5, metadata !8, metadata !9}
!5 = metadata !{i32 786478, metadata !6, metadata !"run", metadata !"run", metadata !"", metadata !6, i32 8, metadata !7, i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, void ()* @_Z3runv, null, null, metadata !1, i32 8} ; [ DW_TAG_subprogram ] [line 8] [def] [run]
diff --git a/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll b/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
index c5cc101a5f7b3..d114e513ed2b7 100644
--- a/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
+++ b/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
@@ -11,17 +11,13 @@ _ZNSt8auto_ptrIiED1Ev.exit:
%temp.lvalue = alloca %"class.std::auto_ptr", align 8
call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
%_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
- %tmp.i.i = load i32** %_M_ptr.i.i, align 8, !tbaa !0
+ %tmp.i.i = load i32** %_M_ptr.i.i, align 8
; CHECK-NOT: store i32* null
- store i32* null, i32** %_M_ptr.i.i, align 8, !tbaa !0
+ store i32* null, i32** %_M_ptr.i.i, align 8
%_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
- store i32* %tmp.i.i, i32** %_M_ptr.i.i4, align 8, !tbaa !0
+ store i32* %tmp.i.i, i32** %_M_ptr.i.i4, align 8
; CHECK: ret void
ret void
}
declare void @_Z3barv(%"class.std::auto_ptr"* sret)
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/Transforms/GCOVProfiling/linkagename.ll b/test/Transforms/GCOVProfiling/linkagename.ll
index d1bce728e08c4..7ce4d861f0e0b 100644
--- a/test/Transforms/GCOVProfiling/linkagename.ll
+++ b/test/Transforms/GCOVProfiling/linkagename.ll
@@ -14,7 +14,7 @@ entry:
!llvm.dbg.cu = !{!0}
!llvm.gcov = !{!9}
-!0 = metadata !{i32 786449, i32 4, metadata !1, metadata !"clang version 3.3 (trunk 177323)", i1 false, metadata !"", i32 0, metadata !3, metadata !3, metadata !4, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/home/nlewycky/hello.cc] [DW_LANG_C_plus_plus]
+!0 = metadata !{i32 786449, i32 4, metadata !1, metadata !"clang version 3.3 (trunk 177323)", i1 false, metadata !"", i32 0, metadata !3, metadata !3, metadata !4, metadata !3, metadata !3, metadata !""} ; [ DW_TAG_compile_unit ] [/home/nlewycky/hello.cc] [DW_LANG_C_plus_plus]
!1 = metadata !{i32 786473, metadata !2} ; [ DW_TAG_file_type ] [/home/nlewycky/hello.cc]
!2 = metadata !{metadata !"hello.cc", metadata !"/home/nlewycky"}
!3 = metadata !{i32 0}
diff --git a/test/Transforms/GVN/unreachable_block_infinite_loop.ll b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
new file mode 100644
index 0000000000000..fe335ced5c370
--- /dev/null
+++ b/test/Transforms/GVN/unreachable_block_infinite_loop.ll
@@ -0,0 +1,14 @@
+; RUN: opt -memdep -gvn -disable-output
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0"
+
+define i32 @test2() nounwind ssp {
+entry:
+ ret i32 0
+
+unreachable_block:
+ %a = add i32 %a, 1
+ ret i32 %a
+}
+
diff --git a/test/Transforms/GlobalDCE/complex-constantexpr.ll b/test/Transforms/GlobalDCE/complex-constantexpr.ll
new file mode 100644
index 0000000000000..4bf1aeee709bc
--- /dev/null
+++ b/test/Transforms/GlobalDCE/complex-constantexpr.ll
@@ -0,0 +1,97 @@
+; RUN: opt -O2 -disable-output < %s
+; PR15714
+
+%struct.ham = type { i32 }
+
+@global5 = common global i32 0, align 4
+@global6 = common global i32 0, align 4
+@global7 = common global i32 0, align 4
+@global = common global i32 0, align 4
+@global8 = common global %struct.ham zeroinitializer, align 4
+@global9 = common global i32 0, align 4
+@global10 = common global i32 0, align 4
+@global11 = common global i32 0, align 4
+
+define void @zot12() {
+bb:
+ store i32 0, i32* @global5, align 4
+ store i32 0, i32* @global6, align 4
+ br label %bb2
+
+bb1: ; preds = %bb11
+ %tmp = load i32* @global5, align 4
+ br label %bb2
+
+bb2: ; preds = %bb1, %bb
+ %tmp3 = phi i32 [ %tmp, %bb1 ], [ 0, %bb ]
+ %tmp4 = xor i32 %tmp3, zext (i1 icmp ne (i64 ptrtoint (i32* @global5 to i64), i64 1) to i32)
+ store i32 %tmp4, i32* @global5, align 4
+ %tmp5 = icmp eq i32 %tmp3, zext (i1 icmp ne (i64 ptrtoint (i32* @global5 to i64), i64 1) to i32)
+ br i1 %tmp5, label %bb8, label %bb6
+
+bb6: ; preds = %bb2
+ %tmp7 = tail call i32 @quux13()
+ br label %bb8
+
+bb8: ; preds = %bb6, %bb2
+ %tmp9 = load i32* @global7, align 4
+ %tmp10 = icmp eq i32 %tmp9, 0
+ br i1 %tmp10, label %bb11, label %bb15
+
+bb11: ; preds = %bb8
+ %tmp12 = load i32* @global6, align 4
+ %tmp13 = add nsw i32 %tmp12, 1
+ store i32 %tmp13, i32* @global6, align 4
+ %tmp14 = icmp slt i32 %tmp13, 42
+ br i1 %tmp14, label %bb1, label %bb15
+
+bb15: ; preds = %bb11, %bb8
+ ret void
+}
+
+define i32 @quux13() {
+bb:
+ store i32 1, i32* @global5, align 4
+ ret i32 1
+}
+
+define void @wombat() {
+bb:
+ tail call void @zot12()
+ ret void
+}
+
+define void @wombat14() {
+bb:
+ tail call void @blam()
+ ret void
+}
+
+define void @blam() {
+bb:
+ store i32 ptrtoint (i32* @global to i32), i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
+ store i32 0, i32* @global9, align 4
+ %tmp = load i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
+ br label %bb1
+
+bb1: ; preds = %bb1, %bb
+ %tmp2 = phi i32 [ 0, %bb ], [ %tmp11, %bb1 ]
+ %tmp3 = phi i32 [ %tmp, %bb ], [ %tmp10, %bb1 ]
+ %tmp4 = icmp sgt i32 %tmp3, 0
+ %tmp5 = zext i1 %tmp4 to i32
+ %tmp6 = urem i32 %tmp5, 5
+ %tmp7 = mul i32 %tmp3, -80
+ %tmp8 = or i32 %tmp7, %tmp6
+ %tmp9 = icmp eq i32 %tmp8, 0
+ %tmp10 = zext i1 %tmp9 to i32
+ %tmp11 = add nsw i32 %tmp2, 1
+ %tmp12 = icmp eq i32 %tmp11, 20
+ br i1 %tmp12, label %bb13, label %bb1
+
+bb13: ; preds = %bb1
+ store i32 %tmp10, i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
+ store i32 0, i32* @global10, align 4
+ store i32 %tmp6, i32* @global11, align 4
+ store i32 20, i32* @global9, align 4
+ ret void
+}
diff --git a/test/Transforms/GlobalDCE/indirectbr.ll b/test/Transforms/GlobalDCE/indirectbr.ll
new file mode 100644
index 0000000000000..90f1ae44b1ac1
--- /dev/null
+++ b/test/Transforms/GlobalDCE/indirectbr.ll
@@ -0,0 +1,18 @@
+; RUN: opt -S -globaldce < %s | FileCheck %s
+
+@L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@test1, %L1), i8* blockaddress(@test1, %L2), i8* null], align 16
+
+; CHECK: @L = internal unnamed_addr constant
+
+define void @test1(i32 %idx) {
+entry:
+ br label %L1
+
+L1:
+ %arrayidx = getelementptr inbounds [3 x i8*]* @L, i32 0, i32 %idx
+ %l = load i8** %arrayidx
+ indirectbr i8* %l, [label %L1, label %L2]
+
+L2:
+ ret void
+}
diff --git a/test/Transforms/GlobalOpt/alias-used.ll b/test/Transforms/GlobalOpt/alias-used.ll
new file mode 100644
index 0000000000000..f91579bf0507e
--- /dev/null
+++ b/test/Transforms/GlobalOpt/alias-used.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -globalopt -S | FileCheck %s
+
+@c = global i8 42
+
+@llvm.used = appending global [3 x i8*] [i8* bitcast (void ()* @fa to i8*), i8* bitcast (void ()* @f to i8*), i8* @ca], section "llvm.metadata"
+; CHECK: @llvm.used = appending global [3 x i8*] [i8* bitcast (void ()* @fa to i8*), i8* bitcast (void ()* @f to i8*), i8* @ca], section "llvm.metadata"
+
+@llvm.compiler_used = appending global [2 x i8*] [i8* bitcast (void ()* @fa to i8*), i8* bitcast (void ()* @fa3 to i8*)], section "llvm.metadata"
+
+@sameAsUsed = global [3 x i8*] [i8* bitcast (void ()* @fa to i8*), i8* bitcast (void ()* @f to i8*), i8* @ca]
+; CHECK: @sameAsUsed = global [3 x i8*] [i8* bitcast (void ()* @f to i8*), i8* bitcast (void ()* @f to i8*), i8* @c]
+
+@other = global i32* bitcast (void ()* @fa to i32*)
+; CHECK: @other = global i32* bitcast (void ()* @f to i32*)
+
+@fa = alias internal void ()* @f
+; CHECK: @fa = alias internal void ()* @f
+
+@fa2 = alias internal void ()* @f
+; CHECK-NOT: @fa2
+
+@fa3 = alias internal void ()* @f
+; CHECK: @fa3
+
+@ca = alias internal i8* @c
+; CHECK: @ca = alias internal i8* @c
+
+define void @f() {
+ ret void
+}
+
+define i8* @g() {
+ ret i8* bitcast (void ()* @fa to i8*);
+}
+
+define i8* @g2() {
+ ret i8* bitcast (void ()* @fa2 to i8*);
+}
+
+define i8* @h() {
+ ret i8* @ca
+}
diff --git a/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll b/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
index 2ec0a32ffcbf3..ba83fe9ec0ad9 100644
--- a/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
+++ b/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
@@ -20,10 +20,10 @@ entry:
define void @fn4() nounwind uwtable ssp {
entry:
- %0 = load i32* @d, align 4, !tbaa !0
+ %0 = load i32* @d, align 4
%cmp = icmp eq i32 %0, 0
%conv = zext i1 %cmp to i32
- store i32 %conv, i32* @c, align 4, !tbaa !0
+ store i32 %conv, i32* @c, align 4
tail call void @fn3(i32 %conv) nounwind
ret void
}
@@ -31,15 +31,15 @@ entry:
define void @fn3(i32 %p1) nounwind uwtable ssp {
entry:
%and = and i32 %p1, 8
- store i32 %and, i32* @e, align 4, !tbaa !0
+ store i32 %and, i32* @e, align 4
%sub = add nsw i32 %and, -1
- store i32 %sub, i32* @f, align 4, !tbaa !0
- %0 = load i32* @a, align 4, !tbaa !0
+ store i32 %sub, i32* @f, align 4
+ %0 = load i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = load i32* @b, align 4, !tbaa !0
+ %1 = load i32* @b, align 4
%.lobit = lshr i32 %1, 31
%2 = trunc i32 %.lobit to i8
%.not = xor i8 %2, 1
@@ -55,7 +55,3 @@ if.end: ; preds = %if.else, %if.then
store i32 %storemerge, i32* @b, align 4
ret void
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/InstCombine/add4.ll b/test/Transforms/InstCombine/add4.ll
new file mode 100644
index 0000000000000..0fc0a6c1ac263
--- /dev/null
+++ b/test/Transforms/InstCombine/add4.ll
@@ -0,0 +1,58 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+;; Target triple for gep raising case below.
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i686-apple-darwin8"
+
+define float @test1(float %A, float %B, i1 %C) {
+EntryBlock:
+ ;; A*(1 - uitofp i1 C) -> select C, 0, A
+ %cf = uitofp i1 %C to float
+ %mc = fsub float 1.000000e+00, %cf
+ %p1 = fmul fast float %A, %mc
+ ret float %p1
+; CHECK: @test1
+; CHECK: select i1 %C, float -0.000000e+00, float %A
+}
+
+define float @test2(float %A, float %B, i1 %C) {
+EntryBlock:
+ ;; B*(uitofp i1 C) -> select C, B, 0
+ %cf = uitofp i1 %C to float
+ %p2 = fmul fast float %B, %cf
+ ret float %p2
+; CHECK: @test2
+; CHECK: select i1 %C, float %B, float -0.000000e+00
+}
+
+define float @test3(float %A, float %B, i1 %C) {
+EntryBlock:
+ ;; A*(1 - uitofp i1 C) + B*(uitofp i1 C) -> select C, A, B
+ %cf = uitofp i1 %C to float
+ %mc = fsub float 1.000000e+00, %cf
+ %p1 = fmul fast float %A, %mc
+ %p2 = fmul fast float %B, %cf
+ %s1 = fadd fast float %p1, %p2
+ ret float %s1
+; CHECK: @test3
+; CHECK: select i1 %C, float %B, float %A
+}
+
+; PR15952
+define float @test4(float %A, float %B, i32 %C) {
+ %cf = uitofp i32 %C to float
+ %mc = fsub float 1.000000e+00, %cf
+ %p1 = fmul fast float %A, %mc
+ ret float %p1
+; CHECK: @test4
+; CHECK: uitofp
+}
+
+define float @test5(float %A, float %B, i32 %C) {
+ %cf = uitofp i32 %C to float
+ %p2 = fmul fast float %B, %cf
+ ret float %p2
+; CHECK: @test5
+; CHECK: uitofp
+}
+
diff --git a/test/Transforms/InstCombine/and-fcmp.ll b/test/Transforms/InstCombine/and-fcmp.ll
index 40c44c09a8c01..a398307f869e9 100644
--- a/test/Transforms/InstCombine/and-fcmp.ll
+++ b/test/Transforms/InstCombine/and-fcmp.ll
@@ -77,3 +77,24 @@ define zeroext i8 @t7(float %x, float %y) nounwind {
; CHECK: fcmp uno
; CHECK-NOT: fcmp ult
}
+
+; PR15737
+define i1 @t8(float %a, double %b) {
+ %cmp = fcmp ord float %a, 0.000000e+00
+ %cmp1 = fcmp ord double %b, 0.000000e+00
+ %and = and i1 %cmp, %cmp1
+ ret i1 %and
+; CHECK: t8
+; CHECK: fcmp ord
+; CHECK: fcmp ord
+}
+
+define <2 x i1> @t9(<2 x float> %a, <2 x double> %b) {
+ %cmp = fcmp ord <2 x float> %a, zeroinitializer
+ %cmp1 = fcmp ord <2 x double> %b, zeroinitializer
+ %and = and <2 x i1> %cmp, %cmp1
+ ret <2 x i1> %and
+; CHECK: t9
+; CHECK: fcmp ord
+; CHECK: fcmp ord
+}
diff --git a/test/Transforms/InstCombine/apint-shift-simplify.ll b/test/Transforms/InstCombine/apint-shift-simplify.ll
index 818ae6659b260..14e895ad4bf66 100644
--- a/test/Transforms/InstCombine/apint-shift-simplify.ll
+++ b/test/Transforms/InstCombine/apint-shift-simplify.ll
@@ -1,11 +1,14 @@
-; RUN: opt < %s -instcombine -S | \
-; RUN: egrep "shl|lshr|ashr" | count 3
+; RUN: opt < %s -instcombine -S | FileCheck %s
define i41 @test0(i41 %A, i41 %B, i41 %C) {
%X = shl i41 %A, %C
%Y = shl i41 %B, %C
%Z = and i41 %X, %Y
ret i41 %Z
+; CHECK: @test0
+; CHECK-NEXT: and i41 %A, %B
+; CHECK-NEXT: shl i41
+; CHECK-NEXT: ret
}
define i57 @test1(i57 %A, i57 %B, i57 %C) {
@@ -13,6 +16,10 @@ define i57 @test1(i57 %A, i57 %B, i57 %C) {
%Y = lshr i57 %B, %C
%Z = or i57 %X, %Y
ret i57 %Z
+; CHECK: @test1
+; CHECK-NEXT: or i57 %A, %B
+; CHECK-NEXT: lshr i57
+; CHECK-NEXT: ret
}
define i49 @test2(i49 %A, i49 %B, i49 %C) {
@@ -20,4 +27,8 @@ define i49 @test2(i49 %A, i49 %B, i49 %C) {
%Y = ashr i49 %B, %C
%Z = xor i49 %X, %Y
ret i49 %Z
+; CHECK: @test2
+; CHECK-NEXT: xor i49 %A, %B
+; CHECK-NEXT: ashr i49
+; CHECK-NEXT: ret
}
diff --git a/test/Transforms/InstCombine/debuginfo.ll b/test/Transforms/InstCombine/debuginfo.ll
index cdbcd865117cf..a9e3de3b3f7b0 100644
--- a/test/Transforms/InstCombine/debuginfo.ll
+++ b/test/Transforms/InstCombine/debuginfo.ll
@@ -11,18 +11,18 @@ entry:
%__dest.addr = alloca i8*, align 8
%__val.addr = alloca i32, align 4
%__len.addr = alloca i64, align 8
- store i8* %__dest, i8** %__dest.addr, align 8, !tbaa !1
+ store i8* %__dest, i8** %__dest.addr, align 8
; CHECK-NOT: call void @llvm.dbg.declare
; CHECK: call void @llvm.dbg.value
call void @llvm.dbg.declare(metadata !{i8** %__dest.addr}, metadata !0), !dbg !16
- store i32 %__val, i32* %__val.addr, align 4, !tbaa !17
+ store i32 %__val, i32* %__val.addr, align 4
call void @llvm.dbg.declare(metadata !{i32* %__val.addr}, metadata !7), !dbg !18
- store i64 %__len, i64* %__len.addr, align 8, !tbaa !19
+ store i64 %__len, i64* %__len.addr, align 8
call void @llvm.dbg.declare(metadata !{i64* %__len.addr}, metadata !9), !dbg !20
- %tmp = load i8** %__dest.addr, align 8, !dbg !21, !tbaa !13
- %tmp1 = load i32* %__val.addr, align 4, !dbg !21, !tbaa !17
- %tmp2 = load i64* %__len.addr, align 8, !dbg !21, !tbaa !19
- %tmp3 = load i8** %__dest.addr, align 8, !dbg !21, !tbaa !13
+ %tmp = load i8** %__dest.addr, align 8, !dbg !21
+ %tmp1 = load i32* %__val.addr, align 4, !dbg !21
+ %tmp2 = load i64* %__len.addr, align 8, !dbg !21
+ %tmp3 = load i8** %__dest.addr, align 8, !dbg !21
%0 = call i64 @llvm.objectsize.i64(i8* %tmp3, i1 false), !dbg !21
%call = call i8* @foo(i8* %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
ret i8* %call, !dbg !21
@@ -43,13 +43,8 @@ entry:
!10 = metadata !{i32 589846, metadata !3, metadata !"size_t", metadata !2, i32 80, i64 0, i64 0, i64 0, i32 0, metadata !11} ; [ DW_TAG_typedef ]
!11 = metadata !{i32 589846, metadata !3, metadata !"__darwin_size_t", metadata !2, i32 90, i64 0, i64 0, i64 0, i32 0, metadata !12} ; [ DW_TAG_typedef ]
!12 = metadata !{i32 786468, metadata !3, metadata !"long unsigned int", null, i32 0, i64 64, i64 64, i64 0, i32 0, i32 7} ; [ DW_TAG_base_type ]
-!13 = metadata !{metadata !"any pointer", metadata !14}
-!14 = metadata !{metadata !"omnipotent char", metadata !15}
-!15 = metadata !{metadata !"Simple C/C++ TBAA", null}
!16 = metadata !{i32 78, i32 28, metadata !1, null}
-!17 = metadata !{metadata !"int", metadata !14}
!18 = metadata !{i32 78, i32 40, metadata !1, null}
-!19 = metadata !{metadata !"long", metadata !14}
!20 = metadata !{i32 78, i32 54, metadata !1, null}
!21 = metadata !{i32 80, i32 3, metadata !22, null}
!22 = metadata !{i32 786443, metadata !23, i32 80, i32 3, metadata !2, i32 7} ; [ DW_TAG_lexical_block ]
diff --git a/test/Transforms/InstCombine/fprintf-1.ll b/test/Transforms/InstCombine/fprintf-1.ll
index 39d86b4588cc8..e1dc191bd700c 100644
--- a/test/Transforms/InstCombine/fprintf-1.ll
+++ b/test/Transforms/InstCombine/fprintf-1.ll
@@ -78,3 +78,12 @@ define void @test_no_simplify2(%FILE* %fp, double %d) {
ret void
; CHECK-NEXT: ret void
}
+
+define i32 @test_no_simplify3(%FILE* %fp) {
+; CHECK: @test_no_simplify3
+ %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %1 = call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt)
+; CHECK-NEXT: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0))
+ ret i32 %1
+; CHECK-NEXT: ret i32 %1
+}
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 446c0e01dcaa5..c912a576c3d29 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -886,3 +886,93 @@ define i1 @icmp_mul0_ne0(i32 %x) {
%cmp = icmp ne i32 %mul, 0
ret i1 %cmp
}
+
+; CHECK: @icmp_sub1_sge
+; CHECK-NEXT: icmp sgt i32 %x, %y
+define i1 @icmp_sub1_sge(i32 %x, i32 %y) {
+ %sub = add nsw i32 %x, -1
+ %cmp = icmp sge i32 %sub, %y
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_add1_sgt
+; CHECK-NEXT: icmp sge i32 %x, %y
+define i1 @icmp_add1_sgt(i32 %x, i32 %y) {
+ %add = add nsw i32 %x, 1
+ %cmp = icmp sgt i32 %add, %y
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_sub1_slt
+; CHECK-NEXT: icmp sle i32 %x, %y
+define i1 @icmp_sub1_slt(i32 %x, i32 %y) {
+ %sub = add nsw i32 %x, -1
+ %cmp = icmp slt i32 %sub, %y
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_add1_sle
+; CHECK-NEXT: icmp slt i32 %x, %y
+define i1 @icmp_add1_sle(i32 %x, i32 %y) {
+ %add = add nsw i32 %x, 1
+ %cmp = icmp sle i32 %add, %y
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_add20_sge_add57
+; CHECK-NEXT: [[ADD:%[a-z0-9]+]] = add nsw i32 %y, 37
+; CHECK-NEXT: icmp sle i32 [[ADD]], %x
+define i1 @icmp_add20_sge_add57(i32 %x, i32 %y) {
+ %1 = add nsw i32 %x, 20
+ %2 = add nsw i32 %y, 57
+ %cmp = icmp sge i32 %1, %2
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_sub57_sge_sub20
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = add nsw i32 %x, -37
+; CHECK-NEXT: icmp sge i32 [[SUB]], %y
+define i1 @icmp_sub57_sge_sub20(i32 %x, i32 %y) {
+ %1 = add nsw i32 %x, -57
+ %2 = add nsw i32 %y, -20
+ %cmp = icmp sge i32 %1, %2
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_and_shl_neg_ne_0
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl i32 1, %B
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[SHL]], %A
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp eq i32 [[AND]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_and_shl_neg_ne_0(i32 %A, i32 %B) {
+ %neg = xor i32 %A, -1
+ %shl = shl i32 1, %B
+ %and = and i32 %shl, %neg
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_and_shl_neg_eq_0
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl i32 1, %B
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[SHL]], %A
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 [[AND]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_and_shl_neg_eq_0(i32 %A, i32 %B) {
+ %neg = xor i32 %A, -1
+ %shl = shl i32 1, %B
+ %and = and i32 %shl, %neg
+ %cmp = icmp eq i32 %and, 0
+ ret i1 %cmp
+}
+
+; CHECK: @icmp_add_and_shr_ne_0
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %X, 240
+; CHECK-NEXT: [[CMP:%[a-z0-9]+]] = icmp ne i32 [[AND]], 224
+; CHECK-NEXT: ret i1 [[CMP]]
+define i1 @icmp_add_and_shr_ne_0(i32 %X) {
+ %shr = lshr i32 %X, 4
+ %and = and i32 %shr, 15
+ %add = add i32 %and, -14
+ %tobool = icmp ne i32 %add, 0
+ ret i1 %tobool
+}
diff --git a/test/Transforms/InstCombine/load-cmp.ll b/test/Transforms/InstCombine/load-cmp.ll
index d88188e4109cd..869215cb58d47 100644
--- a/test/Transforms/InstCombine/load-cmp.ll
+++ b/test/Transforms/InstCombine/load-cmp.ll
@@ -100,8 +100,8 @@ define i1 @test8(i32 %X) {
%S = icmp eq i16 %R, 0
ret i1 %S
; CHECK: @test8
-; CHECK-NEXT: add i32 %X, -8
-; CHECK-NEXT: icmp ult i32 {{.*}}, 2
+; CHECK-NEXT: and i32 %X, -2
+; CHECK-NEXT: icmp eq i32 {{.*}}, 8
; CHECK-NEXT: ret i1
}
diff --git a/test/Transforms/InstCombine/objsize.ll b/test/Transforms/InstCombine/objsize.ll
index 0ead9d123749c..122c6501a3f50 100644
--- a/test/Transforms/InstCombine/objsize.ll
+++ b/test/Transforms/InstCombine/objsize.ll
@@ -257,114 +257,6 @@ return:
ret i32 7
}
-declare noalias i8* @valloc(i32) nounwind
-
-; CHECK: @test14
-; CHECK: ret i32 6
-define i32 @test14(i32 %a) nounwind {
- switch i32 %a, label %sw.default [
- i32 1, label %sw.bb
- i32 2, label %sw.bb1
- ]
-
-sw.bb:
- %call = tail call noalias i8* @malloc(i32 6) nounwind
- br label %sw.epilog
-
-sw.bb1:
- %call2 = tail call noalias i8* @calloc(i32 3, i32 2) nounwind
- br label %sw.epilog
-
-sw.default:
- %call3 = tail call noalias i8* @valloc(i32 6) nounwind
- br label %sw.epilog
-
-sw.epilog:
- %b.0 = phi i8* [ %call3, %sw.default ], [ %call2, %sw.bb1 ], [ %call, %sw.bb ]
- %1 = tail call i32 @llvm.objectsize.i32(i8* %b.0, i1 false)
- ret i32 %1
-}
-
-; CHECK: @test15
-; CHECK: llvm.objectsize
-define i32 @test15(i32 %a) nounwind {
- switch i32 %a, label %sw.default [
- i32 1, label %sw.bb
- i32 2, label %sw.bb1
- ]
-
-sw.bb:
- %call = tail call noalias i8* @malloc(i32 3) nounwind
- br label %sw.epilog
-
-sw.bb1:
- %call2 = tail call noalias i8* @calloc(i32 2, i32 1) nounwind
- br label %sw.epilog
-
-sw.default:
- %call3 = tail call noalias i8* @valloc(i32 3) nounwind
- br label %sw.epilog
-
-sw.epilog:
- %b.0 = phi i8* [ %call3, %sw.default ], [ %call2, %sw.bb1 ], [ %call, %sw.bb ]
- %1 = tail call i32 @llvm.objectsize.i32(i8* %b.0, i1 false)
- ret i32 %1
-}
-
-; CHECK: @test16
-; CHECK: llvm.objectsize
-define i32 @test16(i8* %a, i32 %n) nounwind {
- %b = alloca [5 x i8], align 1
- %c = alloca [5 x i8], align 1
- switch i32 %n, label %sw.default [
- i32 1, label %sw.bb
- i32 2, label %sw.bb1
- ]
-
-sw.bb:
- %bp = bitcast [5 x i8]* %b to i8*
- br label %sw.epilog
-
-sw.bb1:
- %cp = bitcast [5 x i8]* %c to i8*
- br label %sw.epilog
-
-sw.default:
- br label %sw.epilog
-
-sw.epilog:
- %phi = phi i8* [ %a, %sw.default ], [ %cp, %sw.bb1 ], [ %bp, %sw.bb ]
- %sz = call i32 @llvm.objectsize.i32(i8* %phi, i1 false)
- ret i32 %sz
-}
-
-; CHECK: @test17
-; CHECK: ret i32 5
-define i32 @test17(i32 %n) nounwind {
- %b = alloca [5 x i8], align 1
- %c = alloca [5 x i8], align 1
- %bp = bitcast [5 x i8]* %b to i8*
- switch i32 %n, label %sw.default [
- i32 1, label %sw.bb
- i32 2, label %sw.bb1
- ]
-
-sw.bb:
- br label %sw.epilog
-
-sw.bb1:
- %cp = bitcast [5 x i8]* %c to i8*
- br label %sw.epilog
-
-sw.default:
- br label %sw.epilog
-
-sw.epilog:
- %phi = phi i8* [ %bp, %sw.default ], [ %cp, %sw.bb1 ], [ %bp, %sw.bb ]
- %sz = call i32 @llvm.objectsize.i32(i8* %phi, i1 false)
- ret i32 %sz
-}
-
@globalalias = alias internal [60 x i8]* @a
; CHECK: @test18
diff --git a/test/Transforms/InstCombine/or.ll b/test/Transforms/InstCombine/or.ll
index bde2a54048add..7226bd93996f0 100644
--- a/test/Transforms/InstCombine/or.ll
+++ b/test/Transforms/InstCombine/or.ll
@@ -178,12 +178,12 @@ define i1 @test18(i32 %A) {
define i1 @test19(i32 %A) {
%B = icmp eq i32 %A, 50
%C = icmp eq i32 %A, 51
- ;; (A-50) < 2
+ ;; (A&-2) == 50
%D = or i1 %B, %C
ret i1 %D
; CHECK: @test19
-; CHECK: add i32
-; CHECK: icmp ult
+; CHECK: and i32
+; CHECK: icmp eq
; CHECK: ret i1
}
diff --git a/test/Transforms/InstCombine/select.ll b/test/Transforms/InstCombine/select.ll
index cc3aacdce3c87..c72a6f7c49c6c 100644
--- a/test/Transforms/InstCombine/select.ll
+++ b/test/Transforms/InstCombine/select.ll
@@ -863,3 +863,125 @@ while.body:
; CHECK: @test64
; CHECK-NOT: select
}
+
+; CHECK: @select_icmp_eq_and_1_0_or_2
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl i32 %x, 1
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[SHL]], 2
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[AND]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_eq_and_1_0_or_2(i32 %x, i32 %y) {
+ %and = and i32 %x, 1
+ %cmp = icmp eq i32 %and, 0
+ %or = or i32 %y, 2
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_eq_and_32_0_or_8
+; CHECK-NEXT: [[LSHR:%[a-z0-9]+]] = lshr i32 %x, 2
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[LSHR]], 8
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[AND]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_eq_and_32_0_or_8(i32 %x, i32 %y) {
+ %and = and i32 %x, 32
+ %cmp = icmp eq i32 %and, 0
+ %or = or i32 %y, 8
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_ne_0_and_4096_or_4096
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 4096
+; CHECK-NEXT: [[XOR:%[a-z0-9]+]] = xor i32 [[AND]], 4096
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[XOR]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_ne_0_and_4096_or_4096(i32 %x, i32 %y) {
+ %and = and i32 %x, 4096
+ %cmp = icmp ne i32 0, %and
+ %or = or i32 %y, 4096
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_eq_and_4096_0_or_4096
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 %x, 4096
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[AND]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_eq_and_4096_0_or_4096(i32 %x, i32 %y) {
+ %and = and i32 %x, 4096
+ %cmp = icmp eq i32 %and, 0
+ %or = or i32 %y, 4096
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_eq_0_and_1_or_1
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i64 %x, 1
+; CHECK-NEXT: [[ZEXT:%[a-z0-9]+]] = trunc i64 [[AND]] to i32
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[XOR]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_eq_0_and_1_or_1(i64 %x, i32 %y) {
+ %and = and i64 %x, 1
+ %cmp = icmp eq i64 %and, 0
+ %or = or i32 %y, 1
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_ne_0_and_4096_or_32
+; CHECK-NEXT: [[LSHR:%[a-z0-9]+]] = lshr i32 %x, 7
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[LSHR]], 32
+; CHECK-NEXT: [[XOR:%[a-z0-9]+]] = xor i32 [[AND]], 32
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[XOR]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_ne_0_and_4096_or_32(i32 %x, i32 %y) {
+ %and = and i32 %x, 4096
+ %cmp = icmp ne i32 0, %and
+ %or = or i32 %y, 32
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_ne_0_and_32_or_4096
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl i32 %x, 7
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[SHL]], 4096
+; CHECK-NEXT: [[XOR:%[a-z0-9]+]] = xor i32 [[AND]], 4096
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[XOR]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_ne_0_and_32_or_4096(i32 %x, i32 %y) {
+ %and = and i32 %x, 32
+ %cmp = icmp ne i32 0, %and
+ %or = or i32 %y, 4096
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
+
+; CHECK: @select_icmp_ne_0_and_1073741824_or_8
+; CHECK-NEXT: [[LSHR:%[a-z0-9]+]] = lshr i32 %x, 27
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i32 [[LSHR]], 8
+; CHECK-NEXT: [[TRUNC:%[a-z0-9]+]] = trunc i32 [[AND]] to i8
+; CHECK-NEXT: [[XOR:%[a-z0-9]+]] = xor i8 [[TRUNC]], 8
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i8 [[XOR]], %y
+; CHECK-NEXT: ret i8 [[OR]]
+define i8 @select_icmp_ne_0_and_1073741824_or_8(i32 %x, i8 %y) {
+ %and = and i32 %x, 1073741824
+ %cmp = icmp ne i32 0, %and
+ %or = or i8 %y, 8
+ %select = select i1 %cmp, i8 %y, i8 %or
+ ret i8 %select
+}
+
+; CHECK: @select_icmp_ne_0_and_8_or_1073741824
+; CHECK-NEXT: [[AND:%[a-z0-9]+]] = and i8 %x, 8
+; CHECK-NEXT: [[ZEXT:%[a-z0-9]+]] = zext i8 [[AND]] to i32
+; CHECK-NEXT: [[SHL:%[a-z0-9]+]] = shl nuw nsw i32 [[ZEXT]], 27
+; CHECK-NEXT: [[XOR:%[a-z0-9]+]] = xor i32 [[SHL]], 1073741824
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[XOR]], %y
+; CHECK-NEXT: ret i32 [[OR]]
+define i32 @select_icmp_ne_0_and_8_or_1073741824(i8 %x, i32 %y) {
+ %and = and i8 %x, 8
+ %cmp = icmp ne i8 0, %and
+ %or = or i32 %y, 1073741824
+ %select = select i1 %cmp, i32 %y, i32 %or
+ ret i32 %select
+}
diff --git a/test/Transforms/InstCombine/sub-xor.ll b/test/Transforms/InstCombine/sub-xor.ll
index 279e4aca9de4f..1d14852bc8039 100644
--- a/test/Transforms/InstCombine/sub-xor.ll
+++ b/test/Transforms/InstCombine/sub-xor.ll
@@ -35,3 +35,13 @@ define i32 @test3(i32 %x) nounwind {
; CHECK-NEXT: sub i32 73, %and
; CHECK-NEXT: ret
}
+
+define i32 @test4(i32 %x) nounwind {
+ %sub = xor i32 %x, 2147483648
+ %add = add i32 %sub, 42
+ ret i32 %add
+
+; CHECK: @test4
+; CHECK-NEXT: add i32 %x, -2147483606
+; CHECK-NEXT: ret
+}
diff --git a/test/Transforms/InstCombine/vec_demanded_elts.ll b/test/Transforms/InstCombine/vec_demanded_elts.ll
index 2d90750a2f1e6..0019a57627cb2 100644
--- a/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -196,7 +196,7 @@ define <4 x float> @test_select(float %f, float %g) {
; CHECK-NOT: insertelement
; CHECK: %a3 = insertelement <4 x float> %a0, float 3.000000e+00, i32 3
; CHECK-NOT: insertelement
-; CHECK: shufflevector <4 x float> %a3, <4 x float> <float undef, float 4.000000e+00, float 5.000000e+00, float undef>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
+; CHECK: %ret = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x float> %a3, <4 x float> <float undef, float 4.000000e+00, float 5.000000e+00, float undef>
%a0 = insertelement <4 x float> undef, float %f, i32 0
%a1 = insertelement <4 x float> %a0, float 1.000000e+00, i32 1
%a2 = insertelement <4 x float> %a1, float 2.000000e+00, i32 2
diff --git a/test/Transforms/InstCombine/vec_extract_2elts.ll b/test/Transforms/InstCombine/vec_extract_2elts.ll
new file mode 100644
index 0000000000000..5972340d60a91
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_extract_2elts.ll
@@ -0,0 +1,12 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @test(<4 x i32> %v, i64 *%r1, i64 *%r2) {
+;CHECK: %1 = extractelement <4 x i32> %v, i32 0
+;CHECK: %2 = zext i32 %1 to i64
+ %1 = zext <4 x i32> %v to <4 x i64>
+ %2 = extractelement <4 x i64> %1, i32 0
+ store i64 %2, i64 *%r1
+ store i64 %2, i64 *%r2
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/vec_extract_var_elt.ll b/test/Transforms/InstCombine/vec_extract_var_elt.ll
new file mode 100644
index 0000000000000..3c982873e2881
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_extract_var_elt.ll
@@ -0,0 +1,18 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @test (float %b, <8 x float> * %p) {
+; CHECK: extractelement
+; CHECK: fptosi
+ %1 = load <8 x float> * %p
+ %2 = bitcast <8 x float> %1 to <8 x i32>
+ %3 = bitcast <8 x i32> %2 to <8 x float>
+ %a = fptosi <8 x float> %3 to <8 x i32>
+ %4 = fptosi float %b to i32
+ %5 = add i32 %4, -2
+ %6 = extractelement <8 x i32> %a, i32 %5
+ %7 = insertelement <8 x i32> undef, i32 %6, i32 7
+ %8 = sitofp <8 x i32> %7 to <8 x float>
+ store <8 x float> %8, <8 x float>* %p
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/vec_phi_extract.ll b/test/Transforms/InstCombine/vec_phi_extract.ll
new file mode 100644
index 0000000000000..2f10fc2c1ed28
--- /dev/null
+++ b/test/Transforms/InstCombine/vec_phi_extract.ll
@@ -0,0 +1,27 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define void @f(i64 %val, i32 %limit, i32 *%ptr) {
+;CHECK: %0 = trunc i64
+;CHECK: %1 = phi i32
+entry:
+ %tempvector = insertelement <16 x i64> undef, i64 %val, i32 0
+ %vector = shufflevector <16 x i64> %tempvector, <16 x i64> undef, <16 x i32> zeroinitializer
+ %0 = add <16 x i64> %vector, <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>
+ %1 = trunc <16 x i64> %0 to <16 x i32>
+ br label %loop
+
+loop:
+ %2 = phi <16 x i32> [ %1, %entry ], [ %inc, %loop ]
+ %elt = extractelement <16 x i32> %2, i32 0
+ %end = icmp ult i32 %elt, %limit
+ %3 = add i32 10, %elt
+ %4 = sext i32 %elt to i64
+ %5 = getelementptr i32* %ptr, i64 %4
+ store i32 %3, i32* %5
+ %inc = add <16 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ br i1 %end, label %loop, label %ret
+
+ret:
+ ret void
+}
+
diff --git a/test/Transforms/InstCombine/vec_shuffle.ll b/test/Transforms/InstCombine/vec_shuffle.ll
index 14f532195d7c3..8f78c2e6bd505 100644
--- a/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/test/Transforms/InstCombine/vec_shuffle.ll
@@ -153,46 +153,3 @@ define <8 x i8> @test12a(<8 x i8> %tmp6, <8 x i8> %tmp2) nounwind {
ret <8 x i8> %tmp3
}
-; We should form a shuffle out of a select with constant condition.
-define <4 x i16> @test13a(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: @test13a
-; CHECK-NEXT: shufflevector <4 x i16> %lhs, <4 x i16> %rhs, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-NEXT: ret
- %A = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>,
- <4 x i16> %lhs, <4 x i16> %rhs
- ret <4 x i16> %A
-}
-
-define <4 x i16> @test13b(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: @test13b
-; CHECK-NEXT: ret <4 x i16> %lhs
- %A = select <4 x i1> <i1 true, i1 undef, i1 true, i1 true>,
- <4 x i16> %lhs, <4 x i16> %rhs
- ret <4 x i16> %A
-}
-
-define <4 x i16> @test13c(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: @test13c
-; CHECK-NEXT: shufflevector <4 x i16> %lhs, <4 x i16> %rhs, <4 x i32> <i32 0, i32 undef, i32 2, i32 7>
-; CHECK-NEXT: ret
- %A = select <4 x i1> <i1 true, i1 undef, i1 true, i1 false>,
- <4 x i16> %lhs, <4 x i16> %rhs
- ret <4 x i16> %A
-}
-
-define <4 x i16> @test13d(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: @test13d
-; CHECK: select
-; CHECK-NEXT: ret
- %A = select <4 x i1> <i1 true, i1 icmp ugt (<4 x i16>(<4 x i16>, <4 x i16>)* @test13a, <4 x i16>(<4 x i16>, <4 x i16>)* @test13b), i1 true, i1 false>,
- <4 x i16> %lhs, <4 x i16> %rhs
- ret <4 x i16> %A
-}
-
-define <4 x i16> @test13e(<4 x i16> %lhs, <4 x i16> %rhs) {
-; CHECK: @test13e
-; CHECK-NEXT: ret <4 x i16> %rhs
- %A = select <4 x i1> <i1 false, i1 false, i1 false, i1 false>,
- <4 x i16> %lhs, <4 x i16> %rhs
- ret <4 x i16> %A
-}
diff --git a/test/Transforms/InstSimplify/2013-04-19-ConstantFoldingCrash.ll b/test/Transforms/InstSimplify/2013-04-19-ConstantFoldingCrash.ll
new file mode 100644
index 0000000000000..164751784a65f
--- /dev/null
+++ b/test/Transforms/InstSimplify/2013-04-19-ConstantFoldingCrash.ll
@@ -0,0 +1,9 @@
+; RUN: opt < %s -instsimplify
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; PR15791
+define <2 x i64> @test1() {
+ %a = and <2 x i64> undef, bitcast (<4 x i32> <i32 undef, i32 undef, i32 undef, i32 2147483647> to <2 x i64>)
+ ret <2 x i64> %a
+}
diff --git a/test/Transforms/InstSimplify/floating-point-arithmetic.ll b/test/Transforms/InstSimplify/floating-point-arithmetic.ll
index f9c364cade36f..91ce26324b818 100644
--- a/test/Transforms/InstSimplify/floating-point-arithmetic.ll
+++ b/test/Transforms/InstSimplify/floating-point-arithmetic.ll
@@ -14,7 +14,7 @@ define float @fsub_0_0_x(float %a) {
; CHECK: @fsub_x_0
define float @fsub_x_0(float %a) {
%ret = fsub float %a, 0.0
-; CHECK ret float %a
+; CHECK: ret float %a
ret float %ret
}
@@ -22,7 +22,7 @@ define float @fsub_x_0(float %a) {
; CHECK: @fadd_x_n0
define float @fadd_x_n0(float %a) {
%ret = fadd float %a, -0.0
-; CHECK ret float %a
+; CHECK: ret float %a
ret float %ret
}
diff --git a/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll b/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
index e80bae578a93e..86a1321c35437 100644
--- a/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
+++ b/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
@@ -15,7 +15,7 @@ for.cond1177:
br i1 %cmp1179, label %for.cond1177, label %land.rhs1320
land.rhs1320:
- %tmp1324 = load volatile i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1, !tbaa !0
+ %tmp1324 = load volatile i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1
br label %if.end.i
if.end.i:
@@ -25,7 +25,3 @@ if.end.i:
return:
ret void
}
-
-!0 = metadata !{metadata !"long long", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/Transforms/LoopRotate/simplifylatch.ll b/test/Transforms/LoopRotate/simplifylatch.ll
index f4227245f74bc..037bb2042f95b 100644
--- a/test/Transforms/LoopRotate/simplifylatch.ll
+++ b/test/Transforms/LoopRotate/simplifylatch.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S < %s -loop-rotate -verify-dom-info -verify-loop-info | FileCheck %s
+; RUN: opt -S < %s -loop-rotate -licm -verify-dom-info -verify-loop-info | FileCheck %s
; PR2624 unroll multiple exits
@mode_table = global [4 x i32] zeroinitializer ; <[4 x i32]*> [#uses=1]
@@ -37,3 +37,40 @@ bb5: ; preds = %bb2
declare i32 @fegetround()
declare void @raise_exception() noreturn
+
+;CHECK: for.body.lr.ph:
+;CHECK-NEXT: %arrayidx1 = getelementptr inbounds i8* %CurPtr, i64 0
+;CHECK-NEXT: %0 = load i8* %arrayidx1, align 1
+;CHECK-NEXT: %conv2 = sext i8 %0 to i32
+;CHECK-NEXT: br label %for.body
+
+define i32 @foo(i8* %CurPtr, i32 %a) #0 {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %entry
+ %i.0 = phi i32 [ 1, %entry ], [ %inc, %for.inc ]
+ %cmp = icmp ne i32 %i.0, %a
+ br i1 %cmp, label %for.body, label %return
+
+for.body: ; preds = %for.cond
+ %idxprom = zext i32 %i.0 to i64
+ %arrayidx = getelementptr inbounds i8* %CurPtr, i64 %idxprom
+ %0 = load i8* %arrayidx, align 1
+ %conv = sext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds i8* %CurPtr, i64 0
+ %1 = load i8* %arrayidx1, align 1
+ %conv2 = sext i8 %1 to i32
+ %cmp3 = icmp ne i32 %conv, %conv2
+ br i1 %cmp3, label %return, label %for.inc
+
+for.inc: ; preds = %for.body
+ %inc = add i32 %i.0, 1
+ br label %for.cond
+
+return: ; preds = %for.cond, %for.body
+ %retval.0 = phi i32 [ 0, %for.body ], [ 1, %for.cond ]
+ ret i32 %retval.0
+}
+
+attributes #0 = { nounwind uwtable }
diff --git a/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll b/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
index a1222083abc93..8bac639ae559d 100644
--- a/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
+++ b/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
@@ -18,11 +18,11 @@ define i32 @main() nounwind uwtable ssp {
entry:
%l_2 = alloca [1 x i32], align 4
%arrayidx = getelementptr inbounds [1 x i32]* %l_2, i64 0, i64 0
- store i32 0, i32* %arrayidx, align 4, !tbaa !0
- %tmp = load i32* @g_3, align 4, !tbaa !0
+ store i32 0, i32* %arrayidx, align 4
+ %tmp = load i32* @g_3, align 4
%idxprom = sext i32 %tmp to i64
%arrayidx1 = getelementptr inbounds [1 x i32]* %l_2, i64 0, i64 %idxprom
- %tmp1 = load i32* %arrayidx1, align 4, !tbaa !0
+ %tmp1 = load i32* %arrayidx1, align 4
%conv.i.i = and i32 %tmp1, 65535
%tobool.i.i.i = icmp ne i32 %tmp, 0
br label %codeRepl
@@ -48,7 +48,7 @@ for.cond.i.i.us: ; preds = %for.inc.i.i.us, %co
for.inc.i.i.us: ; preds = %for.body.i.i.us
%add.i.i.us = add nsw i32 %tmp2, 1
- store i32 %add.i.i.us, i32* @g_752, align 4, !tbaa !0
+ store i32 %add.i.i.us, i32* @g_752, align 4
br label %for.cond.i.i.us
for.body.i.i.us: ; preds = %codeRepl5.us
@@ -78,13 +78,9 @@ for.body.i.i: ; preds = %codeRepl5
for.inc.i.i: ; preds = %for.body.i.i
%add.i.i = add nsw i32 %tmp3, 1
- store i32 %add.i.i, i32* @g_752, align 4, !tbaa !0
+ store i32 %add.i.i, i32* @g_752, align 4
br label %for.cond.i.i
func_4.exit: ; No predecessors!
ret i32 0
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
index b5124ea6f30dd..5d728b528ea5b 100644
--- a/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
+++ b/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
@@ -50,7 +50,7 @@ declare %s* @getstruct() nounwind
; CHECK: ldr{{.*}}lsl #2
define i32 @main() nounwind ssp {
entry:
- %v0 = load i32* @ncol, align 4, !tbaa !0
+ %v0 = load i32* @ncol, align 4
%v1 = tail call i32* @getptr() nounwind
%cmp10.i = icmp eq i32 %v0, 0
br label %while.cond.outer
@@ -64,12 +64,12 @@ while.cond:
br label %while.body
while.body:
- %v3 = load i32* @ncol, align 4, !tbaa !0
+ %v3 = load i32* @ncol, align 4
br label %end_of_chain
end_of_chain:
%state.i = getelementptr inbounds %s* %call18, i32 0, i32 0
- %v4 = load i32** %state.i, align 4, !tbaa !3
+ %v4 = load i32** %state.i, align 4
br label %while.cond.i.i
while.cond.i.i:
@@ -80,9 +80,9 @@ while.cond.i.i:
land.rhs.i.i:
%arrayidx.i.i = getelementptr inbounds i32* %v4, i32 %dec.i.i
- %v5 = load i32* %arrayidx.i.i, align 4, !tbaa !0
+ %v5 = load i32* %arrayidx.i.i, align 4
%arrayidx1.i.i = getelementptr inbounds i32* %v1, i32 %dec.i.i
- %v6 = load i32* %arrayidx1.i.i, align 4, !tbaa !0
+ %v6 = load i32* %arrayidx1.i.i, align 4
%cmp.i.i = icmp eq i32 %v5, %v6
br i1 %cmp.i.i, label %while.cond.i.i, label %equal_data.exit.i
@@ -95,8 +95,3 @@ where.exit:
while.end.i:
ret i32 %v3
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"any pointer", metadata !1}
diff --git a/test/Transforms/LoopUnroll/scevunroll.ll b/test/Transforms/LoopUnroll/scevunroll.ll
index 99b3a7d861971..308a0363165c9 100644
--- a/test/Transforms/LoopUnroll/scevunroll.ll
+++ b/test/Transforms/LoopUnroll/scevunroll.ll
@@ -66,13 +66,16 @@ exit2:
; SCEV properly unrolls multi-exit loops.
;
+; SCEV cannot currently unroll this loop.
+; It should ideally detect a trip count of 5.
+; rdar:14038809 [SCEV]: Optimize trip count computation for multi-exit loops.
; CHECK: @multiExit
-; CHECK: getelementptr i32* %base, i32 10
-; CHECK-NEXT: load i32*
-; CHECK: br i1 false, label %l2.10, label %exit1
-; CHECK: l2.10:
-; CHECK-NOT: br
-; CHECK: ret i32
+; CHECKFIXME: getelementptr i32* %base, i32 10
+; CHECKFIXME-NEXT: load i32*
+; CHECKFIXME: br i1 false, label %l2.10, label %exit1
+; CHECKFIXME: l2.10:
+; CHECKFIXME-NOT: br
+; CHECKFIXME: ret i32
define i32 @multiExit(i32* %base) nounwind {
entry:
br label %l1
@@ -170,3 +173,38 @@ for.body87:
br label %for.body87
}
+; PR16130: clang produces incorrect code with loop/expression at -O2
+; rdar:14036816 loop-unroll makes assumptions about undefined behavior
+;
+; The loop latch is assumed to exit after the first iteration because
+; of the induction variable's NSW flag. However, the loop latch's
+; equality test is skipped and the loop exits after the second
+; iteration via the early exit. So loop unrolling cannot assume that
+; the loop latch's exit count of zero is an upper bound on the number
+; of iterations.
+;
+; CHECK: @nsw_latch
+; CHECK: for.body:
+; CHECK: %b.03 = phi i32 [ 0, %entry ], [ %add, %for.cond ]
+; CHECK: return:
+; CHECK: %b.03.lcssa = phi i32 [ %b.03, %for.body ], [ %b.03, %for.cond ]
+define void @nsw_latch(i32* %a) nounwind {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.cond, %entry
+ %b.03 = phi i32 [ 0, %entry ], [ %add, %for.cond ]
+ %tobool = icmp eq i32 %b.03, 0
+ %add = add nsw i32 %b.03, 8
+ br i1 %tobool, label %for.cond, label %return
+
+for.cond: ; preds = %for.body
+ %cmp = icmp eq i32 %add, 13
+ br i1 %cmp, label %return, label %for.body
+
+return: ; preds = %for.body, %for.cond
+ %b.03.lcssa = phi i32 [ %b.03, %for.body ], [ %b.03, %for.cond ]
+ %retval.0 = phi i32 [ 1, %for.body ], [ 0, %for.cond ]
+ store i32 %b.03.lcssa, i32* %a, align 4
+ ret void
+}
diff --git a/test/Transforms/LoopUnroll/unloop.ll b/test/Transforms/LoopUnroll/unloop.ll
index 5a9cacda443cb..9a938cc28774e 100644
--- a/test/Transforms/LoopUnroll/unloop.ll
+++ b/test/Transforms/LoopUnroll/unloop.ll
@@ -21,8 +21,8 @@ outer:
inner:
%iv = phi i32 [ 0, %outer ], [ %inc, %tail ]
%inc = add i32 %iv, 1
- %wbucond = call zeroext i1 @check()
- br i1 %wbucond, label %outer.backedge, label %tail
+ call zeroext i1 @check()
+ br i1 true, label %outer.backedge, label %tail
tail:
br i1 false, label %inner, label %exit
@@ -126,25 +126,27 @@ return:
; Ensure that only the middle loop is removed and rely on verify-loopinfo to
; check soundness.
;
-; CHECK: @unloopDeepNested
+; This test must be disabled until trip count computation can be optimized...
+; rdar:14038809 [SCEV]: Optimize trip count computation for multi-exit loops.
+; CHECKFIXME: @unloopDeepNested
; Inner-inner loop control.
-; CHECK: while.cond.us.i:
-; CHECK: br i1 %cmp.us.i, label %next_data.exit, label %while.body.us.i
-; CHECK: if.then.us.i:
-; CHECK: br label %while.cond.us.i
+; CHECKFIXME: while.cond.us.i:
+; CHECKFIXME: br i1 %cmp.us.i, label %next_data.exit, label %while.body.us.i
+; CHECKFIXME: if.then.us.i:
+; CHECKFIXME: br label %while.cond.us.i
; Inner loop tail.
-; CHECK: if.else.i:
-; CHECK: br label %while.cond.outer.i
+; CHECKFIXME: if.else.i:
+; CHECKFIXME: br label %while.cond.outer.i
; Middle loop control (removed).
-; CHECK: valid_data.exit:
-; CHECK-NOT: br
-; CHECK: %cmp = call zeroext i1 @check()
+; CHECKFIXME: valid_data.exit:
+; CHECKFIXME-NOT: br
+; CHECKFIXME: %cmp = call zeroext i1 @check()
; Outer loop control.
-; CHECK: copy_data.exit:
-; CHECK: br i1 %cmp38, label %if.then39, label %while.cond.outer
+; CHECKFIXME: copy_data.exit:
+; CHECKFIXME: br i1 %cmp38, label %if.then39, label %while.cond.outer
; Outer-outer loop tail.
-; CHECK: while.cond.outer.outer.backedge:
-; CHECK: br label %while.cond.outer.outer
+; CHECKFIXME: while.cond.outer.outer.backedge:
+; CHECKFIXME: br label %while.cond.outer.outer
define void @unloopDeepNested() nounwind {
for.cond8.preheader.i:
%cmp113.i = call zeroext i1 @check()
diff --git a/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll b/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
index 0e3103dc6db71..e8feef383b051 100644
--- a/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
+++ b/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
@@ -24,7 +24,7 @@ if.then: ; preds = %for.body
%idxprom = sext i32 %inc1 to i64
%array_ = getelementptr inbounds %class.MyContainer.1.3.19.29* %this, i32 0, i32 0
%arrayidx = getelementptr inbounds [6 x %class.MyMemVarClass.0.2.18.28*]* %array_, i32 0, i64 %idxprom
- %tmp4 = load %class.MyMemVarClass.0.2.18.28** %arrayidx, align 8, !tbaa !0
+ %tmp4 = load %class.MyMemVarClass.0.2.18.28** %arrayidx, align 8
%isnull = icmp eq %class.MyMemVarClass.0.2.18.28* %tmp4, null
br i1 %isnull, label %for.inc, label %delete.notnull
@@ -61,7 +61,3 @@ declare void @_ZN13MyMemVarClassD1Ev(%class.MyMemVarClass.0.2.18.28*)
declare i32 @__gxx_personality_v0(...)
declare void @_ZdlPv(i8*) nounwind
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll b/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
index 261876df2aa0d..a6c0d83484b3a 100644
--- a/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
+++ b/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
@@ -45,10 +45,10 @@ for.end: ; preds = %invoke.cont6
define void @_ZN1DptEv(%class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this) uwtable ssp align 2 {
entry:
%this.addr = alloca %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379*, align 8
- store %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr, align 8, !tbaa !0
+ store %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr, align 8
%this1 = load %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr
%px = getelementptr inbounds %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this1, i32 0, i32 0
- %0 = load %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376** %px, align 8, !tbaa !0
+ %0 = load %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376** %px, align 8
%tobool = icmp ne %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376* %0, null
br i1 %tobool, label %cond.end, label %cond.false
@@ -95,7 +95,3 @@ entry:
}
declare void @_Z10__assert13v() noreturn
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
diff --git a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
index 2dd7fe34a70bf..bab6300f2e7f9 100644
--- a/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
+++ b/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
@@ -15,7 +15,7 @@ entry:
for.body: ; preds = %entry, %if.end
%indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
@@ -29,7 +29,7 @@ if.then: ; preds = %for.body
if.end: ; preds = %for.body, %if.then
%z.0 = phi i32 [ %add1, %if.then ], [ 9, %for.body ]
- store i32 %z.0, i32* %arrayidx, align 4, !tbaa !0
+ store i32 %z.0, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %x
@@ -38,7 +38,3 @@ if.end: ; preds = %for.body, %if.then
for.end: ; preds = %if.end, %entry
ret i32 undef
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll b/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
index 405582c408999..ae9f998fee268 100644
--- a/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
+++ b/test/Transforms/LoopVectorize/2012-10-22-isconsec.ll
@@ -24,7 +24,7 @@ entry:
%3 = shl nsw i64 %indvars.iv, 2
%4 = getelementptr inbounds i8* %1, i64 %3
%5 = bitcast i8* %4 to float*
- store float %value, float* %5, align 4, !tbaa !0
+ store float %value, float* %5, align 4
%indvars.iv.next = add i64 %indvars.iv, %2
%6 = trunc i64 %indvars.iv.next to i32
%7 = icmp slt i32 %6, %_n
@@ -43,7 +43,7 @@ entry:
%0 = shl nsw i64 %indvars.iv, 2
%1 = getelementptr inbounds i8* bitcast (float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 16000) to i8*), i64 %0
%2 = bitcast i8* %1 to float*
- store float -1.000000e+00, float* %2, align 4, !tbaa !0
+ store float -1.000000e+00, float* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 16000
@@ -52,6 +52,3 @@ entry:
"5": ; preds = %"3"
ret i32 0
}
-
-!0 = metadata !{metadata !"alias set 7: float", metadata !1}
-!1 = metadata !{metadata !1}
diff --git a/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll b/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
index 6c924409af372..f4c07b4b24d9d 100644
--- a/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
+++ b/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
@@ -1,5 +1,7 @@
; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -loop-vectorize -dce -instcombine -S < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
@B = common global [1024 x i32] zeroinitializer, align 16
@A = common global [1024 x i32] zeroinitializer, align 16
diff --git a/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll b/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
new file mode 100644
index 0000000000000..47a5e7aee4c1b
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
@@ -0,0 +1,56 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;CHECK: @foo
+;CHECK-NOT: <4 x i32>
+;CHECK: ret void
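+; The inner loop's store goes to %arrayidx7.us, an address that is invariant
+; within that loop, so despite the llvm.mem.parallel_loop_access annotations
+; the write is not safely parallel and vectorization must be refused.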
+
+; Function Attrs: nounwind uwtable
+define void @foo(i32* nocapture %a, i32* nocapture %b, i32 %k, i32 %m) #0 {
+entry:
+ %cmp27 = icmp sgt i32 %m, 0
+ br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15
+
+for.end.us: ; preds = %for.body3.us
+ %arrayidx9.us = getelementptr inbounds i32* %b, i64 %indvars.iv33
+ %0 = load i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
+ %add10.us = add nsw i32 %0, 3
+ store i32 %add10.us, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next34 = add i64 %indvars.iv33, 1
+ %lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
+ %exitcond36 = icmp eq i32 %lftr.wideiv35, %m
+ br i1 %exitcond36, label %for.end15, label %for.body3.lr.ph.us, !llvm.loop.parallel !5
+
+for.body3.us: ; preds = %for.body3.us, %for.body3.lr.ph.us
+ %indvars.iv29 = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next30, %for.body3.us ]
+ %1 = trunc i64 %indvars.iv29 to i32
+ %add4.us = add i32 %add.us, %1
+ %idxprom.us = sext i32 %add4.us to i64
+ %arrayidx.us = getelementptr inbounds i32* %a, i64 %idxprom.us
+ %2 = load i32* %arrayidx.us, align 4, !llvm.mem.parallel_loop_access !3
+ %add5.us = add nsw i32 %2, 1
+ store i32 %add5.us, i32* %arrayidx7.us, align 4, !llvm.mem.parallel_loop_access !3
+ %indvars.iv.next30 = add i64 %indvars.iv29, 1
+ %lftr.wideiv31 = trunc i64 %indvars.iv.next30 to i32
+ %exitcond32 = icmp eq i32 %lftr.wideiv31, %m
+ br i1 %exitcond32, label %for.end.us, label %for.body3.us, !llvm.loop.parallel !4
+
+for.body3.lr.ph.us: ; preds = %for.end.us, %entry
+ %indvars.iv33 = phi i64 [ %indvars.iv.next34, %for.end.us ], [ 0, %entry ]
+ %3 = trunc i64 %indvars.iv33 to i32
+ %add.us = add i32 %3, %k
+ %arrayidx7.us = getelementptr inbounds i32* %a, i64 %indvars.iv33
+ br label %for.body3.us
+
+for.end15: ; preds = %for.end.us, %entry
+ ret void
+}
+
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!3 = metadata !{metadata !4, metadata !5}
+!4 = metadata !{metadata !4}
+!5 = metadata !{metadata !5}
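+; !4 and !5 are self-referential loop identifiers; !3 lists both so an access
+; can be marked parallel with respect to both nested loops at once.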
+
diff --git a/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
index 186fba87d653d..8716cff77789a 100644
--- a/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
+++ b/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
@@ -11,9 +11,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%add = fadd float %0, 1.000000e+00
- store float %add, float* %arrayidx, align 4, !tbaa !0
+ store float %add, float* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 8
@@ -22,7 +22,3 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret void
}
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
index 452d0df133dbe..f904a8e0b1173 100644
--- a/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
@@ -19,19 +19,19 @@ entry:
for.body: ; preds = %for.body.for.body_crit_edge, %entry
%indvars.iv.reload = load i64* %indvars.iv.reg2mem
%arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv.reload
- %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv.reload
- %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
- store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next = add i64 %indvars.iv.reload, 1
; A new store without the parallel metadata here:
store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
%indvars.iv.next.reload1 = load i64* %indvars.iv.next.reg2mem
%arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next.reload1
- %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
- store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next.reload = load i64* %indvars.iv.next.reg2mem
%lftr.wideiv = trunc i64 %indvars.iv.next.reload to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
@@ -46,7 +46,4 @@ for.end: ; preds = %for.body
ret void
}
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
!3 = metadata !{metadata !3}
diff --git a/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
index f648722734a17..3f1a071e69fa8 100644
--- a/test/Transforms/LoopVectorize/X86/parallel-loops.ll
+++ b/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -21,16 +21,16 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !tbaa !0
+ %0 = load i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !tbaa !0
+ %1 = load i32* %arrayidx2, align 4
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
- store i32 %0, i32* %arrayidx4, align 4, !tbaa !0
+ store i32 %0, i32* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4, !tbaa !0
- store i32 %2, i32* %arrayidx2, align 4, !tbaa !0
+ %2 = load i32* %arrayidx6, align 4
+ store i32 %2, i32* %arrayidx2, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body
@@ -51,18 +51,18 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
; This store might have originated from inlining a function with a parallel
; loop. It refers to a metadata list that also includes the "original loop
; reference" (!4).
- store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !5
+ store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !5
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
- store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !3
+ %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
+ store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !3
@@ -84,18 +84,18 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !6
%arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
; This refers to the loop marked with !7, which is not the loop we are in at
; the moment. It should prevent the current loop from being detected as
; parallel.
- store i32 %0, i32* %arrayidx4, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !7
+ store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !7
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
- store i32 %2, i32* %arrayidx2, align 4, !tbaa !0, !llvm.mem.parallel_loop_access !6
+ %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !6
+ store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop.parallel !6
@@ -104,9 +104,6 @@ for.end: ; preds = %for.body
ret void
}
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
!3 = metadata !{metadata !3}
!4 = metadata !{metadata !4}
!5 = metadata !{metadata !3, metadata !4}
diff --git a/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll b/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll
new file mode 100644
index 0000000000000..b66119f4ef59e
--- /dev/null
+++ b/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll
@@ -0,0 +1,29 @@
+; RUN: opt -O3 -loop-vectorize -force-vector-unroll=1 -force-vector-width=2 -S < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+@x = common global [1024 x x86_fp80] zeroinitializer, align 16
+
+;CHECK: @example
+;CHECK-NOT: bitcast x86_fp80* {{%[^ ]+}} to <{{[2-9][0-9]*}} x x86_fp80>*
+;CHECK: store
+;CHECK: ret void
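+; x86_fp80 has no legal vector type on x86, so the store below has to stay
+; scalar; the CHECK-NOT above rejects any bitcast to an x86_fp80 vector.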
+
+define void @example() nounwind ssp uwtable {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %conv = sitofp i32 1 to x86_fp80
+ %arrayidx = getelementptr inbounds [1024 x x86_fp80]* @x, i64 0, i64 %indvars.iv
+ store x86_fp80 %conv, x86_fp80* %arrayidx, align 16
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/bsd_regex.ll b/test/Transforms/LoopVectorize/bsd_regex.ll
new file mode 100644
index 0000000000000..a14b92d229b45
--- /dev/null
+++ b/test/Transforms/LoopVectorize/bsd_regex.ll
@@ -0,0 +1,38 @@
+; RUN: opt -S -loop-vectorize -dce -instcombine -force-vector-width=2 -force-vector-unroll=2 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+;PR15830.
+
+;CHECK: foo
+; When scalarizing stores we need to preserve the original order.
+; Make sure that we are extracting in the correct order (0101, and not 0011).
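+; With VF=2 and UF=2, each unrolled <2 x i64> part should be extracted lane 0
+; then lane 1 before its two scalar stores, giving the 0,1,0,1 pattern below.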
+;CHECK: extractelement <2 x i64> {{.*}}, i32 0
+;CHECK: extractelement <2 x i64> {{.*}}, i32 1
+;CHECK: extractelement <2 x i64> {{.*}}, i32 0
+;CHECK: extractelement <2 x i64> {{.*}}, i32 1
+;CHECK: store
+;CHECK: store
+;CHECK: store
+;CHECK: store
+;CHECK: ret
+
+define i32 @foo(i32* nocapture %A) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %0 = shl nsw i64 %indvars.iv, 2
+ %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ store i32 4, i32* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 10000
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 undef
+}
+
+
diff --git a/test/Transforms/LoopVectorize/bzip_reverse_loops.ll b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
index 431e422c2fbe0..2648bbea80357 100644
--- a/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
+++ b/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
@@ -17,7 +17,7 @@ do.body: ; preds = %cond.end, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %cond.end ]
%p.addr.0 = phi i16* [ %p, %entry ], [ %incdec.ptr, %cond.end ]
%incdec.ptr = getelementptr inbounds i16* %p.addr.0, i64 -1
- %0 = load i16* %incdec.ptr, align 2, !tbaa !0
+ %0 = load i16* %incdec.ptr, align 2
%conv = zext i16 %0 to i32
%cmp = icmp ult i32 %conv, %size
br i1 %cmp, label %cond.end, label %cond.true
@@ -29,7 +29,7 @@ cond.true: ; preds = %do.body
cond.end: ; preds = %do.body, %cond.true
%cond = phi i16 [ %phitmp, %cond.true ], [ 0, %do.body ]
- store i16 %cond, i16* %incdec.ptr, align 2, !tbaa !0
+ store i16 %cond, i16* %incdec.ptr, align 2
%dec = add i32 %n.addr.0, -1
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %do.end, label %do.body
@@ -52,11 +52,11 @@ do.body: ; preds = %do.body, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.body ]
%p.0 = phi i32* [ %a, %entry ], [ %incdec.ptr, %do.body ]
%incdec.ptr = getelementptr inbounds i32* %p.0, i64 -1
- %0 = load i32* %incdec.ptr, align 4, !tbaa !3
+ %0 = load i32* %incdec.ptr, align 4
%cmp = icmp slt i32 %0, %wsize
%sub = sub nsw i32 %0, %wsize
%cond = select i1 %cmp, i32 0, i32 %sub
- store i32 %cond, i32* %incdec.ptr, align 4, !tbaa !3
+ store i32 %cond, i32* %incdec.ptr, align 4
%dec = add nsw i32 %n.addr.0, -1
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %do.end, label %do.body
@@ -64,8 +64,3 @@ do.body: ; preds = %do.body, %entry
do.end: ; preds = %do.body
ret void
}
-
-!0 = metadata !{metadata !"short", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"int", metadata !1}
diff --git a/test/Transforms/LoopVectorize/calloc.ll b/test/Transforms/LoopVectorize/calloc.ll
index 08c84eff5dbf3..7e79916164591 100644
--- a/test/Transforms/LoopVectorize/calloc.ll
+++ b/test/Transforms/LoopVectorize/calloc.ll
@@ -23,7 +23,7 @@ for.body: ; preds = %for.body, %for.body
%i.030 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%shr = lshr i64 %i.030, 1
%arrayidx = getelementptr inbounds i8* %bytes, i64 %shr
- %1 = load i8* %arrayidx, align 1, !tbaa !0
+ %1 = load i8* %arrayidx, align 1
%conv = zext i8 %1 to i32
%and = shl i64 %i.030, 2
%neg = and i64 %and, 4
@@ -38,7 +38,7 @@ for.body: ; preds = %for.body, %for.body
%add17 = add nsw i32 %cond, %shr11
%conv18 = trunc i32 %add17 to i8
%arrayidx19 = getelementptr inbounds i8* %call, i64 %i.030
- store i8 %conv18, i8* %arrayidx19, align 1, !tbaa !0
+ store i8 %conv18, i8* %arrayidx19, align 1
%inc = add i64 %i.030, 1
%exitcond = icmp eq i64 %inc, %0
br i1 %exitcond, label %for.end, label %for.body
@@ -48,6 +48,3 @@ for.end: ; preds = %for.body, %entry
}
declare noalias i8* @calloc(i64, i64) nounwind
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/dbg.value.ll b/test/Transforms/LoopVectorize/dbg.value.ll
index a2ea9511bb22f..127d479b3a060 100644
--- a/test/Transforms/LoopVectorize/dbg.value.ll
+++ b/test/Transforms/LoopVectorize/dbg.value.ll
@@ -18,12 +18,12 @@ for.body:
;CHECK: load <4 x i32>
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv, !dbg !19
- %0 = load i32* %arrayidx, align 4, !dbg !19, !tbaa !21
+ %0 = load i32* %arrayidx, align 4, !dbg !19
%arrayidx2 = getelementptr inbounds [1024 x i32]* @C, i64 0, i64 %indvars.iv, !dbg !19
- %1 = load i32* %arrayidx2, align 4, !dbg !19, !tbaa !21
+ %1 = load i32* %arrayidx2, align 4, !dbg !19
%add = add nsw i32 %1, %0, !dbg !19
%arrayidx4 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv, !dbg !19
- store i32 %add, i32* %arrayidx4, align 4, !dbg !19, !tbaa !21
+ store i32 %add, i32* %arrayidx4, align 4, !dbg !19
%indvars.iv.next = add i64 %indvars.iv, 1, !dbg !18
tail call void @llvm.dbg.value(metadata !{null}, i64 0, metadata !9), !dbg !18
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !18
@@ -64,7 +64,4 @@ attributes #1 = { nounwind readnone }
!18 = metadata !{i32 6, i32 0, metadata !10, null}
!19 = metadata !{i32 7, i32 0, metadata !20, null}
!20 = metadata !{i32 786443, metadata !10, i32 6, i32 0, metadata !4, i32 1}
-!21 = metadata !{metadata !"int", metadata !22}
-!22 = metadata !{metadata !"omnipotent char", metadata !23}
-!23 = metadata !{metadata !"Simple C/C++ TBAA"}
!24 = metadata !{i32 9, i32 0, metadata !3, null}
diff --git a/test/Transforms/LoopVectorize/float-reduction.ll b/test/Transforms/LoopVectorize/float-reduction.ll
index 565684cccb9a7..54ca172e86965 100644
--- a/test/Transforms/LoopVectorize/float-reduction.ll
+++ b/test/Transforms/LoopVectorize/float-reduction.ll
@@ -13,7 +13,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%add = fadd fast float %sum.04, %0
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -23,7 +23,3 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body
ret float %add
}
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/i8-induction.ll b/test/Transforms/LoopVectorize/i8-induction.ll
index 7759b7085a1b5..2a0e826454940 100644
--- a/test/Transforms/LoopVectorize/i8-induction.ll
+++ b/test/Transforms/LoopVectorize/i8-induction.ll
@@ -8,8 +8,8 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @f() nounwind uwtable ssp {
scalar.ph:
- store i8 0, i8* inttoptr (i64 1 to i8*), align 1, !tbaa !0
- %0 = load i8* @a, align 1, !tbaa !0
+ store i8 0, i8* inttoptr (i64 1 to i8*), align 1
+ %0 = load i8* @a, align 1
br label %for.body
for.body:
@@ -26,10 +26,6 @@ for.body:
br i1 %phitmp14, label %for.body, label %for.end
for.end: ; preds = %for.body
- store i8 %mul, i8* @b, align 1, !tbaa !0
+ store i8 %mul, i8* @b, align 1
ret void
}
-
-!0 = metadata !{metadata !"omnipotent char", metadata !1}
-!1 = metadata !{metadata !"Simple C/C++ TBAA"}
-
diff --git a/test/Transforms/LoopVectorize/if-conversion-nest.ll b/test/Transforms/LoopVectorize/if-conversion-nest.ll
new file mode 100644
index 0000000000000..f44862a2ebb92
--- /dev/null
+++ b/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -0,0 +1,48 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -enable-if-conversion -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
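+; If-conversion turns the nested branches into mask arithmetic (xor/and on
+; the <4 x i1> conditions) feeding a chain of vector selects, checked below.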
+;CHECK: @foo
+;CHECK: icmp sgt
+;CHECK: icmp sgt
+;CHECK: icmp slt
+;CHECK: select <4 x i1>
+;CHECK: %[[P1:.*]] = select <4 x i1>
+;CHECK: xor <4 x i1>
+;CHECK: and <4 x i1>
+;CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %[[P1]]
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
+entry:
+ %cmp26 = icmp sgt i32 %n, 0
+ br i1 %cmp26, label %for.body, label %for.end
+
+for.body:
+ %indvars.iv = phi i64 [ %indvars.iv.next, %if.end14 ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %cmp3 = icmp sgt i32 %0, %1
+ br i1 %cmp3, label %if.then, label %if.end14
+
+if.then:
+ %cmp6 = icmp sgt i32 %0, 19
+ br i1 %cmp6, label %if.end14, label %if.else
+
+if.else:
+ %cmp10 = icmp slt i32 %1, 4
+ %. = select i1 %cmp10, i32 4, i32 5
+ br label %if.end14
+
+if.end14:
+ %x.0 = phi i32 [ 9, %for.body ], [ 3, %if.then ], [ %., %if.else ] ; <------------- A PHI with 3 entries that we can still vectorize.
+ store i32 %x.0, i32* %arrayidx, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 undef
+}
diff --git a/test/Transforms/LoopVectorize/intrinsic.ll b/test/Transforms/LoopVectorize/intrinsic.ll
index e79d78de67c5a..defbb5bd94ba0 100644
--- a/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/test/Transforms/LoopVectorize/intrinsic.ll
@@ -14,10 +14,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.sqrt.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -40,10 +40,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.sqrt.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -66,10 +66,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.sin.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -92,10 +92,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.sin.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -118,10 +118,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.cos.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -144,10 +144,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.cos.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -170,10 +170,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.exp.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -196,10 +196,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.exp.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -222,10 +222,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.exp2.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -248,10 +248,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.exp2.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -274,10 +274,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -300,10 +300,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -326,10 +326,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log10.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -352,10 +352,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log10.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -378,10 +378,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log2.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -404,10 +404,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log2.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -430,10 +430,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.fabs.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -453,10 +453,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.fabs(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -479,10 +479,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.floor.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -505,10 +505,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.floor.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -531,10 +531,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.ceil.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -557,10 +557,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.ceil.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -583,10 +583,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.trunc.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -609,10 +609,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.trunc.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -635,10 +635,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.rint.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -661,10 +661,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.rint.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -687,10 +687,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%call = tail call float @llvm.nearbyint.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx2, align 4, !tbaa !0
+ store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -713,10 +713,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%call = tail call double @llvm.nearbyint.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx2, align 8, !tbaa !3
+ store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -739,14 +739,14 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %1 = load float* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
- %2 = load float* %arrayidx4, align 4, !tbaa !0
+ %2 = load float* %arrayidx4, align 4
%3 = tail call float @llvm.fma.f32(float %0, float %2, float %1)
%arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %3, float* %arrayidx6, align 4, !tbaa !0
+ store float %3, float* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -769,14 +769,14 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %1 = load double* %arrayidx2, align 8
%arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
- %2 = load double* %arrayidx4, align 8, !tbaa !3
+ %2 = load double* %arrayidx4, align 8
%3 = tail call double @llvm.fma.f64(double %0, double %2, double %1)
%arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %3, double* %arrayidx6, align 8, !tbaa !3
+ store double %3, double* %arrayidx6, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -799,14 +799,14 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %1 = load float* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
- %2 = load float* %arrayidx4, align 4, !tbaa !0
+ %2 = load float* %arrayidx4, align 4
%3 = tail call float @llvm.fmuladd.f32(float %0, float %2, float %1)
%arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %3, float* %arrayidx6, align 4, !tbaa !0
+ store float %3, float* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -829,14 +829,14 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %1 = load double* %arrayidx2, align 8
%arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
- %2 = load double* %arrayidx4, align 8, !tbaa !3
+ %2 = load double* %arrayidx4, align 8
%3 = tail call double @llvm.fmuladd.f64(double %0, double %2, double %1)
%arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %3, double* %arrayidx6, align 8, !tbaa !3
+ store double %3, double* %arrayidx6, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -859,12 +859,12 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float* %z, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !tbaa !0
+ %1 = load float* %arrayidx2, align 4
%call = tail call float @llvm.pow.f32(float %0, float %1) nounwind readnone
%arrayidx4 = getelementptr inbounds float* %x, i64 %indvars.iv
- store float %call, float* %arrayidx4, align 4, !tbaa !0
+ store float %call, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -887,12 +887,12 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8, !tbaa !3
+ %0 = load double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double* %z, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8, !tbaa !3
+ %1 = load double* %arrayidx2, align 8
%call = tail call double @llvm.pow.f64(double %0, double %1) nounwind readnone
%arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
- store double %call, double* %arrayidx4, align 8, !tbaa !3
+ store double %call, double* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -927,9 +927,3 @@ for.end: ; preds = %for.body
declare float @fabsf(float) nounwind readnone
declare double @llvm.pow.f64(double, double) nounwind readnone
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
-!3 = metadata !{metadata !"double", metadata !1}
-!4 = metadata !{metadata !"int", metadata !1}
diff --git a/test/Transforms/LoopVectorize/lcssa-crash.ll b/test/Transforms/LoopVectorize/lcssa-crash.ll
index 06b3b08aa0e3e..de6be548490c5 100644
--- a/test/Transforms/LoopVectorize/lcssa-crash.ll
+++ b/test/Transforms/LoopVectorize/lcssa-crash.ll
@@ -27,3 +27,14 @@ for.end.i.i.i:
unreachable
}
+; PR16139
+define void @test2(i8* %x) {
+entry:
+ indirectbr i8* %x, [ label %L0, label %L1 ]
+
+L0:
+ br label %L0
+
+L1:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/minmax_reduction.ll b/test/Transforms/LoopVectorize/minmax_reduction.ll
new file mode 100644
index 0000000000000..502fd8b9383b1
--- /dev/null
+++ b/test/Transforms/LoopVectorize/minmax_reduction.ll
@@ -0,0 +1,885 @@
+; RUN: opt -S -loop-vectorize -dce -instcombine -force-vector-width=2 -force-vector-unroll=1 < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+@A = common global [1024 x i32] zeroinitializer, align 16
+@fA = common global [1024 x float] zeroinitializer, align 16
+@dA = common global [1024 x double] zeroinitializer, align 16
+
+; Signed tests.
+
+; Turn this into a max reduction. Make sure we use a splat to initialize the
+; vector for the reduction.
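+; (The insertelement/shufflevector pair below is that splat: %max is
+; broadcast into every lane so each lane starts from the same value.)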
+; CHECK: @max_red
+; CHECK: %[[VAR:.*]] = insertelement <2 x i32> undef, i32 %max, i32 0
+; CHECK: {{.*}} = shufflevector <2 x i32> %[[VAR]], <2 x i32> undef, <2 x i32> zeroinitializer
+; CHECK: icmp sgt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp sgt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @max_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp sgt i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a max reduction. The select has its inputs reversed;
+; therefore this is a max reduction.
+; CHECK: @max_red_inverse_select
+; CHECK: icmp slt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp sgt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @max_red_inverse_select(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp slt i32 %max.red.08, %0
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a min reduction.
+; CHECK: @min_red
+; CHECK: icmp slt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp slt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @min_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp slt i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a min reduction. The select has its inputs reversed, so this
+; is still a min reduction.
+; CHECK: @min_red_inverse_select
+; CHECK: icmp sgt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp slt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @min_red_inverse_select(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp sgt i32 %max.red.08, %0
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Unsigned tests.
+
+; Turn this into a max reduction.
+; CHECK: @umax_red
+; CHECK: icmp ugt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ugt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @umax_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp ugt i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a max reduction. The select has its inputs reversed, so this
+; is still a max reduction.
+; CHECK: @umax_red_inverse_select
+; CHECK: icmp ult <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ugt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @umax_red_inverse_select(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp ult i32 %max.red.08, %0
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a min reduction.
+; CHECK: @umin_red
+; CHECK: icmp ult <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ult <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @umin_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp ult i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Turn this into a min reduction. The select has its inputs reversed, so this
+; is still a min reduction.
+; CHECK: @umin_red_inverse_select
+; CHECK: icmp ugt <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ult <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @umin_red_inverse_select(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp ugt i32 %max.red.08, %0
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; SGE -> SLT
+; Turn this into a min reduction (select inputs are reversed).
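+; ((x >= m) ? m : x) picks the smaller value, i.e. min(x, m).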
+; CHECK: @sge_min_red
+; CHECK: icmp sge <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp slt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @sge_min_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp sge i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; SLE -> SGT
+; Turn this into a max reduction (select inputs are reversed).
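+; ((x <= m) ? m : x) picks the larger value, i.e. max(x, m).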
+; CHECK: @sle_min_red
+; CHECK: icmp sle <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp sgt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @sle_min_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp sle i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; UGE -> ULT
+; Turn this into a min reduction (select inputs are reversed).
+; CHECK: @uge_min_red
+; CHECK: icmp uge <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ult <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @uge_min_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp uge i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; ULE -> UGT
+; Turn this into a max reduction (select inputs are reversed).
+; CHECK: @ule_min_red
+; CHECK: icmp ule <2 x i32>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: icmp ugt <2 x i32>
+; CHECK: select <2 x i1>
+
+define i32 @ule_min_red(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %cmp3 = icmp ule i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; No reduction.
+; CHECK: @no_red_1
+; CHECK-NOT: icmp sgt <2 x i32>
+define i32 @no_red_1(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds [1024 x i32]* @A, i64 1, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %1 = load i32* %arrayidx1, align 4
+ %cmp3 = icmp sgt i32 %0, %1
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; CHECK: @no_red_2
+; CHECK-NOT: icmp sgt <2 x i32>
+define i32 @no_red_2(i32 %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds [1024 x i32]* @A, i64 1, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %1 = load i32* %arrayidx1, align 4
+ %cmp3 = icmp sgt i32 %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, i32 %0, i32 %1
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %max.red.0
+}
+
+; Float tests.
+
+; Maximum.
+
+; Turn this into a max reduction in the presence of a no-nans-fp-math attribute.
+; CHECK: @max_red_float
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @max_red_float(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ogt float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %0, float %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
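+
+; Note: the fcmp/select idiom above is only a legal max reduction because
+; attribute #0 ("no-nans-fp-math", defined at the end of this file) excludes
+; NaNs; once NaN can appear, the compare-and-select form is not associative,
+; so the reduction cannot be reordered.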
+
+; CHECK: @max_red_float_ge
+; CHECK: fcmp oge <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @max_red_float_ge(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp oge float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %0, float %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @inverted_max_red_float
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_max_red_float(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp olt float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %max.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @inverted_max_red_float_le
+; CHECK: fcmp ole <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_max_red_float_le(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ole float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %max.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @unordered_max_red_float
+; CHECK: fcmp ugt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @unordered_max_red_float(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ugt float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %0, float %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @unordered_max_red_float_ge
+; CHECK: fcmp uge <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @unordered_max_red_float_ge(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp uge float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %0, float %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @inverted_unordered_max_red_float
+; CHECK: fcmp ult <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_unordered_max_red_float(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ult float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %max.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; CHECK: @inverted_unordered_max_red_float_le
+; CHECK: fcmp ule <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_unordered_max_red_float_le(float %max) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ule float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %max.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+; Minimum.
+
+; Turn this into a min reduction in the presence of a no-nans-fp-math attribute.
+; CHECK: @min_red_float
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @min_red_float(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp olt float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %0, float %min.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @min_red_float_le
+; CHECK: fcmp ole <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @min_red_float_le(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ole float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %0, float %min.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @inverted_min_red_float
+; CHECK: fcmp ogt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_min_red_float(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ogt float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %min.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @inverted_min_red_float_ge
+; CHECK: fcmp oge <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_min_red_float_ge(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp oge float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %min.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @unordered_min_red_float
+; CHECK: fcmp ult <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @unordered_min_red_float(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ult float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %0, float %min.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @unordered_min_red_float_le
+; CHECK: fcmp ule <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @unordered_min_red_float_le(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ule float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %0, float %min.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @inverted_unordered_min_red_float
+; CHECK: fcmp ugt <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_unordered_min_red_float(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ugt float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %min.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; CHECK: @inverted_unordered_min_red_float_ge
+; CHECK: fcmp uge <2 x float>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x float>
+; CHECK: select <2 x i1>
+
+define float @inverted_unordered_min_red_float_ge(float %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp uge float %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, float %min.red.08, float %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %min.red.0
+}
+
+; Make sure we handle doubles, too.
+; CHECK: @min_red_double
+; CHECK: fcmp olt <2 x double>
+; CHECK: select <2 x i1>
+; CHECK: middle.block
+; CHECK: fcmp olt <2 x double>
+; CHECK: select <2 x i1>
+
+define double @min_red_double(double %min) #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %min.red.08 = phi double [ %min, %entry ], [ %min.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x double]* @dA, i64 0, i64 %indvars.iv
+ %0 = load double* %arrayidx, align 4
+ %cmp3 = fcmp olt double %0, %min.red.08
+ %min.red.0 = select i1 %cmp3, double %0, double %min.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret double %min.red.0
+}
+
+
+; Don't turn this into a max reduction. The no-nans-fp-math attribute is missing.
+; CHECK: @max_red_float_nans
+; CHECK-NOT: <2 x float>
+
+define float @max_red_float_nans(float %max) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
+ %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %0 = load float* %arrayidx, align 4
+ %cmp3 = fcmp ogt float %0, %max.red.08
+ %max.red.0 = select i1 %cmp3, float %0, float %max.red.08
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret float %max.red.0
+}
+
+
+attributes #0 = { "no-nans-fp-math"="true" }
diff --git a/test/Transforms/LoopVectorize/no_idiv_reduction.ll b/test/Transforms/LoopVectorize/no_idiv_reduction.ll
new file mode 100644
index 0000000000000..cdfb3fd66f054
--- /dev/null
+++ b/test/Transforms/LoopVectorize/no_idiv_reduction.ll
@@ -0,0 +1,24 @@
+; RUN: opt -loop-vectorize -force-vector-width=2 -force-vector-unroll=1 -S < %s | FileCheck %s
+@a = common global [128 x i32] zeroinitializer, align 16
+
+;; Must not vectorize a division reduction. Division is lossy.
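+;; A reduction must reassociate, and division does not, e.g. in C:
+;;   (64 / 8) / 2 == 4, but 64 / (8 / 2) == 16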
+define i32 @g() {
+entry:
+ br label %for.body
+
+for.body:
+ ; CHECK: @g
+ ; CHECK-NOT: sdiv <2 x i32>
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %r.05 = phi i32 [ 80, %entry ], [ %div, %for.body ]
+ %arrayidx = getelementptr inbounds [128 x i32]* @a, i64 0, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %div = sdiv i32 %r.05, %0
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 1024
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret i32 %div
+}
diff --git a/test/Transforms/LoopVectorize/no_outside_user.ll b/test/Transforms/LoopVectorize/no_outside_user.ll
new file mode 100644
index 0000000000000..6f0357c5e546f
--- /dev/null
+++ b/test/Transforms/LoopVectorize/no_outside_user.ll
@@ -0,0 +1,41 @@
+; RUN: opt -S -loop-vectorize -force-vector-unroll=1 -force-vector-width=2 < %s | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+
+@f = common global i32 0, align 4
+@.str = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+@c = common global i32 0, align 4
+@a = common global i32 0, align 4
+@b = common global i32 0, align 4
+@e = common global i32 0, align 4
+
+; We used to vectorize this loop. But it has a value ("tmp17") that is used
+; outside of the loop and is not a recognized reduction variable.
+
+; CHECK-NOT: <2 x i32>
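+
+; A rough C sketch of the shape (names hypothetical):
+;   int i = b, r;
+;   do {
+;     r = (i > 10) ? 1 : 0;
+;   } while (++i < 4);
+;   return r;   // r is live out of the loop but is not a reduction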
+
+define i32 @main() {
+bb:
+ %b.promoted = load i32* @b, align 4
+ br label %.lr.ph.i
+
+.lr.ph.i:
+ %tmp8 = phi i32 [ %tmp18, %bb16 ], [ %b.promoted, %bb ]
+ %tmp2 = icmp sgt i32 %tmp8, 10
+ br i1 %tmp2, label %bb16, label %bb10
+
+bb10:
+ br label %bb16
+
+bb16:
+ %tmp17 = phi i32 [ 0, %bb10 ], [ 1, %.lr.ph.i ]
+ %tmp18 = add nsw i32 %tmp8, 1
+ %tmp19 = icmp slt i32 %tmp18, 4
+ br i1 %tmp19, label %.lr.ph.i, label %f1.exit.loopexit
+
+f1.exit.loopexit:
+ %.lcssa = phi i32 [ %tmp17, %bb16 ]
+ ret i32 %.lcssa
+}
+
+
diff --git a/test/Transforms/LoopVectorize/phi-hang.ll b/test/Transforms/LoopVectorize/phi-hang.ll
index b80d45995dc37..bbce239afa71f 100644
--- a/test/Transforms/LoopVectorize/phi-hang.ll
+++ b/test/Transforms/LoopVectorize/phi-hang.ll
@@ -27,3 +27,21 @@ bb5: ; preds = %bb4, %bb1
bb11: ; preds = %bb5
ret void
}
+
+; PR15748
+define void @test2() {
+bb:
+ br label %bb1
+
+bb1: ; preds = %bb1, %bb
+ %tmp = phi i32 [ 0, %bb ], [ %tmp5, %bb1 ]
+ %tmp2 = phi i32 [ 0, %bb ], [ 1, %bb1 ]
+ %tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
+ %tmp4 = or i32 %tmp2, %tmp3
+ %tmp5 = add nsw i32 %tmp, 1
+ %tmp6 = icmp eq i32 %tmp5, 0
+ br i1 %tmp6, label %bb7, label %bb1
+
+bb7: ; preds = %bb1
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/reverse_induction.ll b/test/Transforms/LoopVectorize/reverse_induction.ll
new file mode 100644
index 0000000000000..f43f02bc3132a
--- /dev/null
+++ b/test/Transforms/LoopVectorize/reverse_induction.ll
@@ -0,0 +1,79 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=2 -force-vector-width=4 -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; Make sure reverse induction generates consecutive vectors with correct negative indices.
+; PR15882
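+; A C sketch of the assumed source:
+;   int sum = 0;
+;   for (int i = 0; i < 1024; ++i)
+;     sum += ptr[--startval];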
+
+; CHECK: reverse_induction_i64
+; CHECK: add <4 x i64> %[[SPLAT:.*]], <i64 0, i64 -1, i64 -2, i64 -3>
+; CHECK: add <4 x i64> %[[SPLAT]], <i64 -4, i64 -5, i64 -6, i64 -7>
+
+define i32 @reverse_induction_i64(i64 %startval, i32 * %ptr) {
+entry:
+ br label %for.body
+
+for.body:
+ %add.i7 = phi i64 [ %startval, %entry ], [ %add.i, %for.body ]
+ %i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
+ %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
+ %add.i = add i64 %add.i7, -1
+ %kind_.i = getelementptr inbounds i32* %ptr, i64 %add.i
+ %tmp.i1 = load i32* %kind_.i, align 4
+ %inc.redux = add i32 %tmp.i1, %redux5
+ %inc4 = add i32 %i.06, 1
+ %exitcond = icmp ne i32 %inc4, 1024
+ br i1 %exitcond, label %for.body, label %loopend
+
+loopend:
+ ret i32 %inc.redux
+}
+
+; CHECK: reverse_induction_i128
+; CHECK: add <4 x i128> %[[SPLAT:.*]], <i128 0, i128 -1, i128 -2, i128 -3>
+; CHECK: add <4 x i128> %[[SPLAT]], <i128 -4, i128 -5, i128 -6, i128 -7>
+define i32 @reverse_induction_i128(i128 %startval, i32 * %ptr) {
+entry:
+ br label %for.body
+
+for.body:
+ %add.i7 = phi i128 [ %startval, %entry ], [ %add.i, %for.body ]
+ %i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
+ %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
+ %add.i = add i128 %add.i7, -1
+ %kind_.i = getelementptr inbounds i32* %ptr, i128 %add.i
+ %tmp.i1 = load i32* %kind_.i, align 4
+ %inc.redux = add i32 %tmp.i1, %redux5
+ %inc4 = add i32 %i.06, 1
+ %exitcond = icmp ne i32 %inc4, 1024
+ br i1 %exitcond, label %for.body, label %loopend
+
+loopend:
+ ret i32 %inc.redux
+}
+
+; CHECK: reverse_induction_i16
+; CHECK: add <4 x i16> %[[SPLAT:.*]], <i16 0, i16 -1, i16 -2, i16 -3>
+; CHECK: add <4 x i16> %[[SPLAT]], <i16 -4, i16 -5, i16 -6, i16 -7>
+
+define i32 @reverse_induction_i16(i16 %startval, i32 * %ptr) {
+entry:
+ br label %for.body
+
+for.body:
+ %add.i7 = phi i16 [ %startval, %entry ], [ %add.i, %for.body ]
+ %i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
+ %redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
+ %add.i = add i16 %add.i7, -1
+ %kind_.i = getelementptr inbounds i32* %ptr, i16 %add.i
+ %tmp.i1 = load i32* %kind_.i, align 4
+ %inc.redux = add i32 %tmp.i1, %redux5
+ %inc4 = add i32 %i.06, 1
+ %exitcond = icmp ne i32 %inc4, 1024
+ br i1 %exitcond, label %for.body, label %loopend
+
+loopend:
+ ret i32 %inc.redux
+}
+
+
diff --git a/test/Transforms/LoopVectorize/runtime-check-readonly.ll b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
new file mode 100644
index 0000000000000..4145d134fd70d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/runtime-check-readonly.ll
@@ -0,0 +1,36 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
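+; A may overlap B, and A may overlap C, so the vectorized loop is guarded by a
+; runtime bound check per pair (two 'uge' compares each, and'ed together); the
+; read-only pair B/C needs no check.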
+;CHECK: add_ints
+;CHECK: br
+;CHECK: getelementptr
+;CHECK-NEXT: getelementptr
+;CHECK-NEXT: icmp uge
+;CHECK-NEXT: icmp uge
+;CHECK-NEXT: icmp uge
+;CHECK-NEXT: icmp uge
+;CHECK-NEXT: and
+;CHECK: ret
+define void @add_ints(i32* nocapture %A, i32* nocapture %B, i32* nocapture %C) {
+entry:
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds i32* %C, i64 %indvars.iv
+ %1 = load i32* %arrayidx2, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ store i32 %add, i32* %arrayidx4, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 200
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll
index 86098a6e7db23..014c4fc48f877 100644
--- a/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/test/Transforms/LoopVectorize/runtime-check.ll
@@ -22,10 +22,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float* %arrayidx, align 4
%mul = fmul float %0, 3.000000e+00
%arrayidx2 = getelementptr inbounds float* %a, i64 %indvars.iv
- store float %mul, float* %arrayidx2, align 4, !tbaa !0
+ store float %mul, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, %n
@@ -34,7 +34,3 @@ for.body: ; preds = %entry, %for.body
for.end: ; preds = %for.body, %entry
ret i32 undef
}
-
-!0 = metadata !{metadata !"float", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/runtime-limit.ll b/test/Transforms/LoopVectorize/runtime-limit.ll
new file mode 100644
index 0000000000000..d7839746f0e15
--- /dev/null
+++ b/test/Transforms/LoopVectorize/runtime-limit.ll
@@ -0,0 +1,84 @@
+; RUN: opt < %s -loop-vectorize -force-vector-unroll=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; We are vectorizing with 6 runtime checks.
+;CHECK: func1x6
+;CHECK: <4 x i32>
+;CHECK: ret
+define i32 @func1x6(i32* nocapture %out, i32* nocapture %A, i32* nocapture %B, i32* nocapture %C, i32* nocapture %D, i32* nocapture %E, i32* nocapture %F) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.016 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %i.016
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.016
+ %1 = load i32* %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.016
+ %2 = load i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %add, %2
+ %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.016
+ %3 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %add3, %3
+ %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.016
+ %4 = load i32* %arrayidx6, align 4
+ %add7 = add nsw i32 %add5, %4
+ %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.016
+ store i32 %add7, i32* %arrayidx8, align 4
+ %inc = add i64 %i.016, 1
+ %exitcond = icmp eq i64 %inc, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 undef
+}
+
+; We are not vectorizing this loop because it would require 12 runtime checks.
+;CHECK: func2x6
+;CHECK-NOT: <4 x i32>
+;CHECK: ret
+define i32 @func2x6(i32* nocapture %out, i32* nocapture %out2, i32* nocapture %A, i32* nocapture %B, i32* nocapture %C, i32* nocapture %D, i32* nocapture %E, i32* nocapture %F) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %i.037 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %i.037
+ %0 = load i32* %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.037
+ %1 = load i32* %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.037
+ %2 = load i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %add, %2
+ %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.037
+ %3 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %add3, %3
+ %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.037
+ %4 = load i32* %arrayidx6, align 4
+ %add7 = add nsw i32 %add5, %4
+ %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.037
+ store i32 %add7, i32* %arrayidx8, align 4
+ %5 = load i32* %arrayidx, align 4
+ %6 = load i32* %arrayidx1, align 4
+ %add11 = add nsw i32 %6, %5
+ %7 = load i32* %arrayidx2, align 4
+ %add13 = add nsw i32 %add11, %7
+ %8 = load i32* %arrayidx4, align 4
+ %add15 = add nsw i32 %add13, %8
+ %9 = load i32* %arrayidx6, align 4
+ %add17 = add nsw i32 %add15, %9
+ %arrayidx18 = getelementptr inbounds i32* %out2, i64 %i.037
+ store i32 %add17, i32* %arrayidx18, align 4
+ %inc = add i64 %i.037, 1
+ %exitcond = icmp eq i64 %inc, 256
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret i32 undef
+}
+
diff --git a/test/Transforms/LoopVectorize/start-non-zero.ll b/test/Transforms/LoopVectorize/start-non-zero.ll
index 998001c3187b0..e8a089a981209 100644
--- a/test/Transforms/LoopVectorize/start-non-zero.ll
+++ b/test/Transforms/LoopVectorize/start-non-zero.ll
@@ -18,9 +18,9 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4, !tbaa !0
+ %1 = load i32* %arrayidx, align 4
%mul = mul nuw i32 %1, 333
- store i32 %mul, i32* %arrayidx, align 4, !tbaa !0
+ store i32 %mul, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%2 = trunc i64 %indvars.iv.next to i32
%cmp = icmp slt i32 %2, %end
@@ -29,7 +29,3 @@ for.body: ; preds = %for.body.lr.ph, %fo
for.end: ; preds = %for.body, %entry
ret i32 4
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/struct_access.ll b/test/Transforms/LoopVectorize/struct_access.ll
index de65d0d14870e..573480d77cdda 100644
--- a/test/Transforms/LoopVectorize/struct_access.ll
+++ b/test/Transforms/LoopVectorize/struct_access.ll
@@ -33,7 +33,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%x = getelementptr inbounds %struct.coordinate* %A, i64 %indvars.iv, i32 0
- %0 = load i32* %x, align 4, !tbaa !0
+ %0 = load i32* %x, align 4
%add = add nsw i32 %0, %sum.05
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -44,7 +44,3 @@ for.end: ; preds = %for.body, %entry
%sum.0.lcssa = phi i32 [ 0, %entry ], [ %add, %for.body ]
ret i32 %sum.0.lcssa
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/LoopVectorize/value-ptr-bug.ll b/test/Transforms/LoopVectorize/value-ptr-bug.ll
new file mode 100644
index 0000000000000..f376656f0754d
--- /dev/null
+++ b/test/Transforms/LoopVectorize/value-ptr-bug.ll
@@ -0,0 +1,50 @@
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 -dce -instcombine < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+; PR16073
+
+; Because we were caching value pointers across a function call that could
+; RAUW them, we would generate the undefined value store below:
+; SCEVExpander::expandCodeFor would change a value (the start value of an
+; induction) that we had cached in the induction variable list.
+
+; CHECK: test_vh
+; CHECK-NOT: store <4 x i8> undef
+
+define void @test_vh(i32* %ptr265, i32* %ptr266, i32 %sub267) {
+entry:
+ br label %loop
+
+loop:
+ %inc = phi i32 [ %sub267, %entry ], [ %add, %loop]
+ %ext.inc = sext i32 %inc to i64
+ %add.ptr265 = getelementptr inbounds i32* %ptr265, i64 %ext.inc
+ %add.ptr266 = getelementptr inbounds i32* %ptr266, i64 %ext.inc
+ %add = add i32 %inc, 9
+ %cmp = icmp slt i32 %add, 140
+ br i1 %cmp, label %block1, label %loop
+
+block1:
+ %sub267.lcssa = phi i32 [ %add, %loop ]
+ %add.ptr266.lcssa = phi i32* [ %add.ptr266, %loop ]
+ %add.ptr265.lcssa = phi i32* [ %add.ptr265, %loop ]
+ %tmp29 = bitcast i32* %add.ptr265.lcssa to i8*
+ %tmp30 = bitcast i32* %add.ptr266.lcssa to i8*
+ br label %do.body272
+
+do.body272:
+ %row_width.5 = phi i32 [ %sub267.lcssa, %block1 ], [ %dec, %do.body272 ]
+ %sp.4 = phi i8* [ %tmp30, %block1 ], [ %incdec.ptr273, %do.body272 ]
+ %dp.addr.4 = phi i8* [ %tmp29, %block1 ], [ %incdec.ptr274, %do.body272 ]
+ %incdec.ptr273 = getelementptr inbounds i8* %sp.4, i64 1
+ %tmp31 = load i8* %sp.4, align 1
+ %incdec.ptr274 = getelementptr inbounds i8* %dp.addr.4, i64 1
+ store i8 %tmp31, i8* %dp.addr.4, align 1
+ %dec = add i32 %row_width.5, -1
+ %cmp276 = icmp eq i32 %dec, 0
+ br i1 %cmp276, label %loop.exit, label %do.body272
+
+loop.exit:
+ ret void
+}
diff --git a/test/Transforms/LoopVectorize/vectorize-once.ll b/test/Transforms/LoopVectorize/vectorize-once.ll
index ac1694802a32d..f289ded25de1a 100644
--- a/test/Transforms/LoopVectorize/vectorize-once.ll
+++ b/test/Transforms/LoopVectorize/vectorize-once.ll
@@ -29,7 +29,7 @@ entry:
for.body.i: ; preds = %entry, %for.body.i
%__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
- %0 = load i32* %__first.addr.04.i, align 4, !tbaa !0
+ %0 = load i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
%incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
@@ -55,7 +55,7 @@ entry:
for.body.i: ; preds = %entry, %for.body.i
%__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
- %0 = load i32* %__first.addr.04.i, align 4, !tbaa !0
+ %0 = load i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
%incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
@@ -68,8 +68,5 @@ _ZSt10accumulateIPiiET0_T_S2_S1_.exit: ; preds = %for.body.i, %entry
attributes #0 = { nounwind readonly ssp uwtable "fp-contract-model"="standard" "no-frame-pointer-elim" "no-frame-pointer-elim-non-leaf" "realign-stack" "relocation-model"="pic" "ssp-buffers-size"="8" }
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
!3 = metadata !{}
diff --git a/test/Transforms/MergeFunc/crash.ll b/test/Transforms/MergeFunc/crash.ll
new file mode 100644
index 0000000000000..0897ba289337a
--- /dev/null
+++ b/test/Transforms/MergeFunc/crash.ll
@@ -0,0 +1,46 @@
+; RUN: opt -mergefunc -disable-output < %s
+; PR15185
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-pc-linux-gnu"
+
+%.qux.2496 = type { i32, %.qux.2497 }
+%.qux.2497 = type { i8, i32 }
+%.qux.2585 = type { i32, i32, i8* }
+
+@g2 = external unnamed_addr constant [9 x i8], align 1
+@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+
+define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+ br label %1
+
+; <label>:1
+ br label %2
+
+; <label>:2
+ ret i32 undef
+}
+
+define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+ %1 = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+ %2 = load i32* %1, align 4
+ ret i32 %2
+}
+
+define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+ ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
+}
+
+define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+ ret i32* undef
+}
+
+define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+ %1 = getelementptr inbounds %.qux.2585* %this, i32 0
+ ret i32* undef
+}
+
+define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+ %1 = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+ %2 = load i8** %1, align 4
+ ret i8* %2
+}
diff --git a/test/Transforms/MergeFunc/inttoptr.ll b/test/Transforms/MergeFunc/inttoptr.ll
new file mode 100644
index 0000000000000..93250fa8ed1a7
--- /dev/null
+++ b/test/Transforms/MergeFunc/inttoptr.ll
@@ -0,0 +1,55 @@
+; RUN: opt -mergefunc -S < %s | FileCheck %s
+; PR15185
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32-S128"
+target triple = "i386-pc-linux-gnu"
+
+%.qux.2496 = type { i32, %.qux.2497 }
+%.qux.2497 = type { i8, i32 }
+%.qux.2585 = type { i32, i32, i8* }
+
+@g2 = external unnamed_addr constant [9 x i8], align 1
+@g3 = internal hidden unnamed_addr constant [1 x i8*] [i8* bitcast (i8* (%.qux.2585*)* @func35 to i8*)]
+
+define internal hidden i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
+bb:
+ br label %bb1
+
+bb1: ; preds = %bb
+ br label %bb2
+
+bb2: ; preds = %bb1
+ ret i32 undef
+}
+
+define internal hidden i32 @func10(%.qux.2496* nocapture %this) align 2 {
+bb:
+ %tmp = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+ %tmp1 = load i32* %tmp, align 4
+ ret i32 %tmp1
+}
+
+define internal hidden i8* @func29(i32* nocapture %this) align 2 {
+bb:
+ ret i8* getelementptr inbounds ([9 x i8]* @g2, i32 0, i32 0)
+}
+
+define internal hidden i32* @func33(%.qux.2585* nocapture %this) align 2 {
+bb:
+ ret i32* undef
+}
+
+define internal hidden i32* @func34(%.qux.2585* nocapture %this) align 2 {
+bb:
+ %tmp = getelementptr inbounds %.qux.2585* %this, i32 0
+ ret i32* undef
+}
+
+define internal hidden i8* @func35(%.qux.2585* nocapture %this) align 2 {
+bb:
+; CHECK: %[[V2:.+]] = bitcast %.qux.2585* %{{.*}} to %.qux.2496*
+; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496* %[[V2]])
+; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
+ %tmp = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+ %tmp1 = load i8** %tmp, align 4
+ ret i8* %tmp1
+}
diff --git a/test/Transforms/MergeFunc/vector.ll b/test/Transforms/MergeFunc/vector.ll
index dba5fa349aba8..56f74e65c60de 100644
--- a/test/Transforms/MergeFunc/vector.ll
+++ b/test/Transforms/MergeFunc/vector.ll
@@ -22,7 +22,7 @@ target triple = "x86_64-unknown-linux-gnu"
define linkonce_odr void @_ZNSt6vectorIlSaIlEED1Ev(%"class.std::vector"* nocapture %this) unnamed_addr align 2 {
entry:
%tmp2.i.i = bitcast %"class.std::vector"* %this to i64**
- %tmp3.i.i = load i64** %tmp2.i.i, align 8, !tbaa !0
+ %tmp3.i.i = load i64** %tmp2.i.i, align 8
%tobool.i.i.i = icmp eq i64* %tmp3.i.i, null
br i1 %tobool.i.i.i, label %_ZNSt6vectorIlSaIlEED2Ev.exit, label %if.then.i.i.i
@@ -40,7 +40,7 @@ declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
define linkonce_odr void @_ZNSt6vectorIPvSaIS0_EED1Ev(%"class.std::vector"* nocapture %this) unnamed_addr align 2 {
entry:
%tmp2.i.i = bitcast %"class.std::vector"* %this to i8***
- %tmp3.i.i = load i8*** %tmp2.i.i, align 8, !tbaa !0
+ %tmp3.i.i = load i8*** %tmp2.i.i, align 8
%tobool.i.i.i = icmp eq i8** %tmp3.i.i, null
br i1 %tobool.i.i.i, label %_ZNSt6vectorIPvSaIS0_EED2Ev.exit, label %if.then.i.i.i
@@ -70,8 +70,3 @@ declare void @_ZNSt6vectorIlSaIlEE13_M_insert_auxEN9__gnu_cxx17__normal_iterator
declare void @_GLOBAL__I_a()
declare %1 @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
-
-!0 = metadata !{metadata !"any pointer", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA", null}
-!3 = metadata !{metadata !"long", metadata !1}
diff --git a/test/Transforms/ObjCARC/apelim.ll b/test/Transforms/ObjCARC/apelim.ll
index 4541b3f2fdf35..14412c6fc9c79 100644
--- a/test/Transforms/ObjCARC/apelim.ll
+++ b/test/Transforms/ObjCARC/apelim.ll
@@ -26,7 +26,7 @@ entry:
ret void
}
-; CHECK: define internal void @_GLOBAL__I_x()
+; CHECK: define internal void @_GLOBAL__I_x() {
; CHECK-NOT: @objc
; CHECK: }
define internal void @_GLOBAL__I_x() {
@@ -37,7 +37,7 @@ entry:
ret void
}
-; CHECK: define internal void @_GLOBAL__I_y()
+; CHECK: define internal void @_GLOBAL__I_y() {
; CHECK: %0 = call i8* @objc_autoreleasePoolPush() [[NUW:#[0-9]+]]
; CHECK: call void @objc_autoreleasePoolPop(i8* %0) [[NUW]]
; CHECK: }
diff --git a/test/Transforms/ObjCARC/arc-annotations.ll b/test/Transforms/ObjCARC/arc-annotations.ll
index 4c56b4a3def93..c0dea4b1b6a07 100644
--- a/test/Transforms/ObjCARC/arc-annotations.ll
+++ b/test/Transforms/ObjCARC/arc-annotations.ll
@@ -30,25 +30,25 @@ declare i8* @returner()
; CHECK: define void @test0(
; CHECK: entry:
; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: %0 = tail call i8* @objc_retain(i8* %a) #0, !llvm.arc.annotation.bottomup !0, !llvm.arc.annotation.topdown !1
+; CHECK: %0 = tail call i8* @objc_retain(i8* %a) #0, !llvm.arc.annotation.bottomup ![[ANN0:[0-9]+]], !llvm.arc.annotation.topdown ![[ANN1:[0-9]+]]
; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Use)
; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
; CHECK: t:
; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: store float 2.000000e+00, float* %b, !llvm.arc.annotation.bottomup !2
+; CHECK: store float 2.000000e+00, float* %b, !llvm.arc.annotation.bottomup ![[ANN2:[0-9]+]]
; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
; CHECK: f:
; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: store i32 7, i32* %x, !llvm.arc.annotation.bottomup !2
+; CHECK: store i32 7, i32* %x, !llvm.arc.annotation.bottomup ![[ANN2]]
; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
; CHECK: return:
; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Release)
-; CHECK: call void @objc_release(i8* %c) #0, !llvm.arc.annotation.bottomup !3, !llvm.arc.annotation.topdown !4
+; CHECK: call void @objc_release(i8* %c) #0, !llvm.arc.annotation.bottomup ![[ANN3:[0-9]+]], !llvm.arc.annotation.topdown ![[ANN4:[0-9]+]]
; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
; CHECK: }
define void @test0(i32* %x, i1 %p) nounwind {
@@ -73,235 +73,11 @@ return:
ret void
}
-; Like test0 but the release isn't always executed when the retain is,
-; so the optimization is not safe.
-
-; TODO: Make the objc_release's argument be %0.
-
-; CHECK: define void @test1(
-; CHECK: entry:
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: %0 = tail call i8* @objc_retain(i8* %a) #0, !llvm.arc.annotation.bottomup !5, !llvm.arc.annotation.topdown !6
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: t:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: store float 2.000000e+00, float* %b, !llvm.arc.annotation.bottomup !7
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: f:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: call void @callee(), !llvm.arc.annotation.topdown !8
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_CanRelease)
-; CHECK: return:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Release)
-; CHECK: call void @objc_release(i8* %c) #0, !llvm.arc.annotation.bottomup !9
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: alt_return:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: }
-define void @test1(i32* %x, i1 %p, i1 %q) nounwind {
-entry:
- %a = bitcast i32* %x to i8*
- %0 = call i8* @objc_retain(i8* %a) nounwind
- br i1 %p, label %t, label %f
-
-t:
- store i8 3, i8* %a
- %b = bitcast i32* %x to float*
- store float 2.0, float* %b
- br label %return
-
-f:
- store i32 7, i32* %x
- call void @callee()
- br i1 %q, label %return, label %alt_return
-
-return:
- %c = bitcast i32* %x to i8*
- call void @objc_release(i8* %c) nounwind
- ret void
-
-alt_return:
- ret void
-}
-
-; Don't do partial elimination into two different CFG diamonds.
-
-; CHECK: define void @test1b(
-; CHECK: entry:
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: %0 = tail call i8* @objc_retain(i8* %x) #0, !llvm.arc.annotation.bottomup !10, !llvm.arc.annotation.topdown !11
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: if.then:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_CanRelease)
-; CHECK: tail call void @callee(), !llvm.arc.annotation.bottomup !12, !llvm.arc.annotation.topdown !13
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Use)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_CanRelease)
-; CHECK: if.end:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_CanRelease)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Use)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_CanRelease)
-; CHECK: if.then3:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_CanRelease)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: tail call void @use_pointer(i8* %x), !llvm.arc.annotation.bottomup !14, !llvm.arc.annotation.topdown !15
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_MovableRelease)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Use)
-; CHECK: if.end5:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_MovableRelease)
-; CHECK: tail call void @objc_release(i8* %x) #0, !clang.imprecise_release !16, !llvm.arc.annotation.bottomup !17
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: }
-define void @test1b(i8* %x, i1 %p, i1 %q) {
-entry:
- tail call i8* @objc_retain(i8* %x) nounwind
- br i1 %p, label %if.then, label %if.end
-
-if.then: ; preds = %entry
- tail call void @callee()
- br label %if.end
-
-if.end: ; preds = %if.then, %entry
- br i1 %q, label %if.then3, label %if.end5
-
-if.then3: ; preds = %if.end
- tail call void @use_pointer(i8* %x)
- br label %if.end5
-
-if.end5: ; preds = %if.then3, %if.end
- tail call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
- ret void
-}
-
-; Like test0 but the pointer is passed to an intervening call,
-; so the optimization is not safe.
-
-; CHECK: define void @test2(
-; CHECK: entry:
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: %e = tail call i8* @objc_retain(i8* %a) #0, !llvm.arc.annotation.bottomup !18, !llvm.arc.annotation.topdown !19
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_CanRelease)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: t:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Use)
-; CHECK: store float 2.000000e+00, float* %b, !llvm.arc.annotation.bottomup !20
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: f:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_CanRelease)
-; CHECK: call void @use_pointer(i8* %e), !llvm.arc.annotation.bottomup !21, !llvm.arc.annotation.topdown !22
-; CHECK: store float 3.000000e+00, float* %d, !llvm.arc.annotation.bottomup !20, !llvm.arc.annotation.topdown !23
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Use)
-; CHECK: return:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Use)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Release)
-; CHECK: call void @objc_release(i8* %c) #0, !llvm.arc.annotation.bottomup !24, !llvm.arc.annotation.topdown !25
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: }
-define void @test2(i32* %x, i1 %p) nounwind {
-entry:
- %a = bitcast i32* %x to i8*
- %e = call i8* @objc_retain(i8* %a) nounwind
- br i1 %p, label %t, label %f
-
-t:
- store i8 3, i8* %a
- %b = bitcast i32* %x to float*
- store float 2.0, float* %b
- br label %return
-
-f:
- store i32 7, i32* %x
- call void @use_pointer(i8* %e)
- %d = bitcast i32* %x to float*
- store float 3.0, float* %d
- br label %return
-
-return:
- %c = bitcast i32* %x to i8*
- call void @objc_release(i8* %c) nounwind
- ret void
-}
-
-; Like test0 but the release is in a loop,
-; so the optimization is not safe.
-
-; TODO: For now, assume this can't happen.
-
-; CHECK: define void @test3(
-; CHECK: entry:
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_None)
-; CHECK: tail call i8* @objc_retain(i8* %a) #0, !llvm.arc.annotation.bottomup !26, !llvm.arc.annotation.topdown !27
-; CHECK: call void @llvm.arc.annotation.bottomup.bbend(i8** @x, i8** @S_Release)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_Retain)
-; CHECK: loop:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_Retain)
-; CHECK: call void @llvm.arc.annotation.bottomup.bbstart(i8** @x, i8** @S_Release)
-; CHECK: call void @objc_release(i8* %c) #0, !llvm.arc.annotation.bottomup !28, !llvm.arc.annotation.topdown !29
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: return:
-; CHECK: call void @llvm.arc.annotation.topdown.bbstart(i8** @x, i8** @S_None)
-; CHECK: call void @llvm.arc.annotation.topdown.bbend(i8** @x, i8** @S_None)
-; CHECK: }
-define void @test3(i32* %x, i1* %q) nounwind {
-entry:
- %a = bitcast i32* %x to i8*
- %0 = call i8* @objc_retain(i8* %a) nounwind
- br label %loop
-
-loop:
- %c = bitcast i32* %x to i8*
- call void @objc_release(i8* %c) nounwind
- %j = load volatile i1* %q
- br i1 %j, label %loop, label %return
-
-return:
- ret void
-}
-
!0 = metadata !{}
-; CHECK: !0 = metadata !{metadata !"(test0,%x)", metadata !"S_Use", metadata !"S_None"}
-; CHECK: !1 = metadata !{metadata !"(test0,%x)", metadata !"S_None", metadata !"S_Retain"}
-; CHECK: !2 = metadata !{metadata !"(test0,%x)", metadata !"S_Release", metadata !"S_Use"}
-; CHECK: !3 = metadata !{metadata !"(test0,%x)", metadata !"S_None", metadata !"S_Release"}
-; CHECK: !4 = metadata !{metadata !"(test0,%x)", metadata !"S_Retain", metadata !"S_None"}
-; CHECK: !5 = metadata !{metadata !"(test1,%x)", metadata !"S_None", metadata !"S_None"}
-; CHECK: !6 = metadata !{metadata !"(test1,%x)", metadata !"S_None", metadata !"S_Retain"}
-; CHECK: !7 = metadata !{metadata !"(test1,%x)", metadata !"S_Release", metadata !"S_Use"}
-; CHECK: !8 = metadata !{metadata !"(test1,%x)", metadata !"S_Retain", metadata !"S_CanRelease"}
-; CHECK: !9 = metadata !{metadata !"(test1,%x)", metadata !"S_None", metadata !"S_Release"}
-; CHECK: !10 = metadata !{metadata !"(test1b,%x)", metadata !"S_None", metadata !"S_None"}
-; CHECK: !11 = metadata !{metadata !"(test1b,%x)", metadata !"S_None", metadata !"S_Retain"}
-; CHECK: !12 = metadata !{metadata !"(test1b,%x)", metadata !"S_Use", metadata !"S_CanRelease"}
-; CHECK: !13 = metadata !{metadata !"(test1b,%x)", metadata !"S_Retain", metadata !"S_CanRelease"}
-; CHECK: !14 = metadata !{metadata !"(test1b,%x)", metadata !"S_MovableRelease", metadata !"S_Use"}
-; CHECK: !15 = metadata !{metadata !"(test1b,%x)", metadata !"S_CanRelease", metadata !"S_Use"}
-; CHECK: !16 = metadata !{}
-; CHECK: !17 = metadata !{metadata !"(test1b,%x)", metadata !"S_None", metadata !"S_MovableRelease"}
-; CHECK: !18 = metadata !{metadata !"(test2,%x)", metadata !"S_CanRelease", metadata !"S_None"}
-; CHECK: !19 = metadata !{metadata !"(test2,%x)", metadata !"S_None", metadata !"S_Retain"}
-; CHECK: !20 = metadata !{metadata !"(test2,%x)", metadata !"S_Release", metadata !"S_Use"}
-; CHECK: !21 = metadata !{metadata !"(test2,%x)", metadata !"S_Use", metadata !"S_CanRelease"}
-; CHECK: !22 = metadata !{metadata !"(test2,%x)", metadata !"S_Retain", metadata !"S_CanRelease"}
-; CHECK: !23 = metadata !{metadata !"(test2,%x)", metadata !"S_CanRelease", metadata !"S_Use"}
-; CHECK: !24 = metadata !{metadata !"(test2,%x)", metadata !"S_None", metadata !"S_Release"}
-; CHECK: !25 = metadata !{metadata !"(test2,%x)", metadata !"S_Use", metadata !"S_None"}
-; CHECK: !26 = metadata !{metadata !"(test3,%x)", metadata !"S_Release", metadata !"S_None"}
-; CHECK: !27 = metadata !{metadata !"(test3,%x)", metadata !"S_None", metadata !"S_Retain"}
-; CHECK: !28 = metadata !{metadata !"(test3,%x)", metadata !"S_None", metadata !"S_Release"}
-; CHECK: !29 = metadata !{metadata !"(test3,%x)", metadata !"S_Retain", metadata !"S_None"}
+; CHECK: ![[ANN0]] = metadata !{metadata !"(test0,%x)", metadata !"S_Use", metadata !"S_None"}
+; CHECK: ![[ANN1]] = metadata !{metadata !"(test0,%x)", metadata !"S_None", metadata !"S_Retain"}
+; CHECK: ![[ANN2]] = metadata !{metadata !"(test0,%x)", metadata !"S_Release", metadata !"S_Use"}
+; CHECK: ![[ANN3]] = metadata !{metadata !"(test0,%x)", metadata !"S_None", metadata !"S_Release"}
+; CHECK: ![[ANN4]] = metadata !{metadata !"(test0,%x)", metadata !"S_Retain", metadata !"S_None"}
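; A note on the five lines above: the bracketed names are FileCheck variables.
; A capture somewhere earlier in the file (outside this hunk; the lines below
; are an illustrative sketch, not a quote) binds the number the compiler
; assigns:
;
;   CHECK: tail call i8* @objc_retain(i8* %a) {{.*}} !llvm.arc.annotation.topdown ![[ANN1:[0-9]+]]
;   CHECK: ![[ANN1]] = metadata !{metadata !"(test0,%x)", metadata !"S_None", metadata !"S_Retain"}
;
; ![[ANN1:[0-9]+]] captures the metadata number and ![[ANN1]] reuses it, so the
; checks no longer depend on the compiler's numbering. That is what made the
; hard-coded !0..!29 lines deletable. The same idiom recurs below for attribute
; groups: [[NUW:#[0-9]+]] captures a group number, and a closing
; "CHECK: attributes [[NUW]] = { nounwind }" verifies its contents.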
diff --git a/test/Transforms/ObjCARC/basic.ll b/test/Transforms/ObjCARC/basic.ll
index 828a8a701127c..ca1279206591d 100644
--- a/test/Transforms/ObjCARC/basic.ll
+++ b/test/Transforms/ObjCARC/basic.ll
@@ -20,6 +20,7 @@ declare void @callee()
declare void @callee_fnptr(void ()*)
declare void @invokee()
declare i8* @returner()
+declare void @bar(i32 ()*)
declare void @llvm.dbg.value(metadata, i64, metadata)
@@ -28,10 +29,11 @@ declare i8* @objc_msgSend(i8*, i8*, ...)
; Simple retain+release pair deletion, with some intervening control
; flow and harmless instructions.
-; CHECK: define void @test0(
-; CHECK-NOT: @objc_
+; CHECK: define void @test0_precise(i32* %x, i1 %p) [[NUW:#[0-9]+]] {
+; CHECK: @objc_retain
+; CHECK: @objc_release
; CHECK: }
-define void @test0(i32* %x, i1 %p) nounwind {
+define void @test0_precise(i32* %x, i1 %p) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -53,16 +55,41 @@ return:
ret void
}
+; CHECK: define void @test0_imprecise(i32* %x, i1 %p) [[NUW]] {
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test0_imprecise(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ br label %return
+
+return:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+}
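
; The _precise/_imprecise pairs that follow throughout this file differ in a
; single detail; side by side, the two release forms used above:
;
;   call void @objc_release(i8* %c) nounwind
;   call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
;
; Without the metadata the release is precise: the object must be kept alive
; up to the release instruction itself, so the optimizer stays conservative
; whenever anything interesting intervenes. With it, the frontend promises the
; exact release point is not observable, which is what allows many of the
; retain/release pairs below to be matched up and deleted.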
+
; Like test0 but the release isn't always executed when the retain is,
; so the optimization is not safe.
; TODO: Make the objc_release's argument be %0.
-; CHECK: define void @test1(
+; CHECK: define void @test1_precise(i32* %x, i1 %p, i1 %q) [[NUW]] {
; CHECK: @objc_retain(i8* %a)
; CHECK: @objc_release
; CHECK: }
-define void @test1(i32* %x, i1 %p, i1 %q) nounwind {
+define void @test1_precise(i32* %x, i1 %p, i1 %q) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -88,9 +115,69 @@ alt_return:
ret void
}
+; CHECK: define void @test1_imprecise(i32* %x, i1 %p, i1 %q) [[NUW]] {
+; CHECK: @objc_retain(i8* %a)
+; CHECK: @objc_release
+; CHECK: }
+define void @test1_imprecise(i32* %x, i1 %p, i1 %q) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ call void @callee()
+ br i1 %q, label %return, label %alt_return
+
+return:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+
+alt_return:
+ ret void
+}
+
+
; Don't do partial elimination into two different CFG diamonds.
-; CHECK: define void @test1b(
+; CHECK: define void @test1b_precise(i8* %x, i1 %p, i1 %q) {
+; CHECK: entry:
+; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK-NOT: @objc_
+; CHECK: if.end5:
+; CHECK: tail call void @objc_release(i8* %x) [[NUW]]
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test1b_precise(i8* %x, i1 %p, i1 %q) {
+entry:
+ tail call i8* @objc_retain(i8* %x) nounwind
+ br i1 %p, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ tail call void @callee()
+ br label %if.end
+
+if.end: ; preds = %if.then, %entry
+ br i1 %q, label %if.then3, label %if.end5
+
+if.then3: ; preds = %if.end
+ tail call void @use_pointer(i8* %x)
+ br label %if.end5
+
+if.end5: ; preds = %if.then3, %if.end
+ tail call void @objc_release(i8* %x) nounwind
+ ret void
+}
+
+; CHECK: define void @test1b_imprecise(
; CHECK: entry:
; CHECK: tail call i8* @objc_retain(i8* %x) [[NUW:#[0-9]+]]
; CHECK-NOT: @objc_
@@ -98,7 +185,7 @@ alt_return:
; CHECK: tail call void @objc_release(i8* %x) [[NUW]], !clang.imprecise_release !0
; CHECK-NOT: @objc_
; CHECK: }
-define void @test1b(i8* %x, i1 %p, i1 %q) {
+define void @test1b_imprecise(i8* %x, i1 %p, i1 %q) {
entry:
tail call i8* @objc_retain(i8* %x) nounwind
br i1 %p, label %if.then, label %if.end
@@ -119,14 +206,15 @@ if.end5: ; preds = %if.then3, %if.end
ret void
}
+
; Like test0 but the pointer is passed to an intervening call,
; so the optimization is not safe.
-; CHECK: define void @test2(
+; CHECK: define void @test2_precise(
; CHECK: @objc_retain(i8* %a)
; CHECK: @objc_release
; CHECK: }
-define void @test2(i32* %x, i1 %p) nounwind {
+define void @test2_precise(i32* %x, i1 %p) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -151,16 +239,45 @@ return:
ret void
}
+; CHECK: define void @test2_imprecise(
+; CHECK: @objc_retain(i8* %a)
+; CHECK: @objc_release
+; CHECK: }
+define void @test2_imprecise(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ call void @use_pointer(i8* %0)
+ %d = bitcast i32* %x to float*
+ store float 3.0, float* %d
+ br label %return
+
+return:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
; Like test0 but the release is in a loop,
; so the optimization is not safe.
; TODO: For now, assume this can't happen.
-; CHECK: define void @test3(
+; CHECK: define void @test3_precise(
; TODO: @objc_retain(i8* %a)
; TODO: @objc_release
; CHECK: }
-define void @test3(i32* %x, i1* %q) nounwind {
+define void @test3_precise(i32* %x, i1* %q) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -176,16 +293,37 @@ return:
ret void
}
+; CHECK: define void @test3_imprecise(
+; TODO: @objc_retain(i8* %a)
+; TODO: @objc_release
+; CHECK: }
+define void @test3_imprecise(i32* %x, i1* %q) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br label %loop
+
+loop:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ %j = load volatile i1* %q
+ br i1 %j, label %loop, label %return
+
+return:
+ ret void
+}
+
+
; TODO: For now, assume this can't happen.
; Like test0 but the retain is in a loop,
; so the optimization is not safe.
-; CHECK: define void @test4(
+; CHECK: define void @test4_precise(
; TODO: @objc_retain(i8* %a)
; TODO: @objc_release
; CHECK: }
-define void @test4(i32* %x, i1* %q) nounwind {
+define void @test4_precise(i32* %x, i1* %q) nounwind {
entry:
br label %loop
@@ -201,14 +339,35 @@ return:
ret void
}
+; CHECK: define void @test4_imprecise(
+; TODO: @objc_retain(i8* %a)
+; TODO: @objc_release
+; CHECK: }
+define void @test4_imprecise(i32* %x, i1* %q) nounwind {
+entry:
+ br label %loop
+
+loop:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ %j = load volatile i1* %q
+ br i1 %j, label %loop, label %return
+
+return:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+
; Like test0 but the pointer is conditionally passed to an intervening call,
; so the optimization is not safe.
-; CHECK: define void @test5(
+; CHECK: define void @test5a(
; CHECK: @objc_retain(i8*
; CHECK: @objc_release
; CHECK: }
-define void @test5(i32* %x, i1 %q, i8* %y) nounwind {
+define void @test5a(i32* %x, i1 %q, i8* %y) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -220,13 +379,98 @@ entry:
ret void
}
+; CHECK: define void @test5b(
+; CHECK: @objc_retain(i8*
+; CHECK: @objc_release
+; CHECK: }
+define void @test5b(i32* %x, i1 %q, i8* %y) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ %s = select i1 %q, i8* %y, i8* %0
+ call void @use_pointer(i8* %s)
+ store i32 7, i32* %x
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+
; retain+release pair deletion, where the release happens on two different
; flow paths.
-; CHECK: define void @test6(
+; CHECK: define void @test6a(
+; CHECK: entry:
+; CHECK: tail call i8* @objc_retain(
+; CHECK: t:
+; CHECK: call void @objc_release(
+; CHECK: f:
+; CHECK: call void @objc_release(
+; CHECK: return:
+; CHECK: }
+define void @test6a(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ %ct = bitcast i32* %x to i8*
+ call void @objc_release(i8* %ct) nounwind
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ call void @callee()
+ %cf = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cf) nounwind
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test6b(
; CHECK-NOT: @objc_
; CHECK: }
-define void @test6(i32* %x, i1 %p) nounwind {
+define void @test6b(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ %ct = bitcast i32* %x to i8*
+ call void @objc_release(i8* %ct) nounwind, !clang.imprecise_release !0
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ call void @callee()
+ %cf = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cf) nounwind, !clang.imprecise_release !0
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test6c(
+; CHECK: entry:
+; CHECK: tail call i8* @objc_retain(
+; CHECK: t:
+; CHECK: call void @objc_release(
+; CHECK: f:
+; CHECK: call void @objc_release(
+; CHECK: return:
+; CHECK: }
+define void @test6c(i32* %x, i1 %p) nounwind {
entry:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
@@ -244,6 +488,40 @@ f:
store i32 7, i32* %x
call void @callee()
%cf = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cf) nounwind, !clang.imprecise_release !0
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test6d(
+; CHECK: entry:
+; CHECK: tail call i8* @objc_retain(
+; CHECK: t:
+; CHECK: call void @objc_release(
+; CHECK: f:
+; CHECK: call void @objc_release(
+; CHECK: return:
+; CHECK: }
+define void @test6d(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ br i1 %p, label %t, label %f
+
+t:
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ %ct = bitcast i32* %x to i8*
+ call void @objc_release(i8* %ct) nounwind, !clang.imprecise_release !0
+ br label %return
+
+f:
+ store i32 7, i32* %x
+ call void @callee()
+ %cf = bitcast i32* %x to i8*
call void @objc_release(i8* %cf) nounwind
br label %return
@@ -251,11 +529,19 @@ return:
ret void
}
+
; retain+release pair deletion, where the retain happens on two different
; flow paths.
-; CHECK: define void @test7(
-; CHECK-NOT: @objc_
+; CHECK: define void @test7(
+; CHECK: entry:
+; CHECK-NOT: objc_
+; CHECK: t:
+; CHECK: call i8* @objc_retain
+; CHECK: f:
+; CHECK: call i8* @objc_retain
+; CHECK: return:
+; CHECK: call void @objc_release
; CHECK: }
define void @test7(i32* %x, i1 %p) nounwind {
entry:
@@ -281,17 +567,44 @@ return:
ret void
}
+; CHECK: define void @test7b(
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test7b(i32* %x, i1 %p) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ br i1 %p, label %t, label %f
+
+t:
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %return
+
+f:
+ %1 = call i8* @objc_retain(i8* %a) nounwind
+ store i32 7, i32* %x
+ call void @callee()
+ br label %return
+
+return:
+ %c = bitcast i32* %x to i8*
+ call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
; Like test7, but there's a retain/retainBlock mismatch. Don't delete!
-; CHECK: define void @test7b
+; CHECK: define void @test7c
; CHECK: t:
-; CHECK: call i8* @objc_retainBlock
+; CHECK: call i8* @objc_retainBlock
; CHECK: f:
-; CHECK: call i8* @objc_retain
+; CHECK: call i8* @objc_retain
; CHECK: return:
-; CHECK: call void @objc_release
+; CHECK: call void @objc_release
; CHECK: }
-define void @test7b(i32* %x, i1 %p) nounwind {
+define void @test7c(i32* %x, i1 %p) nounwind {
entry:
%a = bitcast i32* %x to i8*
br i1 %p, label %t, label %f
@@ -318,10 +631,106 @@ return:
; retain+release pair deletion, where the retain and release both happen on
; different flow paths. Wild!
-; CHECK: define void @test8(
+; CHECK: define void @test8a(
+; CHECK: entry:
+; CHECK: t:
+; CHECK: @objc_retain
+; CHECK: f:
+; CHECK: @objc_retain
+; CHECK: mid:
+; CHECK: u:
+; CHECK: @objc_release
+; CHECK: g:
+; CHECK: @objc_release
+; CHECK: return:
+; CHECK: }
+define void @test8a(i32* %x, i1 %p, i1 %q) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ br i1 %p, label %t, label %f
+
+t:
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %mid
+
+f:
+ %1 = call i8* @objc_retain(i8* %a) nounwind
+ store i32 7, i32* %x
+ br label %mid
+
+mid:
+ br i1 %q, label %u, label %g
+
+u:
+ call void @callee()
+ %cu = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cu) nounwind
+ br label %return
+
+g:
+ %cg = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cg) nounwind
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test8b(
; CHECK-NOT: @objc_
; CHECK: }
-define void @test8(i32* %x, i1 %p, i1 %q) nounwind {
+define void @test8b(i32* %x, i1 %p, i1 %q) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ br i1 %p, label %t, label %f
+
+t:
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %mid
+
+f:
+ %1 = call i8* @objc_retain(i8* %a) nounwind
+ store i32 7, i32* %x
+ br label %mid
+
+mid:
+ br i1 %q, label %u, label %g
+
+u:
+ call void @callee()
+ %cu = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cu) nounwind, !clang.imprecise_release !0
+ br label %return
+
+g:
+ %cg = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cg) nounwind, !clang.imprecise_release !0
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test8c(
+; CHECK: entry:
+; CHECK: t:
+; CHECK: @objc_retain
+; CHECK: f:
+; CHECK: @objc_retain
+; CHECK: mid:
+; CHECK: u:
+; CHECK: @objc_release
+; CHECK: g:
+; CHECK: @objc_release
+; CHECK: return:
+; CHECK: }
+define void @test8c(i32* %x, i1 %p, i1 %q) nounwind {
entry:
%a = bitcast i32* %x to i8*
br i1 %p, label %t, label %f
@@ -349,6 +758,54 @@ u:
g:
%cg = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cg) nounwind, !clang.imprecise_release !0
+ br label %return
+
+return:
+ ret void
+}
+
+; CHECK: define void @test8d(
+; CHECK: entry:
+; CHECK: t:
+; CHECK: @objc_retain
+; CHECK: f:
+; CHECK: @objc_retain
+; CHECK: mid:
+; CHECK: u:
+; CHECK: @objc_release
+; CHECK: g:
+; CHECK: @objc_release
+; CHECK: return:
+; CHECK: }
+define void @test8d(i32* %x, i1 %p, i1 %q) nounwind {
+entry:
+ %a = bitcast i32* %x to i8*
+ br i1 %p, label %t, label %f
+
+t:
+ %0 = call i8* @objc_retain(i8* %a) nounwind
+ store i8 3, i8* %a
+ %b = bitcast i32* %x to float*
+ store float 2.0, float* %b
+ br label %mid
+
+f:
+ %1 = call i8* @objc_retain(i8* %a) nounwind
+ store i32 7, i32* %x
+ br label %mid
+
+mid:
+ br i1 %q, label %u, label %g
+
+u:
+ call void @callee()
+ %cu = bitcast i32* %x to i8*
+ call void @objc_release(i8* %cu) nounwind, !clang.imprecise_release !0
+ br label %return
+
+g:
+ %cg = bitcast i32* %x to i8*
call void @objc_release(i8* %cg) nounwind
br label %return
@@ -486,6 +943,7 @@ entry:
; CHECK-NEXT: @use_pointer
; CHECK-NEXT: @use_pointer
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test13b(i8* %x, i64 %n) {
entry:
call i8* @objc_retain(i8* %x) nounwind
@@ -527,6 +985,7 @@ entry:
; CHECK-NEXT: @use_pointer
; CHECK-NEXT: @use_pointer
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test13d(i8* %x, i64 %n) {
entry:
call i8* @objc_retain(i8* %x) nounwind
@@ -583,7 +1042,9 @@ entry:
; CHECK: define void @test15b
; CHECK-NEXT: entry:
+; CHECK-NEXT: @objc_retain
; CHECK-NEXT: @objc_autorelease
+; CHECK-NEXT: @objc_release
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @test15b(i8* %x, i64 %n) {
@@ -594,13 +1055,60 @@ entry:
ret void
}
+; CHECK: define void @test15c
+; CHECK-NEXT: entry:
+; CHECK-NEXT: @objc_autorelease
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+define void @test15c(i8* %x, i64 %n) {
+entry:
+ call i8* @objc_retain(i8* %x) nounwind
+ call i8* @objc_autorelease(i8* %x) nounwind
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
; Retain+release pairs in diamonds, all dominated by a retain.
-; CHECK: define void @test16(
+; CHECK: define void @test16a(
+; CHECK: @objc_retain(i8* %x)
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test16a(i1 %a, i1 %b, i8* %x) {
+entry:
+ call i8* @objc_retain(i8* %x) nounwind
+ br i1 %a, label %red, label %orange
+
+red:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+orange:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+yellow:
+ call void @use_pointer(i8* %x)
+ call void @use_pointer(i8* %x)
+ br i1 %b, label %green, label %blue
+
+green:
+ call void @objc_release(i8* %x) nounwind
+ br label %purple
+
+blue:
+ call void @objc_release(i8* %x) nounwind
+ br label %purple
+
+purple:
+ ret void
+}
+
+; CHECK: define void @test16b(
; CHECK: @objc_retain(i8* %x)
; CHECK-NOT: @objc
; CHECK: }
-define void @test16(i1 %a, i1 %b, i8* %x) {
+define void @test16b(i1 %a, i1 %b, i8* %x) {
entry:
call i8* @objc_retain(i8* %x) nounwind
br i1 %a, label %red, label %orange
@@ -619,17 +1127,86 @@ yellow:
br i1 %b, label %green, label %blue
green:
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ br label %purple
+
+blue:
call void @objc_release(i8* %x) nounwind
br label %purple
+purple:
+ ret void
+}
+
+; CHECK: define void @test16c(
+; CHECK: @objc_retain(i8* %x)
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test16c(i1 %a, i1 %b, i8* %x) {
+entry:
+ call i8* @objc_retain(i8* %x) nounwind
+ br i1 %a, label %red, label %orange
+
+red:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+orange:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+yellow:
+ call void @use_pointer(i8* %x)
+ call void @use_pointer(i8* %x)
+ br i1 %b, label %green, label %blue
+
+green:
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ br label %purple
+
blue:
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ br label %purple
+
+purple:
+ ret void
+}
+
+; CHECK: define void @test16d(
+; CHECK: @objc_retain(i8* %x)
+; CHECK-NOT: @objc
+; CHECK: }
+define void @test16d(i1 %a, i1 %b, i8* %x) {
+entry:
+ call i8* @objc_retain(i8* %x) nounwind
+ br i1 %a, label %red, label %orange
+
+red:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+orange:
+ call i8* @objc_retain(i8* %x) nounwind
+ br label %yellow
+
+yellow:
+ call void @use_pointer(i8* %x)
+ call void @use_pointer(i8* %x)
+ br i1 %b, label %green, label %blue
+
+green:
call void @objc_release(i8* %x) nounwind
br label %purple
+blue:
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ br label %purple
+
purple:
ret void
}
+
; Retain+release pairs in diamonds, all post-dominated by a release.
; CHECK: define void @test17(
@@ -720,6 +1297,7 @@ entry:
; CHECK: define void @test20(
; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %tmp) [[NUW]]
; CHECK-NEXT: invoke
+; CHECK: }
define void @test20(double* %self) {
if.then12:
%tmp = bitcast double* %self to i8*
@@ -747,6 +1325,7 @@ if.end: ; preds = %invoke.cont23
; CHECK: define i8* @test21(
; CHECK: call i8* @returner()
; CHECK-NEXT: ret i8* %call
+; CHECK-NEXT: }
define i8* @test21() {
entry:
%call = call i8* @returner()
@@ -799,7 +1378,7 @@ entry:
; Don't optimize objc_retainBlock, but do strength reduce it.
-; CHECK: define void @test23b
+; CHECK: define void @test23b(i8* %p) {
; CHECK: @objc_retain
; CHECK: @objc_release
; CHECK: }
@@ -1163,12 +1742,16 @@ done:
ret void
}
-; Delete retain,release if there's just a possible dec.
+; Delete retain,release if there's just a possible dec and we have imprecise
+; releases.
-; CHECK: define void @test34(
-; CHECK-NOT: @objc_
+; CHECK: define void @test34a(
+; CHECK: call i8* @objc_retain
+; CHECK: true:
+; CHECK: done:
+; CHECK: call void @objc_release
; CHECK: }
-define void @test34(i8* %p, i1 %x, i8* %y) {
+define void @test34a(i8* %p, i1 %x, i8* %y) {
entry:
%f0 = call i8* @objc_retain(i8* %p)
br i1 %x, label %true, label %done
@@ -1184,12 +1767,38 @@ done:
ret void
}
-; Delete retain,release if there's just a use.
-
-; CHECK: define void @test35(
+; CHECK: define void @test34b(
; CHECK-NOT: @objc_
; CHECK: }
-define void @test35(i8* %p, i1 %x, i8* %y) {
+define void @test34b(i8* %p, i1 %x, i8* %y) {
+entry:
+ %f0 = call i8* @objc_retain(i8* %p)
+ br i1 %x, label %true, label %done
+
+true:
+ call void @callee()
+ br label %done
+
+done:
+ %g = bitcast i8* %p to i8*
+ %h = getelementptr i8* %g, i64 0
+ call void @objc_release(i8* %g), !clang.imprecise_release !0
+ ret void
+}
+
+
+; Delete retain,release if there's just a use and we do not have a precise
+; release.
+
+; Precise.
+; CHECK: define void @test35a(
+; CHECK: entry:
+; CHECK: call i8* @objc_retain
+; CHECK: true:
+; CHECK: done:
+; CHECK: call void @objc_release
+; CHECK: }
+define void @test35a(i8* %p, i1 %x, i8* %y) {
entry:
%f0 = call i8* @objc_retain(i8* %p)
br i1 %x, label %true, label %done
@@ -1205,16 +1814,36 @@ done:
ret void
}
-; Delete a retain,release if there's no actual use.
-
-; CHECK: define void @test36(
+; Imprecise.
+; CHECK: define void @test35b(
; CHECK-NOT: @objc_
+; CHECK: }
+define void @test35b(i8* %p, i1 %x, i8* %y) {
+entry:
+ %f0 = call i8* @objc_retain(i8* %p)
+ br i1 %x, label %true, label %done
+
+true:
+ %v = icmp eq i8* %p, %y
+ br label %done
+
+done:
+ %g = bitcast i8* %p to i8*
+ %h = getelementptr i8* %g, i64 0
+ call void @objc_release(i8* %g), !clang.imprecise_release !0
+ ret void
+}
+
+; Delete a retain,release if there's no actual use and we do not have a precise
+; release.
+
+; CHECK: define void @test36a(
+; CHECK: @objc_retain
; CHECK: call void @callee()
; CHECK-NOT: @objc_
; CHECK: call void @callee()
-; CHECK-NOT: @objc_
+; CHECK: @objc_release
; CHECK: }
-define void @test36(i8* %p) {
+define void @test36a(i8* %p) {
entry:
call i8* @objc_retain(i8* %p)
call void @callee()
@@ -1225,10 +1854,10 @@ entry:
; Like test36, but with metadata.
-; CHECK: define void @test37(
+; CHECK: define void @test36b(
; CHECK-NOT: @objc_
; CHECK: }
-define void @test37(i8* %p) {
+define void @test36b(i8* %p) {
entry:
call i8* @objc_retain(i8* %p)
call void @callee()
@@ -1439,6 +2068,7 @@ define void @test44(i8** %pp) {
; CHECK: call void @objc_release(i8* %q)
; CHECK: call void @use_pointer(i8* %p)
; CHECK: call void @objc_release(i8* %p)
+; CHECK: }
define void @test45(i8** %pp, i8** %qq) {
%p = load i8** %pp
%q = load i8** %qq
@@ -1455,6 +2085,7 @@ define void @test45(i8** %pp, i8** %qq) {
; CHECK: tail call i8* @objc_retain(i8* %p) [[NUW]]
; CHECK: true:
; CHECK: call i8* @objc_autorelease(i8* %p) [[NUW]]
+; CHECK: }
define void @test46(i8* %p, i1 %a) {
entry:
call i8* @objc_retain(i8* %p)
@@ -1474,6 +2105,7 @@ false:
; CHECK: define i8* @test47(
; CHECK-NOT: call
; CHECK: ret i8* %p
+; CHECK: }
define i8* @test47(i8* %p) nounwind {
%x = call i8* @objc_retainedObject(i8* %p)
ret i8* %x
@@ -1484,6 +2116,7 @@ define i8* @test47(i8* %p) nounwind {
; CHECK: define i8* @test48(
; CHECK-NOT: call
; CHECK: ret i8* %p
+; CHECK: }
define i8* @test48(i8* %p) nounwind {
%x = call i8* @objc_unretainedObject(i8* %p)
ret i8* %x
@@ -1494,32 +2127,51 @@ define i8* @test48(i8* %p) nounwind {
; CHECK: define i8* @test49(
; CHECK-NOT: call
; CHECK: ret i8* %p
+; CHECK: }
define i8* @test49(i8* %p) nounwind {
%x = call i8* @objc_unretainedPointer(i8* %p)
ret i8* %x
}
-; Do delete retain+release with intervening stores of the
-; address value.
+; Do delete retain+release with intervening stores of the address value if the
+; objc_release has !clang.imprecise_release metadata attached.
-; CHECK: define void @test50(
+; CHECK: define void @test50a(
+; CHECK-NEXT: call i8* @objc_retain
+; CHECK-NEXT: call void @callee
+; CHECK-NEXT: store
+; CHECK-NEXT: call void @objc_release
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+define void @test50a(i8* %p, i8** %pp) {
+ call i8* @objc_retain(i8* %p)
+ call void @callee()
+ store i8* %p, i8** %pp
+ call void @objc_release(i8* %p)
+ ret void
+}
+
+; CHECK: define void @test50b(
; CHECK-NOT: @objc_
; CHECK: }
-define void @test50(i8* %p, i8** %pp) {
+define void @test50b(i8* %p, i8** %pp) {
call i8* @objc_retain(i8* %p)
call void @callee()
store i8* %p, i8** %pp
- call void @objc_release(i8* %p)
+ call void @objc_release(i8* %p), !clang.imprecise_release !0
ret void
}
+
; Don't delete retain+release with intervening stores through the
; address value.
-; CHECK: define void @test51(
+; CHECK: define void @test51a(
; CHECK: call i8* @objc_retain(i8* %p)
; CHECK: call void @objc_release(i8* %p)
-define void @test51(i8* %p) {
+; CHECK: ret void
+; CHECK: }
+define void @test51a(i8* %p) {
call i8* @objc_retain(i8* %p)
call void @callee()
store i8 0, i8* %p
@@ -1527,15 +2179,30 @@ define void @test51(i8* %p) {
ret void
}
+; CHECK: define void @test51b(
+; CHECK: call i8* @objc_retain(i8* %p)
+; CHECK: call void @objc_release(i8* %p)
+; CHECK: ret void
+; CHECK: }
+define void @test51b(i8* %p) {
+ call i8* @objc_retain(i8* %p)
+ call void @callee()
+ store i8 0, i8* %p
+ call void @objc_release(i8* %p), !clang.imprecise_release !0
+ ret void
+}
+
; Don't delete retain+release with intervening use of a pointer of
; unknown provenance.
-; CHECK: define void @test52(
+; CHECK: define void @test52a(
; CHECK: call i8* @objc_retain
; CHECK: call void @callee()
; CHECK: call void @use_pointer(i8* %z)
; CHECK: call void @objc_release
-define void @test52(i8** %zz, i8** %pp) {
+; CHECK: ret void
+; CHECK: }
+define void @test52a(i8** %zz, i8** %pp) {
%p = load i8** %pp
%1 = call i8* @objc_retain(i8* %p)
call void @callee()
@@ -1545,6 +2212,23 @@ define void @test52(i8** %zz, i8** %pp) {
ret void
}
+; CHECK: define void @test52b(
+; CHECK: call i8* @objc_retain
+; CHECK: call void @callee()
+; CHECK: call void @use_pointer(i8* %z)
+; CHECK: call void @objc_release
+; CHECK: ret void
+; CHECK: }
+define void @test52b(i8** %zz, i8** %pp) {
+ %p = load i8** %pp
+ %1 = call i8* @objc_retain(i8* %p)
+ call void @callee()
+ %z = load i8** %zz
+ call void @use_pointer(i8* %z)
+ call void @objc_release(i8* %p), !clang.imprecise_release !0
+ ret void
+}
+
; Like test52, but the pointer has function type, so it's assumed to
; be not reference counted.
; Oops. That's wrong. Clang sometimes uses function types gratuitously.
@@ -1569,6 +2253,7 @@ define void @test53(void ()** %zz, i8** %pp) {
; CHECK: call i8* @returner()
; CHECK-NEXT: call void @objc_release(i8* %t) [[NUW]], !clang.imprecise_release !0
; CHECK-NEXT: ret void
+; CHECK: }
define void @test54() {
%t = call i8* @returner()
call i8* @objc_autorelease(i8* %t)
@@ -1697,19 +2382,78 @@ entry:
@constptr = external constant i8*
@something = external global i8*
-; CHECK: define void @test60(
-; CHECK-NOT: @objc_
+; We have a precise lifetime retain/release here. We cannot remove them since
+; @something is not constant.
+
+; CHECK: define void @test60a(
+; CHECK: call i8* @objc_retain
+; CHECK: call void @objc_release
+; CHECK: }
+define void @test60a() {
+ %t = load i8** @constptr
+ %s = load i8** @something
+ call i8* @objc_retain(i8* %s)
+ call void @callee()
+ call void @use_pointer(i8* %t)
+ call void @objc_release(i8* %s)
+ ret void
+}
+
+; CHECK: define void @test60b(
+; CHECK: call i8* @objc_retain
+; CHECK-NOT: call i8* @objc_retain
+; CHECK-NOT: call void @objc_release
; CHECK: }
-define void @test60() {
+define void @test60b() {
%t = load i8** @constptr
%s = load i8** @something
call i8* @objc_retain(i8* %s)
+ call i8* @objc_retain(i8* %s)
call void @callee()
call void @use_pointer(i8* %t)
call void @objc_release(i8* %s)
ret void
}
+; CHECK: define void @test60c(
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test60c() {
+ %t = load i8** @constptr
+ %s = load i8** @something
+ call i8* @objc_retain(i8* %s)
+ call void @callee()
+ call void @use_pointer(i8* %t)
+ call void @objc_release(i8* %s), !clang.imprecise_release !0
+ ret void
+}
+
+; CHECK: define void @test60d(
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test60d() {
+ %t = load i8** @constptr
+ %s = load i8** @something
+ call i8* @objc_retain(i8* %t)
+ call void @callee()
+ call void @use_pointer(i8* %s)
+ call void @objc_release(i8* %t)
+ ret void
+}
+
+; CHECK: define void @test60e(
+; CHECK-NOT: @objc_
+; CHECK: }
+define void @test60e() {
+ %t = load i8** @constptr
+ %s = load i8** @something
+ call i8* @objc_retain(i8* %t)
+ call void @callee()
+ call void @use_pointer(i8* %s)
+ call void @objc_release(i8* %t), !clang.imprecise_release !0
+ ret void
+}
+
; Constant pointers to objects don't need to be considered related to other
; pointers.
@@ -1876,11 +2620,13 @@ return: ; preds = %if.then, %entry
; An objc_retain can serve as a may-use for a different pointer.
; rdar://11931823
-; CHECK: define void @test66(
-; CHECK: %tmp7 = tail call i8* @objc_retain(i8* %cond) [[NUW]]
+; CHECK: define void @test66a(
+; CHECK: tail call i8* @objc_retain(i8* %cond) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %call) [[NUW]]
+; CHECK: tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
; CHECK: tail call void @objc_release(i8* %cond) [[NUW]]
; CHECK: }
-define void @test66(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
+define void @test66a(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
entry:
br i1 %tobool, label %cond.true, label %cond.end
@@ -1897,7 +2643,74 @@ cond.end: ; preds = %cond.true, %entry
ret void
}
-declare void @bar(i32 ()*)
+; CHECK: define void @test66b(
+; CHECK: tail call i8* @objc_retain(i8* %cond) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %call) [[NUW]]
+; CHECK: tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK: }
+define void @test66b(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
+entry:
+ br i1 %tobool, label %cond.true, label %cond.end
+
+cond.true:
+ br label %cond.end
+
+cond.end: ; preds = %cond.true, %entry
+ %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
+ %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
+ tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+ %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
+ %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind
+ tail call void @objc_release(i8* %cond) nounwind
+ ret void
+}
+
+; CHECK: define void @test66c(
+; CHECK: tail call i8* @objc_retain(i8* %cond) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %call) [[NUW]]
+; CHECK: tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK: }
+define void @test66c(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
+entry:
+ br i1 %tobool, label %cond.true, label %cond.end
+
+cond.true:
+ br label %cond.end
+
+cond.end: ; preds = %cond.true, %entry
+ %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
+ %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
+ tail call void @objc_release(i8* %call) nounwind
+ %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
+ %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind, !clang.imprecise_release !0
+ tail call void @objc_release(i8* %cond) nounwind
+ ret void
+}
+
+; CHECK: define void @test66d(
+; CHECK: tail call i8* @objc_retain(i8* %cond) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %call) [[NUW]]
+; CHECK: tail call i8* @objc_retain(i8* %tmp8) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %cond) [[NUW]]
+; CHECK: }
+define void @test66d(i8* %tmp5, i8* %bar, i1 %tobool, i1 %tobool1, i8* %call) {
+entry:
+ br i1 %tobool, label %cond.true, label %cond.end
+
+cond.true:
+ br label %cond.end
+
+cond.end: ; preds = %cond.true, %entry
+ %cond = phi i8* [ %tmp5, %cond.true ], [ %call, %entry ]
+ %tmp7 = tail call i8* @objc_retain(i8* %cond) nounwind
+ tail call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
+ %tmp8 = select i1 %tobool1, i8* %cond, i8* %bar
+ %tmp9 = tail call i8* @objc_retain(i8* %tmp8) nounwind
+ tail call void @objc_release(i8* %cond) nounwind, !clang.imprecise_release !0
+ ret void
+}
; A few real-world testcases.
@@ -1907,7 +2720,7 @@ declare i32 @printf(i8* nocapture, ...) nounwind
declare i32 @puts(i8* nocapture) nounwind
@str = internal constant [16 x i8] c"-[ Top0 _getX ]\00"
-; CHECK: @"\01-[A z]"
+; CHECK: define { <2 x float>, <2 x float> } @"\01-[A z]"({}* %self, i8* nocapture %_cmd) [[NUW]] {
; CHECK-NOT: @objc_
; CHECK: }
@@ -1953,7 +2766,7 @@ invoke.cont:
ret {<2 x float>, <2 x float>} %tmp35
}
-; CHECK: @"\01-[Top0 _getX]"
+; CHECK: @"\01-[Top0 _getX]"({}* %self, i8* nocapture %_cmd) [[NUW]] {
; CHECK-NOT: @objc_
; CHECK: }
@@ -1972,12 +2785,13 @@ invoke.cont:
; A simple loop. Eliminate the retain and release inside of it!
-; CHECK: define void @loop
+; CHECK: define void @loop(i8* %x, i64 %n) {
; CHECK: for.body:
; CHECK-NOT: @objc_
; CHECK: @objc_msgSend
; CHECK-NOT: @objc_
; CHECK: for.end:
+; CHECK: }
define void @loop(i8* %x, i64 %n) {
entry:
%0 = tail call i8* @objc_retain(i8* %x) nounwind
@@ -2001,7 +2815,7 @@ for.end: ; preds = %for.body, %entry
; ObjCARCOpt can delete the retain,release on self.
-; CHECK: define void @TextEditTest
+; CHECK: define void @TextEditTest(%2* %self, %3* %pboard) {
; CHECK-NOT: call i8* @objc_retain(i8* %tmp7)
; CHECK: }
diff --git a/test/Transforms/ObjCARC/cfg-hazards.ll b/test/Transforms/ObjCARC/cfg-hazards.ll
index 899298b5967e4..0156d5bfb4644 100644
--- a/test/Transforms/ObjCARC/cfg-hazards.ll
+++ b/test/Transforms/ObjCARC/cfg-hazards.ll
@@ -8,6 +8,7 @@ declare void @use_pointer(i8*)
declare i8* @objc_retain(i8*)
declare void @objc_release(i8*)
declare void @callee()
+declare void @block_callee(void ()*)
; CHECK: define void @test0(
; CHECK: call i8* @objc_retain(
@@ -394,6 +395,41 @@ exit:
ret void
}
+; Do not improperly pair retains inside a loop with releases outside the loop
+; when the proper pairing is disguised by a separate provenance represented by
+; an alloca.
+; rdar://12969722
+
+; CHECK: define void @test13(i8* %a) [[NUW]] {
+; CHECK: entry:
+; CHECK: tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK: loop:
+; CHECK: tail call i8* @objc_retain(i8* %a) [[NUW]]
+; CHECK: call void @block_callee
+; CHECK: call void @objc_release(i8* %reloaded_a) [[NUW]]
+; CHECK: exit:
+; CHECK: call void @objc_release(i8* %a) [[NUW]]
+; CHECK: }
+define void @test13(i8* %a) nounwind {
+entry:
+ %block = alloca i8*
+ %a1 = tail call i8* @objc_retain(i8* %a) nounwind
+ br label %loop
+
+loop:
+ %a2 = tail call i8* @objc_retain(i8* %a) nounwind
+ store i8* %a, i8** %block, align 8
+ %casted_block = bitcast i8** %block to void ()*
+ call void @block_callee(void ()* %casted_block)
+ %reloaded_a = load i8** %block, align 8
+ call void @objc_release(i8* %reloaded_a) nounwind, !clang.imprecise_release !0
+ br i1 undef, label %loop, label %exit
+
+exit:
+ call void @objc_release(i8* %a) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
; CHECK: attributes [[NUW]] = { nounwind }
!0 = metadata !{}
diff --git a/test/Transforms/ObjCARC/contract-marker.ll b/test/Transforms/ObjCARC/contract-marker.ll
index 01fd1e71436e2..55a1b28e1c466 100644
--- a/test/Transforms/ObjCARC/contract-marker.ll
+++ b/test/Transforms/ObjCARC/contract-marker.ll
@@ -1,9 +1,11 @@
; RUN: opt -S -objc-arc-contract < %s | FileCheck %s
+; CHECK: define void @foo() {
; CHECK: %call = tail call i32* @qux()
; CHECK-NEXT: %tcall = bitcast i32* %call to i8*
; CHECK-NEXT: call void asm sideeffect "mov\09r7, r7\09\09@ marker for objc_retainAutoreleaseReturnValue", ""()
; CHECK-NEXT: %0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %tcall) [[NUW:#[0-9]+]]
+; CHECK: }
define void @foo() {
entry:
diff --git a/test/Transforms/ObjCARC/contract-storestrong.ll b/test/Transforms/ObjCARC/contract-storestrong.ll
index 6999237300e76..023604e105b03 100644
--- a/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -12,6 +12,7 @@ declare void @use_pointer(i8*)
; CHECK: entry:
; CHECK-NEXT: tail call void @objc_storeStrong(i8** @x, i8* %p) [[NUW:#[0-9]+]]
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
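;
; Roughly the input this contracts (the loads and stores sit in the part of
; the function the hunk elides, so this is a sketch with an illustrative %old,
; not the verbatim body):
;
;   %0 = tail call i8* @objc_retain(i8* %p)
;   %old = load i8** @x
;   store i8* %0, i8** @x
;   call void @objc_release(i8* %old)
;
; -objc-arc-contract recognizes this retain/load/store/release sequence and
; collapses it into the single objc_storeStrong call checked for above.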
define void @test0(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
@@ -107,6 +108,7 @@ entry:
; CHECK: define i1 @test5(i8* %newValue, i8* %foo) {
; CHECK: %t = icmp eq i8* %x1, %foo
; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %newValue) [[NUW]]
+; CHECK: }
define i1 @test5(i8* %newValue, i8* %foo) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
@@ -122,6 +124,7 @@ entry:
; CHECK: define i1 @test6(i8* %newValue, i8* %foo) {
; CHECK: %t = icmp eq i8* %x1, %foo
; CHECK: tail call void @objc_storeStrong(i8** @x, i8* %newValue) [[NUW]]
+; CHECK: }
define i1 @test6(i8* %newValue, i8* %foo) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
diff --git a/test/Transforms/ObjCARC/contract-testcases.ll b/test/Transforms/ObjCARC/contract-testcases.ll
index 85b03be275ec7..fc023f8981986 100644
--- a/test/Transforms/ObjCARC/contract-testcases.ll
+++ b/test/Transforms/ObjCARC/contract-testcases.ll
@@ -50,6 +50,7 @@ bb6: ; preds = %bb5, %bb4, %bb4, %b
; CHECK: br i1 undef, label %bb7, label %bb7
; CHECK: bb7:
; CHECK: %tmp8 = phi %0* [ %0, %bb ], [ %0, %bb ]
+; CHECK: }
define void @test1() {
bb:
%tmp = tail call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* ()*)()
@@ -70,6 +71,7 @@ bb7: ; preds = %bb6, %bb6, %bb5
; CHECK: invoke.cont: ; preds = %entry
; CHECK-NEXT: call void asm sideeffect "mov\09r7, r7\09\09@ marker for objc_retainAutoreleaseReturnValue", ""()
; CHECK-NEXT: %tmp = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call) [[NUW:#[0-9]+]]
+; CHECK: }
define void @_Z6doTestP8NSString() {
entry:
%call = invoke i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* ()*)()
diff --git a/test/Transforms/ObjCARC/contract.ll b/test/Transforms/ObjCARC/contract.ll
index 0b60683d99951..3544f885528b4 100644
--- a/test/Transforms/ObjCARC/contract.ll
+++ b/test/Transforms/ObjCARC/contract.ll
@@ -10,6 +10,7 @@ declare i8* @objc_retainAutoreleasedReturnValue(i8*)
declare void @use_pointer(i8*)
declare i8* @returner()
+declare void @callee()
; CHECK: define void @test0
; CHECK: call void @use_pointer(i8* %0)
@@ -137,6 +138,7 @@ define i8* @test6() {
; CHECK: call void @use_pointer(i8* %1)
; CHECK: tail call i8* @objc_autoreleaseReturnValue(i8* %1)
; CHECK: ret i8* %2
+; CHECK-NEXT: }
define i8* @test7(i8* %p) {
%1 = tail call i8* @objc_retain(i8* %p)
call void @use_pointer(i8* %p)
@@ -171,6 +173,60 @@ define void @test9(i8* %a, i8* %b) {
ret void
}
+
+; Turn objc_retain into objc_retainAutoreleasedReturnValue if its operand
+; is a return value.
+
+; CHECK: define void @test10()
+; CHECK: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
+define void @test10() {
+ %p = call i8* @returner()
+ tail call i8* @objc_retain(i8* %p) nounwind
+ ret void
+}
+
+; Convert objc_retain to objc_retainAutoreleasedReturnValue if its
+; argument is a return value.
+
+; CHECK: define void @test11(
+; CHECK-NEXT: %y = call i8* @returner()
+; CHECK-NEXT: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK-NEXT: ret void
+define void @test11() {
+ %y = call i8* @returner()
+ tail call i8* @objc_retain(i8* %y) nounwind
+ ret void
+}
+
+; Don't convert objc_retain to objc_retainAutoreleasedReturnValue if its
+; argument is not a return value.
+
+; CHECK: define void @test12(
+; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+define void @test12(i8* %y) {
+ tail call i8* @objc_retain(i8* %y) nounwind
+ ret void
+}
+
+; Don't convert objc_retain to objc_retainAutoreleasedReturnValue if it
+; isn't next to the call providing its return value.
+
+; CHECK: define void @test13(
+; CHECK-NEXT: %y = call i8* @returner()
+; CHECK-NEXT: call void @callee()
+; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+define void @test13() {
+ %y = call i8* @returner()
+ call void @callee()
+ tail call i8* @objc_retain(i8* %y) nounwind
+ ret void
+}
+
+
declare void @clang.arc.use(...) nounwind
; CHECK: attributes [[NUW]] = { nounwind }
diff --git a/test/Transforms/ObjCARC/expand.ll b/test/Transforms/ObjCARC/expand.ll
index 5388673f2b9de..fe47ee52e9079 100644
--- a/test/Transforms/ObjCARC/expand.ll
+++ b/test/Transforms/ObjCARC/expand.ll
@@ -4,25 +4,91 @@ target datalayout = "e-p:64:64:64"
declare i8* @objc_retain(i8*)
declare i8* @objc_autorelease(i8*)
+declare i8* @objc_retainAutoreleasedReturnValue(i8*)
+declare i8* @objc_autoreleaseReturnValue(i8*)
+declare i8* @objc_retainAutorelease(i8*)
+declare i8* @objc_retainAutoreleaseReturnValue(i8*)
+declare i8* @objc_retainBlock(i8*)
declare void @use_pointer(i8*)
-; CHECK: define void @test0
+; CHECK: define void @test_retain(i8* %x) [[NUW:#[0-9]+]] {
+; CHECK: call i8* @objc_retain(i8* %x)
; CHECK: call void @use_pointer(i8* %x)
; CHECK: }
-define void @test0(i8* %x) nounwind {
+define void @test_retain(i8* %x) nounwind {
entry:
%0 = call i8* @objc_retain(i8* %x) nounwind
call void @use_pointer(i8* %0)
ret void
}
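
; The interesting detail in each of these expand tests is the use_pointer
; operand: the body passes the call result %0, while the CHECK lines expect
; the original %x. A sketch of what -objc-arc-expand does to such forwarding
; calls:
;
;   %0 = call i8* @objc_retain(i8* %x)
;   call void @use_pointer(i8* %0)
; becomes
;   %0 = call i8* @objc_retain(i8* %x)
;   call void @use_pointer(i8* %x)
;
; The call itself is kept; only uses of its result are rewritten to its
; operand, exposing the underlying value to later passes. objc_retainBlock is
; deliberately exempt (see the boxed comment below), since it is not a simple
; forwarding call.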
-; CHECK: define void @test1
+; CHECK: define void @test_retainAutoreleasedReturnValue(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
; CHECK: call void @use_pointer(i8* %x)
; CHECK: }
-define void @test1(i8* %x) nounwind {
+define void @test_retainAutoreleasedReturnValue(i8* %x) nounwind {
+entry:
+ %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x) nounwind
+ call void @use_pointer(i8* %0)
+ ret void
+}
+
+; CHECK: define void @test_retainAutorelease(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_retainAutorelease(i8* %x)
+; CHECK: call void @use_pointer(i8* %x)
+; CHECK: }
+define void @test_retainAutorelease(i8* %x) nounwind {
+entry:
+ %0 = call i8* @objc_retainAutorelease(i8* %x) nounwind
+ call void @use_pointer(i8* %0)
+ ret void
+}
+
+; CHECK: define void @test_retainAutoreleaseReturnValue(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_retainAutoreleaseReturnValue(i8* %x)
+; CHECK: call void @use_pointer(i8* %x)
+; CHECK: }
+define void @test_retainAutoreleaseReturnValue(i8* %x) nounwind {
+entry:
+ %0 = call i8* @objc_retainAutoreleaseReturnValue(i8* %x) nounwind
+ call void @use_pointer(i8* %0)
+ ret void
+}
+
+; CHECK: define void @test_autorelease(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_autorelease(i8* %x)
+; CHECK: call void @use_pointer(i8* %x)
+; CHECK: }
+define void @test_autorelease(i8* %x) nounwind {
entry:
%0 = call i8* @objc_autorelease(i8* %x) nounwind
- call void @use_pointer(i8* %x)
+ call void @use_pointer(i8* %0)
+ ret void
+}
+
+; CHECK: define void @test_autoreleaseReturnValue(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_autoreleaseReturnValue(i8* %x)
+; CHECK: call void @use_pointer(i8* %x)
+; CHECK: }
+define void @test_autoreleaseReturnValue(i8* %x) nounwind {
+entry:
+ %0 = call i8* @objc_autoreleaseReturnValue(i8* %x) nounwind
+ call void @use_pointer(i8* %0)
+ ret void
+}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; RetainBlock is not strictly forwarding. Do not touch it. ;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; CHECK: define void @test_retainBlock(i8* %x) [[NUW]] {
+; CHECK: call i8* @objc_retainBlock(i8* %x)
+; CHECK: call void @use_pointer(i8* %0)
+; CHECK: }
+define void @test_retainBlock(i8* %x) nounwind {
+entry:
+ %0 = call i8* @objc_retainBlock(i8* %x) nounwind
+ call void @use_pointer(i8* %0)
ret void
}
diff --git a/test/Transforms/ObjCARC/gvn.ll b/test/Transforms/ObjCARC/gvn.ll
index 3648866de01a2..a828b5485f4c3 100644
--- a/test/Transforms/ObjCARC/gvn.ll
+++ b/test/Transforms/ObjCARC/gvn.ll
@@ -7,11 +7,12 @@ declare i8* @objc_retain(i8*)
; GVN should be able to eliminate this redundant load, with ARC-specific
; alias analysis.
-; CHECK: @foo
+; CHECK: define i8* @foo(i32 %n)
; CHECK-NEXT: entry:
; CHECK-NEXT: %s = load i8** @x
; CHECK-NOT: load
; CHECK: ret i8* %s
+; CHECK-NEXT: }
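;
; Roughly the body under test (the retain and the second load sit in the part
; of the function this hunk elides; a sketch, not the verbatim test):
;
;   %s = load i8** @x
;   %0 = tail call i8* @objc_retain(i8* %s)
;   %t = load i8** @x
;   ret i8* %t
;
; ARC-aware alias analysis tells GVN that objc_retain leaves @x untouched, so
; the second load is redundant and %t is forwarded from %s, matching the
; "ret i8* %s" check above.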
define i8* @foo(i32 %n) nounwind {
entry:
%s = load i8** @x
diff --git a/test/Transforms/ObjCARC/clang-arc-used-intrinsic-removed-if-isolated.ll b/test/Transforms/ObjCARC/intrinsic-use-isolated.ll
index 4215b5c364653..4215b5c364653 100644
--- a/test/Transforms/ObjCARC/clang-arc-used-intrinsic-removed-if-isolated.ll
+++ b/test/Transforms/ObjCARC/intrinsic-use-isolated.ll
diff --git a/test/Transforms/ObjCARC/intrinsic-use.ll b/test/Transforms/ObjCARC/intrinsic-use.ll
index 9c7b81a95d23b..60370c1f4405b 100644
--- a/test/Transforms/ObjCARC/intrinsic-use.ll
+++ b/test/Transforms/ObjCARC/intrinsic-use.ll
@@ -34,8 +34,11 @@ declare void @test0_helper(i8*, i8**)
; CHECK-NEXT: @objc_release(i8* [[VAL1]])
; CHECK-NEXT: @objc_autorelease(i8* %x)
; CHECK-NEXT: store i8* %x, i8** %out
+; CHECK-NEXT: @objc_retain(i8* %x)
; CHECK-NEXT: @objc_release(i8* [[VAL2]])
+; CHECK-NEXT: @objc_release(i8* %x)
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test0(i8** %out, i8* %x, i8* %y) {
entry:
%temp0 = alloca i8*, align 8
@@ -61,3 +64,53 @@ entry:
call void @objc_release(i8* %x) nounwind
ret void
}
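
; clang.arc.use is a no-op marker call whose only job is to count as a formal
; use of its operands, so the pointed-to objects must stay alive up to it. The
; constraint it imposes, using the adjacency visible in the code above:
;
;   call void (...)* @clang.arc.use(i8* %y)   ; formal use of %y
;   call void @objc_release(i8* %y)           ; cannot move above the use
;
; That is why the retains and releases that the optimizer reorders in the
; CHECK lines always land on the far side of the clang.arc.use calls.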
+
+; CHECK: define void @test0a(
+; CHECK: @objc_retain(i8* %x)
+; CHECK-NEXT: store i8* %y, i8** %temp0
+; CHECK-NEXT: @objc_retain(i8* %y)
+; CHECK-NEXT: call void @test0_helper
+; CHECK-NEXT: [[VAL1:%.*]] = load i8** %temp0
+; CHECK-NEXT: call void (...)* @clang.arc.use(i8* %y)
+; CHECK-NEXT: @objc_retain(i8* [[VAL1]])
+; CHECK-NEXT: @objc_release(i8* %y)
+; CHECK-NEXT: store i8* [[VAL1]], i8** %temp1
+; CHECK-NEXT: call void @test0_helper
+; CHECK-NEXT: [[VAL2:%.*]] = load i8** %temp1
+; CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[VAL1]])
+; CHECK-NEXT: @objc_retain(i8* [[VAL2]])
+; CHECK-NEXT: @objc_release(i8* [[VAL1]])
+; CHECK-NEXT: @objc_autorelease(i8* %x)
+; CHECK-NEXT: @objc_release(i8* [[VAL2]])
+; CHECK-NEXT: store i8* %x, i8** %out
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+define void @test0a(i8** %out, i8* %x, i8* %y) {
+entry:
+ %temp0 = alloca i8*, align 8
+ %temp1 = alloca i8*, align 8
+ %0 = call i8* @objc_retain(i8* %x) nounwind
+ %1 = call i8* @objc_retain(i8* %y) nounwind
+ store i8* %y, i8** %temp0
+ call void @test0_helper(i8* %x, i8** %temp0)
+ %val1 = load i8** %temp0
+ %2 = call i8* @objc_retain(i8* %val1) nounwind
+ call void (...)* @clang.arc.use(i8* %y) nounwind
+ call void @objc_release(i8* %y) nounwind, !clang.imprecise_release !0
+ store i8* %val1, i8** %temp1
+ call void @test0_helper(i8* %x, i8** %temp1)
+ %val2 = load i8** %temp1
+ %3 = call i8* @objc_retain(i8* %val2) nounwind
+ call void (...)* @clang.arc.use(i8* %val1) nounwind
+ call void @objc_release(i8* %val1) nounwind, !clang.imprecise_release !0
+ %4 = call i8* @objc_retain(i8* %x) nounwind
+ %5 = call i8* @objc_autorelease(i8* %x) nounwind
+ store i8* %x, i8** %out
+ call void @objc_release(i8* %val2) nounwind, !clang.imprecise_release !0
+ call void @objc_release(i8* %x) nounwind, !clang.imprecise_release !0
+ ret void
+}
+
+
+!0 = metadata !{}
+
diff --git a/test/Transforms/ObjCARC/invoke.ll b/test/Transforms/ObjCARC/invoke.ll
index f528b4ac35bc4..9510f2e7ddeca 100644
--- a/test/Transforms/ObjCARC/invoke.ll
+++ b/test/Transforms/ObjCARC/invoke.ll
@@ -17,6 +17,7 @@ declare i8* @returner()
; CHECK: lpad:
; CHECK: call void @objc_release(i8* %zipFile) [[NUW]], !clang.imprecise_release !0
; CHECK: ret void
+; CHECK-NEXT: }
define void @test0(i8* %zipFile) {
entry:
call i8* @objc_retain(i8* %zipFile) nounwind
@@ -48,6 +49,7 @@ lpad: ; preds = %entry
; CHECK: br label %done
; CHECK: done:
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test1(i8* %zipFile) {
entry:
call i8* @objc_retain(i8* %zipFile) nounwind
@@ -110,6 +112,7 @@ finally.rethrow: ; preds = %invoke.cont, %entry
; CHECK: if.end:
; CHECK-NEXT: call void @objc_release(i8* %p) [[NUW]]
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test3(i8* %p, i1 %b) {
entry:
%0 = call i8* @objc_retain(i8* %p)
@@ -145,6 +148,7 @@ if.end:
; CHECK: if.end:
; CHECK-NEXT: call void @objc_release(i8* %p) [[NUW]]
; CHECK-NEXT: ret void
+; CHECK-NEXT: }
define void @test4(i8* %p, i1 %b) {
entry:
%0 = call i8* @objc_retain(i8* %p)
diff --git a/test/Transforms/ObjCARC/move-and-merge-autorelease.ll b/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
index 8462c70a48e73..e5d2f07e45a2b 100644
--- a/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
+++ b/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
@@ -1,4 +1,4 @@
-; RUN: opt -S -objc-arc < %s | FileCheck %s
+; RUN: opt -S -objc-arc -objc-arc-contract < %s | FileCheck %s
; The optimizer should be able to move the autorelease past two phi nodes
; and fold it with the release in bb65.
diff --git a/test/Transforms/ObjCARC/retain-block-escape-analysis.ll b/test/Transforms/ObjCARC/retain-block-escape-analysis.ll
index 2c1ddce32836b..8df05ad22666a 100644
--- a/test/Transforms/ObjCARC/retain-block-escape-analysis.ll
+++ b/test/Transforms/ObjCARC/retain-block-escape-analysis.ll
@@ -23,6 +23,23 @@ define void @bitcasttest(i8* %storage, void (...)* %block) {
; CHECK: define void @bitcasttest
entry:
%t1 = bitcast void (...)* %block to i8*
+; CHECK: tail call i8* @objc_retain
+ %t2 = tail call i8* @objc_retain(i8* %t1)
+; CHECK: tail call i8* @objc_retainBlock
+ %t3 = tail call i8* @objc_retainBlock(i8* %t1), !clang.arc.copy_on_escape !0
+ %t4 = bitcast i8* %storage to void (...)**
+ %t5 = bitcast i8* %t3 to void (...)*
+ store void (...)* %t5, void (...)** %t4, align 8
+; CHECK: call void @objc_release
+ call void @objc_release(i8* %t1)
+ ret void
+; CHECK: }
+}
+
+define void @bitcasttest_a(i8* %storage, void (...)* %block) {
+; CHECK: define void @bitcasttest_a
+entry:
+ %t1 = bitcast void (...)* %block to i8*
; CHECK-NOT: tail call i8* @objc_retain
%t2 = tail call i8* @objc_retain(i8* %t1)
; CHECK: tail call i8* @objc_retainBlock
@@ -31,14 +48,34 @@ entry:
%t5 = bitcast i8* %t3 to void (...)*
store void (...)* %t5, void (...)** %t4, align 8
; CHECK-NOT: call void @objc_release
- call void @objc_release(i8* %t1)
+ call void @objc_release(i8* %t1), !clang.imprecise_release !0
ret void
+; CHECK: }
}
define void @geptest(void (...)** %storage_array, void (...)* %block) {
; CHECK: define void @geptest
entry:
%t1 = bitcast void (...)* %block to i8*
+; CHECK: tail call i8* @objc_retain
+ %t2 = tail call i8* @objc_retain(i8* %t1)
+; CHECK: tail call i8* @objc_retainBlock
+ %t3 = tail call i8* @objc_retainBlock(i8* %t1), !clang.arc.copy_on_escape !0
+ %t4 = bitcast i8* %t3 to void (...)*
+
+ %storage = getelementptr inbounds void (...)** %storage_array, i64 0
+
+ store void (...)* %t4, void (...)** %storage, align 8
+; CHECK: call void @objc_release
+ call void @objc_release(i8* %t1)
+ ret void
+; CHECK: }
+}
+
+define void @geptest_a(void (...)** %storage_array, void (...)* %block) {
+; CHECK: define void @geptest_a
+entry:
+ %t1 = bitcast void (...)* %block to i8*
; CHECK-NOT: tail call i8* @objc_retain
%t2 = tail call i8* @objc_retain(i8* %t1)
; CHECK: tail call i8* @objc_retainBlock
@@ -49,8 +86,9 @@ entry:
store void (...)* %t4, void (...)** %storage, align 8
; CHECK-NOT: call void @objc_release
- call void @objc_release(i8* %t1)
+ call void @objc_release(i8* %t1), !clang.imprecise_release !0
ret void
+; CHECK: }
}
define void @selecttest(void (...)** %store1, void (...)** %store2,
@@ -58,6 +96,24 @@ define void @selecttest(void (...)** %store1, void (...)** %store2,
; CHECK: define void @selecttest
entry:
%t1 = bitcast void (...)* %block to i8*
+; CHECK: tail call i8* @objc_retain
+ %t2 = tail call i8* @objc_retain(i8* %t1)
+; CHECK: tail call i8* @objc_retainBlock
+ %t3 = tail call i8* @objc_retainBlock(i8* %t1), !clang.arc.copy_on_escape !0
+ %t4 = bitcast i8* %t3 to void (...)*
+ %store = select i1 undef, void (...)** %store1, void (...)** %store2
+ store void (...)* %t4, void (...)** %store, align 8
+; CHECK: call void @objc_release
+ call void @objc_release(i8* %t1)
+ ret void
+; CHECK: }
+}
+
+define void @selecttest_a(void (...)** %store1, void (...)** %store2,
+ void (...)* %block) {
+; CHECK: define void @selecttest_a
+entry:
+ %t1 = bitcast void (...)* %block to i8*
; CHECK-NOT: tail call i8* @objc_retain
%t2 = tail call i8* @objc_retain(i8* %t1)
; CHECK: tail call i8* @objc_retainBlock
@@ -66,8 +122,9 @@ entry:
%store = select i1 undef, void (...)** %store1, void (...)** %store2
store void (...)* %t4, void (...)** %store, align 8
; CHECK-NOT: call void @objc_release
- call void @objc_release(i8* %t1)
+ call void @objc_release(i8* %t1), !clang.imprecise_release !0
ret void
+; CHECK: }
}
define void @phinodetest(void (...)** %storage1,
@@ -76,6 +133,36 @@ define void @phinodetest(void (...)** %storage1,
; CHECK: define void @phinodetest
entry:
%t1 = bitcast void (...)* %block to i8*
+; CHECK: tail call i8* @objc_retain
+ %t2 = tail call i8* @objc_retain(i8* %t1)
+; CHECK: tail call i8* @objc_retainBlock
+ %t3 = tail call i8* @objc_retainBlock(i8* %t1), !clang.arc.copy_on_escape !0
+ %t4 = bitcast i8* %t3 to void (...)*
+ br i1 undef, label %store1_set, label %store2_set
+; CHECK: store1_set:
+
+store1_set:
+ br label %end
+
+store2_set:
+ br label %end
+
+end:
+; CHECK: end:
+ %storage = phi void (...)** [ %storage1, %store1_set ], [ %storage2, %store2_set]
+ store void (...)* %t4, void (...)** %storage, align 8
+; CHECK: call void @objc_release
+ call void @objc_release(i8* %t1)
+ ret void
+; CHECK: }
+}
+
+define void @phinodetest_a(void (...)** %storage1,
+ void (...)** %storage2,
+ void (...)* %block) {
+; CHECK: define void @phinodetest_a
+entry:
+ %t1 = bitcast void (...)* %block to i8*
; CHECK-NOT: tail call i8* @objc_retain
%t2 = tail call i8* @objc_retain(i8* %t1)
; CHECK: tail call i8* @objc_retainBlock
@@ -93,10 +180,11 @@ end:
%storage = phi void (...)** [ %storage1, %store1_set ], [ %storage2, %store2_set]
store void (...)* %t4, void (...)** %storage, align 8
; CHECK-NOT: call void @objc_release
- call void @objc_release(i8* %t1)
+ call void @objc_release(i8* %t1), !clang.imprecise_release !0
ret void
}
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; This test makes sure that we do not hang clang when visiting a use ;
; cycle caused by phi nodes during objc-arc analysis. *NOTE* This ;
diff --git a/test/Transforms/ObjCARC/rv.ll b/test/Transforms/ObjCARC/rv.ll
index 589c60f9f3aaa..e857c9f41bb49 100644
--- a/test/Transforms/ObjCARC/rv.ll
+++ b/test/Transforms/ObjCARC/rv.ll
@@ -136,17 +136,6 @@ define i8* @test7b() {
ret i8* %p
}
-; Turn objc_retain into objc_retainAutoreleasedReturnValue if its operand
-; is a return value.
-
-; CHECK: define void @test8()
-; CHECK: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
-define void @test8() {
- %p = call i8* @returner()
- call i8* @objc_retain(i8* %p)
- ret void
-}
-
; Don't apply the RV optimization to autorelease if there's no retain.
; CHECK: define i8* @test9(i8* %p)
@@ -235,45 +224,6 @@ define void @test15() {
ret void
}
-; Convert objc_retain to objc_retainAutoreleasedReturnValue if its
-; argument is a return value.
-
-; CHECK: define void @test16(
-; CHECK-NEXT: %y = call i8* @returner()
-; CHECK-NEXT: tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
-; CHECK-NEXT: ret void
-define void @test16() {
- %y = call i8* @returner()
- call i8* @objc_retain(i8* %y)
- ret void
-}
-
-; Don't convert objc_retain to objc_retainAutoreleasedReturnValue if its
-; argument is not a return value.
-
-; CHECK: define void @test17(
-; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
-; CHECK-NEXT: ret void
-define void @test17(i8* %y) {
- call i8* @objc_retain(i8* %y)
- ret void
-}
-
-; Don't Convert objc_retain to objc_retainAutoreleasedReturnValue if it
-; isn't next to the call providing its return value.
-
-; CHECK: define void @test18(
-; CHECK-NEXT: %y = call i8* @returner()
-; CHECK-NEXT: call void @callee()
-; CHECK-NEXT: tail call i8* @objc_retain(i8* %y) [[NUW]]
-; CHECK-NEXT: ret void
-define void @test18() {
- %y = call i8* @returner()
- call void @callee()
- call i8* @objc_retain(i8* %y)
- ret void
-}
-
; Delete autoreleaseRV+retainRV pairs.
; CHECK: define i8* @test19(i8* %p) {
diff --git a/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll b/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
index 26cd67727e6a6..1ec61c8481016 100644
--- a/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
+++ b/test/Transforms/ObjCARC/tail-call-invariant-enforcement.ll
@@ -1,74 +1,89 @@
; RUN: opt -objc-arc -S < %s | FileCheck %s
-declare i8* @objc_release(i8* %x)
+declare void @objc_release(i8* %x)
declare i8* @objc_retain(i8* %x)
declare i8* @objc_autorelease(i8* %x)
declare i8* @objc_autoreleaseReturnValue(i8* %x)
declare i8* @objc_retainAutoreleasedReturnValue(i8* %x)
+declare i8* @tmp(i8*)
; Never tail call objc_autorelease.
-define i8* @test0(i8* %x) {
+
+; CHECK: define i8* @test0(i8* %x) [[NUW:#[0-9]+]] {
+; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: %tmp1 = call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: }
+define i8* @test0(i8* %x) nounwind {
entry:
- ; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x)
%tmp0 = call i8* @objc_autorelease(i8* %x)
- ; CHECK: %tmp1 = call i8* @objc_autorelease(i8* %x)
%tmp1 = tail call i8* @objc_autorelease(i8* %x)
ret i8* %x
}
; Always tail call autoreleaseReturnValue.
-define i8* @test1(i8* %x) {
+
+; CHECK: define i8* @test1(i8* %x) [[NUW]] {
+; CHECK: %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
+; CHECK: }
+define i8* @test1(i8* %x) nounwind {
entry:
- ; CHECK: %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
%tmp0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
- ; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
%tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
ret i8* %x
}
; Always tail call objc_retain.
-define i8* @test2(i8* %x) {
+
+; CHECK: define i8* @test2(i8* %x) [[NUW]] {
+; CHECK: %tmp0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x) [[NUW]]
+; CHECK: }
+define i8* @test2(i8* %x) nounwind {
entry:
- ; CHECK: %tmp0 = tail call i8* @objc_retain(i8* %x)
%tmp0 = call i8* @objc_retain(i8* %x)
- ; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x)
%tmp1 = tail call i8* @objc_retain(i8* %x)
ret i8* %x
}
-define i8* @tmp(i8* %x) {
- ret i8* %x
-}
-
; Always tail call objc_retainAutoreleasedReturnValue.
-define i8* @test3(i8* %x) {
+; CHECK: define i8* @test3(i8* %x) [[NUW]] {
+; CHECK: %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y) [[NUW]]
+; CHECK: %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z) [[NUW]]
+; CHECK: }
+define i8* @test3(i8* %x) nounwind {
entry:
%y = call i8* @tmp(i8* %x)
- ; CHECK: %tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %y)
%tmp0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %y)
%z = call i8* @tmp(i8* %x)
- ; CHECK: %tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
%tmp1 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %z)
ret i8* %x
}
; By itself, we should never change whether or not objc_release is tail called.
-define i8* @test4(i8* %x) {
+
+; CHECK: define void @test4(i8* %x) [[NUW]] {
+; CHECK: call void @objc_release(i8* %x) [[NUW]]
+; CHECK: tail call void @objc_release(i8* %x) [[NUW]]
+; CHECK: }
+define void @test4(i8* %x) nounwind {
entry:
- ; CHECK: %tmp0 = call i8* @objc_release(i8* %x)
- %tmp0 = call i8* @objc_release(i8* %x)
- ; CHECK: %tmp1 = tail call i8* @objc_release(i8* %x)
- %tmp1 = tail call i8* @objc_release(i8* %x)
- ret i8* %x
+ call void @objc_release(i8* %x)
+ tail call void @objc_release(i8* %x)
+ ret void
}
; If we convert a tail called @objc_autoreleaseReturnValue to an
; @objc_autorelease, ensure that the tail call is removed.
-define i8* @test5(i8* %x) {
+; CHECK: define i8* @test5(i8* %x) [[NUW]] {
+; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x) [[NUW]]
+; CHECK: }
+define i8* @test5(i8* %x) nounwind {
entry:
- ; CHECK: %tmp0 = call i8* @objc_autorelease(i8* %x)
%tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
ret i8* %tmp0
}
+; CHECK: attributes [[NUW]] = { nounwind }
+
diff --git a/test/Transforms/Reassociate/pr12245.ll b/test/Transforms/Reassociate/pr12245.ll
index 84098bdb006b6..e9b5355cceb13 100644
--- a/test/Transforms/Reassociate/pr12245.ll
+++ b/test/Transforms/Reassociate/pr12245.ll
@@ -6,36 +6,36 @@
define i32 @fn2() nounwind uwtable ssp {
entry:
- %0 = load i32* @a, align 4, !tbaa !0
+ %0 = load i32* @a, align 4
%dec = add nsw i32 %0, -1
- store i32 %dec, i32* @a, align 4, !tbaa !0
- %1 = load i32* @d, align 4, !tbaa !0
+ store i32 %dec, i32* @a, align 4
+ %1 = load i32* @d, align 4
%sub = sub nsw i32 %dec, %1
- store i32 %sub, i32* @d, align 4, !tbaa !0
- %2 = load i32* @a, align 4, !tbaa !0
+ store i32 %sub, i32* @d, align 4
+ %2 = load i32* @a, align 4
%dec1 = add nsw i32 %2, -1
- store i32 %dec1, i32* @a, align 4, !tbaa !0
- %3 = load i32* @d, align 4, !tbaa !0
+ store i32 %dec1, i32* @a, align 4
+ %3 = load i32* @d, align 4
%sub2 = sub nsw i32 %dec1, %3
- store i32 %sub2, i32* @d, align 4, !tbaa !0
- %4 = load i32* @a, align 4, !tbaa !0
+ store i32 %sub2, i32* @d, align 4
+ %4 = load i32* @a, align 4
%dec3 = add nsw i32 %4, -1
- store i32 %dec3, i32* @a, align 4, !tbaa !0
- %5 = load i32* @d, align 4, !tbaa !0
+ store i32 %dec3, i32* @a, align 4
+ %5 = load i32* @d, align 4
%sub4 = sub nsw i32 %dec3, %5
- store i32 %sub4, i32* @d, align 4, !tbaa !0
- %6 = load i32* @a, align 4, !tbaa !0
+ store i32 %sub4, i32* @d, align 4
+ %6 = load i32* @a, align 4
%dec5 = add nsw i32 %6, -1
- store i32 %dec5, i32* @a, align 4, !tbaa !0
- %7 = load i32* @d, align 4, !tbaa !0
+ store i32 %dec5, i32* @a, align 4
+ %7 = load i32* @d, align 4
%sub6 = sub nsw i32 %dec5, %7
- store i32 %sub6, i32* @d, align 4, !tbaa !0
- %8 = load i32* @a, align 4, !tbaa !0
+ store i32 %sub6, i32* @d, align 4
+ %8 = load i32* @a, align 4
%dec7 = add nsw i32 %8, -1
- store i32 %dec7, i32* @a, align 4, !tbaa !0
- %9 = load i32* @d, align 4, !tbaa !0
+ store i32 %dec7, i32* @a, align 4
+ %9 = load i32* @d, align 4
%sub8 = sub nsw i32 %dec7, %9
- store i32 %sub8, i32* @d, align 4, !tbaa !0
+ store i32 %sub8, i32* @d, align 4
ret i32 0
}
@@ -44,7 +44,3 @@ entry:
%call = call i32 @fn2()
ret i32 %call
}
-
-!0 = metadata !{metadata !"int", metadata !1}
-!1 = metadata !{metadata !"omnipotent char", metadata !2}
-!2 = metadata !{metadata !"Simple C/C++ TBAA"}
diff --git a/test/Transforms/Reassociate/xor_reassoc.ll b/test/Transforms/Reassociate/xor_reassoc.ll
index d371a9b5b68f3..b9353c7f81fe3 100644
--- a/test/Transforms/Reassociate/xor_reassoc.ll
+++ b/test/Transforms/Reassociate/xor_reassoc.ll
@@ -164,3 +164,30 @@ define void @xor_bug1() {
%3 = and i64 undef, %2
ret void
}
+
+; The bug was that when the compiler optimized "(x | c1)" ^ "(x & c2)", it could
+; swap the two xor subexpressions if they were not in canonical order; however,
+; when the optimizer swapped the two subexpressions, it forgot to swap the cached
+; values of c1 and c2 accordingly, which caused the problem.
+;
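+; A rough C sketch of the affected pattern (hypothetical constants, for
+; illustration only; the IR below uses larger masks):
+;
+;   int f(int x) { return (x | 0xC1) ^ (x & 0xC2); }
+;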
+define i32 @xor_bug2(i32, i32, i32, i32) {
+ %5 = mul i32 %0, 123
+ %6 = add i32 %2, 24
+ %7 = add i32 %1, 8
+ %8 = and i32 %1, 3456789
+ %9 = or i32 %8, 4567890
+ %10 = and i32 %1, 543210987
+ %11 = or i32 %1, 891034567
+ %12 = and i32 %2, 255
+ %13 = xor i32 %9, %10
+ %14 = xor i32 %11, %13
+ %15 = xor i32 %5, %14
+ %16 = and i32 %3, 255
+ %17 = xor i32 %16, 42
+ %18 = add i32 %6, %7
+ %19 = add i32 %18, %12
+ %20 = add i32 %19, %15
+ ret i32 %20
+;CHECK: @xor_bug2
+;CHECK: xor i32 %5, 891034567
+}
diff --git a/test/Transforms/SLPVectorizer/X86/barriercall.ll b/test/Transforms/SLPVectorizer/X86/barriercall.ll
new file mode 100644
index 0000000000000..04eb8f919bc72
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/barriercall.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
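+; A rough C analogue of the test below (hypothetical; @bar is an opaque call):
+;
+;   int foo(int *A, int n) {
+;     bar();
+;     A[0] = n*5 + 9; A[1] = n*9 + 9; A[2] = n*8 + 9; A[3] = n*10 + 9;
+;     return 0;
+;   }
+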
+;CHECK: @foo
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) {
+entry:
+ %call = tail call i32 (...)* @bar() #2
+ %mul = mul nsw i32 %n, 5
+ %add = add nsw i32 %mul, 9
+ store i32 %add, i32* %A, align 4
+ %mul1 = mul nsw i32 %n, 9
+ %add2 = add nsw i32 %mul1, 9
+ %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ store i32 %add2, i32* %arrayidx3, align 4
+ %mul4 = shl i32 %n, 3
+ %add5 = add nsw i32 %mul4, 9
+ %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ store i32 %add5, i32* %arrayidx6, align 4
+ %mul7 = mul nsw i32 %n, 10
+ %add8 = add nsw i32 %mul7, 9
+ %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ store i32 %add8, i32* %arrayidx9, align 4
+ ret i32 undef
+}
+
+ ; We can still vectorize the stores in @foo above, despite the call to @bar.
+
+declare i32 @bar(...)
diff --git a/test/Transforms/SLPVectorizer/X86/cast.ll b/test/Transforms/SLPVectorizer/X86/cast.ll
new file mode 100644
index 0000000000000..344dbbca2c572
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/cast.ll
@@ -0,0 +1,38 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.9.0"
+
+; int foo(int * restrict A, char * restrict B) {
+; A[0] = B[0];
+; A[1] = B[1];
+; A[2] = B[2];
+; A[3] = B[3];
+; }
+;CHECK: @foo
+;CHECK: load <4 x i8>
+;CHECK: sext
+;CHECK: store <4 x i32>
+define i32 @foo(i32* noalias nocapture %A, i8* noalias nocapture %B) {
+entry:
+ %0 = load i8* %B, align 1
+ %conv = sext i8 %0 to i32
+ store i32 %conv, i32* %A, align 4
+ %arrayidx2 = getelementptr inbounds i8* %B, i64 1
+ %1 = load i8* %arrayidx2, align 1
+ %conv3 = sext i8 %1 to i32
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ store i32 %conv3, i32* %arrayidx4, align 4
+ %arrayidx5 = getelementptr inbounds i8* %B, i64 2
+ %2 = load i8* %arrayidx5, align 1
+ %conv6 = sext i8 %2 to i32
+ %arrayidx7 = getelementptr inbounds i32* %A, i64 2
+ store i32 %conv6, i32* %arrayidx7, align 4
+ %arrayidx8 = getelementptr inbounds i8* %B, i64 3
+ %3 = load i8* %arrayidx8, align 1
+ %conv9 = sext i8 %3 to i32
+ %arrayidx10 = getelementptr inbounds i32* %A, i64 3
+ store i32 %conv9, i32* %arrayidx10, align 4
+ ret i32 undef
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
new file mode 100644
index 0000000000000..05f8e616bb8e2
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -0,0 +1,53 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+@.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
+
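+; A rough C analogue of the loop below (hypothetical):
+;
+;   double conv = n;
+;   for (int i = 0; i < 100; ++i)
+;     if (conv*A[2*i]*7 + 5 > conv*A[2*i+1]*4 + 9)
+;       printf("bingo");
+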
+;CHECK: @reduce_compare
+;CHECK: load <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: fadd <2 x double>
+;CHECK: extractelement
+;CHECK: extractelement
+;CHECK: ret
+define void @reduce_compare(double* nocapture %A, i32 %n) {
+entry:
+ %conv = sitofp i32 %n to double
+ br label %for.body
+
+for.body: ; preds = %for.inc, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+ %0 = shl nsw i64 %indvars.iv, 1
+ %arrayidx = getelementptr inbounds double* %A, i64 %0
+ %1 = load double* %arrayidx, align 8
+ %mul1 = fmul double %conv, %1
+ %mul2 = fmul double %mul1, 7.000000e+00
+ %add = fadd double %mul2, 5.000000e+00
+ %2 = or i64 %0, 1
+ %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+ %3 = load double* %arrayidx6, align 8
+ %mul8 = fmul double %conv, %3
+ %mul9 = fmul double %mul8, 4.000000e+00
+ %add10 = fadd double %mul9, 9.000000e+00
+ %cmp11 = fcmp ogt double %add, %add10
+ br i1 %cmp11, label %if.then, label %for.inc
+
+if.then: ; preds = %for.body
+ %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0))
+ br label %for.inc
+
+for.inc: ; preds = %for.body, %if.then
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+ %exitcond = icmp eq i32 %lftr.wideiv, 100
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.inc
+ ret void
+}
+
+declare i32 @printf(i8* nocapture, ...)
+
diff --git a/test/Transforms/SLPVectorizer/X86/diamond.ll b/test/Transforms/SLPVectorizer/X86/diamond.ll
new file mode 100644
index 0000000000000..8e85cb6c9b8f8
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/diamond.ll
@@ -0,0 +1,78 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; int foo(int * restrict B, int * restrict A, int n, int m) {
+; B[0] = n * A[0] + m * A[0];
+; B[1] = n * A[1] + m * A[1];
+; B[2] = n * A[2] + m * A[2];
+; B[3] = n * A[3] + m * A[3];
+; return 0;
+; }
+
+; CHECK: @foo
+; CHECK: load <4 x i32>
+; CHECK: mul <4 x i32>
+; CHECK: store <4 x i32>
+; CHECK: ret
+define i32 @foo(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) #0 {
+entry:
+ %0 = load i32* %A, align 4
+ %mul238 = add i32 %m, %n
+ %add = mul i32 %0, %mul238
+ store i32 %add, i32* %B, align 4
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add8 = mul i32 %1, %mul238
+ %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ store i32 %add8, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+ %2 = load i32* %arrayidx10, align 4
+ %add14 = mul i32 %2, %mul238
+ %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ store i32 %add14, i32* %arrayidx15, align 4
+ %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+ %3 = load i32* %arrayidx16, align 4
+ %add20 = mul i32 %3, %mul238
+ %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ store i32 %add20, i32* %arrayidx21, align 4
+ ret i32 0
+}
+
+
+; int foo_fail(int * restrict B, int * restrict A, int n, int m) {
+; B[0] = n * A[0] + m * A[0];
+; B[1] = n * A[1] + m * A[1];
+; B[2] = n * A[2] + m * A[2];
+; B[3] = n * A[3] + m * A[3];
+; return A[0];
+; }
+
+; CHECK: @foo_fail
+; CHECK-NOT: load <4 x i32>
+; CHECK: ret
+define i32 @foo_fail(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
+entry:
+ %0 = load i32* %A, align 4
+ %mul238 = add i32 %m, %n
+ %add = mul i32 %0, %mul238
+ store i32 %add, i32* %B, align 4
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add8 = mul i32 %1, %mul238
+ %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ store i32 %add8, i32* %arrayidx9, align 4
+ %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+ %2 = load i32* %arrayidx10, align 4
+ %add14 = mul i32 %2, %mul238
+ %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ store i32 %add14, i32* %arrayidx15, align 4
+ %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+ %3 = load i32* %arrayidx16, align 4
+ %add20 = mul i32 %3, %mul238
+ %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ store i32 %add20, i32* %arrayidx21, align 4
+ ret i32 %0 ;<--------- This value has multiple users and can't be vectorized.
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/flag.ll b/test/Transforms/SLPVectorizer/X86/flag.ll
new file mode 100644
index 0000000000000..3ca5407067449
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/flag.ll
@@ -0,0 +1,51 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -slp-threshold=1000 -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Check that the -slp-threshold command line flag works: with a very high
+; threshold (1000), vectorization is deemed unprofitable, so no vector loads
+; should be emitted.
+;CHECK:rollable
+;CHECK-NOT:load <4 x i32>
+;CHECK: ret
+
+define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+ %1 = icmp eq i64 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+ %2 = shl i64 %i.019, 2
+ %3 = getelementptr inbounds i32* %in, i64 %2
+ %4 = load i32* %3, align 4
+ %5 = or i64 %2, 1
+ %6 = getelementptr inbounds i32* %in, i64 %5
+ %7 = load i32* %6, align 4
+ %8 = or i64 %2, 2
+ %9 = getelementptr inbounds i32* %in, i64 %8
+ %10 = load i32* %9, align 4
+ %11 = or i64 %2, 3
+ %12 = getelementptr inbounds i32* %in, i64 %11
+ %13 = load i32* %12, align 4
+ %14 = mul i32 %4, 7
+ %15 = add i32 %14, 7
+ %16 = mul i32 %7, 7
+ %17 = add i32 %16, 14
+ %18 = mul i32 %10, 7
+ %19 = add i32 %18, 21
+ %20 = mul i32 %13, 7
+ %21 = add i32 %20, 28
+ %22 = getelementptr inbounds i32* %out, i64 %2
+ store i32 %15, i32* %22, align 4
+ %23 = getelementptr inbounds i32* %out, i64 %5
+ store i32 %17, i32* %23, align 4
+ %24 = getelementptr inbounds i32* %out, i64 %8
+ store i32 %19, i32* %24, align 4
+ %25 = getelementptr inbounds i32* %out, i64 %11
+ store i32 %21, i32* %25, align 4
+ %26 = add i64 %i.019, 1
+ %exitcond = icmp eq i64 %26, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/hoist.ll b/test/Transforms/SLPVectorizer/X86/hoist.ll
new file mode 100644
index 0000000000000..5074ceaaabd7c
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/hoist.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.9.0"
+
+;int foo(int *A, int n, int k) {
+; for (int i=0; i < 10000; i+=4) {
+; A[i] += n;
+; A[i+1] += k;
+; A[i+2] += n;
+; A[i+3] += k;
+; }
+;}
+
+; preheader:
+;CHECK: entry
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+;CHECK-NEXT: insertelement
+; loop body:
+;CHECK: phi
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n, i32 %k) {
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
+ %arrayidx = getelementptr inbounds i32* %A, i32 %i.024
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %n
+ store i32 %add, i32* %arrayidx, align 4
+ %add121 = or i32 %i.024, 1
+ %arrayidx2 = getelementptr inbounds i32* %A, i32 %add121
+ %1 = load i32* %arrayidx2, align 4
+ %add3 = add nsw i32 %1, %k
+ store i32 %add3, i32* %arrayidx2, align 4
+ %add422 = or i32 %i.024, 2
+ %arrayidx5 = getelementptr inbounds i32* %A, i32 %add422
+ %2 = load i32* %arrayidx5, align 4
+ %add6 = add nsw i32 %2, %n
+ store i32 %add6, i32* %arrayidx5, align 4
+ %add723 = or i32 %i.024, 3
+ %arrayidx8 = getelementptr inbounds i32* %A, i32 %add723
+ %3 = load i32* %arrayidx8, align 4
+ %add9 = add nsw i32 %3, %k
+ store i32 %add9, i32* %arrayidx8, align 4
+ %add10 = add nsw i32 %i.024, 4
+ %cmp = icmp slt i32 %add10, 10000
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body
+ ret i32 undef
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/lit.local.cfg b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
new file mode 100644
index 0000000000000..a8ad0f1a28b23
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/lit.local.cfg
@@ -0,0 +1,6 @@
+config.suffixes = ['.ll', '.c', '.cpp']
+
+targets = set(config.root.targets_to_build.split())
+if 'X86' not in targets:
+ config.unsupported = True
+
diff --git a/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
new file mode 100644
index 0000000000000..4a37fce2ff247
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -0,0 +1,69 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
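+; A rough C analogue of the loop below (hypothetical; the IR is unrolled by 8):
+;
+;   int foo(int *A, int n) {
+;     for (int i = 0; i < n; i += 8)
+;       for (int k = 0; k < 8; ++k)
+;         A[i+k] += n;
+;   }
+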
+;CHECK: @foo
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) #0 {
+entry:
+ %cmp62 = icmp sgt i32 %n, 0
+ br i1 %cmp62, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %0 = load i32* %arrayidx, align 4
+ %add1 = add nsw i32 %0, %n
+ store i32 %add1, i32* %arrayidx, align 4
+ %1 = or i64 %indvars.iv, 1
+ %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+ %2 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %2, %n
+ store i32 %add5, i32* %arrayidx4, align 4
+ %3 = or i64 %indvars.iv, 2
+ %arrayidx8 = getelementptr inbounds i32* %A, i64 %3
+ %4 = load i32* %arrayidx8, align 4
+ %add9 = add nsw i32 %4, %n
+ store i32 %add9, i32* %arrayidx8, align 4
+ %5 = or i64 %indvars.iv, 3
+ %arrayidx12 = getelementptr inbounds i32* %A, i64 %5
+ %6 = load i32* %arrayidx12, align 4
+ %add13 = add nsw i32 %6, %n
+ store i32 %add13, i32* %arrayidx12, align 4
+ %7 = or i64 %indvars.iv, 4
+ %arrayidx16 = getelementptr inbounds i32* %A, i64 %7
+ %8 = load i32* %arrayidx16, align 4
+ %add17 = add nsw i32 %8, %n
+ store i32 %add17, i32* %arrayidx16, align 4
+ %9 = or i64 %indvars.iv, 5
+ %arrayidx20 = getelementptr inbounds i32* %A, i64 %9
+ %10 = load i32* %arrayidx20, align 4
+ %add21 = add nsw i32 %10, %n
+ store i32 %add21, i32* %arrayidx20, align 4
+ %11 = or i64 %indvars.iv, 6
+ %arrayidx24 = getelementptr inbounds i32* %A, i64 %11
+ %12 = load i32* %arrayidx24, align 4
+ %add25 = add nsw i32 %12, %n
+ store i32 %add25, i32* %arrayidx24, align 4
+ %13 = or i64 %indvars.iv, 7
+ %arrayidx28 = getelementptr inbounds i32* %A, i64 %13
+ %14 = load i32* %arrayidx28, align 4
+ %add29 = add nsw i32 %14, %n
+ store i32 %add29, i32* %arrayidx28, align 4
+ %indvars.iv.next = add i64 %indvars.iv, 8
+ %15 = trunc i64 %indvars.iv.next to i32
+ %cmp = icmp slt i32 %15, %n
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.body, %entry
+ ret i32 undef
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/Transforms/SLPVectorizer/X86/multi_user.ll b/test/Transforms/SLPVectorizer/X86/multi_user.ll
new file mode 100644
index 0000000000000..aaa6063fdeda3
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.7.0"
+
+;int foo (int *A, int n) {
+; A[0] += n * 5 + 7;
+; A[1] += n * 5 + 8;
+; A[2] += n * 5 + 9;
+; A[3] += n * 5 + 10;
+; A[4] += n * 5 + 11;
+;}
+
+;CHECK: @foo
+;CHECK: insertelement <4 x i32>
+;CHECK: load <4 x i32>
+;CHECK: add <4 x i32>
+;CHECK: store <4 x i32>
+;CHECK: ret
+define i32 @foo(i32* nocapture %A, i32 %n) {
+ %1 = mul nsw i32 %n, 5
+ %2 = add nsw i32 %1, 7
+ %3 = load i32* %A, align 4
+ %4 = add nsw i32 %2, %3
+ store i32 %4, i32* %A, align 4
+ %5 = add nsw i32 %1, 8
+ %6 = getelementptr inbounds i32* %A, i64 1
+ %7 = load i32* %6, align 4
+ %8 = add nsw i32 %5, %7
+ store i32 %8, i32* %6, align 4
+ %9 = add nsw i32 %1, 9
+ %10 = getelementptr inbounds i32* %A, i64 2
+ %11 = load i32* %10, align 4
+ %12 = add nsw i32 %9, %11
+ store i32 %12, i32* %10, align 4
+ %13 = add nsw i32 %1, 10
+ %14 = getelementptr inbounds i32* %A, i64 3
+ %15 = load i32* %14, align 4
+ %16 = add nsw i32 %13, %15
+ store i32 %16, i32* %14, align 4
+ %17 = add nsw i32 %1, 11
+ %18 = getelementptr inbounds i32* %A, i64 4
+ %19 = load i32* %18, align 4
+ %20 = add nsw i32 %17, %19
+ store i32 %20, i32* %18, align 4
+ ret i32 undef
+}
diff --git a/test/Transforms/SLPVectorizer/X86/reduction.ll b/test/Transforms/SLPVectorizer/X86/reduction.ll
new file mode 100644
index 0000000000000..70b7c3a0b9980
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -0,0 +1,47 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.8.0"
+
+; int foo(double *A, int n, int m) {
+; double sum = 0, v1 = 2, v0 = 3;
+; for (int i=0; i < n; ++i)
+; sum += 7*A[i*2] + 7*A[i*2+1];
+; return sum;
+; }
+
+;CHECK: reduce
+;CHECK: load <2 x double>
+;CHECK: fmul <2 x double>
+;CHECK: ret
+define i32 @reduce(double* nocapture %A, i32 %n, i32 %m) {
+entry:
+ %cmp13 = icmp sgt i32 %n, 0
+ br i1 %cmp13, label %for.body, label %for.end
+
+for.body: ; preds = %entry, %for.body
+ %i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
+ %mul = shl nsw i32 %i.015, 1
+ %arrayidx = getelementptr inbounds double* %A, i32 %mul
+ %0 = load double* %arrayidx, align 4
+ %mul1 = fmul double %0, 7.000000e+00
+ %add12 = or i32 %mul, 1
+ %arrayidx3 = getelementptr inbounds double* %A, i32 %add12
+ %1 = load double* %arrayidx3, align 4
+ %mul4 = fmul double %1, 7.000000e+00
+ %add5 = fadd double %mul1, %mul4
+ %add6 = fadd double %sum.014, %add5
+ %inc = add nsw i32 %i.015, 1
+ %exitcond = icmp eq i32 %inc, %n
+ br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.body
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+ %phitmp = fptosi double %add6 to i32
+ br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+ %sum.0.lcssa = phi i32 [ %phitmp, %for.cond.for.end_crit_edge ], [ 0, %entry ]
+ ret i32 %sum.0.lcssa
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/reduction2.ll b/test/Transforms/SLPVectorizer/X86/reduction2.ll
new file mode 100644
index 0000000000000..7aa7d7e243d08
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -0,0 +1,32 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
+target triple = "i386-apple-macosx10.8.0"
+
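+; A rough C analogue of the loop below (hypothetical):
+;
+;   double foo(double *D) {
+;     double sum = 0;
+;     for (int i = 0; i < 100; ++i)
+;       sum += D[2*i]*D[2*i] + D[2*i+1]*D[2*i+1];
+;     return sum;
+;   }
+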
+;CHECK: @foo
+;CHECK: load <2 x double>
+;CHECK: ret
+define double @foo(double* nocapture %D) {
+ br label %1
+
+; <label>:1 ; preds = %1, %0
+ %i.02 = phi i32 [ 0, %0 ], [ %10, %1 ]
+ %sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
+ %2 = shl nsw i32 %i.02, 1
+ %3 = getelementptr inbounds double* %D, i32 %2
+ %4 = load double* %3, align 4
+ %A4 = fmul double %4, %4
+ %5 = or i32 %2, 1
+ %6 = getelementptr inbounds double* %D, i32 %5
+ %7 = load double* %6, align 4
+ %A7 = fmul double %7, %7
+ %8 = fadd double %A4, %A7
+ %9 = fadd double %sum.01, %8
+ %10 = add nsw i32 %i.02, 1
+ %exitcond = icmp eq i32 %10, 100
+ br i1 %exitcond, label %11, label %1
+
+; <label>:11 ; preds = %1
+ ret double %9
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/saxpy.ll b/test/Transforms/SLPVectorizer/X86/saxpy.ll
new file mode 100644
index 0000000000000..b520913a398d1
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/saxpy.ll
@@ -0,0 +1,45 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; SLP vectorization example from http://cs.stanford.edu/people/eschkufz/research/asplos291-schkufza.pdf
+;CHECK: SAXPY
+;CHECK: mul <4 x i32>
+;CHECK: ret
+
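+; A rough C analogue of the kernel below (hypothetical; unrolled by 4):
+;
+;   void SAXPY(int *x, int *y, int a, long i) {
+;     for (int k = 0; k < 4; ++k)
+;       x[i+k] = a * x[i+k] + y[i+k];
+;   }
+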
+define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
+ %1 = getelementptr inbounds i32* %x, i64 %i
+ %2 = load i32* %1, align 4
+ %3 = mul nsw i32 %2, %a
+ %4 = getelementptr inbounds i32* %y, i64 %i
+ %5 = load i32* %4, align 4
+ %6 = add nsw i32 %3, %5
+ store i32 %6, i32* %1, align 4
+ %7 = add i64 %i, 1
+ %8 = getelementptr inbounds i32* %x, i64 %7
+ %9 = load i32* %8, align 4
+ %10 = mul nsw i32 %9, %a
+ %11 = getelementptr inbounds i32* %y, i64 %7
+ %12 = load i32* %11, align 4
+ %13 = add nsw i32 %10, %12
+ store i32 %13, i32* %8, align 4
+ %14 = add i64 %i, 2
+ %15 = getelementptr inbounds i32* %x, i64 %14
+ %16 = load i32* %15, align 4
+ %17 = mul nsw i32 %16, %a
+ %18 = getelementptr inbounds i32* %y, i64 %14
+ %19 = load i32* %18, align 4
+ %20 = add nsw i32 %17, %19
+ store i32 %20, i32* %15, align 4
+ %21 = add i64 %i, 3
+ %22 = getelementptr inbounds i32* %x, i64 %21
+ %23 = load i32* %22, align 4
+ %24 = mul nsw i32 %23, %a
+ %25 = getelementptr inbounds i32* %y, i64 %21
+ %26 = load i32* %25, align 4
+ %27 = add nsw i32 %24, %26
+ store i32 %27, i32* %22, align 4
+ ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
new file mode 100644
index 0000000000000..0111b946d4968
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -0,0 +1,100 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+;CHECK:rollable
+define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i64 %n) {
+ %1 = icmp eq i64 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+ %2 = shl i64 %i.019, 2
+ %3 = getelementptr inbounds i32* %in, i64 %2
+;CHECK:load <4 x i32>
+ %4 = load i32* %3, align 4
+ %5 = or i64 %2, 1
+ %6 = getelementptr inbounds i32* %in, i64 %5
+ %7 = load i32* %6, align 4
+ %8 = or i64 %2, 2
+ %9 = getelementptr inbounds i32* %in, i64 %8
+ %10 = load i32* %9, align 4
+ %11 = or i64 %2, 3
+ %12 = getelementptr inbounds i32* %in, i64 %11
+ %13 = load i32* %12, align 4
+;CHECK:mul <4 x i32>
+ %14 = mul i32 %4, 7
+;CHECK:add <4 x i32>
+ %15 = add i32 %14, 7
+ %16 = mul i32 %7, 7
+ %17 = add i32 %16, 14
+ %18 = mul i32 %10, 7
+ %19 = add i32 %18, 21
+ %20 = mul i32 %13, 7
+ %21 = add i32 %20, 28
+ %22 = getelementptr inbounds i32* %out, i64 %2
+;CHECK:store <4 x i32>
+ store i32 %15, i32* %22, align 4
+ %23 = getelementptr inbounds i32* %out, i64 %5
+ store i32 %17, i32* %23, align 4
+ %24 = getelementptr inbounds i32* %out, i64 %8
+ store i32 %19, i32* %24, align 4
+ %25 = getelementptr inbounds i32* %out, i64 %11
+ store i32 %21, i32* %25, align 4
+ %26 = add i64 %i.019, 1
+ %exitcond = icmp eq i64 %26, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+;CHECK: ret
+ ret i32 undef
+}
+
+;CHECK:unrollable
+;CHECK-NOT: <4 x i32>
+;CHECK: ret
+define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
+ %1 = icmp eq i64 %n, 0
+ br i1 %1, label %._crit_edge, label %.lr.ph
+
+.lr.ph: ; preds = %0, %.lr.ph
+ %i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
+ %2 = shl i64 %i.019, 2
+ %3 = getelementptr inbounds i32* %in, i64 %2
+ %4 = load i32* %3, align 4
+ %5 = or i64 %2, 1
+ %6 = getelementptr inbounds i32* %in, i64 %5
+ %7 = load i32* %6, align 4
+ %8 = or i64 %2, 2
+ %9 = getelementptr inbounds i32* %in, i64 %8
+ %10 = load i32* %9, align 4
+ %11 = or i64 %2, 3
+ %12 = getelementptr inbounds i32* %in, i64 %11
+ %13 = load i32* %12, align 4
+ %14 = mul i32 %4, 7
+ %15 = add i32 %14, 7
+ %16 = mul i32 %7, 7
+ %17 = add i32 %16, 14
+ %18 = mul i32 %10, 7
+ %19 = add i32 %18, 21
+ %20 = mul i32 %13, 7
+ %21 = add i32 %20, 28
+ %22 = getelementptr inbounds i32* %out, i64 %2
+ store i32 %15, i32* %22, align 4
+ %23 = getelementptr inbounds i32* %out, i64 %5
+ store i32 %17, i32* %23, align 4
+ %barrier = call i32 @goo(i32 0) ; <---------------- memory barrier.
+ %24 = getelementptr inbounds i32* %out, i64 %8
+ store i32 %19, i32* %24, align 4
+ %25 = getelementptr inbounds i32* %out, i64 %11
+ store i32 %21, i32* %25, align 4
+ %26 = add i64 %i.019, 1
+ %exitcond = icmp eq i64 %26, %n
+ br i1 %exitcond, label %._crit_edge, label %.lr.ph
+
+._crit_edge: ; preds = %.lr.ph, %0
+ ret i32 undef
+}
+
+declare i32 @goo(i32)
diff --git a/test/Transforms/SLPVectorizer/X86/simplebb.ll b/test/Transforms/SLPVectorizer/X86/simplebb.ll
new file mode 100644
index 0000000000000..cd0b99e646773
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -0,0 +1,25 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Simple 3-pair chain with loads and stores
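+; A rough C analogue (hypothetical):
+;   c[0] = a[0] * b[0];
+;   c[1] = a[1] * b[1];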
+; CHECK: test1
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test1(double* %a, double* %b, double* %c) {
+entry:
+ %i0 = load double* %a, align 8
+ %i1 = load double* %b, align 8
+ %mul = fmul double %i0, %i1
+ %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %i3 = load double* %arrayidx3, align 8
+ %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %i4 = load double* %arrayidx4, align 8
+ %mul5 = fmul double %i3, %i4
+ store double %mul, double* %c, align 8
+ %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ store double %mul5, double* %arrayidx5, align 8
+ ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/X86/vector.ll b/test/Transforms/SLPVectorizer/X86/vector.ll
new file mode 100644
index 0000000000000..02a18979c6596
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/X86/vector.ll
@@ -0,0 +1,14 @@
+; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.8.0"
+
+; Make sure that we are not crashing or changing the code.
+;CHECK: test
+;CHECK: icmp
+;CHECK: ret
+define void @test(<4 x i32> %in, <4 x i32> %in2) {
+ %k = icmp eq <4 x i32> %in, %in2
+ ret void
+}
+
diff --git a/test/Transforms/SLPVectorizer/lit.local.cfg b/test/Transforms/SLPVectorizer/lit.local.cfg
new file mode 100644
index 0000000000000..19eebc0ac7ac3
--- /dev/null
+++ b/test/Transforms/SLPVectorizer/lit.local.cfg
@@ -0,0 +1 @@
+config.suffixes = ['.ll', '.c', '.cpp']
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index 30dd217743433..834032246f9a8 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -1243,3 +1243,77 @@ entry:
%v = load i32* %a
ret i32 %v
}
+
+define void @PR15674(i8* %data, i8* %src, i32 %size) {
+; Arrange (via control flow) to have unmerged stores of a particular width to
+; an alloca where we incrementally store from the end of the array toward its
+; beginning. Ensure that the final integer store, despite being convertible to
+; the integer type that we end up promoting this alloca toward, doesn't get
+; widened to a full alloca store.
+; CHECK: @PR15674
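+;
+; A rough C analogue of the control flow below (hypothetical):
+;
+;   char tmp[4];
+;   switch (size) {  /* each case falls through to the next */
+;   case 4: tmp[3] = src[3];
+;   case 3: tmp[2] = src[2];
+;   case 2: tmp[1] = src[1];
+;   case 1: tmp[0] = src[0];
+;   }
+;   memcpy(data, tmp, size);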
+
+entry:
+ %tmp = alloca [4 x i8], align 1
+; CHECK: alloca i32
+
+ switch i32 %size, label %end [
+ i32 4, label %bb4
+ i32 3, label %bb3
+ i32 2, label %bb2
+ i32 1, label %bb1
+ ]
+
+bb4:
+ %src.gep3 = getelementptr inbounds i8* %src, i32 3
+ %src.3 = load i8* %src.gep3
+ %tmp.gep3 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 3
+ store i8 %src.3, i8* %tmp.gep3
+; CHECK: store i8
+
+ br label %bb3
+
+bb3:
+ %src.gep2 = getelementptr inbounds i8* %src, i32 2
+ %src.2 = load i8* %src.gep2
+ %tmp.gep2 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 2
+ store i8 %src.2, i8* %tmp.gep2
+; CHECK: store i8
+
+ br label %bb2
+
+bb2:
+ %src.gep1 = getelementptr inbounds i8* %src, i32 1
+ %src.1 = load i8* %src.gep1
+ %tmp.gep1 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 1
+ store i8 %src.1, i8* %tmp.gep1
+; CHECK: store i8
+
+ br label %bb1
+
+bb1:
+ %src.gep0 = getelementptr inbounds i8* %src, i32 0
+ %src.0 = load i8* %src.gep0
+ %tmp.gep0 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 0
+ store i8 %src.0, i8* %tmp.gep0
+; CHECK: store i8
+
+ br label %end
+
+end:
+ %tmp.raw = bitcast [4 x i8]* %tmp to i8*
+ call void @llvm.memcpy.p0i8.p0i8.i32(i8* %data, i8* %tmp.raw, i32 %size, i32 1, i1 false)
+ ret void
+; CHECK: ret void
+}
+
+define void @PR15805(i1 %a, i1 %b) {
+; CHECK: @PR15805
+; CHECK: select i1 undef, i64* %c, i64* %c
+; CHECK: ret void
+
+ %c = alloca i64, align 8
+ %p.0.c = select i1 undef, i64* %c, i64* %c
+ %cond.in = select i1 undef, i64* %p.0.c, i64* %c
+ %cond = load i64* %cond.in, align 8
+ ret void
+}
diff --git a/test/Transforms/SROA/vector-promotion.ll b/test/Transforms/SROA/vector-promotion.ll
index 02f6d040cc950..3336515770a37 100644
--- a/test/Transforms/SROA/vector-promotion.ll
+++ b/test/Transforms/SROA/vector-promotion.ll
@@ -224,26 +224,26 @@ entry:
%a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
store <2 x i32> <i32 0, i32 0>, <2 x i32>* %a.cast0
; CHECK-NOT: store
-; CHECK: %[[insert1:.*]] = shufflevector <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, {{.*}}>
+; CHECK: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
%a.gep1 = getelementptr <4 x i32>* %a, i32 0, i32 1
%a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
store <2 x i32> <i32 1, i32 1>, <2 x i32>* %a.cast1
-; CHECK-NEXT: %[[insert2:.*]] = shufflevector <4 x i32> <i32 undef, i32 1, i32 1, i32 undef>, <4 x i32> %[[insert1]], <4 x i32> <i32 4, i32 1, i32 2, {{.*}}>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
%a.gep2 = getelementptr <4 x i32>* %a, i32 0, i32 2
%a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
store <2 x i32> <i32 2, i32 2>, <2 x i32>* %a.cast2
-; CHECK-NEXT: %[[insert3:.*]] = shufflevector <4 x i32> <i32 undef, i32 undef, i32 2, i32 2>, <4 x i32> %[[insert2]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
%a.gep3 = getelementptr <4 x i32>* %a, i32 0, i32 3
store i32 3, i32* %a.gep3
-; CHECK-NEXT: %[[insert4:.*]] = insertelement <4 x i32> %[[insert3]], i32 3, i32 3
+; CHECK-NEXT: insertelement <4 x i32>
%ret = load <4 x i32>* %a
ret <4 x i32> %ret
-; CHECK-NEXT: ret <4 x i32> %[[insert4]]
+; CHECK-NEXT: ret <4 x i32>
}
define <4 x i32> @test_subvec_load() {
@@ -291,27 +291,27 @@ entry:
%a.cast0 = bitcast float* %a.gep0 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast0, i8 0, i32 8, i32 0, i1 false)
; CHECK-NOT: store
-; CHECK: %[[insert1:.*]] = shufflevector <4 x float> <float 0.000000e+00, float 0.000000e+00, float undef, float undef>, <4 x float> undef, <4 x i32> <i32 0, i32 1, {{.*}}>
+; CHECK: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
%a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
%a.cast1 = bitcast float* %a.gep1 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast1, i8 1, i32 8, i32 0, i1 false)
-; CHECK-NEXT: %[[insert2:.*]] = shufflevector <4 x float> <float undef, float 0x3820202020000000, float 0x3820202020000000, float undef>, <4 x float> %[[insert1]], <4 x i32> <i32 4, i32 1, i32 2, {{.*}}>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
%a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
%a.cast2 = bitcast float* %a.gep2 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast2, i8 3, i32 8, i32 0, i1 false)
-; CHECK-NEXT: %[[insert3:.*]] = shufflevector <4 x float> <float undef, float undef, float 0x3860606060000000, float 0x3860606060000000>, <4 x float> %[[insert2]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
%a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
%a.cast3 = bitcast float* %a.gep3 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast3, i8 7, i32 4, i32 0, i1 false)
-; CHECK-NEXT: %[[insert4:.*]] = insertelement <4 x float> %[[insert3]], float 0x38E0E0E0E0000000, i32 3
+; CHECK-NEXT: insertelement <4 x float>
%ret = load <4 x float>* %a
ret <4 x float> %ret
-; CHECK-NEXT: ret <4 x float> %[[insert4]]
+; CHECK-NEXT: ret <4 x float>
}
define <4 x float> @test_subvec_memcpy(i8* %x, i8* %y, i8* %z, i8* %f, i8* %out) {
@@ -326,7 +326,7 @@ entry:
; CHECK: %[[xptr:.*]] = bitcast i8* %x to <2 x float>*
; CHECK-NEXT: %[[x:.*]] = load <2 x float>* %[[xptr]]
; CHECK-NEXT: %[[expand_x:.*]] = shufflevector <2 x float> %[[x]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
-; CHECK-NEXT: %[[insert_x:.*]] = shufflevector <4 x float> %[[expand_x]], <4 x float> undef, <4 x i32> <i32 0, i32 1, {{.*}}>
+; CHECK-NEXT: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
%a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
%a.cast1 = bitcast float* %a.gep1 to i8*
@@ -334,7 +334,7 @@ entry:
; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
; CHECK-NEXT: %[[y:.*]] = load <2 x float>* %[[yptr]]
; CHECK-NEXT: %[[expand_y:.*]] = shufflevector <2 x float> %[[y]], <2 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
-; CHECK-NEXT: %[[insert_y:.*]] = shufflevector <4 x float> %[[expand_y]], <4 x float> %[[insert_x]], <4 x i32> <i32 4, i32 1, i32 2, {{.*}}>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
%a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
%a.cast2 = bitcast float* %a.gep2 to i8*
@@ -342,14 +342,14 @@ entry:
; CHECK-NEXT: %[[zptr:.*]] = bitcast i8* %z to <2 x float>*
; CHECK-NEXT: %[[z:.*]] = load <2 x float>* %[[zptr]]
; CHECK-NEXT: %[[expand_z:.*]] = shufflevector <2 x float> %[[z]], <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
-; CHECK-NEXT: %[[insert_z:.*]] = shufflevector <4 x float> %[[expand_z]], <4 x float> %[[insert_y]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
%a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
%a.cast3 = bitcast float* %a.gep3 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast3, i8* %f, i32 4, i32 0, i1 false)
; CHECK-NEXT: %[[fptr:.*]] = bitcast i8* %f to float*
; CHECK-NEXT: %[[f:.*]] = load float* %[[fptr]]
-; CHECK-NEXT: %[[insert_f:.*]] = insertelement <4 x float> %[[insert_z]], float %[[f]], i32 3
+; CHECK-NEXT: %[[insert_f:.*]] = insertelement <4 x float>
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %a.cast2, i32 8, i32 0, i1 false)
; CHECK-NEXT: %[[outptr:.*]] = bitcast i8* %out to <2 x float>*
diff --git a/test/Transforms/ScalarRepl/dynamic-vector-gep.ll b/test/Transforms/ScalarRepl/dynamic-vector-gep.ll
deleted file mode 100644
index 565cd76164222..0000000000000
--- a/test/Transforms/ScalarRepl/dynamic-vector-gep.ll
+++ /dev/null
@@ -1,167 +0,0 @@
-; RUN: opt < %s -scalarrepl -S | FileCheck %s
-
-target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-; CHECK: @test1
-; CHECK: %[[alloc:[\.a-z0-9]*]] = alloca <4 x float>
-; CHECK: store <4 x float> zeroinitializer, <4 x float>* %[[alloc]]
-; CHECK: memset
-; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
-
-; Split the array but don't replace the memset with an insert
-; element as its not a constant offset.
-; The load, however, can be replaced with an extract element.
-define float @test1(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca [4 x <4 x float>]
- store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
- %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
- %cast = bitcast float* %ptr1 to i8*
- call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i32 4, i1 false)
- %ptr2 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 1, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: @test2
-; CHECK: %[[ins:[\.a-z0-9]*]] = insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
-; CHECK: extractelement <4 x float> %[[ins]], i32 %idx2
-
-; Do SROA on the array when it has dynamic vector reads and writes.
-define float @test2(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca [4 x <4 x float>]
- store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
- %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr2 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: test3
-; CHECK: %0 = alloca [4 x <4 x float>]
-; CHECK-NOT: alloca
-
-; Don't do SROA on a dynamically indexed vector when it spans
-; more than one array element of the alloca array it is within.
-define float @test3(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca [4 x <4 x float>]
- store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
- %bigvec = bitcast [4 x <4 x float>]* %0 to <16 x float>*
- %ptr1 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr2 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: test4
-; CHECK: insertelement <16 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
-; CHECK: extractelement <16 x float> %0, i32 %idx2
-
-; Don't do SROA on a dynamically indexed vector when it spans
-; more than one array element of the alloca array it is within.
-; However, unlike test3, the store is on the vector type
-; so SROA will convert the large alloca into the large vector
-; type and do all accesses with insert/extract element
-define float @test4(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca [4 x <4 x float>]
- %bigvec = bitcast [4 x <4 x float>]* %0 to <16 x float>*
- store <16 x float> zeroinitializer, <16 x float>* %bigvec
- %ptr1 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr2 = getelementptr <16 x float>* %bigvec, i32 0, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: @test5
-; CHECK: %0 = alloca [4 x <4 x float>]
-; CHECK-NOT: alloca
-
-; Don't do SROA as the is a second dynamically indexed array
-; which may span multiple elements of the alloca.
-define float @test5(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca [4 x <4 x float>]
- store [4 x <4 x float>] zeroinitializer, [4 x <4 x float>]* %0
- %ptr1 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx1
- %ptr2 = bitcast float* %ptr1 to [1 x <2 x float>]*
- %ptr3 = getelementptr [1 x <2 x float>]* %ptr2, i32 0, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr4 = getelementptr [4 x <4 x float>]* %0, i32 0, i32 0, i32 %idx2
- %ret = load float* %ptr4
- ret float %ret
-}
-
-; CHECK: test6
-; CHECK: insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
-; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
-
-%vector.pair = type { %vector.anon, %vector.anon }
-%vector.anon = type { %vector }
-%vector = type { <4 x float> }
-
-; Dynamic GEPs on vectors were crashing when the vector was inside a struct
-; as the new GEP for the new alloca might not include all the indices from
-; the original GEP, just the indices it needs to get to the correct offset of
-; some type, not necessarily the dynamic vector.
-; This test makes sure we don't have this crash.
-define float @test6(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca %vector.pair
- store %vector.pair zeroinitializer, %vector.pair* %0
- %ptr1 = getelementptr %vector.pair* %0, i32 0, i32 0, i32 0, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr2 = getelementptr %vector.pair* %0, i32 0, i32 1, i32 0, i32 0, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: test7
-; CHECK: insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %idx1
-; CHECK: extractelement <4 x float> zeroinitializer, i32 %idx2
-
-%array.pair = type { [2 x %array.anon], %array.anon }
-%array.anon = type { [2 x %vector] }
-
-; This is the same as test6 and tests the same crash, but on arrays.
-define float @test7(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca %array.pair
- store %array.pair zeroinitializer, %array.pair* %0
- %ptr1 = getelementptr %array.pair* %0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 %idx1
- store float 1.0, float* %ptr1
- %ptr2 = getelementptr %array.pair* %0, i32 0, i32 1, i32 0, i32 0, i32 0, i32 %idx2
- %ret = load float* %ptr2
- ret float %ret
-}
-
-; CHECK: test8
-; CHECK: %[[offset1:[\.a-z0-9]*]] = add i32 %idx1, 1
-; CHECK: %[[ins:[\.a-z0-9]*]] = insertelement <4 x float> zeroinitializer, float 1.000000e+00, i32 %[[offset1]]
-; CHECK: %[[offset2:[\.a-z0-9]*]] = add i32 %idx2, 2
-; CHECK: extractelement <4 x float> %[[ins]], i32 %[[offset2]]
-
-; Do SROA on the vector when it has dynamic vector reads and writes
-; from a non-zero offset.
-define float @test8(i32 %idx1, i32 %idx2) {
-entry:
- %0 = alloca <4 x float>
- store <4 x float> zeroinitializer, <4 x float>* %0
- %ptr1 = getelementptr <4 x float>* %0, i32 0, i32 1
- %ptr2 = bitcast float* %ptr1 to <3 x float>*
- %ptr3 = getelementptr <3 x float>* %ptr2, i32 0, i32 %idx1
- store float 1.0, float* %ptr3
- %ptr4 = getelementptr <4 x float>* %0, i32 0, i32 2
- %ptr5 = bitcast float* %ptr4 to <2 x float>*
- %ptr6 = getelementptr <2 x float>* %ptr5, i32 0, i32 %idx2
- %ret = load float* %ptr6
- ret float %ret
-}
-
-declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
diff --git a/test/Transforms/SimplifyCFG/2003-08-17-BranchFold.ll b/test/Transforms/SimplifyCFG/2003-08-17-BranchFold.ll
index fc89b165f85b1..f6b068fd79c37 100644
--- a/test/Transforms/SimplifyCFG/2003-08-17-BranchFold.ll
+++ b/test/Transforms/SimplifyCFG/2003-08-17-BranchFold.ll
@@ -1,11 +1,11 @@
; This test checks that 'br X, Dest, Dest' is folded
; into 'br Dest'.
-; RUN: opt < %s -simplifycfg -S | \
-; RUN: not grep "br i1 %c2"
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
declare void @noop()
+; CHECK-NOT: br i1 %c2
define i32 @test(i1 %c1, i1 %c2) {
call void @noop( )
br i1 %c1, label %A, label %Y
diff --git a/test/Transforms/SimplifyCFG/2003-08-17-BranchFoldOrdering.ll b/test/Transforms/SimplifyCFG/2003-08-17-BranchFoldOrdering.ll
index c1b032fb8b395..78049080a64eb 100644
--- a/test/Transforms/SimplifyCFG/2003-08-17-BranchFoldOrdering.ll
+++ b/test/Transforms/SimplifyCFG/2003-08-17-BranchFoldOrdering.ll
@@ -3,8 +3,9 @@
; because the SimplifyCFG function does not use
; the ConstantFoldTerminator function.
-; RUN: opt < %s -simplifycfg -S | \
-; RUN: not grep "br i1 %c2"
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; CHECK-NOT: br i1 %c2
declare void @noop()
diff --git a/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch-dbg.ll b/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch-dbg.ll
index af59ba04f4418..fbfb100274f61 100644
--- a/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch-dbg.ll
+++ b/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch-dbg.ll
@@ -1,7 +1,6 @@
-; RUN: opt < %s -simplifycfg -S | \
-; RUN: not grep switch
-
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+; CHECK-NOT: switch
%llvm.dbg.anchor.type = type { i32, i32 }
%llvm.dbg.compile_unit.type = type { i32, { }*, i32, i8*, i8*, i8*, i1, i1, i8* }
diff --git a/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch.ll b/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch.ll
index 93f851c6f9eaa..806659635e4b5 100644
--- a/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch.ll
+++ b/test/Transforms/SimplifyCFG/2003-08-17-FoldSwitch.ll
@@ -1,5 +1,6 @@
-; RUN: opt < %s -simplifycfg -S | \
-; RUN: not grep switch
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; CHECK-NOT: switch
; Test normal folding
define i32 @test1() {
diff --git a/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll b/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
index 760aa139bf7be..907261bd438ad 100644
--- a/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
+++ b/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
@@ -1,9 +1,7 @@
; Make sure this doesn't turn into an infinite loop
-; RUN: opt < %s -simplifycfg -constprop -simplifycfg |\
-; RUN: llvm-dis | grep bb86
-; END.
-
+; RUN: opt < %s -simplifycfg -constprop -simplifycfg | llvm-dis | FileCheck %s
+
%struct.anon = type { i32, i32, i32, i32, [1024 x i8] }
@_zero_ = external global %struct.anon* ; <%struct.anon**> [#uses=2]
@_one_ = external global %struct.anon* ; <%struct.anon**> [#uses=4]
@@ -112,6 +110,7 @@ cond_true83: ; preds = %bb80
%tmp71 = call i32 @_do_compare( %struct.anon* null, %struct.anon* null, i32 0, i32 1 ) ; <i32> [#uses=1]
%tmp76 = icmp eq i32 %tmp71, 0 ; <i1> [#uses=1]
br i1 %tmp76, label %bb80.outer, label %bb80
+; CHECK: bb86
bb86: ; preds = %bb80
call void @free_num( %struct.anon** %num )
%tmp88 = load %struct.anon** %guess ; <%struct.anon*> [#uses=1]
diff --git a/test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll b/test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll
index 009d1c8cc4da8..8f21b9bab9b65 100644
--- a/test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll
+++ b/test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll
@@ -1,6 +1,7 @@
; PR957
-; RUN: opt < %s -simplifycfg -S | \
-; RUN: not grep select
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; CHECK-NOT: select
@G = extern_weak global i32
diff --git a/test/Transforms/SimplifyCFG/2007-11-22-InvokeNoUnwind.ll b/test/Transforms/SimplifyCFG/2007-11-22-InvokeNoUnwind.ll
index a20c46e1ad1f9..a90e072762534 100644
--- a/test/Transforms/SimplifyCFG/2007-11-22-InvokeNoUnwind.ll
+++ b/test/Transforms/SimplifyCFG/2007-11-22-InvokeNoUnwind.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -simplifycfg -S | not grep invoke
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
+
+; CHECK-NOT: invoke
declare i32 @func(i8*) nounwind
diff --git a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
index 14baeea4b0b9b..cf29b7159793f 100644
--- a/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
+++ b/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
@@ -1,5 +1,5 @@
; The phi should not be eliminated in this case, because the fp op could trap.
-; RUN: opt < %s -simplifycfg -S | grep "= phi double"
+; RUN: opt < %s -simplifycfg -S | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin8"
@@ -19,6 +19,7 @@ cond_true: ; preds = %entry
br label %cond_next
cond_next: ; preds = %cond_true, %entry
+; CHECK: = phi double
%F.0 = phi double [ %tmp, %entry ], [ %tmp7, %cond_true ] ; <double> [#uses=1]
store double %F.0, double* @G, align 8
ret void
diff --git a/test/Transforms/SimplifyCFG/speculate-store.ll b/test/Transforms/SimplifyCFG/speculate-store.ll
new file mode 100644
index 0000000000000..8d7fe79dcd4e2
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/speculate-store.ll
@@ -0,0 +1,108 @@
+; RUN: opt -simplifycfg -S < %s | FileCheck %s
+
+define void @ifconvertstore(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
+entry:
+ %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %C
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+
+; First store to the location.
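+; This unconditional store shows the address is writable, which is what
+; allows the guarded store below to be speculated.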
+ store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %1, %D
+ %cmp6 = icmp sgt i32 %add5, %C
+ br i1 %cmp6, label %if.then, label %ret.end
+
+; Make sure we speculate stores like the following one. A speculated store is
+; cheap compared to a mispredicted branch.
+; CHECK: @ifconvertstore
+; CHECK: %add5.add = select i1 %cmp6, i32 %add5, i32 %add
+; CHECK: store i32 %add5.add, i32* %arrayidx2, align 4
+if.then:
+ store i32 %add5, i32* %arrayidx2, align 4
+ br label %ret.end
+
+ret.end:
+ ret void
+}
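+
+; For reference, a rough C analogue of @ifconvertstore above (an illustrative
+; reconstruction; the variable names are assumptions, not from the original
+; source):
+;
+;   A[0] = B[0] + C;
+;   if (B[1] + D > C)
+;     A[0] = B[1] + D;
+;
+; Because A[0] is written unconditionally first, the guarded store can be
+; rewritten as an unconditional store of a select between the two values.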
+
+define void @noifconvertstore1(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
+entry:
+ %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %C
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+
+; Store to a different location.
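+; Unlike @ifconvertstore, this store is to %arrayidx rather than %arrayidx2,
+; so it does not show the speculated location is writable; no select is formed.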
+ store i32 %add, i32* %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %1, %D
+ %cmp6 = icmp sgt i32 %add5, %C
+ br i1 %cmp6, label %if.then, label %ret.end
+
+; CHECK: @noifconvertstore1
+; CHECK-NOT: select
+if.then:
+ store i32 %add5, i32* %arrayidx2, align 4
+ br label %ret.end
+
+ret.end:
+ ret void
+}
+
+declare void @unknown_fun()
+
+define void @noifconvertstore2(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
+entry:
+ %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %C
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+
+; First store to the location.
+ store i32 %add, i32* %arrayidx2, align 4
+ call void @unknown_fun()
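+; The opaque call may read or write the memory at %arrayidx2 in between, so
+; the guarded store can no longer be safely merged with the first one.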
+ %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %1, %D
+ %cmp6 = icmp sgt i32 %add5, %C
+ br i1 %cmp6, label %if.then, label %ret.end
+
+; CHECK: @noifconvertstore2
+; CHECK-NOT: select
+if.then:
+ store i32 %add5, i32* %arrayidx2, align 4
+ br label %ret.end
+
+ret.end:
+ ret void
+}
+
+define void @noifconvertstore_volatile(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
+entry:
+ %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %0 = load i32* %arrayidx, align 4
+ %add = add nsw i32 %0, %C
+ %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+
+; First store to the location.
+ store i32 %add, i32* %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %1 = load i32* %arrayidx4, align 4
+ %add5 = add nsw i32 %1, %D
+ %cmp6 = icmp sgt i32 %add5, %C
+ br i1 %cmp6, label %if.then, label %ret.end
+
+; Make sure we don't speculate volatile stores.
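+; Volatile accesses must not be added, removed, or reordered, so turning the
+; guarded volatile store into an unconditional one would change behavior.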
+; CHECK: @noifconvertstore_volatile
+; CHECK-NOT: select
+if.then:
+ store volatile i32 %add5, i32* %arrayidx2, align 4
+ br label %ret.end
+
+ret.end:
+ ret void
+}
diff --git a/test/Transforms/SimplifyCFG/switch-to-icmp.ll b/test/Transforms/SimplifyCFG/switch-to-icmp.ll
index 414f8475bc230..e9a6db45cb007 100644
--- a/test/Transforms/SimplifyCFG/switch-to-icmp.ll
+++ b/test/Transforms/SimplifyCFG/switch-to-icmp.ll
@@ -37,3 +37,21 @@ lor.end:
; CHECK: @test2
; CHECK: %switch = icmp ult i32 %x, 2
}
+
+define i32 @test3(i1 %flag) {
+entry:
+ switch i1 %flag, label %bad [
+ i1 true, label %good
+ i1 false, label %good
+ ]
+
+good:
+ ret i32 0
+
+bad:
+ ret i32 1
+
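+; Both i1 values of the switch branch to %good, so %bad is unreachable and
+; SimplifyCFG folds the whole function down to 'ret i32 0'.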
+; CHECK: @test3
+; CHECK: entry:
+; CHECK-NEXT: ret i32 0
+}