Diffstat (limited to 'test/Transforms/JumpThreading/assume.ll')
-rw-r--r--  test/Transforms/JumpThreading/assume.ll | 145
1 file changed, 137 insertions(+), 8 deletions(-)
diff --git a/test/Transforms/JumpThreading/assume.ll b/test/Transforms/JumpThreading/assume.ll
index 3a039676e172..f58ee299cba0 100644
--- a/test/Transforms/JumpThreading/assume.ll
+++ b/test/Transforms/JumpThreading/assume.ll
@@ -59,12 +59,12 @@ return: ; preds = %entry, %if.then
@g = external global i32
; Check that we do prove a fact using an assume within the block.
-; FIXME: We can fold the assume based on the semantics of assume.
-; CHECK-LABEL: @can_fold_assume
-; CHECK: %notnull = icmp ne i32* %array, null
-; CHECK-NEXT: call void @llvm.assume(i1 %notnull)
-; CHECK-NEXT: ret void
+; We can fold the assume based on the semantics of assume.
define void @can_fold_assume(i32* %array) {
+; CHECK-LABEL: @can_fold_assume
+; CHECK-NOT: call void @llvm.assume
+; CHECK-NOT: br
+; CHECK: ret void
%notnull = icmp ne i32* %array, null
call void @llvm.assume(i1 %notnull)
br i1 %notnull, label %normal, label %error
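For reference, a minimal sketch of the output the new CHECK lines for @can_fold_assume expect from -jump-threading (the exact surviving instructions are an assumption; the test only requires that the assume and the branch are gone and the function returns):

define void @can_fold_assume(i32* %array) {
  %notnull = icmp ne i32* %array, null  ; may remain as dead code; only the assume and br must disappear
  ret void
}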
@@ -80,19 +80,128 @@ error:
declare void @f(i1)
declare void @exit()
; We can fold the assume but not the uses before the assume.
-define void @dont_fold_incorrectly(i32* %array) {
-; CHECK-LABEL:@dont_fold_incorrectly
+define void @cannot_fold_use_before_assume(i32* %array) {
+; CHECK-LABEL:@cannot_fold_use_before_assume
; CHECK: @f(i1 %notnull)
; CHECK-NEXT: exit()
-; CHECK-NEXT: assume(i1 %notnull)
+; CHECK-NOT: assume
+; CHECK-NEXT: ret void
+ %notnull = icmp ne i32* %array, null
+ call void @f(i1 %notnull)
+ call void @exit()
+ call void @llvm.assume(i1 %notnull)
+ br i1 %notnull, label %normal, label %error
+
+normal:
+ ret void
+
+error:
+ store atomic i32 0, i32* @g unordered, align 4
+ ret void
+}
+
+declare void @dummy(i1) nounwind argmemonly
+define void @can_fold_some_use_before_assume(i32* %array) {
+
+; CHECK-LABEL:@can_fold_some_use_before_assume
+; CHECK: @f(i1 %notnull)
+; CHECK-NEXT: @dummy(i1 true)
+; CHECK-NOT: assume
; CHECK-NEXT: ret void
%notnull = icmp ne i32* %array, null
call void @f(i1 %notnull)
+ call void @dummy(i1 %notnull)
+ call void @llvm.assume(i1 %notnull)
+ br i1 %notnull, label %normal, label %error
+
+normal:
+ ret void
+
+error:
+ store atomic i32 0, i32* @g unordered, align 4
+ ret void
+
+}
+
+; FIXME: We can fold the assume and all uses before/after the assume, because the
+; trapping call to @exit comes after the assume, but we do not do so currently.
+define void @can_fold_assume_and_all_uses(i32* %array) {
+; CHECK-LABEL:@can_fold_assume_and_all_uses
+; CHECK: @dummy(i1 %notnull)
+; CHECK-NEXT: assume(i1 %notnull)
+; CHECK-NEXT: exit()
+; CHECK-NEXT: %notnull2 = or i1 true, false
+; CHECK-NEXT: @f(i1 %notnull2)
+; CHECK-NEXT: ret void
+ %notnull = icmp ne i32* %array, null
+ call void @dummy(i1 %notnull)
+ call void @llvm.assume(i1 %notnull)
call void @exit()
+ br i1 %notnull, label %normal, label %error
+
+normal:
+ %notnull2 = or i1 %notnull, false
+ call void @f(i1 %notnull2)
+ ret void
+
+error:
+ store atomic i32 0, i32* @g unordered, align 4
+ ret void
+}
+
+declare void @fz(i8)
+; FIXME: We can fold the assume to true, along with the use after the assume, but we
+; do not do so currently because of the function call after the assume.
+define void @can_fold_assume2(i32* %array) {
+
+; CHECK-LABEL:@can_fold_assume2
+; CHECK: @f(i1 %notnull)
+; CHECK-NEXT: assume(i1 %notnull)
+; CHECK-NEXT: znotnull = zext i1 %notnull to i8
+; CHECK-NEXT: @f(i1 %notnull)
+; CHECK-NEXT: @f(i1 true)
+; CHECK-NEXT: @fz(i8 %znotnull)
+; CHECK-NEXT: ret void
+ %notnull = icmp ne i32* %array, null
+ call void @f(i1 %notnull)
+ call void @llvm.assume(i1 %notnull)
+ %znotnull = zext i1 %notnull to i8
+ call void @f(i1 %notnull)
+ br i1 %notnull, label %normal, label %error
+
+normal:
+ call void @f(i1 %notnull)
+ call void @fz(i8 %znotnull)
+ ret void
+
+error:
+ store atomic i32 0, i32* @g unordered, align 4
+ ret void
+}
+
+declare void @llvm.experimental.guard(i1, ...)
+; FIXME: We can fold the assume to true, but we do not do so
+; because of the guard following the assume.
+define void @can_fold_assume3(i32* %array) {
+
+; CHECK-LABEL:@can_fold_assume3
+; CHECK: @f(i1 %notnull)
+; CHECK-NEXT: assume(i1 %notnull)
+; CHECK-NEXT: guard(i1 %notnull)
+; CHECK-NEXT: znotnull = zext i1 true to i8
+; CHECK-NEXT: @f(i1 true)
+; CHECK-NEXT: @fz(i8 %znotnull)
+; CHECK-NEXT: ret void
+ %notnull = icmp ne i32* %array, null
+ call void @f(i1 %notnull)
call void @llvm.assume(i1 %notnull)
+ call void(i1, ...) @llvm.experimental.guard(i1 %notnull) [ "deopt"() ]
+ %znotnull = zext i1 %notnull to i8
br i1 %notnull, label %normal, label %error
normal:
+ call void @f(i1 %notnull)
+ call void @fz(i8 %znotnull)
ret void
error:
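To make the FIXME above @can_fold_assume_and_all_uses concrete, the fully folded form it alludes to would look roughly like the sketch below (an assumed ideal output, not what the pass produces today): since @dummy is nounwind, the assume is guaranteed to execute after it, so even the use before the assume can be rewritten to true, and the branch folds through to %normal where %notnull2 simplifies to true as well.

declare void @dummy(i1) nounwind argmemonly
declare void @exit()
declare void @f(i1)

define void @can_fold_assume_and_all_uses(i32* %array) {
  call void @dummy(i1 true)   ; use before the assume, still foldable because the assume must execute
  call void @exit()
  call void @f(i1 true)       ; %notnull2 = or i1 true, false simplifies to true
  ret void
}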
@@ -100,6 +209,26 @@ error:
ret void
}
+
+; Check that we can fold all uses of the condition and remove the conditional branch.
+define void @can_fold_assume4(i32* %array) {
+; CHECK-LABEL: can_fold_assume4
+; CHECK-NOT: notnull
+; CHECK: dummy(i1 true)
+; CHECK-NEXT: ret void
+ %notnull = icmp ne i32* %array, null
+ call void @exit()
+ call void @dummy(i1 %notnull)
+ call void @llvm.assume(i1 %notnull)
+ br i1 %notnull, label %normal, label %error
+
+normal:
+ ret void
+
+error:
+ store atomic i32 0, i32* @g unordered, align 4
+ ret void
+}
; Function Attrs: nounwind
declare void @llvm.assume(i1) #1
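Similarly, the CHECK lines for @can_fold_assume4 amount to expecting something like the following after -jump-threading (a sketch assuming the dead icmp is cleaned up; the test itself only checks that no use of %notnull survives, that @dummy receives true, and that the branch is gone):

declare void @dummy(i1) nounwind argmemonly
declare void @exit()

define void @can_fold_assume4(i32* %array) {
  call void @exit()
  call void @dummy(i1 true)   ; %notnull is known true here: @dummy is nounwind, so the assume is guaranteed to execute next
  ret void
}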