summary | refs | log | tree | commit | diff
path: root/test/Transforms/InstCombine/icmp-xor-signbit.ll
diff options
context:
space:
mode:
Diffstat (limited to 'test/Transforms/InstCombine/icmp-xor-signbit.ll')
-rw-r--r--  test/Transforms/InstCombine/icmp-xor-signbit.ll  228
1 files changed, 228 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/icmp-xor-signbit.ll b/test/Transforms/InstCombine/icmp-xor-signbit.ll
new file mode 100644
index 000000000000..30a9668f37df
--- /dev/null
+++ b/test/Transforms/InstCombine/icmp-xor-signbit.ll
@@ -0,0 +1,228 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
+
+define i1 @slt_to_ult(i8 %x, i8 %y) {
+; CHECK-LABEL: @slt_to_ult(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; 128 is the i8 sign bit; xor'ing both operands with the sign mask maps
+; signed order onto unsigned order, so the xors are removed entirely.
+ %a = xor i8 %x, 128
+ %b = xor i8 %y, 128
+ %cmp = icmp slt i8 %a, %b
+ ret i1 %cmp
+}
+
+; PR33138 - https://bugs.llvm.org/show_bug.cgi?id=33138
+
+define <2 x i1> @slt_to_ult_splat(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @slt_to_ult_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i8> %x, %y
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @slt_to_ult: the fold must also fire for a
+; splatted sign-mask constant (this was the PR33138 miss).
+ %a = xor <2 x i8> %x, <i8 128, i8 128>
+ %b = xor <2 x i8> %y, <i8 128, i8 128>
+ %cmp = icmp slt <2 x i8> %a, %b
+ ret <2 x i1> %cmp
+}
+
+; Make sure that unsigned -> signed works too.
+
+define i1 @ult_to_slt(i8 %x, i8 %y) {
+; CHECK-LABEL: @ult_to_slt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; Reverse direction of @slt_to_ult: an unsigned compare of sign-flipped
+; values is a signed compare of the originals.
+ %a = xor i8 %x, 128
+ %b = xor i8 %y, 128
+ %cmp = icmp ult i8 %a, %b
+ ret i1 %cmp
+}
+
+define <2 x i1> @ult_to_slt_splat(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @ult_to_slt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> %x, %y
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @ult_to_slt (unsigned -> signed).
+ %a = xor <2 x i8> %x, <i8 128, i8 128>
+ %b = xor <2 x i8> %y, <i8 128, i8 128>
+ %cmp = icmp ult <2 x i8> %a, %b
+ ret <2 x i1> %cmp
+}
+
+; icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
+
+define i1 @slt_to_ugt(i8 %x, i8 %y) {
+; CHECK-LABEL: @slt_to_ugt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; 127 is the max signed i8 value (~signmask); xor with it both swaps
+; signedness and reverses the predicate: slt becomes ugt.
+ %a = xor i8 %x, 127
+ %b = xor i8 %y, 127
+ %cmp = icmp slt i8 %a, %b
+ ret i1 %cmp
+}
+
+define <2 x i1> @slt_to_ugt_splat(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @slt_to_ugt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <2 x i8> %x, %y
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @slt_to_ugt (xor with splatted maxsignval 127).
+ %a = xor <2 x i8> %x, <i8 127, i8 127>
+ %b = xor <2 x i8> %y, <i8 127, i8 127>
+ %cmp = icmp slt <2 x i8> %a, %b
+ ret <2 x i1> %cmp
+}
+
+; Make sure that unsigned -> signed works too.
+
+define i1 @ult_to_sgt(i8 %x, i8 %y) {
+; CHECK-LABEL: @ult_to_sgt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, %y
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; Reverse direction of @slt_to_ugt: xor with 127 turns the unsigned
+; compare into a reversed signed compare.
+ %a = xor i8 %x, 127
+ %b = xor i8 %y, 127
+ %cmp = icmp ult i8 %a, %b
+ ret i1 %cmp
+}
+
+define <2 x i1> @ult_to_sgt_splat(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @ult_to_sgt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> %x, %y
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @ult_to_sgt (unsigned -> reversed signed).
+ %a = xor <2 x i8> %x, <i8 127, i8 127>
+ %b = xor <2 x i8> %y, <i8 127, i8 127>
+ %cmp = icmp ult <2 x i8> %a, %b
+ ret <2 x i1> %cmp
+}
+
+; icmp u/s (a ^ signmask), C --> icmp s/u a, C'
+
+define i1 @sge_to_ugt(i8 %x) {
+; CHECK-LABEL: @sge_to_ugt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 %x, -114
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; One-constant form: the sign-bit xor is folded into the compare constant.
+; 'sge 15' on (x ^ 128) is 'uge -113' on x, canonicalized to 'ugt -114'.
+ %a = xor i8 %x, 128
+ %cmp = icmp sge i8 %a, 15
+ ret i1 %cmp
+}
+
+define <2 x i1> @sge_to_ugt_splat(<2 x i8> %x) {
+; CHECK-LABEL: @sge_to_ugt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ugt <2 x i8> %x, <i8 -114, i8 -114>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @sge_to_ugt.
+ %a = xor <2 x i8> %x, <i8 128, i8 128>
+ %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
+ ret <2 x i1> %cmp
+}
+
+; Make sure that unsigned -> signed works too.
+
+define i1 @uge_to_sgt(i8 %x) {
+; CHECK-LABEL: @uge_to_sgt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 %x, -114
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; Reverse direction of @sge_to_ugt: 'uge 15' on (x ^ 128) is
+; 'sge -113' on x, canonicalized to 'sgt -114'.
+ %a = xor i8 %x, 128
+ %cmp = icmp uge i8 %a, 15
+ ret i1 %cmp
+}
+
+define <2 x i1> @uge_to_sgt_splat(<2 x i8> %x) {
+; CHECK-LABEL: @uge_to_sgt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> %x, <i8 -114, i8 -114>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @uge_to_sgt.
+ %a = xor <2 x i8> %x, <i8 128, i8 128>
+ %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
+ ret <2 x i1> %cmp
+}
+
+; icmp u/s (a ^ maxsignval), C --> icmp s/u' a, C'
+
+define i1 @sge_to_ult(i8 %x) {
+; CHECK-LABEL: @sge_to_ult(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 %x, 113
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; Maxsignval with one constant: 'sge 15' on (x ^ 127) is 'ule 112' on x,
+; canonicalized to 'ult 113' (signedness swapped AND predicate reversed).
+ %a = xor i8 %x, 127
+ %cmp = icmp sge i8 %a, 15
+ ret i1 %cmp
+}
+
+define <2 x i1> @sge_to_ult_splat(<2 x i8> %x) {
+; CHECK-LABEL: @sge_to_ult_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult <2 x i8> %x, <i8 113, i8 113>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @sge_to_ult.
+ %a = xor <2 x i8> %x, <i8 127, i8 127>
+ %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
+ ret <2 x i1> %cmp
+}
+
+; Make sure that unsigned -> signed works too.
+
+define i1 @uge_to_slt(i8 %x) {
+; CHECK-LABEL: @uge_to_slt(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 %x, 113
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+; Reverse direction of @sge_to_ult: 'uge 15' on (x ^ 127) becomes
+; 'slt 113' on x.
+ %a = xor i8 %x, 127
+ %cmp = icmp uge i8 %a, 15
+ ret i1 %cmp
+}
+
+define <2 x i1> @uge_to_slt_splat(<2 x i8> %x) {
+; CHECK-LABEL: @uge_to_slt_splat(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> %x, <i8 113, i8 113>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+; Splat-vector form of @uge_to_slt.
+ %a = xor <2 x i8> %x, <i8 127, i8 127>
+ %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
+ ret <2 x i1> %cmp
+}
+
+; PR33138, part 2: https://bugs.llvm.org/show_bug.cgi?id=33138
+; TODO: We could look through vector bitcasts for icmp folds,
+; or we could canonicalize bitcast ahead of logic ops with constants.
+
+define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @sgt_to_ugt_bitcasted_splat(
+; CHECK-NEXT: [[A:%.*]] = xor <2 x i32> %x, <i32 -2139062144, i32 -2139062144>
+; CHECK-NEXT: [[B:%.*]] = xor <2 x i32> %y, <i32 -2139062144, i32 -2139062144>
+; CHECK-NEXT: [[C:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+; CHECK-NEXT: [[D:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
+; CHECK-NEXT: [[E:%.*]] = icmp sgt <8 x i8> [[C]], [[D]]
+; CHECK-NEXT: ret <8 x i1> [[E]]
+;
+; Negative test (for now): the bitcasts sit between the xors and the icmp,
+; so the signbit fold does not fire even though each i8 lane of 0x80808080
+; is a sign mask. 2155905152 == 0x80808080, printed as -2139062144 (i32).
+ %a = xor <2 x i32> %x, <i32 2155905152, i32 2155905152> ; 0x80808080
+ %b = xor <2 x i32> %y, <i32 2155905152, i32 2155905152>
+ %c = bitcast <2 x i32> %a to <8 x i8>
+ %d = bitcast <2 x i32> %b to <8 x i8>
+ %e = icmp sgt <8 x i8> %c, %d
+ ret <8 x i1> %e
+}
+
+; TODO: This is false (little-endian). How should that be recognized?
+; Ie, should InstSimplify know this directly, should InstCombine canonicalize
+; this so InstSimplify can know this, or is that not something that we want
+; either pass to recognize?
+
+define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
+; CHECK-LABEL: @negative_simplify_splat(
+; CHECK-NEXT: [[A:%.*]] = or <4 x i8> %x, <i8 0, i8 -128, i8 0, i8 -128>
+; CHECK-NEXT: [[B:%.*]] = bitcast <4 x i8> [[A]] to <2 x i16>
+; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i16> [[B]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+; Not simplified (see TODO above): on little-endian targets, bytes 1 and 3
+; presumably land in the high byte of each i16 lane, so the 'or' sets every
+; i16 sign bit and 'sgt 0' would always be false.
+ %a = or <4 x i8> %x, <i8 0, i8 128, i8 0, i8 128>
+ %b = bitcast <4 x i8> %a to <2 x i16>
+ %c = icmp sgt <2 x i16> %b, zeroinitializer
+ ret <2 x i1> %c
+}
+