Diffstat (limited to 'test/CodeGen/X86/rotate4.ll')
-rw-r--r--  test/CodeGen/X86/rotate4.ll | 104
1 file changed, 55 insertions(+), 49 deletions(-)
diff --git a/test/CodeGen/X86/rotate4.ll b/test/CodeGen/X86/rotate4.ll
index 56a7d32850569..c7117be91ab47 100644
--- a/test/CodeGen/X86/rotate4.ll
+++ b/test/CodeGen/X86/rotate4.ll
@@ -1,17 +1,20 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
; Check that we recognize this idiom for rotation too:
; a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))
define i32 @rotate_left_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_left_32:
-; CHECK-NOT: and
-; CHECK: roll
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%and = and i32 %b, 31
%shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = lshr i32 %a, %and3
%or = or i32 %shl, %shr
ret i32 %or
@@ -19,13 +22,15 @@ entry:
define i32 @rotate_right_32(i32 %a, i32 %b) {
; CHECK-LABEL: rotate_right_32:
-; CHECK-NOT: and
-; CHECK: rorl
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
%and = and i32 %b, 31
%shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = shl i32 %a, %and3
%or = or i32 %shl, %shr
ret i32 %or
@@ -33,13 +38,15 @@ entry:
define i64 @rotate_left_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_left_64:
-; CHECK-NOT: and
-; CHECK: rolq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
%and = and i64 %b, 63
%shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = lshr i64 %a, %and3
%or = or i64 %shl, %shr
ret i64 %or
@@ -47,13 +54,15 @@ entry:
define i64 @rotate_right_64(i64 %a, i64 %b) {
; CHECK-LABEL: rotate_right_64:
-; CHECK-NOT: and
-; CHECK: rorq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
%and = and i64 %b, 63
%shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = shl i64 %a, %and3
%or = or i64 %shl, %shr
ret i64 %or
@@ -63,16 +72,15 @@ entry:
define void @rotate_left_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_left_m32:
-; CHECK-NOT: and
-; CHECK: roll
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = lshr i32 %a, %and3
%or = or i32 %shl, %shr
store i32 %or, i32* %pa, align 32
@@ -81,16 +89,15 @@ entry:
define void @rotate_right_m32(i32 *%pa, i32 %b) {
; CHECK-LABEL: rotate_right_m32:
-; CHECK-NOT: and
-; CHECK: rorl
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
%shr = shl i32 %a, %and3
%or = or i32 %shl, %shr
store i32 %or, i32* %pa, align 32
@@ -99,16 +106,15 @@ entry:
define void @rotate_left_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_left_m64:
-; CHECK-NOT: and
-; CHECK: rolq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = lshr i64 %a, %and3
%or = or i64 %shl, %shr
store i64 %or, i64* %pa, align 64
@@ -117,18 +123,18 @@ entry:
define void @rotate_right_m64(i64 *%pa, i64 %b) {
; CHECK-LABEL: rotate_right_m64:
-; CHECK-NOT: and
-; CHECK: rorq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, (%rdi)
+; CHECK-NEXT: retq
%a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
%shr = shl i64 %a, %and3
%or = or i64 %shl, %shr
store i64 %or, i64* %pa, align 64
ret void
}
+
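
For reference, the idiom these tests exercise is the standard UB-free rotate pattern at the C level. The sketch below is illustrative only (the C wrappers are not part of this patch, though the names mirror the test functions); under the masks, every shift count stays in [0, 31], so the expression is well-defined for all b:

#include <stdint.h>

/* Rotate left: a << (b & 31) | a >> ((0 - b) & 31).
   When b & 31 == 0, both shift counts are 0 and the OR yields a unchanged. */
static uint32_t rotate_left_32(uint32_t a, uint32_t b) {
    return (a << (b & 31)) | (a >> (-b & 31));
}

/* Rotate right is the same pattern with the shift directions swapped. */
static uint32_t rotate_right_32(uint32_t a, uint32_t b) {
    return (a >> (b & 31)) | (a << (-b & 31));
}

Because the x86 rotate instructions already take a CL count modulo the operand size (32 or 64), the backend can fold the explicit and instructions away and emit a bare roll/rorl (or rolq/rorq for the i64 variants). The old CHECK-NOT: and lines asserted that indirectly; the regenerated CHECK-NEXT lines pin down the exact instruction sequence instead.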