author     Dimitry Andric <dim@FreeBSD.org>    2014-11-24 09:08:18 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2014-11-24 09:08:18 +0000
commit     5ca98fd98791947eba83a1ed3f2c8191ef7afa6c (patch)
tree       f5944309621cee4fe0976be6f9ac619b7ebfc4c2 /test/CodeGen/R600/rotl.ll
parent     68bcb7db193e4bc81430063148253d30a791023e (diff)
Diffstat (limited to 'test/CodeGen/R600/rotl.ll')
-rw-r--r--  test/CodeGen/R600/rotl.ll  54
1 file changed, 54 insertions, 0 deletions
diff --git a/test/CodeGen/R600/rotl.ll b/test/CodeGen/R600/rotl.ll
new file mode 100644
index 000000000000..83f657fd4cce
--- /dev/null
+++ b/test/CodeGen/R600/rotl.ll
@@ -0,0 +1,54 @@
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck --check-prefix=R600 -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @rotl_i32:
+; R600: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
+; R600-NEXT: 32
+; R600: BIT_ALIGN_INT {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].Z, PV.{{[XYZW]}}
+
+; SI: S_SUB_I32 [[SDST:s[0-9]+]], 32, {{[s][0-9]+}}
+; SI: V_MOV_B32_e32 [[VDST:v[0-9]+]], [[SDST]]
+; SI: V_ALIGNBIT_B32 {{v[0-9]+, [s][0-9]+, v[0-9]+}}, [[VDST]]
+define void @rotl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y) {
+entry:
+ %0 = shl i32 %x, %y
+ %1 = sub i32 32, %y
+ %2 = lshr i32 %x, %1
+ %3 = or i32 %0, %2
+ store i32 %3, i32 addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v2i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x, <2 x i32> %y) {
+entry:
+ %0 = shl <2 x i32> %x, %y
+ %1 = sub <2 x i32> <i32 32, i32 32>, %y
+ %2 = lshr <2 x i32> %x, %1
+ %3 = or <2 x i32> %0, %2
+ store <2 x i32> %3, <2 x i32> addrspace(1)* %in
+ ret void
+}
+
+; FUNC-LABEL: @rotl_v4i32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+; SI: S_SUB_I32
+; SI: V_ALIGNBIT_B32
+define void @rotl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x, <4 x i32> %y) {
+entry:
+ %0 = shl <4 x i32> %x, %y
+ %1 = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %y
+ %2 = lshr <4 x i32> %x, %1
+ %3 = or <4 x i32> %0, %2
+ store <4 x i32> %3, <4 x i32> addrspace(1)* %in
+ ret void
+}
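
For context, and not part of the patch above: the shl/sub/lshr/or idiom in these tests is the canonical rotate-left pattern, (x << y) | (x >> (32 - y)), which the DAG combiner recognizes as a rotate and which the CHECK lines show being selected as SUB_INT + BIT_ALIGN_INT on R600 and S_SUB_I32 + V_ALIGNBIT_B32 on SI. The sketch below is a minimal, hypothetical illustration of the same rotate written with LLVM's generic funnel-shift intrinsic llvm.fshl.i32, which was added to LLVM well after this 2014 commit; the function name @rotl_i32_fshl is made up for the example.

; Sketch only, assuming current LLVM IR syntax; not part of the committed test.
; rotl(x, y) == fshl(x, x, y): the funnel-shift intrinsic expresses the rotate
; directly instead of spelling out (x << y) | (x >> (32 - y)).
declare i32 @llvm.fshl.i32(i32, i32, i32)

define i32 @rotl_i32_fshl(i32 %x, i32 %y) {
entry:
  %rot = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
  ret i32 %rot
}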