author     Dimitry Andric <dim@FreeBSD.org>    2015-06-21 13:59:01 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-06-21 13:59:01 +0000
commit     3a0822f094b578157263e04114075ad7df81db41 (patch)
tree       bc48361fe2cd1ca5f93ac01b38b183774468fc79 /test/CodeGen/Hexagon
parent     85d8b2bbe386bcfe669575d05b61482d7be07e5d (diff)
Diffstat (limited to 'test/CodeGen/Hexagon')
-rw-r--r--  test/CodeGen/Hexagon/absaddr-store.ll  29
-rw-r--r--  test/CodeGen/Hexagon/absimm.ll  2
-rw-r--r--  test/CodeGen/Hexagon/addh-sext-trunc.ll  43
-rw-r--r--  test/CodeGen/Hexagon/addh-shifted.ll  21
-rw-r--r--  test/CodeGen/Hexagon/addh.ll  21
-rw-r--r--  test/CodeGen/Hexagon/addrmode-indoff.ll  74
-rw-r--r--  test/CodeGen/Hexagon/always-ext.ll  5
-rw-r--r--  test/CodeGen/Hexagon/args.ll  2
-rw-r--r--  test/CodeGen/Hexagon/ashift-left-right.ll  2
-rw-r--r--  test/CodeGen/Hexagon/barrier-flag.ll  125
-rw-r--r--  test/CodeGen/Hexagon/base-offset-addr.ll  15
-rw-r--r--  test/CodeGen/Hexagon/base-offset-post.ll  30
-rw-r--r--  test/CodeGen/Hexagon/bugAsmHWloop.ll  71
-rw-r--r--  test/CodeGen/Hexagon/cext-valid-packet1.ll  3
-rw-r--r--  test/CodeGen/Hexagon/cext-valid-packet2.ll  38
-rw-r--r--  test/CodeGen/Hexagon/cext.ll  16
-rw-r--r--  test/CodeGen/Hexagon/cexti16.ll  16
-rw-r--r--  test/CodeGen/Hexagon/checktabs.ll  8
-rw-r--r--  test/CodeGen/Hexagon/cmp-extend.ll  40
-rw-r--r--  test/CodeGen/Hexagon/cmp-promote.ll  72
-rw-r--r--  test/CodeGen/Hexagon/cmp-to-genreg.ll  2
-rw-r--r--  test/CodeGen/Hexagon/cmp-to-predreg.ll  2
-rw-r--r--  test/CodeGen/Hexagon/cmp.ll  161
-rw-r--r--  test/CodeGen/Hexagon/cmp_pred.ll  3
-rw-r--r--  test/CodeGen/Hexagon/cmp_pred_reg.ll  3
-rw-r--r--  test/CodeGen/Hexagon/cmpb-eq.ll  53
-rw-r--r--  test/CodeGen/Hexagon/cmpb_pred.ll  3
-rw-r--r--  test/CodeGen/Hexagon/eh_return.ll  48
-rw-r--r--  test/CodeGen/Hexagon/hwloop-lt.ll  2
-rw-r--r--  test/CodeGen/Hexagon/hwloop-lt1.ll  2
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_alu.ll  63
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/alu32_perm.ll  31
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/cr.ll  39
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_alu.ll  282
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_bit.ll  93
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_complex.ll  99
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_fp.ll  110
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll  434
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_perm.ll  71
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_pred.ll  99
-rw-r--r--  test/CodeGen/Hexagon/intrinsics/xtype_shift.ll  205
-rw-r--r--  test/CodeGen/Hexagon/loadi1-G0.ll  43
-rw-r--r--  test/CodeGen/Hexagon/loadi1-v4-G0.ll  43
-rw-r--r--  test/CodeGen/Hexagon/loadi1-v4.ll  45
-rw-r--r--  test/CodeGen/Hexagon/loadi1.ll  45
-rw-r--r--  test/CodeGen/Hexagon/maxd.ll  9
-rw-r--r--  test/CodeGen/Hexagon/maxh.ll  23
-rw-r--r--  test/CodeGen/Hexagon/maxud.ll  9
-rw-r--r--  test/CodeGen/Hexagon/maxuw.ll  9
-rw-r--r--  test/CodeGen/Hexagon/maxw.ll  9
-rw-r--r--  test/CodeGen/Hexagon/mind.ll  9
-rw-r--r--  test/CodeGen/Hexagon/minu-zext-16.ll  11
-rw-r--r--  test/CodeGen/Hexagon/minu-zext-8.ll  11
-rw-r--r--  test/CodeGen/Hexagon/minud.ll  9
-rw-r--r--  test/CodeGen/Hexagon/minuw.ll  9
-rw-r--r--  test/CodeGen/Hexagon/minw.ll  9
-rw-r--r--  test/CodeGen/Hexagon/postinc-offset.ll  40
-rw-r--r--  test/CodeGen/Hexagon/signed_immediates.ll  99
-rw-r--r--  test/CodeGen/Hexagon/simple_addend.ll  10
-rw-r--r--  test/CodeGen/Hexagon/usr-ovf-dep.ll  28
60 files changed, 2095 insertions, 813 deletions
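
The recurring pattern across the per-file diffs below: RUN lines drop the explicit -mcpu=hexagonv4 CPU selection and let llc use the default Hexagon CPU, while CHECK patterns are loosened so they no longer pin exact register numbers. A representative before/after, taken from the RUN lines in the hunks themselves (%s is the lit substitution for the test file):

; before: RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
; after:  RUN: llc -march=hexagon < %s | FileCheck %s
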
diff --git a/test/CodeGen/Hexagon/absaddr-store.ll b/test/CodeGen/Hexagon/absaddr-store.ll
index 3be4b1cc26144..dac8607d88db6 100644
--- a/test/CodeGen/Hexagon/absaddr-store.ll
+++ b/test/CodeGen/Hexagon/absaddr-store.ll
@@ -1,39 +1,42 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
; Check that we generate load instructions with absolute addressing mode.
-@a = external global i32
-@b = external global i8
-@c = external global i16
+@a0 = external global i32
+@a1 = external global i32
+@b0 = external global i8
+@b1 = external global i8
+@c0 = external global i16
+@c1 = external global i16
@d = external global i64
define zeroext i8 @absStoreByte() nounwind {
-; CHECK: memb(##b){{ *}}={{ *}}r{{[0-9]+}}
+; CHECK: memb(##b1){{ *}}={{ *}}r{{[0-9]+}}
entry:
- %0 = load i8, i8* @b, align 1
+ %0 = load i8, i8* @b0, align 1
%conv = zext i8 %0 to i32
%mul = mul nsw i32 100, %conv
%conv1 = trunc i32 %mul to i8
- store i8 %conv1, i8* @b, align 1
+ store i8 %conv1, i8* @b1, align 1
ret i8 %conv1
}
define signext i16 @absStoreHalf() nounwind {
-; CHECK: memh(##c){{ *}}={{ *}}r{{[0-9]+}}
+; CHECK: memh(##c1){{ *}}={{ *}}r{{[0-9]+}}
entry:
- %0 = load i16, i16* @c, align 2
+ %0 = load i16, i16* @c0, align 2
%conv = sext i16 %0 to i32
%mul = mul nsw i32 100, %conv
%conv1 = trunc i32 %mul to i16
- store i16 %conv1, i16* @c, align 2
+ store i16 %conv1, i16* @c1, align 2
ret i16 %conv1
}
define i32 @absStoreWord() nounwind {
-; CHECK: memw(##a){{ *}}={{ *}}r{{[0-9]+}}
+; CHECK: memw(##a1){{ *}}={{ *}}r{{[0-9]+}}
entry:
- %0 = load i32, i32* @a, align 4
+ %0 = load i32, i32* @a0, align 4
%mul = mul nsw i32 100, %0
- store i32 %mul, i32* @a, align 4
+ store i32 %mul, i32* @a1, align 4
ret i32 %mul
}
diff --git a/test/CodeGen/Hexagon/absimm.ll b/test/CodeGen/Hexagon/absimm.ll
index 07adb3fe49d5b..e67af5e8fef90 100644
--- a/test/CodeGen/Hexagon/absimm.ll
+++ b/test/CodeGen/Hexagon/absimm.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that we generate absolute addressing mode instructions
; with immediate value.
diff --git a/test/CodeGen/Hexagon/addh-sext-trunc.ll b/test/CodeGen/Hexagon/addh-sext-trunc.ll
new file mode 100644
index 0000000000000..094932933fbc6
--- /dev/null
+++ b/test/CodeGen/Hexagon/addh-sext-trunc.ll
@@ -0,0 +1,43 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{H|h}})
+
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
+target triple = "hexagon-unknown-none"
+
+%struct.aDataType = type { i16, i16, i16, i16, i16, i16*, i16*, i16*, i8*, i16*, i16*, i16*, i8* }
+
+define i8* @a_get_score(%struct.aDataType* nocapture %pData, i16 signext %gmmModelIndex, i16* nocapture %pGmmScoreL16Q4) #0 {
+entry:
+ %numSubVector = getelementptr inbounds %struct.aDataType, %struct.aDataType* %pData, i32 0, i32 3
+ %0 = load i16, i16* %numSubVector, align 2, !tbaa !0
+ %and = and i16 %0, -4
+ %b = getelementptr inbounds %struct.aDataType, %struct.aDataType* %pData, i32 0, i32 8
+ %1 = load i8*, i8** %b, align 4, !tbaa !3
+ %conv3 = sext i16 %and to i32
+ %cmp21 = icmp sgt i16 %and, 0
+ br i1 %cmp21, label %for.inc.preheader, label %for.end
+
+for.inc.preheader: ; preds = %entry
+ br label %for.inc
+
+for.inc: ; preds = %for.inc.preheader, %for.inc
+ %j.022 = phi i32 [ %phitmp, %for.inc ], [ 0, %for.inc.preheader ]
+ %add13 = mul i32 %j.022, 65536
+ %sext = add i32 %add13, 262144
+ %phitmp = ashr exact i32 %sext, 16
+ %cmp = icmp slt i32 %phitmp, %conv3
+ br i1 %cmp, label %for.inc, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.inc
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret i8* %1
+}
+
+attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = !{!"short", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!"any pointer", !1}
diff --git a/test/CodeGen/Hexagon/addh-shifted.ll b/test/CodeGen/Hexagon/addh-shifted.ll
new file mode 100644
index 0000000000000..eb263521b42fb
--- /dev/null
+++ b/test/CodeGen/Hexagon/addh-shifted.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}}):<<16
+
+define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
+entry:
+ %conv.i = zext i16 %arg1 to i32
+ %conv1.i = zext i16 %arg2 to i32
+ %sub.i = add nsw i32 %conv.i, %conv1.i
+ %sext.i = shl i32 %sub.i, 16
+ %cmp.i = icmp slt i32 %sext.i, 65536
+ %0 = ashr exact i32 %sext.i, 16
+ %conv7.i = select i1 %cmp.i, i32 1, i32 %0
+ %cmp8.i = icmp sgt i32 %conv7.i, 4
+ %conv7.op.i = add i32 %conv7.i, 65535
+ %shl = shl i64 %arg0, 2
+ %.mask = and i32 %conv7.op.i, 65535
+ %1 = zext i32 %.mask to i64
+ %conv = select i1 %cmp8.i, i64 3, i64 %1
+ %or = or i64 %conv, %shl
+ ret i64 %or
+}
diff --git a/test/CodeGen/Hexagon/addh.ll b/test/CodeGen/Hexagon/addh.ll
new file mode 100644
index 0000000000000..c2b536c4669a5
--- /dev/null
+++ b/test/CodeGen/Hexagon/addh.ll
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}.{{L|l}}, r{{[0-9]+}}.{{L|l}})
+
+define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
+entry:
+ %conv.i = zext i16 %arg1 to i32
+ %conv1.i = zext i16 %arg2 to i32
+ %sub.i = add nsw i32 %conv.i, %conv1.i
+ %sext.i = shl i32 %sub.i, 16
+ %cmp.i = icmp slt i32 %sext.i, 65536
+ %0 = ashr exact i32 %sext.i, 16
+ %conv7.i = select i1 %cmp.i, i32 1, i32 %0
+ %cmp8.i = icmp sgt i32 %conv7.i, 4
+ %conv7.op.i = add i32 %conv7.i, 65535
+ %shl = shl i64 %arg0, 2
+ %.mask = and i32 %conv7.op.i, 65535
+ %1 = zext i32 %.mask to i64
+ %conv = select i1 %cmp8.i, i64 3, i64 %1
+ %or = or i64 %conv, %shl
+ ret i64 %or
+}
diff --git a/test/CodeGen/Hexagon/addrmode-indoff.ll b/test/CodeGen/Hexagon/addrmode-indoff.ll
new file mode 100644
index 0000000000000..6ea2b3d95daf7
--- /dev/null
+++ b/test/CodeGen/Hexagon/addrmode-indoff.ll
@@ -0,0 +1,74 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Bug 6840. Use absolute+index addressing.
+
+@ga = common global [1024 x i8] zeroinitializer, align 8
+@gb = common global [1024 x i8] zeroinitializer, align 8
+
+; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##ga)
+define zeroext i8 @lf2(i32 %i) nounwind readonly {
+entry:
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i
+ %0 = load i8, i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##gb)
+define signext i8 @lf2s(i32 %i) nounwind readonly {
+entry:
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %i
+ %0 = load i8, i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##ga)
+define zeroext i8 @lf3(i32 %i) nounwind readonly {
+entry:
+ %mul = shl nsw i32 %i, 2
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %mul
+ %0 = load i8, i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##gb)
+define signext i8 @lf3s(i32 %i) nounwind readonly {
+entry:
+ %mul = shl nsw i32 %i, 2
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %mul
+ %0 = load i8, i8* %arrayidx, align 1
+ ret i8 %0
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##ga)
+define void @sf4(i32 %i, i8 zeroext %j) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %i
+ store i8 %j, i8* %arrayidx, align 1
+ ret void
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#0{{ *}}+{{ *}}##gb)
+define void @sf4s(i32 %i, i8 signext %j) nounwind {
+entry:
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %i
+ store i8 %j, i8* %arrayidx, align 1
+ ret void
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##ga)
+define void @sf5(i32 %i, i8 zeroext %j) nounwind {
+entry:
+ %mul = shl nsw i32 %i, 2
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @ga, i32 0, i32 %mul
+ store i8 %j, i8* %arrayidx, align 1
+ ret void
+}
+
+; CHECK: memb(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##gb)
+define void @sf5s(i32 %i, i8 signext %j) nounwind {
+entry:
+ %mul = shl nsw i32 %i, 2
+ %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @gb, i32 0, i32 %mul
+ store i8 %j, i8* %arrayidx, align 1
+ ret void
+}
diff --git a/test/CodeGen/Hexagon/always-ext.ll b/test/CodeGen/Hexagon/always-ext.ll
index 8b4b2f5bf4f27..3bf465b6a513a 100644
--- a/test/CodeGen/Hexagon/always-ext.ll
+++ b/test/CodeGen/Hexagon/always-ext.ll
@@ -1,5 +1,4 @@
-; XFAIL:
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that we don't generate an invalid packet with too many instructions
; due to a store that has a must-extend operand.
@@ -8,7 +7,7 @@
; CHECK: {
; CHECK-NOT: call abort
; CHECK: memw(##0)
-; CHECK: memw(r{{[0-9+]}}<<#2 + ##4)
+; CHECK: memw(r{{[0-9+]}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##4)
; CHECK: }
%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111 = type { i8*, void (%struct.CuTest.1.28.31.37.40.43.52.55.67.85.111*)*, i32, i32, i8*, [23 x i32]* }
diff --git a/test/CodeGen/Hexagon/args.ll b/test/CodeGen/Hexagon/args.ll
index 1c470f68aa272..3bfb8b159556d 100644
--- a/test/CodeGen/Hexagon/args.ll
+++ b/test/CodeGen/Hexagon/args.ll
@@ -2,7 +2,7 @@
; CHECK: r5:4 = combine(#6, #5)
; CHECK: r3:2 = combine(#4, #3)
; CHECK: r1:0 = combine(#2, #1)
-; CHECK: memw(r29{{ *}}+{{ *}}#0){{ *}}={{ *}}#7
+; CHECK: memw(r29+#0)=#7
define void @foo() nounwind {
diff --git a/test/CodeGen/Hexagon/ashift-left-right.ll b/test/CodeGen/Hexagon/ashift-left-right.ll
index 7c41bc7bbf3b0..bc3e813220dbb 100644
--- a/test/CodeGen/Hexagon/ashift-left-right.ll
+++ b/test/CodeGen/Hexagon/ashift-left-right.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
define i32 @foo(i32 %a, i32 %b) nounwind readnone {
; CHECK: lsl
diff --git a/test/CodeGen/Hexagon/barrier-flag.ll b/test/CodeGen/Hexagon/barrier-flag.ll
new file mode 100644
index 0000000000000..e70a56bae02db
--- /dev/null
+++ b/test/CodeGen/Hexagon/barrier-flag.ll
@@ -0,0 +1,125 @@
+; RUN: llc -O2 < %s
+; Check for successful compilation. It originally caused an abort due to
+; the "isBarrier" flag set on instructions that were not meant to have it.
+
+target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
+target triple = "hexagon"
+
+; Function Attrs: nounwind optsize readnone
+define void @dummy() #0 {
+entry:
+ ret void
+}
+
+; Function Attrs: nounwind optsize
+define void @conv3x3(i8* nocapture readonly %inp, i8* nocapture readonly %mask, i32 %shift, i8* nocapture %outp, i32 %width) #1 {
+entry:
+ %cmp381 = icmp sgt i32 %width, 0
+ %arrayidx16.gep = getelementptr i8, i8* %mask, i32 4
+ %arrayidx19.gep = getelementptr i8, i8* %mask, i32 8
+ br label %for.body
+
+for.body: ; preds = %for.inc48, %entry
+ %i.086 = phi i32 [ 0, %entry ], [ %inc49, %for.inc48 ]
+ %mul = mul nsw i32 %i.086, %width
+ %arrayidx.sum = add i32 %mul, %width
+ br i1 %cmp381, label %for.cond5.preheader.lr.ph, label %for.inc48
+
+for.cond5.preheader.lr.ph: ; preds = %for.body
+ %add.ptr.sum = add i32 %arrayidx.sum, %width
+ %add.ptr1 = getelementptr inbounds i8, i8* %inp, i32 %add.ptr.sum
+ %add.ptr = getelementptr inbounds i8, i8* %inp, i32 %arrayidx.sum
+ %arrayidx = getelementptr inbounds i8, i8* %inp, i32 %mul
+ %arrayidx44.gep = getelementptr i8, i8* %outp, i32 %mul
+ br label %for.cond5.preheader
+
+for.cond5.preheader: ; preds = %if.end40, %for.cond5.preheader.lr.ph
+ %arrayidx44.phi = phi i8* [ %arrayidx44.gep, %for.cond5.preheader.lr.ph ], [ %arrayidx44.inc, %if.end40 ]
+ %j.085 = phi i32 [ 0, %for.cond5.preheader.lr.ph ], [ %inc46, %if.end40 ]
+ %IN1.084 = phi i8* [ %arrayidx, %for.cond5.preheader.lr.ph ], [ %incdec.ptr, %if.end40 ]
+ %IN2.083 = phi i8* [ %add.ptr, %for.cond5.preheader.lr.ph ], [ %incdec.ptr33, %if.end40 ]
+ %IN3.082 = phi i8* [ %add.ptr1, %for.cond5.preheader.lr.ph ], [ %incdec.ptr34, %if.end40 ]
+ br label %for.body7
+
+for.body7: ; preds = %for.body7, %for.cond5.preheader
+ %arrayidx8.phi = phi i8* [ %IN1.084, %for.cond5.preheader ], [ %arrayidx8.inc, %for.body7 ]
+ %arrayidx9.phi = phi i8* [ %IN2.083, %for.cond5.preheader ], [ %arrayidx9.inc, %for.body7 ]
+ %arrayidx11.phi = phi i8* [ %IN3.082, %for.cond5.preheader ], [ %arrayidx11.inc, %for.body7 ]
+ %arrayidx13.phi = phi i8* [ %mask, %for.cond5.preheader ], [ %arrayidx13.inc, %for.body7 ]
+ %arrayidx16.phi = phi i8* [ %arrayidx16.gep, %for.cond5.preheader ], [ %arrayidx16.inc, %for.body7 ]
+ %arrayidx19.phi = phi i8* [ %arrayidx19.gep, %for.cond5.preheader ], [ %arrayidx19.inc, %for.body7 ]
+ %k.080 = phi i32 [ 0, %for.cond5.preheader ], [ %inc, %for.body7 ]
+ %sum.079 = phi i32 [ 0, %for.cond5.preheader ], [ %add32, %for.body7 ]
+ %0 = load i8, i8* %arrayidx8.phi, align 1, !tbaa !1
+ %1 = load i8, i8* %arrayidx9.phi, align 1, !tbaa !1
+ %2 = load i8, i8* %arrayidx11.phi, align 1, !tbaa !1
+ %3 = load i8, i8* %arrayidx13.phi, align 1, !tbaa !1
+ %4 = load i8, i8* %arrayidx16.phi, align 1, !tbaa !1
+ %5 = load i8, i8* %arrayidx19.phi, align 1, !tbaa !1
+ %conv21 = zext i8 %0 to i32
+ %conv22 = sext i8 %3 to i32
+ %mul23 = mul nsw i32 %conv22, %conv21
+ %conv24 = zext i8 %1 to i32
+ %conv25 = sext i8 %4 to i32
+ %mul26 = mul nsw i32 %conv25, %conv24
+ %conv27 = zext i8 %2 to i32
+ %conv28 = sext i8 %5 to i32
+ %mul29 = mul nsw i32 %conv28, %conv27
+ %add30 = add i32 %mul23, %sum.079
+ %add31 = add i32 %add30, %mul26
+ %add32 = add i32 %add31, %mul29
+ %inc = add nsw i32 %k.080, 1
+ %exitcond = icmp eq i32 %inc, 3
+ %arrayidx8.inc = getelementptr i8, i8* %arrayidx8.phi, i32 1
+ %arrayidx9.inc = getelementptr i8, i8* %arrayidx9.phi, i32 1
+ %arrayidx11.inc = getelementptr i8, i8* %arrayidx11.phi, i32 1
+ %arrayidx13.inc = getelementptr i8, i8* %arrayidx13.phi, i32 1
+ %arrayidx16.inc = getelementptr i8, i8* %arrayidx16.phi, i32 1
+ %arrayidx19.inc = getelementptr i8, i8* %arrayidx19.phi, i32 1
+ br i1 %exitcond, label %for.end, label %for.body7
+
+for.end: ; preds = %for.body7
+ %incdec.ptr = getelementptr inbounds i8, i8* %IN1.084, i32 1
+ %incdec.ptr33 = getelementptr inbounds i8, i8* %IN2.083, i32 1
+ %incdec.ptr34 = getelementptr inbounds i8, i8* %IN3.082, i32 1
+ %shr = ashr i32 %add32, %shift
+ %cmp35 = icmp slt i32 %shr, 0
+ br i1 %cmp35, label %if.end40, label %if.end
+
+if.end: ; preds = %for.end
+ %cmp37 = icmp sgt i32 %shr, 255
+ br i1 %cmp37, label %if.then39, label %if.end40
+
+if.then39: ; preds = %if.end
+ br label %if.end40
+
+if.end40: ; preds = %for.end, %if.then39, %if.end
+ %sum.2 = phi i32 [ 255, %if.then39 ], [ %shr, %if.end ], [ 0, %for.end ]
+ %conv41 = trunc i32 %sum.2 to i8
+ store i8 %conv41, i8* %arrayidx44.phi, align 1, !tbaa !1
+ %inc46 = add nsw i32 %j.085, 1
+ %exitcond87 = icmp eq i32 %inc46, %width
+ %arrayidx44.inc = getelementptr i8, i8* %arrayidx44.phi, i32 1
+ br i1 %exitcond87, label %for.inc48.loopexit, label %for.cond5.preheader
+
+for.inc48.loopexit: ; preds = %if.end40
+ br label %for.inc48
+
+for.inc48: ; preds = %for.inc48.loopexit, %for.body
+ %inc49 = add nsw i32 %i.086, 1
+ %exitcond88 = icmp eq i32 %inc49, 2
+ br i1 %exitcond88, label %for.end50, label %for.body
+
+for.end50: ; preds = %for.inc48
+ ret void
+}
+
+attributes #0 = { nounwind optsize readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"Clang 3.1"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/base-offset-addr.ll b/test/CodeGen/Hexagon/base-offset-addr.ll
new file mode 100644
index 0000000000000..30410fe925432
--- /dev/null
+++ b/test/CodeGen/Hexagon/base-offset-addr.ll
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon -enable-aa-sched-mi < %s
+; REQUIRES: asserts
+
+; Make sure the base is a register and not an address.
+
+define fastcc void @Get_lsp_pol(i32* nocapture %f) #0 {
+entry:
+ %f5 = alloca i32, align 4
+ %arrayidx103 = getelementptr inbounds i32, i32* %f, i32 4
+ store i32 0, i32* %arrayidx103, align 4
+ %f5.0.load185 = load volatile i32, i32* %f5, align 4
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/base-offset-post.ll b/test/CodeGen/Hexagon/base-offset-post.ll
new file mode 100644
index 0000000000000..a6e4cdd34a0da
--- /dev/null
+++ b/test/CodeGen/Hexagon/base-offset-post.ll
@@ -0,0 +1,30 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s
+; REQUIRES: asserts
+
+; Test that the accessSize is set on a post-increment store. If not, an assert
+; is triggered in getBaseAndOffset()
+
+%struct.A = type { i8, i32, i32, i32, [10 x i32], [10 x i32], [80 x i32], [80 x i32], [8 x i32], i32, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16 }
+
+; Function Attrs: nounwind
+define fastcc void @Decoder_amr(i8 zeroext %mode) #0 {
+entry:
+ br label %for.cond64.preheader.i
+
+for.cond64.preheader.i:
+ %i.1984.i = phi i32 [ 0, %entry ], [ %inc166.i.1, %for.cond64.preheader.i ]
+ %inc166.i = add nsw i32 %i.1984.i, 1
+ %arrayidx71.i1422.1 = getelementptr inbounds %struct.A, %struct.A* undef, i32 0, i32 7, i32 %inc166.i
+ %storemerge800.i.1 = select i1 undef, i32 1310, i32 undef
+ %sub156.i.1 = sub nsw i32 0, %storemerge800.i.1
+ %sub156.storemerge800.i.1 = select i1 undef, i32 %storemerge800.i.1, i32 %sub156.i.1
+ store i32 %sub156.storemerge800.i.1, i32* %arrayidx71.i1422.1, align 4
+ store i32 0, i32* undef, align 4
+ %inc166.i.1 = add nsw i32 %i.1984.i, 2
+ br label %for.cond64.preheader.i
+
+if.end:
+ ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/bugAsmHWloop.ll b/test/CodeGen/Hexagon/bugAsmHWloop.ll
new file mode 100644
index 0000000000000..c7e95ed056646
--- /dev/null
+++ b/test/CodeGen/Hexagon/bugAsmHWloop.ll
@@ -0,0 +1,71 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK: {
+; CHECK: loop0(.LBB
+; CHECK-NOT: loop0(##.LBB
+
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
+target triple = "hexagon"
+
+define i32 @q6zip_uncompress(i8* %out_buf, i32* %out_buf_size, i8* %in_buf, i32 %in_buf_size, i8* nocapture %dict, i32 %dict_size) nounwind {
+entry:
+ %0 = bitcast i8* %in_buf to i32*
+ %incdec.ptr = getelementptr inbounds i8, i8* %in_buf, i32 4
+ %1 = load i32, i32* %0, align 4, !tbaa !0
+ %2 = ptrtoint i8* %incdec.ptr to i32
+ %and.i = and i32 %2, 31
+ %sub.i = sub i32 %2, %and.i
+ %3 = inttoptr i32 %sub.i to i8*
+ %add.i = add i32 %in_buf_size, 31
+ %sub2.i = add i32 %add.i, %and.i
+ %div.i = lshr i32 %sub2.i, 5
+ %4 = tail call i32 @llvm.hexagon.A2.combine.ll(i32 32, i32 %div.i) nounwind
+ %5 = tail call i64 @llvm.hexagon.A4.combineir(i32 32, i32 %4) nounwind
+ tail call void asm sideeffect "l2fetch($0,$1)", "r,r,~{memory}"(i8* %3, i64 %5) nounwind, !srcloc !3
+ %6 = ptrtoint i8* %out_buf to i32
+ br label %for.body.i
+
+for.body.i: ; preds = %for.body.i, %entry
+ %i.02.i = phi i32 [ 0, %entry ], [ %inc.i, %for.body.i ]
+ %addr.addr.01.i = phi i32 [ %6, %entry ], [ %add.i14, %for.body.i ]
+ tail call void asm sideeffect "dczeroa($0)", "r"(i32 %addr.addr.01.i) nounwind, !srcloc !4
+ %add.i14 = add i32 %addr.addr.01.i, 32
+ %inc.i = add i32 %i.02.i, 1
+ %exitcond.i = icmp eq i32 %inc.i, 128
+ br i1 %exitcond.i, label %while.cond.preheader, label %for.body.i
+
+while.cond.preheader: ; preds = %for.body.i
+ %and = and i32 %1, 3
+ switch i32 %and, label %infloop.preheader [
+ i32 0, label %exit_inflate.split
+ i32 2, label %if.then.preheader
+ ]
+
+if.then.preheader: ; preds = %while.cond.preheader
+ br label %if.then
+
+infloop.preheader: ; preds = %while.cond.preheader
+ br label %infloop
+
+if.then: ; preds = %if.then.preheader, %if.then
+ tail call void @llvm.prefetch(i8* %incdec.ptr, i32 0, i32 3, i32 1)
+ br label %if.then
+
+exit_inflate.split: ; preds = %while.cond.preheader
+ ret i32 0
+
+infloop: ; preds = %infloop.preheader, %infloop
+ br label %infloop
+}
+
+declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) nounwind
+
+declare i64 @llvm.hexagon.A4.combineir(i32, i32) nounwind readnone
+
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32) nounwind readnone
+
+!0 = !{!"long", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{i32 18362}
+!4 = !{i32 18893}
diff --git a/test/CodeGen/Hexagon/cext-valid-packet1.ll b/test/CodeGen/Hexagon/cext-valid-packet1.ll
index 35e7b364b5089..36abc59f5e3e6 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet1.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet1.ll
@@ -1,5 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; XFAIL:
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that the packetizer generates valid packets with constant
; extended instructions.
diff --git a/test/CodeGen/Hexagon/cext-valid-packet2.ll b/test/CodeGen/Hexagon/cext-valid-packet2.ll
index c3a4915ec2e08..9f03ef1309ecc 100644
--- a/test/CodeGen/Hexagon/cext-valid-packet2.ll
+++ b/test/CodeGen/Hexagon/cext-valid-packet2.ll
@@ -1,44 +1,16 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-; XFAIL:
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that the packetizer generates valid packets with constant
; extended add and base+offset store instructions.
-; CHECK: {
-; CHECK-NEXT: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}, ##{{[0-9]+}})
-; CHECK-NEXT: memw(r{{[0-9]+}}+{{ *}}##{{[0-9]+}}){{ *}}={{ *}}r{{[0-9]+}}.new
+; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}},{{ *}}##200000)
+; CHECK-NEXT: memw(r{{[0-9]+}}{{ *}}+{{ *}}##12000){{ *}}={{ *}}r{{[0-9]+}}.new
; CHECK-NEXT: }
-define i32 @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind {
+define void @test(i32* nocapture %a, i32* nocapture %b, i32 %c) nounwind {
entry:
- %add = add nsw i32 %c, 200002
%0 = load i32, i32* %a, align 4
%add1 = add nsw i32 %0, 200000
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000
store i32 %add1, i32* %arrayidx2, align 4
- %1 = load i32, i32* %b, align 4
- %add4 = add nsw i32 %1, 200001
- %arrayidx5 = getelementptr inbounds i32, i32* %a, i32 1
- store i32 %add4, i32* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 1
- %2 = load i32, i32* %arrayidx7, align 4
- %cmp = icmp sgt i32 %add4, %2
- br i1 %cmp, label %if.then, label %if.else
-
-if.then: ; preds = %entry
- %arrayidx8 = getelementptr inbounds i32, i32* %a, i32 2
- %3 = load i32, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32, i32* %b, i32 2000
- %4 = load i32, i32* %arrayidx9, align 4
- %sub = sub nsw i32 %3, %4
- %arrayidx10 = getelementptr inbounds i32, i32* %a, i32 4000
- store i32 %sub, i32* %arrayidx10, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %arrayidx11 = getelementptr inbounds i32, i32* %b, i32 3200
- store i32 %add, i32* %arrayidx11, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- ret i32 %add
+ ret void
}
diff --git a/test/CodeGen/Hexagon/cext.ll b/test/CodeGen/Hexagon/cext.ll
new file mode 100644
index 0000000000000..6daba8cc9599d
--- /dev/null
+++ b/test/CodeGen/Hexagon/cext.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: memub(r{{[0-9]+}}{{ *}}<<{{ *}}#1{{ *}}+{{ *}}##a)
+
+@a = external global [5 x [2 x i8]]
+
+define zeroext i8 @foo(i8 zeroext %l) nounwind readonly {
+for.end:
+ %idxprom = zext i8 %l to i32
+ %arrayidx1 = getelementptr inbounds [5 x [2 x i8]], [5 x [2 x i8]]* @a, i32 0, i32 %idxprom, i32 0
+ %0 = load i8, i8* %arrayidx1, align 1
+ %conv = zext i8 %0 to i32
+ %mul = mul nsw i32 %conv, 20
+ %conv2 = trunc i32 %mul to i8
+ ret i8 %conv2
+}
+
diff --git a/test/CodeGen/Hexagon/cexti16.ll b/test/CodeGen/Hexagon/cexti16.ll
new file mode 100644
index 0000000000000..465cfe400719e
--- /dev/null
+++ b/test/CodeGen/Hexagon/cexti16.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: memuh(r{{[0-9]+}}{{ *}}<<{{ *}}#2{{ *}}+{{ *}}##a)
+
+@a = external global [5 x [2 x i16]]
+
+define signext i16 @foo(i16 zeroext %l) nounwind readonly {
+for.end:
+ %idxprom = zext i16 %l to i32
+ %arrayidx1 = getelementptr inbounds [5 x [2 x i16]], [5 x [2 x i16]]* @a, i32 0, i32 %idxprom, i32 0
+ %0 = load i16, i16* %arrayidx1, align 2
+ %conv = zext i16 %0 to i32
+ %mul = mul nsw i32 %conv, 20
+ %conv2 = trunc i32 %mul to i16
+ ret i16 %conv2
+}
+
diff --git a/test/CodeGen/Hexagon/checktabs.ll b/test/CodeGen/Hexagon/checktabs.ll
new file mode 100644
index 0000000000000..740433bf824aa
--- /dev/null
+++ b/test/CodeGen/Hexagon/checktabs.ll
@@ -0,0 +1,8 @@
+; RUN: llc -march=hexagon < %s | FileCheck --strict-whitespace %s
+; Make sure we are emitting tabs as formatting.
+; CHECK: {
+; CHECK-NEXT: {{jump|r}}
+define i32 @foobar(i32 %a, i32 %b) {
+ %1 = add i32 %a, %b
+ ret i32 %1
+}
diff --git a/test/CodeGen/Hexagon/cmp-extend.ll b/test/CodeGen/Hexagon/cmp-extend.ll
new file mode 100644
index 0000000000000..0bd1fca73946f
--- /dev/null
+++ b/test/CodeGen/Hexagon/cmp-extend.ll
@@ -0,0 +1,40 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+
+%struct.RESULTS_S.A = type { i16, i16, i16, [4 x i8*], i32, i32, i32, %struct.list_head_s.B*, %struct.MAT_PARAMS_S.D, i16, i16, i16, i16, i16, %struct.CORE_PORTABLE_S.E }
+%struct.list_head_s.B = type { %struct.list_head_s.B*, %struct.list_data_s.C* }
+%struct.list_data_s.C = type { i16, i16 }
+%struct.MAT_PARAMS_S.D = type { i32, i16*, i16*, i32* }
+%struct.CORE_PORTABLE_S.E = type { i8 }
+
+; Test that we don't generate a zero extend in this case. Instead we generate
+; a single sign extend instead of two zero extends.
+
+; CHECK-NOT: zxth
+
+; Function Attrs: nounwind
+define void @core_bench_list(%struct.RESULTS_S.A* %res) #0 {
+entry:
+ %seed3 = getelementptr inbounds %struct.RESULTS_S.A, %struct.RESULTS_S.A* %res, i32 0, i32 2
+ %0 = load i16, i16* %seed3, align 2
+ %cmp364 = icmp sgt i16 %0, 0
+ br i1 %cmp364, label %for.body, label %while.body19.i160
+
+for.body:
+ %i.0370 = phi i16 [ %inc50, %if.then ], [ 0, %entry ]
+ br i1 undef, label %if.then, label %while.body.i273
+
+while.body.i273:
+ %tobool.i272 = icmp eq %struct.list_head_s.B* undef, null
+ br i1 %tobool.i272, label %if.then, label %while.body.i273
+
+if.then:
+ %inc50 = add i16 %i.0370, 1
+ %exitcond = icmp eq i16 %inc50, %0
+ br i1 %exitcond, label %while.body19.i160, label %for.body
+
+while.body19.i160:
+ br label %while.body19.i160
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
diff --git a/test/CodeGen/Hexagon/cmp-promote.ll b/test/CodeGen/Hexagon/cmp-promote.ll
new file mode 100644
index 0000000000000..7811b7e729cbe
--- /dev/null
+++ b/test/CodeGen/Hexagon/cmp-promote.ll
@@ -0,0 +1,72 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Bug 6714. Use sign-extend to promote the arguments for compare
+; equal/not-equal for 8- and 16-bit types with negative constants.
+
+; CHECK: cmp.eq{{.*}}#-16
+define i32 @foo1(i16 signext %q) nounwind readnone {
+entry:
+ %not.cmp = icmp ne i16 %q, -16
+ %res.0 = zext i1 %not.cmp to i32
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#-14
+define i32 @foo2(i16 signext %q) nounwind readnone {
+entry:
+ %cmp = icmp eq i16 %q, -14
+ %res.0 = select i1 %cmp, i32 2, i32 0
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#-8
+define i32 @foo3(i8 signext %r) nounwind readnone {
+entry:
+ %cmp = icmp eq i8 %r, -8
+ %res.0 = select i1 %cmp, i32 0, i32 3
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#-6
+define i32 @foo4(i8 signext %r) nounwind readnone {
+entry:
+ %cmp = icmp eq i8 %r, -6
+ %res.0 = select i1 %cmp, i32 4, i32 0
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#-20
+define i32 @foo5(i32 %s) nounwind readnone {
+entry:
+ %cmp = icmp eq i32 %s, -20
+ %res.0 = select i1 %cmp, i32 0, i32 5
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#-18
+define i32 @foo6(i32 %s) nounwind readnone {
+entry:
+ %cmp = icmp eq i32 %s, -18
+ %res.0 = select i1 %cmp, i32 6, i32 0
+ ret i32 %res.0
+}
+
+; CHECK: cmp.eq{{.*}}#10
+define i32 @foo7(i16 signext %q) nounwind readnone {
+entry:
+ %cmp = icmp eq i16 %q, 10
+ %res.0 = select i1 %cmp, i32 7, i32 0
+ ret i32 %res.0
+}
+
+@g = external global i16
+
+; CHECK: cmp.eq{{.*}}#-12
+define i32 @foo8() nounwind readonly {
+entry:
+ %0 = load i16, i16* @g, align 2
+ %cmp = icmp eq i16 %0, -12
+ %res.0 = select i1 %cmp, i32 0, i32 8
+ ret i32 %res.0
+}
+
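
An aside on the sign-extension requirement stated in the comment above (a hedged reading, not part of the patch): with a sign-extended argument, an i16 compare against a negative constant keeps its small negative immediate, which is exactly what the cmp.eq{{.*}}#-16 patterns check; a zero-extended argument would force a large positive immediate instead:

;   i16 -16 = 0xFFF0;  sext to i32 -> 0xFFFFFFF0 (-16)
;                      zext to i32 -> 0x0000FFF0 (65520)
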
diff --git a/test/CodeGen/Hexagon/cmp-to-genreg.ll b/test/CodeGen/Hexagon/cmp-to-genreg.ll
index 97cf51ce1a2bb..d0df168151317 100644
--- a/test/CodeGen/Hexagon/cmp-to-genreg.ll
+++ b/test/CodeGen/Hexagon/cmp-to-genreg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that we generate compare to general register.
define i32 @compare1(i32 %a) nounwind {
diff --git a/test/CodeGen/Hexagon/cmp-to-predreg.ll b/test/CodeGen/Hexagon/cmp-to-predreg.ll
index 2b65343ab2cfa..c97a736f10afc 100644
--- a/test/CodeGen/Hexagon/cmp-to-predreg.ll
+++ b/test/CodeGen/Hexagon/cmp-to-predreg.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that we generate compare to predicate register.
define i32 @compare1(i32 %a, i32 %b) nounwind {
diff --git a/test/CodeGen/Hexagon/cmp.ll b/test/CodeGen/Hexagon/cmp.ll
new file mode 100644
index 0000000000000..c274a787249a5
--- /dev/null
+++ b/test/CodeGen/Hexagon/cmp.ll
@@ -0,0 +1,161 @@
+; RUN: llc -march=hexagon --filetype=obj < %s -o - | llvm-objdump -d - | FileCheck %s
+
+; Function Attrs: nounwind
+define i32 @cmpeq(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpeq(i32 %0, i32 1)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpeq(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgt(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgt(i32 %0, i32 2)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgt(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgtu(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgtu(i32 %0, i32 3)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgtu(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmplt(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmplt(i32 %0, i32 4)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, r{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmplt(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpltu(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpltu(i32 %0, i32 5)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, r{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpltu(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpeqi(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpeqi(i32 %0, i32 10)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, {{.*}}#10)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpeqi(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgti(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgti(i32 %0, i32 20)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#20)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgtui(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgtui(i32 %0, i32 40)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#40)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgtui(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgei(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgei(i32 %0, i32 3)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gt(r{{[0-9]}}, {{.*}}#2)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgei(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgeu(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 3)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.gtu(r{{[0-9]}}, {{.*}}#2)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgeui(i32, i32) #1
+
+; Function Attrs: nounwind
+define i32 @cmpgeu0(i32 %i) #0 {
+entry:
+ %i.addr = alloca i32, align 4
+ store i32 %i, i32* %i.addr, align 4
+ %0 = load i32, i32* %i.addr, align 4
+ %1 = call i32 @llvm.hexagon.C2.cmpgeui(i32 %0, i32 0)
+ ret i32 %1
+}
+; CHECK: { p{{[0-3]}} = cmp.eq(r{{[0-9]}}, r{{[0-9]}})
+
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"Clang 3.1"}
+
diff --git a/test/CodeGen/Hexagon/cmp_pred.ll b/test/CodeGen/Hexagon/cmp_pred.ll
index 39549a1f2d547..ee3f5ddf1f12f 100644
--- a/test/CodeGen/Hexagon/cmp_pred.ll
+++ b/test/CodeGen/Hexagon/cmp_pred.ll
@@ -1,4 +1,3 @@
-; XFAIL:
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Generate various cmpb instruction followed by if (p0) .. if (!p0)...
target triple = "hexagon"
@@ -61,7 +60,7 @@ entry:
define i32 @Func_3gt(i32 %Enum_Par_Val, i32 %pv2) nounwind readnone {
entry:
-; CHECK: mux
+; CHECK-NOT: mux
%cmp = icmp sgt i32 %Enum_Par_Val, %pv2
%selv = zext i1 %cmp to i32
ret i32 %selv
diff --git a/test/CodeGen/Hexagon/cmp_pred_reg.ll b/test/CodeGen/Hexagon/cmp_pred_reg.ll
index 39549a1f2d547..ee3f5ddf1f12f 100644
--- a/test/CodeGen/Hexagon/cmp_pred_reg.ll
+++ b/test/CodeGen/Hexagon/cmp_pred_reg.ll
@@ -1,4 +1,3 @@
-; XFAIL:
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Generate various cmpb instruction followed by if (p0) .. if (!p0)...
target triple = "hexagon"
@@ -61,7 +60,7 @@ entry:
define i32 @Func_3gt(i32 %Enum_Par_Val, i32 %pv2) nounwind readnone {
entry:
-; CHECK: mux
+; CHECK-NOT: mux
%cmp = icmp sgt i32 %Enum_Par_Val, %pv2
%selv = zext i1 %cmp to i32
ret i32 %selv
diff --git a/test/CodeGen/Hexagon/cmpb-eq.ll b/test/CodeGen/Hexagon/cmpb-eq.ll
new file mode 100644
index 0000000000000..e59ed3e51c372
--- /dev/null
+++ b/test/CodeGen/Hexagon/cmpb-eq.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: cmpb.eq(r{{[0-9]+}}, #-1)
+
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-v64:64:64-v32:32:32-a0:0-n16:32"
+target triple = "hexagon"
+
+%struct.wms_address_s = type { i32, i32, i32, i32, i8, [48 x i8] }
+
+define zeroext i8 @qmi_wmsi_bin_to_addr(i8* %str, i8 zeroext %len, %struct.wms_address_s* %addr) nounwind optsize {
+entry:
+ %cmp = icmp eq i8* %str, null
+ %cmp2 = icmp eq %struct.wms_address_s* %addr, null
+ %or.cond = or i1 %cmp, %cmp2
+ br i1 %or.cond, label %if.then12, label %if.then
+
+if.then: ; preds = %entry
+ %dec = add i8 %len, -1
+ %cmp3 = icmp ugt i8 %dec, 24
+ %tobool27 = icmp eq i8 %dec, 0
+ %or.cond31 = or i1 %cmp3, %tobool27
+ br i1 %or.cond31, label %if.then12, label %for.body.lr.ph
+
+for.body.lr.ph: ; preds = %if.then
+ %dec626 = add i8 %len, -2
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %if.end21
+ %indvars.iv = phi i32 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %if.end21 ]
+ %dec630 = phi i8 [ %dec626, %for.body.lr.ph ], [ %dec6, %if.end21 ]
+ %str.pn = phi i8* [ %str, %for.body.lr.ph ], [ %str.addr.029, %if.end21 ]
+ %str.addr.029 = getelementptr inbounds i8, i8* %str.pn, i32 1
+ %0 = load i8, i8* %str.addr.029, align 1, !tbaa !0
+ %cmp10 = icmp ugt i8 %0, -49
+ br i1 %cmp10, label %if.then12.loopexit, label %if.end21
+
+if.then12.loopexit: ; preds = %if.end21, %for.body
+ br label %if.then12
+
+if.then12: ; preds = %if.then12.loopexit, %if.then, %entry
+ ret i8 0
+
+if.end21: ; preds = %for.body
+ %shr24 = lshr i8 %0, 4
+ %arrayidx = getelementptr inbounds %struct.wms_address_s, %struct.wms_address_s* %addr, i32 0, i32 5, i32 %indvars.iv
+ store i8 %shr24, i8* %arrayidx, align 1, !tbaa !0
+ %dec6 = add i8 %dec630, -1
+ %tobool = icmp eq i8 %dec630, 0
+ %indvars.iv.next = add i32 %indvars.iv, 1
+ br i1 %tobool, label %if.then12.loopexit, label %for.body
+}
+
+!0 = !{!"omnipotent char", !1}
+!1 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/cmpb_pred.ll b/test/CodeGen/Hexagon/cmpb_pred.ll
index 1a43e62916960..d5a76ff129e39 100644
--- a/test/CodeGen/Hexagon/cmpb_pred.ll
+++ b/test/CodeGen/Hexagon/cmpb_pred.ll
@@ -1,4 +1,3 @@
-; XFAIL:
; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
; Generate various cmpb instruction followed by if (p0) .. if (!p0)...
target triple = "hexagon"
@@ -64,7 +63,7 @@ entry:
define i32 @Func_3g(i32) nounwind readnone {
entry:
-; CHECK: mux
+; CHECK-NOT: mux
%conv = and i32 %0, 255
%cmp = icmp ult i32 %conv, 3
%selv = zext i1 %cmp to i32
diff --git a/test/CodeGen/Hexagon/eh_return.ll b/test/CodeGen/Hexagon/eh_return.ll
new file mode 100644
index 0000000000000..67649a07afc7e
--- /dev/null
+++ b/test/CodeGen/Hexagon/eh_return.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
+; Make sure we generate an exception handling return.
+
+; CHECK: deallocframe
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r29 = add(r29, r28)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
+
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
+target triple = "hexagon-unknown-linux-gnu"
+
+%struct.Data = type { i32, i8* }
+
+define i32 @test_eh_return(i32 %a, i32 %b) nounwind {
+entry:
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ %d = alloca %struct.Data, align 4
+ store i32 %a, i32* %a.addr, align 4
+ store i32 %b, i32* %b.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
+ %1 = load i32, i32* %b.addr, align 4
+ %cmp = icmp sgt i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %2 = load i32, i32* %a.addr, align 4
+ %3 = load i32, i32* %b.addr, align 4
+ %add = add nsw i32 %2, %3
+ ret i32 %add
+
+if.else: ; preds = %entry
+ %call = call i32 @setup(%struct.Data* %d)
+ %_d1 = getelementptr inbounds %struct.Data, %struct.Data* %d, i32 0, i32 0
+ %4 = load i32, i32* %_d1, align 4
+ %_d2 = getelementptr inbounds %struct.Data, %struct.Data* %d, i32 0, i32 1
+ %5 = load i8*, i8** %_d2, align 4
+ call void @llvm.eh.return.i32(i32 %4, i8* %5)
+ unreachable
+}
+
+declare i32 @setup(%struct.Data*)
+
+declare void @llvm.eh.return.i32(i32, i8*) nounwind
diff --git a/test/CodeGen/Hexagon/hwloop-lt.ll b/test/CodeGen/Hexagon/hwloop-lt.ll
index 7e2ad2a4678e4..8919f265abfe3 100644
--- a/test/CodeGen/Hexagon/hwloop-lt.ll
+++ b/test/CodeGen/Hexagon/hwloop-lt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 -O3 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
; CHECK-LABEL: @test_pos1_ir_slt
; CHECK: loop0
diff --git a/test/CodeGen/Hexagon/hwloop-lt1.ll b/test/CodeGen/Hexagon/hwloop-lt1.ll
index 16fe728fa7bcf..cf97fffce40aa 100644
--- a/test/CodeGen/Hexagon/hwloop-lt1.ll
+++ b/test/CodeGen/Hexagon/hwloop-lt1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
+; RUN: llc -march=hexagon < %s | FileCheck %s
; Check that we generate a hardware loop instruction.
; CHECK: endloop0
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
index 37f9f4007b67f..fcf80b08181ee 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -1,27 +1,30 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
+; CHECK-CALL-NOT: call
+
; Add
declare i32 @llvm.hexagon.A2.addi(i32, i32)
define i32 @A2_addi(i32 %a) {
%z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(r0, #0)
+; CHECK: = add({{.*}}, #0)
declare i32 @llvm.hexagon.A2.add(i32, i32)
define i32 @A2_add(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0, r1)
+; CHECK: = add({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.addsat(i32, i32)
define i32 @A2_addsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0, r1):sat
+; CHECK: = add({{.*}}, {{.*}}):sat
; Logical operations
declare i32 @llvm.hexagon.A2.and(i32, i32)
@@ -29,43 +32,35 @@ define i32 @A2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = and(r0, r1)
+; CHECK: = and({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.or(i32, i32)
define i32 @A2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = or(r0, r1)
+; CHECK: = or({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.xor(i32, i32)
define i32 @A2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = xor(r0, r1)
+; CHECK: = xor({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.andn(i32, i32)
define i32 @A4_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = and(r0, ~r1)
+; CHECK: = and({{.*}}, ~{{.*}})
declare i32 @llvm.hexagon.A4.orn(i32, i32)
define i32 @A4_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = or(r0, ~r1)
-
-; Nop
-declare void @llvm.hexagon.A2.nop()
-define void @A2_nop(i32 %a, i32 %b) {
- call void @llvm.hexagon.A2.nop()
- ret void
-}
-; CHECK: nop
+; CHECK: = or({{.*}}, ~{{.*}})
; Subtract
declare i32 @llvm.hexagon.A2.sub(i32, i32)
@@ -73,14 +68,14 @@ define i32 @A2_sub(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0, r1)
+; CHECK: = sub({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.subsat(i32, i32)
define i32 @A2_subsat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0, r1):sat
+; CHECK: = sub({{.*}}, {{.*}}):sat
; Sign extend
declare i32 @llvm.hexagon.A2.sxtb(i32)
@@ -88,14 +83,14 @@ define i32 @A2_sxtb(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
ret i32 %z
}
-; CHECK: r0 = sxtb(r0)
+; CHECK: = sxtb({{.*}})
declare i32 @llvm.hexagon.A2.sxth(i32)
define i32 @A2_sxth(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
ret i32 %z
}
-; CHECK: r0 = sxth(r0)
+; CHECK: = sxth({{.*}})
; Transfer immediate
declare i32 @llvm.hexagon.A2.tfril(i32, i32)
@@ -103,21 +98,21 @@ define i32 @A2_tfril(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0.l = #0
+; CHECK: = #0
declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
define i32 @A2_tfrih(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0.h = #0
+; CHECK: = #0
declare i32 @llvm.hexagon.A2.tfrsi(i32)
define i32 @A2_tfrsi() {
%z = call i32 @llvm.hexagon.A2.tfrsi(i32 0)
ret i32 %z
}
-; CHECK: r0 = #0
+; CHECK: = #0
; Transfer register
declare i32 @llvm.hexagon.A2.tfr(i32)
@@ -125,7 +120,7 @@ define i32 @A2_tfr(i32 %a) {
%z = call i32 @llvm.hexagon.A2.tfr(i32 %a)
ret i32 %z
}
-; CHECK: r0 = r0
+; CHECK: =
; Vector add halfwords
declare i32 @llvm.hexagon.A2.svaddh(i32, i32)
@@ -133,21 +128,21 @@ define i32 @A2_svaddh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vaddh(r0, r1)
+; CHECK: = vaddh({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.svaddhs(i32, i32)
define i32 @A2_svaddhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svaddhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vaddh(r0, r1):sat
+; CHECK: = vaddh({{.*}}, {{.*}}):sat
declare i32 @llvm.hexagon.A2.svadduhs(i32, i32)
define i32 @A2_svadduhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svadduhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vadduh(r0, r1):sat
+; CHECK: = vadduh({{.*}}, {{.*}}):sat
; Vector average halfwords
declare i32 @llvm.hexagon.A2.svavgh(i32, i32)
@@ -155,21 +150,21 @@ define i32 @A2_svavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vavgh(r0, r1)
+; CHECK: = vavgh({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.svavghs(i32, i32)
define i32 @A2_svavghs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svavghs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vavgh(r0, r1):rnd
+; CHECK: = vavgh({{.*}}, {{.*}}):rnd
declare i32 @llvm.hexagon.A2.svnavgh(i32, i32)
define i32 @A2_svnavgh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svnavgh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vnavgh(r0, r1)
+; CHECK: = vnavgh({{.*}}, {{.*}})
; Vector subtract halfwords
declare i32 @llvm.hexagon.A2.svsubh(i32, i32)
@@ -177,21 +172,21 @@ define i32 @A2_svsubh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vsubh(r0, r1)
+; CHECK: = vsubh({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.svsubhs(i32, i32)
define i32 @A2_svsubhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vsubh(r0, r1):sat
+; CHECK: = vsubh({{.*}}, {{.*}}):sat
declare i32 @llvm.hexagon.A2.svsubuhs(i32, i32)
define i32 @A2_svsubuhs(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.svsubuhs(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vsubuh(r0, r1):sat
+; CHECK: = vsubuh({{.*}}, {{.*}}):sat
; Zero extend
declare i32 @llvm.hexagon.A2.zxth(i32)
@@ -199,4 +194,4 @@ define i32 @A2_zxth(i32 %a) {
%z = call i32 @llvm.hexagon.A2.zxth(i32 %a)
ret i32 %z
}
-; CHECK: r0 = zxth(r0)
+; CHECK: = zxth({{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
index a9cc01c5dcb05..c9fb0afe07810 100644
--- a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -1,62 +1,65 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM
+; CHECK-CALL-NOT: call
+
; Combine words into doubleword
declare i64 @llvm.hexagon.A4.combineri(i32, i32)
define i64 @A4_combineri(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineri(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = combine(r0, #0)
+; CHECK: = combine({{.*}}, #0)
declare i64 @llvm.hexagon.A4.combineir(i32, i32)
define i64 @A4_combineir(i32 %a) {
%z = call i64 @llvm.hexagon.A4.combineir(i32 0, i32 %a)
ret i64 %z
}
-; CHECK: = combine(#0, r0)
+; CHECK: = combine(#0, {{.*}})
declare i64 @llvm.hexagon.A2.combineii(i32, i32)
define i64 @A2_combineii() {
%z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = combine(#0, #0)
+; CHECK: = combine(#0, #0)
declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
define i32 @A2_combine_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = combine(r0.h, r1.h)
+; CHECK: = combine({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
define i32 @A2_combine_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = combine(r0.h, r1.l)
+; CHECK: = combine({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
define i32 @A2_combine_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = combine(r0.l, r1.h)
+; CHECK: = combine({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
define i32 @A2_combine_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = combine(r0.l, r1.l)
+; CHECK: = combine({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.combinew(i32, i32)
define i64 @A2_combinew(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = combine(r0, r1)
+; CHECK: = combine({{.*}}, {{.*}})
; Mux
declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
@@ -64,21 +67,21 @@ define i32 @C2_muxri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mux(p0, #0, r1)
+; CHECK: = mux({{.*}}, #0, {{.*}})
declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
define i32 @C2_muxir(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 = mux(p0, r1, #0)
+; CHECK: = mux({{.*}}, {{.*}}, #0)
declare i32 @llvm.hexagon.C2.mux(i32, i32, i32)
define i32 @C2_mux(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C2.mux(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 = mux(p0, r1, r2)
+; CHECK: = mux({{.*}}, {{.*}}, {{.*}})
; Shift word by 16
declare i32 @llvm.hexagon.A2.aslh(i32)
@@ -86,14 +89,14 @@ define i32 @A2_aslh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.aslh(i32 %a)
ret i32 %z
}
-; CHECK: r0 = aslh(r0)
+; CHECK: = aslh({{.*}})
declare i32 @llvm.hexagon.A2.asrh(i32)
define i32 @A2_asrh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.asrh(i32 %a)
ret i32 %z
}
-; CHECK: r0 = asrh(r0)
+; CHECK: = asrh({{.*}})
; Pack high and low halfwords
declare i64 @llvm.hexagon.S2.packhl(i32, i32)
@@ -101,4 +104,4 @@ define i64 @S2_packhl(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = packhl(r0, r1)
+; CHECK: = packhl({{.*}}, {{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/cr.ll b/test/CodeGen/Hexagon/intrinsics/cr.ll
index 9bdcb253fe2fa..f308ef8e56643 100644
--- a/test/CodeGen/Hexagon/intrinsics/cr.ll
+++ b/test/CodeGen/Hexagon/intrinsics/cr.ll
@@ -1,20 +1,23 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.2 CR
+; CHECK-CALL-NOT: call
+
; Corner detection acceleration
declare i32 @llvm.hexagon.C4.fastcorner9(i32, i32)
define i32 @C4_fastcorner9(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = fastcorner9(p0, p1)
+; CHECK: = fastcorner9({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.fastcorner9.not(i32, i32)
define i32 @C4_fastcorner9_not(i32 %a, i32 %b) {
%z = call i32@llvm.hexagon.C4.fastcorner9.not(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = !fastcorner9(p0, p1)
+; CHECK: = !fastcorner9({{.*}}, {{.*}})
; Logical reductions on predicates
declare i32 @llvm.hexagon.C2.any8(i32)
@@ -22,7 +25,7 @@ define i32 @C2_any8(i32 %a) {
%z = call i32@llvm.hexagon.C2.any8(i32 %a)
ret i32 %z
}
-; CHECK: p0 = any8(p0)
+; CHECK: = any8({{.*}})
declare i32 @llvm.hexagon.C2.all8(i32)
define i32 @C2_all8(i32 %a) {
@@ -30,7 +33,7 @@ define i32 @C2_all8(i32 %a) {
ret i32 %z
}
-; CHECK: p0 = all8(p0)
+; CHECK: = all8({{.*}})
; Logical operations on predicates
declare i32 @llvm.hexagon.C2.and(i32, i32)
@@ -38,95 +41,95 @@ define i32 @C2_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.and(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = and(p0, p1)
+; CHECK: = and({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.and.and(i32, i32, i32)
define i32 @C4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = and(p0, and(p1, p2))
+; CHECK: = and({{.*}}, and({{.*}}, {{.*}}))
declare i32 @llvm.hexagon.C2.or(i32, i32)
define i32 @C2_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.or(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = or(p0, p1)
+; CHECK: = or({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.and.or(i32, i32, i32)
define i32 @C4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = and(p0, or(p1, p2))
+; CHECK: = and({{.*}}, or({{.*}}, {{.*}}))
declare i32 @llvm.hexagon.C2.xor(i32, i32)
define i32 @C2_xor(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.xor(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = xor(p0, p1)
+; CHECK: = xor({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.or.and(i32, i32, i32)
define i32 @C4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = or(p0, and(p1, p2))
+; CHECK: = or({{.*}}, and({{.*}}, {{.*}}))
declare i32 @llvm.hexagon.C2.andn(i32, i32)
define i32 @C2_andn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.andn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = and(p0, !p1)
+; CHECK: = and({{.*}}, !{{.*}})
declare i32 @llvm.hexagon.C4.or.or(i32, i32, i32)
define i32 @C4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = or(p0, or(p1, p2))
+; CHECK: = or({{.*}}, or({{.*}}, {{.*}}))
declare i32 @llvm.hexagon.C4.and.andn(i32, i32, i32)
define i32 @C4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = and(p0, and(p1, !p2))
+; CHECK: = and({{.*}}, and({{.*}}, !{{.*}}))
declare i32 @llvm.hexagon.C4.and.orn(i32, i32, i32)
define i32 @C4_and_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.and.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = and(p0, or(p1, !p2))
+; CHECK: = and({{.*}}, or({{.*}}, !{{.*}}))
declare i32 @llvm.hexagon.C2.not(i32)
define i32 @C2_not(i32 %a) {
%z = call i32 @llvm.hexagon.C2.not(i32 %a)
ret i32 %z
}
-; CHECK: p0 = not(p0)
+; CHECK: = not({{.*}})
declare i32 @llvm.hexagon.C4.or.andn(i32, i32, i32)
define i32 @C4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = or(p0, and(p1, !p2))
+; CHECK: = or({{.*}}, and({{.*}}, !{{.*}}))
declare i32 @llvm.hexagon.C2.orn(i32, i32)
define i32 @C2_orn(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.orn(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = or(p0, !p1)
+; CHECK: = or({{.*}}, !{{.*}})
declare i32 @llvm.hexagon.C4.or.orn(i32, i32, i32)
define i32 @C4_or_orn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.C4.or.orn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: p0 = or(p0, or(p1, !p2))
+; CHECK: = or({{.*}}, or({{.*}}, !{{.*}}))
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
index 4a11112d73a96..c5c23c22bde94 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_alu.ll
@@ -1,13 +1,17 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \
+; RUN: FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.1 XTYPE/ALU
+; CHECK-CALL-NOT: call
+
; Absolute value doubleword
declare i64 @llvm.hexagon.A2.absp(i64)
define i64 @A2_absp(i64 %a) {
%z = call i64 @llvm.hexagon.A2.absp(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = abs(r1:0)
+; CHECK: = abs({{.*}})
; Absolute value word
declare i32 @llvm.hexagon.A2.abs(i32)
@@ -15,14 +19,14 @@ define i32 @A2_abs(i32 %a) {
%z = call i32 @llvm.hexagon.A2.abs(i32 %a)
ret i32 %z
}
-; CHECK: r0 = abs(r0)
+; CHECK: = abs({{.*}})
declare i32 @llvm.hexagon.A2.abssat(i32)
define i32 @A2_abssat(i32 %a) {
%z = call i32 @llvm.hexagon.A2.abssat(i32 %a)
ret i32 %z
}
-; CHECK: r0 = abs(r0):sat
+; CHECK: = abs({{.*}}):sat
; Add and accumulate
declare i32 @llvm.hexagon.S4.addaddi(i32, i32, i32)
@@ -30,42 +34,42 @@ define i32 @S4_addaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.addaddi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(r0, add(r1, #0))
+; CHECK: = add({{.*}}, add({{.*}}, #0))
declare i32 @llvm.hexagon.S4.subaddi(i32, i32, i32)
define i32 @S4_subaddi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.subaddi(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0, sub(#0, r1))
+; CHECK: = add({{.*}}, sub(#0, {{.*}}))
declare i32 @llvm.hexagon.M2.accii(i32, i32, i32)
define i32 @M2_accii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.accii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 += add(r1, #0)
+; CHECK: += add({{.*}}, #0)
declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32)
define i32 @M2_naccii(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.naccii(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 -= add(r1, #0)
+; CHECK: -= add({{.*}}, #0)
declare i32 @llvm.hexagon.M2.acci(i32, i32, i32)
define i32 @M2_acci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.acci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += add(r1, r2)
+; CHECK: += add({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32)
define i32 @M2_nacci(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.nacci(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= add(r1, r2)
+; CHECK: -= add({{.*}}, {{.*}})
; Add doublewords
declare i64 @llvm.hexagon.A2.addp(i64, i64)
@@ -73,14 +77,14 @@ define i64 @A2_addp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = add(r1:0, r3:2)
+; CHECK: = add({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.addpsat(i64, i64)
define i64 @A2_addpsat(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.addpsat(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = add(r1:0, r3:2):sat
+; CHECK: = add({{.*}}, {{.*}}):sat
; Add halfword
declare i32 @llvm.hexagon.A2.addh.l16.ll(i32, i32)
@@ -88,84 +92,84 @@ define i32 @A2_addh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.l)
+; CHECK: = add({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.A2.addh.l16.hl(i32, i32)
define i32 @A2_addh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.h)
+; CHECK: = add({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32)
define i32 @A2_addh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.l):sat
+; CHECK: = add({{.*}}.l, {{.*}}.l):sat
declare i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32, i32)
define i32 @A2_addh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.h):sat
+; CHECK: = add({{.*}}.l, {{.*}}.h):sat
declare i32 @llvm.hexagon.A2.addh.h16.ll(i32, i32)
define i32 @A2_addh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.l):<<16
+; CHECK: = add({{.*}}.l, {{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.lh(i32, i32)
define i32 @A2_addh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.h):<<16
+; CHECK: = add({{.*}}.l, {{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hl(i32, i32)
define i32 @A2_addh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.h, r1.l):<<16
+; CHECK: = add({{.*}}.h, {{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.addh.h16.hh(i32, i32)
define i32 @A2_addh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.h, r1.h):<<16
+; CHECK: = add({{.*}}.h, {{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32, i32)
define i32 @A2_addh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.l):sat:<<16
+; CHECK: = add({{.*}}.l, {{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32, i32)
define i32 @A2_addh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.l, r1.h):sat:<<16
+; CHECK: = add({{.*}}.l, {{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32, i32)
define i32 @A2_addh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.h, r1.l):sat:<<16
+; CHECK: = add({{.*}}.h, {{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32, i32)
define i32 @A2_addh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.addh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0.h, r1.h):sat:<<16
+; CHECK: = add({{.*}}.h, {{.*}}.h):sat:<<16
; Logical doublewords
declare i64 @llvm.hexagon.A2.notp(i64)
@@ -173,42 +177,42 @@ define i64 @A2_notp(i64 %a) {
%z = call i64 @llvm.hexagon.A2.notp(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = not(r1:0)
+; CHECK: = not({{.*}})
declare i64 @llvm.hexagon.A2.andp(i64, i64)
define i64 @A2_andp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.andp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = and(r1:0, r3:2)
+; CHECK: = and({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.andnp(i64, i64)
define i64 @A2_andnp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.andnp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = and(r1:0, ~r3:2)
+; CHECK: = and({{.*}}, ~{{.*}})
declare i64 @llvm.hexagon.A2.orp(i64, i64)
define i64 @A2_orp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.orp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = or(r1:0, r3:2)
+; CHECK: = or({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.ornp(i64, i64)
define i64 @A2_ornp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A4.ornp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = or(r1:0, ~r3:2)
+; CHECK: = or({{.*}}, ~{{.*}})
declare i64 @llvm.hexagon.A2.xorp(i64, i64)
define i64 @A2_xorp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.xorp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = xor(r1:0, r3:2)
+; CHECK: = xor({{.*}}, {{.*}})
; Logical-logical doublewords
declare i64 @llvm.hexagon.M4.xor.xacc(i64, i64, i64)
@@ -216,7 +220,7 @@ define i64 @M4_xor_xacc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.xor.xacc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 ^= xor(r3:2, r5:4)
+; CHECK: ^= xor({{.*}}, {{.*}})
; Logical-logical words
declare i32 @llvm.hexagon.S4.or.andi(i32, i32, i32)
@@ -224,91 +228,91 @@ define i32 @S4_or_andi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andi(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 |= and(r1, #0)
+; CHECK: |= and({{.*}}, #0)
declare i32 @llvm.hexagon.S4.or.andix(i32, i32, i32)
define i32 @S4_or_andix(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.or.andix(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r1 = or(r0, and(r1, #0))
+; CHECK: = or({{.*}}, and({{.*}}, #0))
declare i32 @llvm.hexagon.M4.or.andn(i32, i32, i32)
define i32 @M4_or_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= and(r1, ~r2)
+; CHECK: |= and({{.*}}, ~{{.*}})
declare i32 @llvm.hexagon.M4.and.andn(i32, i32, i32)
define i32 @M4_and_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= and(r1, ~r2)
+; CHECK: &= and({{.*}}, ~{{.*}})
declare i32 @llvm.hexagon.M4.xor.andn(i32, i32, i32)
define i32 @M4_xor_andn(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.andn(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 ^= and(r1, ~r2)
+; CHECK: ^= and({{.*}}, ~{{.*}})
declare i32 @llvm.hexagon.M4.and.and(i32, i32, i32)
define i32 @M4_and_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= and(r1, r2)
+; CHECK: &= and({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.and.or(i32, i32, i32)
define i32 @M4_and_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= or(r1, r2)
+; CHECK: &= or({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.and.xor(i32, i32, i32)
define i32 @M4_and_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.and.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= xor(r1, r2)
+; CHECK: &= xor({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.or.and(i32, i32, i32)
define i32 @M4_or_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= and(r1, r2)
+; CHECK: |= and({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.or.or(i32, i32, i32)
define i32 @M4_or_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= or(r1, r2)
+; CHECK: |= or({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.or.xor(i32, i32, i32)
define i32 @M4_or_xor(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.or.xor(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= xor(r1, r2)
+; CHECK: |= xor({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.xor.and(i32, i32, i32)
define i32 @M4_xor_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 ^= and(r1, r2)
+; CHECK: ^= and({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M4.xor.or(i32, i32, i32)
define i32 @M4_xor_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.xor.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 ^= or(r1, r2)
+; CHECK: ^= or({{.*}}, {{.*}})
; Maximum words
declare i32 @llvm.hexagon.A2.max(i32, i32)
@@ -316,14 +320,14 @@ define i32 @A2_max(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.max(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = max(r0, r1)
+; CHECK: = max({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.maxu(i32, i32)
define i32 @A2_maxu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.maxu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = maxu(r0, r1)
+; CHECK: = maxu({{.*}}, {{.*}})
; Maximum doublewords
declare i64 @llvm.hexagon.A2.maxp(i64, i64)
@@ -331,14 +335,14 @@ define i64 @A2_maxp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = max(r1:0, r3:2)
+; CHECK: = max({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.maxup(i64, i64)
define i64 @A2_maxup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.maxup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = maxu(r1:0, r3:2)
+; CHECK: = maxu({{.*}}, {{.*}})
; Minimum words
declare i32 @llvm.hexagon.A2.min(i32, i32)
@@ -346,14 +350,14 @@ define i32 @A2_min(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.min(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = min(r0, r1)
+; CHECK: = min({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.minu(i32, i32)
define i32 @A2_minu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.minu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = minu(r0, r1)
+; CHECK: = minu({{.*}}, {{.*}})
; Minimum doublewords
declare i64 @llvm.hexagon.A2.minp(i64, i64)
@@ -361,14 +365,14 @@ define i64 @A2_minp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = min(r1:0, r3:2)
+; CHECK: = min({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.minup(i64, i64)
define i64 @A2_minup(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.minup(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = minu(r1:0, r3:2)
+; CHECK: = minu({{.*}}, {{.*}})
; Modulo wrap
declare i32 @llvm.hexagon.A4.modwrapu(i32, i32)
@@ -376,7 +380,7 @@ define i32 @A4_modwrapu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.modwrapu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = modwrap(r0, r1)
+; CHECK: = modwrap({{.*}}, {{.*}})
; Negate
declare i64 @llvm.hexagon.A2.negp(i64)
@@ -384,14 +388,14 @@ define i64 @A2_negp(i64 %a) {
%z = call i64 @llvm.hexagon.A2.negp(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = neg(r1:0)
+; CHECK: = neg({{.*}})
declare i32 @llvm.hexagon.A2.negsat(i32)
define i32 @A2_negsat(i32 %a) {
%z = call i32 @llvm.hexagon.A2.negsat(i32 %a)
ret i32 %z
}
-; CHECK: r0 = neg(r0):sat
+; CHECK: = neg({{.*}}):sat
; Round
declare i32 @llvm.hexagon.A2.roundsat(i64)
@@ -399,49 +403,49 @@ define i32 @A2_roundsat(i64 %a) {
%z = call i32 @llvm.hexagon.A2.roundsat(i64 %a)
ret i32 %z
}
-; CHECK: r0 = round(r1:0):sat
+; CHECK: = round({{.*}}):sat
declare i32 @llvm.hexagon.A4.cround.ri(i32, i32)
define i32 @A4_cround_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cround.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = cround(r0, #0)
+; CHECK: = cround({{.*}}, #0)
declare i32 @llvm.hexagon.A4.round.ri(i32, i32)
define i32 @A4_round_ri(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = round(r0, #0)
+; CHECK: = round({{.*}}, #0)
declare i32 @llvm.hexagon.A4.round.ri.sat(i32, i32)
define i32 @A4_round_ri_sat(i32 %a) {
%z = call i32 @llvm.hexagon.A4.round.ri.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = round(r0, #0):sat
+; CHECK: = round({{.*}}, #0):sat
declare i32 @llvm.hexagon.A4.cround.rr(i32, i32)
define i32 @A4_cround_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cround.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cround(r0, r1)
+; CHECK: = cround({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.round.rr(i32, i32)
define i32 @A4_round_rr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = round(r0, r1)
+; CHECK: = round({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.round.rr.sat(i32, i32)
define i32 @A4_round_rr_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.round.rr.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = round(r0, r1):sat
+; CHECK: = round({{.*}}, {{.*}}):sat
; Subtract doublewords
declare i64 @llvm.hexagon.A2.subp(i64, i64)
@@ -449,7 +453,7 @@ define i64 @A2_subp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.subp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = sub(r1:0, r3:2)
+; CHECK: = sub({{.*}}, {{.*}})
; Subtract and accumulate
declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32)
@@ -457,7 +461,7 @@ define i32 @M2_subacc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.subacc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += sub(r1, r2)
+; CHECK: += sub({{.*}}, {{.*}})
; Subtract halfwords
declare i32 @llvm.hexagon.A2.subh.l16.ll(i32, i32)
@@ -465,84 +469,84 @@ define i32 @A2_subh_l16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.l)
+; CHECK: = sub({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.A2.subh.l16.hl(i32, i32)
define i32 @A2_subh_l16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.h)
+; CHECK: = sub({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32)
define i32 @A2_subh_l16_sat.ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.l):sat
+; CHECK: = sub({{.*}}.l, {{.*}}.l):sat
declare i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32, i32)
define i32 @A2_subh_l16_sat.hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.l16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.h):sat
+; CHECK: = sub({{.*}}.l, {{.*}}.h):sat
declare i32 @llvm.hexagon.A2.subh.h16.ll(i32, i32)
define i32 @A2_subh_h16_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.l):<<16
+; CHECK: = sub({{.*}}.l, {{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.lh(i32, i32)
define i32 @A2_subh_h16_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.h):<<16
+; CHECK: = sub({{.*}}.l, {{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hl(i32, i32)
define i32 @A2_subh_h16_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.h, r1.l):<<16
+; CHECK: = sub({{.*}}.h, {{.*}}.l):<<16
declare i32 @llvm.hexagon.A2.subh.h16.hh(i32, i32)
define i32 @A2_subh_h16_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.h, r1.h):<<16
+; CHECK: = sub({{.*}}.h, {{.*}}.h):<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32, i32)
define i32 @A2_subh_h16_sat_ll(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.ll(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.l):sat:<<16
+; CHECK: = sub({{.*}}.l, {{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32, i32)
define i32 @A2_subh_h16_sat_lh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.lh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.l, r1.h):sat:<<16
+; CHECK: = sub({{.*}}.l, {{.*}}.h):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32, i32)
define i32 @A2_subh_h16_sat_hl(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hl(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.h, r1.l):sat:<<16
+; CHECK: = sub({{.*}}.h, {{.*}}.l):sat:<<16
declare i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32, i32)
define i32 @A2_subh_h16_sat_hh(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A2.subh.h16.sat.hh(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = sub(r0.h, r1.h):sat:<<16
+; CHECK: = sub({{.*}}.h, {{.*}}.h):sat:<<16
; Sign extend word to doubleword
declare i64 @llvm.hexagon.A2.sxtw(i32)
@@ -550,7 +554,7 @@ define i64 @A2_sxtw(i32 %a) {
%z = call i64 @llvm.hexagon.A2.sxtw(i32 %a)
ret i64 %z
}
-; CHECK: = sxtw(r0)
+; CHECK: = sxtw({{.*}})
; Vector absolute value halfwords
declare i64 @llvm.hexagon.A2.vabsh(i64)
@@ -558,14 +562,14 @@ define i64 @A2_vabsh(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vabsh(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vabsh(r1:0)
+; CHECK: = vabsh({{.*}})
declare i64 @llvm.hexagon.A2.vabshsat(i64)
define i64 @A2_vabshsat(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vabshsat(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vabsh(r1:0):sat
+; CHECK: = vabsh({{.*}}):sat
; Vector absolute value words
declare i64 @llvm.hexagon.A2.vabsw(i64)
@@ -573,14 +577,14 @@ define i64 @A2_vabsw(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vabsw(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vabsw(r1:0)
+; CHECK: = vabsw({{.*}})
declare i64 @llvm.hexagon.A2.vabswsat(i64)
define i64 @A2_vabswsat(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vabswsat(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vabsw(r1:0):sat
+; CHECK: = vabsw({{.*}}):sat
; Vector absolute difference halfwords
declare i64 @llvm.hexagon.M2.vabsdiffh(i64, i64)
@@ -588,7 +592,7 @@ define i64 @M2_vabsdiffh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vabsdiffh(r1:0, r3:2)
+; CHECK: = vabsdiffh({{.*}}, {{.*}})
; Vector absolute difference words
declare i64 @llvm.hexagon.M2.vabsdiffw(i64, i64)
@@ -596,7 +600,7 @@ define i64 @M2_vabsdiffw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vabsdiffw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vabsdiffw(r1:0, r3:2)
+; CHECK: = vabsdiffw({{.*}}, {{.*}})
; Vector add halfwords
declare i64 @llvm.hexagon.A2.vaddh(i64, i64)
@@ -604,21 +608,21 @@ define i64 @A2_vaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddh(r1:0, r3:2)
+; CHECK: = vaddh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vaddhs(i64, i64)
define i64 @A2_vaddhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddh(r1:0, r3:2):sat
+; CHECK: = vaddh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.A2.vadduhs(i64, i64)
define i64 @A2_vadduhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vadduhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vadduh(r1:0, r3:2):sat
+; CHECK: = vadduh({{.*}}, {{.*}}):sat
; Vector add halfwords with saturate and pack to unsigned bytes
declare i32 @llvm.hexagon.A5.vaddhubs(i64, i64)
@@ -626,7 +630,7 @@ define i32 @A5_vaddhubs(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A5.vaddhubs(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = vaddhub(r1:0, r3:2):sat
+; CHECK: = vaddhub({{.*}}, {{.*}}):sat
; Vector reduce add unsigned bytes
declare i64 @llvm.hexagon.A2.vraddub(i64, i64)
@@ -634,14 +638,14 @@ define i64 @A2_vraddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vraddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vraddub(r1:0, r3:2)
+; CHECK: = vraddub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vraddub.acc(i64, i64, i64)
define i64 @A2_vraddub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vraddub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vraddub(r3:2, r5:4)
+; CHECK: += vraddub({{.*}}, {{.*}})
; Vector reduce add halfwords
declare i32 @llvm.hexagon.M2.vradduh(i64, i64)
@@ -649,14 +653,14 @@ define i32 @M2_vradduh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vradduh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = vradduh(r1:0, r3:2)
+; CHECK: = vradduh({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M2.vraddh(i64, i64)
define i32 @M2_vraddh(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.M2.vraddh(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = vraddh(r1:0, r3:2)
+; CHECK: = vraddh({{.*}}, {{.*}})
; Vector add bytes
declare i64 @llvm.hexagon.A2.vaddub(i64, i64)
@@ -664,14 +668,14 @@ define i64 @A2_vaddub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddub(r1:0, r3:2)
+; CHECK: = vaddub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vaddubs(i64, i64)
define i64 @A2_vaddubs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddubs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddub(r1:0, r3:2):sat
+; CHECK: = vaddub({{.*}}, {{.*}}):sat
; Vector add words
declare i64 @llvm.hexagon.A2.vaddw(i64, i64)
@@ -679,14 +683,14 @@ define i64 @A2_vaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddw(r1:0, r3:2)
+; CHECK: = vaddw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vaddws(i64, i64)
define i64 @A2_vaddws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vaddws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaddw(r1:0, r3:2):sat
+; CHECK: = vaddw({{.*}}, {{.*}}):sat
; Vector average halfwords
declare i64 @llvm.hexagon.A2.vavgh(i64, i64)
@@ -694,56 +698,56 @@ define i64 @A2_vavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgh(r1:0, r3:2)
+; CHECK: = vavgh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vavghr(i64, i64)
define i64 @A2_vavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgh(r1:0, r3:2):rnd
+; CHECK: = vavgh({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavghcr(i64, i64)
define i64 @A2_vavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgh(r1:0, r3:2):crnd
+; CHECK: = vavgh({{.*}}, {{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguh(i64, i64)
define i64 @A2_vavguh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavguh(r1:0, r3:2)
+; CHECK: = vavguh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vavguhr(i64, i64)
define i64 @A2_vavguhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavguh(r1:0, r3:2):rnd
+; CHECK: = vavguh({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgh(i64, i64)
define i64 @A2_vnavgh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgh(r1:0, r3:2)
+; CHECK: = vnavgh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vnavghr(i64, i64)
define i64 @A2_vnavghr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgh(r1:0, r3:2):rnd
+; CHECK: = vnavgh({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavghcr(i64, i64)
define i64 @A2_vnavghcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavghcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgh(r1:0, r3:2):crnd
+; CHECK: = vnavgh({{.*}}, {{.*}}):crnd
; Vector average unsigned bytes
declare i64 @llvm.hexagon.A2.vavgub(i64, i64)
@@ -751,14 +755,14 @@ define i64 @A2_vavgub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgub(r1:0, r3:2)
+; CHECK: = vavgub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vavgubr(i64, i64)
define i64 @A2_vavgubr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgubr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgub(r1:0, r3:2):rnd
+; CHECK: = vavgub({{.*}}, {{.*}}):rnd
; Vector average words
declare i64 @llvm.hexagon.A2.vavgw(i64, i64)
@@ -766,56 +770,56 @@ define i64 @A2_vavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgw(r1:0, r3:2)
+; CHECK: = vavgw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vavgwr(i64, i64)
define i64 @A2_vavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgw(r1:0, r3:2):rnd
+; CHECK: = vavgw({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vavgwcr(i64, i64)
define i64 @A2_vavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavgw(r1:0, r3:2):crnd
+; CHECK: = vavgw({{.*}}, {{.*}}):crnd
declare i64 @llvm.hexagon.A2.vavguw(i64, i64)
define i64 @A2_vavguw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavguw(r1:0, r3:2)
+; CHECK: = vavguw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vavguwr(i64, i64)
define i64 @A2_vavguwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vavguwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vavguw(r1:0, r3:2):rnd
+; CHECK: = vavguw({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgw(i64, i64)
define i64 @A2_vnavgw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgw(r1:0, r3:2)
+; CHECK: = vnavgw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vnavgwr(i64, i64)
define i64 @A2_vnavgwr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgw(r1:0, r3:2):rnd
+; CHECK: = vnavgw({{.*}}, {{.*}}):rnd
declare i64 @llvm.hexagon.A2.vnavgwcr(i64, i64)
define i64 @A2_vnavgwcr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vnavgwcr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vnavgw(r1:0, r3:2):crnd
+; CHECK: = vnavgw({{.*}}, {{.*}}):crnd
; Vector conditional negate
declare i64 @llvm.hexagon.S2.vcnegh(i64, i32)
@@ -823,14 +827,14 @@ define i64 @S2_vcnegh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcnegh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcnegh(r1:0, r2)
+; CHECK: = vcnegh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.vrcnegh(i64, i64, i32)
define i64 @S2_vrcnegh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vrcnegh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrcnegh(r3:2, r4)
+; CHECK: += vrcnegh({{.*}}, {{.*}})
; Vector maximum bytes
declare i64 @llvm.hexagon.A2.vmaxub(i64, i64)
@@ -838,14 +842,14 @@ define i64 @A2_vmaxub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmaxub(r1:0, r3:2)
+; CHECK: = vmaxub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vmaxb(i64, i64)
define i64 @A2_vmaxb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmaxb(r1:0, r3:2)
+; CHECK: = vmaxb({{.*}}, {{.*}})
; Vector maximum halfwords
declare i64 @llvm.hexagon.A2.vmaxh(i64, i64)
@@ -853,14 +857,14 @@ define i64 @A2_vmaxh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmaxh(r1:0, r3:2)
+; CHECK: = vmaxh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vmaxuh(i64, i64)
define i64 @A2_vmaxuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vmaxuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmaxuh(r1:0, r3:2)
+; CHECK: = vmaxuh({{.*}}, {{.*}})
; Vector reduce maximum halfwords
declare i64 @llvm.hexagon.A4.vrmaxh(i64, i64, i32)
@@ -868,14 +872,14 @@ define i64 @A4_vrmaxh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrmaxh(r3:2, r4)
+; CHECK: = vrmaxh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuh(i64, i64, i32)
define i64 @A4_vrmaxuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrmaxuh(r3:2, r4)
+; CHECK: = vrmaxuh({{.*}}, {{.*}})
; Vector reduce maximum words
declare i64 @llvm.hexagon.A4.vrmaxw(i64, i64, i32)
@@ -883,14 +887,14 @@ define i64 @A4_vrmaxw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrmaxw(r3:2, r4)
+; CHECK: = vrmaxw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.vrmaxuw(i64, i64, i32)
define i64 @A4_vrmaxuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrmaxuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrmaxuw(r3:2, r4)
+; CHECK: = vrmaxuw({{.*}}, {{.*}})
; Vector minimum bytes
declare i64 @llvm.hexagon.A2.vminub(i64, i64)
@@ -898,14 +902,14 @@ define i64 @A2_vminub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vminub(r1:0, r3:2)
+; CHECK: = vminub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vminb(i64, i64)
define i64 @A2_vminb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vminb(r1:0, r3:2)
+; CHECK: = vminb({{.*}}, {{.*}})
; Vector minimum halfwords
declare i64 @llvm.hexagon.A2.vminh(i64, i64)
@@ -913,14 +917,14 @@ define i64 @A2_vminh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vminh(r1:0, r3:2)
+; CHECK: = vminh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vminuh(i64, i64)
define i64 @A2_vminuh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vminuh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vminuh(r1:0, r3:2)
+; CHECK: = vminuh({{.*}}, {{.*}})
; Vector reduce minimum halfwords
declare i64 @llvm.hexagon.A4.vrminh(i64, i64, i32)
@@ -928,14 +932,14 @@ define i64 @A4_vrminh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrminh(r3:2, r4)
+; CHECK: = vrminh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.vrminuh(i64, i64, i32)
define i64 @A4_vrminuh(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuh(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrminuh(r3:2, r4)
+; CHECK: = vrminuh({{.*}}, {{.*}})
; Vector reduce minimum words
declare i64 @llvm.hexagon.A4.vrminw(i64, i64, i32)
@@ -943,14 +947,14 @@ define i64 @A4_vrminw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrminw(r3:2, r4)
+; CHECK: = vrminw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A4.vrminuw(i64, i64, i32)
define i64 @A4_vrminuw(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.A4.vrminuw(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vrminuw(r3:2, r4)
+; CHECK: = vrminuw({{.*}}, {{.*}})
; Vector sum of absolute differences unsigned bytes
declare i64 @llvm.hexagon.A2.vrsadub(i64, i64)
@@ -958,14 +962,14 @@ define i64 @A2_vrsadub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vrsadub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrsadub(r1:0, r3:2)
+; CHECK: = vrsadub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vrsadub.acc(i64, i64, i64)
define i64 @A2_vrsadub_acc(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.A2.vrsadub.acc(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrsadub(r3:2, r5:4)
+; CHECK: += vrsadub({{.*}}, {{.*}})
; Vector subtract halfwords
declare i64 @llvm.hexagon.A2.vsubh(i64, i64)
@@ -973,21 +977,21 @@ define i64 @A2_vsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubh(r1:0, r3:2)
+; CHECK: = vsubh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vsubhs(i64, i64)
define i64 @A2_vsubhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubh(r1:0, r3:2):sat
+; CHECK: = vsubh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.A2.vsubuhs(i64, i64)
define i64 @A2_vsubuhs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubuhs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubuh(r1:0, r3:2):sat
+; CHECK: = vsubuh({{.*}}, {{.*}}):sat
; Vector subtract bytes
declare i64 @llvm.hexagon.A2.vsubub(i64, i64)
@@ -995,14 +999,14 @@ define i64 @A2_vsubub(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubub(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubub(r1:0, r3:2)
+; CHECK: = vsubub({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vsububs(i64, i64)
define i64 @A2_vsububs(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsububs(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubub(r1:0, r3:2):sat
+; CHECK: = vsubub({{.*}}, {{.*}}):sat
; Vector subtract words
declare i64 @llvm.hexagon.A2.vsubw(i64, i64)
@@ -1010,11 +1014,11 @@ define i64 @A2_vsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubw(r1:0, r3:2)
+; CHECK: = vsubw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.A2.vsubws(i64, i64)
define i64 @A2_vsubws(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.A2.vsubws(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vsubw(r1:0, r3:2):sat
+; CHECK: = vsubw({{.*}}, {{.*}}):sat
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
index 8531b2f9334b2..e8f83d01820a0 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_bit.ll
@@ -1,69 +1,72 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.2 XTYPE/BIT
+; CHECK-CALL-NOT: call
+
; Count leading
declare i32 @llvm.hexagon.S2.clbp(i64)
define i32 @S2_clbp(i64 %a) {
%z = call i32 @llvm.hexagon.S2.clbp(i64 %a)
ret i32 %z
}
-; CHECK: r0 = clb(r1:0)
+; CHECK: = clb({{.*}})
declare i32 @llvm.hexagon.S2.cl0p(i64)
define i32 @S2_cl0p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.cl0p(i64 %a)
ret i32 %z
}
-; CHECK: r0 = cl0(r1:0)
+; CHECK: = cl0({{.*}})
declare i32 @llvm.hexagon.S2.cl1p(i64)
define i32 @S2_cl1p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.cl1p(i64 %a)
ret i32 %z
}
-; CHECK: r0 = cl1(r1:0)
+; CHECK: = cl1({{.*}})
declare i32 @llvm.hexagon.S4.clbpnorm(i64)
define i32 @S4_clbpnorm(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpnorm(i64 %a)
ret i32 %z
}
-; CHECK: r0 = normamt(r1:0)
+; CHECK: = normamt({{.*}})
declare i32 @llvm.hexagon.S4.clbpaddi(i64, i32)
define i32 @S4_clbpaddi(i64 %a) {
%z = call i32 @llvm.hexagon.S4.clbpaddi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(clb(r1:0), #0)
+; CHECK: = add(clb({{.*}}), #0)
declare i32 @llvm.hexagon.S4.clbaddi(i32, i32)
define i32 @S4_clbaddi(i32 %a) {
%z = call i32 @llvm.hexagon.S4.clbaddi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(clb(r0), #0)
+; CHECK: = add(clb({{.*}}), #0)
declare i32 @llvm.hexagon.S2.cl0(i32)
define i32 @S2_cl0(i32 %a) {
%z = call i32 @llvm.hexagon.S2.cl0(i32 %a)
ret i32 %z
}
-; CHECK: r0 = cl0(r0)
+; CHECK: = cl0({{.*}})
declare i32 @llvm.hexagon.S2.cl1(i32)
define i32 @S2_cl1(i32 %a) {
%z = call i32 @llvm.hexagon.S2.cl1(i32 %a)
ret i32 %z
}
-; CHECK: r0 = cl1(r0)
+; CHECK: = cl1({{.*}})
declare i32 @llvm.hexagon.S2.clbnorm(i32)
define i32 @S4_clbnorm(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clbnorm(i32 %a)
ret i32 %z
}
-; CHECK: r0 = normamt(r0)
+; CHECK: = normamt({{.*}})
; Count population
declare i32 @llvm.hexagon.S5.popcountp(i64)
@@ -71,7 +74,7 @@ define i32 @S5_popcountp(i64 %a) {
%z = call i32 @llvm.hexagon.S5.popcountp(i64 %a)
ret i32 %z
}
-; CHECK: r0 = popcount(r1:0)
+; CHECK: = popcount({{.*}})
; Count trailing
declare i32 @llvm.hexagon.S2.ct0p(i64)
@@ -79,28 +82,28 @@ define i32 @S2_ct0p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.ct0p(i64 %a)
ret i32 %z
}
-; CHECK: r0 = ct0(r1:0)
+; CHECK: = ct0({{.*}})
declare i32 @llvm.hexagon.S2.ct1p(i64)
define i32 @S2_ct1p(i64 %a) {
%z = call i32 @llvm.hexagon.S2.ct1p(i64 %a)
ret i32 %z
}
-; CHECK: r0 = ct1(r1:0)
+; CHECK: = ct1({{.*}})
declare i32 @llvm.hexagon.S2.ct0(i32)
define i32 @S2_ct0(i32 %a) {
%z = call i32 @llvm.hexagon.S2.ct0(i32 %a)
ret i32 %z
}
-; CHECK: r0 = ct0(r0)
+; CHECK: = ct0({{.*}})
declare i32 @llvm.hexagon.S2.ct1(i32)
define i32 @S2_ct1(i32 %a) {
%z = call i32 @llvm.hexagon.S2.ct1(i32 %a)
ret i32 %z
}
-; CHECK: r0 = ct1(r0)
+; CHECK: = ct1({{.*}})
; Extract bitfield
declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32)
@@ -108,56 +111,56 @@ define i64 @S2_extractup(i64 %a) {
%z = call i64 @llvm.hexagon.S2.extractup(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = extractu(r1:0, #0, #0)
+; CHECK: = extractu({{.*}}, #0, #0)
declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32)
define i64 @S2_extractp(i64 %a) {
%z = call i64 @llvm.hexagon.S4.extractp(i64 %a, i32 0, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = extract(r1:0, #0, #0)
+; CHECK: = extract({{.*}}, #0, #0)
declare i32 @llvm.hexagon.S2.extractu(i32, i32, i32)
define i32 @S2_extractu(i32 %a) {
%z = call i32 @llvm.hexagon.S2.extractu(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = extractu(r0, #0, #0)
+; CHECK: = extractu({{.*}}, #0, #0)
declare i32 @llvm.hexagon.S4.extract(i32, i32, i32)
define i32 @S2_extract(i32 %a) {
%z = call i32 @llvm.hexagon.S4.extract(i32 %a, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = extract(r0, #0, #0)
+; CHECK: = extract({{.*}}, #0, #0)
declare i64 @llvm.hexagon.S2.extractup.rp(i64, i64)
define i64 @S2_extractup_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.extractup.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = extractu(r1:0, r3:2)
+; CHECK: = extractu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S4.extractp.rp(i64, i64)
define i64 @S4_extractp_rp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.extractp.rp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = extract(r1:0, r3:2)
+; CHECK: = extract({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.extractu.rp(i32, i64)
define i32 @S2_extractu_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.extractu.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = extractu(r0, r3:2)
+; CHECK: = extractu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S4.extract.rp(i32, i64)
define i32 @S4_extract_rp(i32 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S4.extract.rp(i32 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = extract(r0, r3:2)
+; CHECK: = extract({{.*}}, {{.*}})
; Insert bitfield
declare i64 @llvm.hexagon.S2.insertp(i64, i64, i32, i32)
@@ -165,28 +168,28 @@ define i64 @S2_insertp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.insertp(i64 %a, i64 %b, i32 0, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = insert(r3:2, #0, #0)
+; CHECK: = insert({{.*}}, #0, #0)
declare i32 @llvm.hexagon.S2.insert(i32, i32, i32, i32)
define i32 @S2_insert(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.insert(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = insert(r1, #0, #0)
+; CHECK: = insert({{.*}}, #0, #0)
declare i32 @llvm.hexagon.S2.insert.rp(i32, i32, i64)
define i32 @S2_insert_rp(i32 %a, i32 %b, i64 %c) {
%z = call i32 @llvm.hexagon.S2.insert.rp(i32 %a, i32 %b, i64 %c)
ret i32 %z
}
-; CHECK: r0 = insert(r1, r3:2)
+; CHECK: = insert({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.insertp.rp(i64, i64, i64)
define i64 @S2_insertp_rp(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.S2.insertp.rp(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 = insert(r3:2, r5:4)
+; CHECK: = insert({{.*}}, {{.*}})
; Interleave/deinterleave
declare i64 @llvm.hexagon.S2.deinterleave(i64)
@@ -194,14 +197,14 @@ define i64 @S2_deinterleave(i64 %a) {
%z = call i64 @llvm.hexagon.S2.deinterleave(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = deinterleave(r1:0)
+; CHECK: = deinterleave({{.*}})
declare i64 @llvm.hexagon.S2.interleave(i64)
define i64 @S2_interleave(i64 %a) {
%z = call i64 @llvm.hexagon.S2.interleave(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = interleave(r1:0)
+; CHECK: = interleave({{.*}})
; Linear feedback-shift operation
declare i64 @llvm.hexagon.S2.lfsp(i64, i64)
@@ -209,7 +212,7 @@ define i64 @S2_lfsp(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lfsp(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = lfs(r1:0, r3:2)
+; CHECK: = lfs({{.*}}, {{.*}})
; Masked parity
declare i32 @llvm.hexagon.S2.parityp(i64, i64)
@@ -217,14 +220,14 @@ define i32 @S2_parityp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.S2.parityp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: r0 = parity(r1:0, r3:2)
+; CHECK: = parity({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S4.parity(i32, i32)
define i32 @S4_parity(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.parity(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = parity(r0, r1)
+; CHECK: = parity({{.*}}, {{.*}})
; Bit reverse
declare i64 @llvm.hexagon.S2.brevp(i64)
@@ -232,14 +235,14 @@ define i64 @S2_brevp(i64 %a) {
%z = call i64 @llvm.hexagon.S2.brevp(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = brev(r1:0)
+; CHECK: = brev({{.*}})
declare i32 @llvm.hexagon.S2.brev(i32)
define i32 @S2_brev(i32 %a) {
%z = call i32 @llvm.hexagon.S2.brev(i32 %a)
ret i32 %z
}
-; CHECK: r0 = brev(r0)
+; CHECK: = brev({{.*}})
; Set/clear/toggle bit
declare i32 @llvm.hexagon.S2.setbit.i(i32, i32)
@@ -247,42 +250,42 @@ define i32 @S2_setbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.setbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = setbit(r0, #0)
+; CHECK: = setbit({{.*}}, #0)
declare i32 @llvm.hexagon.S2.clrbit.i(i32, i32)
define i32 @S2_clrbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.clrbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = clrbit(r0, #0)
+; CHECK: = clrbit({{.*}}, #0)
declare i32 @llvm.hexagon.S2.togglebit.i(i32, i32)
define i32 @S2_togglebit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.togglebit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = togglebit(r0, #0)
+; CHECK: = togglebit({{.*}}, #0)
declare i32 @llvm.hexagon.S2.setbit.r(i32, i32)
define i32 @S2_setbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.setbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = setbit(r0, r1)
+; CHECK: = setbit({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.clrbit.r(i32, i32)
define i32 @S2_clrbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.clrbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = clrbit(r0, r1)
+; CHECK: = clrbit({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.togglebit.r(i32, i32)
define i32 @S2_togglebit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.togglebit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = togglebit(r0, r1)
+; CHECK: = togglebit({{.*}}, {{.*}})
; Split bitfield
declare i64 @llvm.hexagon.A4.bitspliti(i32, i32)
@@ -290,14 +293,14 @@ define i64 @A4_bitspliti(i32 %a) {
%z = call i64 @llvm.hexagon.A4.bitspliti(i32 %a, i32 0)
ret i64 %z
}
-; CHECK: = bitsplit(r0, #0)
+; CHECK: = bitsplit({{.*}}, #0)
declare i64 @llvm.hexagon.A4.bitsplit(i32, i32)
define i64 @A4_bitsplit(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.A4.bitsplit(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = bitsplit(r0, r1)
+; CHECK: = bitsplit({{.*}}, {{.*}})
; Table index
declare i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32, i32, i32, i32)
@@ -305,25 +308,25 @@ define i32 @S2_tableidxb_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxb.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = tableidxb(r1, #0, #0)
+; CHECK: = tableidxb({{.*}}, #0, #0)
declare i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxh_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxh.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = tableidxh(r1, #0, #-1)
+; CHECK: = tableidxh({{.*}}, #0, #-1)
declare i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxw_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxw.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = tableidxw(r1, #0, #-2)
+; CHECK: = tableidxw({{.*}}, #0, #-2)
declare i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32, i32, i32, i32)
define i32 @S2_tableidxd_goodsyntax(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tableidxd.goodsyntax(i32 %a, i32 %b, i32 0, i32 0)
ret i32 %z
}
-; CHECK: r0 = tableidxd(r1, #0, #-3)
+; CHECK: = tableidxd({{.*}}, #0, #-3)
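+; A note on the immediates above (an inference from the checked output,
+; assuming the usual .goodsyntax adjustment): the halfword, word and
+; doubleword variants fold log2 of the element size into the printed
+; shift, so the same #0 source argument prints as #0, #-1, #-2 and #-3
+; for b, h, w and d respectively.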
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
index 57b0c5b6db561..0087883573ec2 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_complex.ll
@@ -1,34 +1,37 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.3 XTYPE/COMPLEX
+; CHECK-CALL-NOT: call
+
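+; Quick reference for the modifiers checked in this file (summarized
+; from the mnemonics; see the manual section cited above): ":sat"
+; saturates the result, ":rnd" rounds, ":<<1" and ":>>1" shift the
+; product left or right by one bit, and a trailing "*" marks a
+; complex-conjugate operand.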
; Complex add/sub halfwords
declare i64 @llvm.hexagon.S4.vxaddsubh(i64, i64)
define i64 @S4_vxaddsubh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):sat
+; CHECK: = vxaddsubh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddh(i64, i64)
define i64 @S4_vxsubaddh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):sat
+; CHECK: = vxsubaddh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.S4.vxaddsubhr(i64, i64)
define i64 @S4_vxaddsubhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxaddsubh(r1:0, r3:2):rnd:>>1:sat
+; CHECK: = vxaddsubh({{.*}}, {{.*}}):rnd:>>1:sat
declare i64 @llvm.hexagon.S4.vxsubaddhr(i64, i64)
define i64 @S4_vxsubaddhr(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddhr(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxsubaddh(r1:0, r3:2):rnd:>>1:sat
+; CHECK: = vxsubaddh({{.*}}, {{.*}}):rnd:>>1:sat
; Complex add/sub words
declare i64 @llvm.hexagon.S4.vxaddsubw(i64, i64)
@@ -36,14 +39,14 @@ define i64 @S4_vxaddsubw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxaddsubw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxaddsubw(r1:0, r3:2):sat
+; CHECK: = vxaddsubw({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.S4.vxsubaddw(i64, i64)
define i64 @S4_vxsubaddw(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S4.vxsubaddw(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vxsubaddw(r1:0, r3:2):sat
+; CHECK: = vxsubaddw({{.*}}, {{.*}}):sat
; Complex multiply
declare i64 @llvm.hexagon.M2.cmpys.s0(i32, i32)
@@ -51,84 +54,84 @@ define i64 @M2_cmpys_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpy(r0, r1):sat
+; CHECK: = cmpy({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.cmpys.s1(i32, i32)
define i64 @M2_cmpys_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpys.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpy(r0, r1):<<1:sat
+; CHECK: = cmpy({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmpysc.s0(i32, i32)
define i64 @M2_cmpysc_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpy(r0, r1*):sat
+; CHECK: = cmpy({{.*}}, {{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmpysc.s1(i32, i32)
define i64 @M2_cmpysc_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpysc.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpy(r0, r1*):<<1:sat
+; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cmacs.s0(i64, i32, i32)
define i64 @M2_cmacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpy(r2, r3):sat
+; CHECK: += cmpy({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.cmacs.s1(i64, i32, i32)
define i64 @M2_cmacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpy(r2, r3):<<1:sat
+; CHECK: += cmpy({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cnacs.s0(i64, i32, i32)
define i64 @M2_cnacs_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= cmpy(r2, r3):sat
+; CHECK: -= cmpy({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.cnacs.s1(i64, i32, i32)
define i64 @M2_cnacs_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacs.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= cmpy(r2, r3):<<1:sat
+; CHECK: -= cmpy({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.cmacsc.s0(i64, i32, i32)
define i64 @M2_cmacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpy(r2, r3*):sat
+; CHECK: += cmpy({{.*}}, {{.*}}*):sat
declare i64 @llvm.hexagon.M2.cmacsc.s1(i64, i32, i32)
define i64 @M2_cmacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpy(r2, r3*):<<1:sat
+; CHECK: += cmpy({{.*}}, {{.*}}*):<<1:sat
declare i64 @llvm.hexagon.M2.cnacsc.s0(i64, i32, i32)
define i64 @M2_cnacsc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= cmpy(r2, r3*):sat
+; CHECK: -= cmpy({{.*}}, {{.*}}*):sat
declare i64 @llvm.hexagon.M2.cnacsc.s1(i64, i32, i32)
define i64 @M2_cnacsc_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cnacsc.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= cmpy(r2, r3*):<<1:sat
+; CHECK: -= cmpy({{.*}}, {{.*}}*):<<1:sat
; Complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.cmpyi.s0(i32, i32)
@@ -136,28 +139,28 @@ define i64 @M2_cmpyi_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyi.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpyi(r0, r1)
+; CHECK: = cmpyi({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.cmpyr.s0(i32, i32)
define i64 @M2_cmpyr_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.cmpyr.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = cmpyr(r0, r1)
+; CHECK: = cmpyr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.cmaci.s0(i64, i32, i32)
define i64 @M2_cmaci_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmaci.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpyi(r2, r3)
+; CHECK: += cmpyi({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.cmacr.s0(i64, i32, i32)
define i64 @M2_cmacr_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.cmacr.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += cmpyr(r2, r3)
+; CHECK: += cmpyr({{.*}}, {{.*}})
; Complex multiply with round and pack
declare i32 @llvm.hexagon.M2.cmpyrs.s0(i32, i32)
@@ -165,28 +168,28 @@ define i32 @M2_cmpyrs_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpy(r0, r1):rnd:sat
+; CHECK: = cmpy({{.*}}, {{.*}}):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrs.s1(i32, i32)
define i32 @M2_cmpyrs_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrs.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpy(r0, r1):<<1:rnd:sat
+; CHECK: = cmpy({{.*}}, {{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s0(i32, i32)
define i32 @M2_cmpyrsc_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpy(r0, r1*):rnd:sat
+; CHECK: = cmpy({{.*}}, {{.*}}*):rnd:sat
declare i32 @llvm.hexagon.M2.cmpyrsc.s1(i32, i32)
define i32 @M2_cmpyrsc_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.cmpyrsc.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpy(r0, r1*):<<1:rnd:sat
+; CHECK: = cmpy({{.*}}, {{.*}}*):<<1:rnd:sat
; Complex multiply 32x16
declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32)
@@ -194,28 +197,28 @@ define i32 @M4_cmpyi_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpyiwh(r1:0, r2):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}}, {{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32)
define i32 @M4_cmpyi_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat
+; CHECK: = cmpyiwh({{.*}}, {{.*}}*):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.wh(i64, i32)
define i32 @M4_cmpyr_wh(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.wh(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpyrwh(r1:0, r2):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}}, {{.*}}):<<1:rnd:sat
declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32)
define i32 @M4_cmpyr_whc(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = cmpyrwh(r1:0, r2*):<<1:rnd:sat
+; CHECK: = cmpyrwh({{.*}}, {{.*}}*):<<1:rnd:sat
; Vector complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64, i64)
@@ -223,42 +226,42 @@ define i64 @M2_vcmpy_s0_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcmpyr(r1:0, r3:2):sat
+; CHECK: = vcmpyr({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64, i64)
define i64 @M2_vcmpy_s1_sat_r(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.r(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcmpyr(r1:0, r3:2):<<1:sat
+; CHECK: = vcmpyr({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64, i64)
define i64 @M2_vcmpy_s0_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s0.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcmpyi(r1:0, r3:2):sat
+; CHECK: = vcmpyi({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64, i64)
define i64 @M2_vcmpy_s1_sat_i(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vcmpy.s1.sat.i(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcmpyi(r1:0, r3:2):<<1:sat
+; CHECK: = vcmpyi({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_r(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.r(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vcmpyr(r3:2, r5:4):sat
+; CHECK: += vcmpyr({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64, i64, i64)
define i64 @M2_vcmac_s0_sat_i(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vcmac.s0.sat.i(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vcmpyi(r3:2, r5:4):sat
+; CHECK: += vcmpyi({{.*}}, {{.*}}):sat
; Vector complex conjugate
declare i64 @llvm.hexagon.A2.vconj(i64)
@@ -266,7 +269,7 @@ define i64 @A2_vconj(i64 %a) {
%z = call i64 @llvm.hexagon.A2.vconj(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vconj(r1:0):sat
+; CHECK: = vconj({{.*}}):sat
; Vector complex rotate
declare i64 @llvm.hexagon.S2.vcrotate(i64, i32)
@@ -274,7 +277,7 @@ define i64 @S2_vcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.vcrotate(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vcrotate(r1:0, r2)
+; CHECK: = vcrotate({{.*}}, {{.*}})
; Vector reduce complex multiply real or imaginary
declare i64 @llvm.hexagon.M2.vrcmpyi.s0(i64, i64)
@@ -282,56 +285,56 @@ define i64 @M2_vrcmpyi_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrcmpyi(r1:0, r3:2)
+; CHECK: = vrcmpyi({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64)
define i64 @M2_vrcmpyr_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrcmpyr(r1:0, r3:2)
+; CHECK: = vrcmpyr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64, i64)
define i64 @M2_vrcmpyi_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyi.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrcmpyi(r1:0, r3:2*)
+; CHECK: = vrcmpyi({{.*}}, {{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64, i64)
define i64 @M2_vrcmpyr_s0c(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrcmpyr.s0c(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrcmpyr(r1:0, r3:2*)
+; CHECK: = vrcmpyr({{.*}}, {{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmaci.s0(i64, i64, i64)
define i64 @M2_vrcmaci_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrcmpyi(r3:2, r5:4)
+; CHECK: += vrcmpyi({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrcmacr.s0(i64, i64, i64)
define i64 @M2_vrcmacr_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrcmpyr(r3:2, r5:4)
+; CHECK: += vrcmpyr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrcmaci.s0c(i64, i64, i64)
define i64 @M2_vrcmaci_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmaci.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrcmpyi(r3:2, r5:4*)
+; CHECK: += vrcmpyi({{.*}}, {{.*}}*)
declare i64 @llvm.hexagon.M2.vrcmacr.s0c(i64, i64, i64)
define i64 @M2_vrcmacr_s0c(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrcmacr.s0c(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrcmpyr(r3:2, r5:4*)
+; CHECK: += vrcmpyr({{.*}}, {{.*}}*)
; Vector reduce complex rotate
declare i64 @llvm.hexagon.S4.vrcrotate(i64, i32, i32)
@@ -339,11 +342,11 @@ define i64 @S4_vrcrotate(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S4.vrcrotate(i64 %a, i32 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vrcrotate(r1:0, r2, #0)
+; CHECK: = vrcrotate({{.*}}, {{.*}}, #0)
declare i64 @llvm.hexagon.S4.vrcrotate.acc(i64, i64, i32, i32)
define i64 @S4_vrcrotate_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S4.vrcrotate.acc(i64 %a, i64 %b, i32 %c, i32 0)
ret i64 %z
}
-; CHECK: r1:0 += vrcrotate(r3:2, r4, #0)
+; CHECK: += vrcrotate({{.*}}, {{.*}}, #0)
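The xtype_alu.ll hunks above all apply one mechanical rewrite: CHECK lines that pinned the exact registers chosen by -O0 register allocation (r0, r1:0, r3:2, ...) are relaxed to FileCheck wildcards, so the tests verify only the instruction shape and survive changes in register assignment. A minimal sketch of the pattern, using the vrcrotate case above:

; Before: tied to one particular register assignment
; CHECK: r1:0 += vrcrotate(r3:2, r4, #0)
; After: any destination and source registers are accepted
; CHECK: += vrcrotate({{.*}}, {{.*}}, #0)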
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
index aef8127d668cc..598d0a83206dd 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_fp.ll
@@ -1,13 +1,17 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \
+; RUN: FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.4 XTYPE/FP
+; CHECK-CALL-NOT: call
+
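The added second RUN line feeds the same llc output to FileCheck under a separate prefix, and the file-wide CHECK-CALL-NOT: call directive fails the test if any of these intrinsics is lowered to a runtime library call instead of an inline instruction (the same guard is added to xtype_mpy.ll below). A minimal self-contained sketch of the mechanism; the function name @sample is hypothetical, not from this file:

; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \
; RUN:   FileCheck -check-prefix=CHECK-CALL %s
; CHECK-CALL-NOT: call
declare float @llvm.hexagon.F2.sfadd(float, float)
define float @sample(float %a, float %b) {
  ; must lower to an inline sfadd instruction, never a call
  %z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
  ret float %z
}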
; Floating point addition
declare float @llvm.hexagon.F2.sfadd(float, float)
define float @F2_sfadd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfadd(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sfadd(r0, r1)
+; CHECK: = sfadd({{.*}}, {{.*}})
; Classify floating-point value
declare i32 @llvm.hexagon.F2.sfclass(float, i32)
@@ -15,14 +19,14 @@ define i32 @F2_sfclass(float %a) {
%z = call i32 @llvm.hexagon.F2.sfclass(float %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = sfclass(r0, #0)
+; CHECK: = sfclass({{.*}}, #0)
declare i32 @llvm.hexagon.F2.dfclass(double, i32)
define i32 @F2_dfclass(double %a) {
%z = call i32 @llvm.hexagon.F2.dfclass(double %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = dfclass(r1:0, #0)
+; CHECK: = dfclass({{.*}}, #0)
; Compare floating-point value
declare i32 @llvm.hexagon.F2.sfcmpge(float, float)
@@ -30,56 +34,56 @@ define i32 @F2_sfcmpge(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpge(float %a, float %b)
ret i32 %z
}
-; CHECK: p0 = sfcmp.ge(r0, r1)
+; CHECK: = sfcmp.ge({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.sfcmpuo(float, float)
define i32 @F2_sfcmpuo(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpuo(float %a, float %b)
ret i32 %z
}
-; CHECK: p0 = sfcmp.uo(r0, r1)
+; CHECK: = sfcmp.uo({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.sfcmpeq(float, float)
define i32 @F2_sfcmpeq(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpeq(float %a, float %b)
ret i32 %z
}
-; CHECK: p0 = sfcmp.eq(r0, r1)
+; CHECK: = sfcmp.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.sfcmpgt(float, float)
define i32 @F2_sfcmpgt(float %a, float %b) {
%z = call i32 @llvm.hexagon.F2.sfcmpgt(float %a, float %b)
ret i32 %z
}
-; CHECK: p0 = sfcmp.gt(r0, r1)
+; CHECK: = sfcmp.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.dfcmpge(double, double)
define i32 @F2_dfcmpge(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpge(double %a, double %b)
ret i32 %z
}
-; CHECK: p0 = dfcmp.ge(r1:0, r3:2)
+; CHECK: = dfcmp.ge({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.dfcmpuo(double, double)
define i32 @F2_dfcmpuo(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpuo(double %a, double %b)
ret i32 %z
}
-; CHECK: p0 = dfcmp.uo(r1:0, r3:2)
+; CHECK: = dfcmp.uo({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.dfcmpeq(double, double)
define i32 @F2_dfcmpeq(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpeq(double %a, double %b)
ret i32 %z
}
-; CHECK: p0 = dfcmp.eq(r1:0, r3:2)
+; CHECK: = dfcmp.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.F2.dfcmpgt(double, double)
define i32 @F2_dfcmpgt(double %a, double %b) {
%z = call i32 @llvm.hexagon.F2.dfcmpgt(double %a, double %b)
ret i32 %z
}
-; CHECK: p0 = dfcmp.gt(r1:0, r3:2)
+; CHECK: = dfcmp.gt({{.*}}, {{.*}})
; Convert floating-point value to other format
declare double @llvm.hexagon.F2.conv.sf2df(float)
@@ -87,14 +91,14 @@ define double @F2_conv_sf2df(float %a) {
%z = call double @llvm.hexagon.F2.conv.sf2df(float %a)
ret double %z
}
-; CHECK: = convert_sf2df(r0)
+; CHECK: = convert_sf2df({{.*}})
declare float @llvm.hexagon.F2.conv.df2sf(double)
define float @F2_conv_df2sf(double %a) {
%z = call float @llvm.hexagon.F2.conv.df2sf(double %a)
ret float %z
}
-; CHECK: r0 = convert_df2sf(r1:0)
+; CHECK: = convert_df2sf({{.*}})
; Convert integer to floating-point value
declare double @llvm.hexagon.F2.conv.ud2df(i64)
@@ -102,56 +106,56 @@ define double @F2_conv_ud2df(i64 %a) {
%z = call double @llvm.hexagon.F2.conv.ud2df(i64 %a)
ret double %z
}
-; CHECK: r1:0 = convert_ud2df(r1:0)
+; CHECK: = convert_ud2df({{.*}})
declare double @llvm.hexagon.F2.conv.d2df(i64)
define double @F2_conv_d2df(i64 %a) {
%z = call double @llvm.hexagon.F2.conv.d2df(i64 %a)
ret double %z
}
-; CHECK: r1:0 = convert_d2df(r1:0)
+; CHECK: = convert_d2df({{.*}})
declare double @llvm.hexagon.F2.conv.uw2df(i32)
define double @F2_conv_uw2df(i32 %a) {
%z = call double @llvm.hexagon.F2.conv.uw2df(i32 %a)
ret double %z
}
-; CHECK: = convert_uw2df(r0)
+; CHECK: = convert_uw2df({{.*}})
declare double @llvm.hexagon.F2.conv.w2df(i32)
define double @F2_conv_w2df(i32 %a) {
%z = call double @llvm.hexagon.F2.conv.w2df(i32 %a)
ret double %z
}
-; CHECK: = convert_w2df(r0)
+; CHECK: = convert_w2df({{.*}})
declare float @llvm.hexagon.F2.conv.ud2sf(i64)
define float @F2_conv_ud2sf(i64 %a) {
%z = call float @llvm.hexagon.F2.conv.ud2sf(i64 %a)
ret float %z
}
-; CHECK: r0 = convert_ud2sf(r1:0)
+; CHECK: = convert_ud2sf({{.*}})
declare float @llvm.hexagon.F2.conv.d2sf(i64)
define float @F2_conv_d2sf(i64 %a) {
%z = call float @llvm.hexagon.F2.conv.d2sf(i64 %a)
ret float %z
}
-; CHECK: r0 = convert_d2sf(r1:0)
+; CHECK: = convert_d2sf({{.*}})
declare float @llvm.hexagon.F2.conv.uw2sf(i32)
define float @F2_conv_uw2sf(i32 %a) {
%z = call float @llvm.hexagon.F2.conv.uw2sf(i32 %a)
ret float %z
}
-; CHECK: r0 = convert_uw2sf(r0)
+; CHECK: = convert_uw2sf({{.*}})
declare float @llvm.hexagon.F2.conv.w2sf(i32)
define float @F2_conv_w2sf(i32 %a) {
%z = call float @llvm.hexagon.F2.conv.w2sf(i32 %a)
ret float %z
}
-; CHECK: r0 = convert_w2sf(r0)
+; CHECK: = convert_w2sf({{.*}})
; Convert floating-point value to integer
declare i64 @llvm.hexagon.F2.conv.df2d(double)
@@ -159,112 +163,112 @@ define i64 @F2_conv_df2d(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2d(double %a)
ret i64 %z
}
-; CHECK: r1:0 = convert_df2d(r1:0)
+; CHECK: = convert_df2d({{.*}})
declare i64 @llvm.hexagon.F2.conv.df2ud(double)
define i64 @F2_conv_df2ud(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2ud(double %a)
ret i64 %z
}
-; CHECK: r1:0 = convert_df2ud(r1:0)
+; CHECK: = convert_df2ud({{.*}})
declare i64 @llvm.hexagon.F2.conv.df2d.chop(double)
define i64 @F2_conv_df2d_chop(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2d.chop(double %a)
ret i64 %z
}
-; CHECK: r1:0 = convert_df2d(r1:0):chop
+; CHECK: = convert_df2d({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.df2ud.chop(double)
define i64 @F2_conv_df2ud_chop(double %a) {
%z = call i64 @llvm.hexagon.F2.conv.df2ud.chop(double %a)
ret i64 %z
}
-; CHECK: r1:0 = convert_df2ud(r1:0):chop
+; CHECK: = convert_df2ud({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.sf2ud(float)
define i64 @F2_conv_sf2ud(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2ud(float %a)
ret i64 %z
}
-; CHECK: = convert_sf2ud(r0)
+; CHECK: = convert_sf2ud({{.*}})
declare i64 @llvm.hexagon.F2.conv.sf2d(float)
define i64 @F2_conv_sf2d(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2d(float %a)
ret i64 %z
}
-; CHECK: = convert_sf2d(r0)
+; CHECK: = convert_sf2d({{.*}})
declare i64 @llvm.hexagon.F2.conv.sf2d.chop(float)
define i64 @F2_conv_sf2d_chop(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2d.chop(float %a)
ret i64 %z
}
-; CHECK: = convert_sf2d(r0):chop
+; CHECK: = convert_sf2d({{.*}}):chop
declare i64 @llvm.hexagon.F2.conv.sf2ud.chop(float)
define i64 @F2_conv_sf2ud_chop(float %a) {
%z = call i64 @llvm.hexagon.F2.conv.sf2ud.chop(float %a)
ret i64 %z
}
-; CHECK: = convert_sf2ud(r0):chop
+; CHECK: = convert_sf2ud({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.df2uw(double)
define i32 @F2_conv_df2uw(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2uw(double %a)
ret i32 %z
}
-; CHECK: r0 = convert_df2uw(r1:0)
+; CHECK: = convert_df2uw({{.*}})
declare i32 @llvm.hexagon.F2.conv.df2w(double)
define i32 @F2_conv_df2w(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2w(double %a)
ret i32 %z
}
-; CHECK: r0 = convert_df2w(r1:0)
+; CHECK: = convert_df2w({{.*}})
declare i32 @llvm.hexagon.F2.conv.df2w.chop(double)
define i32 @F2_conv_df2w_chop(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2w.chop(double %a)
ret i32 %z
}
-; CHECK: r0 = convert_df2w(r1:0):chop
+; CHECK: = convert_df2w({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.df2uw.chop(double)
define i32 @F2_conv_df2uw_chop(double %a) {
%z = call i32 @llvm.hexagon.F2.conv.df2uw.chop(double %a)
ret i32 %z
}
-; CHECK: r0 = convert_df2uw(r1:0):chop
+; CHECK: = convert_df2uw({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.sf2uw(float)
define i32 @F2_conv_sf2uw(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2uw(float %a)
ret i32 %z
}
-; CHECK: r0 = convert_sf2uw(r0)
+; CHECK: = convert_sf2uw({{.*}})
declare i32 @llvm.hexagon.F2.conv.sf2uw.chop(float)
define i32 @F2_conv_sf2uw_chop(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2uw.chop(float %a)
ret i32 %z
}
-; CHECK: r0 = convert_sf2uw(r0):chop
+; CHECK: = convert_sf2uw({{.*}}):chop
declare i32 @llvm.hexagon.F2.conv.sf2w(float)
define i32 @F2_conv_sf2w(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2w(float %a)
ret i32 %z
}
-; CHECK: r0 = convert_sf2w(r0)
+; CHECK: = convert_sf2w({{.*}})
declare i32 @llvm.hexagon.F2.conv.sf2w.chop(float)
define i32 @F2_conv_sf2w_chop(float %a) {
%z = call i32 @llvm.hexagon.F2.conv.sf2w.chop(float %a)
ret i32 %z
}
-; CHECK: r0 = convert_sf2w(r0):chop
+; CHECK: = convert_sf2w({{.*}}):chop
; Floating point extreme value assistance
declare float @llvm.hexagon.F2.sffixupr(float)
@@ -272,21 +276,21 @@ define float @F2_sffixupr(float %a) {
%z = call float @llvm.hexagon.F2.sffixupr(float %a)
ret float %z
}
-; CHECK: r0 = sffixupr(r0)
+; CHECK: = sffixupr({{.*}})
declare float @llvm.hexagon.F2.sffixupn(float, float)
define float @F2_sffixupn(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupn(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sffixupn(r0, r1)
+; CHECK: = sffixupn({{.*}}, {{.*}})
declare float @llvm.hexagon.F2.sffixupd(float, float)
define float @F2_sffixupd(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sffixupd(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sffixupd(r0, r1)
+; CHECK: = sffixupd({{.*}}, {{.*}})
; Floating point fused multiply-add
declare float @llvm.hexagon.F2.sffma(float, float, float)
@@ -294,14 +298,14 @@ define float @F2_sffma(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma(float %a, float %b, float %c)
ret float %z
}
-; CHECK: r0 += sfmpy(r1, r2)
+; CHECK: += sfmpy({{.*}}, {{.*}})
declare float @llvm.hexagon.F2.sffms(float, float, float)
define float @F2_sffms(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms(float %a, float %b, float %c)
ret float %z
}
-; CHECK: r0 -= sfmpy(r1, r2)
+; CHECK: -= sfmpy({{.*}}, {{.*}})
; Floating point fused multiply-add with scaling
declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32)
@@ -309,7 +313,7 @@ define float @F2_sffma_sc(float %a, float %b, float %c, i32 %d) {
%z = call float @llvm.hexagon.F2.sffma.sc(float %a, float %b, float %c, i32 %d)
ret float %z
}
-; CHECK: r0 += sfmpy(r1, r2, p0):scale
+; CHECK: += sfmpy({{.*}}, {{.*}}, {{.*}}):scale
; Floating point fused multiply-add for library routines
declare float @llvm.hexagon.F2.sffma.lib(float, float, float)
@@ -317,14 +321,14 @@ define float @F2_sffma_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffma.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: r0 += sfmpy(r1, r2):lib
+; CHECK: += sfmpy({{.*}}, {{.*}}):lib
declare float @llvm.hexagon.F2.sffms.lib(float, float, float)
define float @F2_sffms_lib(float %a, float %b, float %c) {
%z = call float @llvm.hexagon.F2.sffms.lib(float %a, float %b, float %c)
ret float %z
}
-; CHECK: r0 -= sfmpy(r1, r2):lib
+; CHECK: -= sfmpy({{.*}}, {{.*}}):lib
; Create floating-point constant
declare float @llvm.hexagon.F2.sfimm.p(i32)
@@ -332,28 +336,28 @@ define float @F2_sfimm_p() {
%z = call float @llvm.hexagon.F2.sfimm.p(i32 0)
ret float %z
}
-; CHECK: r0 = sfmake(#0):pos
+; CHECK: = sfmake(#0):pos
declare float @llvm.hexagon.F2.sfimm.n(i32)
define float @F2_sfimm_n() {
%z = call float @llvm.hexagon.F2.sfimm.n(i32 0)
ret float %z
}
-; CHECK: r0 = sfmake(#0):neg
+; CHECK: = sfmake(#0):neg
declare double @llvm.hexagon.F2.dfimm.p(i32)
define double @F2_dfimm_p() {
%z = call double @llvm.hexagon.F2.dfimm.p(i32 0)
ret double %z
}
-; CHECK: r1:0 = dfmake(#0):pos
+; CHECK: = dfmake(#0):pos
declare double @llvm.hexagon.F2.dfimm.n(i32)
define double @F2_dfimm_n() {
%z = call double @llvm.hexagon.F2.dfimm.n(i32 0)
ret double %z
}
-; CHECK: r1:0 = dfmake(#0):neg
+; CHECK: = dfmake(#0):neg
; Floating point maximum
declare float @llvm.hexagon.F2.sfmax(float, float)
@@ -361,7 +365,7 @@ define float @F2_sfmax(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmax(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sfmax(r0, r1)
+; CHECK: = sfmax({{.*}}, {{.*}})
; Floating point minimum
declare float @llvm.hexagon.F2.sfmin(float, float)
@@ -369,7 +373,7 @@ define float @F2_sfmin(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmin(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sfmin(r0, r1)
+; CHECK: = sfmin({{.*}}, {{.*}})
; Floating point multiply
declare float @llvm.hexagon.F2.sfmpy(float, float)
@@ -377,7 +381,7 @@ define float @F2_sfmpy(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfmpy(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sfmpy(r0, r1)
+; CHECK: = sfmpy({{.*}}, {{.*}})
; Floating point subtraction
declare float @llvm.hexagon.F2.sfsub(float, float)
@@ -385,4 +389,4 @@ define float @F2_sfsub(float %a, float %b) {
%z = call float @llvm.hexagon.F2.sfsub(float %a, float %b)
ret float %z
}
-; CHECK: r0 = sfsub(r0, r1)
+; CHECK: = sfsub({{.*}}, {{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
index 6409e4e10ca26..a1490499fbf6d 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_mpy.ll
@@ -1,41 +1,45 @@
; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -O0 < %s | \
+; RUN: FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.5 XTYPE/MPY
+; CHECK-CALL-NOT: call
+
; Multiply and use lower result
declare i32 @llvm.hexagon.M4.mpyrr.addi(i32, i32, i32)
define i32 @M4_mpyrr_addi(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addi(i32 0, i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(#0, mpyi(r0, r1))
+; CHECK: = add(#0, mpyi({{.*}}, {{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addi(i32, i32, i32)
define i32 @M4_mpyri_addi(i32 %a) {
%z = call i32 @llvm.hexagon.M4.mpyri.addi(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(#0, mpyi(r0, #0))
+; CHECK: = add(#0, mpyi({{.*}}, #0))
declare i32 @llvm.hexagon.M4.mpyri.addr.u2(i32, i32, i32)
define i32 @M4_mpyri_addr_u2(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr.u2(i32 %a, i32 0, i32 %b)
ret i32 %z
}
-; CHECK: r0 = add(r0, mpyi(#0, r1))
+; CHECK: = add({{.*}}, mpyi(#0, {{.*}}))
declare i32 @llvm.hexagon.M4.mpyri.addr(i32, i32, i32)
define i32 @M4_mpyri_addr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M4.mpyri.addr(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(r0, mpyi(r1, #0))
+; CHECK: = add({{.*}}, mpyi({{.*}}, #0))
declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32)
define i32 @M4_mpyrr_addr(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mpyrr.addr(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r1 = add(r0, mpyi(r1, r2))
+; CHECK: = add({{.*}}, mpyi({{.*}}, {{.*}}))
; Vector multiply word by signed half (32x16)
declare i64 @llvm.hexagon.M2.mmpyl.s0(i64, i64)
@@ -43,56 +47,56 @@ define i64 @M2_mmpyl_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweh(r1:0, r3:2):sat
+; CHECK: = vmpyweh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyl.s1(i64, i64)
define i64 @M2_mmpyl_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyh.s0(i64, i64)
define i64 @M2_mmpyh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywoh(r1:0, r3:2):sat
+; CHECK: = vmpywoh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyh.s1(i64, i64)
define i64 @M2_mmpyh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs0(i64, i64)
define i64 @M2_mmpyl_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweh(r1:0, r3:2):rnd:sat
+; CHECK: = vmpyweh({{.*}}, {{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyl.rs1(i64, i64)
define i64 @M2_mmpyl_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyl.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweh(r1:0, r3:2):<<1:rnd:sat
+; CHECK: = vmpyweh({{.*}}, {{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs0(i64, i64)
define i64 @M2_mmpyh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywoh(r1:0, r3:2):rnd:sat
+; CHECK: = vmpywoh({{.*}}, {{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyh.rs1(i64, i64)
define i64 @M2_mmpyh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywoh(r1:0, r3:2):<<1:rnd:sat
+; CHECK: = vmpywoh({{.*}}, {{.*}}):<<1:rnd:sat
; Vector multiply word by unsigned half (32x16)
declare i64 @llvm.hexagon.M2.mmpyul.s0(i64, i64)
@@ -100,56 +104,56 @@ define i64 @M2_mmpyul_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):sat
+; CHECK: = vmpyweuh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyul.s1(i64, i64)
define i64 @M2_mmpyul_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyuh.s0(i64, i64)
define i64 @M2_mmpyuh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywouh(r1:0, r3:2):sat
+; CHECK: = vmpywouh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.mmpyuh.s1(i64, i64)
define i64 @M2_mmpyuh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs0(i64, i64)
define i64 @M2_mmpyul_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):rnd:sat
+; CHECK: = vmpyweuh({{.*}}, {{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyul.rs1(i64, i64)
define i64 @M2_mmpyul_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyul.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyweuh(r1:0, r3:2):<<1:rnd:sat
+; CHECK: = vmpyweuh({{.*}}, {{.*}}):<<1:rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs0(i64, i64)
define i64 @M2_mmpyuh_rs0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywouh(r1:0, r3:2):rnd:sat
+; CHECK: = vmpywouh({{.*}}, {{.*}}):rnd:sat
declare i64 @llvm.hexagon.M2.mmpyuh.rs1(i64, i64)
define i64 @M2_mmpyuh_rs1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.mmpyuh.rs1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpywouh(r1:0, r3:2):<<1:rnd:sat
+; CHECK: = vmpywouh({{.*}}, {{.*}}):<<1:rnd:sat
; Multiply signed halfwords
declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
@@ -157,616 +161,616 @@ define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.l)
+; CHECK: = mpy({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32)
define i64 @M2_mpyd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.l):<<1
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32)
define i64 @M2_mpyd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.h)
+; CHECK: = mpy({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32)
define i64 @M2_mpyd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.h):<<1
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32)
define i64 @M2_mpyd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.l)
+; CHECK: = mpy({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32)
define i64 @M2_mpyd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.l):<<1
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32)
define i64 @M2_mpyd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.h)
+; CHECK: = mpy({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32)
define i64 @M2_mpyd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.h):<<1
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32)
define i64 @M2_mpyd_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.l):rnd
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32)
define i64 @M2_mpyd_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.l):<<1:rnd
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32)
define i64 @M2_mpyd_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.h):rnd
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32)
define i64 @M2_mpyd_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.l, r1.h):<<1:rnd
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32)
define i64 @M2_mpyd_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.l):rnd
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32)
define i64 @M2_mpyd_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.l):<<1:rnd
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32)
define i64 @M2_mpyd_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.h):rnd
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd
declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32)
define i64 @M2_mpyd_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0.h, r1.h):<<1:rnd
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.l, r3.l)
+; CHECK: += mpy({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.l, r3.l):<<1
+; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.l, r3.h)
+; CHECK: += mpy({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.l, r3.h):<<1
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.h, r3.l)
+; CHECK: += mpy({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.h, r3.l):<<1
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.h, r3.h)
+; CHECK: += mpy({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2.h, r3.h):<<1
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.l, r3.l)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.l, r3.l):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.l, r3.h)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.l, r3.h):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.h, r3.l)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.h, r3.l):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.h, r3.h)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyd_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2.h, r3.h):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32)
define i32 @M2_mpy_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l)
+; CHECK: = mpy({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32)
define i32 @M2_mpy_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l):<<1
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32)
define i32 @M2_mpy_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h)
+; CHECK: = mpy({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32)
define i32 @M2_mpy_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h):<<1
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32)
define i32 @M2_mpy_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l)
+; CHECK: = mpy({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32)
define i32 @M2_mpy_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l):<<1
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32)
define i32 @M2_mpy_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h)
+; CHECK: = mpy({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32)
define i32 @M2_mpy_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h):<<1
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32)
define i32 @M2_mpy_sat_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l):sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32)
define i32 @M2_mpy_sat_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l):<<1:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32)
define i32 @M2_mpy_sat_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h):sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32)
define i32 @M2_mpy_sat_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h):<<1:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32)
define i32 @M2_mpy_sat_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l):sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32)
define i32 @M2_mpy_sat_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l):<<1:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32)
define i32 @M2_mpy_sat_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h):sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32)
define i32 @M2_mpy_sat_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h):<<1:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l):rnd:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h):rnd:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.l, r1.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.l, {{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l):rnd:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h):rnd:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):rnd:sat
declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32)
define i32 @M2_mpy_sat_rnd_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0.h, r1.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}.h, {{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.l)
+; CHECK: += mpy({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.l):<<1
+; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h)
+; CHECK: += mpy({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h):<<1
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l)
+; CHECK: += mpy({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l):<<1
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h)
+; CHECK: += mpy({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h):<<1
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.l):sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.l):<<1:sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h):sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.l, r2.h):<<1:sat
+; CHECK: += mpy({{.*}}.l, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l):sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.l):<<1:sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h):sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_acc_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1.h, r2.h):<<1:sat
+; CHECK: += mpy({{.*}}.h, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h)
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):<<1
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h)
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):<<1
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.l):<<1:sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.l, r2.h):<<1:sat
+; CHECK: -= mpy({{.*}}.l, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.l):<<1:sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):sat
declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32)
define i32 @M2_mpy_nac_sat_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1.h, r2.h):<<1:sat
+; CHECK: -= mpy({{.*}}.h, {{.*}}.h):<<1:sat
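Every variant in the signed-halfword block above is generated from one name grammar, so the relaxed CHECK lines differ only in operand halves and suffixes. A rough sketch of the decoding, inferred from the tests themselves rather than from the manual (suffix order varies slightly between families):

; llvm.hexagon.M2.mpy[d][.acc|.nac][.sat][.rnd].<xx>.<s0|s1>
;   acc -> "+="   nac -> "-="   (plain form -> "=")
;   xx in {ll, lh, hl, hh}: first operand takes .l/.h, second takes .l/.h
;   s1 -> ":<<1"   rnd -> ":rnd"   sat -> ":sat"
; e.g. llvm.hexagon.M2.mpy.acc.sat.hl.s1 is checked as:
; CHECK: += mpy({{.*}}.h, {{.*}}.l):<<1:sat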
; Multiply unsigned halfwords
declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32)
@@ -774,336 +778,336 @@ define i64 @M2_mpyud_ll_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.l)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32)
define i64 @M2_mpyud_ll_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.l):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32)
define i64 @M2_mpyud_lh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.h)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32)
define i64 @M2_mpyud_lh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.l, r1.h):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32)
define i64 @M2_mpyud_hl_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.l)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32)
define i64 @M2_mpyud_hl_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.l):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32)
define i64 @M2_mpyud_hh_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.h)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32)
define i64 @M2_mpyud_hh_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0.h, r1.h):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.l)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.l):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.h)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.l, r3.h):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.l)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.l):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.h)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_acc_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2.h, r3.h):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.l)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_ll_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.l):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.h)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_lh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.l, r3.h):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.l)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hl_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.l):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.h)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32)
define i64 @M2_mpyud_nac_hh_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2.h, r3.h):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32)
define i32 @M2_mpyu_ll_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.l)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32)
define i32 @M2_mpyu_ll_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.l):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32)
define i32 @M2_mpyu_lh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.h)
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32)
define i32 @M2_mpyu_lh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.l, r1.h):<<1
+; CHECK: = mpyu({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32)
define i32 @M2_mpyu_hl_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.l)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32)
define i32 @M2_mpyu_hl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.l):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32)
define i32 @M2_mpyu_hh_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.h)
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32)
define i32 @M2_mpyu_hh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0.h, r1.h):<<1
+; CHECK: = mpyu({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.l)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.l):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.h)
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.l, r2.h):<<1
+; CHECK: += mpyu({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.l)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.l):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.h)
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_acc_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpyu(r1.h, r2.h):<<1
+; CHECK: += mpyu({{.*}}.h, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.l)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_ll_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.l):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.h)
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_lh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.l, r2.h):<<1
+; CHECK: -= mpyu({{.*}}.l, {{.*}}.h):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.l)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l)
declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hl_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.l):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.l):<<1
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s0(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.h)
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h)
declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32)
define i32 @M2_mpyu_nac_hh_s1(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpyu(r1.h, r2.h):<<1
+; CHECK: -= mpyu({{.*}}.h, {{.*}}.h):<<1
; Polynomial multiply words
declare i64 @llvm.hexagon.M4.pmpyw(i32, i32)
@@ -1111,14 +1115,14 @@ define i64 @M4_pmpyw(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.pmpyw(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = pmpyw(r0, r1)
+; CHECK: = pmpyw({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32)
define i64 @M4_pmpyw_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 ^= pmpyw(r2, r3)
+; CHECK: ^= pmpyw({{.*}}, {{.*}})
; Vector reduce multiply word by signed half
declare i64 @llvm.hexagon.M4.vrmpyoh.s0(i64, i64)
@@ -1126,56 +1130,56 @@ define i64 @M4_vrmpyoh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpywoh(r1:0, r3:2)
+; CHECK: = vrmpywoh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.vrmpyoh.s1(i64, i64)
define i64 @M4_vrmpyoh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpywoh(r1:0, r3:2):<<1
+; CHECK: = vrmpywoh({{.*}}, {{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.s0(i64, i64)
define i64 @M4_vrmpyeh_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpyweh(r1:0, r3:2)
+; CHECK: = vrmpyweh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.vrmpyeh.s1(i64, i64)
define i64 @M4_vrmpyeh_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpyweh(r1:0, r3:2):<<1
+; CHECK: = vrmpyweh({{.*}}, {{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpywoh(r3:2, r5:4)
+; CHECK: += vrmpywoh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyoh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyoh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpywoh(r3:2, r5:4):<<1
+; CHECK: += vrmpywoh({{.*}}, {{.*}}):<<1
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpyweh(r3:2, r5:4)
+; CHECK: += vrmpyweh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64, i64, i64)
define i64 @M4_vrmpyeh_acc_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M4.vrmpyeh.acc.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpyweh(r3:2, r5:4):<<1
+; CHECK: += vrmpyweh({{.*}}, {{.*}}):<<1
; Multiply and use upper result
declare i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32, i32)
@@ -1183,84 +1187,84 @@ define i32 @M2_dpmpyss_rnd_s0(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.dpmpyss.rnd.s0(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):rnd
+; CHECK: = mpy({{.*}}, {{.*}}):rnd
declare i32 @llvm.hexagon.M2.mpyu.up(i32, i32)
define i32 @M2_mpyu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpyu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpyu(r0, r1)
+; CHECK: = mpyu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M2.mpysu.up(i32, i32)
define i32 @M2_mpysu_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpysu.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpysu(r0, r1)
+; CHECK: = mpysu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M2.hmmpyh.s1(i32, i32)
define i32 @M2_hmmpyh_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.h):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.s1(i32, i32)
define i32 @M2_hmmpyl_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.l):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyh.rs1(i32, i32)
define i32 @M2_hmmpyh_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyh.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.h):<<1:rnd:sat
+; CHECK: = mpy({{.*}}, {{.*}}.h):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32, i32)
define i32 @M2_mpy_up_s1_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):<<1:sat
+; CHECK: = mpy({{.*}}, {{.*}}):<<1:sat
declare i32 @llvm.hexagon.M2.hmmpyl.rs1(i32, i32)
define i32 @M2_hmmpyl_rs1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.hmmpyl.rs1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1.l):<<1:rnd:sat
+; CHECK: = mpy({{.*}}, {{.*}}.l):<<1:rnd:sat
declare i32 @llvm.hexagon.M2.mpy.up(i32, i32)
define i32 @M2_mpy_up(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1)
+; CHECK: = mpy({{.*}}, {{.*}})
declare i32 @llvm.hexagon.M2.mpy.up.s1(i32, i32)
define i32 @M2_mpy_up_s1(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.M2.mpy.up.s1(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = mpy(r0, r1):<<1
+; CHECK: = mpy({{.*}}, {{.*}}):<<1
declare i32 @llvm.hexagon.M4.mac.up.s1.sat(i32, i32, i32)
define i32 @M4_mac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.mac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += mpy(r1, r2):<<1:sat
+; CHECK: += mpy({{.*}}, {{.*}}):<<1:sat
declare i32 @llvm.hexagon.M4.nac.up.s1.sat(i32, i32, i32)
define i32 @M4_nac_up_s1_sat(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.M4.nac.up.s1.sat(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= mpy(r1, r2):<<1:sat
+; CHECK: -= mpy({{.*}}, {{.*}}):<<1:sat
; Multiply and use full result
declare i64 @llvm.hexagon.M2.dpmpyss.s0(i32, i32)
@@ -1268,42 +1272,42 @@ define i64 @M2_dpmpyss_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpy(r0, r1)
+; CHECK: = mpy({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.s0(i32, i32)
define i64 @M2_dpmpyuu_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = mpyu(r0, r1)
+; CHECK: = mpyu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyss_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpy(r2, r3)
+; CHECK: += mpy({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyss_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyss.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpy(r2, r3)
+; CHECK: -= mpy({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_acc_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.acc.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += mpyu(r2, r3)
+; CHECK: += mpyu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64, i32, i32)
define i64 @M2_dpmpyuu_nac_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.dpmpyuu.nac.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= mpyu(r2, r3)
+; CHECK: -= mpyu({{.*}}, {{.*}})
; Vector dual multiply
declare i64 @llvm.hexagon.M2.vdmpys.s0(i64, i64)
@@ -1311,14 +1315,14 @@ define i64 @M2_vdmpys_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vdmpy(r1:0, r3:2):sat
+; CHECK: = vdmpy({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vdmpys.s1(i64, i64)
define i64 @M2_vdmpys_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vdmpys.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vdmpy(r1:0, r3:2):<<1:sat
+; CHECK: = vdmpy({{.*}}, {{.*}}):<<1:sat
; Vector reduce multiply bytes
declare i64 @llvm.hexagon.M5.vrmpybuu(i64, i64)
@@ -1326,28 +1330,28 @@ define i64 @M5_vrmpybuu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybuu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpybu(r1:0, r3:2)
+; CHECK: = vrmpybu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vrmpybsu(i64, i64)
define i64 @M5_vrmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vrmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpybsu(r1:0, r3:2)
+; CHECK: = vrmpybsu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vrmacbuu(i64, i64, i64)
define i64 @M5_vrmacbuu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbuu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpybu(r3:2, r5:4)
+; CHECK: += vrmpybu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vrmacbsu(i64, i64, i64)
define i64 @M5_vrmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vrmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpybsu(r3:2, r5:4)
+; CHECK: += vrmpybsu({{.*}}, {{.*}})
; Vector dual multiply signed by unsigned bytes
declare i64 @llvm.hexagon.M5.vdmpybsu(i64, i64)
@@ -1355,14 +1359,14 @@ define i64 @M5_vdmpybsu(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M5.vdmpybsu(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vdmpybsu(r1:0, r3:2):sat
+; CHECK: = vdmpybsu({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M5.vdmacbsu(i64, i64, i64)
define i64 @M5_vdmacbsu(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M5.vdmacbsu(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vdmpybsu(r3:2, r5:4):sat
+; CHECK: += vdmpybsu({{.*}}, {{.*}}):sat
; Vector multiply even halfwords
declare i64 @llvm.hexagon.M2.vmpy2es.s0(i64, i64)
@@ -1370,35 +1374,35 @@ define i64 @M2_vmpy2es_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyeh(r1:0, r3:2):sat
+; CHECK: = vmpyeh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2es.s1(i64, i64)
define i64 @M2_vmpy2es_s1(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2es.s1(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyeh(r1:0, r3:2):<<1:sat
+; CHECK: = vmpyeh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2es(i64, i64, i64)
define i64 @M2_vmac2es(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4)
+; CHECK: += vmpyeh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vmac2es.s0(i64, i64, i64)
define i64 @M2_vmac2es_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4):sat
+; CHECK: += vmpyeh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2es.s1(i64, i64, i64)
define i64 @M2_vmac2es_s1(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2es.s1(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyeh(r3:2, r5:4):<<1:sat
+; CHECK: += vmpyeh({{.*}}, {{.*}}):<<1:sat
; Vector multiply halfwords
declare i64 @llvm.hexagon.M2.vmpy2s.s0(i32, i32)
@@ -1406,35 +1410,35 @@ define i64 @M2_vmpy2s_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyh(r0, r1):sat
+; CHECK: = vmpyh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2s.s1(i32, i32)
define i64 @M2_vmpy2s_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2s.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyh(r0, r1):<<1:sat
+; CHECK: = vmpyh({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2(i64, i32, i32)
define i64 @M2_vmac2(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3)
+; CHECK: += vmpyh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vmac2s.s0(i64, i32, i32)
define i64 @M2_vmac2s_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3):sat
+; CHECK: += vmpyh({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2s.s1(i64, i32, i32)
define i64 @M2_vmac2s_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2s.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyh(r2, r3):<<1:sat
+; CHECK: += vmpyh({{.*}}, {{.*}}):<<1:sat
; Vector multiply halfwords signed by unsigned
declare i64 @llvm.hexagon.M2.vmpy2su.s0(i32, i32)
@@ -1442,28 +1446,28 @@ define i64 @M2_vmpy2su_s0(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s0(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyhsu(r0, r1):sat
+; CHECK: = vmpyhsu({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmpy2su.s1(i32, i32)
define i64 @M2_vmpy2su_s1(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M2.vmpy2su.s1(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpyhsu(r0, r1):<<1:sat
+; CHECK: = vmpyhsu({{.*}}, {{.*}}):<<1:sat
declare i64 @llvm.hexagon.M2.vmac2su.s0(i64, i32, i32)
define i64 @M2_vmac2su_s0(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s0(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyhsu(r2, r3):sat
+; CHECK: += vmpyhsu({{.*}}, {{.*}}):sat
declare i64 @llvm.hexagon.M2.vmac2su.s1(i64, i32, i32)
define i64 @M2_vmac2su_s1(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M2.vmac2su.s1(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpyhsu(r2, r3):<<1:sat
+; CHECK: += vmpyhsu({{.*}}, {{.*}}):<<1:sat
; Vector reduce multiply halfwords
declare i64 @llvm.hexagon.M2.vrmpy.s0(i64, i64)
@@ -1471,14 +1475,14 @@ define i64 @M2_vrmpy_s0(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.M2.vrmpy.s0(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vrmpyh(r1:0, r3:2)
+; CHECK: = vrmpyh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64)
define i64 @M2_vrmac_s0(i64 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.M2.vrmac.s0(i64 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: r1:0 += vrmpyh(r3:2, r5:4)
+; CHECK: += vrmpyh({{.*}}, {{.*}})
; Vector multiply bytes
declare i64 @llvm.hexagon.M5.vmpybsu(i32, i32)
@@ -1486,28 +1490,28 @@ define i64 @M2_vmpybsu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybsu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpybsu(r0, r1)
+; CHECK: = vmpybsu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vmpybuu(i32, i32)
define i64 @M2_vmpybuu(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M5.vmpybuu(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vmpybu(r0, r1)
+; CHECK: = vmpybu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vmacbuu(i64, i32, i32)
define i64 @M2_vmacbuu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbuu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpybu(r2, r3)
+; CHECK: += vmpybu({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M5.vmacbsu(i64, i32, i32)
define i64 @M2_vmacbsu(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M5.vmacbsu(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += vmpybsu(r2, r3)
+; CHECK: += vmpybsu({{.*}}, {{.*}})
; Vector polynomial multiply halfwords
declare i64 @llvm.hexagon.M4.vpmpyh(i32, i32)
@@ -1515,11 +1519,11 @@ define i64 @M4_vpmpyh(i32 %a, i32 %b) {
%z = call i64 @llvm.hexagon.M4.vpmpyh(i32 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vpmpyh(r0, r1)
+; CHECK: = vpmpyh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.M4.vpmpyh.acc(i64, i32, i32)
define i64 @M4_vpmpyh_acc(i64 %a, i32 %b, i32 %c) {
%z = call i64 @llvm.hexagon.M4.vpmpyh.acc(i64 %a, i32 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 ^= vpmpyh(r2, r3)
+; CHECK: ^= vpmpyh({{.*}}, {{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
index 0b761323e31e1..3e044e3838dec 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -1,41 +1,44 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM
+; CHECK-CALL-NOT: call
+
; Saturate
declare i32 @llvm.hexagon.A2.sat(i64)
define i32 @A2_sat(i64 %a) {
%z = call i32 @llvm.hexagon.A2.sat(i64 %a)
ret i32 %z
}
-; CHECK: r0 = sat(r1:0)
+; CHECK: = sat({{.*}})
declare i32 @llvm.hexagon.A2.sath(i32)
define i32 @A2_sath(i32 %a) {
%z = call i32 @llvm.hexagon.A2.sath(i32 %a)
ret i32 %z
}
-; CHECK: r0 = sath(r0)
+; CHECK: = sath({{.*}})
declare i32 @llvm.hexagon.A2.satuh(i32)
define i32 @A2_satuh(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
ret i32 %z
}
-; CHECK: r0 = satuh(r0)
+; CHECK: = satuh({{.*}})
declare i32 @llvm.hexagon.A2.satub(i32)
define i32 @A2_satub(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satub(i32 %a)
ret i32 %z
}
-; CHECK: r0 = satub(r0)
+; CHECK: = satub({{.*}})
declare i32 @llvm.hexagon.A2.satb(i32)
define i32 @A2_satb(i32 %a) {
%z = call i32 @llvm.hexagon.A2.satb(i32 %a)
ret i32 %z
}
-; CHECK: r0 = satb(r0)
+; CHECK: = satb({{.*}})
; Swizzle bytes
declare i32 @llvm.hexagon.A2.swiz(i32)
@@ -43,7 +46,7 @@ define i32 @A2_swiz(i32 %a) {
%z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
ret i32 %z
}
-; CHECK: r0 = swiz(r0)
+; CHECK: = swiz({{.*}})
; Vector round and pack
declare i32 @llvm.hexagon.S2.vrndpackwh(i64)
@@ -51,14 +54,14 @@ define i32 @S2_vrndpackwh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vrndpackwh(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vrndwh(r1:0)
+; CHECK: = vrndwh({{.*}})
declare i32 @llvm.hexagon.S2.vrndpackwhs(i64)
define i32 @S2_vrndpackwhs(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vrndpackwhs(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vrndwh(r1:0):sat
+; CHECK: = vrndwh({{.*}}):sat
; Vector saturate and pack
declare i32 @llvm.hexagon.S2.vsathub(i64)
@@ -66,42 +69,42 @@ define i32 @S2_vsathub(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsathub(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vsathub(r1:0)
+; CHECK: = vsathub({{.*}})
declare i32 @llvm.hexagon.S2.vsatwh(i64)
define i32 @S2_vsatwh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsatwh(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vsatwh(r1:0)
+; CHECK: = vsatwh({{.*}})
declare i32 @llvm.hexagon.S2.vsatwuh(i64)
define i32 @S2_vsatwuh(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsatwuh(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vsatwuh(r1:0)
+; CHECK: = vsatwuh({{.*}})
declare i32 @llvm.hexagon.S2.vsathb(i64)
define i32 @S2_vsathb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vsathb(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vsathb(r1:0)
+; CHECK: = vsathb({{.*}})
declare i32 @llvm.hexagon.S2.svsathb(i32)
define i32 @S2_svsathb(i32 %a) {
%z = call i32 @llvm.hexagon.S2.svsathb(i32 %a)
ret i32 %z
}
-; CHECK: r0 = vsathb(r0)
+; CHECK: = vsathb({{.*}})
declare i32 @llvm.hexagon.S2.svsathub(i32)
define i32 @S2_svsathub(i32 %a) {
%z = call i32 @llvm.hexagon.S2.svsathub(i32 %a)
ret i32 %z
}
-; CHECK: r0 = vsathub(r0)
+; CHECK: = vsathub({{.*}})
; Vector saturate without pack
declare i64 @llvm.hexagon.S2.vsathub.nopack(i64)
@@ -109,28 +112,28 @@ define i64 @S2_vsathub_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsathub.nopack(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vsathub(r1:0)
+; CHECK: = vsathub({{.*}})
declare i64 @llvm.hexagon.S2.vsatwuh.nopack(i64)
define i64 @S2_vsatwuh_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsatwuh.nopack(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vsatwuh(r1:0)
+; CHECK: = vsatwuh({{.*}})
declare i64 @llvm.hexagon.S2.vsatwh.nopack(i64)
define i64 @S2_vsatwh_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsatwh.nopack(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vsatwh(r1:0)
+; CHECK: = vsatwh({{.*}})
declare i64 @llvm.hexagon.S2.vsathb.nopack(i64)
define i64 @S2_vsathb_nopack(i64 %a) {
%z = call i64 @llvm.hexagon.S2.vsathb.nopack(i64 %a)
ret i64 %z
}
-; CHECK: r1:0 = vsathb(r1:0)
+; CHECK: = vsathb({{.*}})
; Vector shuffle
declare i64 @llvm.hexagon.S2.shuffeb(i64, i64)
@@ -138,28 +141,28 @@ define i64 @S2_shuffeb(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeb(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = shuffeb(r1:0, r3:2)
+; CHECK: = shuffeb({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.shuffob(i64, i64)
define i64 @S2_shuffob(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffob(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = shuffob(r1:0, r3:2)
+; CHECK: = shuffob({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.shuffeh(i64, i64)
define i64 @S2_shuffeh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffeh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = shuffeh(r1:0, r3:2)
+; CHECK: = shuffeh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.shuffoh(i64, i64)
define i64 @S2_shuffoh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.shuffoh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = shuffoh(r1:0, r3:2)
+; CHECK: = shuffoh({{.*}}, {{.*}})
; Vector splat bytes
declare i32 @llvm.hexagon.S2.vsplatrb(i32)
@@ -167,7 +170,7 @@ define i32 @S2_vsplatrb(i32 %a) {
%z = call i32 @llvm.hexagon.S2.vsplatrb(i32 %a)
ret i32 %z
}
-; CHECK: r0 = vsplatb(r0)
+; CHECK: = vsplatb({{.*}})
; Vector splat halfwords
declare i64 @llvm.hexagon.S2.vsplatrh(i32)
@@ -175,7 +178,7 @@ define i64 @S2_vsplatrh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsplatrh(i32 %a)
ret i64 %z
}
-; CHECK: = vsplath(r0)
+; CHECK: = vsplath({{.*}})
; Vector splice
declare i64 @llvm.hexagon.S2.vspliceib(i64, i64, i32)
@@ -183,14 +186,14 @@ define i64 @S2_vspliceib(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vspliceib(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vspliceb(r1:0, r3:2, #0)
+; CHECK: = vspliceb({{.*}}, {{.*}}, #0)
declare i64 @llvm.hexagon.S2.vsplicerb(i64, i64, i32)
define i64 @S2_vsplicerb(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.vsplicerb(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 = vspliceb(r1:0, r3:2, p0)
+; CHECK: = vspliceb({{.*}}, {{.*}}, {{.*}})
; Vector sign extend
declare i64 @llvm.hexagon.S2.vsxtbh(i32)
@@ -198,14 +201,14 @@ define i64 @S2_vsxtbh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsxtbh(i32 %a)
ret i64 %z
}
-; CHECK: = vsxtbh(r0)
+; CHECK: = vsxtbh({{.*}})
declare i64 @llvm.hexagon.S2.vsxthw(i32)
define i64 @S2_vsxthw(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vsxthw(i32 %a)
ret i64 %z
}
-; CHECK: = vsxthw(r0)
+; CHECK: = vsxthw({{.*}})
; Vector truncate
declare i32 @llvm.hexagon.S2.vtrunohb(i64)
@@ -213,28 +216,28 @@ define i32 @S2_vtrunohb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vtrunohb(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vtrunohb(r1:0)
+; CHECK: = vtrunohb({{.*}})
declare i32 @llvm.hexagon.S2.vtrunehb(i64)
define i32 @S2_vtrunehb(i64 %a) {
%z = call i32 @llvm.hexagon.S2.vtrunehb(i64 %a)
ret i32 %z
}
-; CHECK: r0 = vtrunehb(r1:0)
+; CHECK: = vtrunehb({{.*}})
declare i64 @llvm.hexagon.S2.vtrunowh(i64, i64)
define i64 @S2_vtrunowh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunowh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vtrunowh(r1:0, r3:2)
+; CHECK: = vtrunowh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64)
define i64 @S2_vtrunewh(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.vtrunewh(i64 %a, i64 %b)
ret i64 %z
}
-; CHECK: r1:0 = vtrunewh(r1:0, r3:2)
+; CHECK: = vtrunewh({{.*}}, {{.*}})
; Vector zero extend
declare i64 @llvm.hexagon.S2.vzxtbh(i32)
@@ -242,11 +245,11 @@ define i64 @S2_vzxtbh(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vzxtbh(i32 %a)
ret i64 %z
}
-; CHECK: = vzxtbh(r0)
+; CHECK: = vzxtbh({{.*}})
declare i64 @llvm.hexagon.S2.vzxthw(i32)
define i64 @S2_vzxthw(i32 %a) {
%z = call i64 @llvm.hexagon.S2.vzxthw(i32 %a)
ret i64 %z
}
-; CHECK: = vzxthw(r0)
+; CHECK: = vzxthw({{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
index 96e63d8d77908..f06339b9a85a3 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_pred.ll
@@ -1,48 +1,51 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.7 XTYPE/PRED
+; CHECK-CALL-NOT: call
+
; Compare byte
declare i32 @llvm.hexagon.A4.cmpbgt(i32, i32)
define i32 @A4_cmpbgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmpb.gt(r0, r1)
+; CHECK: = cmpb.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmpbeq(i32, i32)
define i32 @A4_cmpbeq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbeq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmpb.eq(r0, r1)
+; CHECK: = cmpb.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmpbgtu(i32, i32)
define i32 @A4_cmpbgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpbgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmpb.gtu(r0, r1)
+; CHECK: = cmpb.gtu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmpbgti(i32, i32)
define i32 @A4_cmpbgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmpb.gt(r0, #0)
+; CHECK: = cmpb.gt({{.*}}, #0)
declare i32 @llvm.hexagon.A4.cmpbeqi(i32, i32)
define i32 @A4_cmpbeqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbeqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmpb.eq(r0, #0)
+; CHECK: = cmpb.eq({{.*}}, #0)
declare i32 @llvm.hexagon.A4.cmpbgtui(i32, i32)
define i32 @A4_cmpbgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpbgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmpb.gtu(r0, #0)
+; CHECK: = cmpb.gtu({{.*}}, #0)
; Compare half
declare i32 @llvm.hexagon.A4.cmphgt(i32, i32)
@@ -50,42 +53,42 @@ define i32 @A4_cmphgt(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgt(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmph.gt(r0, r1)
+; CHECK: = cmph.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmpheq(i32, i32)
define i32 @A4_cmpheq(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmpheq(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmph.eq(r0, r1)
+; CHECK: = cmph.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmphgtu(i32, i32)
define i32 @A4_cmphgtu(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.cmphgtu(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = cmph.gtu(r0, r1)
+; CHECK: = cmph.gtu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.cmphgti(i32, i32)
define i32 @A4_cmphgti(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgti(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmph.gt(r0, #0)
+; CHECK: = cmph.gt({{.*}}, #0)
declare i32 @llvm.hexagon.A4.cmpheqi(i32, i32)
define i32 @A4_cmpheqi(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmpheqi(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmph.eq(r0, #0)
+; CHECK: = cmph.eq({{.*}}, #0)
declare i32 @llvm.hexagon.A4.cmphgtui(i32, i32)
define i32 @A4_cmphgtui(i32 %a) {
%z = call i32 @llvm.hexagon.A4.cmphgtui(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = cmph.gtu(r0, #0)
+; CHECK: = cmph.gtu({{.*}}, #0)
; Compare doublewords
declare i32 @llvm.hexagon.C2.cmpgtp(i64, i64)
@@ -93,21 +96,21 @@ define i32 @C2_cmpgtp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = cmp.gt(r1:0, r3:2)
+; CHECK: = cmp.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C2.cmpeqp(i64, i64)
define i32 @C2_cmpeqp(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpeqp(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = cmp.eq(r1:0, r3:2)
+; CHECK: = cmp.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C2.cmpgtup(i64, i64)
define i32 @C2_cmpgtup(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.C2.cmpgtup(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = cmp.gtu(r1:0, r3:2)
+; CHECK: = cmp.gtu({{.*}}, {{.*}})
; Compare bitmask
declare i32 @llvm.hexagon.C2.bitsclri(i32, i32)
@@ -115,42 +118,42 @@ define i32 @C2_bitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C2.bitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = bitsclr(r0, #0)
+; CHECK: = bitsclr({{.*}}, #0)
declare i32 @llvm.hexagon.C4.nbitsclri(i32, i32)
define i32 @C4_nbitsclri(i32 %a) {
%z = call i32 @llvm.hexagon.C4.nbitsclri(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = !bitsclr(r0, #0)
+; CHECK: = !bitsclr({{.*}}, #0)
declare i32 @llvm.hexagon.C2.bitsset(i32, i32)
define i32 @C2_bitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = bitsset(r0, r1)
+; CHECK: = bitsset({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.nbitsset(i32, i32)
define i32 @C4_nbitsset(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsset(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = !bitsset(r0, r1)
+; CHECK: = !bitsset({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C2.bitsclr(i32, i32)
define i32 @C2_bitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.bitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = bitsclr(r0, r1)
+; CHECK: = bitsclr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.C4.nbitsclr(i32, i32)
define i32 @C4_nbitsclr(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C4.nbitsclr(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = !bitsclr(r0, r1)
+; CHECK: = !bitsclr({{.*}}, {{.*}})
; Mask generate from predicate
declare i64 @llvm.hexagon.C2.mask(i32)
@@ -158,7 +161,7 @@ define i64 @C2_mask(i32 %a) {
%z = call i64 @llvm.hexagon.C2.mask(i32 %a)
ret i64 %z
}
-; CHECK: = mask(p0)
+; CHECK: = mask({{.*}})
; Check for TLB match
declare i32 @llvm.hexagon.A4.tlbmatch(i64, i32)
@@ -166,7 +169,7 @@ define i32 @A4_tlbmatch(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.A4.tlbmatch(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = tlbmatch(r1:0, r2)
+; CHECK: = tlbmatch({{.*}}, {{.*}})
; Test bit
declare i32 @llvm.hexagon.S2.tstbit.i(i32, i32)
@@ -174,28 +177,28 @@ define i32 @S2_tstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S2.tstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = tstbit(r0, #0)
+; CHECK: = tstbit({{.*}}, #0)
declare i32 @llvm.hexagon.S4.ntstbit.i(i32, i32)
define i32 @S4_ntstbit_i(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ntstbit.i(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = !tstbit(r0, #0)
+; CHECK: = !tstbit({{.*}}, #0)
declare i32 @llvm.hexagon.S2.tstbit.r(i32, i32)
define i32 @S2_tstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.tstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = tstbit(r0, r1)
+; CHECK: = tstbit({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S4.ntstbit.r(i32, i32)
define i32 @S4_ntstbit_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S4.ntstbit.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: p0 = !tstbit(r0, r1)
+; CHECK: = !tstbit({{.*}}, {{.*}})
; Vector compare halfwords
declare i32 @llvm.hexagon.A2.vcmpheq(i64, i64)
@@ -203,42 +206,42 @@ define i32 @A2_vcmpheq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpheq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmph.eq(r1:0, r3:2)
+; CHECK: = vcmph.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.vcmphgt(i64, i64)
define i32 @A2_vcmphgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmph.gt(r1:0, r3:2)
+; CHECK: = vcmph.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.vcmphgtu(i64, i64)
define i32 @A2_vcmphgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmphgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmph.gtu(r1:0, r3:2)
+; CHECK: = vcmph.gtu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.vcmpheqi(i64, i32)
define i32 @A4_vcmpheqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpheqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmph.eq(r1:0, #0)
+; CHECK: = vcmph.eq({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmphgti(i64, i32)
define i32 @A4_vcmphgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmph.gt(r1:0, #0)
+; CHECK: = vcmph.gt({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmphgtui(i64, i32)
define i32 @A4_vcmphgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmphgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmph.gtu(r1:0, #0)
+; CHECK: = vcmph.gtu({{.*}}, #0)
; Vector compare bytes for any match
declare i32 @llvm.hexagon.A4.vcmpbeq.any(i64, i64)
@@ -246,7 +249,7 @@ define i32 @A4_vcmpbeq_any(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbeq.any(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = any8(vcmpb.eq(r1:0, r3:2))
+; CHECK: = any8(vcmpb.eq({{.*}}, {{.*}}))
; Vector compare bytes
declare i32 @llvm.hexagon.A2.vcmpbeq(i64, i64)
@@ -254,42 +257,42 @@ define i32 @A2_vcmpbeq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbeq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpb.eq(r1:0, r3:2)
+; CHECK: = vcmpb.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.vcmpbgtu(i64, i64)
define i32 @A2_vcmpbgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpbgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpb.gtu(r1:0, r3:2)
+; CHECK: = vcmpb.gtu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.vcmpbgt(i64, i64)
define i32 @A4_vcmpbgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A4.vcmpbgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpb.gt(r1:0, r3:2)
+; CHECK: = vcmpb.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.vcmpbeqi(i64, i32)
define i32 @A4_vcmpbeqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbeqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpb.eq(r1:0, #0)
+; CHECK: = vcmpb.eq({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmpbgti(i64, i32)
define i32 @A4_vcmpbgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpb.gt(r1:0, #0)
+; CHECK: = vcmpb.gt({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmpbgtui(i64, i32)
define i32 @A4_vcmpbgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpbgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpb.gtu(r1:0, #0)
+; CHECK: = vcmpb.gtu({{.*}}, #0)
; Vector compare words
declare i32 @llvm.hexagon.A2.vcmpweq(i64, i64)
@@ -297,42 +300,42 @@ define i32 @A2_vcmpweq(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpweq(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpw.eq(r1:0, r3:2)
+; CHECK: = vcmpw.eq({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgt(i64, i64)
define i32 @A2_vcmpwgt(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgt(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpw.gt(r1:0, r3:2)
+; CHECK: = vcmpw.gt({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A2.vcmpwgtu(i64, i64)
define i32 @A2_vcmpwgtu(i64 %a, i64 %b) {
%z = call i32 @llvm.hexagon.A2.vcmpwgtu(i64 %a, i64 %b)
ret i32 %z
}
-; CHECK: p0 = vcmpw.gtu(r1:0, r3:2)
+; CHECK: = vcmpw.gtu({{.*}}, {{.*}})
declare i32 @llvm.hexagon.A4.vcmpweqi(i64, i32)
define i32 @A4_vcmpweqi(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpweqi(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpw.eq(r1:0, #0)
+; CHECK: = vcmpw.eq({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmpwgti(i64, i32)
define i32 @A4_vcmpwgti(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgti(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpw.gt(r1:0, #0)
+; CHECK: = vcmpw.gt({{.*}}, #0)
declare i32 @llvm.hexagon.A4.vcmpwgtui(i64, i32)
define i32 @A4_vcmpwgtui(i64 %a) {
%z = call i32 @llvm.hexagon.A4.vcmpwgtui(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: p0 = vcmpw.gtu(r1:0, #0)
+; CHECK: = vcmpw.gtu({{.*}}, #0)
; Viterbi pack even and odd predicate bits
declare i32 @llvm.hexagon.C2.vitpack(i32, i32)
@@ -340,7 +343,7 @@ define i32 @C2_vitpack(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.C2.vitpack(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vitpack(p1, p0)
+; CHECK: = vitpack({{.*}}, {{.*}})
; Vector mux
declare i64 @llvm.hexagon.C2.vmux(i32, i64, i64)
@@ -348,4 +351,4 @@ define i64 @C2_vmux(i32 %a, i64 %b, i64 %c) {
%z = call i64 @llvm.hexagon.C2.vmux(i32 %a, i64 %b, i64 %c)
ret i64 %z
}
-; CHECK: = vmux(p0, r3:2, r5:4)
+; CHECK: = vmux({{.*}}, {{.*}}, {{.*}})
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
index c84999bf94fd8..1a65f44c19546 100644
--- a/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_shift.ll
@@ -1,48 +1,51 @@
; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
; Hexagon Programmer's Reference Manual 11.10.8 XTYPE/SHIFT
+; CHECK-CALL-NOT: call
+
; Shift by immediate
declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32)
define i64 @S2_asr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, #0)
+; CHECK: = asr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p(i64, i32)
define i64 @S2_lsr_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = lsr(r1:0, #0)
+; CHECK: = lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p(i64, i32)
define i64 @S2_asl_i_p(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.p(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = asl(r1:0, #0)
+; CHECK: = asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.i.r(i32, i32)
define i32 @S2_asr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = asr(r0, #0)
+; CHECK: = asr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r(i32, i32)
define i32 @S2_lsr_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = lsr(r0, #0)
+; CHECK: = lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r(i32, i32)
define i32 @S2_asl_i_r(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = asl(r0, #0)
+; CHECK: = asl({{.*}}, #0)
; Shift by immediate and accumulate
declare i64 @llvm.hexagon.S2.asr.i.p.nac(i64, i64, i32)
@@ -50,84 +53,84 @@ define i64 @S2_asr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 -= asr(r3:2, #0)
+; CHECK: -= asr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p.nac(i64, i64, i32)
define i64 @S2_lsr_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 -= lsr(r3:2, #0)
+; CHECK: -= lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p.nac(i64, i64, i32)
define i64 @S2_asl_i_p_nac(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.nac(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 -= asl(r3:2, #0)
+; CHECK: -= asl({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asr.i.p.acc(i64, i64, i32)
define i64 @S2_asr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 += asr(r3:2, #0)
+; CHECK: += asr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p.acc(i64, i64, i32)
define i64 @S2_lsr_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 += lsr(r3:2, #0)
+; CHECK: += lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p.acc(i64, i64, i32)
define i64 @S2_asl_i_p_acc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.acc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 += asl(r3:2, #0)
+; CHECK: += asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.nac(i32, i32, i32)
define i32 @S2_asr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 -= asr(r1, #0)
+; CHECK: -= asr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r.nac(i32, i32, i32)
define i32 @S2_lsr_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 -= lsr(r1, #0)
+; CHECK: -= lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r.nac(i32, i32, i32)
define i32 @S2_asl_i_r_nac(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.nac(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 -= asl(r1, #0)
+; CHECK: -= asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.acc(i32, i32, i32)
define i32 @S2_asr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 += asr(r1, #0)
+; CHECK: += asr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r.acc(i32, i32, i32)
define i32 @S2_lsr_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 += lsr(r1, #0)
+; CHECK: += lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r.acc(i32, i32, i32)
define i32 @S2_asl_i_r_acc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.acc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 += asl(r1, #0)
+; CHECK: += asl({{.*}}, #0)
; Shift by immediate and add
declare i32 @llvm.hexagon.S4.addi.asl.ri(i32, i32, i32)
@@ -135,35 +138,35 @@ define i32 @S4_addi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(#0, asl(r0, #0))
+; CHECK: = add(#0, asl({{.*}}, #0))
declare i32 @llvm.hexagon.S4.subi.asl.ri(i32, i32, i32)
define i32 @S4_subi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = sub(#0, asl(r0, #0))
+; CHECK: = sub(#0, asl({{.*}}, #0))
declare i32 @llvm.hexagon.S4.addi.lsr.ri(i32, i32, i32)
define i32 @S4_addi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.addi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = add(#0, lsr(r0, #0))
+; CHECK: = add(#0, lsr({{.*}}, #0))
declare i32 @llvm.hexagon.S4.subi.lsr.ri(i32, i32, i32)
define i32 @S4_subi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.subi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = sub(#0, lsr(r0, #0))
+; CHECK: = sub(#0, lsr({{.*}}, #0))
declare i32 @llvm.hexagon.S2.addasl.rrri(i32, i32, i32)
define i32 @S2_addasl_rrri(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.addasl.rrri(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 = addasl(r0, r1, #0)
+; CHECK: = addasl({{.*}}, {{.*}}, #0)
; Shift by immediate and logical
declare i64 @llvm.hexagon.S2.asr.i.p.and(i64, i64, i32)
@@ -171,140 +174,140 @@ define i64 @S2_asr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 &= asr(r3:2, #0)
+; CHECK: &= asr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p.and(i64, i64, i32)
define i64 @S2_lsr_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 &= lsr(r3:2, #0)
+; CHECK: &= lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p.and(i64, i64, i32)
define i64 @S2_asl_i_p_and(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.and(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 &= asl(r3:2, #0)
+; CHECK: &= asl({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asr.i.p.or(i64, i64, i32)
define i64 @S2_asr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 |= asr(r3:2, #0)
+; CHECK: |= asr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p.or(i64, i64, i32)
define i64 @S2_lsr_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 |= lsr(r3:2, #0)
+; CHECK: |= lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p.or(i64, i64, i32)
define i64 @S2_asl_i_p_or(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.or(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 |= asl(r3:2, #0)
+; CHECK: |= asl({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64, i64, i32)
define i64 @S2_lsr_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 ^= lsr(r3:2, #0)
+; CHECK: ^= lsr({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.p.xacc(i64, i64, i32)
define i64 @S2_asl_i_p_xacc(i64 %a, i64 %b) {
%z = call i64 @llvm.hexagon.S2.asl.i.p.xacc(i64 %a, i64 %b, i32 0)
ret i64 %z
}
-; CHECK: r1:0 ^= asl(r3:2, #0)
+; CHECK: ^= asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.and(i32, i32, i32)
define i32 @S2_asr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 &= asr(r1, #0)
+; CHECK: &= asr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r.and(i32, i32, i32)
define i32 @S2_lsr_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 &= lsr(r1, #0)
+; CHECK: &= lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r.and(i32, i32, i32)
define i32 @S2_asl_i_r_and(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.and(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 &= asl(r1, #0)
+; CHECK: &= asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.i.r.or(i32, i32, i32)
define i32 @S2_asr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 |= asr(r1, #0)
+; CHECK: |= asr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r.or(i32, i32, i32)
define i32 @S2_lsr_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 |= lsr(r1, #0)
+; CHECK: |= lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r.or(i32, i32, i32)
define i32 @S2_asl_i_r_or(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.or(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 |= asl(r1, #0)
+; CHECK: |= asl({{.*}}, #0)
declare i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32, i32, i32)
define i32 @S2_lsr_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.i.r.xacc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 ^= lsr(r1, #0)
+; CHECK: ^= lsr({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asl.i.r.xacc(i32, i32, i32)
define i32 @S2_asl_i_r_xacc(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.xacc(i32 %a, i32 %b, i32 0)
ret i32 %z
}
-; CHECK: r0 ^= asl(r1, #0)
+; CHECK: ^= asl({{.*}}, #0)
declare i32 @llvm.hexagon.S4.andi.asl.ri(i32, i32, i32)
define i32 @S4_andi_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = and(#0, asl(r0, #0))
+; CHECK: = and(#0, asl({{.*}}, #0))
declare i32 @llvm.hexagon.S4.ori.asl.ri(i32, i32, i32)
define i32 @S4_ori_asl_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.asl.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = or(#0, asl(r0, #0))
+; CHECK: = or(#0, asl({{.*}}, #0))
declare i32 @llvm.hexagon.S4.andi.lsr.ri(i32, i32, i32)
define i32 @S4_andi_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.andi.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = and(#0, lsr(r0, #0))
+; CHECK: = and(#0, lsr({{.*}}, #0))
declare i32 @llvm.hexagon.S4.ori.lsr.ri(i32, i32, i32)
define i32 @S4_ori_lsr_ri(i32 %a) {
%z = call i32 @llvm.hexagon.S4.ori.lsr.ri(i32 0, i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = or(#0, lsr(r0, #0))
+; CHECK: = or(#0, lsr({{.*}}, #0))
; Shift right by immediate with rounding
declare i64 @llvm.hexagon.S2.asr.i.p.rnd(i64, i32)
@@ -312,14 +315,14 @@ define i64 @S2_asr_i_p_rnd(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.p.rnd(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, #0):rnd
+; CHECK: = asr({{.*}}, #0):rnd
declare i32 @llvm.hexagon.S2.asr.i.r.rnd(i32, i32)
define i32 @S2_asr_i_r_rnd(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.r.rnd(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = asr(r0, #0):rnd
+; CHECK: = asr({{.*}}, #0):rnd
; Shift left by immediate with saturation
declare i32 @llvm.hexagon.S2.asl.i.r.sat(i32, i32)
@@ -327,7 +330,7 @@ define i32 @S2_asl_i_r_sat(i32 %a) {
%z = call i32 @llvm.hexagon.S2.asl.i.r.sat(i32 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = asl(r0, #0):sat
+; CHECK: = asl({{.*}}, #0):sat
; Shift by register
declare i64 @llvm.hexagon.S2.asr.r.p(i64, i32)
@@ -335,63 +338,63 @@ define i64 @S2_asr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = asr(r1:0, r2)
+; CHECK: = asr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p(i64, i32)
define i64 @S2_lsr_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = lsr(r1:0, r2)
+; CHECK: = lsr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p(i64, i32)
define i64 @S2_asl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = asl(r1:0, r2)
+; CHECK: = asl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p(i64, i32)
define i64 @S2_lsl_r_p(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = lsl(r1:0, r2)
+; CHECK: = lsl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r(i32, i32)
define i32 @S2_asr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = asr(r0, r1)
+; CHECK: = asr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r(i32, i32)
define i32 @S2_lsr_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = lsr(r0, r1)
+; CHECK: = lsr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r(i32, i32)
define i32 @S2_asl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = asl(r0, r1)
+; CHECK: = asl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r(i32, i32)
define i32 @S2_lsl_r_r(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = lsl(r0, r1)
+; CHECK: = lsl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S4.lsli(i32, i32)
define i32 @S4_lsli(i32 %a) {
%z = call i32 @llvm.hexagon.S4.lsli(i32 0, i32 %a)
ret i32 %z
}
-; CHECK: r0 = lsl(#0, r0)
+; CHECK: = lsl(#0, {{.*}})
; Shift by register and accumulate
declare i64 @llvm.hexagon.S2.asr.r.p.nac(i64, i64, i32)
@@ -399,112 +402,112 @@ define i64 @S2_asr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= asr(r3:2, r4)
+; CHECK: -= asr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p.nac(i64, i64, i32)
define i64 @S2_lsr_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= lsr(r3:2, r4)
+; CHECK: -= lsr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p.nac(i64, i64, i32)
define i64 @S2_asl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= asl(r3:2, r4)
+; CHECK: -= asl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p.nac(i64, i64, i32)
define i64 @S2_lsl_r_p_nac(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.nac(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 -= lsl(r3:2, r4)
+; CHECK: -= lsl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asr.r.p.acc(i64, i64, i32)
define i64 @S2_asr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += asr(r3:2, r4)
+; CHECK: += asr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p.acc(i64, i64, i32)
define i64 @S2_lsr_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += lsr(r3:2, r4)
+; CHECK: += lsr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p.acc(i64, i64, i32)
define i64 @S2_asl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += asl(r3:2, r4)
+; CHECK: += asl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p.acc(i64, i64, i32)
define i64 @S2_lsl_r_p_acc(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.acc(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 += lsl(r3:2, r4)
+; CHECK: += lsl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.nac(i32, i32, i32)
define i32 @S2_asr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= asr(r1, r2)
+; CHECK: -= asr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.nac(i32, i32, i32)
define i32 @S2_lsr_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= lsr(r1, r2)
+; CHECK: -= lsr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.nac(i32, i32, i32)
define i32 @S2_asl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= asl(r1, r2)
+; CHECK: -= asl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.nac(i32, i32, i32)
define i32 @S2_lsl_r_r_nac(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.nac(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 -= lsl(r1, r2)
+; CHECK: -= lsl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.acc(i32, i32, i32)
define i32 @S2_asr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += asr(r1, r2)
+; CHECK: += asr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.acc(i32, i32, i32)
define i32 @S2_lsr_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += lsr(r1, r2)
+; CHECK: += lsr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.acc(i32, i32, i32)
define i32 @S2_asl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += asl(r1, r2)
+; CHECK: += asl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.acc(i32, i32, i32)
define i32 @S2_lsl_r_r_acc(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.acc(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 += lsl(r1, r2)
+; CHECK: += lsl({{.*}}, {{.*}})
; Shift by register and logical
declare i64 @llvm.hexagon.S2.asr.r.p.or(i64, i64, i32)
@@ -512,112 +515,112 @@ define i64 @S2_asr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 |= asr(r3:2, r4)
+; CHECK: |= asr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.p.or(i64, i64, i32)
define i64 @S2_lsr_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 |= lsr(r3:2, r4)
+; CHECK: |= lsr({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asl.r.p.or(i64, i64, i32)
define i64 @S2_asl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 |= asl(r3:2, r4)
+; CHECK: |= asl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.p.or(i64, i64, i32)
define i64 @S2_lsl_r_p_or(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.or(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 |= lsl(r3:2, r4)
+; CHECK: |= lsl({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asr.r.p.and(i64, i64, i32)
define i64 @S2_asr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 &= asr(r3:2, r4)
+; CHECK: &= asr({{.*}}, r4)
declare i64 @llvm.hexagon.S2.lsr.r.p.and(i64, i64, i32)
define i64 @S2_lsr_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsr.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 &= lsr(r3:2, r4)
+; CHECK: &= lsr({{.*}}, r4)
declare i64 @llvm.hexagon.S2.asl.r.p.and(i64, i64, i32)
define i64 @S2_asl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.asl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 &= asl(r3:2, r4)
+; CHECK: &= asl({{.*}}, r4)
declare i64 @llvm.hexagon.S2.lsl.r.p.and(i64, i64, i32)
define i64 @S2_lsl_r_p_and(i64 %a, i64 %b, i32 %c) {
%z = call i64 @llvm.hexagon.S2.lsl.r.p.and(i64 %a, i64 %b, i32 %c)
ret i64 %z
}
-; CHECK: r1:0 &= lsl(r3:2, r4)
+; CHECK: &= lsl({{.*}}, r4)
declare i32 @llvm.hexagon.S2.asr.r.r.or(i32, i32, i32)
define i32 @S2_asr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= asr(r1, r2)
+; CHECK: |= asr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.or(i32, i32, i32)
define i32 @S2_lsr_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= lsr(r1, r2)
+; CHECK: |= lsr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.or(i32, i32, i32)
define i32 @S2_asl_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= asl(r1, r2)
+; CHECK: |= asl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.or(i32, i32, i32)
define i32 @S2_lsl_r_r_or(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.or(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 |= lsl(r1, r2)
+; CHECK: |= lsl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asr.r.r.and(i32, i32, i32)
define i32 @S2_asr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= asr(r1, r2)
+; CHECK: &= asr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsr.r.r.and(i32, i32, i32)
define i32 @S2_lsr_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsr.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= lsr(r1, r2)
+; CHECK: &= lsr({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.asl.r.r.and(i32, i32, i32)
define i32 @S2_asl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= asl(r1, r2)
+; CHECK: &= asl({{.*}}, {{.*}})
declare i32 @llvm.hexagon.S2.lsl.r.r.and(i32, i32, i32)
define i32 @S2_lsl_r_r_and(i32 %a, i32 %b, i32 %c) {
%z = call i32 @llvm.hexagon.S2.lsl.r.r.and(i32 %a, i32 %b, i32 %c)
ret i32 %z
}
-; CHECK: r0 &= lsl(r1, r2)
+; CHECK: &= lsl({{.*}}, {{.*}})
; Shift by register with saturation
declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32)
@@ -625,14 +628,14 @@ define i32 @S2_asr_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = asr(r0, r1):sat
+; CHECK: = asr({{.*}}, {{.*}}):sat
declare i32 @llvm.hexagon.S2.asl.r.r.sat(i32, i32)
define i32 @S2_asl_r_r_sat(i32 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = asl(r0, r1):sat
+; CHECK: = asl({{.*}}, {{.*}}):sat
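+
+; Illustrative sketch, not part of the original test (the function name is
+; invented): the :sat forms clamp on overflow instead of wrapping. With
+; %b = 2, a plain asl of 0x40000000 would wrap to 0, while asl(..):sat
+; should produce 0x7fffffff.
+define i32 @S2_asl_r_r_sat_example(i32 %b) {
+  %z = call i32 @llvm.hexagon.S2.asl.r.r.sat(i32 1073741824, i32 %b)
+  ret i32 %z
+}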
; Vector shift halfwords by immediate
declare i64 @llvm.hexagon.S2.asr.i.vh(i64, i32)
@@ -640,21 +643,21 @@ define i64 @S2_asr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vasrh(r1:0, #0)
+; CHECK: = vasrh({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.vh(i64, i32)
define i64 @S2_lsr_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vlsrh(r1:0, #0)
+; CHECK: = vlsrh({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.vh(i64, i32)
define i64 @S2_asl_i_vh(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vh(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vaslh(r1:0, #0)
+; CHECK: = vaslh({{.*}}, #0)
; Vector shift halfwords by register
declare i64 @llvm.hexagon.S2.asr.r.vh(i64, i32)
@@ -662,28 +665,28 @@ define i64 @S2_asr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vasrh(r1:0, r2)
+; CHECK: = vasrh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsr.r.vh(i64, i32)
define i64 @S2_lsr_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsr.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vlsrh(r1:0, r2)
+; CHECK: = vlsrh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.asl.r.vh(i64, i32)
define i64 @S2_asl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.asl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vaslh(r1:0, r2)
+; CHECK: = vaslh({{.*}}, {{.*}})
declare i64 @llvm.hexagon.S2.lsl.r.vh(i64, i32)
define i64 @S2_lsl_r_vh(i64 %a, i32 %b) {
%z = call i64 @llvm.hexagon.S2.lsl.r.vh(i64 %a, i32 %b)
ret i64 %z
}
-; CHECK: r1:0 = vlslh(r1:0, r2)
+; CHECK: = vlslh({{.*}}, {{.*}})
; Vector shift words by immediate
declare i64 @llvm.hexagon.S2.asr.i.vw(i64, i32)
@@ -691,21 +694,21 @@ define i64 @S2_asr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vasrw(r1:0, #0)
+; CHECK: = vasrw({{.*}}, #0)
declare i64 @llvm.hexagon.S2.lsr.i.vw(i64, i32)
define i64 @S2_lsr_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.lsr.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vlsrw(r1:0, #0)
+; CHECK: = vlsrw({{.*}}, #0)
declare i64 @llvm.hexagon.S2.asl.i.vw(i64, i32)
define i64 @S2_asl_i_vw(i64 %a) {
%z = call i64 @llvm.hexagon.S2.asl.i.vw(i64 %a, i32 0)
ret i64 %z
}
-; CHECK: r1:0 = vaslw(r1:0, #0)
+; CHECK: = vaslw({{.*}}, #0)
; Vector shift words with truncate and pack
declare i32 @llvm.hexagon.S2.asr.i.svw.trun(i64, i32)
@@ -713,11 +716,11 @@ define i32 @S2_asr_i_svw_trun(i64 %a) {
%z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 0)
ret i32 %z
}
-; CHECK: r0 = vasrw(r1:0, #0)
+; CHECK: = vasrw({{.*}}, #0)
declare i32 @llvm.hexagon.S2.asr.r.svw.trun(i64, i32)
define i32 @S2_asr_r_svw_trun(i64 %a, i32 %b) {
%z = call i32 @llvm.hexagon.S2.asr.r.svw.trun(i64 %a, i32 %b)
ret i32 %z
}
-; CHECK: r0 = vasrw(r1:0, r2)
+; CHECK: = vasrw({{.*}}, {{.*}})
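+
+; Editorial sketch, stated as my reading of the semantics rather than
+; something this test asserts: vasrw(Rss, #n) shifts each 32-bit word of
+; the pair right by n, truncates each result to 16 bits, and packs the two
+; halfwords into one register; n = 16 thus extracts each word's high half.
+define i32 @S2_asr_i_svw_trun_example(i64 %a) {
+  %z = call i32 @llvm.hexagon.S2.asr.i.svw.trun(i64 %a, i32 16)
+  ret i32 %z
+}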
diff --git a/test/CodeGen/Hexagon/loadi1-G0.ll b/test/CodeGen/Hexagon/loadi1-G0.ll
new file mode 100644
index 0000000000000..1116341c92ba2
--- /dev/null
+++ b/test/CodeGen/Hexagon/loadi1-G0.ll
@@ -0,0 +1,43 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 -hexagon-small-data-threshold=0 < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
+target triple = "hexagon-unknown-linux-gnu"
+
+
+@flag = external global i1
+
+
+; CHECK-NOT: CONST
+
+define i32 @test_sextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = sext i1 %0 to i32
+ ret i32 %1
+}
+
+
+
+define i16 @test_zextloadi1_16() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i16
+ ret i16 %1
+}
+
+
+define i32 @test_zextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i32
+ ret i32 %1
+}
+
+
+define i64 @test_zextloadi1_64() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i64
+ ret i64 %1
+}
+
+
diff --git a/test/CodeGen/Hexagon/loadi1-v4-G0.ll b/test/CodeGen/Hexagon/loadi1-v4-G0.ll
new file mode 100644
index 0000000000000..b7df1a125fb0c
--- /dev/null
+++ b/test/CodeGen/Hexagon/loadi1-v4-G0.ll
@@ -0,0 +1,43 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
+target triple = "hexagon-unknown-linux-gnu"
+
+
+@flag = external global i1
+
+
+; CHECK-NOT: CONST
+
+define i32 @test_sextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = sext i1 %0 to i32
+ ret i32 %1
+}
+
+
+
+define i16 @test_zextloadi1_16() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i16
+ ret i16 %1
+}
+
+
+define i32 @test_zextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i32
+ ret i32 %1
+}
+
+
+define i64 @test_zextloadi1_64() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+ %1 = zext i1 %0 to i64
+ ret i64 %1
+}
+
+
diff --git a/test/CodeGen/Hexagon/loadi1-v4.ll b/test/CodeGen/Hexagon/loadi1-v4.ll
new file mode 100644
index 0000000000000..15b056040a42f
--- /dev/null
+++ b/test/CodeGen/Hexagon/loadi1-v4.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
+target triple = "hexagon-unknown-linux-gnu"
+
+
+@flag = external global i1
+
+
+define i32 @test_sextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = sext i1 %0 to i32
+ ret i32 %1
+}
+
+
+
+define i16 @test_zextloadi1_16() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i16
+ ret i16 %1
+}
+
+
+define i32 @test_zextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i32
+ ret i32 %1
+}
+
+
+define i64 @test_zextloadi1_64() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i64
+ ret i64 %1
+}
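+
+; Editorial note: the datalayout above gives i1 a 32-bit ABI alignment, but
+; the value itself lives in a byte, so each variant is expected to lower
+; the i1 load to memub, the unsigned byte load that the CHECK lines assert,
+; with the zero- or sign-extension applied to the loaded value afterwards.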
+
+
diff --git a/test/CodeGen/Hexagon/loadi1.ll b/test/CodeGen/Hexagon/loadi1.ll
new file mode 100644
index 0000000000000..38c1dfec83296
--- /dev/null
+++ b/test/CodeGen/Hexagon/loadi1.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-i64:64:64-i32:32:32-i16:16:16-i1:32:32-f64:64:64-f32:32:32-a0:0-n32"
+target triple = "hexagon-unknown-linux-gnu"
+
+
+@flag = external global i1
+
+
+define i32 @test_sextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = sext i1 %0 to i32
+ ret i32 %1
+}
+
+
+
+define i16 @test_zextloadi1_16() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i16
+ ret i16 %1
+}
+
+
+define i32 @test_zextloadi1_32() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i32
+ ret i32 %1
+}
+
+
+define i64 @test_zextloadi1_64() {
+entry:
+ %0 = load i1, i1* @flag, align 4
+; CHECK: memub
+ %1 = zext i1 %0 to i64
+ ret i64 %1
+}
+
+
diff --git a/test/CodeGen/Hexagon/maxd.ll b/test/CodeGen/Hexagon/maxd.ll
new file mode 100644
index 0000000000000..7f237fd54e7ad
--- /dev/null
+++ b/test/CodeGen/Hexagon/maxd.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: max
+
+define i64 @f(i64 %src, i64 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp slt i64 %maxval, %src
+ %cond = select i1 %cmp, i64 %src, i64 %maxval
+ ret i64 %cond
+}
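+
+; Editorial note: the icmp-slt/select pair is the canonical max pattern;
+; instruction selection should fold it into a single 64-bit max instruction,
+; which is all the bare "CHECK: max" asserts. The companion min/minu/maxu
+; tests exercise the same fold for the other comparison flavors.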
diff --git a/test/CodeGen/Hexagon/maxh.ll b/test/CodeGen/Hexagon/maxh.ll
new file mode 100644
index 0000000000000..79b5e922c1bb1
--- /dev/null
+++ b/test/CodeGen/Hexagon/maxh.ll
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; The result of max(half-word, half-word) is also half-word.
+; Check that we are not producing a sign extend after the max.
+; CHECK-NOT: sxth
+
+define i64 @test_cast(i64 %arg0, i16 zeroext %arg1, i16 zeroext %arg2) nounwind readnone {
+entry:
+ %conv.i = zext i16 %arg1 to i32
+ %conv1.i = zext i16 %arg2 to i32
+ %sub.i = sub nsw i32 %conv.i, %conv1.i
+ %sext.i = shl i32 %sub.i, 16
+ %cmp.i = icmp slt i32 %sext.i, 65536
+ %0 = ashr exact i32 %sext.i, 16
+ %conv7.i = select i1 %cmp.i, i32 1, i32 %0
+ %cmp8.i = icmp sgt i32 %conv7.i, 4
+ %conv7.op.i = add i32 %conv7.i, 65535
+ %shl = shl i64 %arg0, 2
+ %.mask = and i32 %conv7.op.i, 65535
+ %1 = zext i32 %.mask to i64
+ %conv = select i1 %cmp8.i, i64 3, i64 %1
+ %or = or i64 %conv, %shl
+ ret i64 %or
+}
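+
+; Worked example with illustrative numbers (nothing here is checked by
+; FileCheck): for %arg1 = 3 and %arg2 = 7, %sub.i = -4, %sext.i = -262144,
+; and the first select yields 1; every path produces a value that already
+; fits in 16 bits, which is why a trailing sxth would be redundant.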
diff --git a/test/CodeGen/Hexagon/maxud.ll b/test/CodeGen/Hexagon/maxud.ll
new file mode 100644
index 0000000000000..eca4faee602cd
--- /dev/null
+++ b/test/CodeGen/Hexagon/maxud.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: maxu
+
+define i64 @f(i64 %src, i64 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp ult i64 %maxval, %src
+ %cond = select i1 %cmp, i64 %src, i64 %maxval
+ ret i64 %cond
+}
diff --git a/test/CodeGen/Hexagon/maxuw.ll b/test/CodeGen/Hexagon/maxuw.ll
new file mode 100644
index 0000000000000..0dba1f5acdef0
--- /dev/null
+++ b/test/CodeGen/Hexagon/maxuw.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: maxu
+
+define i32 @f(i32 %src, i32 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp ult i32 %maxval, %src
+ %cond = select i1 %cmp, i32 %src, i32 %maxval
+ ret i32 %cond
+}
diff --git a/test/CodeGen/Hexagon/maxw.ll b/test/CodeGen/Hexagon/maxw.ll
new file mode 100644
index 0000000000000..e66ca958806fc
--- /dev/null
+++ b/test/CodeGen/Hexagon/maxw.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: max
+
+define i32 @f(i32 %src, i32 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp slt i32 %maxval, %src
+ %cond = select i1 %cmp, i32 %src, i32 %maxval
+ ret i32 %cond
+}
diff --git a/test/CodeGen/Hexagon/mind.ll b/test/CodeGen/Hexagon/mind.ll
new file mode 100644
index 0000000000000..610283d97e2bb
--- /dev/null
+++ b/test/CodeGen/Hexagon/mind.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: min
+
+define i64 @f(i64 %src, i64 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp sgt i64 %maxval, %src
+ %cond = select i1 %cmp, i64 %src, i64 %maxval
+ ret i64 %cond
+}
diff --git a/test/CodeGen/Hexagon/minu-zext-16.ll b/test/CodeGen/Hexagon/minu-zext-16.ll
new file mode 100644
index 0000000000000..e27507da3d445
--- /dev/null
+++ b/test/CodeGen/Hexagon/minu-zext-16.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: minu
+
+define zeroext i16 @f(i16* noalias nocapture %src) nounwind readonly {
+entry:
+ %arrayidx = getelementptr inbounds i16, i16* %src, i32 1
+ %0 = load i16, i16* %arrayidx, align 1
+ %cmp = icmp ult i16 %0, 32767
+ %. = select i1 %cmp, i16 %0, i16 32767
+ ret i16 %.
+}
diff --git a/test/CodeGen/Hexagon/minu-zext-8.ll b/test/CodeGen/Hexagon/minu-zext-8.ll
new file mode 100644
index 0000000000000..15dc1a164912d
--- /dev/null
+++ b/test/CodeGen/Hexagon/minu-zext-8.ll
@@ -0,0 +1,11 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: minu
+
+define zeroext i8 @f(i8* noalias nocapture %src) nounwind readonly {
+entry:
+ %arrayidx = getelementptr inbounds i8, i8* %src, i32 1
+ %0 = load i8, i8* %arrayidx, align 1
+ %cmp = icmp ult i8 %0, 127
+ %. = select i1 %cmp, i8 %0, i8 127
+ ret i8 %.
+}
diff --git a/test/CodeGen/Hexagon/minud.ll b/test/CodeGen/Hexagon/minud.ll
new file mode 100644
index 0000000000000..29e81005081a3
--- /dev/null
+++ b/test/CodeGen/Hexagon/minud.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: minu
+
+define i64 @f(i64 %src, i64 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp ugt i64 %maxval, %src
+ %cond = select i1 %cmp, i64 %src, i64 %maxval
+ ret i64 %cond
+}
diff --git a/test/CodeGen/Hexagon/minuw.ll b/test/CodeGen/Hexagon/minuw.ll
new file mode 100644
index 0000000000000..a88d1e1160374
--- /dev/null
+++ b/test/CodeGen/Hexagon/minuw.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: minu
+
+define i32 @f(i32 %src, i32 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp ugt i32 %maxval, %src
+ %cond = select i1 %cmp, i32 %src, i32 %maxval
+ ret i32 %cond
+}
diff --git a/test/CodeGen/Hexagon/minw.ll b/test/CodeGen/Hexagon/minw.ll
new file mode 100644
index 0000000000000..5bfaae09c805d
--- /dev/null
+++ b/test/CodeGen/Hexagon/minw.ll
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: min
+
+define i32 @f(i32 %src, i32 %maxval) nounwind readnone {
+entry:
+ %cmp = icmp sgt i32 %maxval, %src
+ %cond = select i1 %cmp, i32 %src, i32 %maxval
+ ret i32 %cond
+}
diff --git a/test/CodeGen/Hexagon/postinc-offset.ll b/test/CodeGen/Hexagon/postinc-offset.ll
new file mode 100644
index 0000000000000..5e0f4751f3056
--- /dev/null
+++ b/test/CodeGen/Hexagon/postinc-offset.ll
@@ -0,0 +1,40 @@
+; RUN: llc -enable-aa-sched-mi -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
+
+; CHECK: {
+; CHECK: ={{ *}}memd([[REG0:(r[0-9]+)]]{{ *}}++{{ *}}#8)
+; CHECK-NOT: memw([[REG0]]{{ *}}+{{ *}}#0){{ *}}=
+; CHECK: }
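+
+; Editorial reading of the checks above: memd(r0++#8) is a post-increment
+; load whose pointer bump is a side effect of the load itself, and the
+; CHECK-NOT insists that no store through the stale r0+#0 address lands in
+; the same packet; -enable-aa-sched-mi is what exercises that scheduling.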
+
+define void @main() #0 {
+cond.end.6:
+ store i32 -1, i32* undef, align 8, !tbaa !0
+ br label %polly.stmt.for.body.i
+
+if.then:
+ unreachable
+
+if.end:
+ ret void
+
+polly.stmt.for.body.i24:
+ %0 = extractelement <2 x i32> %add.ip_vec, i32 1
+ br i1 undef, label %if.end, label %if.then
+
+polly.stmt.for.body.i:
+ %add.ip_vec30 = phi <2 x i32> [ %add.ip_vec, %polly.stmt.for.body.i ], [ zeroinitializer, %cond.end.6 ]
+ %scevgep.phi = phi i32* [ %scevgep.inc, %polly.stmt.for.body.i ], [ undef, %cond.end.6 ]
+ %polly.indvar = phi i32 [ %polly.indvar_next, %polly.stmt.for.body.i ], [ 0, %cond.end.6 ]
+ %vector_ptr = bitcast i32* %scevgep.phi to <2 x i32>*
+ %_p_vec_full = load <2 x i32>, <2 x i32>* %vector_ptr, align 8
+ %add.ip_vec = add <2 x i32> %_p_vec_full, %add.ip_vec30
+ %polly.indvar_next = add nsw i32 %polly.indvar, 2
+ %polly.loop_cond = icmp slt i32 %polly.indvar, 4
+ %scevgep.inc = getelementptr i32, i32* %scevgep.phi, i32 2
+ br i1 %polly.loop_cond, label %polly.stmt.for.body.i, label %polly.stmt.for.body.i24
+}
+
+attributes #0 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = !{!"int", !1}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/Hexagon/signed_immediates.ll b/test/CodeGen/Hexagon/signed_immediates.ll
new file mode 100644
index 0000000000000..a4766313cc682
--- /dev/null
+++ b/test/CodeGen/Hexagon/signed_immediates.ll
@@ -0,0 +1,99 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
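+; A note on the operand-class names below, offered as my reading of the
+; Hexagon convention rather than anything this test asserts: sN_kImm is an
+; N-bit signed immediate scaled by 2^k, so s4_0Imm spans -8..7 and s4_3Imm
+; spans -64..56 in steps of 8 (hence #-8 on the post-incremented memd),
+; while the ...Ext classes may exceed their natural range via a constant
+; extender.
+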
+; s4_0Imm
+; CHECK: memb(r0++#-1) = r1
+define i8* @foo1(i8* %a, i8 %b) {
+ store i8 %b, i8* %a
+ %c = getelementptr i8, i8* %a, i32 -1
+ ret i8* %c
+}
+
+; s4_1Imm
+; CHECK: memh(r0++#-2) = r1
+define i16* @foo2(i16* %a, i16 %b) {
+ store i16 %b, i16* %a
+ %c = getelementptr i16, i16* %a, i32 -1
+ ret i16* %c
+}
+
+; s4_2Imm
+; CHECK: memw(r0++#-4) = r1
+define i32* @foo3(i32* %a, i32 %b) {
+ store i32 %b, i32* %a
+ %c = getelementptr i32, i32* %a, i32 -1
+ ret i32* %c
+}
+
+; s4_3Imm
+; CHECK: memd(r0++#-8) = r3:2
+define i64* @foo4(i64* %a, i64 %b) {
+ store i64 %b, i64* %a
+ %c = getelementptr i64, i64* %a, i32 -1
+ ret i64* %c
+}
+
+; s6Ext
+; CHECK: if (p0.new) memw(r0+#0)=#-1
+define void @foo5(i32* %a, i1 %b) {
+br i1 %b, label %x, label %y
+x:
+ store i32 -1, i32* %a
+ ret void
+y:
+ ret void
+}
+
+; s10Ext
+; CHECK: p0 = cmp.eq(r0, #-1)
+define i1 @foo7(i32 %a) {
+ %b = icmp eq i32 %a, -1
+ ret i1 %b
+}
+
+; s11_0Ext
+; CHECK: memb(r0+#-1) = r1
+define void @foo8(i8* %a, i8 %b) {
+ %c = getelementptr i8, i8* %a, i32 -1
+ store i8 %b, i8* %c
+ ret void
+}
+
+; s11_1Ext
+; CHECK: memh(r0+#-2) = r1
+define void @foo9(i16* %a, i16 %b) {
+ %c = getelementptr i16, i16* %a, i32 -1
+ store i16 %b, i16* %c
+ ret void
+}
+
+; s11_2Ext
+; CHECK: memw(r0+#-4) = r1
+define void @foo10(i32* %a, i32 %b) {
+ %c = getelementptr i32, i32* %a, i32 -1
+ store i32 %b, i32* %c
+ ret void
+}
+
+; s11_3Ext
+; CHECK: memd(r0+#-8) = r3:2
+define void @foo11(i64* %a, i64 %b) {
+ %c = getelementptr i64, i64* %a, i32 -1
+ store i64 %b, i64* %c
+ ret void
+}
+
+; s12Ext
+; CHECK: if (p0.new) r0 = #-1
+define i32 @foo12(i32 %a, i1 %b) {
+br i1 %b, label %x, label %y
+x:
+ ret i32 -1
+y:
+ ret i32 %a
+}
+
+; s16Ext
+; CHECK: r0 = #-2
+define i32 @foo13() {
+ ret i32 -2
+}
\ No newline at end of file
diff --git a/test/CodeGen/Hexagon/simple_addend.ll b/test/CodeGen/Hexagon/simple_addend.ll
new file mode 100644
index 0000000000000..ec3a87f1dcc04
--- /dev/null
+++ b/test/CodeGen/Hexagon/simple_addend.ll
@@ -0,0 +1,10 @@
+; RUN: llc -march=hexagon -filetype=obj -o - < %s | llvm-readobj -relocations | FileCheck %s
+
+declare void @bar(i32)
+
+define void @foo(i32 %a) {
+ %b = mul i32 %a, 3
+ call void @bar(i32 %b)
+ ret void
+}
+; CHECK: 0x8 R_HEX_B22_PCREL bar 0x4
diff --git a/test/CodeGen/Hexagon/usr-ovf-dep.ll b/test/CodeGen/Hexagon/usr-ovf-dep.ll
new file mode 100644
index 0000000000000..1f06986f0aa98
--- /dev/null
+++ b/test/CodeGen/Hexagon/usr-ovf-dep.ll
@@ -0,0 +1,28 @@
+; RUN: llc -O2 < %s | FileCheck %s
+target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
+target triple = "hexagon"
+
+; Check that the two ":sat" instructions are in the same packet.
+; CHECK: foo
+; CHECK: {
+; CHECK: :sat
+; CHECK-NEXT: :sat
+
+; Function Attrs: nounwind readnone
+define i32 @foo(i32 %Rs, i32 %Rt, i32 %Ru) #0 {
+entry:
+ %0 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %Rs, i32 %Ru)
+ %1 = tail call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %Rt, i32 %Ru)
+ %add = add nsw i32 %1, %0
+ ret i32 %add
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #1
+
+attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
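+
+; Sketch of the intended output shape (illustrative only; the actual
+; register assignment may differ). Both shifts set the sticky USR.OVF bit,
+; and the point of the test is that this shared side effect alone must not
+; keep them out of a single packet:
+; {
+;   r0 = asr(r0, r2):sat
+;   r1 = asr(r1, r2):sat
+; }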
+