author     Dimitry Andric <dim@FreeBSD.org>    2015-06-21 13:59:01 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2015-06-21 13:59:01 +0000
commit     3a0822f094b578157263e04114075ad7df81db41 (patch)
tree       bc48361fe2cd1ca5f93ac01b38b183774468fc79 /test/CodeGen/X86
parent     85d8b2bbe386bcfe669575d05b61482d7be07e5d (diff)
Diffstat (limited to 'test/CodeGen/X86')
77 files changed, 3747 insertions, 1967 deletions
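Most of the exception-handling test updates in the diffs below are mechanical rewrites for the LLVM IR syntax change in which the personality function moves from each `landingpad` instruction to a `personality` attribute on the enclosing function definition. A minimal before/after sketch of that change, using illustrative function names rather than any specific test:

```llvm
; Old syntax: the personality was named on the landingpad itself, e.g.
;   %exn = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
;           cleanup
;
; New syntax: the personality is an attribute of the function definition and
; the landingpad no longer repeats it.
define void @example() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
entry:
  invoke void @may_throw()
          to label %cont unwind label %lpad

cont:
  ret void

lpad:
  %exn = landingpad { i8*, i32 }
          cleanup
  ret void
}

declare void @may_throw()
declare i32 @__gxx_personality_v0(...)
```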
diff --git a/test/CodeGen/X86/2007-05-05-Personality.ll b/test/CodeGen/X86/2007-05-05-Personality.ll index b99c58c6e4af4..f177a35273a32 100644 --- a/test/CodeGen/X86/2007-05-05-Personality.ll +++ b/test/CodeGen/X86/2007-05-05-Personality.ll @@ -12,13 +12,13 @@ @error = external global i8 -define void @_ada_x() { +define void @_ada_x() personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*) { entry: invoke void @raise() to label %eh_then unwind label %unwind unwind: ; preds = %entry - %eh_ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gnat_eh_personality to i8*) + %eh_ptr = landingpad { i8*, i32 } catch i8* @error %eh_select = extractvalue { i8*, i32 } %eh_ptr, 1 %eh_typeid = tail call i32 @llvm.eh.typeid.for(i8* @error) diff --git a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll index d1cfb447a2c31..3d3851cbd4c24 100644 --- a/test/CodeGen/X86/2008-04-17-CoalescerBug.ll +++ b/test/CodeGen/X86/2008-04-17-CoalescerBug.ll @@ -13,7 +13,7 @@ @.str33 = external constant [29 x i32] ; <[29 x i32]*> [#uses=1] @.str89 = external constant [5 x i32] ; <[5 x i32]*> [#uses=1] -define void @_ZNK10wxDateTime6FormatEPKwRKNS_8TimeZoneE(%struct.wxString* noalias sret %agg.result, %struct.wxDateTime* %this, i32* %format, %"struct.wxDateTime::TimeZone"* %tz, i1 %foo) { +define void @_ZNK10wxDateTime6FormatEPKwRKNS_8TimeZoneE(%struct.wxString* noalias sret %agg.result, %struct.wxDateTime* %this, i32* %format, %"struct.wxDateTime::TimeZone"* %tz, i1 %foo) personality i32 (...)* @__gxx_personality_v0 { entry: br i1 %foo, label %bb116.i, label %bb115.critedge.i bb115.critedge.i: ; preds = %entry @@ -151,11 +151,11 @@ bb7819: ; preds = %bb3314 bb7834: ; preds = %bb7806, %invcont5831 br label %bb3261 lpad: ; preds = %bb7806, %bb5968, %invcont5814, %bb440.i8663, %bb155.i8541, %bb5657, %bb3306 - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup ret void lpad8185: ; preds = %invcont5831 - %exn8185 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn8185 = landingpad {i8*, i32} cleanup ret void } diff --git a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll index fc7ddf0bc67ac..7ddedacbabd9b 100644 --- a/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll +++ b/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll @@ -6,7 +6,7 @@ declare i8* @_Znwm(i32) declare i8* @__cxa_begin_catch(i8*) nounwind -define i32 @main(i32 %argc, i8** %argv) { +define i32 @main(i32 %argc, i8** %argv) personality i32 (...)* @__gxx_personality_v0 { entry: br i1 false, label %bb37, label %bb34 @@ -21,7 +21,7 @@ tmp12.i.i.i.i.i.noexc65: ; preds = %bb37 unreachable lpad243: ; preds = %bb37 - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup %eh_ptr244 = extractvalue { i8*, i32 } %exn, 0 store i32 (...)** getelementptr ([5 x i32 (...)*], [5 x i32 (...)*]* @_ZTVN10Evaluation10GridOutputILi3EEE, i32 0, i32 2), i32 (...)*** null, align 8 diff --git a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll index e14c30a27449d..91f29c4f24cd9 100644 --- a/test/CodeGen/X86/2009-03-13-PHIElimBug.ll +++ b/test/CodeGen/X86/2009-03-13-PHIElimBug.ll @@ -6,7 +6,7 @@ declare i32 @f() declare i32 @g() -define i32 @phi() { +define i32 @phi() personality i32 (...)* @__gxx_personality_v0 { entry: %a = call i32 @f() ; <i32> [#uses=1] %b = invoke i32 @g() @@ -24,7 +24,7 @@ 
cont2: ; preds = %cont lpad: ; preds = %cont, %entry %y = phi i32 [ %a, %entry ], [ %aa, %cont ] ; <i32> [#uses=1] - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup ret i32 %y } diff --git a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll index f8c7a151b2c9d..6814ed1d894ed 100644 --- a/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll +++ b/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll @@ -3,7 +3,7 @@ declare i32 @f() -define i32 @phi(i32 %x) { +define i32 @phi(i32 %x) personality i32 (...)* @__gxx_personality_v0 { entry: %a = invoke i32 @f() to label %cont unwind label %lpad ; <i32> [#uses=1] @@ -17,7 +17,7 @@ cont2: ; preds = %cont lpad: ; preds = %cont, %entry %v = phi i32 [ %x, %entry ], [ %a, %cont ] ; <i32> [#uses=1] - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup ret i32 %v } diff --git a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll index 2ec49f486c994..aa88576c148e9 100644 --- a/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll +++ b/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll @@ -9,7 +9,7 @@ %struct.ComplexType = type { i32 } -define i32 @t(i32 %clientPort, i32 %pluginID, i32 %requestID, i32 %objectID, i64 %serverIdentifier, i64 %argumentsData, i32 %argumentsLength) ssp { +define i32 @t(i32 %clientPort, i32 %pluginID, i32 %requestID, i32 %objectID, i64 %serverIdentifier, i64 %argumentsData, i32 %argumentsLength) ssp personality i32 (...)* @__gxx_personality_v0 { entry: ; CHECK: _t: ; CHECK: movl 16(%rbp), @@ -34,7 +34,7 @@ invcont2: ; preds = %invcont1 ret i32 0 lpad: ; preds = %invcont1, %invcont, %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup %8 = call i32 @vm_deallocate(i32 undef, i64 0, i64 %0) ; <i32> [#uses=0] unreachable diff --git a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll index 0bf13de612751..2f4e11e54e351 100644 --- a/test/CodeGen/X86/2009-11-25-ImpDefBug.ll +++ b/test/CodeGen/X86/2009-11-25-ImpDefBug.ll @@ -20,7 +20,7 @@ declare void @_ZNSt6vectorIP10ASN1ObjectSaIS1_EE13_M_insert_auxEN9__gnu_cxx17__n declare i32 @_Z17LoadObjectFromBERR8xmstreamPP10ASN1ObjectPPF10ASN1StatusP13ASN1ObjHeaderS3_E(%struct.xmstream*, %struct.ASN1Object**, i32 (%struct.ASN1ObjHeader*, %struct.ASN1Object**)**) -define i32 @_ZN8ASN1Unit4loadER8xmstreamjm18ASN1LengthEncoding(%struct.ASN1Unit* %this, %struct.xmstream* nocapture %stream, i32 %numObjects, i64 %size, i32 %lEncoding) { +define i32 @_ZN8ASN1Unit4loadER8xmstreamjm18ASN1LengthEncoding(%struct.ASN1Unit* %this, %struct.xmstream* nocapture %stream, i32 %numObjects, i64 %size, i32 %lEncoding) personality i32 (...)* @__gxx_personality_v0 { entry: br label %meshBB85 @@ -46,7 +46,7 @@ bb1.i5: ; preds = %bb.i1 lpad: ; preds = %bb1.i.fragment.cl, %bb1.i.fragment, %bb5 %.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1] - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup %1 = load i8, i8* %.SV10.phi807, align 8 ; <i8> [#uses=0] br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp diff --git a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll index 2ba4d9aaded80..41c318b62eab7 100644 --- 
a/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll +++ b/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll @@ -7,7 +7,7 @@ target triple = "i386-apple-darwin10.0" declare i32 @_ZN11HullLibrary16CreateConvexHullERK8HullDescR10HullResult(i8*, i8* nocapture, i8* nocapture) ssp align 2 -define void @_ZN17btSoftBodyHelpers4DrawEP10btSoftBodyP12btIDebugDrawi(i8* %psb, i8* %idraw, i32 %drawflags) ssp align 2 { +define void @_ZN17btSoftBodyHelpers4DrawEP10btSoftBodyP12btIDebugDrawi(i8* %psb, i8* %idraw, i32 %drawflags) ssp align 2 personality i32 (...)* @__gxx_personality_v0 { entry: br i1 undef, label %bb92, label %bb58 @@ -60,7 +60,7 @@ bb92: ; preds = %entry unreachable lpad159: ; preds = %bb58 - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll index 4711d52746752..fc5520e12ac04 100644 --- a/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll +++ b/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll @@ -13,7 +13,7 @@ target triple = "i386-apple-darwin10.0.0" ; CHECK: movl %esi,{{.*}}(%ebp) ; CHECK: calll __Z6throwsv -define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp { +define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: %retval = alloca i8*, align 4 ; <i8**> [#uses=2] %n.addr = alloca i32, align 4 ; <i32*> [#uses=1] @@ -30,13 +30,13 @@ invoke.cont: ; preds = %entry br label %finally terminate.handler: ; preds = %match.end - %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %1 = landingpad { i8*, i32 } cleanup call void @_ZSt9terminatev() noreturn nounwind unreachable try.handler: ; preds = %entry - %exc1.ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %exc1.ptr = landingpad { i8*, i32 } catch i8* null %exc1 = extractvalue { i8*, i32 } %exc1.ptr, 0 %selector = extractvalue { i8*, i32 } %exc1.ptr, 1 @@ -57,7 +57,7 @@ invoke.cont2: ; preds = %match br label %match.end match.handler: ; preds = %match - %exc3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %exc3 = landingpad { i8*, i32 } cleanup %7 = extractvalue { i8*, i32 } %exc3, 0 store i8* %7, i8** %_rethrow diff --git a/test/CodeGen/X86/2010-08-04-MingWCrash.ll b/test/CodeGen/X86/2010-08-04-MingWCrash.ll index 61f527b0470c4..e97615a417ad0 100644 --- a/test/CodeGen/X86/2010-08-04-MingWCrash.ll +++ b/test/CodeGen/X86/2010-08-04-MingWCrash.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -mtriple=i386-pc-mingw32 -define void @func() nounwind { +define void @func() nounwind personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { invoke.cont: %call = tail call i8* @malloc() %a = invoke i32 @bar() @@ -10,7 +10,7 @@ bb1: ret void lpad: - %exn.ptr = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %exn.ptr = landingpad { i8*, i32 } catch i8* null %exn = extractvalue { i8*, i32 } %exn.ptr, 0 %eh.selector = extractvalue { i8*, i32 } %exn.ptr, 1 diff --git a/test/CodeGen/X86/2011-12-15-vec_shift.ll b/test/CodeGen/X86/2011-12-15-vec_shift.ll index 0183e107460eb..4d49b3af88ee1 100644 --- a/test/CodeGen/X86/2011-12-15-vec_shift.ll +++ b/test/CodeGen/X86/2011-12-15-vec_shift.ll @@ -12,8 +12,8 @@ define <16 x i8> @shift(<16 x i8> %a, <16 x i8> %b) 
nounwind { ; Make sure we're masking and pcmp'ing the VSELECT conditon vector. ; CHECK-WO-SSE4: psllw $5, [[REG1:%xmm.]] - ; CHECK-WO-SSE4: pand [[REG1]], [[REG2:%xmm.]] - ; CHECK-WO-SSE4: pcmpeqb {{%xmm., }}[[REG2]] + ; CHECK-WO-SSE4: pxor [[REG2:%xmm.]], [[REG2:%xmm.]] + ; CHECK-WO-SSE4: pcmpgtb {{%xmm., }}[[REG2]] %1 = shl <16 x i8> %a, %b ret <16 x i8> %1 } diff --git a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll index 21443441c9f37..20615afdfa17b 100644 --- a/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll +++ b/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll @@ -16,7 +16,7 @@ target triple = "i386-apple-macosx10.7" declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind -define void @f(i32* nocapture %arg, i32* nocapture %arg1, i32* nocapture %arg2, i32* nocapture %arg3, i32 %arg4, i32 %arg5) optsize ssp { +define void @f(i32* nocapture %arg, i32* nocapture %arg1, i32* nocapture %arg2, i32* nocapture %arg3, i32 %arg4, i32 %arg5) optsize ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { bb: br i1 undef, label %bb6, label %bb7 @@ -43,7 +43,7 @@ bb11: ; preds = %bb7 bb20: ; preds = %bb43, %bb41, %bb29, %bb7 %tmp21 = phi i32 [ undef, %bb7 ], [ %tmp12, %bb43 ], [ %tmp12, %bb29 ], [ %tmp12, %bb41 ] - %tmp22 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp22 = landingpad { i8*, i32 } catch i8* bitcast ({ i8*, i8* }* @Exception to i8*) br i1 undef, label %bb23, label %bb69 diff --git a/test/CodeGen/X86/2012-05-19-CoalescerCrash.ll b/test/CodeGen/X86/2012-05-19-CoalescerCrash.ll index 837fbc0777f73..a3f68fa4c2239 100644 --- a/test/CodeGen/X86/2012-05-19-CoalescerCrash.ll +++ b/test/CodeGen/X86/2012-05-19-CoalescerCrash.ll @@ -7,7 +7,7 @@ target triple = "i386-pc-linux-gnu" -define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { +define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { invoke void @_ZNK4llvm13CodeGenTarget12getAsmParserEv() to label %1 unwind label %5 @@ -16,7 +16,7 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { to label %4 unwind label %2 ; <label>:2 ; preds = %1 - %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %3 = landingpad { i8*, i32 } cleanup unreachable @@ -25,12 +25,12 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { to label %12 unwind label %7 ; <label>:5 ; preds = %0 - %6 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %6 = landingpad { i8*, i32 } cleanup br label %33 ; <label>:7 ; preds = %4 - %8 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %8 = landingpad { i8*, i32 } cleanup br label %9 @@ -52,7 +52,7 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { br i1 %15, label %20, label %18 ; <label>:16 ; preds = %12 - %17 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %17 = landingpad { i8*, i32 } cleanup br label %26 @@ -67,7 +67,7 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { br label %14 ; <label>:21 ; preds = %18 - %22 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %22 = landingpad { i8*, i32 } cleanup %23 = extractvalue { i8*, i32 } %22, 1 br i1 undef, 
label %26, label %24 @@ -88,7 +88,7 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { br label %9 ; <label>:30 ; preds = %26 - %31 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %31 = landingpad { i8*, i32 } catch i8* null unreachable @@ -100,7 +100,7 @@ define void @_ZN4llvm17AsmMatcherEmitter3runERNS_11raw_ostreamE() align 2 { unreachable ; <label>:35 ; preds = %9 - %36 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %36 = landingpad { i8*, i32 } catch i8* null unreachable } diff --git a/test/CodeGen/X86/2012-11-28-merge-store-alias.ll b/test/CodeGen/X86/2012-11-28-merge-store-alias.ll index ed1daadf6297c..c16deeff3d99a 100644 --- a/test/CodeGen/X86/2012-11-28-merge-store-alias.ll +++ b/test/CodeGen/X86/2012-11-28-merge-store-alias.ll @@ -3,6 +3,7 @@ ; CHECK: merge_stores_can ; CHECK: callq foo ; CHECK: xorps %xmm0, %xmm0 +; CHECK-NEXT: movl 36(%rsp), %ebp ; CHECK-NEXT: movups %xmm0 ; CHECK: callq foo ; CHECK: ret diff --git a/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/test/CodeGen/X86/2012-11-30-misched-dbg.ll index 818c5ed56873e..22227faab942f 100644 --- a/test/CodeGen/X86/2012-11-30-misched-dbg.ll +++ b/test/CodeGen/X86/2012-11-30-misched-dbg.ll @@ -99,7 +99,7 @@ declare i32 @__sprintf_chk(i8*, i32, i64, i8*, ...) %"class.__gnu_cxx::hash_map" = type { %"class.__gnu_cxx::hashtable" } %"class.__gnu_cxx::hashtable" = type { i64, i64, i64, i64, i64, i64 } -define void @main() uwtable ssp { +define void @main() uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: %X = alloca %"class.__gnu_cxx::hash_map", align 8 br i1 undef, label %cond.true, label %cond.end @@ -117,7 +117,7 @@ exit.i: ; preds = %cond.end unreachable lpad2.i.i.i.i: ; preds = %cond.end - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } cleanup br i1 undef, label %lpad.body.i.i, label %if.then.i.i.i.i.i.i.i.i diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll index 275d4213bd2ba..c8f249b7529d9 100644 --- a/test/CodeGen/X86/MergeConsecutiveStores.ll +++ b/test/CodeGen/X86/MergeConsecutiveStores.ll @@ -463,6 +463,67 @@ define void @merge_vec_element_store(<8 x float> %v, float* %ptr) { ; CHECK-NEXT: retq } +; PR21711 - Merge vector stores into wider vector stores. +; These should be merged into 32-byte stores. 
+define void @merge_vec_extract_stores(<8 x float> %v1, <8 x float> %v2, <4 x float>* %ptr) { + %idx0 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3 + %idx1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4 + %idx2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 5 + %idx3 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 6 + %shuffle0 = shufflevector <8 x float> %v1, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %shuffle1 = shufflevector <8 x float> %v1, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + %shuffle2 = shufflevector <8 x float> %v2, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %shuffle3 = shufflevector <8 x float> %v2, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7> + store <4 x float> %shuffle0, <4 x float>* %idx0, align 16 + store <4 x float> %shuffle1, <4 x float>* %idx1, align 16 + store <4 x float> %shuffle2, <4 x float>* %idx2, align 16 + store <4 x float> %shuffle3, <4 x float>* %idx3, align 16 + ret void + +; CHECK-LABEL: merge_vec_extract_stores +; CHECK: vmovaps %xmm0, 48(%rdi) +; CHECK-NEXT: vextractf128 $1, %ymm0, 64(%rdi) +; CHECK-NEXT: vmovaps %xmm1, 80(%rdi) +; CHECK-NEXT: vextractf128 $1, %ymm1, 96(%rdi) +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq +} + +; Merging vector stores when sourced from vector loads is not currently handled. +define void @merge_vec_stores_from_loads(<4 x float>* %v, <4 x float>* %ptr) { + %load_idx0 = getelementptr inbounds <4 x float>, <4 x float>* %v, i64 0 + %load_idx1 = getelementptr inbounds <4 x float>, <4 x float>* %v, i64 1 + %v0 = load <4 x float>, <4 x float>* %load_idx0 + %v1 = load <4 x float>, <4 x float>* %load_idx1 + %store_idx0 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 0 + %store_idx1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 1 + store <4 x float> %v0, <4 x float>* %store_idx0, align 16 + store <4 x float> %v1, <4 x float>* %store_idx1, align 16 + ret void + +; CHECK-LABEL: merge_vec_stores_from_loads +; CHECK: vmovaps +; CHECK-NEXT: vmovaps +; CHECK-NEXT: vmovaps +; CHECK-NEXT: vmovaps +; CHECK-NEXT: retq +} + +; Merging vector stores when sourced from a constant vector is not currently handled. +define void @merge_vec_stores_of_constants(<4 x i32>* %ptr) { + %idx0 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3 + %idx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 4 + store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>* %idx0, align 16 + store <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32>* %idx1, align 16 + ret void + +; CHECK-LABEL: merge_vec_stores_of_constants +; CHECK: vxorps +; CHECK-NEXT: vmovaps +; CHECK-NEXT: vmovaps +; CHECK-NEXT: retq +} + ; This is a minimized test based on real code that was failing. ; We could merge stores (and loads) like this... 
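The new MergeConsecutiveStores tests above exercise the DAG store-merging combine: adjacent 128-bit stores whose values come from the halves of the same 256-bit register should be emitted as 32-byte stores, while stores sourced from separate loads or from constant vectors are, per the test comments, not yet merged. A reduced sketch of the merged case (function name is illustrative; whether the combine fires for this exact reduction depends on target and flags):

```llvm
; Both halves of one 256-bit value are stored to adjacent 128-bit slots; the
; store merger is expected to produce a single 32-byte store (plus a subvector
; extract for the high half) instead of two 16-byte stores.
define void @merge_two_halves(<8 x float> %v, <4 x float>* %ptr) {
  %lo = shufflevector <8 x float> %v, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %hi = shufflevector <8 x float> %v, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %hi.ptr = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 1
  store <4 x float> %lo, <4 x float>* %ptr, align 16
  store <4 x float> %hi, <4 x float>* %hi.ptr, align 16
  ret void
}
```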
diff --git a/test/CodeGen/X86/asm-label2.ll b/test/CodeGen/X86/asm-label2.ll index 8715aa98ba5ec..031bd3852e620 100644 --- a/test/CodeGen/X86/asm-label2.ll +++ b/test/CodeGen/X86/asm-label2.ll @@ -7,7 +7,7 @@ ; CHECK: jmp LBB0_1 ; CHECK: LBB0_1: -define void @foobar() { +define void @foobar() personality i32 (...)* @__gxx_personality_v0 { entry: invoke void @_zed() to label %invoke.cont unwind label %lpad @@ -16,7 +16,7 @@ invoke.cont: ; preds = %entry ret void lpad: ; preds = %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll index 5d99269ae1dc8..b92b78035009d 100644 --- a/test/CodeGen/X86/avx2-vector-shifts.ll +++ b/test/CodeGen/X86/avx2-vector-shifts.ll @@ -302,49 +302,17 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-LABEL: shl_32i8 -; CHECK: vextracti128 $1, %ymm0, %xmm3 -; CHECK-NEXT: vpsllw $4, %xmm3, %xmm2 -; CHECK-NEXT: vmovdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; CHECK-NEXT: vpand %xmm8, %xmm2, %xmm5 -; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 -; CHECK-NEXT: vpsllw $5, %xmm2, %xmm2 -; CHECK-NEXT: vmovdqa {{.*#+}} xmm9 = [224,224,224,224,224,224,224,224,224,224,224,224,224,224,224,224] -; CHECK-NEXT: vpand %xmm9, %xmm2, %xmm7 -; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm4 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm4, %xmm4 -; CHECK-NEXT: vpblendvb %xmm4, %xmm5, %xmm3, %xmm3 -; CHECK-NEXT: vpsllw $2, %xmm3, %xmm4 -; CHECK-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4 -; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm7 -; CHECK-NEXT: vpand %xmm7, %xmm2, %xmm6 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6 -; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3 -; CHECK-NEXT: vpaddb %xmm3, %xmm3, %xmm4 -; CHECK-NEXT: vpaddb %xmm7, %xmm7, %xmm6 -; CHECK-NEXT: vpand %xmm6, %xmm2, %xmm6 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6 -; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm3, %xmm3 -; CHECK-NEXT: vpsllw $4, %xmm0, %xmm4 -; CHECK-NEXT: vpand %xmm8, %xmm4, %xmm4 -; CHECK-NEXT: vpsllw $5, %xmm1, %xmm1 -; CHECK-NEXT: vpand %xmm9, %xmm1, %xmm1 -; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm6 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm6, %xmm6 -; CHECK-NEXT: vpblendvb %xmm6, %xmm4, %xmm0, %xmm0 -; CHECK-NEXT: vpsllw $2, %xmm0, %xmm4 -; CHECK-NEXT: vpand %xmm5, %xmm4, %xmm4 -; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm5 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm5, %xmm5 -; CHECK-NEXT: vpblendvb %xmm5, %xmm4, %xmm0, %xmm0 -; CHECK-NEXT: vpaddb %xmm0, %xmm0, %xmm4 -; CHECK-NEXT: vpaddb %xmm1, %xmm1, %xmm1 -; CHECK-NEXT: vpand %xmm1, %xmm2, %xmm1 -; CHECK-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1 -; CHECK-NEXT: vpblendvb %xmm1, %xmm4, %xmm0, %xmm0 -; CHECK-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpsllw $4, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsllw $2, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpaddb %ymm0, %ymm0, %ymm2 +; CHECK-NEXT: vpaddb 
%ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; CHECK-NEXT: retq %shl = shl <32 x i8> %r, %a ret <32 x i8> %shl @@ -381,169 +349,30 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-LABEL: ashr_32i8 -; CHECK: vextracti128 $1, %ymm1, %xmm2 -; CHECK-NEXT: vpextrb $1, %xmm2, %ecx -; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3 -; CHECK-NEXT: vpextrb $1, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $0, %xmm2, %ecx -; CHECK-NEXT: vpextrb $0, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: movzbl %dl, %edx -; CHECK-NEXT: vpextrb $2, %xmm2, %ecx -; CHECK-NEXT: vpextrb $2, %xmm3, %esi -; CHECK-NEXT: sarb %cl, %sil -; CHECK-NEXT: vmovd %edx, %xmm4 -; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %sil, %eax -; CHECK-NEXT: vpextrb $3, %xmm2, %ecx -; CHECK-NEXT: vpextrb $3, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $4, %xmm2, %ecx -; CHECK-NEXT: vpextrb $4, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $5, %xmm2, %ecx -; CHECK-NEXT: vpextrb $5, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $6, %xmm2, %ecx -; CHECK-NEXT: vpextrb $6, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $7, %xmm2, %ecx -; CHECK-NEXT: vpextrb $7, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $8, %xmm2, %ecx -; CHECK-NEXT: vpextrb $8, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $9, %xmm2, %ecx -; CHECK-NEXT: vpextrb $9, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $10, %xmm2, %ecx -; CHECK-NEXT: vpextrb $10, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $11, %xmm2, %ecx -; CHECK-NEXT: vpextrb $11, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $12, %xmm2, %ecx -; CHECK-NEXT: vpextrb $12, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $13, %xmm2, %ecx -; CHECK-NEXT: vpextrb $13, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $14, %xmm2, %ecx -; CHECK-NEXT: vpextrb $14, %xmm3, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $15, %xmm2, %ecx -; CHECK-NEXT: vpextrb $15, %xmm3, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $1, %xmm1, %ecx -; CHECK-NEXT: vpextrb $1, %xmm0, %esi -; CHECK-NEXT: sarb %cl, %sil -; CHECK-NEXT: movzbl %dl, %ecx -; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2 -; CHECK-NEXT: vpextrb $0, %xmm1, %ecx -; CHECK-NEXT: vpextrb $0, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpextrb $2, %xmm1, %ecx -; 
CHECK-NEXT: vpextrb $2, %xmm0, %edi -; CHECK-NEXT: sarb %cl, %dil -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 -; CHECK-NEXT: movzbl %sil, %eax -; CHECK-NEXT: movzbl %dl, %ecx -; CHECK-NEXT: vmovd %ecx, %xmm3 -; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dil, %eax -; CHECK-NEXT: vpextrb $3, %xmm1, %ecx -; CHECK-NEXT: vpextrb $3, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $4, %xmm1, %ecx -; CHECK-NEXT: vpextrb $4, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $5, %xmm1, %ecx -; CHECK-NEXT: vpextrb $5, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $6, %xmm1, %ecx -; CHECK-NEXT: vpextrb $6, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $7, %xmm1, %ecx -; CHECK-NEXT: vpextrb $7, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $8, %xmm1, %ecx -; CHECK-NEXT: vpextrb $8, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $9, %xmm1, %ecx -; CHECK-NEXT: vpextrb $9, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $10, %xmm1, %ecx -; CHECK-NEXT: vpextrb $10, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $11, %xmm1, %ecx -; CHECK-NEXT: vpextrb $11, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $12, %xmm1, %ecx -; CHECK-NEXT: vpextrb $12, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $13, %xmm1, %ecx -; CHECK-NEXT: vpextrb $13, %xmm0, %eax -; CHECK-NEXT: sarb %cl, %al -; CHECK-NEXT: vpextrb $14, %xmm1, %ecx -; CHECK-NEXT: vpextrb $14, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $15, %xmm1, %ecx -; CHECK-NEXT: vpextrb $15, %xmm0, %edx -; CHECK-NEXT: sarb %cl, %dl -; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] +; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] +; CHECK-NEXT: vpsraw $4, %ymm3, %ymm4 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3 +; CHECK-NEXT: vpsraw $2, %ymm3, %ymm4 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, 
%ymm3, %ymm3 +; CHECK-NEXT: vpsraw $1, %ymm3, %ymm4 +; CHECK-NEXT: vpaddw %ymm2, %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2 +; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2 +; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] +; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] +; CHECK-NEXT: vpsraw $4, %ymm0, %ymm3 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $2, %ymm0, %ymm3 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsraw $1, %ymm0, %ymm3 +; CHECK-NEXT: vpaddw %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0 +; CHECK-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 ; CHECK-NEXT: retq %ashr = ashr <32 x i8> %r, %a ret <32 x i8> %ashr @@ -580,169 +409,18 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind { define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind { ; CHECK-LABEL: lshr_32i8 -; CHECK: vextracti128 $1, %ymm1, %xmm2 -; CHECK-NEXT: vpextrb $1, %xmm2, %ecx -; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3 -; CHECK-NEXT: vpextrb $1, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $0, %xmm2, %ecx -; CHECK-NEXT: vpextrb $0, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: movzbl %dl, %edx -; CHECK-NEXT: vpextrb $2, %xmm2, %ecx -; CHECK-NEXT: vpextrb $2, %xmm3, %esi -; CHECK-NEXT: shrb %cl, %sil -; CHECK-NEXT: vmovd %edx, %xmm4 -; CHECK-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %sil, %eax -; CHECK-NEXT: vpextrb $3, %xmm2, %ecx -; CHECK-NEXT: vpextrb $3, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $4, %xmm2, %ecx -; CHECK-NEXT: vpextrb $4, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $5, %xmm2, %ecx -; CHECK-NEXT: vpextrb $5, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $6, %xmm2, %ecx -; CHECK-NEXT: vpextrb $6, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $7, %xmm2, %ecx -; CHECK-NEXT: vpextrb $7, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $7, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $8, %xmm2, %ecx -; CHECK-NEXT: vpextrb $8, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $8, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $9, %xmm2, %ecx -; CHECK-NEXT: vpextrb $9, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $10, %xmm2, %ecx -; CHECK-NEXT: vpextrb $10, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $9, %eax, %xmm4, %xmm4 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $11, %xmm2, %ecx -; CHECK-NEXT: vpextrb $11, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $10, %eax, %xmm4, %xmm4 -; 
CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $11, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $12, %xmm2, %ecx -; CHECK-NEXT: vpextrb $12, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $12, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $13, %xmm2, %ecx -; CHECK-NEXT: vpextrb $13, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $14, %xmm2, %ecx -; CHECK-NEXT: vpextrb $14, %xmm3, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $13, %eax, %xmm4, %xmm4 -; CHECK-NEXT: vpextrb $15, %xmm2, %ecx -; CHECK-NEXT: vpextrb $15, %xmm3, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $1, %xmm1, %ecx -; CHECK-NEXT: vpextrb $1, %xmm0, %esi -; CHECK-NEXT: shrb %cl, %sil -; CHECK-NEXT: movzbl %dl, %ecx -; CHECK-NEXT: vpinsrb $14, %ecx, %xmm4, %xmm2 -; CHECK-NEXT: vpextrb $0, %xmm1, %ecx -; CHECK-NEXT: vpextrb $0, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpextrb $2, %xmm1, %ecx -; CHECK-NEXT: vpextrb $2, %xmm0, %edi -; CHECK-NEXT: shrb %cl, %dil -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2 -; CHECK-NEXT: movzbl %sil, %eax -; CHECK-NEXT: movzbl %dl, %ecx -; CHECK-NEXT: vmovd %ecx, %xmm3 -; CHECK-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dil, %eax -; CHECK-NEXT: vpextrb $3, %xmm1, %ecx -; CHECK-NEXT: vpextrb $3, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $4, %xmm1, %ecx -; CHECK-NEXT: vpextrb $4, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $5, %xmm1, %ecx -; CHECK-NEXT: vpextrb $5, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $6, %xmm1, %ecx -; CHECK-NEXT: vpextrb $6, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $7, %xmm1, %ecx -; CHECK-NEXT: vpextrb $7, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $8, %xmm1, %ecx -; CHECK-NEXT: vpextrb $8, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $9, %xmm1, %ecx -; CHECK-NEXT: vpextrb $9, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $10, %xmm1, %ecx -; CHECK-NEXT: vpextrb $10, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpextrb $11, %xmm1, %ecx -; CHECK-NEXT: vpextrb $11, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $12, %xmm1, %ecx -; CHECK-NEXT: vpextrb $12, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3 -; CHECK-NEXT: vpextrb $13, %xmm1, %ecx -; CHECK-NEXT: vpextrb $13, %xmm0, %eax -; CHECK-NEXT: shrb %cl, %al -; CHECK-NEXT: vpextrb $14, %xmm1, %ecx -; CHECK-NEXT: vpextrb $14, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: movzbl %al, %eax -; CHECK-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3 -; CHECK-NEXT: movzbl 
%dl, %eax -; CHECK-NEXT: vpextrb $15, %xmm1, %ecx -; CHECK-NEXT: vpextrb $15, %xmm0, %edx -; CHECK-NEXT: shrb %cl, %dl -; CHECK-NEXT: vpinsrb $14, %eax, %xmm3, %xmm0 -; CHECK-NEXT: movzbl %dl, %eax -; CHECK-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 -; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; CHECK: vpsllw $5, %ymm1, %ymm1 +; CHECK-NEXT: vpsrlw $4, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $2, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vpsrlw $1, %ymm0, %ymm2 +; CHECK-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vpaddb %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; CHECK-NEXT: retq %lshr = lshr <32 x i8> %r, %a ret <32 x i8> %lshr diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll index 9387192f8aa44..a06cadaa3f5ab 100644 --- a/test/CodeGen/X86/avx512-intrinsics.ll +++ b/test/CodeGen/X86/avx512-intrinsics.ll @@ -176,13 +176,6 @@ define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) { } declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone -define <2 x double> @test_x86_avx512_cvtusi642sd(<2 x double> %a0, i64 %a1) { - ; CHECK: vcvtusi2sdq {{.*}}encoding: [0x62 - %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1] - ret <2 x double> %res -} -declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64) nounwind readnone - define i64 @test_x86_sse2_cvttsd2si64(<2 x double> %a0) { ; CHECK: vcvttsd2si {{.*}}encoding: [0x62 %res = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %a0) ; <i64> [#uses=1] @@ -510,30 +503,6 @@ declare <8 x double> @llvm.x86.avx512.mask.min.pd.512(<8 x double>, <8 x double> } declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8) -define <8 x i64> @test_vpmaxq(<8 x i64> %a0, <8 x i64> %a1) { - ; CHECK: vpmaxsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x3d,0xc1] - %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %a0, <8 x i64> %a1, - <8 x i64>zeroinitializer, i8 -1) - ret <8 x i64> %res -} -declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) - -define <16 x i32> @test_vpminud(<16 x i32> %a0, <16 x i32> %a1) { - ; CHECK: vpminud {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3b,0xc1] - %res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %a0, <16 x i32> %a1, - <16 x i32>zeroinitializer, i16 -1) - ret <16 x i32> %res -} -declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) - -define <16 x i32> @test_vpmaxsd(<16 x i32> %a0, <16 x i32> %a1) { - ; CHECK: vpmaxsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3d,0xc1] - %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %a0, <16 x i32> %a1, - <16 x i32>zeroinitializer, i16 -1) - ret <16 x i32> %res -} -declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) - define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1) { ; CHECK: vptestmq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x27,0xc1] %res = call i8 @llvm.x86.avx512.mask.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1) @@ -630,28 +599,6 @@ define <8 x double> @test_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask ret <8 x double> %res } -define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x 
float>%y, <16 x i32>%perm) { -; CHECK: vpermt2ps {{.*}}encoding: [0x62,0xf2,0x6d,0x48,0x7f,0xc1] - %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 -1) - ret <16 x float> %res -} - -define <16 x float> @test_vpermt2ps_mask(<16 x float>%x, <16 x float>%y, <16 x i32>%perm, i16 %mask) { -; CHECK-LABEL: test_vpermt2ps_mask: -; CHECK: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x6d,0x49,0x7f,0xc1] - %res = call <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>%perm, <16 x float>%x, <16 x float>%y, i16 %mask) - ret <16 x float> %res -} - -declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16) - -define <8 x i64> @test_vmovntdqa(i8 *%x) { -; CHECK-LABEL: test_vmovntdqa: -; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07] - %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x) - ret <8 x i64> %res -} - declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*) define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) { @@ -2807,3 +2754,262 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) { %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4) ret <2 x double> %res } + +define <2 x double> @test_x86_avx512_cvtsi2sd32(<2 x double> %a, i32 %b) { +; CHECK-LABEL: test_x86_avx512_cvtsi2sd32: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtsi2sdl %edi, {rz-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq + %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd32(<2 x double> %a, i32 %b, i32 3) ; <<<2 x double>> [#uses=1] + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.avx512.cvtsi2sd32(<2 x double>, i32, i32) nounwind readnone + +define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) { +; CHECK-LABEL: test_x86_avx512_cvtsi2sd64: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq + %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<<2 x double>> [#uses=1] + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double>, i64, i32) nounwind readnone + +define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) { +; CHECK-LABEL: test_x86_avx512_cvtsi2ss32: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq + %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 3) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind readnone + +define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) { +; CHECK-LABEL: test_x86_avx512_cvtsi2ss64: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq + %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float>, i64, i32) nounwind readnone + +define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) +; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 1) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} + +define <4 x float> 
@test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr) +; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem: +; CHECK: ## BB#0: +; CHECK-NEXT: movl (%rdi), %eax +; CHECK-NEXT: vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %b = load i32, i32* %ptr + %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 1) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} + +define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) +; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2ssl %edi, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} + +define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr) +; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss_mem: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2ssl (%rdi), %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %b = load i32, i32* %ptr + %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float>, i32, i32) nounwind readnone + +define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b) +; CHECK-LABEL: _mm_cvt_roundu64_ss: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 1) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} + +define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b) +; CHECK-LABEL: _mm_cvtu64_ss: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2ssq %rdi, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 4) ; <<<4 x float>> [#uses=1] + ret <4 x float> %res +} +declare <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float>, i64, i32) nounwind readnone + +define <2 x double> @test_x86_avx512_mm_cvtu32_sd(<2 x double> %a, i32 %b) +; CHECK-LABEL: test_x86_avx512_mm_cvtu32_sd: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2sdl %edi, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <2 x double> @llvm.x86.avx512.cvtusi2sd(<2 x double> %a, i32 %b) ; <<<2 x double>> [#uses=1] + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.avx512.cvtusi2sd(<2 x double>, i32) nounwind readnone + +define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b) +; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 1) ; <<<2 x double>> [#uses=1] + ret <2 x double> %res +} + +define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b) +; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd: +; CHECK: ## BB#0: +; CHECK-NEXT: vcvtusi2sdq %rdi, %xmm0, %xmm0 +; CHECK-NEXT: retq +{ + %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 4) ; <<<2 x double>> [#uses=1] + ret <2 x double> %res +} +declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64, i32) nounwind readnone + +define <8 x i64> @test_vpmaxq(<8 x i64> %a0, <8 x i64> %a1) { + ; CHECK: vpmaxsq {{.*}}encoding: [0x62,0xf2,0xfd,0x48,0x3d,0xc1] + %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %a0, <8 x i64> %a1, + <8 x i64>zeroinitializer, i8 -1) + ret <8 x i64> %res +} +declare <8 x 
i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) + +define <16 x i32> @test_vpminud(<16 x i32> %a0, <16 x i32> %a1) { + ; CHECK: vpminud {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3b,0xc1] + %res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %a0, <16 x i32> %a1, + <16 x i32>zeroinitializer, i16 -1) + ret <16 x i32> %res +} +declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) + +define <16 x i32> @test_vpmaxsd(<16 x i32> %a0, <16 x i32> %a1) { + ; CHECK: vpmaxsd {{.*}}encoding: [0x62,0xf2,0x7d,0x48,0x3d,0xc1] + %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %a0, <16 x i32> %a1, + <16 x i32>zeroinitializer, i16 -1) + ret <16 x i32> %res +} +declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_d_512 +; CHECK-NOT: call +; CHECK: vpmaxsd %zmm +; CHECK: {%k1} +define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) { + %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1) + %res2 = add <16 x i32> %res, %res1 + ret <16 x i32> %res2 +} + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_q_512 +; CHECK-NOT: call +; CHECK: vpmaxsq %zmm +; CHECK: {%k1} +define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) { + %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1) + %res2 = add <8 x i64> %res, %res1 + ret <8 x i64> %res2 +} + +declare <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_d_512 +; CHECK-NOT: call +; CHECK: vpmaxud %zmm +; CHECK: {%k1} +define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) { + %res = call <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1) + %res2 = add <16 x i32> %res, %res1 + ret <16 x i32> %res2 +} + +declare <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_q_512 +; CHECK-NOT: call +; CHECK: vpmaxuq %zmm +; CHECK: {%k1} +define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) { + %res = call <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1) + %res2 = add <8 x i64> %res, %res1 + ret <8 x i64> %res2 +} + +declare <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_d_512 +; CHECK-NOT: call +; CHECK: vpminsd %zmm +; CHECK: {%k1} +define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) { + %res = call <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) + %res1 = call 
<16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1) + %res2 = add <16 x i32> %res, %res1 + ret <16 x i32> %res2 +} + +declare <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_q_512 +; CHECK-NOT: call +; CHECK: vpminsq %zmm +; CHECK: {%k1} +define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) { + %res = call <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1) + %res2 = add <8 x i64> %res, %res1 + ret <8 x i64> %res2 +} + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_d_512 +; CHECK-NOT: call +; CHECK: vpminud %zmm +; CHECK: {%k1} +define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) { + %res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) + %res1 = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1) + %res2 = add <16 x i32> %res, %res1 + ret <16 x i32> %res2 +} + +declare <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_q_512 +; CHECK-NOT: call +; CHECK: vpminuq %zmm +; CHECK: {%k1} +define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) { + %res = call <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) + %res1 = call <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1) + %res2 = add <8 x i64> %res, %res1 + ret <8 x i64> %res2 +} diff --git a/test/CodeGen/X86/avx512-shuffle.ll b/test/CodeGen/X86/avx512-shuffle.ll index 2683d6fe238c5..7e9eda58737d1 100644 --- a/test/CodeGen/X86/avx512-shuffle.ll +++ b/test/CodeGen/X86/avx512-shuffle.ll @@ -116,10 +116,10 @@ define <16 x i32> @test15(<16 x i32> %a) { ret <16 x i32> %b } ; CHECK-LABEL: test16 -; CHECK: valignq $2, %zmm0, %zmm1 +; CHECK: valignq $3, %zmm0, %zmm1 ; CHECK: ret define <8 x double> @test16(<8 x double> %a, <8 x double> %b) nounwind { - %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9> + %c = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10> ret <8 x double> %c } @@ -252,6 +252,62 @@ define <8 x double> @test32(<8 x double> %a, <8 x double> %b) nounwind { ret <8 x double> %c } +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s +define <8 x double> @test_vshuff64x2_512(<8 x double> %x, <8 x double> %x1) nounwind { +; CHECK-LABEL: test_vshuff64x2_512: +; CHECK: ## BB#0: +; CHECK-NEXT: vshuff64x2 $136, %zmm0, %zmm0, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %x, <8 x double> %x1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 0, i32 1, i32 4, i32 5> + ret <8 x double> %res +} + +define <8 x double> @test_vshuff64x2_512_mask(<8 x double> %x, <8 x double> %x1, <8 x i1> %mask) nounwind { +; CHECK-LABEL: test_vshuff64x2_512_mask: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmovsxwq %xmm2, %zmm1 +; CHECK-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; CHECK-NEXT: vptestmq %zmm1, %zmm1, %k1 +; CHECK-NEXT: vshuff64x2 $136, %zmm0, %zmm0, %zmm0 {%k1} {z} +; 
CHECK-NEXT: retq + %y = shufflevector <8 x double> %x, <8 x double> %x1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 0, i32 1, i32 4, i32 5> + %res = select <8 x i1> %mask, <8 x double> %y, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x i64> @test_vshufi64x2_512_mask(<8 x i64> %x, <8 x i64> %x1, <8 x i1> %mask) nounwind { +; CHECK-LABEL: test_vshufi64x2_512_mask: +; CHECK: ## BB#0: +; CHECK-NEXT: vpmovsxwq %xmm2, %zmm1 +; CHECK-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1 +; CHECK-NEXT: vptestmq %zmm1, %zmm1, %k1 +; CHECK-NEXT: vshufi64x2 $168, %zmm0, %zmm0, %zmm0 {%k1} +; CHECK-NEXT: retq + %y = shufflevector <8 x i64> %x, <8 x i64> %x1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 4, i32 5> + %res = select <8 x i1> %mask, <8 x i64> %y, <8 x i64> %x + ret <8 x i64> %res +} + +define <8 x double> @test_vshuff64x2_512_mem(<8 x double> %x, <8 x double> *%ptr) nounwind { +; CHECK-LABEL: test_vshuff64x2_512_mem: +; CHECK: ## BB#0: +; CHECK-NEXT: vshuff64x2 $40, %zmm0, %zmm0, %zmm0 +; CHECK-NEXT: retq + %x1 = load <8 x double>,<8 x double> *%ptr,align 1 + %res = shufflevector <8 x double> %x, <8 x double> %x1, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 0, i32 1> + ret <8 x double> %res +} + +define <16 x float> @test_vshuff32x4_512_mem(<16 x float> %x, <16 x float> *%ptr) nounwind { +; CHECK-LABEL: test_vshuff32x4_512_mem: +; CHECK: ## BB#0: +; CHECK-NEXT: vshuff64x2 $20, %zmm0, %zmm0, %zmm0 +; CHECK-NEXT: retq + %x1 = load <16 x float>,<16 x float> *%ptr,align 1 + %res = shufflevector <16 x float> %x, <16 x float> %x1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3> + ret <16 x float> %res +} + define <16 x i32> @test_align_v16i32_rr(<16 x i32> %a, <16 x i32> %b) nounwind { ; CHECK-LABEL: test_align_v16i32_rr: ; CHECK: ## BB#0: diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll index 04028a1da510d..6a4a3aa7e371d 100644 --- a/test/CodeGen/X86/avx512-vec-cmp.ll +++ b/test/CodeGen/X86/avx512-vec-cmp.ll @@ -394,7 +394,7 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y ; KNL-LABEL: test28 ; KNL: vpcmpgtq ; KNL: vpcmpgtq -; KNL: kxorw +; KNL: kxnorw define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1) { %x_gt_y = icmp sgt <8 x i64> %x, %y %x1_gt_y1 = icmp sgt <8 x i64> %x1, %y1 @@ -406,7 +406,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1 ; KNL-LABEL: test29 ; KNL: vpcmpgtd ; KNL: vpcmpgtd -; KNL: kxnorw +; KNL: kxorw define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32> %y1) { %x_gt_y = icmp sgt <16 x i32> %x, %y %x1_gt_y1 = icmp sgt <16 x i32> %x1, %y1 diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll index 0db2941cac6f5..9ee0e09d1b7a2 100644 --- a/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -788,3 +788,133 @@ define <32 x i16> @test_mask_subs_epu16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr } declare <32 x i16> @llvm.x86.avx512.mask.psubus.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +declare <64 x i8> @llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_b_512 +; CHECK-NOT: call +; CHECK: vpmaxsb %zmm +; CHECK: {%k1} +define <64 x i8>@test_int_x86_avx512_mask_pmaxs_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) { + %res = call <64 x i8> 
@llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) + %res1 = call <64 x i8> @llvm.x86.avx512.mask.pmaxs.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1) + %res2 = add <64 x i8> %res, %res1 + ret <64 x i8> %res2 +} + +declare <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_w_512 +; CHECK-NOT: call +; CHECK: vpmaxsw %zmm +; CHECK: {%k1} +define <32 x i16>@test_int_x86_avx512_mask_pmaxs_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { + %res = call <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.pmaxs.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = add <32 x i16> %res, %res1 + ret <32 x i16> %res2 +} + +declare <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_b_512 +; CHECK-NOT: call +; CHECK: vpmaxub %zmm +; CHECK: {%k1} +define <64 x i8>@test_int_x86_avx512_mask_pmaxu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) { + %res = call <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) + %res1 = call <64 x i8> @llvm.x86.avx512.mask.pmaxu.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1) + %res2 = add <64 x i8> %res, %res1 + ret <64 x i8> %res2 +} + +declare <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_w_512 +; CHECK-NOT: call +; CHECK: vpmaxuw %zmm +; CHECK: {%k1} +define <32 x i16>@test_int_x86_avx512_mask_pmaxu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { + %res = call <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.pmaxu.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = add <32 x i16> %res, %res1 + ret <32 x i16> %res2 +} + +declare <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_b_512 +; CHECK-NOT: call +; CHECK: vpminsb %zmm +; CHECK: {%k1} +define <64 x i8>@test_int_x86_avx512_mask_pmins_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) { + %res = call <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) + %res1 = call <64 x i8> @llvm.x86.avx512.mask.pmins.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1) + %res2 = add <64 x i8> %res, %res1 + ret <64 x i8> %res2 +} + +declare <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_w_512 +; CHECK-NOT: call +; CHECK: vpminsw %zmm +; CHECK: {%k1} +define <32 x i16>@test_int_x86_avx512_mask_pmins_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { + %res = call <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.pmins.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = add <32 x i16> %res, %res1 + ret <32 x i16> %res2 +} + +declare <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_b_512 +; CHECK-NOT: call +; CHECK: vpminub %zmm +; CHECK: {%k1} +define <64 x 
i8>@test_int_x86_avx512_mask_pminu_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) { + %res = call <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) + %res1 = call <64 x i8> @llvm.x86.avx512.mask.pminu.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1) + %res2 = add <64 x i8> %res, %res1 + ret <64 x i8> %res2 +} + +declare <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_w_512 +; CHECK-NOT: call +; CHECK: vpminuw %zmm +; CHECK: {%k1} +define <32 x i16>@test_int_x86_avx512_mask_pminu_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { + %res = call <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.pminu.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = add <32 x i16> %res, %res1 + ret <32 x i16> %res2 +} + +declare <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_b_512 +; CHECK-NOT: call +; CHECK: vpavgb %zmm +; CHECK: {%k1} +define <64 x i8>@test_int_x86_avx512_mask_pavg_b_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) { + %res = call <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 %x3) + %res1 = call <64 x i8> @llvm.x86.avx512.mask.pavg.b.512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2, i64 -1) + %res2 = add <64 x i8> %res, %res1 + ret <64 x i8> %res2 +} + +declare <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_w_512 +; CHECK-NOT: call +; CHECK: vpavgw %zmm +; CHECK: {%k1} +define <32 x i16>@test_int_x86_avx512_mask_pavg_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) { + %res = call <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3) + %res1 = call <32 x i16> @llvm.x86.avx512.mask.pavg.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 -1) + %res2 = add <32 x i16> %res, %res1 + ret <32 x i16> %res2 +} diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll index f0efb2c947e9a..cf8c32a48b6b0 100644 --- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll +++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll @@ -2667,4 +2667,264 @@ define <32 x i8> @test_mask_subs_epu8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, ret <32 x i8> %res } -declare <32 x i8> @llvm.x86.avx512.mask.psubus.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
\ No newline at end of file +declare <32 x i8> @llvm.x86.avx512.mask.psubus.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) + +declare <16 x i8> @llvm.x86.avx512.mask.pmaxs.b.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_b_128 +; CHECK-NOT: call +; CHECK: vpmaxsb %xmm +; CHECK: {%k1} +define <16 x i8>@test_int_x86_avx512_mask_pmaxs_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) { + %res = call <16 x i8> @llvm.x86.avx512.mask.pmaxs.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2 ,i16 %mask) + %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmaxs.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %mask) + %res2 = add <16 x i8> %res, %res1 + ret <16 x i8> %res2 +} + +declare <32 x i8> @llvm.x86.avx512.mask.pmaxs.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_b_256 +; CHECK-NOT: call +; CHECK: vpmaxsb %ymm +; CHECK: {%k1} +define <32 x i8>@test_int_x86_avx512_mask_pmaxs_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) { + %res = call <32 x i8> @llvm.x86.avx512.mask.pmaxs.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) + %res1 = call <32 x i8> @llvm.x86.avx512.mask.pmaxs.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1) + %res2 = add <32 x i8> %res, %res1 + ret <32 x i8> %res2 +} + +declare <8 x i16> @llvm.x86.avx512.mask.pmaxs.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_w_128 +; CHECK-NOT: call +; CHECK: vpmaxsw %xmm +; CHECK: {%k1} +define <8 x i16>@test_int_x86_avx512_mask_pmaxs_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) { + %res = call <8 x i16> @llvm.x86.avx512.mask.pmaxs.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) + %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmaxs.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1) + %res2 = add <8 x i16> %res, %res1 + ret <8 x i16> %res2 +} + +declare <16 x i16> @llvm.x86.avx512.mask.pmaxs.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_w_256 +; CHECK-NOT: call +; CHECK: vpmaxsw %ymm +; CHECK: {%k1} +define <16 x i16>@test_int_x86_avx512_mask_pmaxs_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) { + %res = call <16 x i16> @llvm.x86.avx512.mask.pmaxs.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) + %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmaxs.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %mask) + %res2 = add <16 x i16> %res, %res1 + ret <16 x i16> %res2 +} + +declare <16 x i8> @llvm.x86.avx512.mask.pmaxu.b.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_b_128 +; CHECK-NOT: call +; CHECK: vpmaxub %xmm +; CHECK: {%k1} +define <16 x i8>@test_int_x86_avx512_mask_pmaxu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2,i16 %mask) { + %res = call <16 x i8> @llvm.x86.avx512.mask.pmaxu.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) + %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmaxu.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %mask) + %res2 = add <16 x i8> %res, %res1 + ret <16 x i8> %res2 +} + +declare <32 x i8> @llvm.x86.avx512.mask.pmaxu.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_b_256 +; CHECK-NOT: call +; CHECK: vpmaxub %ymm +; CHECK: {%k1} +define <32 x i8>@test_int_x86_avx512_mask_pmaxu_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) { + %res = call <32 x i8> 
@llvm.x86.avx512.mask.pmaxu.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) + %res1 = call <32 x i8> @llvm.x86.avx512.mask.pmaxu.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1) + %res2 = add <32 x i8> %res, %res1 + ret <32 x i8> %res2 +} + +declare <8 x i16> @llvm.x86.avx512.mask.pmaxu.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_w_128 +; CHECK-NOT: call +; CHECK: vpmaxuw %xmm +; CHECK: {%k1} +define <8 x i16>@test_int_x86_avx512_mask_pmaxu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) { + %res = call <8 x i16> @llvm.x86.avx512.mask.pmaxu.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) + %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmaxu.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1) + %res2 = add <8 x i16> %res, %res1 + ret <8 x i16> %res2 +} + +declare <16 x i16> @llvm.x86.avx512.mask.pmaxu.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_w_256 +; CHECK-NOT: call +; CHECK: vpmaxuw %ymm +; CHECK: {%k1} +define <16 x i16>@test_int_x86_avx512_mask_pmaxu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) { + %res = call <16 x i16> @llvm.x86.avx512.mask.pmaxu.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) + %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmaxu.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %mask) + %res2 = add <16 x i16> %res, %res1 + ret <16 x i16> %res2 +} + +declare <16 x i8> @llvm.x86.avx512.mask.pmins.b.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_b_128 +; CHECK-NOT: call +; CHECK: vpminsb %xmm +; CHECK: {%k1} +define <16 x i8>@test_int_x86_avx512_mask_pmins_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) { + %res = call <16 x i8> @llvm.x86.avx512.mask.pmins.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) + %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmins.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %mask) + %res2 = add <16 x i8> %res, %res1 + ret <16 x i8> %res2 +} + +declare <32 x i8> @llvm.x86.avx512.mask.pmins.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_b_256 +; CHECK-NOT: call +; CHECK: vpminsb %ymm +; CHECK: {%k1} +define <32 x i8>@test_int_x86_avx512_mask_pmins_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) { + %res = call <32 x i8> @llvm.x86.avx512.mask.pmins.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) + %res1 = call <32 x i8> @llvm.x86.avx512.mask.pmins.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1) + %res2 = add <32 x i8> %res, %res1 + ret <32 x i8> %res2 +} + +declare <8 x i16> @llvm.x86.avx512.mask.pmins.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_w_128 +; CHECK-NOT: call +; CHECK: vpminsw %xmm +; CHECK: {%k1} +define <8 x i16>@test_int_x86_avx512_mask_pmins_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) { + %res = call <8 x i16> @llvm.x86.avx512.mask.pmins.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) + %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmins.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1) + %res2 = add <8 x i16> %res, %res1 + ret <8 x i16> %res2 +} + +declare <16 x i16> @llvm.x86.avx512.mask.pmins.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_w_256 +; CHECK-NOT: call +; CHECK: vpminsw %ymm +; CHECK: {%k1} +define <16 x 
i16>@test_int_x86_avx512_mask_pmins_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) { + %res = call <16 x i16> @llvm.x86.avx512.mask.pmins.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) + %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmins.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %mask) + %res2 = add <16 x i16> %res, %res1 + ret <16 x i16> %res2 +} + +declare <16 x i8> @llvm.x86.avx512.mask.pminu.b.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_b_128 +; CHECK-NOT: call +; CHECK: vpminub %xmm +; CHECK: {%k1} +define <16 x i8>@test_int_x86_avx512_mask_pminu_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) { + %res = call <16 x i8> @llvm.x86.avx512.mask.pminu.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %mask) + %res1 = call <16 x i8> @llvm.x86.avx512.mask.pminu.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> zeroinitializer, i16 %mask) + %res2 = add <16 x i8> %res, %res1 + ret <16 x i8> %res2 +} + +declare <32 x i8> @llvm.x86.avx512.mask.pminu.b.256(<32 x i8>, <32 x i8>, <32 x i8>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_b_256 +; CHECK-NOT: call +; CHECK: vpminub %ymm +; CHECK: {%k1} +define <32 x i8>@test_int_x86_avx512_mask_pminu_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) { + %res = call <32 x i8> @llvm.x86.avx512.mask.pminu.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) + %res1 = call <32 x i8> @llvm.x86.avx512.mask.pminu.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1) + %res2 = add <32 x i8> %res, %res1 + ret <32 x i8> %res2 +} + +declare <8 x i16> @llvm.x86.avx512.mask.pminu.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_w_128 +; CHECK-NOT: call +; CHECK: vpminuw %xmm +; CHECK: {%k1} +define <8 x i16>@test_int_x86_avx512_mask_pminu_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) { + %res = call <8 x i16> @llvm.x86.avx512.mask.pminu.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) + %res1 = call <8 x i16> @llvm.x86.avx512.mask.pminu.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1) + %res2 = add <8 x i16> %res, %res1 + ret <8 x i16> %res2 +} + +declare <16 x i16> @llvm.x86.avx512.mask.pminu.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_w_256 +; CHECK-NOT: call +; CHECK: vpminuw %ymm +; CHECK: {%k1} +define <16 x i16>@test_int_x86_avx512_mask_pminu_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) { + %res = call <16 x i16> @llvm.x86.avx512.mask.pminu.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %mask) + %res1 = call <16 x i16> @llvm.x86.avx512.mask.pminu.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> zeroinitializer, i16 %mask) + %res2 = add <16 x i16> %res, %res1 + ret <16 x i16> %res2 +} + +declare <16 x i8> @llvm.x86.avx512.mask.pavg.b.128(<16 x i8>, <16 x i8>, <16 x i8>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_b_128 +; CHECK-NOT: call +; CHECK: vpavgb %xmm +; CHECK: {%k1} +define <16 x i8>@test_int_x86_avx512_mask_pavg_b_128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) { + %res = call <16 x i8> @llvm.x86.avx512.mask.pavg.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 %x3) + %res1 = call <16 x i8> @llvm.x86.avx512.mask.pavg.b.128(<16 x i8> %x0, <16 x i8> %x1, <16 x i8> %x2, i16 -1) + %res2 = add <16 x i8> %res, %res1 + ret <16 x i8> %res2 +} + +declare <32 x i8> @llvm.x86.avx512.mask.pavg.b.256(<32 x i8>, <32 x i8>, 
<32 x i8>, i32) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_b_256 +; CHECK-NOT: call +; CHECK: vpavgb %ymm +; CHECK: {%k1} +define <32 x i8>@test_int_x86_avx512_mask_pavg_b_256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) { + %res = call <32 x i8> @llvm.x86.avx512.mask.pavg.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 %x3) + %res1 = call <32 x i8> @llvm.x86.avx512.mask.pavg.b.256(<32 x i8> %x0, <32 x i8> %x1, <32 x i8> %x2, i32 -1) + %res2 = add <32 x i8> %res, %res1 + ret <32 x i8> %res2 +} + +declare <8 x i16> @llvm.x86.avx512.mask.pavg.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_w_128 +; CHECK-NOT: call +; CHECK: vpavgw %xmm +; CHECK: {%k1} +define <8 x i16>@test_int_x86_avx512_mask_pavg_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) { + %res = call <8 x i16> @llvm.x86.avx512.mask.pavg.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3) + %res1 = call <8 x i16> @llvm.x86.avx512.mask.pavg.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1) + %res2 = add <8 x i16> %res, %res1 + ret <8 x i16> %res2 +} + +declare <16 x i16> @llvm.x86.avx512.mask.pavg.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pavg_w_256 +; CHECK-NOT: call +; CHECK: vpavgw %ymm +; CHECK: {%k1} +define <16 x i16>@test_int_x86_avx512_mask_pavg_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) { + %res = call <16 x i16> @llvm.x86.avx512.mask.pavg.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3) + %res1 = call <16 x i16> @llvm.x86.avx512.mask.pavg.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 -1) + %res2 = add <16 x i16> %res, %res1 + ret <16 x i16> %res2 +} diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll index 9d96c272f3554..dfd4986b85c1e 100644 --- a/test/CodeGen/X86/avx512vl-intrinsics.ll +++ b/test/CodeGen/X86/avx512vl-intrinsics.ll @@ -2586,4 +2586,212 @@ define <8 x float> @test_getexp_ps_256(<8 x float> %a0) { %res = call <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float> %a0, <8 x float> zeroinitializer, i8 -1) ret <8 x float> %res } -declare <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
\ No newline at end of file +declare <8 x float> @llvm.x86.avx512.mask.getexp.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone + +declare <4 x i32> @llvm.x86.avx512.mask.pmaxs.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_d_128 +; CHECK-NOT: call +; CHECK: vpmaxsd %xmm +; CHECK: {%k1} +define <4 x i32>@test_int_x86_avx512_mask_pmaxs_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) { + %res = call <4 x i32> @llvm.x86.avx512.mask.pmaxs.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2 ,i8 %mask) + %res1 = call <4 x i32> @llvm.x86.avx512.mask.pmaxs.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %mask) + %res2 = add <4 x i32> %res, %res1 + ret <4 x i32> %res2 +} + +declare <8 x i32> @llvm.x86.avx512.mask.pmaxs.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_d_256 +; CHECK-NOT: call +; CHECK: vpmaxsd %ymm +; CHECK: {%k1} +define <8 x i32>@test_int_x86_avx512_mask_pmaxs_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) { + %res = call <8 x i32> @llvm.x86.avx512.mask.pmaxs.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) + %res1 = call <8 x i32> @llvm.x86.avx512.mask.pmaxs.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1) + %res2 = add <8 x i32> %res, %res1 + ret <8 x i32> %res2 +} + +declare <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_q_128 +; CHECK-NOT: call +; CHECK: vpmaxsq %xmm +; CHECK: {%k1} +define <2 x i64>@test_int_x86_avx512_mask_pmaxs_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { + %res = call <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.pmaxs.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) + %res2 = add <2 x i64> %res, %res1 + ret <2 x i64> %res2 +} + +declare <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxs_q_256 +; CHECK-NOT: call +; CHECK: vpmaxsq %ymm +; CHECK: {%k1} +define <4 x i64>@test_int_x86_avx512_mask_pmaxs_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) { + %res = call <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.pmaxs.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %mask) + %res2 = add <4 x i64> %res, %res1 + ret <4 x i64> %res2 +} + +declare <4 x i32> @llvm.x86.avx512.mask.pmaxu.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_d_128 +; CHECK-NOT: call +; CHECK: vpmaxud %xmm +; CHECK: {%k1} +define <4 x i32>@test_int_x86_avx512_mask_pmaxu_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2,i8 %mask) { + %res = call <4 x i32> @llvm.x86.avx512.mask.pmaxu.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) + %res1 = call <4 x i32> @llvm.x86.avx512.mask.pmaxu.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %mask) + %res2 = add <4 x i32> %res, %res1 + ret <4 x i32> %res2 +} + +declare <8 x i32> @llvm.x86.avx512.mask.pmaxu.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_d_256 +; CHECK-NOT: call +; CHECK: vpmaxud %ymm +; CHECK: {%k1} +define <8 x i32>@test_int_x86_avx512_mask_pmaxu_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) { + %res = call <8 x i32> 
@llvm.x86.avx512.mask.pmaxu.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) + %res1 = call <8 x i32> @llvm.x86.avx512.mask.pmaxu.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1) + %res2 = add <8 x i32> %res, %res1 + ret <8 x i32> %res2 +} + +declare <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_q_128 +; CHECK-NOT: call +; CHECK: vpmaxuq %xmm +; CHECK: {%k1} +define <2 x i64>@test_int_x86_avx512_mask_pmaxu_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { + %res = call <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.pmaxu.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) + %res2 = add <2 x i64> %res, %res1 + ret <2 x i64> %res2 +} + +declare <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmaxu_q_256 +; CHECK-NOT: call +; CHECK: vpmaxuq %ymm +; CHECK: {%k1} +define <4 x i64>@test_int_x86_avx512_mask_pmaxu_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) { + %res = call <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.pmaxu.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %mask) + %res2 = add <4 x i64> %res, %res1 + ret <4 x i64> %res2 +} + +declare <4 x i32> @llvm.x86.avx512.mask.pmins.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_d_128 +; CHECK-NOT: call +; CHECK: vpminsd %xmm +; CHECK: {%k1} +define <4 x i32>@test_int_x86_avx512_mask_pmins_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) { + %res = call <4 x i32> @llvm.x86.avx512.mask.pmins.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) + %res1 = call <4 x i32> @llvm.x86.avx512.mask.pmins.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %mask) + %res2 = add <4 x i32> %res, %res1 + ret <4 x i32> %res2 +} + +declare <8 x i32> @llvm.x86.avx512.mask.pmins.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_d_256 +; CHECK-NOT: call +; CHECK: vpminsd %ymm +; CHECK: {%k1} +define <8 x i32>@test_int_x86_avx512_mask_pmins_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) { + %res = call <8 x i32> @llvm.x86.avx512.mask.pmins.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) + %res1 = call <8 x i32> @llvm.x86.avx512.mask.pmins.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1) + %res2 = add <8 x i32> %res, %res1 + ret <8 x i32> %res2 +} + +declare <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_q_128 +; CHECK-NOT: call +; CHECK: vpminsq %xmm +; CHECK: {%k1} +define <2 x i64>@test_int_x86_avx512_mask_pmins_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { + %res = call <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.pmins.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) + %res2 = add <2 x i64> %res, %res1 + ret <2 x i64> %res2 +} + +declare <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pmins_q_256 +; CHECK-NOT: call +; CHECK: vpminsq %ymm +; CHECK: {%k1} +define <4 x 
i64>@test_int_x86_avx512_mask_pmins_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) { + %res = call <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.pmins.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %mask) + %res2 = add <4 x i64> %res, %res1 + ret <4 x i64> %res2 +} + +declare <4 x i32> @llvm.x86.avx512.mask.pminu.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_d_128 +; CHECK-NOT: call +; CHECK: vpminud %xmm +; CHECK: {%k1} +define <4 x i32>@test_int_x86_avx512_mask_pminu_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) { + %res = call <4 x i32> @llvm.x86.avx512.mask.pminu.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %mask) + %res1 = call <4 x i32> @llvm.x86.avx512.mask.pminu.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %mask) + %res2 = add <4 x i32> %res, %res1 + ret <4 x i32> %res2 +} + +declare <8 x i32> @llvm.x86.avx512.mask.pminu.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_d_256 +; CHECK-NOT: call +; CHECK: vpminud %ymm +; CHECK: {%k1} +define <8 x i32>@test_int_x86_avx512_mask_pminu_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) { + %res = call <8 x i32> @llvm.x86.avx512.mask.pminu.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) + %res1 = call <8 x i32> @llvm.x86.avx512.mask.pminu.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1) + %res2 = add <8 x i32> %res, %res1 + ret <8 x i32> %res2 +} + +declare <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_q_128 +; CHECK-NOT: call +; CHECK: vpminuq %xmm +; CHECK: {%k1} +define <2 x i64>@test_int_x86_avx512_mask_pminu_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) { + %res = call <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) + %res1 = call <2 x i64> @llvm.x86.avx512.mask.pminu.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1) + %res2 = add <2 x i64> %res, %res1 + ret <2 x i64> %res2 +} + +declare <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8) + +; CHECK-LABEL: @test_int_x86_avx512_mask_pminu_q_256 +; CHECK-NOT: call +; CHECK: vpminuq %ymm +; CHECK: {%k1} +define <4 x i64>@test_int_x86_avx512_mask_pminu_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) { + %res = call <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %mask) + %res1 = call <4 x i64> @llvm.x86.avx512.mask.pminu.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %mask) + %res2 = add <4 x i64> %res, %res1 + ret <4 x i64> %res2 +}
\ No newline at end of file diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll index e0276e42d4d2e..89defa956a454 100644 --- a/test/CodeGen/X86/block-placement.ll +++ b/test/CodeGen/X86/block-placement.ll @@ -546,7 +546,7 @@ exit: declare i32 @__gxx_personality_v0(...) -define void @test_eh_lpad_successor() { +define void @test_eh_lpad_successor() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { ; Some times the landing pad ends up as the first successor of an invoke block. ; When this happens, a strange result used to fall out of updateTerminators: we ; didn't correctly locate the fallthrough successor, assuming blindly that the @@ -564,7 +564,7 @@ preheader: br label %loop lpad: - %lpad.val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %lpad.val = landingpad { i8*, i32 } cleanup resume { i8*, i32 } %lpad.val @@ -574,7 +574,7 @@ loop: declare void @fake_throw() noreturn -define void @test_eh_throw() { +define void @test_eh_throw() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { ; For blocks containing a 'throw' (or similar functionality), we have ; a no-return invoke. In this case, only EH successors will exist, and ; fallthrough simply won't occur. Make sure we don't crash trying to update @@ -591,7 +591,7 @@ continue: unreachable cleanup: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } cleanup unreachable } diff --git a/test/CodeGen/X86/branchfolding-landingpads.ll b/test/CodeGen/X86/branchfolding-landingpads.ll index 40ec92ea0d7f7..032b988124524 100644 --- a/test/CodeGen/X86/branchfolding-landingpads.ll +++ b/test/CodeGen/X86/branchfolding-landingpads.ll @@ -18,20 +18,20 @@ declare void @_throw() ; CHECK-LABEL: @main ; CHECK: %unreachable -define i32 @main(i8* %cleanup) { +define i32 @main(i8* %cleanup) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: invoke void @_throw() #0 to label %unreachable unwind label %catch.dispatch9 catch.dispatch9: ; preds = %entry - %tmp13 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp13 = landingpad { i8*, i32 } cleanup catch i8* null invoke void @_throw() #0 to label %unreachable unwind label %lpad31 lpad31: ; preds = %catch.dispatch9 - %tmp20 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp20 = landingpad { i8*, i32 } cleanup catch i8* null call void @foo() diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll index 7d5f380c1e289..5376601a95e3c 100644 --- a/test/CodeGen/X86/bswap-vector.ll +++ b/test/CodeGen/X86/bswap-vector.ll @@ -1,6 +1,6 @@ -; RUN: llc < %s -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK-NOSSSE3 -; RUN: llc < %s -mcpu=core2 | FileCheck %s --check-prefix=CHECK-SSSE3 -; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK-AVX2 +; RUN: llc < %s -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-NOSSSE3 +; RUN: llc < %s -mcpu=core2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3 +; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK-AVX --check-prefix=CHECK-AVX2 ; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE-AVX2 target datalayout = 
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" @@ -285,3 +285,174 @@ entry: %r = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %v) ret <4 x i16> %r } + +; +; Double BSWAP -> Identity +; + +define <8 x i16> @identity_v8i16(<8 x i16> %v) { +; CHECK-ALL-LABEL: identity_v8i16: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL: retq +entry: + %bs1 = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %v) + %bs2 = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %bs1) + ret <8 x i16> %bs2 +} + +define <4 x i32> @identity_v4i32(<4 x i32> %v) { +; CHECK-ALL-LABEL: identity_v4i32: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %v) + %bs2 = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %bs1) + ret <4 x i32> %bs2 +} + +define <2 x i64> @identity_v2i64(<2 x i64> %v) { +; CHECK-ALL-LABEL: identity_v2i64: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %v) + %bs2 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %bs1) + ret <2 x i64> %bs2 +} + +define <16 x i16> @identity_v16i16(<16 x i16> %v) { +; CHECK-ALL-LABEL: identity_v16i16: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %v) + %bs2 = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %bs1) + ret <16 x i16> %bs2 +} + +define <8 x i32> @identity_v8i32(<8 x i32> %v) { +; CHECK-ALL-LABEL: identity_v8i32: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %v) + %bs2 = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %bs1) + ret <8 x i32> %bs2 +} + +define <4 x i64> @identity_v4i64(<4 x i64> %v) { +; CHECK-ALL-LABEL: identity_v4i64: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %v) + %bs2 = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %bs1) + ret <4 x i64> %bs2 +} + +define <4 x i16> @identity_v4i16(<4 x i16> %v) { +; CHECK-ALL-LABEL: identity_v4i16: +; CHECK-ALL: # BB#0: # %entry +; CHECK-ALL-NEXT: retq +entry: + %bs1 = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %v) + %bs2 = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %bs1) + ret <4 x i16> %bs2 +} + +; +; Constant Folding +; + +define <8 x i16> @fold_v8i16() { +; CHECK-SSE-LABEL: fold_v8i16: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536] +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fold_v8i16: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536] +; CHECK-AVX-NEXT: retq +entry: + %r = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> <i16 0, i16 1, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6>) + ret <8 x i16> %r +} + +define <4 x i32> @fold_v4i32() { +; CHECK-SSE-LABEL: fold_v4i32: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,4294967295,33554432,4261412863] +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fold_v4i32: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [0,4294967295,33554432,4261412863] +; CHECK-AVX-NEXT: retq +entry: + %r = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> <i32 0, i32 -1, i32 2, i32 -3>) + ret <4 x i32> %r +} + +define <2 x i64> @fold_v2i64() { +; CHECK-SSE-LABEL: fold_v2i64: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615] +; CHECK-SSE-NEXT: retq +; +; 
CHECK-AVX-LABEL: fold_v2i64: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615] +; CHECK-AVX-NEXT: retq +entry: + %r = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> <i64 255, i64 -1>) + ret <2 x i64> %r +} + +define <16 x i16> @fold_v16i16() { +; CHECK-SSE-LABEL: fold_v16i16: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,256,65535,512,65023,1024,64511,1536] +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [63999,2048,63487,2560,62975,3072,62463,3584] +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fold_v16i16: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,256,65535,512,65023,1024,64511,1536,63999,2048,63487,2560,62975,3072,62463,3584] +; CHECK-AVX-NEXT: retq +entry: + %r = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> <i16 0, i16 1, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14>) + ret <16 x i16> %r +} + +define <8 x i32> @fold_v8i32() { +; CHECK-SSE-LABEL: fold_v8i32: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [0,16777216,4294967295,33554432] +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [4261412863,67108864,4227858431,100663296] +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fold_v8i32: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,16777216,4294967295,33554432,4261412863,67108864,4227858431,100663296] +; CHECK-AVX-NEXT: retq +entry: + %r = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> <i32 0, i32 1, i32 -1, i32 2, i32 -3, i32 4, i32 -5, i32 6>) + ret <8 x i32> %r +} + +define <4 x i64> @fold_v4i64() { +; CHECK-SSE-LABEL: fold_v4i64: +; CHECK-SSE: # BB#0: # %entry +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [18374686479671623680,18446744073709551615] +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [18446462598732840960,72056494526300160] +; CHECK-SSE-NEXT: retq +; +; CHECK-AVX-LABEL: fold_v4i64: +; CHECK-AVX: # BB#0: # %entry +; CHECK-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [18374686479671623680,18446744073709551615,18446462598732840960,72056494526300160] +; CHECK-AVX-NEXT: retq +entry: + %r = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> <i64 255, i64 -1, i64 65535, i64 16776960>) + ret <4 x i64> %r +} diff --git a/test/CodeGen/X86/catch.ll b/test/CodeGen/X86/catch.ll index 64e92783ac984..be7466e8abbb3 100644 --- a/test/CodeGen/X86/catch.ll +++ b/test/CodeGen/X86/catch.ll @@ -7,13 +7,13 @@ ; CHECK-NEXT: .quad .Lstr @str = private unnamed_addr constant [12 x i8] c"NSException\00" -define void @f() { +define void @f() personality i8* bitcast (void ()* @h to i8*) { invoke void @g() to label %invoke.cont unwind label %lpad invoke.cont: ret void lpad: - %tmp14 = landingpad { i8*, i32 } personality i8* bitcast (void ()* @h to i8*) + %tmp14 = landingpad { i8*, i32 } catch i8* getelementptr inbounds ([12 x i8], [12 x i8]* @str, i64 0, i64 0) ret void } diff --git a/test/CodeGen/X86/cfi.ll b/test/CodeGen/X86/cfi.ll index b57ff45f51e3a..d5a3a8a26a3f4 100644 --- a/test/CodeGen/X86/cfi.ll +++ b/test/CodeGen/X86/cfi.ll @@ -8,7 +8,7 @@ ; PIC: .cfi_lsda 27, .Lexception0 -define void @bar() { +define void @bar() personality i32 (...)* @__gxx_personality_v0 { entry: %call = invoke i32 @foo() to label %invoke.cont unwind label %lpad @@ -17,7 +17,7 @@ invoke.cont: ret void lpad: - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} catch i8* null ret void } diff --git a/test/CodeGen/X86/code_placement_eh.ll 
b/test/CodeGen/X86/code_placement_eh.ll index 2da3f9f53ef83..62fddffffc47c 100644 --- a/test/CodeGen/X86/code_placement_eh.ll +++ b/test/CodeGen/X86/code_placement_eh.ll @@ -6,7 +6,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" target triple = "i386-apple-darwin10.0" -define void @foo() { +define void @foo() personality i32 (...)* @__gxx_personality_v0 { invcont5: br label %bb15 @@ -22,12 +22,12 @@ bb18.i5.i: ; preds = %.noexc6.i.i, %bb51. to label %.noexc6.i.i unwind label %lpad.i.i ; <float> [#uses=0] lpad.i.i: ; preds = %bb18.i5.i, %.noexc6.i.i - %lpadval.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %lpadval.i.i = landingpad { i8*, i32 } catch i8* null unreachable lpad59.i: ; preds = %bb15 - %lpadval60.i.i = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %lpadval60.i.i = landingpad { i8*, i32 } catch i8* null unreachable diff --git a/test/CodeGen/X86/codegen-prepare-extload.ll b/test/CodeGen/X86/codegen-prepare-extload.ll index 65502b312b044..c5c761ee63eff 100644 --- a/test/CodeGen/X86/codegen-prepare-extload.ll +++ b/test/CodeGen/X86/codegen-prepare-extload.ll @@ -30,7 +30,7 @@ false: } ; Check that we manage to form a zextload is an operation with only one -; argument to explicitly extend is in the the way. +; argument to explicitly extend is in the way. ; OPTALL-LABEL: @promoteOneArg ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32 @@ -55,7 +55,7 @@ false: } ; Check that we manage to form a sextload is an operation with only one -; argument to explicitly extend is in the the way. +; argument to explicitly extend is in the way. ; Version with sext. ; OPTALL-LABEL: @promoteOneArgSExt ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p @@ -80,7 +80,7 @@ false: } ; Check that we manage to form a zextload is an operation with two -; arguments to explicitly extend is in the the way. +; arguments to explicitly extend is in the way. ; Extending %add will create two extensions: ; 1. One for %b. ; 2. One for %t. @@ -119,7 +119,7 @@ false: } ; Check that we manage to form a sextload is an operation with two -; arguments to explicitly extend is in the the way. +; arguments to explicitly extend is in the way. ; Version with sext. ; OPTALL-LABEL: @promoteTwoArgSExt ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p diff --git a/test/CodeGen/X86/disable-tail-calls.ll b/test/CodeGen/X86/disable-tail-calls.ll new file mode 100644 index 0000000000000..80e8fd74e92d0 --- /dev/null +++ b/test/CodeGen/X86/disable-tail-calls.ll @@ -0,0 +1,40 @@ +; RUN: llc < %s -march x86-64 | FileCheck %s --check-prefix=NO-OPTION +; RUN: llc < %s -march x86-64 -disable-tail-calls | FileCheck %s --check-prefix=DISABLE-TRUE +; RUN: llc < %s -march x86-64 -disable-tail-calls=false | FileCheck %s --check-prefix=DISABLE-FALSE + +; Check that command line option "-disable-tail-calls" overrides function +; attribute "disable-tail-calls". 
+ +; NO-OPTION-LABEL: {{\_?}}func_attr +; NO-OPTION: callq {{\_?}}callee + +; DISABLE-FALSE-LABEL: {{\_?}}func_attr +; DISABLE-FALSE: jmp {{\_?}}callee + +; DISABLE-TRUE-LABEL: {{\_?}}func_attr +; DISABLE-TRUE: callq {{\_?}}callee + +define i32 @func_attr(i32 %a) #0 { +entry: + %call = tail call i32 @callee(i32 %a) + ret i32 %call +} + +; NO-OPTION-LABEL: {{\_?}}func_noattr +; NO-OPTION: jmp {{\_?}}callee + +; DISABLE-FALSE-LABEL: {{\_?}}func_noattr +; DISABLE-FALSE: jmp {{\_?}}callee + +; DISABLE-TRUE-LABEL: {{\_?}}func_noattr +; DISABLE-TRUE: callq {{\_?}}callee + +define i32 @func_noattr(i32 %a) { +entry: + %call = tail call i32 @callee(i32 %a) + ret i32 %call +} + +declare i32 @callee(i32) + +attributes #0 = { "disable-tail-calls"="true" } diff --git a/test/CodeGen/X86/dllimport.ll b/test/CodeGen/X86/dllimport.ll index 9db654f22712b..34faaeb6fed7e 100644 --- a/test/CodeGen/X86/dllimport.ll +++ b/test/CodeGen/X86/dllimport.ll @@ -57,3 +57,7 @@ define void @use() nounwind { ret void } + +; CHECK: _fp: +; CHECK-NEXT: .long _fun +@fp = constant void ()* @fun diff --git a/test/CodeGen/X86/dwarf-eh-prepare.ll b/test/CodeGen/X86/dwarf-eh-prepare.ll index 25572d868da0d..9acfaeb193e75 100644 --- a/test/CodeGen/X86/dwarf-eh-prepare.ll +++ b/test/CodeGen/X86/dwarf-eh-prepare.ll @@ -9,7 +9,7 @@ declare void @might_throw() declare void @cleanup() -define i32 @simple_cleanup_catch() { +define i32 @simple_cleanup_catch() personality i32 (...)* @__gxx_personality_v0 { invoke void @might_throw() to label %cont unwind label %lpad @@ -22,7 +22,7 @@ cont: ; CHECK: ret i32 0 lpad: - %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %ehvals = landingpad { i8*, i32 } cleanup catch i8* @int_typeinfo %ehptr = extractvalue { i8*, i32 } %ehvals, 0 @@ -33,7 +33,7 @@ lpad: br i1 %int_match, label %catch_int, label %eh.resume ; CHECK: lpad: -; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 +; CHECK: landingpad { i8*, i32 } ; CHECK: call void @cleanup() ; CHECK: call i32 @llvm.eh.typeid.for ; CHECK: br i1 @@ -54,7 +54,7 @@ eh.resume: } -define i32 @catch_no_resume() { +define i32 @catch_no_resume() personality i32 (...)* @__gxx_personality_v0 { invoke void @might_throw() to label %cont unwind label %lpad @@ -62,7 +62,7 @@ cont: ret i32 0 lpad: - %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %ehvals = landingpad { i8*, i32 } catch i8* @int_typeinfo %ehptr = extractvalue { i8*, i32 } %ehvals, 0 %ehsel = extractvalue { i8*, i32 } %ehvals, 1 @@ -81,18 +81,18 @@ eh.resume: ; Check that we can prune the unreachable resume instruction. 
-; CHECK-LABEL: define i32 @catch_no_resume() { +; CHECK-LABEL: define i32 @catch_no_resume() personality i32 (...)* @__gxx_personality_v0 { ; CHECK: invoke void @might_throw() ; CHECK: ret i32 0 ; CHECK: lpad: -; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 +; CHECK: landingpad { i8*, i32 } ; CHECK-NOT: br i1 ; CHECK: ret i32 1 ; CHECK-NOT: call void @_Unwind_Resume ; CHECK: {{^[}]}} -define i32 @catch_cleanup_merge() { +define i32 @catch_cleanup_merge() personality i32 (...)* @__gxx_personality_v0 { invoke void @might_throw() to label %inner_invoke unwind label %outer_lpad inner_invoke: @@ -102,12 +102,12 @@ cont: ret i32 0 outer_lpad: - %ehvals1 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %ehvals1 = landingpad { i8*, i32 } catch i8* @int_typeinfo br label %catch.dispatch inner_lpad: - %ehvals2 = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 + %ehvals2 = landingpad { i8*, i32 } cleanup catch i8* @int_typeinfo call void @cleanup() @@ -138,11 +138,11 @@ eh.resume: ; CHECK: ret i32 0 ; ; CHECK: outer_lpad: -; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 +; CHECK: landingpad { i8*, i32 } ; CHECK: br label %catch.dispatch ; ; CHECK: inner_lpad: -; CHECK: landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0 +; CHECK: landingpad { i8*, i32 } ; CHECK: call void @cleanup() ; CHECK: br label %catch.dispatch ; diff --git a/test/CodeGen/X86/eh-label.ll b/test/CodeGen/X86/eh-label.ll index aff0bcfffcfee..d349174f95b74 100644 --- a/test/CodeGen/X86/eh-label.ll +++ b/test/CodeGen/X86/eh-label.ll @@ -3,7 +3,7 @@ declare void @g() -define void @f() { +define void @f() personality i8* bitcast (void ()* @g to i8*) { bb0: call void asm ".Lfunc_end0:", ""() ; CHECK: #APP @@ -12,7 +12,7 @@ bb0: invoke void @g() to label %bb2 unwind label %bb1 bb1: - landingpad { i8*, i32 } personality i8* bitcast (void ()* @g to i8*) + landingpad { i8*, i32 } catch i8* null call void @g() ret void diff --git a/test/CodeGen/X86/exception-label.ll b/test/CodeGen/X86/exception-label.ll index cafa1e630b96a..2270d2da1801e 100644 --- a/test/CodeGen/X86/exception-label.ll +++ b/test/CodeGen/X86/exception-label.ll @@ -8,13 +8,13 @@ declare void @g() -define void @f() { +define void @f() personality i8* bitcast (void ()* @g to i8*) { bb0: call void asm ".Lexception0:", ""() invoke void @g() to label %bb2 unwind label %bb1 bb1: - landingpad { i8*, i32 } personality i8* bitcast (void ()* @g to i8*) + landingpad { i8*, i32 } catch i8* null br label %bb2 diff --git a/test/CodeGen/X86/fast-isel-cmp-branch.ll b/test/CodeGen/X86/fast-isel-cmp-branch.ll index 684647ca94845..d7b64ed3a5b88 100644 --- a/test/CodeGen/X86/fast-isel-cmp-branch.ll +++ b/test/CodeGen/X86/fast-isel-cmp-branch.ll @@ -12,7 +12,7 @@ declare void @bar() -define void @foo(i32 %a, i32 %b) nounwind { +define void @foo(i32 %a, i32 %b) nounwind personality i32 (...)* @__gxx_personality_v0 { entry: %q = add i32 %a, 7 %r = add i32 %b, 9 @@ -26,7 +26,7 @@ true: return: ret void unw: - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/fast-isel-gep.ll b/test/CodeGen/X86/fast-isel-gep.ll index 67b30292be3ce..1886d3379aad3 100644 --- a/test/CodeGen/X86/fast-isel-gep.ll +++ b/test/CodeGen/X86/fast-isel-gep.ll @@ -89,7 +89,7 @@ define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind { ; PR9500, rdar://9156159 - Don't do non-local address mode folding, ; 
because it may require values which wouldn't otherwise be live out ; of their blocks. -define void @test6() { +define void @test6() personality i32 (...)* @__gxx_personality_v0 { if.end: ; preds = %if.then, %invoke.cont %tmp15 = load i64, i64* undef %dec = add i64 %tmp15, 13 @@ -103,7 +103,7 @@ invoke.cont16: ; preds = %if.then14 unreachable lpad: ; preds = %if.end19, %if.then14, %if.end, %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/fp-fast.ll b/test/CodeGen/X86/fp-fast.ll index 27af5738ca3e8..4f503af716a80 100644 --- a/test/CodeGen/X86/fp-fast.ll +++ b/test/CodeGen/X86/fp-fast.ll @@ -114,3 +114,81 @@ define float @test11(float %a) { ret float %t2 } +; Verify that the first two adds are independent regardless of how the inputs are +; commuted. The destination registers are used as source registers for the third add. + +define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) { +; CHECK-LABEL: reassociate_adds1: +; CHECK: # BB#0: +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq + %t0 = fadd float %x0, %x1 + %t1 = fadd float %t0, %x2 + %t2 = fadd float %t1, %x3 + ret float %t2 +} + +define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) { +; CHECK-LABEL: reassociate_adds2: +; CHECK: # BB#0: +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq + %t0 = fadd float %x0, %x1 + %t1 = fadd float %x2, %t0 + %t2 = fadd float %t1, %x3 + ret float %t2 +} + +define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) { +; CHECK-LABEL: reassociate_adds3: +; CHECK: # BB#0: +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq + %t0 = fadd float %x0, %x1 + %t1 = fadd float %t0, %x2 + %t2 = fadd float %x3, %t1 + ret float %t2 +} + +define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) { +; CHECK-LABEL: reassociate_adds4: +; CHECK: # BB#0: +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq + %t0 = fadd float %x0, %x1 + %t1 = fadd float %x2, %t0 + %t2 = fadd float %x3, %t1 + ret float %t2 +} + +; Verify that we reassociate some of these ops. The optimal balanced tree of adds is not +; produced because that would cost more compile time. 
+ +define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) { +; CHECK-LABEL: reassociate_adds5: +; CHECK: # BB#0: +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm3, %xmm2, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm5, %xmm4, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vaddss %xmm7, %xmm6, %xmm1 +; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq + %t0 = fadd float %x0, %x1 + %t1 = fadd float %t0, %x2 + %t2 = fadd float %t1, %x3 + %t3 = fadd float %t2, %x4 + %t4 = fadd float %t3, %x5 + %t5 = fadd float %t4, %x6 + %t6 = fadd float %t5, %x7 + ret float %t6 +} diff --git a/test/CodeGen/X86/gcc_except_table.ll b/test/CodeGen/X86/gcc_except_table.ll index b656dc9d68e24..82064c2a39078 100644 --- a/test/CodeGen/X86/gcc_except_table.ll +++ b/test/CodeGen/X86/gcc_except_table.ll @@ -3,7 +3,7 @@ ; RUN: llc -mtriple i686-pc-windows-gnu %s -o - | FileCheck %s --check-prefix=MINGW32 @_ZTIi = external constant i8* -define i32 @main() uwtable optsize ssp { +define i32 @main() uwtable optsize ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { ; APPLE: .cfi_startproc ; APPLE: .cfi_personality 155, ___gxx_personality_v0 ; APPLE: .cfi_lsda 16, Lexception0 @@ -36,7 +36,7 @@ entry: to label %try.cont unwind label %lpad lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } cleanup catch i8* bitcast (i8** @_ZTIi to i8*) br label %eh.resume diff --git a/test/CodeGen/X86/gcc_except_table_functions.ll b/test/CodeGen/X86/gcc_except_table_functions.ll index 7a64a01fa38dd..8e002ad142b80 100644 --- a/test/CodeGen/X86/gcc_except_table_functions.ll +++ b/test/CodeGen/X86/gcc_except_table_functions.ll @@ -10,7 +10,7 @@ declare void @filt1() declare void @_Z1fv() declare i32 @llvm.eh.typeid.for(i8*) -define i32 @main() uwtable { +define i32 @main() uwtable personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: invoke void @_Z1fv() to label %try.cont unwind label %lpad @@ -19,7 +19,7 @@ try.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } cleanup catch i8* bitcast (void ()* @filt0 to i8*) catch i8* bitcast (void ()* @filt1 to i8*) diff --git a/test/CodeGen/X86/global-fill.ll b/test/CodeGen/X86/global-fill.ll new file mode 100644 index 0000000000000..656c8ca2c323a --- /dev/null +++ b/test/CodeGen/X86/global-fill.ll @@ -0,0 +1,27 @@ +; RUN: llc -mtriple=x86_64-apple-darwin < %s | FileCheck %s + +@test1 = global [2 x i24] [i24 -1, i24 -1] +; CHECK-LABEL: test1: +; CHECK-NEXT: .long 16777215 +; CHECK-NEXT: .long 16777215 + +@test2 = global [2 x i7] [i7 1, i7 1] +; CHECK-LABEL: test2: +; CHECK-NEXT: .space 2,1 + +@test3 = global [4 x i128] [i128 -1, i128 -1, i128 -1, i128 -1] +; CHECK-LABEL: test3: +; CHECK-NEXT: .space 64,255 + +@test4 = global [3 x i16] [i16 257, i16 257, i16 257] +; CHECK-LABEL: test4: +; CHECK-NEXT: .space 6,1 + +@test5 = global [2 x [2 x i16]] [[2 x i16] [i16 257, i16 257], [2 x i16] [i16 -1, i16 -1]] +; CHECK-LABEL: test5: +; CHECK-NEXT: .space 4,1 +; CHECK-NEXT: .space 4,255 + +@test6 = global [2 x [2 x i16]] [[2 x i16] [i16 257, i16 257], [2 x i16] [i16 257, i16 257]] +; CHECK-LABEL: test6: +; CHECK-NEXT: .space 8,1 diff --git a/test/CodeGen/X86/global-sections.ll b/test/CodeGen/X86/global-sections.ll index 8c61411e53eb6..82547a6067429 100644 --- 
a/test/CodeGen/X86/global-sections.ll +++ b/test/CodeGen/X86/global-sections.ll @@ -61,12 +61,12 @@ bb5: declare void @G() -define void @F3(i32 %y) { +define void @F3(i32 %y) personality i8* bitcast (void ()* @G to i8*) { bb0: invoke void @G() to label %bb2 unwind label %bb1 bb1: - landingpad { i8*, i32 } personality i8* bitcast (void ()* @G to i8*) + landingpad { i8*, i32 } catch i8* null br label %bb2 bb2: diff --git a/test/CodeGen/X86/implicit-null-check-negative.ll b/test/CodeGen/X86/implicit-null-check-negative.ll new file mode 100644 index 0000000000000..e0210d9315f14 --- /dev/null +++ b/test/CodeGen/X86/implicit-null-check-negative.ll @@ -0,0 +1,53 @@ +; RUN: llc -mtriple=x86_64-apple-macosx -O3 -debug-only=faultmaps -enable-implicit-null-checks < %s | FileCheck %s +; REQUIRES: asserts + +; List cases where we should *not* be emitting implicit null checks. + +; CHECK-NOT: Fault Map Output + +define i32 @imp_null_check_load(i32* %x, i32* %y) { + entry: + %c = icmp eq i32* %x, null +; It isn't legal to move the load from %x from "not_null" to here -- +; the store to %y could be aliasing it. + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: + store i32 0, i32* %y + %t = load i32, i32* %x + ret i32 %t +} + +define i32 @imp_null_check_gep_load(i32* %x) { + entry: + %c = icmp eq i32* %x, null + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: +; null + 5000 * sizeof(i32) lies outside the null page and hence the +; load to %t cannot be assumed to be reliably faulting. + %x.gep = getelementptr i32, i32* %x, i32 5000 + %t = load i32, i32* %x.gep + ret i32 %t +} + +define i32 @imp_null_check_load_no_md(i32* %x) { +; Everything is okay except that the !never.executed metadata is +; missing. 
+ entry: + %c = icmp eq i32* %x, null + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: + %t = load i32, i32* %x + ret i32 %t +} diff --git a/test/CodeGen/X86/implicit-null-check.ll b/test/CodeGen/X86/implicit-null-check.ll new file mode 100644 index 0000000000000..f4c539800fbbf --- /dev/null +++ b/test/CodeGen/X86/implicit-null-check.ll @@ -0,0 +1,118 @@ +; RUN: llc -O3 -mtriple=x86_64-apple-macosx -enable-implicit-null-checks < %s | FileCheck %s + +define i32 @imp_null_check_load(i32* %x) { +; CHECK-LABEL: _imp_null_check_load: +; CHECK: Ltmp1: +; CHECK: movl (%rdi), %eax +; CHECK: retq +; CHECK: Ltmp0: +; CHECK: movl $42, %eax +; CHECK: retq + + entry: + %c = icmp eq i32* %x, null + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: + %t = load i32, i32* %x + ret i32 %t +} + +define i32 @imp_null_check_gep_load(i32* %x) { +; CHECK-LABEL: _imp_null_check_gep_load: +; CHECK: Ltmp3: +; CHECK: movl 128(%rdi), %eax +; CHECK: retq +; CHECK: Ltmp2: +; CHECK: movl $42, %eax +; CHECK: retq + + entry: + %c = icmp eq i32* %x, null + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: + %x.gep = getelementptr i32, i32* %x, i32 32 + %t = load i32, i32* %x.gep + ret i32 %t +} + +define i32 @imp_null_check_add_result(i32* %x, i32 %p) { +; CHECK-LABEL: _imp_null_check_add_result: +; CHECK: Ltmp5: +; CHECK: addl (%rdi), %esi +; CHECK: movl %esi, %eax +; CHECK: retq +; CHECK: Ltmp4: +; CHECK: movl $42, %eax +; CHECK: retq + + entry: + %c = icmp eq i32* %x, null + br i1 %c, label %is_null, label %not_null + + is_null: + ret i32 42 + + not_null: + %t = load i32, i32* %x + %p1 = add i32 %t, %p + ret i32 %p1 +} + +; CHECK-LABEL: __LLVM_FaultMaps: + +; Version: +; CHECK-NEXT: .byte 1 + +; Reserved x2 +; CHECK-NEXT: .byte 0 +; CHECK-NEXT: .short 0 + +; # functions: +; CHECK-NEXT: .long 3 + +; FunctionAddr: +; CHECK-NEXT: .quad _imp_null_check_add_result +; NumFaultingPCs +; CHECK-NEXT: .long 1 +; Reserved: +; CHECK-NEXT: .long 0 +; Fault[0].Type: +; CHECK-NEXT: .long 1 +; Fault[0].FaultOffset: +; CHECK-NEXT: .long Ltmp5-_imp_null_check_add_result +; Fault[0].HandlerOffset: +; CHECK-NEXT: .long Ltmp4-_imp_null_check_add_result + +; FunctionAddr: +; CHECK-NEXT: .quad _imp_null_check_gep_load +; NumFaultingPCs +; CHECK-NEXT: .long 1 +; Reserved: +; CHECK-NEXT: .long 0 +; Fault[0].Type: +; CHECK-NEXT: .long 1 +; Fault[0].FaultOffset: +; CHECK-NEXT: .long Ltmp3-_imp_null_check_gep_load +; Fault[0].HandlerOffset: +; CHECK-NEXT: .long Ltmp2-_imp_null_check_gep_load + +; FunctionAddr: +; CHECK-NEXT: .quad _imp_null_check_load +; NumFaultingPCs +; CHECK-NEXT: .long 1 +; Reserved: +; CHECK-NEXT: .long 0 +; Fault[0].Type: +; CHECK-NEXT: .long 1 +; Fault[0].FaultOffset: +; CHECK-NEXT: .long Ltmp1-_imp_null_check_load +; Fault[0].HandlerOffset: +; CHECK-NEXT: .long Ltmp0-_imp_null_check_load diff --git a/test/CodeGen/X86/inalloca-invoke.ll b/test/CodeGen/X86/inalloca-invoke.ll index cf5cbe142ec72..9a184e563b196 100644 --- a/test/CodeGen/X86/inalloca-invoke.ll +++ b/test/CodeGen/X86/inalloca-invoke.ll @@ -11,7 +11,7 @@ declare void @begin(%Iter* sret) declare void @plus(%Iter* sret, %Iter*, i32) declare void @reverse(%frame.reverse* inalloca align 4) -define i32 @main() { +define i32 @main() personality i32 (...)* @pers { %temp.lvalue = alloca %Iter br label %blah @@ -49,7 +49,7 @@ invoke.cont5: ; preds = %invoke.cont ret i32 0 lpad: ; preds = %invoke.cont, %entry - %lp = landingpad { i8*, i32 } personality i32 (...)* @pers + %lp 
= landingpad { i8*, i32 } cleanup unreachable } diff --git a/test/CodeGen/X86/indirect-hidden.ll b/test/CodeGen/X86/indirect-hidden.ll index 309375d930247..9e1b7d3735540 100644 --- a/test/CodeGen/X86/indirect-hidden.ll +++ b/test/CodeGen/X86/indirect-hidden.ll @@ -8,10 +8,10 @@ declare void @throws() -define void @get_indirect_hidden() { +define void @get_indirect_hidden() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { invoke void @throws() to label %end unwind label %lpad lpad: - %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp = landingpad { i8*, i32 } catch i8* bitcast (i8** @hidden_typeid to i8*) br label %end @@ -19,10 +19,10 @@ end: ret void } -define void @get_indirect() { +define void @get_indirect() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { invoke void @throws() to label %end unwind label %lpad lpad: - %tmp = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp = landingpad { i8*, i32 } catch i8* bitcast (i8** @normal_typeid to i8*) br label %end diff --git a/test/CodeGen/X86/large-gep-chain.ll b/test/CodeGen/X86/large-gep-chain.ll index 44247b8658a7a..8df282983f568 100644 --- a/test/CodeGen/X86/large-gep-chain.ll +++ b/test/CodeGen/X86/large-gep-chain.ll @@ -13,7 +13,7 @@ @7 = external unnamed_addr constant [27 x i8], align 1 @8 = external unnamed_addr constant [63 x i8], align 1 -define void @main() uwtable ssp { +define void @main() uwtable ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { bb: br i1 undef, label %bb1, label %bb2 @@ -25313,7 +25313,7 @@ bb25275: ; preds = %bb25274 br label %bb25272 bb25276: ; preds = %bb25283, %bb25274, %bb25273 - %tmp25277 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp25277 = landingpad { i8*, i32 } cleanup br label %bb25361 @@ -25383,7 +25383,7 @@ bb25297: ; preds = %bb25296 br label %bb25300 bb25298: ; preds = %bb25296, %bb25295, %bb25290, %bb25287 - %tmp25299 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp25299 = landingpad { i8*, i32 } cleanup br label %bb25360 @@ -25461,7 +25461,7 @@ bb25323: ; preds = %bb25319 to label %bb25326 unwind label %bb25324 bb25324: ; preds = %bb25357, %bb25344, %bb25343, %bb25342, %bb25337, %bb25334, %bb25333, %bb25323, %bb25313, %bb25307, %bb25306 - %tmp25325 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp25325 = landingpad { i8*, i32 } cleanup br label %bb25359 @@ -25562,7 +25562,7 @@ bb25354: ; preds = %bb25353 br label %bb25358 bb25355: ; preds = %bb25353, %bb25352, %bb25351 - %tmp25356 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %tmp25356 = landingpad { i8*, i32 } cleanup br label %bb25359 diff --git a/test/CodeGen/X86/patchpoint-invoke.ll b/test/CodeGen/X86/patchpoint-invoke.ll index 98e9eb3b6a44d..b7f198d960a61 100644 --- a/test/CodeGen/X86/patchpoint-invoke.ll +++ b/test/CodeGen/X86/patchpoint-invoke.ll @@ -2,7 +2,7 @@ ; Test invoking of patchpoints ; -define i64 @patchpoint_invoke(i64 %p1, i64 %p2) { +define i64 @patchpoint_invoke(i64 %p1, i64 %p2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; CHECK-LABEL: patchpoint_invoke: ; CHECK-NEXT: [[FUNC_BEGIN:.L.*]]: @@ -25,7 +25,7 @@ success: ret i64 %result threw: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 
= landingpad { i8*, i32 } catch i8* null ret i64 0 } diff --git a/test/CodeGen/X86/personality.ll b/test/CodeGen/X86/personality.ll index 424a30734f001..53162ebc86880 100644 --- a/test/CodeGen/X86/personality.ll +++ b/test/CodeGen/X86/personality.ll @@ -2,13 +2,13 @@ ; RUN: llc < %s -mtriple=i386-apple-darwin9 | FileCheck %s -check-prefix=X32 ; PR1632 -define void @_Z1fv() { +define void @_Z1fv() personality i32 (...)* @__gxx_personality_v0 { entry: invoke void @_Z1gv() to label %return unwind label %unwind unwind: ; preds = %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup br i1 false, label %eh_then, label %cleanup20 @@ -17,7 +17,7 @@ eh_then: ; preds = %unwind to label %return unwind label %unwind10 unwind10: ; preds = %eh_then - %exn10 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn10 = landingpad {i8*, i32} cleanup %upgraded.eh_select13 = extractvalue { i8*, i32 } %exn10, 1 %upgraded.eh_select131 = sext i32 %upgraded.eh_select13 to i64 @@ -41,8 +41,10 @@ declare void @__cxa_end_catch() declare i32 @__gxx_personality_v0(...) +; X64-NOT: .quad ___gxx_personality_v0 ; X64: .cfi_personality 155, ___gxx_personality_v0 +; X32-NOT: .long ___gxx_personality_v0 ; X32: .cfi_personality 155, L___gxx_personality_v0$non_lazy_ptr ; X32: .section __IMPORT,__pointers,non_lazy_symbol_pointers diff --git a/test/CodeGen/X86/personality_size.ll b/test/CodeGen/X86/personality_size.ll index 79d131b82b2ec..41f1ac8cad642 100644 --- a/test/CodeGen/X86/personality_size.ll +++ b/test/CodeGen/X86/personality_size.ll @@ -2,13 +2,13 @@ ; RUN: llc < %s -relocation-model=pic -mtriple=i386-pc-solaris2.11 | FileCheck %s -check-prefix=X32 ; PR1632 -define void @_Z1fv() { +define void @_Z1fv() personality i32 (...)* @__gxx_personality_v0 { entry: invoke void @_Z1gv() to label %return unwind label %unwind unwind: ; preds = %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup ret void diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll index 21463b8539dc7..dbe5bd646c7fd 100644 --- a/test/CodeGen/X86/pmul.ll +++ b/test/CodeGen/X86/pmul.ll @@ -1,5 +1,6 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE41 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=core-avx2 | FileCheck %s --check-prefix=AVX2 define <16 x i8> @mul8c(<16 x i8> %i) nounwind { ; SSE2-LABEL: mul8c: @@ -75,10 +76,6 @@ define <2 x i64> @b(<2 x i64> %i) nounwind { ; ALL-NEXT: movdqa {{.*#+}} xmm1 = [117,117] ; ALL-NEXT: movdqa %xmm0, %xmm2 ; ALL-NEXT: pmuludq %xmm1, %xmm2 -; ALL-NEXT: pxor %xmm3, %xmm3 -; ALL-NEXT: pmuludq %xmm0, %xmm3 -; ALL-NEXT: psllq $32, %xmm3 -; ALL-NEXT: paddq %xmm3, %xmm2 ; ALL-NEXT: psrlq $32, %xmm0 ; ALL-NEXT: pmuludq %xmm1, %xmm0 ; ALL-NEXT: psllq $32, %xmm0 @@ -248,3 +245,35 @@ entry: %A = mul <2 x i64> %i, %j ret <2 x i64> %A } + +define <4 x i64> @b1(<4 x i64> %i) nounwind { +; AVX2-LABEL: @b1 +; AVX2: vpbroadcastq +; AVX2-NEXT: vpmuludq +; AVX2-NEXT: vpsrlq $32 +; AVX2-NEXT: vpmuludq +; AVX2-NEXT: vpsllq $32 +; AVX2-NEXT: vpaddq +; AVX2-NEXT: retq +entry: + %A = mul <4 x i64> %i, < i64 117, i64 117, i64 117, i64 117 > + ret <4 x i64> %A +} + +define <4 x i64> @b2(<4 x i64> %i, <4 x i64> %j) nounwind { +; AVX2-LABEL: @b2 +; AVX2: vpmuludq +; AVX2-NEXT: vpsrlq $32 +; AVX2-NEXT: 
vpmuludq +; AVX2-NEXT: vpsllq $32 +; AVX2-NEXT: vpaddq +; AVX2-NEXT: vpsrlq $32 +; AVX2-NEXT: vpmuludq +; AVX2-NEXT: vpsllq $32 +; AVX2-NEXT: vpaddq +; AVX2-NEXT: retq +entry: + %A = mul <4 x i64> %i, %j + ret <4 x i64> %A +} + diff --git a/test/CodeGen/X86/pr3522.ll b/test/CodeGen/X86/pr3522.ll index 867f2828d4d99..9e048d59d4ee7 100644 --- a/test/CodeGen/X86/pr3522.ll +++ b/test/CodeGen/X86/pr3522.ll @@ -5,7 +5,7 @@ target triple = "i386-pc-linux-gnu" @.str = external constant [13 x i8] ; <[13 x i8]*> [#uses=1] -define void @_ada_c34018a() { +define void @_ada_c34018a() personality i32 (...)* @__gxx_personality_v0 { entry: %0 = tail call i32 @report__ident_int(i32 90) ; <i32> [#uses=1] %1 = trunc i32 %0 to i8 ; <i8> [#uses=1] @@ -22,7 +22,7 @@ return: ; preds = %lpad ret void lpad: ; preds = %entry - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup %2 = icmp eq i8 %1, 90 ; <i1> [#uses=1] br i1 %2, label %return, label %bb22 diff --git a/test/CodeGen/X86/scev-interchange.ll b/test/CodeGen/X86/scev-interchange.ll index e224c0858aff0..9cbb462e47da4 100644 --- a/test/CodeGen/X86/scev-interchange.ll +++ b/test/CodeGen/X86/scev-interchange.ll @@ -51,7 +51,7 @@ declare fastcc void @_ZN11FE_Q_Helper12_GLOBAL__N_116invert_numberingERKSt6vecto declare fastcc void @_ZN4FE_QILi3EE14get_dpo_vectorEj(%"struct.std::vector<int,std::allocator<int> >"* noalias nocapture sret, i32) -define fastcc void @_ZN4FE_QILi3EEC1Ej(i32 %degree) { +define fastcc void @_ZN4FE_QILi3EEC1Ej(i32 %degree) personality i32 (...)* @__gxx_personality_v0 { entry: invoke fastcc void @_ZNSt6vectorIbSaIbEEC1EmRKbRKS0_(%"struct.std::vector<bool,std::allocator<bool> >"* undef, i64 1, i8* undef) to label %invcont.i unwind label %lpad.i @@ -149,7 +149,7 @@ bb71.i: ; preds = %bb.i.i.i262.i, %bb66.i to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i.i unwind label %lpad.i.i.i.i.i.i ; <i8*> [#uses=0] lpad.i.i.i.i.i.i: ; preds = %bb71.i - %exn.i.i.i.i.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i.i.i.i.i = landingpad {i8*, i32} cleanup unreachable @@ -164,7 +164,7 @@ _ZNSt6vectorIjSaIjEED1Ev.exit.i.i: ; preds = %_ZNSt12_Vector_baseIjSaIjEEC2EmRK to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i12.i.i unwind label %lpad.i.i.i.i8.i.i ; <i8*> [#uses=0] lpad.i.i.i.i8.i.i: ; preds = %_ZNSt6vectorIjSaIjEED1Ev.exit.i.i - %exn.i.i.i.i8.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i.i.i8.i.i = landingpad {i8*, i32} cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i9.i.i unwind label %lpad.i19.i.i @@ -183,7 +183,7 @@ bb50.i.i.i: ; preds = %bb.i.i.i.i.i.i.i.i.i.i, %_ZNSt12_Vector_baseIjSaIjEEC2Em to label %bb83.i unwind label %lpad188.i lpad.i19.i.i: ; preds = %lpad.i.i.i.i8.i.i - %exn.i19.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i19.i.i = landingpad {i8*, i32} cleanup unreachable @@ -198,7 +198,7 @@ invcont84.i: ; preds = %bb83.i to label %_ZNSt12_Vector_baseIjSaIjEEC2EmRKS0_.exit.i.i.i.i unwind label %lpad.i.i.i.i315.i ; <i8*> [#uses=0] lpad.i.i.i.i315.i: ; preds = %invcont84.i - %exn.i.i.i.i315.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i.i.i315.i = landingpad {i8*, i32} cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i316.i unwind label %lpad.i352.i @@ -217,7 +217,7 @@ bb50.i.i: ; preds = %bb.i.i.i.i.i.i.i.i320.i, %_ZNSt12_Vector_baseIjSaIjEEC2EmR to label %invcont86.i unwind label 
%lpad200.i lpad.i352.i: ; preds = %lpad.i.i.i.i315.i - %exn.i352.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i352.i = landingpad {i8*, i32} cleanup unreachable @@ -242,7 +242,7 @@ invcont101.i: ; preds = %bb100.i to label %_ZN10FullMatrixIdEC1Ejj.exit.i.i unwind label %lpad.i.i.i.i.i lpad.i.i.i.i.i: ; preds = %invcont101.i - %exn.i.i.i.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i.i.i.i = landingpad {i8*, i32} cleanup unreachable @@ -251,7 +251,7 @@ _ZN10FullMatrixIdEC1Ejj.exit.i.i: ; preds = %invcont101.i to label %_ZN10FullMatrixIdEC1Ejj.exit28.i.i unwind label %lpad.i.i.i27.i.i lpad.i.i.i27.i.i: ; preds = %_ZN10FullMatrixIdEC1Ejj.exit.i.i - %exn.i.i.i27.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i.i27.i.i = landingpad {i8*, i32} cleanup invoke void @_Unwind_Resume(i8* undef) to label %.noexc.i.i unwind label %lpad.i.i @@ -272,7 +272,7 @@ bb.i.i.i297.i.i: ; preds = %bb58.i.i unreachable lpad.i.i: ; preds = %lpad.i.i.i27.i.i - %exn.i.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i.i = landingpad {i8*, i32} cleanup unreachable @@ -312,67 +312,67 @@ bb29.loopexit.i.i: ; preds = %.noexc232.i br label %bb9.i216.i lpad.i: ; preds = %entry - %exn.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn.i = landingpad {i8*, i32} cleanup unreachable lpad120.i: ; preds = %invcont.i - %exn120.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn120.i = landingpad {i8*, i32} cleanup unreachable lpad124.i: ; preds = %invcont1.i - %exn124.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn124.i = landingpad {i8*, i32} cleanup unreachable lpad128.i: ; preds = %invcont3.i - %exn128.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn128.i = landingpad {i8*, i32} cleanup unreachable lpad132.i: ; preds = %invcont4.i - %exn132.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn132.i = landingpad {i8*, i32} cleanup unreachable lpad136.i: ; preds = %invcont6.i - %exn136.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn136.i = landingpad {i8*, i32} cleanup unreachable lpad140.i: ; preds = %bb21.i, %invcont7.i - %exn140.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn140.i = landingpad {i8*, i32} cleanup unreachable lpad144.i: ; preds = %bb10.i168.i, %invcont9.i - %exn144.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn144.i = landingpad {i8*, i32} cleanup unreachable lpad148.i: ; preds = %invcont10.i - %exn148.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn148.i = landingpad {i8*, i32} cleanup unreachable lpad188.i: ; preds = %bb50.i.i.i - %exn188.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn188.i = landingpad {i8*, i32} cleanup unreachable lpad196.i: ; preds = %bb.i191.i - %exn196 = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn196 = landingpad {i8*, i32} cleanup unreachable lpad200.i: ; preds = %bb50.i.i - %exn200.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn200.i = landingpad {i8*, i32} cleanup unreachable lpad204.i: ; preds = %invcont86.i - %exn204.i = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn204.i = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/seh-catch-all-win32.ll 
b/test/CodeGen/X86/seh-catch-all-win32.ll new file mode 100644 index 0000000000000..28b0bca962ea8 --- /dev/null +++ b/test/CodeGen/X86/seh-catch-all-win32.ll @@ -0,0 +1,85 @@ +; RUN: llc -mtriple=i686-windows-msvc < %s | FileCheck %s + +; 32-bit catch-all has to use a filter function because that's how it saves the +; exception code. + +@str = linkonce_odr unnamed_addr constant [27 x i8] c"GetExceptionCode(): 0x%lx\0A\00", align 1 + +declare i32 @_except_handler3(...) +declare void @crash() +declare i32 @printf(i8* nocapture readonly, ...) nounwind +declare i32 @llvm.eh.typeid.for(i8*) +declare i8* @llvm.frameaddress(i32) +declare i8* @llvm.framerecover(i8*, i8*, i32) +declare void @llvm.frameescape(...) +declare i8* @llvm.x86.seh.exceptioninfo(i8*, i8*) + +define i32 @main() personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) { +entry: + %__exceptioncode = alloca i32, align 4 + call void (...) @llvm.frameescape(i32* %__exceptioncode) + invoke void @crash() #5 + to label %__try.cont unwind label %lpad + +lpad: ; preds = %entry + %0 = landingpad { i8*, i32 } + catch i8* bitcast (i32 ()* @"filt$main" to i8*) + %1 = extractvalue { i8*, i32 } %0, 1 + %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @"filt$main" to i8*)) #4 + %matches = icmp eq i32 %1, %2 + br i1 %matches, label %__except, label %eh.resume + +__except: ; preds = %lpad + %3 = load i32, i32* %__exceptioncode, align 4 + %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @str, i32 0, i32 0), i32 %3) #4 + br label %__try.cont + +__try.cont: ; preds = %entry, %__except + ret i32 0 + +eh.resume: ; preds = %lpad + resume { i8*, i32 } %0 +} + +define internal i32 @"filt$main"() { +entry: + %0 = tail call i8* @llvm.frameaddress(i32 1) + %1 = tail call i8* @llvm.framerecover(i8* bitcast (i32 ()* @main to i8*), i8* %0, i32 0) + %__exceptioncode = bitcast i8* %1 to i32* + %2 = tail call i8* @llvm.x86.seh.exceptioninfo(i8* bitcast (i32 ()* @main to i8*), i8* %0) + %3 = bitcast i8* %2 to i32** + %4 = load i32*, i32** %3, align 4 + %5 = load i32, i32* %4, align 4 + store i32 %5, i32* %__exceptioncode, align 4 + ret i32 1 +} + +; Check that we can get the exception code from eax to the printf. + +; CHECK-LABEL: _main: +; CHECK: Lmain$frame_escape_0 = [[code_offs:[-0-9]+]] +; CHECK: Lmain$frame_escape_1 = [[reg_offs:[-0-9]+]] +; CHECK: movl %esp, [[reg_offs]](%ebp) +; CHECK: movl $L__ehtable$main, +; EH state 0 +; CHECK: movl $0, -4(%ebp) +; CHECK: calll _crash +; CHECK: retl +; CHECK: # Block address taken +; stackrestore +; CHECK: movl [[reg_offs]](%ebp), %esp +; EH state -1 +; CHECK: movl [[code_offs]](%ebp), %[[code:[a-z]+]] +; CHECK: movl $-1, -4(%ebp) +; CHECK-DAG: movl %[[code]], 4(%esp) +; CHECK-DAG: movl $_str, (%esp) +; CHECK: calll _printf + +; CHECK: .section .xdata,"dr" +; CHECK: L__ehtable$main +; CHECK-NEXT: .long -1 +; CHECK-NEXT: .long _filt$main +; CHECK-NEXT: .long Ltmp{{[0-9]+}} + +; CHECK-LABEL: _filt$main: +; CHECK: movl diff --git a/test/CodeGen/X86/seh-catch-all.ll b/test/CodeGen/X86/seh-catch-all.ll index 51840134eda38..1c1a3c2139d6d 100644 --- a/test/CodeGen/X86/seh-catch-all.ll +++ b/test/CodeGen/X86/seh-catch-all.ll @@ -6,13 +6,13 @@ declare i32 @__C_specific_handler(...) declare void @crash() declare i32 @printf(i8* nocapture readonly, ...) 
nounwind -define i32 @main() { +define i32 @main() personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) { entry: invoke void @crash() to label %__try.cont unwind label %lpad lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %0 = landingpad { i8*, i32 } catch i8* null %1 = extractvalue { i8*, i32 } %0, 0 %2 = ptrtoint i8* %1 to i64 @@ -30,6 +30,7 @@ eh.resume: ; Check that we can get the exception code from eax to the printf. ; CHECK-LABEL: main: +; CHECK: callq crash ; CHECK: retq ; CHECK: # Block address taken ; CHECK: leaq str(%rip), %rcx @@ -38,7 +39,7 @@ eh.resume: ; CHECK: .seh_handlerdata ; CHECK-NEXT: .long 1 -; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL -; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL+1 -; CHECK-NEXT: 1 -; CHECK-NEXT: .Ltmp{{[0-9]+}}@IMGREL +; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL +; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL+1 +; CHECK-NEXT: .long 1 +; CHECK-NEXT: .long .Ltmp{{[0-9]+}}@IMGREL diff --git a/test/CodeGen/X86/seh-except-finally.ll b/test/CodeGen/X86/seh-except-finally.ll index c796f1ef2888d..4327a64468f92 100644 --- a/test/CodeGen/X86/seh-except-finally.ll +++ b/test/CodeGen/X86/seh-except-finally.ll @@ -33,7 +33,7 @@ declare void @crash() declare i32 @filt() ; Function Attrs: nounwind uwtable -define void @use_both() #1 { +define void @use_both() #1 personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) { entry: %exn.slot = alloca i8* %ehselector.slot = alloca i32 @@ -49,7 +49,7 @@ invoke.cont2: ; preds = %invoke.cont br label %__try.cont lpad: ; preds = %entry - %1 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %1 = landingpad { i8*, i32 } cleanup catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@use_both@@" to i8*) %2 = extractvalue { i8*, i32 } %1, 0 @@ -61,7 +61,7 @@ lpad: ; preds = %entry to label %invoke.cont3 unwind label %lpad1 lpad1: ; preds = %lpad, %invoke.cont - %5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %5 = landingpad { i8*, i32 } catch i8* bitcast (i32 (i8*, i8*)* @"\01?filt$0@0@use_both@@" to i8*) %6 = extractvalue { i8*, i32 } %5, 0 store i8* %6, i8** %exn.slot diff --git a/test/CodeGen/X86/seh-filter.ll b/test/CodeGen/X86/seh-filter.ll index 6a3a23edb1ae3..37ed15841a93d 100644 --- a/test/CodeGen/X86/seh-filter.ll +++ b/test/CodeGen/X86/seh-filter.ll @@ -1,14 +1,14 @@ ; RUN: llc -O0 -mtriple=x86_64-windows-msvc < %s | FileCheck %s declare void @g() -define void @f() { +define void @f() personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) { invoke void @g() to label %return unwind label %lpad return: ret void lpad: - %ehptrs = landingpad {i8*, i32} personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %ehptrs = landingpad {i8*, i32} filter [0 x i8*] zeroinitializer call void @__cxa_call_unexpected(i8* null) unreachable diff --git a/test/CodeGen/X86/seh-finally.ll b/test/CodeGen/X86/seh-finally.ll index 91baed570f256..350cd932f4815 100644 --- a/test/CodeGen/X86/seh-finally.ll +++ b/test/CodeGen/X86/seh-finally.ll @@ -1,10 +1,12 @@ -; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s +; RUN: llc -mtriple=x86_64-windows-msvc < %s | FileCheck %s --check-prefix=X64 +; RUN: sed -e 's/__C_specific_handler/_except_handler3/' %s | \ +; RUN: llc -mtriple=i686-windows-msvc | FileCheck %s --check-prefix=X86 @str_recovered = internal unnamed_addr constant [10 x i8] c"recovered\00", align 1 declare void @crash() -define i32 @main() { +define i32 @main() 
personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) { entry: invoke void @crash() to label %invoke.cont unwind label %lpad @@ -15,7 +17,7 @@ invoke.cont: ; preds = %entry ret i32 0 lpad: ; preds = %entry - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %0 = landingpad { i8*, i32 } cleanup %1 = extractvalue { i8*, i32 } %0, 0 %2 = extractvalue { i8*, i32 } %0, 1 @@ -26,23 +28,38 @@ invoke.cont1: ; preds = %lpad resume { i8*, i32 } %0 terminate.lpad: ; preds = %lpad - %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %3 = landingpad { i8*, i32 } catch i8* null call void @abort() unreachable } -; CHECK-LABEL: main: -; CHECK: .seh_handlerdata -; CHECK-NEXT: .long 1 -; CHECK-NEXT: .long .Ltmp0@IMGREL -; CHECK-NEXT: .long .Ltmp1@IMGREL -; CHECK-NEXT: .long main.cleanup@IMGREL -; CHECK-NEXT: .long 0 - -; CHECK-LABEL: main.cleanup: -; CHECK: callq puts -; CHECK: retq +; X64-LABEL: main: +; X64: retq + +; X64: .seh_handlerdata +; X64-NEXT: .long 1 +; X64-NEXT: .long .Ltmp0@IMGREL +; X64-NEXT: .long .Ltmp1@IMGREL +; X64-NEXT: .long main.cleanup@IMGREL +; X64-NEXT: .long 0 + +; X64-LABEL: main.cleanup: +; X64: callq puts +; X64: retq + +; X86-LABEL: _main: +; X86: retl + +; X86: .section .xdata,"dr" +; X86: L__ehtable$main: +; X86-NEXT: .long -1 +; X86-NEXT: .long 0 +; X86-NEXT: .long _main.cleanup + +; X86-LABEL: _main.cleanup: +; X86: calll _puts +; X86: retl declare i32 @__C_specific_handler(...) diff --git a/test/CodeGen/X86/seh-safe-div-win32.ll b/test/CodeGen/X86/seh-safe-div-win32.ll new file mode 100644 index 0000000000000..0f76ec07a6b61 --- /dev/null +++ b/test/CodeGen/X86/seh-safe-div-win32.ll @@ -0,0 +1,172 @@ +; RUN: llc -mtriple i686-pc-windows-msvc < %s | FileCheck %s + +; This test case is also intended to be run manually as a complete functional +; test. It should link, print something, and exit zero rather than crashing. 
+; It is the hypothetical lowering of a C source program that looks like: +; +; int safe_div(int *n, int *d) { +; int r; +; __try { +; __try { +; r = *n / *d; +; } __except(GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION) { +; puts("EXCEPTION_ACCESS_VIOLATION"); +; r = -1; +; } +; } __except(GetExceptionCode() == EXCEPTION_INT_DIVIDE_BY_ZERO) { +; puts("EXCEPTION_INT_DIVIDE_BY_ZERO"); +; r = -2; +; } +; return r; +; } + +@str1 = internal constant [27 x i8] c"EXCEPTION_ACCESS_VIOLATION\00" +@str2 = internal constant [29 x i8] c"EXCEPTION_INT_DIVIDE_BY_ZERO\00" + +define i32 @safe_div(i32* %n, i32* %d) personality i8* bitcast (i32 (...)* @_except_handler3 to i8*) { +entry: + %r = alloca i32, align 4 + store i32 42, i32* %r + invoke void @try_body(i32* %r, i32* %n, i32* %d) + to label %__try.cont unwind label %lpad + +lpad: + %vals = landingpad { i8*, i32 } + catch i8* bitcast (i32 ()* @safe_div_filt0 to i8*) + catch i8* bitcast (i32 ()* @safe_div_filt1 to i8*) + %ehptr = extractvalue { i8*, i32 } %vals, 0 + %sel = extractvalue { i8*, i32 } %vals, 1 + %filt0_val = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @safe_div_filt0 to i8*)) + %is_filt0 = icmp eq i32 %sel, %filt0_val + br i1 %is_filt0, label %handler0, label %eh.dispatch1 + +eh.dispatch1: + %filt1_val = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @safe_div_filt1 to i8*)) + %is_filt1 = icmp eq i32 %sel, %filt1_val + br i1 %is_filt1, label %handler1, label %eh.resume + +handler0: + call void @puts(i8* getelementptr ([27 x i8], [27 x i8]* @str1, i32 0, i32 0)) + store i32 -1, i32* %r, align 4 + br label %__try.cont + +handler1: + call void @puts(i8* getelementptr ([29 x i8], [29 x i8]* @str2, i32 0, i32 0)) + store i32 -2, i32* %r, align 4 + br label %__try.cont + +eh.resume: + resume { i8*, i32 } %vals + +__try.cont: + %safe_ret = load i32, i32* %r, align 4 + ret i32 %safe_ret +} + +; Normal path code + +; CHECK: {{^}}_safe_div: +; CHECK: movl $42, [[rloc:.*\(%ebp\)]] +; CHECK: leal [[rloc]], +; CHECK: calll _try_body +; CHECK: [[cont_bb:LBB0_[0-9]+]]: +; CHECK: movl [[rloc]], %eax +; CHECK: retl + +; Landing pad code + +; CHECK: [[handler0:Ltmp[0-9]+]]: # Block address taken +; CHECK: # %handler0 +; Restore SP +; CHECK: movl {{.*}}(%ebp), %esp +; CHECK: calll _puts +; CHECK: jmp [[cont_bb]] + +; CHECK: [[handler1:Ltmp[0-9]+]]: # Block address taken +; CHECK: # %handler1 +; Restore SP +; CHECK: movl {{.*}}(%ebp), %esp +; CHECK: calll _puts +; CHECK: jmp [[cont_bb]] + +; CHECK: .section .xdata,"dr" +; CHECK: L__ehtable$safe_div: +; CHECK-NEXT: .long -1 +; CHECK-NEXT: .long _safe_div_filt1 +; CHECK-NEXT: .long [[handler1]] +; CHECK-NEXT: .long 0 +; CHECK-NEXT: .long _safe_div_filt0 +; CHECK-NEXT: .long [[handler0]] + +define void @try_body(i32* %r, i32* %n, i32* %d) { +entry: + %0 = load i32, i32* %n, align 4 + %1 = load i32, i32* %d, align 4 + %div = sdiv i32 %0, %1 + store i32 %div, i32* %r, align 4 + ret void +} + +; The prototype of these filter functions is: +; int filter(EXCEPTION_POINTERS *eh_ptrs, void *rbp); + +; The definition of EXCEPTION_POINTERS is: +; typedef struct _EXCEPTION_POINTERS { +; EXCEPTION_RECORD *ExceptionRecord; +; CONTEXT *ContextRecord; +; } EXCEPTION_POINTERS; + +; The definition of EXCEPTION_RECORD is: +; typedef struct _EXCEPTION_RECORD { +; DWORD ExceptionCode; +; ... +; } EXCEPTION_RECORD; + +; FIXME: Use llvm.eh.exceptioninfo for this. 
+declare i32 @safe_div_filt0() +declare i32 @safe_div_filt1() +; define i32 @safe_div_filt0() { +; %eh_ptrs_c = bitcast i8* %eh_ptrs to i32** +; %eh_rec = load i32*, i32** %eh_ptrs_c +; %eh_code = load i32, i32* %eh_rec +; ; EXCEPTION_ACCESS_VIOLATION = 0xC0000005 +; %cmp = icmp eq i32 %eh_code, 3221225477 +; %filt.res = zext i1 %cmp to i32 +; ret i32 %filt.res +; } +; define i32 @safe_div_filt1() { +; %eh_ptrs_c = bitcast i8* %eh_ptrs to i32** +; %eh_rec = load i32*, i32** %eh_ptrs_c +; %eh_code = load i32, i32* %eh_rec +; ; EXCEPTION_INT_DIVIDE_BY_ZERO = 0xC0000094 +; %cmp = icmp eq i32 %eh_code, 3221225620 +; %filt.res = zext i1 %cmp to i32 +; ret i32 %filt.res +; } + +@str_result = internal constant [21 x i8] c"safe_div result: %d\0A\00" + +define i32 @main() { + %d.addr = alloca i32, align 4 + %n.addr = alloca i32, align 4 + + store i32 10, i32* %n.addr, align 4 + store i32 2, i32* %d.addr, align 4 + %r1 = call i32 @safe_div(i32* %n.addr, i32* %d.addr) + call void (i8*, ...) @printf(i8* getelementptr ([21 x i8], [21 x i8]* @str_result, i32 0, i32 0), i32 %r1) + + store i32 10, i32* %n.addr, align 4 + store i32 0, i32* %d.addr, align 4 + %r2 = call i32 @safe_div(i32* %n.addr, i32* %d.addr) + call void (i8*, ...) @printf(i8* getelementptr ([21 x i8], [21 x i8]* @str_result, i32 0, i32 0), i32 %r2) + + %r3 = call i32 @safe_div(i32* %n.addr, i32* null) + call void (i8*, ...) @printf(i8* getelementptr ([21 x i8], [21 x i8]* @str_result, i32 0, i32 0), i32 %r3) + ret i32 0 +} + +declare i32 @_except_handler3(...) +declare i32 @llvm.eh.typeid.for(i8*) readnone nounwind +declare void @puts(i8*) +declare void @printf(i8*, ...) +declare void @abort() diff --git a/test/CodeGen/X86/seh-safe-div.ll b/test/CodeGen/X86/seh-safe-div.ll index 80b15b601020d..699e58ee8bae8 100644 --- a/test/CodeGen/X86/seh-safe-div.ll +++ b/test/CodeGen/X86/seh-safe-div.ll @@ -23,14 +23,14 @@ @str1 = internal constant [27 x i8] c"EXCEPTION_ACCESS_VIOLATION\00" @str2 = internal constant [29 x i8] c"EXCEPTION_INT_DIVIDE_BY_ZERO\00" -define i32 @safe_div(i32* %n, i32* %d) { +define i32 @safe_div(i32* %n, i32* %d) personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) { entry: %r = alloca i32, align 4 invoke void @try_body(i32* %r, i32* %n, i32* %d) to label %__try.cont unwind label %lpad lpad: - %vals = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__C_specific_handler to i8*) + %vals = landingpad { i8*, i32 } catch i8* bitcast (i32 (i8*, i8*)* @safe_div_filt0 to i8*) catch i8* bitcast (i32 (i8*, i8*)* @safe_div_filt1 to i8*) %ehptr = extractvalue { i8*, i32 } %vals, 0 diff --git a/test/CodeGen/X86/setjmp-spills.ll b/test/CodeGen/X86/setjmp-spills.ll index c35caae97af6f..43136e018c888 100644 --- a/test/CodeGen/X86/setjmp-spills.ll +++ b/test/CodeGen/X86/setjmp-spills.ll @@ -78,7 +78,7 @@ second: ; This is the same as above, but using "invoke" rather than "call" to ; call setjmp(). 
-define void @setjmp_invoker() { +define void @setjmp_invoker() personality void ()* @personality { ; X86-32-LABEL: setjmp_invoker: ; X86-64-LABEL: setjmp_invoker: %a1 = call i32 @get_val() @@ -103,7 +103,7 @@ cont: br i1 %setjmp_result, label %second, label %first lpad: - %lp = landingpad { i8*, i32 } personality void ()* @personality cleanup + %lp = landingpad { i8*, i32 } cleanup unreachable first: diff --git a/test/CodeGen/X86/split-eh-lpad-edges.ll b/test/CodeGen/X86/split-eh-lpad-edges.ll index 852214e7c248b..82dd3b7674f92 100644 --- a/test/CodeGen/X86/split-eh-lpad-edges.ll +++ b/test/CodeGen/X86/split-eh-lpad-edges.ll @@ -10,7 +10,7 @@ %struct.objc_selector = type opaque @"\01l_objc_msgSend_fixup_alloc" = external global %struct._message_ref_t, align 16 ; <%struct._message_ref_t*> [#uses=2] -define %struct.NSArray* @newFetchedRowsForFetchPlan_MT(%struct.FetchPlanHeader* %fetchPlan, %struct.objc_selector* %selectionMethod, %struct.NSObject* %selectionParameter) ssp { +define %struct.NSArray* @newFetchedRowsForFetchPlan_MT(%struct.FetchPlanHeader* %fetchPlan, %struct.objc_selector* %selectionMethod, %struct.NSObject* %selectionParameter) ssp personality i32 (...)* @__gxx_personality_v0 { entry: %0 = invoke %struct.NSObject* null(%struct.NSObject* null, %struct._message_ref_t* @"\01l_objc_msgSend_fixup_alloc") to label %invcont unwind label %lpad ; <%struct.NSObject*> [#uses=1] @@ -28,7 +28,7 @@ invcont27: ; preds = %invcont26 lpad: ; preds = %invcont26, %invcont, %entry %pool.1 = phi %struct.NSAutoreleasePool* [ null, %entry ], [ null, %invcont ], [ null, %invcont26 ] ; <%struct.NSAutoreleasePool*> [#uses=0] - %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0 + %exn = landingpad {i8*, i32} cleanup unreachable } diff --git a/test/CodeGen/X86/stack-protector.ll b/test/CodeGen/X86/stack-protector.ll index acaba6dc17f8a..398b8548747ba 100644 --- a/test/CodeGen/X86/stack-protector.ll +++ b/test/CodeGen/X86/stack-protector.ll @@ -2097,7 +2097,7 @@ entry: ; test18a: Addr-of a variable passed into an invoke instruction. ; no ssp attribute ; Requires no protector. -define i32 @test18a() { +define i32 @test18a() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test18a: ; LINUX-I386-NOT: calll __stack_chk_fail @@ -2125,7 +2125,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2134,7 +2134,7 @@ lpad: ; ssp attribute ; Requires no protector. ; Function Attrs: ssp -define i32 @test18b() #0 { +define i32 @test18b() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test18b: ; LINUX-I386-NOT: calll __stack_chk_fail @@ -2162,7 +2162,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2171,7 +2171,7 @@ lpad: ; sspstrong attribute ; Requires protector. 
; Function Attrs: sspstrong -define i32 @test18c() #1 { +define i32 @test18c() #1 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test18c: ; LINUX-I386: mov{{l|q}} %gs: @@ -2199,7 +2199,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2208,7 +2208,7 @@ lpad: ; sspreq attribute ; Requires protector. ; Function Attrs: sspreq -define i32 @test18d() #2 { +define i32 @test18d() #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test18d: ; LINUX-I386: mov{{l|q}} %gs: @@ -2236,7 +2236,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2244,7 +2244,7 @@ lpad: ; (GEP followed by an invoke) ; no ssp attribute ; Requires no protector. -define i32 @test19a() { +define i32 @test19a() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test19a: ; LINUX-I386-NOT: calll __stack_chk_fail @@ -2274,7 +2274,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2284,7 +2284,7 @@ lpad: ; ssp attribute ; Requires no protector. ; Function Attrs: ssp -define i32 @test19b() #0 { +define i32 @test19b() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test19b: ; LINUX-I386-NOT: calll __stack_chk_fail @@ -2314,7 +2314,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2324,7 +2324,7 @@ lpad: ; sspstrong attribute ; Requires protector. ; Function Attrs: sspstrong -define i32 @test19c() #1 { +define i32 @test19c() #1 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test19c: ; LINUX-I386: mov{{l|q}} %gs: @@ -2354,7 +2354,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } @@ -2364,7 +2364,7 @@ lpad: ; sspreq attribute ; Requires protector. 
; Function Attrs: sspreq -define i32 @test19d() #2 { +define i32 @test19d() #2 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: ; LINUX-I386-LABEL: test19d: ; LINUX-I386: mov{{l|q}} %gs: @@ -2398,7 +2398,7 @@ invoke.cont: ret i32 0 lpad: - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) + %0 = landingpad { i8*, i32 } catch i8* null ret i32 0 } diff --git a/test/CodeGen/X86/statepoint-invoke.ll b/test/CodeGen/X86/statepoint-invoke.ll index df78978c117ca..81b9ab89ebca5 100644 --- a/test/CodeGen/X86/statepoint-invoke.ll +++ b/test/CodeGen/X86/statepoint-invoke.ll @@ -9,7 +9,7 @@ declare i32 @"personality_function"() define i64 addrspace(1)* @test_basic(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj1) -gc "statepoint-example" { +gc "statepoint-example" personality i32 ()* @"personality_function" { entry: ; CHECK: Ltmp{{[0-9]+}}: ; CHECK: callq some_call @@ -31,7 +31,7 @@ exceptional_return: ; CHECK: Ltmp{{[0-9]+}}: ; CHECK: movq ; CHECK: retq - %landing_pad = landingpad { i8*, i32 } personality i32 ()* @"personality_function" + %landing_pad = landingpad { i8*, i32 } cleanup %relocate_token = extractvalue { i8*, i32 } %landing_pad, 1 %obj.relocated1 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token, i32 13, i32 13) @@ -46,7 +46,7 @@ exceptional_return: define i64 addrspace(1)* @test_result(i64 addrspace(1)* %obj, i64 addrspace(1)* %obj1) - gc "statepoint-example" { + gc "statepoint-example" personality i32 ()* @personality_function { entry: ; CHECK: .Ltmp{{[0-9]+}}: ; CHECK: callq some_other_call @@ -63,7 +63,7 @@ normal_return: exceptional_return: ; CHECK: .Ltmp{{[0-9]+}}: ; CHECK: movq - %landing_pad = landingpad { i8*, i32 } personality i32 ()* @personality_function + %landing_pad = landingpad { i8*, i32 } cleanup %relocate_token = extractvalue { i8*, i32 } %landing_pad, 1 %obj.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token, i32 13, i32 13) @@ -76,7 +76,7 @@ exceptional_return: ; CHECK: .align 4 define i64 addrspace(1)* @test_same_val(i1 %cond, i64 addrspace(1)* %val1, i64 addrspace(1)* %val2, i64 addrspace(1)* %val3) - gc "statepoint-example" { + gc "statepoint-example" personality i32 ()* @"personality_function" { entry: br i1 %cond, label %left, label %right @@ -120,14 +120,14 @@ normal_return: ret i64 addrspace(1)* %ret exceptional_return.left: - %landing_pad = landingpad { i8*, i32 } personality i32 ()* @"personality_function" + %landing_pad = landingpad { i8*, i32 } cleanup %relocate_token = extractvalue { i8*, i32 } %landing_pad, 1 %val.relocated2 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token, i32 13, i32 13) ret i64 addrspace(1)* %val.relocated2 exceptional_return.right: - %landing_pad1 = landingpad { i8*, i32 } personality i32 ()* @"personality_function" + %landing_pad1 = landingpad { i8*, i32 } cleanup %relocate_token1 = extractvalue { i8*, i32 } %landing_pad1, 1 %val.relocated3 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token1, i32 13, i32 13) @@ -135,7 +135,7 @@ exceptional_return.right: } define i64 addrspace(1)* @test_null_undef(i64 addrspace(1)* %val1) - gc "statepoint-example" { + gc "statepoint-example" personality i32 ()* @"personality_function" { ; CHECK-LABEL: test_null_undef: entry: ; CHECK: callq some_call @@ -152,7 +152,7 @@ normal_return: ret i64 addrspace(1)* %null.relocated exceptional_return: - %landing_pad = landingpad { 
i8*, i32 } personality i32 ()* @"personality_function" + %landing_pad = landingpad { i8*, i32 } cleanup %relocate_token = extractvalue { i8*, i32 } %landing_pad, 1 %null.relocated2 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token, i32 13, i32 13) @@ -161,7 +161,7 @@ exceptional_return: } define i64 addrspace(1)* @test_alloca_and_const(i64 addrspace(1)* %val1) - gc "statepoint-example" { + gc "statepoint-example" personality i32 ()* @"personality_function" { ; CHECK-LABEL: test_alloca_and_const: entry: %a = alloca i32 @@ -183,7 +183,7 @@ exceptional_return: ; CHECK: movl $15 ; CHECK-NEXT: popq ; CHECK-NEXT: retq - %landing_pad = landingpad { i8*, i32 } personality i32 ()* @"personality_function" + %landing_pad = landingpad { i8*, i32 } cleanup %relocate_token = extractvalue { i8*, i32 } %landing_pad, 1 %aa.rel2 = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(i32 %relocate_token, i32 14, i32 14) diff --git a/test/CodeGen/X86/statepoint-stack-usage.ll b/test/CodeGen/X86/statepoint-stack-usage.ll index 02d20c9fcb96e..a4aa747af8cff 100644 --- a/test/CodeGen/X86/statepoint-stack-usage.ll +++ b/test/CodeGen/X86/statepoint-stack-usage.ll @@ -14,6 +14,8 @@ define i32 @back_to_back_calls(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 a ; CHECK: movq %rdi, 16(%rsp) ; CHECK: movq %rdx, 8(%rsp) ; CHECK: movq %rsi, (%rsp) +; There should be no more than three moves +; CHECK-NOT: movq %safepoint_token = tail call i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) %a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 12, i32 12) %b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 12, i32 13) @@ -52,9 +54,53 @@ define i32 @reserve_first(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrsp ret i32 1 } +; Test that stack slots are reused for invokes +define i32 @back_to_back_invokes(i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) #1 gc "statepoint-example" personality i32 ()* @"personality_function" { +; CHECK-LABEL: back_to_back_invokes +entry: + ; The exact stores don't matter, but there need to be three stack slots created + ; CHECK: movq %rdi, 16(%rsp) + ; CHECK: movq %rdx, 8(%rsp) + ; CHECK: movq %rsi, (%rsp) + ; CHECK: callq + %safepoint_token = invoke i32 (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 addrspace(1)* %a, i32 addrspace(1)* %b, i32 addrspace(1)* %c) + to label %normal_return unwind label %exceptional_return + +normal_return: + %a1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 12, i32 12) + %b1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 12, i32 13) + %c1 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 12, i32 14) + ; Should work even through bitcasts + %c1.casted = bitcast i32 addrspace(1)* %c1 to i8 addrspace(1)* + ; This is the key check. There should NOT be any memory moves here + ; CHECK-NOT: movq + ; CHECK: callq + %safepoint_token2 = invoke i32 (i64, i32, void ()*, i32, i32, ...) 
@llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i8 addrspace(1)* %c1.casted, i32 addrspace(1)* %b1, i32 addrspace(1)* %a1) + to label %normal_return2 unwind label %exceptional_return2 + +normal_return2: + %a2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 12, i32 14) + %b2 = tail call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token2, i32 12, i32 13) + %c2 = tail call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(i32 %safepoint_token2, i32 12, i32 12) + ret i32 1 + +exceptional_return: + %landing_pad = landingpad { i8*, i32 } + cleanup + ret i32 0 + +exceptional_return2: + %landing_pad2 = landingpad { i8*, i32 } + cleanup + ret i32 0 +} + ; Function Attrs: nounwind declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32, i32, i32) #3 +declare i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(i32, i32, i32) #3 declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...) -attributes #1 = { uwtable }
\ No newline at end of file +declare i32 @"personality_function"() + +attributes #1 = { uwtable } diff --git a/test/CodeGen/X86/switch.ll b/test/CodeGen/X86/switch.ll index a4dece65479c6..748fd6f238b19 100644 --- a/test/CodeGen/X86/switch.ll +++ b/test/CodeGen/X86/switch.ll @@ -16,23 +16,18 @@ bb1: tail call void @g(i32 1) br label %return bb2: tail call void @g(i32 1) br label %return return: ret void -; Should be lowered as straight compares in -O0 mode. -; NOOPT-LABEL: basic -; NOOPT: subl $1, %eax -; NOOPT: je -; NOOPT: subl $3, %eax -; NOOPT: je -; NOOPT: subl $4, %eax -; NOOPT: je -; NOOPT: subl $5, %eax -; NOOPT: je - -; Jump table otherwise. +; Lowered as a jump table, both with and without optimization. ; CHECK-LABEL: basic ; CHECK: decl ; CHECK: cmpl $4 ; CHECK: ja ; CHECK: jmpq *.LJTI +; NOOPT-LABEL: basic +; NOOPT: decl +; NOOPT: subl $4 +; NOOPT: ja +; NOOPT: movq .LJTI +; NOOPT: jmpq } @@ -205,6 +200,21 @@ return: ret void ; CHECK: leal -5 ; CHECK: cmpl $10 ; CHECK: jmpq *.LJTI + +; At -O0, we don't build jump tables for only parts of a switch. +; NOOPT-LABEL: optimal_jump_table1 +; NOOPT: testl %edi, %edi +; NOOPT: je +; NOOPT: subl $5, %eax +; NOOPT: je +; NOOPT: subl $6, %eax +; NOOPT: je +; NOOPT: subl $12, %eax +; NOOPT: je +; NOOPT: subl $13, %eax +; NOOPT: je +; NOOPT: subl $15, %eax +; NOOPT: je } @@ -489,6 +499,8 @@ entry: i32 30, label %bb3 i32 40, label %bb4 i32 50, label %bb5 + i32 60, label %bb6 + i32 70, label %bb6 ], !prof !4 bb0: tail call void @g(i32 0) br label %return bb1: tail call void @g(i32 1) br label %return @@ -496,16 +508,87 @@ bb2: tail call void @g(i32 2) br label %return bb3: tail call void @g(i32 3) br label %return bb4: tail call void @g(i32 4) br label %return bb5: tail call void @g(i32 5) br label %return +bb6: tail call void @g(i32 6) br label %return +bb7: tail call void @g(i32 7) br label %return return: ret void -; To balance the tree by weight, the pivot is shifted to the right, moving hot -; cases closer to the root. +; Without branch probabilities, the pivot would be 40, since that would yield +; equal-sized sub-trees. When taking weights into account, case 70 becomes the +; pivot. Since there is room for 3 cases in a leaf, cases 50 and 60 are also +; included in the right-hand side because that doesn't reduce their rank. + ; CHECK-LABEL: left_leaning_weight_balanced_tree ; CHECK-NOT: cmpl -; CHECK: cmpl $39 +; CHECK: cmpl $49 +} + +!4 = !{!"branch_weights", i32 1, i32 10, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1000} + + +define void @left_leaning_weight_balanced_tree2(i32 %x) { +entry: + switch i32 %x, label %return [ + i32 0, label %bb0 + i32 10, label %bb1 + i32 20, label %bb2 + i32 30, label %bb3 + i32 40, label %bb4 + i32 50, label %bb5 + i32 60, label %bb6 + i32 70, label %bb6 + ], !prof !5 +bb0: tail call void @g(i32 0) br label %return +bb1: tail call void @g(i32 1) br label %return +bb2: tail call void @g(i32 2) br label %return +bb3: tail call void @g(i32 3) br label %return +bb4: tail call void @g(i32 4) br label %return +bb5: tail call void @g(i32 5) br label %return +bb6: tail call void @g(i32 6) br label %return +bb7: tail call void @g(i32 7) br label %return +return: ret void + +; Same as the previous test, except case 50 has higher rank to the left than it +; would have on the right. Case 60 would have the same rank on both sides, so is +; moved into the leaf. 
+ +; CHECK-LABEL: left_leaning_weight_balanced_tree2 +; CHECK-NOT: cmpl +; CHECK: cmpl $59 +} + +!5 = !{!"branch_weights", i32 1, i32 10, i32 1, i32 1, i32 1, i32 1, i32 90, i32 70, i32 1000} + + +define void @right_leaning_weight_balanced_tree(i32 %x) { +entry: + switch i32 %x, label %return [ + i32 0, label %bb0 + i32 10, label %bb1 + i32 20, label %bb2 + i32 30, label %bb3 + i32 40, label %bb4 + i32 50, label %bb5 + i32 60, label %bb6 + i32 70, label %bb6 + ], !prof !6 +bb0: tail call void @g(i32 0) br label %return +bb1: tail call void @g(i32 1) br label %return +bb2: tail call void @g(i32 2) br label %return +bb3: tail call void @g(i32 3) br label %return +bb4: tail call void @g(i32 4) br label %return +bb5: tail call void @g(i32 5) br label %return +bb6: tail call void @g(i32 6) br label %return +bb7: tail call void @g(i32 7) br label %return +return: ret void + +; Analogous to left_leaning_weight_balanced_tree. + +; CHECK-LABEL: right_leaning_weight_balanced_tree +; CHECK-NOT: cmpl +; CHECK: cmpl $19 } -!4 = !{!"branch_weights", i32 1, i32 10, i32 1, i32 1, i32 1, i32 10, i32 10} +!6 = !{!"branch_weights", i32 1, i32 1000, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 10} define void @jump_table_affects_balance(i32 %x) { diff --git a/test/CodeGen/X86/unaligned-32-byte-memops.ll b/test/CodeGen/X86/unaligned-32-byte-memops.ll index b337a80b84b30..d979c16f4abdd 100644 --- a/test/CodeGen/X86/unaligned-32-byte-memops.ll +++ b/test/CodeGen/X86/unaligned-32-byte-memops.ll @@ -1,66 +1,72 @@ -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s --check-prefix=SANDYB --check-prefix=CHECK -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx-i | FileCheck %s --check-prefix=SANDYB --check-prefix=CHECK -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=btver2 | FileCheck %s --check-prefix=BTVER2 --check-prefix=CHECK -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 | FileCheck %s --check-prefix=HASWELL --check-prefix=CHECK +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=AVXSLOW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,-slow-unaligned-mem-32 | FileCheck %s --check-prefix=AVXFAST +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX2 -; On Sandy Bridge or Ivy Bridge, we should not generate an unaligned 32-byte load -; because that is slower than two 16-byte loads. -; Other AVX-capable chips don't have that problem. +; Don't generate an unaligned 32-byte load on this test if that is slower than two 16-byte loads. define <8 x float> @load32bytes(<8 x float>* %Ap) { - ; CHECK-LABEL: load32bytes - - ; SANDYB: vmovaps - ; SANDYB: vinsertf128 - ; SANDYB: retq - - ; BTVER2: vmovups - ; BTVER2: retq - - ; HASWELL: vmovups - ; HASWELL: retq - +; AVXSLOW-LABEL: load32bytes: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vmovaps (%rdi), %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, 16(%rdi), %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: load32bytes: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vmovups (%rdi), %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: load32bytes: +; AVX2: # BB#0: +; AVX2-NEXT: vmovups (%rdi), %ymm0 +; AVX2-NEXT: retq %A = load <8 x float>, <8 x float>* %Ap, align 16 ret <8 x float> %A } -; On Sandy Bridge or Ivy Bridge, we should not generate an unaligned 32-byte store -; because that is slowerthan two 16-byte stores. -; Other AVX-capable chips don't have that problem. 
+; Don't generate an unaligned 32-byte store on this test if that is slower than two 16-byte loads. define void @store32bytes(<8 x float> %A, <8 x float>* %P) { - ; CHECK-LABEL: store32bytes - - ; SANDYB: vextractf128 - ; SANDYB: vmovaps - ; SANDYB: retq - - ; BTVER2: vmovups - ; BTVER2: retq - - ; HASWELL: vmovups - ; HASWELL: retq - +; AVXSLOW-LABEL: store32bytes: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vextractf128 $1, %ymm0, 16(%rdi) +; AVXSLOW-NEXT: vmovaps %xmm0, (%rdi) +; AVXSLOW-NEXT: vzeroupper +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: store32bytes: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vmovups %ymm0, (%rdi) +; AVXFAST-NEXT: vzeroupper +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: store32bytes: +; AVX2: # BB#0: +; AVX2-NEXT: vmovups %ymm0, (%rdi) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq store <8 x float> %A, <8 x float>* %P, align 16 ret void } -; Merge two consecutive 16-byte subvector loads into a single 32-byte load -; if it's faster. +; Merge two consecutive 16-byte subvector loads into a single 32-byte load if it's faster. define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) { - ; CHECK-LABEL: combine_16_byte_loads_no_intrinsic - - ; SANDYB: vmovups - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vmovups - ; BTVER2-NEXT: retq - - ; HASWELL: vmovups - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vmovups 48(%rdi), %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vmovups 48(%rdi), %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_no_intrinsic: +; AVX2: # BB#0: +; AVX2-NEXT: vmovups 48(%rdi), %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3 %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4 %v1 = load <4 x float>, <4 x float>* %ptr1, align 1 @@ -69,21 +75,49 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) { ret <8 x float> %v3 } -; Swap the order of the shufflevector operands to ensure that the -; pattern still matches. -define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) { - ; CHECK-LABEL: combine_16_byte_loads_no_intrinsic_swap - - ; SANDYB: vmovups - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vmovups - ; BTVER2-NEXT: retq +define <8 x float> @combine_16_byte_loads_aligned(<4 x float>* %ptr) { +;; FIXME: The first load is 32-byte aligned, so the second load should get merged. 
+; AVXSLOW-LABEL: combine_16_byte_loads_aligned: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vmovaps 48(%rdi), %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_aligned: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vmovaps 48(%rdi), %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_aligned: +; AVX2: # BB#0: +; AVX2-NEXT: vmovaps 48(%rdi), %ymm0 +; AVX2-NEXT: retq + %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3 + %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4 + %v1 = load <4 x float>, <4 x float>* %ptr1, align 32 + %v2 = load <4 x float>, <4 x float>* %ptr2, align 1 + %v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x float> %v3 +} - ; HASWELL: vmovups - ; HASWELL-NEXT: retq +; Swap the order of the shufflevector operands to ensure that the pattern still matches. +define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) { +; AVXSLOW-LABEL: combine_16_byte_loads_no_intrinsic_swap: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vmovups 64(%rdi), %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, 80(%rdi), %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_no_intrinsic_swap: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vmovups 64(%rdi), %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_no_intrinsic_swap: +; AVX2: # BB#0: +; AVX2-NEXT: vmovups 64(%rdi), %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4 %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 5 %v1 = load <4 x float>, <4 x float>* %ptr1, align 1 @@ -94,28 +128,29 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) { ; Check each element type other than float to make sure it is handled correctly. ; Use the loaded values with an 'add' to make sure we're using the correct load type. -; Even though BtVer2 has fast 32-byte loads, we should not generate those for -; 256-bit integer vectors because BtVer2 doesn't have AVX2. +; Don't generate 32-byte loads for integer ops unless we have AVX2. 
define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) { - ; CHECK-LABEL: combine_16_byte_loads_i64 - - ; SANDYB: vextractf128 - ; SANDYB-NEXT: vpaddq - ; SANDYB-NEXT: vpaddq - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vextractf128 - ; BTVER2-NEXT: vpaddq - ; BTVER2-NEXT: vpaddq - ; BTVER2-NEXT: vinsertf128 - ; BTVER2-NEXT: retq - - ; HASWELL-NOT: vextract - ; HASWELL: vpaddq - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_i64: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXSLOW-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1 +; AVXSLOW-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_i64: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXFAST-NEXT: vpaddq 96(%rdi), %xmm1, %xmm1 +; AVXFAST-NEXT: vpaddq 80(%rdi), %xmm0, %xmm0 +; AVXFAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_i64: +; AVX2: # BB#0: +; AVX2-NEXT: vpaddq 80(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5 %ptr2 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 6 %v1 = load <2 x i64>, <2 x i64>* %ptr1, align 1 @@ -126,24 +161,26 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) { } define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) { - ; CHECK-LABEL: combine_16_byte_loads_i32 - - ; SANDYB: vextractf128 - ; SANDYB-NEXT: vpaddd - ; SANDYB-NEXT: vpaddd - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vextractf128 - ; BTVER2-NEXT: vpaddd - ; BTVER2-NEXT: vpaddd - ; BTVER2-NEXT: vinsertf128 - ; BTVER2-NEXT: retq - - ; HASWELL-NOT: vextract - ; HASWELL: vpaddd - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_i32: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXSLOW-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1 +; AVXSLOW-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_i32: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXFAST-NEXT: vpaddd 112(%rdi), %xmm1, %xmm1 +; AVXFAST-NEXT: vpaddd 96(%rdi), %xmm0, %xmm0 +; AVXFAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_i32: +; AVX2: # BB#0: +; AVX2-NEXT: vpaddd 96(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6 %ptr2 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 7 %v1 = load <4 x i32>, <4 x i32>* %ptr1, align 1 @@ -154,24 +191,26 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) { } define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) { - ; CHECK-LABEL: combine_16_byte_loads_i16 - - ; SANDYB: vextractf128 - ; SANDYB-NEXT: vpaddw - ; SANDYB-NEXT: vpaddw - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vextractf128 - ; BTVER2-NEXT: vpaddw - ; BTVER2-NEXT: vpaddw - ; BTVER2-NEXT: vinsertf128 - ; BTVER2-NEXT: retq - - ; HASWELL-NOT: vextract - ; HASWELL: vpaddw - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_i16: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXSLOW-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1 +; AVXSLOW-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXSLOW-NEXT: 
retq +; +; AVXFAST-LABEL: combine_16_byte_loads_i16: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXFAST-NEXT: vpaddw 128(%rdi), %xmm1, %xmm1 +; AVXFAST-NEXT: vpaddw 112(%rdi), %xmm0, %xmm0 +; AVXFAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_i16: +; AVX2: # BB#0: +; AVX2-NEXT: vpaddw 112(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7 %ptr2 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 8 %v1 = load <8 x i16>, <8 x i16>* %ptr1, align 1 @@ -182,24 +221,26 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) { } define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) { - ; CHECK-LABEL: combine_16_byte_loads_i8 - - ; SANDYB: vextractf128 - ; SANDYB-NEXT: vpaddb - ; SANDYB-NEXT: vpaddb - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: retq - - ; BTVER2: vextractf128 - ; BTVER2-NEXT: vpaddb - ; BTVER2-NEXT: vpaddb - ; BTVER2-NEXT: vinsertf128 - ; BTVER2-NEXT: retq - - ; HASWELL-NOT: vextract - ; HASWELL: vpaddb - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_i8: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXSLOW-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1 +; AVXSLOW-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0 +; AVXSLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_i8: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVXFAST-NEXT: vpaddb 144(%rdi), %xmm1, %xmm1 +; AVXFAST-NEXT: vpaddb 128(%rdi), %xmm0, %xmm0 +; AVXFAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_i8: +; AVX2: # BB#0: +; AVX2-NEXT: vpaddb 128(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8 %ptr2 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 9 %v1 = load <16 x i8>, <16 x i8>* %ptr1, align 1 @@ -210,21 +251,22 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) { } define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x double> %x) { - ; CHECK-LABEL: combine_16_byte_loads_double - - ; SANDYB: vmovupd - ; SANDYB-NEXT: vinsertf128 - ; SANDYB-NEXT: vaddpd - ; SANDYB-NEXT: retq - - ; BTVER2-NOT: vinsertf128 - ; BTVER2: vaddpd - ; BTVER2-NEXT: retq - - ; HASWELL-NOT: vinsertf128 - ; HASWELL: vaddpd - ; HASWELL-NEXT: retq - +; AVXSLOW-LABEL: combine_16_byte_loads_double: +; AVXSLOW: # BB#0: +; AVXSLOW-NEXT: vmovupd 144(%rdi), %xmm1 +; AVXSLOW-NEXT: vinsertf128 $1, 160(%rdi), %ymm1, %ymm1 +; AVXSLOW-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVXSLOW-NEXT: retq +; +; AVXFAST-LABEL: combine_16_byte_loads_double: +; AVXFAST: # BB#0: +; AVXFAST-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0 +; AVXFAST-NEXT: retq +; +; AVX2-LABEL: combine_16_byte_loads_double: +; AVX2: # BB#0: +; AVX2-NEXT: vaddpd 144(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: retq %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9 %ptr2 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 10 %v1 = load <2 x double>, <2 x double>* %ptr1, align 1 diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll index 5052ff51092e3..8dded07af7d4d 100644 --- a/test/CodeGen/X86/vec_int_to_fp.ll +++ b/test/CodeGen/X86/vec_int_to_fp.ll @@ -1,5 +1,6 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE2 -; RUN: llc < %s 
-mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2 ; ; Signed Integer to Double @@ -34,12 +35,28 @@ define <2 x double> @sitofp_2vf64(<2 x i64> %a) { define <2 x double> @sitofp_2vf64_i32(<4 x i32> %a) { ; SSE2-LABEL: sitofp_2vf64_i32: ; SSE2: # BB#0: -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] +; SSE2-NEXT: cvtdq2pd %xmm0, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_2vf64_i32: +; AVX: # BB#0: +; AVX-NEXT: vcvtdq2pd %xmm0, %xmm0 +; AVX-NEXT: retq + %shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1> + %cvt = sitofp <2 x i32> %shuf to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @sitofp_2vf64_i16(<8 x i16> %a) { +; SSE2-LABEL: sitofp_2vf64_i16: +; SSE2: # BB#0: +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3] +; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; SSE2-NEXT: movd %xmm1, %rax -; SSE2-NEXT: cltq +; SSE2-NEXT: movswq %ax, %rax ; SSE2-NEXT: movd %xmm0, %rcx -; SSE2-NEXT: movslq %ecx, %rcx +; SSE2-NEXT: movswq %cx, %rcx ; SSE2-NEXT: xorps %xmm0, %xmm0 ; SSE2-NEXT: cvtsi2sdq %rcx, %xmm0 ; SSE2-NEXT: xorps %xmm1, %xmm1 @@ -47,20 +64,55 @@ define <2 x double> @sitofp_2vf64_i32(<4 x i32> %a) { ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; SSE2-NEXT: retq ; -; AVX-LABEL: sitofp_2vf64_i32: +; AVX-LABEL: sitofp_2vf64_i16: ; AVX: # BB#0: -; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero +; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero ; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: cltq +; AVX-NEXT: movswq %ax, %rax ; AVX-NEXT: vpextrq $1, %xmm0, %rcx -; AVX-NEXT: movslq %ecx, %rcx +; AVX-NEXT: movswq %cx, %rcx ; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vcvtsi2sdq %rcx, %xmm0, %xmm0 ; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 ; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX-NEXT: retq - %shuf = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1> - %cvt = sitofp <2 x i32> %shuf to <2 x double> + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <2 x i32> <i32 0, i32 1> + %cvt = sitofp <2 x i16> %shuf to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @sitofp_2vf64_i8(<16 x i8> %a) { +; SSE2-LABEL: sitofp_2vf64_i8: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm1, %rax +; SSE2-NEXT: movsbq %al, %rax +; SSE2-NEXT: movd %xmm0, %rcx +; SSE2-NEXT: movsbq %cl, %rcx +; SSE2-NEXT: xorps %xmm0, %xmm0 +; SSE2-NEXT: cvtsi2sdq %rcx, %xmm0 +; SSE2-NEXT: xorps %xmm1, %xmm1 +; SSE2-NEXT: cvtsi2sdq %rax, %xmm1 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_2vf64_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; AVX-NEXT: vmovq %xmm0, %rax +; AVX-NEXT: movsbq %al, %rax +; AVX-NEXT: vpextrq $1, %xmm0, %rcx +; AVX-NEXT: movsbq %cl, %rcx +; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vcvtsi2sdq 
%rcx, %xmm0, %xmm0 +; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <2 x i32> <i32 0, i32 1> + %cvt = sitofp <2 x i8> %shuf to <2 x double> ret <2 x double> %cvt } @@ -85,22 +137,39 @@ define <4 x double> @sitofp_4vf64(<4 x i64> %a) { ; SSE2-NEXT: movapd %xmm3, %xmm1 ; SSE2-NEXT: retq ; -; AVX-LABEL: sitofp_4vf64: -; AVX: # BB#0: -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX-NEXT: vpextrq $1, %xmm1, %rax -; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 -; AVX-NEXT: vmovq %xmm1, %rax -; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 -; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; AVX-NEXT: vpextrq $1, %xmm0, %rax -; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 -; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 -; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] -; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: retq +; AVX1-LABEL: sitofp_4vf64: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpextrq $1, %xmm1, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_4vf64: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm1, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm1 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2sdq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq %cvt = sitofp <4 x i64> %a to <4 x double> ret <4 x double> %cvt } @@ -108,28 +177,10 @@ define <4 x double> @sitofp_4vf64(<4 x i64> %a) { define <4 x double> @sitofp_4vf64_i32(<4 x i32> %a) { ; SSE2-LABEL: sitofp_4vf64_i32: ; SSE2: # BB#0: -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3] -; SSE2-NEXT: movd %xmm1, %rax -; SSE2-NEXT: cltq -; SSE2-NEXT: cvtsi2sdq %rax, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE2-NEXT: movd %xmm1, %rax -; SSE2-NEXT: cltq -; SSE2-NEXT: xorps %xmm1, %xmm1 -; SSE2-NEXT: cvtsi2sdq %rax, %xmm1 -; SSE2-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3] -; SSE2-NEXT: movd %xmm0, %rax -; SSE2-NEXT: cltq -; SSE2-NEXT: xorps %xmm1, %xmm1 -; SSE2-NEXT: cvtsi2sdq %rax, %xmm1 +; SSE2-NEXT: cvtdq2pd %xmm0, %xmm2 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, %rax -; SSE2-NEXT: cltq -; SSE2-NEXT: xorps %xmm0, %xmm0 -; SSE2-NEXT: cvtsi2sdq %rax, %xmm0 -; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0] -; SSE2-NEXT: movapd %xmm2, %xmm0 +; SSE2-NEXT: cvtdq2pd %xmm0, %xmm1 +; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: sitofp_4vf64_i32: @@ -140,6 +191,47 @@ define <4 x double> @sitofp_4vf64_i32(<4 x i32> %a) { ret <4 x 
double> %cvt } +define <4 x double> @sitofp_4vf64_i16(<8 x i16> %a) { +; SSE2-LABEL: sitofp_4vf64_i16: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_4vf64_i16: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = sitofp <4 x i16> %shuf to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @sitofp_4vf64_i8(<16 x i8> %a) { +; SSE2-LABEL: sitofp_4vf64_i8: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: psrad $24, %xmm1 +; SSE2-NEXT: cvtdq2pd %xmm1, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: cvtdq2pd %xmm1, %xmm1 +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_4vf64_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxbd %xmm0, %xmm0 +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = sitofp <4 x i8> %shuf to <4 x double> + ret <4 x double> %cvt +} + ; ; Unsigned Integer to Double ; @@ -216,6 +308,85 @@ define <2 x double> @uitofp_2vf64_i32(<4 x i32> %a) { ret <2 x double> %cvt } +define <2 x double> @uitofp_2vf64_i16(<8 x i16> %a) { +; SSE2-LABEL: uitofp_2vf64_i16: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; SSE2-NEXT: subpd %xmm3, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; SSE2-NEXT: addpd %xmm4, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: subpd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE2-NEXT: addpd %xmm2, %xmm1 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: retq +; +; AVX-LABEL: uitofp_2vf64_i16: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero +; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; AVX-NEXT: vsubpd %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vhaddpd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vsubpd %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0] +; AVX-NEXT: retq + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <2 x i32> <i32 0, i32 1> + %cvt = uitofp <2 x i16> %shuf to <2 x double> + ret <2 x double> %cvt +} + +define <2 x double> @uitofp_2vf64_i8(<16 x i8> %a) { +; SSE2-LABEL: uitofp_2vf64_i8: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; 
SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; SSE2-NEXT: subpd %xmm3, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; SSE2-NEXT: addpd %xmm4, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: subpd %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE2-NEXT: addpd %xmm2, %xmm1 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE2-NEXT: retq +; +; AVX-LABEL: uitofp_2vf64_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero +; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1127219200,1160773632,0,0] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vmovapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; AVX-NEXT: vsubpd %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vhaddpd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vsubpd %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0] +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <2 x i32> <i32 0, i32 1> + %cvt = uitofp <2 x i8> %shuf to <2 x double> + ret <2 x double> %cvt +} + define <4 x double> @uitofp_4vf64(<4 x i64> %a) { ; SSE2-LABEL: uitofp_4vf64: ; SSE2: # BB#0: @@ -243,29 +414,53 @@ define <4 x double> @uitofp_4vf64(<4 x i64> %a) { ; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: retq ; -; AVX-LABEL: uitofp_4vf64: -; AVX: # BB#0: -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] -; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; AVX-NEXT: vmovapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] -; AVX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 -; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; AVX-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] -; AVX-NEXT: vsubpd %xmm4, %xmm1, %xmm1 -; AVX-NEXT: vhaddpd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm1[0] -; AVX-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; AVX-NEXT: vsubpd %xmm4, %xmm3, %xmm3 -; AVX-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 -; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; AVX-NEXT: vsubpd %xmm4, %xmm0, %xmm0 -; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0] -; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: retq +; AVX1-LABEL: uitofp_4vf64: +; AVX1: # BB#0: +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; AVX1-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; 
AVX1-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vhaddpd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX1-NEXT: vsubpd %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4vf64: +; AVX2: # BB#0: +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vmovapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; AVX2-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm1, %xmm1 +; AVX2-NEXT: vhaddpd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm1[0] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm3, %xmm3 +; AVX2-NEXT: vhaddpd %xmm3, %xmm3, %xmm3 +; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; AVX2-NEXT: vsubpd %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0] +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: retq %cvt = uitofp <4 x i64> %a to <4 x double> ret <4 x double> %cvt } @@ -288,7 +483,66 @@ define <4 x double> @uitofp_4vf64_i32(<4 x i32> %a) { ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,0,1] ; SSE2-NEXT: addpd %xmm1, %xmm5 ; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0] -; SSE2-NEXT: pand .LCPI7_2(%rip), %xmm2 +; SSE2-NEXT: pand .LCPI13_2(%rip), %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; SSE2-NEXT: subpd %xmm4, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,0,1] +; SSE2-NEXT: addpd %xmm2, %xmm1 +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1] +; SSE2-NEXT: subpd %xmm4, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,0,1] +; SSE2-NEXT: addpd %xmm5, %xmm2 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: retq +; +; AVX1-LABEL: uitofp_4vf64_i32: +; AVX1: # BB#0: +; AVX1-NEXT: vpand .LCPI13_0(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX1-NEXT: vmulpd .LCPI13_1(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4vf64_i32: +; AVX2: # BB#0: +; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1 +; AVX2-NEXT: vcvtdq2pd %xmm1, %ymm1 +; AVX2-NEXT: vbroadcastsd .LCPI13_0(%rip), %ymm2 +; AVX2-NEXT: vmulpd %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpbroadcastd .LCPI13_1(%rip), %xmm2 +; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq + %cvt = uitofp <4 x i32> %a 
to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @uitofp_4vf64_i16(<8 x i16> %a) { +; SSE2-LABEL: uitofp_4vf64_i16: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,2,1] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1127219200,1160773632,0,0] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; SSE2-NEXT: movapd {{.*#+}} xmm4 = [4.503600e+15,1.934281e+25] +; SSE2-NEXT: subpd %xmm4, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1] +; SSE2-NEXT: addpd %xmm5, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] +; SSE2-NEXT: subpd %xmm4, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,0,1] +; SSE2-NEXT: addpd %xmm1, %xmm5 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[2,1,2,3,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,7,5,6,7] +; SSE2-NEXT: pand .LCPI14_2(%rip), %xmm2 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,0,1] ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: subpd %xmm4, %xmm2 @@ -301,16 +555,60 @@ define <4 x double> @uitofp_4vf64_i32(<4 x i32> %a) { ; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; SSE2-NEXT: retq ; -; AVX-LABEL: uitofp_4vf64_i32: +; AVX-LABEL: uitofp_4vf64_i16: ; AVX: # BB#0: -; AVX-NEXT: vpand .LCPI7_0(%rip), %xmm0, %xmm1 -; AVX-NEXT: vcvtdq2pd %xmm1, %ymm1 -; AVX-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero ; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 -; AVX-NEXT: vmulpd .LCPI7_1(%rip), %ymm0, %ymm0 -; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ; AVX-NEXT: retq - %cvt = uitofp <4 x i32> %a to <4 x double> + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = uitofp <4 x i16> %shuf to <4 x double> + ret <4 x double> %cvt +} + +define <4 x double> @uitofp_4vf64_i8(<16 x i8> %a) { +; SSE2-LABEL: uitofp_4vf64_i8: +; SSE2: # BB#0: +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25] +; SSE2-NEXT: subpd %xmm3, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1] +; SSE2-NEXT: addpd %xmm5, %xmm0 +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] +; SSE2-NEXT: subpd %xmm3, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1] +; SSE2-NEXT: addpd %xmm4, %xmm5 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,7,5,6,7] +; SSE2-NEXT: pand .LCPI15_2(%rip), %xmm4 
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[2,3,0,1] +; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] +; SSE2-NEXT: subpd %xmm3, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1] +; SSE2-NEXT: addpd %xmm4, %xmm1 +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1] +; SSE2-NEXT: subpd %xmm3, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,0,1] +; SSE2-NEXT: addpd %xmm5, %xmm2 +; SSE2-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE2-NEXT: retq +; +; AVX-LABEL: uitofp_4vf64_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = uitofp <4 x i8> %shuf to <4 x double> ret <4 x double> %cvt } @@ -362,6 +660,43 @@ define <4 x float> @sitofp_4vf32_i64(<2 x i64> %a) { ret <4 x float> %ext } +define <4 x float> @sitofp_4vf32_i16(<8 x i16> %a) { +; SSE2-LABEL: sitofp_4vf32_i16: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_4vf32_i16: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = sitofp <4 x i16> %shuf to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @sitofp_4vf32_i8(<16 x i8> %a) { +; SSE2-LABEL: sitofp_4vf32_i8: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: sitofp_4vf32_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovsxbd %xmm0, %xmm0 +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = sitofp <4 x i8> %shuf to <4 x float> + ret <4 x float> %cvt +} + define <8 x float> @sitofp_8vf32(<8 x i32> %a) { ; SSE2-LABEL: sitofp_8vf32: ; SSE2: # BB#0: @@ -398,27 +733,112 @@ define <4 x float> @sitofp_4vf32_4i64(<4 x i64> %a) { ; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; AVX-LABEL: sitofp_4vf32_4i64: -; AVX: # BB#0: -; AVX-NEXT: vpextrq $1, %xmm0, %rax -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 -; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 -; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 -; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX-NEXT: vpextrq $1, %xmm0, %rax -; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVX1-LABEL: sitofp_4vf32_4i64: +; AVX1: # BB#0: +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] 
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_4vf32_4i64: +; AVX2: # BB#0: +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq %cvt = sitofp <4 x i64> %a to <4 x float> ret <4 x float> %cvt } +define <8 x float> @sitofp_8vf32_i16(<8 x i16> %a) { +; SSE2-LABEL: sitofp_8vf32_i16: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: cvtdq2ps %xmm1, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm1 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; AVX1-LABEL: sitofp_8vf32_i16: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_8vf32_i16: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %cvt = sitofp <8 x i16> %a to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @sitofp_8vf32_i8(<16 x i8> %a) { +; SSE2-LABEL: sitofp_8vf32_i8: +; SSE2: # BB#0: +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $24, %xmm1 +; SSE2-NEXT: cvtdq2ps %xmm1, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $24, %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm1 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; AVX1-LABEL: sitofp_8vf32_i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxbd %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3] +; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: sitofp_8vf32_i8: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovzxbd %xmm0, %ymm0 +; AVX2-NEXT: vpslld $24, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $24, %ymm0, %ymm0 +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %cvt = sitofp <8 x i8> %shuf to <8 x float> + ret <8 x float> %cvt +} + ; ; Unsigned Integer to Float ; @@ -428,21 +848,33 @@ define <4 x float> @uitofp_4vf32(<4 x i32> %a) { ; SSE2: # BB#0: ; SSE2-NEXT: movdqa {{.*#+}} 
xmm1 = [65535,65535,65535,65535] ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: por .LCPI12_1(%rip), %xmm1 +; SSE2-NEXT: por .LCPI24_1(%rip), %xmm1 ; SSE2-NEXT: psrld $16, %xmm0 -; SSE2-NEXT: por .LCPI12_2(%rip), %xmm0 -; SSE2-NEXT: addps .LCPI12_3(%rip), %xmm0 +; SSE2-NEXT: por .LCPI24_2(%rip), %xmm0 +; SSE2-NEXT: addps .LCPI24_3(%rip), %xmm0 ; SSE2-NEXT: addps %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; AVX-LABEL: uitofp_4vf32: -; AVX: # BB#0: -; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] -; AVX-NEXT: vpsrld $16, %xmm0, %xmm0 -; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] -; AVX-NEXT: vaddps .LCPI12_2(%rip), %xmm0, %xmm0 -; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: uitofp_4vf32: +; AVX1: # BB#0: +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] +; AVX1-NEXT: vaddps .LCPI24_2(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4vf32: +; AVX2: # BB#0: +; AVX2-NEXT: vpbroadcastd .LCPI24_0(%rip), %xmm1 +; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7] +; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX2-NEXT: vpbroadcastd .LCPI24_1(%rip), %xmm2 +; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7] +; AVX2-NEXT: vbroadcastss .LCPI24_2(%rip), %xmm2 +; AVX2-NEXT: vaddps %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vaddps %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq %cvt = uitofp <4 x i32> %a to <4 x float> ret <4 x float> %cvt } @@ -455,30 +887,30 @@ define <4 x float> @uitofp_4vf32_i64(<2 x i64> %a) { ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB13_1 +; SSE2-NEXT: js .LBB25_1 ; SSE2-NEXT: # BB#2: ; SSE2-NEXT: xorps %xmm0, %xmm0 ; SSE2-NEXT: cvtsi2ssq %rax, %xmm0 -; SSE2-NEXT: jmp .LBB13_3 -; SSE2-NEXT: .LBB13_1: +; SSE2-NEXT: jmp .LBB25_3 +; SSE2-NEXT: .LBB25_1: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: xorps %xmm0, %xmm0 ; SSE2-NEXT: cvtsi2ssq %rcx, %xmm0 ; SSE2-NEXT: addss %xmm0, %xmm0 -; SSE2-NEXT: .LBB13_3: +; SSE2-NEXT: .LBB25_3: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: movd %xmm1, %rax ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB13_4 +; SSE2-NEXT: js .LBB25_4 ; SSE2-NEXT: # BB#5: ; SSE2-NEXT: xorps %xmm1, %xmm1 ; SSE2-NEXT: cvtsi2ssq %rax, %xmm1 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: retq -; SSE2-NEXT: .LBB13_4: +; SSE2-NEXT: .LBB25_4: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: xorps %xmm1, %xmm1 @@ -493,39 +925,39 @@ define <4 x float> @uitofp_4vf32_i64(<2 x i64> %a) { ; AVX-NEXT: movl %eax, %ecx ; AVX-NEXT: andl $1, %ecx ; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB13_1 +; AVX-NEXT: js .LBB25_1 ; AVX-NEXT: # BB#2: ; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 -; AVX-NEXT: jmp .LBB13_3 -; AVX-NEXT: .LBB13_1: +; AVX-NEXT: jmp .LBB25_3 +; AVX-NEXT: .LBB25_1: ; AVX-NEXT: shrq %rax ; AVX-NEXT: orq %rax, %rcx ; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 ; AVX-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX-NEXT: .LBB13_3: +; AVX-NEXT: .LBB25_3: ; AVX-NEXT: vmovq %xmm0, %rax ; AVX-NEXT: movl %eax, %ecx ; AVX-NEXT: andl $1, %ecx ; AVX-NEXT: 
testq %rax, %rax -; AVX-NEXT: js .LBB13_4 +; AVX-NEXT: js .LBB25_4 ; AVX-NEXT: # BB#5: ; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 -; AVX-NEXT: jmp .LBB13_6 -; AVX-NEXT: .LBB13_4: +; AVX-NEXT: jmp .LBB25_6 +; AVX-NEXT: .LBB25_4: ; AVX-NEXT: shrq %rax ; AVX-NEXT: orq %rax, %rcx ; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 ; AVX-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX-NEXT: .LBB13_6: +; AVX-NEXT: .LBB25_6: ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3] ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB13_8 +; AVX-NEXT: js .LBB25_8 ; AVX-NEXT: # BB#7: ; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 -; AVX-NEXT: .LBB13_8: +; AVX-NEXT: .LBB25_8: ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3] ; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] ; AVX-NEXT: retq @@ -534,6 +966,43 @@ define <4 x float> @uitofp_4vf32_i64(<2 x i64> %a) { ret <4 x float> %ext } +define <4 x float> @uitofp_4vf32_i16(<8 x i16> %a) { +; SSE2-LABEL: uitofp_4vf32_i16: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: uitofp_4vf32_i16: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %shuf = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = uitofp <4 x i16> %shuf to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @uitofp_4vf32_i8(<16 x i8> %a) { +; SSE2-LABEL: uitofp_4vf32_i8: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: uitofp_4vf32_i8: +; AVX: # BB#0: +; AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 +; AVX-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %cvt = uitofp <4 x i8> %shuf to <4 x float> + ret <4 x float> %cvt +} + define <8 x float> @uitofp_8vf32(<8 x i32> %a) { ; SSE2-LABEL: uitofp_8vf32: ; SSE2: # BB#0: @@ -556,18 +1025,30 @@ define <8 x float> @uitofp_8vf32(<8 x i32> %a) { ; SSE2-NEXT: addps %xmm2, %xmm1 ; SSE2-NEXT: retq ; -; AVX-LABEL: uitofp_8vf32: -; AVX: # BB#0: -; AVX-NEXT: vandps .LCPI14_0(%rip), %ymm0, %ymm1 -; AVX-NEXT: vcvtdq2ps %ymm1, %ymm1 -; AVX-NEXT: vpsrld $16, %xmm0, %xmm2 -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX-NEXT: vpsrld $16, %xmm0, %xmm0 -; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 -; AVX-NEXT: vcvtdq2ps %ymm0, %ymm0 -; AVX-NEXT: vmulps .LCPI14_1(%rip), %ymm0, %ymm0 -; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0 -; AVX-NEXT: retq +; AVX1-LABEL: uitofp_8vf32: +; AVX1: # BB#0: +; AVX1-NEXT: vandps .LCPI28_0(%rip), %ymm0, %ymm1 +; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: vmulps 
.LCPI28_1(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_8vf32: +; AVX2: # BB#0: +; AVX2-NEXT: vpbroadcastd .LCPI28_0(%rip), %ymm1 +; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15] +; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0 +; AVX2-NEXT: vpbroadcastd .LCPI28_1(%rip), %ymm2 +; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15] +; AVX2-NEXT: vbroadcastss .LCPI28_2(%rip), %ymm2 +; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: retq %cvt = uitofp <8 x i32> %a to <8 x float> ret <8 x float> %cvt } @@ -579,136 +1060,321 @@ define <4 x float> @uitofp_4vf32_4i64(<4 x i64> %a) { ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB15_1 +; SSE2-NEXT: js .LBB29_1 ; SSE2-NEXT: # BB#2: ; SSE2-NEXT: cvtsi2ssq %rax, %xmm3 -; SSE2-NEXT: jmp .LBB15_3 -; SSE2-NEXT: .LBB15_1: +; SSE2-NEXT: jmp .LBB29_3 +; SSE2-NEXT: .LBB29_1: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: cvtsi2ssq %rcx, %xmm3 ; SSE2-NEXT: addss %xmm3, %xmm3 -; SSE2-NEXT: .LBB15_3: +; SSE2-NEXT: .LBB29_3: ; SSE2-NEXT: movd %xmm0, %rax ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB15_4 +; SSE2-NEXT: js .LBB29_4 ; SSE2-NEXT: # BB#5: ; SSE2-NEXT: cvtsi2ssq %rax, %xmm2 -; SSE2-NEXT: jmp .LBB15_6 -; SSE2-NEXT: .LBB15_4: +; SSE2-NEXT: jmp .LBB29_6 +; SSE2-NEXT: .LBB29_4: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: cvtsi2ssq %rcx, %xmm2 ; SSE2-NEXT: addss %xmm2, %xmm2 -; SSE2-NEXT: .LBB15_6: +; SSE2-NEXT: .LBB29_6: ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] ; SSE2-NEXT: movd %xmm1, %rax ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB15_7 +; SSE2-NEXT: js .LBB29_7 ; SSE2-NEXT: # BB#8: ; SSE2-NEXT: xorps %xmm1, %xmm1 ; SSE2-NEXT: cvtsi2ssq %rax, %xmm1 -; SSE2-NEXT: jmp .LBB15_9 -; SSE2-NEXT: .LBB15_7: +; SSE2-NEXT: jmp .LBB29_9 +; SSE2-NEXT: .LBB29_7: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: xorps %xmm1, %xmm1 ; SSE2-NEXT: cvtsi2ssq %rcx, %xmm1 ; SSE2-NEXT: addss %xmm1, %xmm1 -; SSE2-NEXT: .LBB15_9: +; SSE2-NEXT: .LBB29_9: ; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: movd %xmm0, %rax ; SSE2-NEXT: movl %eax, %ecx ; SSE2-NEXT: andl $1, %ecx ; SSE2-NEXT: testq %rax, %rax -; SSE2-NEXT: js .LBB15_10 +; SSE2-NEXT: js .LBB29_10 ; SSE2-NEXT: # BB#11: ; SSE2-NEXT: xorps %xmm0, %xmm0 ; SSE2-NEXT: cvtsi2ssq %rax, %xmm0 -; SSE2-NEXT: jmp .LBB15_12 -; SSE2-NEXT: .LBB15_10: +; SSE2-NEXT: jmp .LBB29_12 +; SSE2-NEXT: .LBB29_10: ; SSE2-NEXT: shrq %rax ; SSE2-NEXT: orq %rax, %rcx ; SSE2-NEXT: xorps %xmm0, %xmm0 ; SSE2-NEXT: cvtsi2ssq %rcx, %xmm0 ; SSE2-NEXT: addss %xmm0, %xmm0 -; SSE2-NEXT: .LBB15_12: +; SSE2-NEXT: .LBB29_12: ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; SSE2-NEXT: movaps %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; AVX-LABEL: uitofp_4vf32_4i64: -; AVX: # BB#0: -; AVX-NEXT: vpextrq $1, %xmm0, %rax -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: andl $1, %ecx -; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB15_1 
-; AVX-NEXT: # BB#2: -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 -; AVX-NEXT: jmp .LBB15_3 -; AVX-NEXT: .LBB15_1: -; AVX-NEXT: shrq %rax -; AVX-NEXT: orq %rax, %rcx -; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 -; AVX-NEXT: vaddss %xmm1, %xmm1, %xmm1 -; AVX-NEXT: .LBB15_3: -; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: andl $1, %ecx -; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB15_4 -; AVX-NEXT: # BB#5: -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 -; AVX-NEXT: jmp .LBB15_6 -; AVX-NEXT: .LBB15_4: -; AVX-NEXT: shrq %rax -; AVX-NEXT: orq %rax, %rcx -; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 -; AVX-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX-NEXT: .LBB15_6: -; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX-NEXT: vmovq %xmm0, %rax -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: andl $1, %ecx -; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB15_7 -; AVX-NEXT: # BB#8: -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 -; AVX-NEXT: jmp .LBB15_9 -; AVX-NEXT: .LBB15_7: -; AVX-NEXT: shrq %rax -; AVX-NEXT: orq %rax, %rcx -; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 -; AVX-NEXT: vaddss %xmm2, %xmm2, %xmm2 -; AVX-NEXT: .LBB15_9: -; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] -; AVX-NEXT: vpextrq $1, %xmm0, %rax -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: andl $1, %ecx -; AVX-NEXT: testq %rax, %rax -; AVX-NEXT: js .LBB15_10 -; AVX-NEXT: # BB#11: -; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq -; AVX-NEXT: .LBB15_10: -; AVX-NEXT: shrq %rax -; AVX-NEXT: orq %rax, %rcx -; AVX-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 -; AVX-NEXT: vaddss %xmm0, %xmm0, %xmm0 -; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] -; AVX-NEXT: vzeroupper -; AVX-NEXT: retq +; AVX1-LABEL: uitofp_4vf32_4i64: +; AVX1: # BB#0: +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB29_1 +; AVX1-NEXT: # BB#2: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX1-NEXT: jmp .LBB29_3 +; AVX1-NEXT: .LBB29_1: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: .LBB29_3: +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB29_4 +; AVX1-NEXT: # BB#5: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB29_6 +; AVX1-NEXT: .LBB29_4: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB29_6: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB29_7 +; AVX1-NEXT: # BB#8: +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX1-NEXT: jmp .LBB29_9 +; AVX1-NEXT: .LBB29_7: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: .LBB29_9: +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: movl %eax, %ecx +; AVX1-NEXT: andl $1, %ecx +; AVX1-NEXT: testq %rax, %rax +; AVX1-NEXT: js .LBB29_10 +; 
AVX1-NEXT: # BB#11: +; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; AVX1-NEXT: .LBB29_10: +; AVX1-NEXT: shrq %rax +; AVX1-NEXT: orq %rax, %rcx +; AVX1-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_4vf32_4i64: +; AVX2: # BB#0: +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB29_1 +; AVX2-NEXT: # BB#2: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm1 +; AVX2-NEXT: jmp .LBB29_3 +; AVX2-NEXT: .LBB29_1: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm1 +; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: .LBB29_3: +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB29_4 +; AVX2-NEXT: # BB#5: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp .LBB29_6 +; AVX2-NEXT: .LBB29_4: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB29_6: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3] +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vmovq %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB29_7 +; AVX2-NEXT: # BB#8: +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm2 +; AVX2-NEXT: jmp .LBB29_9 +; AVX2-NEXT: .LBB29_7: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm2 +; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: .LBB29_9: +; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3] +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: movl %eax, %ecx +; AVX2-NEXT: andl $1, %ecx +; AVX2-NEXT: testq %rax, %rax +; AVX2-NEXT: js .LBB29_10 +; AVX2-NEXT: # BB#11: +; AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vcvtsi2ssq %rax, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; AVX2-NEXT: .LBB29_10: +; AVX2-NEXT: shrq %rax +; AVX2-NEXT: orq %rax, %rcx +; AVX2-NEXT: vcvtsi2ssq %rcx, %xmm0, %xmm0 +; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0] +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq %cvt = uitofp <4 x i64> %a to <4 x float> ret <4 x float> %cvt } + +define <8 x float> @uitofp_8vf32_i16(<8 x i16> %a) { +; SSE2-LABEL: uitofp_8vf32_i16: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] +; SSE2-NEXT: pand .LCPI30_0(%rip), %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm1 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; AVX1-LABEL: uitofp_8vf32_i16: +; AVX1: # BB#0: +; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: 
vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_8vf32_i16: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %cvt = uitofp <8 x i16> %a to <8 x float> + ret <8 x float> %cvt +} + +define <8 x float> @uitofp_8vf32_i8(<16 x i8> %a) { +; SSE2-LABEL: uitofp_8vf32_i8: +; SSE2: # BB#0: +; SSE2-NEXT: pxor %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7] +; SSE2-NEXT: pand .LCPI31_0(%rip), %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm1 +; SSE2-NEXT: movaps %xmm2, %xmm0 +; SSE2-NEXT: retq +; +; AVX1-LABEL: uitofp_8vf32_i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero +; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vandps .LCPI31_0(%rip), %ymm0, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: uitofp_8vf32_i8: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero +; AVX2-NEXT: vpbroadcastd .LCPI31_0(%rip), %ymm1 +; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX2-NEXT: retq + %shuf = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %cvt = uitofp <8 x i8> %shuf to <8 x float> + ret <8 x float> %cvt +} + +; +; Aggregates +; + +%Arguments = type <{ <8 x i8>, <8 x i16>, <8 x float>* }> +define void @aggregate_sitofp_8f32_i16(%Arguments* nocapture readonly %a0) { +; SSE2-LABEL: aggregate_sitofp_8f32_i16: +; SSE2: # BB#0: +; SSE2-NEXT: movq 24(%rdi), %rax +; SSE2-NEXT: movdqu 8(%rdi), %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $16, %xmm1 +; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1 +; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] +; SSE2-NEXT: psrad $16, %xmm0 +; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0 +; SSE2-NEXT: movaps %xmm0, (%rax) +; SSE2-NEXT: movaps %xmm1, 16(%rax) +; SSE2-NEXT: retq +; +; AVX1-LABEL: aggregate_sitofp_8f32_i16: +; AVX1: # BB#0: +; AVX1-NEXT: movq 24(%rdi), %rax +; AVX1-NEXT: vmovdqu 8(%rdi), %xmm0 +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %ymm0, (%rax) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: aggregate_sitofp_8f32_i16: +; AVX2: # BB#0: +; AVX2-NEXT: movq 24(%rdi), %rax +; AVX2-NEXT: vpmovsxwd 8(%rdi), %ymm0 +; AVX2-NEXT: vcvtdq2ps 
%ymm0, %ymm0 +; AVX2-NEXT: vmovaps %ymm0, (%rax) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq + %1 = load %Arguments, %Arguments* %a0, align 1 + %2 = extractvalue %Arguments %1, 1 + %3 = extractvalue %Arguments %1, 2 + %4 = sitofp <8 x i16> %2 to <8 x float> + store <8 x float> %4, <8 x float>* %3, align 32 + ret void +} diff --git a/test/CodeGen/X86/vec_shift8.ll b/test/CodeGen/X86/vec_shift8.ll index a32cb30b0b262..9d19f667ea9b2 100644 --- a/test/CodeGen/X86/vec_shift8.ll +++ b/test/CodeGen/X86/vec_shift8.ll @@ -8,114 +8,83 @@ define <2 x i64> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { entry: -; SSE2: pextrw $7, %xmm0, %eax -; SSE2-NEXT: pextrw $7, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: pextrw $3, %xmm0, %eax -; SSE2-NEXT: pextrw $3, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-NEXT: pextrw $5, %xmm0, %eax -; SSE2-NEXT: pextrw $5, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: pextrw $1, %xmm0, %eax -; SSE2-NEXT: pextrw $1, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE2-NEXT: pextrw $6, %xmm0, %eax -; SSE2-NEXT: pextrw $6, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: pextrw $2, %xmm0, %eax -; SSE2-NEXT: pextrw $2, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-NEXT: pextrw $4, %xmm0, %eax -; SSE2-NEXT: pextrw $4, %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: shll %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; ALL-NOT: shll +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psllw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: pextrw $1, %xmm0, %eax -; SSE41-NEXT: pextrw $1, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: movd 
%xmm0, %edx -; SSE41-NEXT: movd %xmm1, %ecx -; SSE41-NEXT: shll %cl, %edx -; SSE41-NEXT: movd %edx, %xmm2 -; SSE41-NEXT: pinsrw $1, %eax, %xmm2 -; SSE41-NEXT: pextrw $2, %xmm0, %eax -; SSE41-NEXT: pextrw $2, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $2, %eax, %xmm2 -; SSE41-NEXT: pextrw $3, %xmm0, %eax -; SSE41-NEXT: pextrw $3, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $3, %eax, %xmm2 -; SSE41-NEXT: pextrw $4, %xmm0, %eax -; SSE41-NEXT: pextrw $4, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $4, %eax, %xmm2 -; SSE41-NEXT: pextrw $5, %xmm0, %eax -; SSE41-NEXT: pextrw $5, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $5, %eax, %xmm2 -; SSE41-NEXT: pextrw $6, %xmm0, %eax -; SSE41-NEXT: pextrw $6, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $6, %eax, %xmm2 -; SSE41-NEXT: pextrw $7, %xmm0, %eax -; SSE41-NEXT: pextrw $7, %xmm1, %ecx -; SSE41-NEXT: shll %cl, %eax -; SSE41-NEXT: pinsrw $7, %eax, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psllw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psllw $1, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpextrw $1, %xmm0, %eax -; AVX-NEXT: vpextrw $1, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vmovd %xmm0, %edx -; AVX-NEXT: vmovd %xmm1, %ecx -; AVX-NEXT: shll %cl, %edx -; AVX-NEXT: vmovd %edx, %xmm2 -; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: vpextrw $2, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: vpextrw $3, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: vpextrw $4, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: vpextrw $5, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: vpextrw $6, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: vpextrw $7, %xmm1, %ecx -; AVX-NEXT: shll %cl, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsllw 
$1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %shl = shl <8 x i16> %r, %a %tmp2 = bitcast <8 x i16> %shl to <2 x i64> @@ -124,88 +93,66 @@ entry: define <2 x i64> @shl_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { entry: -; SSE2: psllw $5, %xmm1 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm1, %xmm3 -; SSE2-NEXT: pcmpeqb %xmm2, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: pandn %xmm0, %xmm4 -; SSE2-NEXT: psllw $4, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: por %xmm4, %xmm0 -; SSE2-NEXT: paddb %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm1, %xmm3 -; SSE2-NEXT: pcmpeqb %xmm2, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: pandn %xmm0, %xmm4 -; SSE2-NEXT: psllw $2, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: por %xmm4, %xmm0 -; SSE2-NEXT: paddb %xmm1, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: pcmpeqb %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pandn %xmm0, %xmm2 -; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2: psllw $5, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: movdqa %xmm0, %xmm2 -; SSE41-NEXT: psllw $5, %xmm1 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm5 -; SSE41-NEXT: paddb %xmm5, %xmm5 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: pand %xmm5, %xmm4 -; SSE41-NEXT: pcmpeqb %xmm3, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm1 -; SSE41-NEXT: pcmpeqb %xmm3, %xmm1 -; SSE41-NEXT: movdqa %xmm2, %xmm6 -; SSE41-NEXT: psllw $4, %xmm6 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm6 -; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: pblendvb %xmm6, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: psllw $2, %xmm1 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa %xmm4, %xmm0 -; SSE41-NEXT: pblendvb %xmm1, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm1 -; SSE41-NEXT: paddb %xmm1, %xmm1 -; SSE41-NEXT: paddb %xmm5, %xmm5 -; SSE41-NEXT: pand %xmm3, %xmm5 -; SSE41-NEXT: pcmpeqb %xmm5, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm0 -; SSE41-NEXT: pblendvb %xmm1, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psllw $4, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb 
%xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psllw $2, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: paddb %xmm3, %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpsllw $5, %xmm1, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm2 -; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128] -; AVX-NEXT: vpand %xmm2, %xmm3, %xmm4 -; AVX-NEXT: vpcmpeqb %xmm3, %xmm4, %xmm4 -; AVX-NEXT: vpand %xmm1, %xmm3, %xmm1 -; AVX-NEXT: vpcmpeqb %xmm3, %xmm1, %xmm1 -; AVX-NEXT: vpsllw $4, %xmm0, %xmm5 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm5, %xmm5 -; AVX-NEXT: vpblendvb %xmm1, %xmm5, %xmm0, %xmm0 -; AVX-NEXT: vpsllw $2, %xmm0, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 -; AVX-NEXT: vpblendvb %xmm4, %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1 -; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpand %xmm2, %xmm3, %xmm2 -; AVX-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2 -; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq %shl = shl <16 x i8> %r, %a %tmp2 = bitcast <16 x i8> %shl to <2 x i64> @@ -214,114 +161,83 @@ entry: define <2 x i64> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { entry: -; SSE2: pextrw $7, %xmm1, %ecx -; SSE2-NEXT: pextrw $7, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: pextrw $3, %xmm1, %ecx -; SSE2-NEXT: pextrw $3, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-NEXT: pextrw $5, %xmm1, %ecx -; SSE2-NEXT: pextrw $5, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: pextrw $1, %xmm1, %ecx -; SSE2-NEXT: pextrw $1, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE2-NEXT: pextrw $6, %xmm1, %ecx -; SSE2-NEXT: pextrw $6, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: pextrw $2, %xmm1, %ecx -; SSE2-NEXT: pextrw $2, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-NEXT: pextrw $4, %xmm1, %ecx -; SSE2-NEXT: pextrw $4, %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: sarw %cl, %ax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; ALL-NOT: sarw +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psraw $2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psraw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: pextrw $1, %xmm1, %ecx -; SSE41-NEXT: pextrw $1, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: movd %xmm1, %ecx -; SSE41-NEXT: movd %xmm0, %edx -; SSE41-NEXT: sarw %cl, %dx -; SSE41-NEXT: movd %edx, %xmm2 -; SSE41-NEXT: pinsrw $1, %eax, %xmm2 -; SSE41-NEXT: pextrw $2, %xmm1, %ecx -; SSE41-NEXT: pextrw $2, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $2, %eax, %xmm2 -; SSE41-NEXT: pextrw $3, %xmm1, %ecx -; SSE41-NEXT: pextrw $3, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $3, %eax, %xmm2 -; SSE41-NEXT: pextrw $4, %xmm1, %ecx -; SSE41-NEXT: pextrw $4, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $4, %eax, %xmm2 -; SSE41-NEXT: pextrw $5, %xmm1, %ecx -; SSE41-NEXT: pextrw $5, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $5, %eax, %xmm2 -; SSE41-NEXT: pextrw $6, %xmm1, %ecx -; SSE41-NEXT: pextrw $6, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $6, %eax, %xmm2 -; SSE41-NEXT: pextrw $7, %xmm1, %ecx -; SSE41-NEXT: pextrw $7, %xmm0, %eax -; SSE41-NEXT: sarw %cl, %ax -; SSE41-NEXT: pinsrw $7, %eax, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psraw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psraw $1, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpextrw $1, %xmm1, %ecx -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vmovd %xmm1, %ecx -; AVX-NEXT: vmovd %xmm0, %edx -; AVX-NEXT: sarw %cl, %dx -; 
AVX-NEXT: vmovd %edx, %xmm2 -; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $2, %xmm1, %ecx -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $3, %xmm1, %ecx -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $4, %xmm1, %ecx -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $5, %xmm1, %ecx -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $6, %xmm1, %ecx -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $7, %xmm1, %ecx -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: sarw %cl, %ax -; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsraw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %ashr = ashr <8 x i16> %r, %a %tmp2 = bitcast <8 x i16> %ashr to <2 x i64> @@ -330,282 +246,122 @@ entry: define <2 x i64> @ashr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { entry: +; ALL-NOT: sarb ; -; SSE2: pushq %rbp -; SSE2-NEXT: pushq %r15 -; SSE2-NEXT: pushq %r14 -; SSE2-NEXT: pushq %r13 -; SSE2-NEXT: pushq %r12 -; SSE2-NEXT: pushq %rbx -; SSE2-NEXT: movaps %xmm1, -24(%rsp) -; SSE2-NEXT: movaps %xmm0, -40(%rsp) -; SSE2-NEXT: movb -9(%rsp), %cl -; SSE2-NEXT: movb -25(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movb -17(%rsp), %cl -; SSE2-NEXT: movb -33(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -13(%rsp), %cl -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movl %eax, -44(%rsp) -; SSE2-NEXT: movb -29(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movzbl %al, %r9d -; SSE2-NEXT: movb -21(%rsp), %cl -; SSE2-NEXT: movb -37(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -11(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r10d -; SSE2-NEXT: movb -27(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -19(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r11d -; SSE2-NEXT: movb -35(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -15(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r14d -; SSE2-NEXT: movb -31(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movzbl %al, %r15d -; SSE2-NEXT: movb -23(%rsp), %cl -; SSE2-NEXT: movb -39(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -10(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r12d -; SSE2-NEXT: movb -26(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -18(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r13d -; SSE2-NEXT: movb -34(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -14(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r8d -; SSE2-NEXT: movb -30(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb -22(%rsp), %cl -; SSE2-NEXT: movzbl %al, %ebp -; SSE2-NEXT: movb -38(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movb 
-12(%rsp), %cl -; SSE2-NEXT: movzbl %al, %edi -; SSE2-NEXT: movb -28(%rsp), %dl -; SSE2-NEXT: sarb %cl, %dl -; SSE2-NEXT: movb -20(%rsp), %cl -; SSE2-NEXT: movzbl %dl, %esi -; SSE2-NEXT: movb -36(%rsp), %bl -; SSE2-NEXT: sarb %cl, %bl -; SSE2-NEXT: movb -16(%rsp), %cl -; SSE2-NEXT: movzbl %bl, %ebx -; SSE2-NEXT: movb -32(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movzbl %al, %edx -; SSE2-NEXT: movb -24(%rsp), %cl -; SSE2-NEXT: movb -40(%rsp), %al -; SSE2-NEXT: sarb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd -44(%rsp), %xmm1 -; SSE2: movd %r9d, %xmm2 -; SSE2-NEXT: movd %r10d, %xmm3 -; SSE2-NEXT: movd %r11d, %xmm4 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %r14d, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: movd %r15d, %xmm1 -; SSE2-NEXT: movd %r12d, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: movd %r13d, %xmm0 -; SSE2-NEXT: movd %r8d, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %ebp, %xmm0 -; SSE2-NEXT: movd %edi, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-NEXT: movd %esi, %xmm0 -; SSE2-NEXT: movd %ebx, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq 
%r12 -; SSE2-NEXT: popq %r13 -; SSE2-NEXT: popq %r14 -; SSE2-NEXT: popq %r15 -; SSE2-NEXT: popq %rbp +; SSE2: punpckhbw {{.*#}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] +; SSE2-NEXT: psllw $5, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm6 +; SSE2-NEXT: pandn %xmm2, %xmm6 +; SSE2-NEXT: psraw $4, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm6, %xmm2 +; SSE2-NEXT: paddw %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm6 +; SSE2-NEXT: pandn %xmm2, %xmm6 +; SSE2-NEXT: psraw $2, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm6, %xmm2 +; SSE2-NEXT: paddw %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm5, %xmm5 +; SSE2-NEXT: pcmpgtw %xmm4, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm4 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: psraw $1, %xmm2 +; SSE2-NEXT: pand %xmm5, %xmm2 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: punpcklbw {{.*#}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psraw $4, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psraw $2, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtw %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: psraw $1, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: packuswb %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: pextrb $1, %xmm1, %ecx -; SSE41-NEXT: pextrb $1, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pextrb $0, %xmm1, %ecx -; SSE41-NEXT: pextrb $0, %xmm0, %edx -; SSE41-NEXT: sarb %cl, %dl -; SSE41-NEXT: movzbl %dl, %ecx -; SSE41-NEXT: movd %ecx, %xmm2 -; SSE41-NEXT: pinsrb $1, %eax, %xmm2 -; SSE41-NEXT: pextrb $2, %xmm1, %ecx -; SSE41-NEXT: pextrb $2, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm2 -; SSE41-NEXT: pextrb $3, %xmm1, %ecx -; SSE41-NEXT: pextrb $3, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm2 -; SSE41-NEXT: pextrb $4, %xmm1, %ecx -; SSE41-NEXT: pextrb $4, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm2 -; SSE41-NEXT: pextrb $5, %xmm1, %ecx -; SSE41-NEXT: pextrb $5, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm2 -; SSE41-NEXT: pextrb $6, %xmm1, %ecx -; SSE41-NEXT: pextrb $6, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm2 -; SSE41-NEXT: pextrb $7, %xmm1, %ecx -; SSE41-NEXT: pextrb $7, %xmm0, %eax -; SSE41-NEXT: 
sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm2 -; SSE41-NEXT: pextrb $8, %xmm1, %ecx -; SSE41-NEXT: pextrb $8, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm2 -; SSE41-NEXT: pextrb $9, %xmm1, %ecx -; SSE41-NEXT: pextrb $9, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm2 -; SSE41-NEXT: pextrb $10, %xmm1, %ecx -; SSE41-NEXT: pextrb $10, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm2 -; SSE41-NEXT: pextrb $11, %xmm1, %ecx -; SSE41-NEXT: pextrb $11, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm2 -; SSE41-NEXT: pextrb $12, %xmm1, %ecx -; SSE41-NEXT: pextrb $12, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm2 -; SSE41-NEXT: pextrb $13, %xmm1, %ecx -; SSE41-NEXT: pextrb $13, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm2 -; SSE41-NEXT: pextrb $14, %xmm1, %ecx -; SSE41-NEXT: pextrb $14, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm2 -; SSE41-NEXT: pextrb $15, %xmm1, %ecx -; SSE41-NEXT: pextrb $15, %xmm0, %eax -; SSE41-NEXT: sarb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: punpckhbw {{.*#}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; SSE41-NEXT: punpckhbw {{.*#}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $4, %xmm4 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $2, %xmm4 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: psraw $1, %xmm4 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: punpcklbw {{.*#}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; SSE41-NEXT: punpcklbw {{.*#}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $4, %xmm2 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $2, %xmm2 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psraw $1, %xmm2 +; SSE41-NEXT: paddw %xmm0, %xmm0 +; SSE41-NEXT: pblendvb %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpextrb $1, %xmm1, %ecx -; AVX-NEXT: vpextrb $1, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpextrb $0, %xmm1, %ecx -; AVX-NEXT: vpextrb $0, %xmm0, %edx -; AVX-NEXT: sarb %cl, %dl -; AVX-NEXT: movzbl %dl, %ecx -; AVX-NEXT: vmovd %ecx, %xmm2 -; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX-NEXT: 
vpextrb $2, %xmm1, %ecx -; AVX-NEXT: vpextrb $2, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $3, %xmm1, %ecx -; AVX-NEXT: vpextrb $3, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $4, %xmm1, %ecx -; AVX-NEXT: vpextrb $4, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $5, %xmm1, %ecx -; AVX-NEXT: vpextrb $5, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $6, %xmm1, %ecx -; AVX-NEXT: vpextrb $6, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $7, %xmm1, %ecx -; AVX-NEXT: vpextrb $7, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $8, %xmm1, %ecx -; AVX-NEXT: vpextrb $8, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $9, %xmm1, %ecx -; AVX-NEXT: vpextrb $9, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $10, %xmm1, %ecx -; AVX-NEXT: vpextrb $10, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $11, %xmm1, %ecx -; AVX-NEXT: vpextrb $11, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $12, %xmm1, %ecx -; AVX-NEXT: vpextrb $12, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $13, %xmm1, %ecx -; AVX-NEXT: vpextrb $13, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $14, %xmm1, %ecx -; AVX-NEXT: vpextrb $14, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $15, %xmm1, %ecx -; AVX-NEXT: vpextrb $15, %xmm0, %eax -; AVX-NEXT: sarb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpunpckhbw {{.*#}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] +; AVX-NEXT: vpunpckhbw {{.*#}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX-NEXT: vpsraw $4, %xmm3, %xmm4 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpsraw $2, %xmm3, %xmm4 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3 +; AVX-NEXT: vpsraw $1, %xmm3, %xmm4 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2 +; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX-NEXT: vpunpcklbw {{.*#}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] +; AVX-NEXT: vpunpcklbw {{.*#}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX-NEXT: vpsraw $4, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $2, %xmm0, %xmm3 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: 
vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $1, %xmm0, %xmm3 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 +; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq %ashr = ashr <16 x i8> %r, %a %tmp2 = bitcast <16 x i8> %ashr to <2 x i64> @@ -614,118 +370,83 @@ entry: define <2 x i64> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind readnone ssp { entry: - -; SSE2: pextrw $7, %xmm0, %eax -; SSE2-NEXT: pextrw $7, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: pextrw $3, %xmm0, %eax -; SSE2-NEXT: pextrw $3, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] -; SSE2-NEXT: pextrw $5, %xmm0, %eax -; SSE2-NEXT: pextrw $5, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: pextrw $1, %xmm0, %eax -; SSE2-NEXT: pextrw $1, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3] -; SSE2-NEXT: pextrw $6, %xmm0, %eax -; SSE2-NEXT: pextrw $6, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: pextrw $2, %xmm0, %eax -; SSE2-NEXT: pextrw $2, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3] -; SSE2-NEXT: pextrw $4, %xmm0, %eax -; SSE2-NEXT: pextrw $4, %xmm1, %ecx -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movd %xmm1, %ecx -; SSE2-NEXT: movd %xmm0, %eax -; SSE2-NEXT: movzwl %ax, %eax -; SSE2-NEXT: shrl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] +; ALL-NOT: shrl +; +; SSE2: psllw $12, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psraw $15, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm1 +; SSE2-NEXT: psraw $15, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: pextrw $1, %xmm0, %eax -; SSE41-NEXT: pextrw $1, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: movd %xmm1, %ecx -; SSE41-NEXT: movd %xmm0, %edx -; SSE41-NEXT: movzwl %dx, %edx -; SSE41-NEXT: shrl %cl, %edx -; SSE41-NEXT: movd %edx, 
%xmm2 -; SSE41-NEXT: pinsrw $1, %eax, %xmm2 -; SSE41-NEXT: pextrw $2, %xmm0, %eax -; SSE41-NEXT: pextrw $2, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $2, %eax, %xmm2 -; SSE41-NEXT: pextrw $3, %xmm0, %eax -; SSE41-NEXT: pextrw $3, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $3, %eax, %xmm2 -; SSE41-NEXT: pextrw $4, %xmm0, %eax -; SSE41-NEXT: pextrw $4, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $4, %eax, %xmm2 -; SSE41-NEXT: pextrw $5, %xmm0, %eax -; SSE41-NEXT: pextrw $5, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $5, %eax, %xmm2 -; SSE41-NEXT: pextrw $6, %xmm0, %eax -; SSE41-NEXT: pextrw $6, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $6, %eax, %xmm2 -; SSE41-NEXT: pextrw $7, %xmm0, %eax -; SSE41-NEXT: pextrw $7, %xmm1, %ecx -; SSE41-NEXT: shrl %cl, %eax -; SSE41-NEXT: pinsrw $7, %eax, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psllw $12, %xmm0 +; SSE41-NEXT: psllw $4, %xmm1 +; SSE41-NEXT: por %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psrlw $8, %xmm4 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm4, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $4, %xmm1 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $2, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlw $1, %xmm1 +; SSE41-NEXT: paddw %xmm3, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: pblendvb %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpextrw $1, %xmm0, %eax -; AVX-NEXT: vpextrw $1, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vmovd %xmm1, %ecx -; AVX-NEXT: vmovd %xmm0, %edx -; AVX-NEXT: movzwl %dx, %edx -; AVX-NEXT: shrl %cl, %edx -; AVX-NEXT: vmovd %edx, %xmm2 -; AVX-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: vpextrw $2, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: vpextrw $3, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: vpextrw $4, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: vpextrw $5, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: vpextrw $6, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: vpextrw $7, %xmm1, %ecx -; AVX-NEXT: shrl %cl, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX: vpsllw $12, %xmm1, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm1, %xmm1 +; AVX-NEXT: vpor %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm1, %xmm2 +; AVX-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm2, %xmm2, %xmm2 +; AVX-NEXT: 
vpblendvb %xmm2, %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %lshr = lshr <8 x i16> %r, %a %tmp2 = bitcast <8 x i16> %lshr to <2 x i64> @@ -734,281 +455,71 @@ entry: define <2 x i64> @lshr_16i8(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp { entry: -; SSE2: pushq %rbp -; SSE2-NEXT: pushq %r15 -; SSE2-NEXT: pushq %r14 -; SSE2-NEXT: pushq %r13 -; SSE2-NEXT: pushq %r12 -; SSE2-NEXT: pushq %rbx -; SSE2-NEXT: movaps %xmm1, -24(%rsp) -; SSE2-NEXT: movaps %xmm0, -40(%rsp) -; SSE2-NEXT: movb -9(%rsp), %cl -; SSE2-NEXT: movb -25(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movb -17(%rsp), %cl -; SSE2-NEXT: movb -33(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -13(%rsp), %cl -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movl %eax, -44(%rsp) -; SSE2-NEXT: movb -29(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movzbl %al, %r9d -; SSE2-NEXT: movb -21(%rsp), %cl -; SSE2-NEXT: movb -37(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -11(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r10d -; SSE2-NEXT: movb -27(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -19(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r11d -; SSE2-NEXT: movb -35(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -15(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r14d -; SSE2-NEXT: movb -31(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movzbl %al, %r15d -; SSE2-NEXT: movb -23(%rsp), %cl -; SSE2-NEXT: movb -39(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -10(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r12d -; SSE2-NEXT: movb -26(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -18(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r13d -; SSE2-NEXT: movb -34(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -14(%rsp), %cl -; SSE2-NEXT: movzbl %al, %r8d -; SSE2-NEXT: movb -30(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -22(%rsp), %cl -; SSE2-NEXT: movzbl %al, %ebp -; SSE2-NEXT: movb -38(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movb -12(%rsp), %cl -; SSE2-NEXT: movzbl %al, %edi -; SSE2-NEXT: movb -28(%rsp), %dl -; SSE2-NEXT: shrb %cl, %dl -; SSE2-NEXT: movb -20(%rsp), %cl -; SSE2-NEXT: movzbl %dl, %esi -; SSE2-NEXT: movb -36(%rsp), %bl -; SSE2-NEXT: shrb %cl, %bl -; SSE2-NEXT: movb -16(%rsp), %cl -; SSE2-NEXT: movzbl %bl, %ebx -; SSE2-NEXT: movb -32(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movzbl %al, %edx -; SSE2-NEXT: movb -24(%rsp), %cl -; SSE2-NEXT: movb -40(%rsp), %al -; SSE2-NEXT: shrb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd -44(%rsp), %xmm1 -; SSE2: movd %r9d, %xmm2 -; SSE2-NEXT: movd %r10d, %xmm3 -; SSE2-NEXT: movd %r11d, %xmm4 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %r14d, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: movd %r15d, %xmm1 -; SSE2-NEXT: movd %r12d, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = 
xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: movd %r13d, %xmm0 -; SSE2-NEXT: movd %r8d, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %ebp, %xmm0 -; SSE2-NEXT: movd %edi, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] -; SSE2-NEXT: movd %esi, %xmm0 -; SSE2-NEXT: movd %ebx, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: movd %edx, %xmm4 -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %r12 -; SSE2-NEXT: popq %r13 -; SSE2-NEXT: popq %r14 -; SSE2-NEXT: popq %r15 -; SSE2-NEXT: popq %rbp +; ALL-NOT: shrb +; +; SSE2: psllw $5, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm2 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm1 +; SSE2-NEXT: pandn %xmm0, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; -; SSE41: pextrb $1, %xmm1, %ecx -; SSE41-NEXT: pextrb $1, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pextrb $0, %xmm1, %ecx -; SSE41-NEXT: pextrb $0, %xmm0, %edx -; SSE41-NEXT: shrb %cl, %dl -; SSE41-NEXT: movzbl %dl, %ecx -; SSE41-NEXT: movd %ecx, %xmm2 -; SSE41-NEXT: pinsrb $1, %eax, %xmm2 -; SSE41-NEXT: pextrb $2, %xmm1, %ecx -; SSE41-NEXT: pextrb $2, %xmm0, %eax -; 
SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm2 -; SSE41-NEXT: pextrb $3, %xmm1, %ecx -; SSE41-NEXT: pextrb $3, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm2 -; SSE41-NEXT: pextrb $4, %xmm1, %ecx -; SSE41-NEXT: pextrb $4, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm2 -; SSE41-NEXT: pextrb $5, %xmm1, %ecx -; SSE41-NEXT: pextrb $5, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm2 -; SSE41-NEXT: pextrb $6, %xmm1, %ecx -; SSE41-NEXT: pextrb $6, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm2 -; SSE41-NEXT: pextrb $7, %xmm1, %ecx -; SSE41-NEXT: pextrb $7, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm2 -; SSE41-NEXT: pextrb $8, %xmm1, %ecx -; SSE41-NEXT: pextrb $8, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm2 -; SSE41-NEXT: pextrb $9, %xmm1, %ecx -; SSE41-NEXT: pextrb $9, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm2 -; SSE41-NEXT: pextrb $10, %xmm1, %ecx -; SSE41-NEXT: pextrb $10, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm2 -; SSE41-NEXT: pextrb $11, %xmm1, %ecx -; SSE41-NEXT: pextrb $11, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm2 -; SSE41-NEXT: pextrb $12, %xmm1, %ecx -; SSE41-NEXT: pextrb $12, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm2 -; SSE41-NEXT: pextrb $13, %xmm1, %ecx -; SSE41-NEXT: pextrb $13, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm2 -; SSE41-NEXT: pextrb $14, %xmm1, %ecx -; SSE41-NEXT: pextrb $14, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm2 -; SSE41-NEXT: pextrb $15, %xmm1, %ecx -; SSE41-NEXT: pextrb $15, %xmm0, %eax -; SSE41-NEXT: shrb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psllw $5, %xmm1 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $4, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $2, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlw $1, %xmm3 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pblendvb %xmm3, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX: vpextrb $1, %xmm1, %ecx -; AVX-NEXT: vpextrb $1, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpextrb $0, %xmm1, %ecx -; AVX-NEXT: vpextrb $0, %xmm0, %edx -; AVX-NEXT: shrb %cl, %dl -; AVX-NEXT: movzbl %dl, %ecx -; AVX-NEXT: vmovd %ecx, %xmm2 -; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $2, %xmm1, %ecx -; AVX-NEXT: vpextrb $2, %xmm0, %eax -; 
AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $3, %xmm1, %ecx -; AVX-NEXT: vpextrb $3, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $4, %xmm1, %ecx -; AVX-NEXT: vpextrb $4, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $5, %xmm1, %ecx -; AVX-NEXT: vpextrb $5, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $6, %xmm1, %ecx -; AVX-NEXT: vpextrb $6, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $7, %xmm1, %ecx -; AVX-NEXT: vpextrb $7, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $8, %xmm1, %ecx -; AVX-NEXT: vpextrb $8, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $9, %xmm1, %ecx -; AVX-NEXT: vpextrb $9, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $10, %xmm1, %ecx -; AVX-NEXT: vpextrb $10, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $11, %xmm1, %ecx -; AVX-NEXT: vpextrb $11, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $12, %xmm1, %ecx -; AVX-NEXT: vpextrb $12, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $13, %xmm1, %ecx -; AVX-NEXT: vpextrb $13, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $14, %xmm1, %ecx -; AVX-NEXT: vpextrb $14, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX-NEXT: vpextrb $15, %xmm1, %ecx -; AVX-NEXT: vpextrb $15, %xmm0, %eax -; AVX-NEXT: shrb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 +; AVX: vpsllw $5, %xmm1, %xmm1 +; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $2, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $1, %xmm0, %xmm2 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq %lshr = lshr <16 x i8> %r, %a %tmp2 = bitcast <16 x i8> %lshr to <2 x i64> diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll index 8dc76231856a2..2c6c8a3e7ade3 100644 --- a/test/CodeGen/X86/vector-shuffle-512-v8.ll +++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll @@ -88,7 +88,7 @@ define <8 x double> @shuffle_v8f64_70000000(<8 x double> %a, <8 x double> %b) { define <8 x double> @shuffle_v8f64_01014545(<8 x double> %a, <8 x double> %b) { ; ALL-LABEL: shuffle_v8f64_01014545: ; ALL: # BB#0: -; ALL-NEXT: vpermpd $68, %zmm0, %zmm0 +; ALL-NEXT: vshuff64x2 $160, %zmm0, %zmm0, %zmm0 ; 
ALL-NEXT: retq %shuffle = shufflevector <8 x double> %a, <8 x double> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5> ret <8 x double> %shuffle @@ -650,7 +650,7 @@ define <8 x i64> @shuffle_v8i64_70000000(<8 x i64> %a, <8 x i64> %b) { define <8 x i64> @shuffle_v8i64_01014545(<8 x i64> %a, <8 x i64> %b) { ; ALL-LABEL: shuffle_v8i64_01014545: ; ALL: # BB#0: -; ALL-NEXT: vpermq $68, %zmm0, %zmm0 +; ALL-NEXT: vshufi64x2 $160, %zmm0, %zmm0, %zmm0 ; ALL-NEXT: retq %shuffle = shufflevector <8 x i64> %a, <8 x i64> %b, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5> ret <8 x i64> %shuffle diff --git a/test/CodeGen/X86/win32-eh-states.ll b/test/CodeGen/X86/win32-eh-states.ll index 8db127df6da73..0aae8c4d01898 100644 --- a/test/CodeGen/X86/win32-eh-states.ll +++ b/test/CodeGen/X86/win32-eh-states.ll @@ -30,7 +30,7 @@ $"\01??_R0H@8" = comdat any @"\01??_R0H@8" = linkonce_odr global %rtti.TypeDescriptor2 { i8** @"\01??_7type_info@@6B@", i8* null, [3 x i8] c".H\00" }, comdat @llvm.eh.handlertype.H.0 = private unnamed_addr constant %eh.CatchHandlerType { i32 0, i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*) }, section "llvm.metadata" -define void @f() #0 { +define void @f() #0 personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { entry: invoke void @may_throw(i32 1) to label %invoke.cont unwind label %lpad @@ -46,14 +46,14 @@ try.cont.9: ; preds = %invoke.cont.3, %inv ret void lpad: ; preds = %catch, %entry - %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) + %0 = landingpad { i8*, i32 } catch %eh.CatchHandlerType* @llvm.eh.handlertype.H.0 %1 = extractvalue { i8*, i32 } %0, 0 %2 = extractvalue { i8*, i32 } %0, 1 br label %catch.dispatch.4 lpad.1: ; preds = %invoke.cont - %3 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) + %3 = landingpad { i8*, i32 } catch i8* bitcast (%eh.CatchHandlerType* @llvm.eh.handlertype.H.0 to i8*) %4 = extractvalue { i8*, i32 } %3, 0 %5 = extractvalue { i8*, i32 } %3, 1 @@ -110,3 +110,5 @@ eh.resume: ; preds = %catch.dispatch.4 ; CHECK: movl $3, Lf$frame_escape_{{[0-9]+.*}} ; CHECK: movl $3, (%esp) ; CHECK: calll _may_throw + +; CHECK: .safeseh ___ehhandler$f diff --git a/test/CodeGen/X86/win32-eh.ll b/test/CodeGen/X86/win32-eh.ll index 42c9d9e2240de..f235d2884d03b 100644 --- a/test/CodeGen/X86/win32-eh.ll +++ b/test/CodeGen/X86/win32-eh.ll @@ -6,16 +6,27 @@ declare i32 @_except_handler4(...) declare i32 @__CxxFrameHandler3(...) 
declare void @llvm.eh.begincatch(i8*, i8*) declare void @llvm.eh.endcatch() +declare i32 @llvm.eh.typeid.for(i8*) -define void @use_except_handler3() { +define internal i32 @catchall_filt() { + ret i32 1 +} + +define void @use_except_handler3() personality i32 (...)* @_except_handler3 { +entry: invoke void @may_throw_or_crash() to label %cont unwind label %catchall cont: ret void catchall: - landingpad { i8*, i32 } personality i32 (...)* @_except_handler3 - catch i8* null - br label %cont + %0 = landingpad { i8*, i32 } + catch i8* bitcast (i32 ()* @catchall_filt to i8*) + %1 = extractvalue { i8*, i32 } %0, 1 + %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @catchall_filt to i8*)) #4 + %matches = icmp eq i32 %1, %2 + br i1 %matches, label %cont, label %eh.resume +eh.resume: + resume { i8*, i32 } %0 } ; CHECK-LABEL: _use_except_handler3: @@ -34,15 +45,27 @@ catchall: ; CHECK: movl %[[next]], %fs:0 ; CHECK: retl -define void @use_except_handler4() { +; CHECK: .section .xdata,"dr" +; CHECK-LABEL: L__ehtable$use_except_handler3: +; CHECK-NEXT: .long -1 +; CHECK-NEXT: .long _catchall_filt +; CHECK-NEXT: .long Ltmp{{[0-9]+}} + +define void @use_except_handler4() personality i32 (...)* @_except_handler4 { +entry: invoke void @may_throw_or_crash() to label %cont unwind label %catchall cont: ret void catchall: - landingpad { i8*, i32 } personality i32 (...)* @_except_handler4 - catch i8* null - br label %cont + %0 = landingpad { i8*, i32 } + catch i8* bitcast (i32 ()* @catchall_filt to i8*) + %1 = extractvalue { i8*, i32 } %0, 1 + %2 = call i32 @llvm.eh.typeid.for(i8* bitcast (i32 ()* @catchall_filt to i8*)) #4 + %matches = icmp eq i32 %1, %2 + br i1 %matches, label %cont, label %eh.resume +eh.resume: + resume { i8*, i32 } %0 } ; CHECK-LABEL: _use_except_handler4: @@ -64,13 +87,23 @@ catchall: ; CHECK: movl %[[next]], %fs:0 ; CHECK: retl -define void @use_CxxFrameHandler3() { +; CHECK: .section .xdata,"dr" +; CHECK-LABEL: L__ehtable$use_except_handler4: +; CHECK-NEXT: .long -2 +; CHECK-NEXT: .long 0 +; CHECK-NEXT: .long 9999 +; CHECK-NEXT: .long 0 +; CHECK-NEXT: .long -2 +; CHECK-NEXT: .long _catchall_filt +; CHECK-NEXT: .long Ltmp{{[0-9]+}} + +define void @use_CxxFrameHandler3() personality i32 (...)* @__CxxFrameHandler3 { invoke void @may_throw_or_crash() to label %cont unwind label %catchall cont: ret void catchall: - %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__CxxFrameHandler3 + %ehvals = landingpad { i8*, i32 } catch i8* null %ehptr = extractvalue { i8*, i32 } %ehvals, 0 call void @llvm.eh.begincatch(i8* %ehptr, i8* null) @@ -110,3 +143,7 @@ catchall: ; CHECK-LABEL: ___ehhandler$use_CxxFrameHandler3: ; CHECK: movl $L__ehtable$use_CxxFrameHandler3, %eax ; CHECK: jmp ___CxxFrameHandler3 # TAILCALL + +; CHECK: .safeseh __except_handler3 +; CHECK: .safeseh __except_handler4 +; CHECK: .safeseh ___ehhandler$use_CxxFrameHandler3 diff --git a/test/CodeGen/X86/win64_call_epi.ll b/test/CodeGen/X86/win64_call_epi.ll index 71c44b0850040..096cbe41c5404 100644 --- a/test/CodeGen/X86/win64_call_epi.ll +++ b/test/CodeGen/X86/win64_call_epi.ll @@ -5,7 +5,7 @@ declare void @baz() declare i32 @personality(...) ; Check for 'nop' between the last call and the epilogue. 
-define void @foo1() { +define void @foo1() personality i32 (...)* @personality { invoke void @bar() to label %normal @@ -15,7 +15,7 @@ normal: ret void catch: - %1 = landingpad { i8*, i32 } personality i32 (...)* @personality cleanup + %1 = landingpad { i8*, i32 } cleanup resume { i8*, i32 } %1 } ; WIN64-LABEL: foo1: diff --git a/test/CodeGen/X86/win64_eh.ll b/test/CodeGen/X86/win64_eh.ll index d668f43c895ea..cb9d026bec2d6 100644 --- a/test/CodeGen/X86/win64_eh.ll +++ b/test/CodeGen/X86/win64_eh.ll @@ -101,7 +101,7 @@ declare void @_d_eh_resume_unwind(i8*) declare i32 @bar() -define i32 @foo4() #0 { +define i32 @foo4() #0 personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality { entry: %step = alloca i32, align 4 store i32 0, i32* %step @@ -115,7 +115,7 @@ finally: br label %endtryfinally landingpad: - %landing_pad = landingpad { i8*, i32 } personality i32 (i32, i32, i64, i8*, i8*)* @_d_eh_personality + %landing_pad = landingpad { i8*, i32 } cleanup %tmp3 = extractvalue { i8*, i32 } %landing_pad, 0 store i32 2, i32* %step diff --git a/test/CodeGen/X86/win_eh_prepare.ll b/test/CodeGen/X86/win_eh_prepare.ll index a33dd92ad72a9..3e3f9af058223 100644 --- a/test/CodeGen/X86/win_eh_prepare.ll +++ b/test/CodeGen/X86/win_eh_prepare.ll @@ -11,7 +11,7 @@ declare i32 @__C_specific_handler(...) declare i32 @__gxx_personality_seh0(...) declare i32 @llvm.eh.typeid.for(i8*) readnone nounwind -define i32 @use_seh() { +define i32 @use_seh() personality i32 (...)* @__C_specific_handler { entry: invoke void @maybe_throw() to label %cont unwind label %lpad @@ -20,7 +20,7 @@ cont: ret i32 0 lpad: - %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__C_specific_handler + %ehvals = landingpad { i8*, i32 } cleanup catch i8* bitcast (i32 (i8*, i8*)* @filt_g to i8*) %ehsel = extractvalue { i8*, i32 } %ehvals, 1 @@ -51,7 +51,7 @@ define internal i32 @filt_g(i8*, i8*) { ; A MinGW64-ish EH style. It could happen if a binary uses both MSVC CRT and ; mingw CRT and is linked with LTO. -define i32 @use_gcc() { +define i32 @use_gcc() personality i32 (...)* @__gxx_personality_seh0 { entry: invoke void @maybe_throw() to label %cont unwind label %lpad @@ -60,7 +60,7 @@ cont: ret i32 0 lpad: - %ehvals = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_seh0 + %ehvals = landingpad { i8*, i32 } cleanup catch i8* bitcast (i8** @_ZTIi to i8*) %ehsel = extractvalue { i8*, i32 } %ehvals, 1 diff --git a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll index a2c5b3a6eedfa..248a9202e9979 100644 --- a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll +++ b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll @@ -27,12 +27,8 @@ define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwin ; CHECK-NEXT: .long 1 ## 0x1 ; CHECK-NEXT: .long 1 ## 0x1 ; CHECK-LABEL: foo1: -; FIXME: The operation gets scalarized. If/when the compiler learns to better -; use [V]CVTDQ2PD, this will need updated. -; CHECK: cvtsi2sdq -; CHECK: cvtsi2sdq -; CHECK: cvtsi2sdq -; CHECK: cvtsi2sdq +; CHECK: cvtdq2pd +; CHECK: cvtdq2pd %cmp = fcmp oeq <4 x float> %val, %test %ext = zext <4 x i1> %cmp to <4 x i32> %result = sitofp <4 x i32> %ext to <4 x double> |