; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s

define <1 x float> @constrained_vector_fdiv_v1f32() {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    divss {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %div = call <1 x float> @llvm.experimental.constrained.fdiv.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %div
}

define <2 x double> @constrained_vector_fdiv_v2f64() {
; CHECK-LABEL: constrained_vector_fdiv_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT:    divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %div
}

define <3 x float> @constrained_vector_fdiv_v3f32() {
; CHECK-LABEL: constrained_vector_fdiv_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    divss %xmm1, %xmm2
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    divss %xmm1, %xmm0
; CHECK-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    divss %xmm1, %xmm3
; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT:    retq
entry:
  %div = call <3 x float> @llvm.experimental.constrained.fdiv.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %div
}

define <3 x double> @constrained_vector_fdiv_v3f64() {
; CHECK-LABEL: constrained_vector_fdiv_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT:    divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    divsd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
entry:
  %div = call <3 x double> @llvm.experimental.constrained.fdiv.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %div
}

define <4 x double> @constrained_vector_fdiv_v4f64() {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT:    divpd %xmm2, %xmm0
; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT:    divpd %xmm2, %xmm1
; CHECK-NEXT:    retq
entry:
  %div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %div
}

define <1 x float> @constrained_vector_frem_v1f32() {
; CHECK-LABEL: constrained_vector_frem_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %rem = call <1 x float> @llvm.experimental.constrained.frem.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %rem
}

define <2 x double> @constrained_vector_frem_v2f64() {
; CHECK-LABEL: constrained_vector_frem_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %rem = call <2 x double> @llvm.experimental.constrained.frem.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %rem
}

define <3 x float> @constrained_vector_frem_v3f32() {
; CHECK-LABEL: constrained_vector_frem_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmodf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %rem = call <3 x float> @llvm.experimental.constrained.frem.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %rem
}

define <3 x double> @constrained_vector_frem_v3f64() {
; CHECK-LABEL: constrained_vector_frem_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %rem = call <3 x double> @llvm.experimental.constrained.frem.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %rem
}

define <4 x double> @constrained_vector_frem_v4f64() {
; CHECK-LABEL: constrained_vector_frem_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmod
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
  %rem = call <4 x double> @llvm.experimental.constrained.frem.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %rem
}

define <1 x float> @constrained_vector_fmul_v1f32() {
; CHECK-LABEL: constrained_vector_fmul_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %mul = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %mul
}

define <2 x double> @constrained_vector_fmul_v2f64() {
; CHECK-LABEL: constrained_vector_fmul_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %mul
}

define <3 x float> @constrained_vector_fmul_v3f32() {
; CHECK-LABEL: constrained_vector_fmul_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    mulss %xmm1, %xmm2
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    mulss %xmm1, %xmm0
; CHECK-NEXT:    mulss {{.*}}(%rip), %xmm1
; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT:    retq
entry:
  %mul = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %mul
}

define <3 x double> @constrained_vector_fmul_v3f64() {
; CHECK-LABEL: constrained_vector_fmul_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    mulsd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
entry:
  %mul = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %mul
}

define <4 x double> @constrained_vector_fmul_v4f64() {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
; CHECK-NEXT:    mulpd %xmm1, %xmm0
; CHECK-NEXT:    mulpd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    retq
entry:
  %mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %mul
}

define <1 x float> @constrained_vector_fadd_v1f32() {
; CHECK-LABEL: constrained_vector_fadd_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %add
}

define <2 x double> @constrained_vector_fadd_v2f64() {
; CHECK-LABEL: constrained_vector_fadd_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %add
}

define <3 x float> @constrained_vector_fadd_v3f32() {
; CHECK-LABEL: constrained_vector_fadd_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorps %xmm1, %xmm1
; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    addss %xmm2, %xmm1
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    addss %xmm2, %xmm0
; CHECK-NEXT:    addss {{.*}}(%rip), %xmm2
; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    retq
entry:
  %add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %add
}

define <3 x double> @constrained_vector_fadd_v3f64() {
; CHECK-LABEL: constrained_vector_fadd_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    xorpd %xmm1, %xmm1
; CHECK-NEXT:    addsd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
entry:
  %add = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %add
}

define <4 x double> @constrained_vector_fadd_v4f64() {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
; CHECK-NEXT:    addpd %xmm1, %xmm0
; CHECK-NEXT:    addpd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    retq
entry:
  %add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %add
}

define <1 x float> @constrained_vector_fsub_v1f32() {
; CHECK-LABEL: constrained_vector_fsub_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    subss {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %sub = call <1 x float> @llvm.experimental.constrained.fsub.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %sub
}

define <2 x double> @constrained_vector_fsub_v2f64() {
; CHECK-LABEL: constrained_vector_fsub_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sub
}

define <3 x float> @constrained_vector_fsub_v3f32() {
; CHECK-LABEL: constrained_vector_fsub_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorps %xmm0, %xmm0
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movaps %xmm1, %xmm2
; CHECK-NEXT:    subss %xmm0, %xmm2
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    subss {{.*}}(%rip), %xmm0
; CHECK-NEXT:    subss {{.*}}(%rip), %xmm1
; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT:    retq
entry:
  %sub = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %sub
}

define <3 x double> @constrained_vector_fsub_v3f64() {
; CHECK-LABEL: constrained_vector_fsub_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xorpd %xmm0, %xmm0
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    subsd %xmm0, %xmm1
; CHECK-NEXT:    movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
entry:
  %sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %sub
}

define <4 x double> @constrained_vector_fsub_v4f64() {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT:    movapd %xmm1, %xmm0
; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    subpd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    retq
entry:
  %sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %sub
}

define <1 x float> @constrained_vector_sqrt_v1f32() {
; CHECK-LABEL: constrained_vector_sqrt_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    sqrtss %xmm0, %xmm0
; CHECK-NEXT:    retq
entry:
  %sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %sqrt
}

define <2 x double> @constrained_vector_sqrt_v2f64() {
; CHECK-LABEL: constrained_vector_sqrt_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    retq
entry:
  %sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sqrt
}
define <3 x float> @constrained_vector_sqrt_v3f32() {
; CHECK-LABEL: constrained_vector_sqrt_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    sqrtss %xmm0, %xmm1
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    sqrtss %xmm0, %xmm0
; CHECK-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    sqrtss %xmm2, %xmm2
; CHECK-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT:    retq
entry:
  %sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %sqrt
}

define <3 x double> @constrained_vector_sqrt_v3f64() {
; CHECK-LABEL: constrained_vector_sqrt_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    sqrtsd %xmm0, %xmm1
; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    movsd %xmm1, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movapd %xmm0, %xmm1
; CHECK-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT:    fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    retq
entry:
  %sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %sqrt
}

define <4 x double> @constrained_vector_sqrt_v4f64() {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT:    sqrtpd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    retq
entry:
  %sqrt = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %sqrt
}

define <1 x float> @constrained_vector_pow_v1f32() {
; CHECK-LABEL: constrained_vector_pow_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq powf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %pow
}

define <2 x double> @constrained_vector_pow_v2f64() {
; CHECK-LABEL: constrained_vector_pow_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %pow
}

define <3 x float> @constrained_vector_pow_v3f32() {
; CHECK-LABEL: constrained_vector_pow_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq powf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq powf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq powf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %pow
}

define <3 x double> @constrained_vector_pow_v3f64() {
; CHECK-LABEL: constrained_vector_pow_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %pow
}

define <4 x double> @constrained_vector_pow_v4f64() {
; CHECK-LABEL: constrained_vector_pow_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq pow
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %pow
}

define <1 x float> @constrained_vector_powi_v1f32() {
; CHECK-LABEL: constrained_vector_powi_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powisf2
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32(
           <1 x float> ,
           i32 3,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %powi
}

define <2 x double> @constrained_vector_powi_v2f64() {
; CHECK-LABEL: constrained_vector_powi_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64(
           <2 x double> ,
           i32 3,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %powi
}

define <3 x float> @constrained_vector_powi_v3f32() {
; CHECK-LABEL: constrained_vector_powi_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powisf2
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powisf2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powisf2
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32(
           <3 x float> ,
           i32 3,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %powi
}

define <3 x double> @constrained_vector_powi_v3f64() {
; CHECK-LABEL: constrained_vector_powi_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64(
           <3 x double> ,
           i32 3,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %powi
}

define <4 x double> @constrained_vector_powi_v4f64() {
; CHECK-LABEL: constrained_vector_powi_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movl $3, %edi
; CHECK-NEXT:    callq __powidf2
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64(
           <4 x double> ,
           i32 3,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %powi
}

define <1 x float> @constrained_vector_sin_v1f32() {
; CHECK-LABEL: constrained_vector_sin_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq sinf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %sin
}

define <2 x double> @constrained_vector_sin_v2f64() {
; CHECK-LABEL: constrained_vector_sin_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %sin
}

define <3 x float> @constrained_vector_sin_v3f32() {
; CHECK-LABEL: constrained_vector_sin_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq sinf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq sinf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq sinf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %sin
}

define <3 x double> @constrained_vector_sin_v3f64() {
; CHECK-LABEL: constrained_vector_sin_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %sin
}

define <4 x double> @constrained_vector_sin_v4f64() {
; CHECK-LABEL: constrained_vector_sin_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq sin
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %sin
}

define <1 x float> @constrained_vector_cos_v1f32() {
; CHECK-LABEL: constrained_vector_cos_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq cosf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %cos
}

define <2 x double> @constrained_vector_cos_v2f64() {
; CHECK-LABEL: constrained_vector_cos_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %cos
}

define <3 x float> @constrained_vector_cos_v3f32() {
; CHECK-LABEL: constrained_vector_cos_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq cosf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq cosf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq cosf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %cos
}

define <3 x double> @constrained_vector_cos_v3f64() {
; CHECK-LABEL: constrained_vector_cos_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %cos
}

define <4 x double> @constrained_vector_cos_v4f64() {
; CHECK-LABEL: constrained_vector_cos_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq cos
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %cos
}

define <1 x float> @constrained_vector_exp_v1f32() {
; CHECK-LABEL: constrained_vector_exp_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq expf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %exp
}

define <2 x double> @constrained_vector_exp_v2f64() {
; CHECK-LABEL: constrained_vector_exp_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %exp
}

define <3 x float> @constrained_vector_exp_v3f32() {
; CHECK-LABEL: constrained_vector_exp_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq expf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq expf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq expf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %exp
}

define <3 x double> @constrained_vector_exp_v3f64() {
; CHECK-LABEL: constrained_vector_exp_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %exp
}

define <4 x double> @constrained_vector_exp_v4f64() {
; CHECK-LABEL: constrained_vector_exp_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %exp
}

define <1 x float> @constrained_vector_exp2_v1f32() {
; CHECK-LABEL: constrained_vector_exp2_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq exp2f
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %exp2
}

define <2 x double> @constrained_vector_exp2_v2f64() {
; CHECK-LABEL: constrained_vector_exp2_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %exp2
}

define <3 x float> @constrained_vector_exp2_v3f32() {
; CHECK-LABEL: constrained_vector_exp2_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq exp2f
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq exp2f
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq exp2f
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %exp2
}

define <3 x double> @constrained_vector_exp2_v3f64() {
; CHECK-LABEL: constrained_vector_exp2_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %exp2
}

define <4 x double> @constrained_vector_exp2_v4f64() {
; CHECK-LABEL: constrained_vector_exp2_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq exp2
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %exp2
}

define <1 x float> @constrained_vector_log_v1f32() {
; CHECK-LABEL: constrained_vector_log_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq logf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %log
}

define <2 x double> @constrained_vector_log_v2f64() {
; CHECK-LABEL: constrained_vector_log_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %log
}

define <3 x float> @constrained_vector_log_v3f32() {
; CHECK-LABEL: constrained_vector_log_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq logf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq logf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq logf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %log
}

define <3 x double> @constrained_vector_log_v3f64() {
; CHECK-LABEL: constrained_vector_log_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %log
}

define <4 x double> @constrained_vector_log_v4f64() {
; CHECK-LABEL: constrained_vector_log_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log = call <4 x double> @llvm.experimental.constrained.log.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %log
}

define <1 x float> @constrained_vector_log10_v1f32() {
; CHECK-LABEL: constrained_vector_log10_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log10f
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %log10
}

define <2 x double> @constrained_vector_log10_v2f64() {
; CHECK-LABEL: constrained_vector_log10_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %log10
}

define <3 x float> @constrained_vector_log10_v3f32() {
; CHECK-LABEL: constrained_vector_log10_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log10f
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log10f
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log10f
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %log10
}

define <3 x double> @constrained_vector_log10_v3f64() {
; CHECK-LABEL: constrained_vector_log10_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %log10
}

define <4 x double> @constrained_vector_log10_v4f64() {
; CHECK-LABEL: constrained_vector_log10_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log10
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64(
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %log10
}

define <1 x float> @constrained_vector_log2_v1f32() {
; CHECK-LABEL: constrained_vector_log2_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log2f
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %log2
}

define <2 x double> @constrained_vector_log2_v2f64() {
; CHECK-LABEL: constrained_vector_log2_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %log2
}

define <3 x float> @constrained_vector_log2_v3f32() {
; CHECK-LABEL: constrained_vector_log2_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log2f
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log2f
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq log2f
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %log2
}

define <3 x double> @constrained_vector_log2_v3f64() {
; CHECK-LABEL: constrained_vector_log2_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq log2
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq log2 ; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: fldl {{[0-9]+}}(%rsp) ; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero ; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload ; CHECK-NEXT: # xmm1 = mem[0],zero ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64( <3 x double> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <3 x double> %log2 } define <4 x double> @constrained_vector_log2_v4f64() { ; CHECK-LABEL: constrained_vector_log2_v4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq log2 ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq log2 ; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq log2 ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq log2 ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: # xmm1 = xmm1[0],mem[0] ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64( <4 x double> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <4 x double> %log2 } define <1 x float> @constrained_vector_rint_v1f32() { ; CHECK-LABEL: constrained_vector_rint_v1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: callq rintf ; CHECK-NEXT: popq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32( <1 x float> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <1 x float> %rint } define <2 x double> @constrained_vector_rint_v2f64() { ; CHECK-LABEL: constrained_vector_rint_v2f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64( <2 x double> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <2 x double> %rint } define <3 x float> @constrained_vector_rint_v3f32() { ; CHECK-LABEL: constrained_vector_rint_v3f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: callq rintf ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: callq rintf ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte 
Spill ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: callq rintf ; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload ; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: # xmm1 = xmm1[0],mem[0] ; CHECK-NEXT: movaps %xmm1, %xmm0 ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32( <3 x float> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <3 x float> %rint } define <3 x double> @constrained_vector_rint_v3f64() { ; CHECK-LABEL: constrained_vector_rint_v3f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp) ; CHECK-NEXT: fldl {{[0-9]+}}(%rsp) ; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload ; CHECK-NEXT: # xmm0 = mem[0],zero ; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload ; CHECK-NEXT: # xmm1 = mem[0],zero ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64( <3 x double> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <3 x double> %rint } define <4 x double> @constrained_vector_rint_v4f64() { ; CHECK-LABEL: constrained_vector_rint_v4f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: subq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: # xmm0 = xmm0[0],mem[0] ; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: callq rint ; CHECK-NEXT: movaps %xmm0, %xmm1 ; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: # xmm1 = xmm1[0],mem[0] ; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload ; CHECK-NEXT: addq $40, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64( <4 x double> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <4 x double> %rint } define <1 x float> @constrained_vector_nearbyint_v1f32() { ; CHECK-LABEL: constrained_vector_nearbyint_v1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: callq nearbyintf ; CHECK-NEXT: popq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32( <1 x float> , metadata !"round.dynamic", metadata !"fpexcept.strict") ret <1 x float> %nearby } define <2 x double> @constrained_vector_nearbyint_v2f64() { ; CHECK-LABEL: constrained_vector_nearbyint_v2f64: ; CHECK: # 
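; nearbyint is tested separately from rint even though the lowering has the
; same shape: the two libm functions differ only in that rint may raise the
; inexact exception while nearbyint must not, a distinction the strict
; intrinsics have to preserve by calling the respective libm function.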
define <1 x float> @constrained_vector_nearbyint_v1f32() {
; CHECK-LABEL: constrained_vector_nearbyint_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq nearbyintf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
              <1 x float> ,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <1 x float> %nearby
}

define <2 x double> @constrained_vector_nearbyint_v2f64() {
; CHECK-LABEL: constrained_vector_nearbyint_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
              <2 x double> ,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <2 x double> %nearby
}

define <3 x float> @constrained_vector_nearbyint_v3f32() {
; CHECK-LABEL: constrained_vector_nearbyint_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq nearbyintf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq nearbyintf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq nearbyintf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
              <3 x float> ,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <3 x float> %nearby
}

define <3 x double> @constrained_vector_nearby_v3f64() {
; CHECK-LABEL: constrained_vector_nearby_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
              <3 x double> ,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <3 x double> %nearby
}

define <4 x double> @constrained_vector_nearbyint_v4f64() {
; CHECK-LABEL: constrained_vector_nearbyint_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq nearbyint
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
              <4 x double> ,
              metadata !"round.dynamic",
              metadata !"fpexcept.strict")
  ret <4 x double> %nearby
}

define <1 x float> @constrained_vector_maxnum_v1f32() {
; CHECK-LABEL: constrained_vector_maxnum_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmaxf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %max
}

define <2 x double> @constrained_vector_maxnum_v2f64() {
; CHECK-LABEL: constrained_vector_maxnum_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %max
}

define <3 x float> @constrained_vector_maxnum_v3f32() {
; CHECK-LABEL: constrained_vector_maxnum_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmaxf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmaxf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fmaxf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %max
}

define <3 x double> @constrained_vector_max_v3f64() {
; CHECK-LABEL: constrained_vector_max_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %max
}

define <4 x double> @constrained_vector_maxnum_v4f64() {
; CHECK-LABEL: constrained_vector_maxnum_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmax
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %max
}

define <1 x float> @constrained_vector_minnum_v1f32() {
; CHECK-LABEL: constrained_vector_minnum_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fminf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
           <1 x float> ,
           <1 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <1 x float> %min
}

define <2 x double> @constrained_vector_minnum_v2f64() {
; CHECK-LABEL: constrained_vector_minnum_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
           <2 x double> ,
           <2 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <2 x double> %min
}

define <3 x float> @constrained_vector_minnum_v3f32() {
; CHECK-LABEL: constrained_vector_minnum_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fminf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fminf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq fminf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32(
           <3 x float> ,
           <3 x float> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x float> %min
}

define <3 x double> @constrained_vector_min_v3f64() {
; CHECK-LABEL: constrained_vector_min_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64(
           <3 x double> ,
           <3 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <3 x double> %min
}

define <4 x double> @constrained_vector_minnum_v4f64() {
; CHECK-LABEL: constrained_vector_minnum_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    callq fmin
; CHECK-NEXT:    movaps %xmm0, %xmm1
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(
           <4 x double> ,
           <4 x double> ,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret <4 x double> %min
}

define <1 x float> @constrained_vector_ceil_v1f32() {
; CHECK-LABEL: constrained_vector_ceil_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq ceilf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
            <1 x float> ,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <1 x float> %ceil
}

define <2 x double> @constrained_vector_ceil_v2f64() {
; CHECK-LABEL: constrained_vector_ceil_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq ceil
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq ceil
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
            <2 x double> ,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <2 x double> %ceil
}

define <3 x float> @constrained_vector_ceil_v3f32() {
; CHECK-LABEL: constrained_vector_ceil_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq ceilf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq ceilf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq ceilf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
            <3 x float> ,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x float> %ceil
}

define <3 x double> @constrained_vector_ceil_v3f64() {
; CHECK-LABEL: constrained_vector_ceil_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq ceil
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq ceil
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq ceil
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
            <3 x double> ,
            metadata !"round.dynamic",
            metadata !"fpexcept.strict")
  ret <3 x double> %ceil
}

define <1 x float> @constrained_vector_floor_v1f32() {
; CHECK-LABEL: constrained_vector_floor_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq floorf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
             <1 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <1 x float> %floor
}

define <2 x double> @constrained_vector_floor_v2f64() {
; CHECK-LABEL: constrained_vector_floor_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq floor
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq floor
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
             <2 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <2 x double> %floor
}

define <3 x float> @constrained_vector_floor_v3f32() {
; CHECK-LABEL: constrained_vector_floor_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq floorf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq floorf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq floorf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
             <3 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x float> %floor
}

define <3 x double> @constrained_vector_floor_v3f64() {
; CHECK-LABEL: constrained_vector_floor_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq floor
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq floor
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq floor
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
             <3 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x double> %floor
}

define <1 x float> @constrained_vector_round_v1f32() {
; CHECK-LABEL: constrained_vector_round_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq roundf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
             <1 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <1 x float> %round
}

define <2 x double> @constrained_vector_round_v2f64() {
; CHECK-LABEL: constrained_vector_round_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq round
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq round
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
             <2 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <2 x double> %round
}

define <3 x float> @constrained_vector_round_v3f32() {
; CHECK-LABEL: constrained_vector_round_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq roundf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq roundf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq roundf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
             <3 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x float> %round
}

define <3 x double> @constrained_vector_round_v3f64() {
; CHECK-LABEL: constrained_vector_round_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq round
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq round
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq round
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
             <3 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x double> %round
}

define <1 x float> @constrained_vector_trunc_v1f32() {
; CHECK-LABEL: constrained_vector_trunc_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq truncf
; CHECK-NEXT:    popq %rax
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
             <1 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <1 x float> %trunc
}

define <2 x double> @constrained_vector_trunc_v2f64() {
; CHECK-LABEL: constrained_vector_trunc_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq trunc
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq trunc
; CHECK-NEXT:    unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm0 = xmm0[0],mem[0]
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
             <2 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <2 x double> %trunc
}

define <3 x float> @constrained_vector_trunc_v3f32() {
; CHECK-LABEL: constrained_vector_trunc_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 48
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq truncf
; CHECK-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq truncf
; CHECK-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    callq truncf
; CHECK-NEXT:    movaps (%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT:    unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT:    # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT:    movaps %xmm1, %xmm0
; CHECK-NEXT:    addq $40, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
             <3 x float> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x float> %trunc
}

define <3 x double> @constrained_vector_trunc_v3f64() {
; CHECK-LABEL: constrained_vector_trunc_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    subq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 32
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq trunc
; CHECK-NEXT:    movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq trunc
; CHECK-NEXT:    movsd %xmm0, (%rsp) # 8-byte Spill
; CHECK-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    callq trunc
; CHECK-NEXT:    movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT:    fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT:    # xmm0 = mem[0],zero
; CHECK-NEXT:    movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT:    # xmm1 = mem[0],zero
; CHECK-NEXT:    addq $24, %rsp
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    retq
entry:
  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
             <3 x double> ,
             metadata !"round.dynamic",
             metadata !"fpexcept.strict")
  ret <3 x double> %trunc
}
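; Every test above lowers to libcalls, which is presumably because the plain
; x86-64 SSE2 baseline used by this RUN line has no direct instructions for
; these operations (roundss/roundsd, which could cover ceil, floor, round,
; and trunc, require SSE4.1), and because strict FP semantics rule out
; rewrites that could change observable rounding or exception behavior.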
; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.frem.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.pow.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.powi.v2f64(<2 x double>, i32, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sin.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.cos.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log10.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata, metadata)

; Scalar width declarations
declare <1 x float> @llvm.experimental.constrained.fadd.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fsub.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.fdiv.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.frem.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.sqrt.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.pow.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.powi.v1f32(<1 x float>, i32, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.sin.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.cos.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp2.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log10.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log2.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.rint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float>, <1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata, metadata)
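; "Illegal width" below means the <3 x ...> types are not legal SSE register
; types; type legalization widens or splits them, and each strict operation
; is then expanded element by element. Conceptually (a sketch, not what this
; file tests):
;   %e0 = extractelement <3 x float> %v, i32 0
;   %r0 = call float @llvm.experimental.constrained.log10.f32(float %e0,
;            metadata !"round.dynamic", metadata !"fpexcept.strict")
;   ; ...likewise for elements 1 and 2, then the result vector is rebuilt.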
; Illegal width declarations
declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fsub.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fsub.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fmul.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fmul.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.fdiv.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fdiv.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.frem.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.frem.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.sqrt.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.sqrt.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.pow.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.pow.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.powi.v3f32(<3 x float>, i32, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.powi.v3f64(<3 x double>, i32, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.sin.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.sin.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.cos.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.cos.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.exp.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp2.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.exp2.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log10.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log10.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.log2.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.log2.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.rint.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.rint.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.maxnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.maxnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.minnum.v3f32(<3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.minnum.v3f64(<3 x double>, <3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.ceil.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.ceil.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.floor.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.floor.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.round.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.round.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.trunc.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.trunc.v3f64(<3 x double>, metadata, metadata)
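; The <4 x double> declarations below exercise splitting: 256-bit vectors
; need AVX, so under this SSE2 run each v4f64 operation is legalized as two
; <2 x double> halves, visible in the tests above as two unpcklpd
; recombinations per function.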
; Double width declarations
declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.frem.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.pow.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.powi.v4f64(<4 x double>, i32, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sin.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.cos.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp2.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log10.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log2.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.rint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double>, <4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata, metadata)