Diffstat (limited to 'test/CodeGen/X86/vector-half-conversions.ll')
-rw-r--r-- | test/CodeGen/X86/vector-half-conversions.ll | 2620 |
1 file changed, 714 insertions, 1906 deletions
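For context, every function in this diff follows the same pattern: LLVM IR that bitcasts i16 values to half and fpext's them, which llc lowers through F16C's vcvtph2ps (plus vcvtss2sd for double results). A minimal standalone reproducer, sketched from the first test in the file — the single RUN line and CHECK prefix here are illustrative simplifications, not part of this commit:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c | FileCheck %s
define float @cvt_i16_to_f32(i16 %a0) nounwind {
; CHECK-LABEL: cvt_i16_to_f32:
; CHECK: vcvtph2ps %xmm0, %xmm0
  %1 = bitcast i16 %a0 to half
  %2 = fpext half %1 to float
  ret float %2
}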
diff --git a/test/CodeGen/X86/vector-half-conversions.ll b/test/CodeGen/X86/vector-half-conversions.ll
index 6e664ba98d9..9feff88a576 100644
--- a/test/CodeGen/X86/vector-half-conversions.ll
+++ b/test/CodeGen/X86/vector-half-conversions.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-f16c -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -verify-machineinstrs | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL
 ;
@@ -9,35 +9,12 @@
 ;
 define float @cvt_i16_to_f32(i16 %a0) nounwind {
-; AVX1-LABEL: cvt_i16_to_f32:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl %di, %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: cvt_i16_to_f32:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl %di, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: cvt_i16_to_f32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl %di, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: cvt_i16_to_f32:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl %di, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: cvt_i16_to_f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl %di, %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: retq
 %1 = bitcast i16 %a0 to half
 %2 = fpext half %1 to float
 ret float %2
@@ -111,19 +88,18 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrq $48, %rdx
 ; AVX512F-NEXT: movswl %dx, %edx
 ; AVX512F-NEXT: vmovd %edx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: movswl %cx, %ecx
 ; AVX512F-NEXT: vmovd %ecx, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: vmovd %esi, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: cvt_4i16_to_4f32:
@@ -222,19 +198,18 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrq $48, %rdx
 ; AVX512F-NEXT: movswl %dx, %edx
 ; AVX512F-NEXT: vmovd %edx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: movswl %cx, %ecx
 ; AVX512F-NEXT: vmovd %ecx, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: vmovd %esi, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: cvt_8i16_to_4f32:
@@ -271,201 +246,54 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
 }
 define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
-; AVX1-LABEL: cvt_8i16_to_8f32:
-; AVX1: # BB#0:
-; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r8
-; AVX1-NEXT: movq %rdx, %r10
-; AVX1-NEXT: movswl %dx, %r9d
-; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
-; AVX1-NEXT: shrl $16, %edx
-; AVX1-NEXT: shrq $32, %r8
-; AVX1-NEXT: shrq $48, %r10
-; AVX1-NEXT: vmovq %xmm0, %rdi
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: movq %rdi, %rsi
-; AVX1-NEXT: movswl %di, %ecx
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
-; AVX1-NEXT: shrl $16, %edi
-; AVX1-NEXT: shrq $32, %rax
-; AVX1-NEXT: shrq $48, %rsi
-; AVX1-NEXT: movswl %si, %esi
-; AVX1-NEXT: vmovd %esi, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: cwtl
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT: movswl %di, %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT: vmovd %ecx, %xmm3
-; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX1-NEXT: movswl %r10w, %eax
-; AVX1-NEXT: vmovd %eax, %xmm4
-; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX1-NEXT: movswl %r8w, %eax
-; AVX1-NEXT: vmovd %eax, %xmm5
-; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX1-NEXT: movswl %dx, %eax
-; AVX1-NEXT: vmovd %eax, %xmm6
-; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX1-NEXT: vmovd %r9d, %xmm7
-; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: cvt_8i16_to_8f32:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r8
-; AVX2-NEXT: movq %rdx, %r10
-; AVX2-NEXT: movswl %dx, %r9d
-; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
-; AVX2-NEXT: shrl $16, %edx
-; AVX2-NEXT: shrq $32, %r8
-; AVX2-NEXT: shrq $48, %r10
-; AVX2-NEXT: vmovq %xmm0, %rdi
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: movq %rdi, %rsi
-; AVX2-NEXT: movswl %di, %ecx
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
-; AVX2-NEXT: shrl $16, %edi
-; AVX2-NEXT: shrq $32, %rax
-; AVX2-NEXT: shrq $48, %rsi
-; AVX2-NEXT: movswl %si, %esi
-; AVX2-NEXT: vmovd %esi, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: cwtl
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: movswl %di, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: vmovd %ecx, %xmm3
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX2-NEXT: movswl %r10w, %eax
-; AVX2-NEXT: vmovd %eax, %xmm4
-; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX2-NEXT: movswl %r8w, %eax
-; AVX2-NEXT: vmovd %eax, %xmm5
-; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX2-NEXT: movswl %dx, %eax
-; AVX2-NEXT: vmovd %eax, %xmm6
-; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX2-NEXT: vmovd %r9d, %xmm7
-; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: cvt_8i16_to_8f32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512F-NEXT: movq %rdx, %r8
-; AVX512F-NEXT: movq %rdx, %r9
-; AVX512F-NEXT: movswl %dx, %r10d
-; AVX512F-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
-; AVX512F-NEXT: shrl $16, %edx
-; AVX512F-NEXT: shrq $32, %r8
-; AVX512F-NEXT: shrq $48, %r9
-; AVX512F-NEXT: vmovq %xmm0, %rdi
-; AVX512F-NEXT: movq %rdi, %rax
-; AVX512F-NEXT: movq %rdi, %rcx
-; AVX512F-NEXT: movswl %di, %esi
-; AVX512F-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
-; AVX512F-NEXT: shrl $16, %edi
-; AVX512F-NEXT: shrq $32, %rax
-; AVX512F-NEXT: shrq $48, %rcx
-; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: movswl %di, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
-; AVX512F-NEXT: vmovd %esi, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: movswl %r9w, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm4
-; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4
-; AVX512F-NEXT: movswl %r8w, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm5
-; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5
-; AVX512F-NEXT: movswl %dx, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm6
-; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6
-; AVX512F-NEXT: vmovd %r10d, %xmm7
-; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: cvt_8i16_to_8f32:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512VL-NEXT: movq %rdx, %r8
-; AVX512VL-NEXT: movq %rdx, %r10
-; AVX512VL-NEXT: movswl %dx, %r9d
-; AVX512VL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
-; AVX512VL-NEXT: shrl $16, %edx
-; AVX512VL-NEXT: shrq $32, %r8
-; AVX512VL-NEXT: shrq $48, %r10
-; AVX512VL-NEXT: vmovq %xmm0, %rdi
-; AVX512VL-NEXT: movq %rdi, %rax
-; AVX512VL-NEXT: movq %rdi, %rsi
-; AVX512VL-NEXT: movswl %di, %ecx
-; AVX512VL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
-; AVX512VL-NEXT: shrl $16, %edi
-; AVX512VL-NEXT: shrq $32, %rax
-; AVX512VL-NEXT: shrq $48, %rsi
-; AVX512VL-NEXT: movswl %si, %esi
-; AVX512VL-NEXT: vmovd %esi, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: cwtl
-; AVX512VL-NEXT: vmovd %eax, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: movswl %di, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: vmovd %ecx, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX512VL-NEXT: movswl %r10w, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm4
-; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX512VL-NEXT: movswl %r8w, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm5
-; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX512VL-NEXT: movswl %dx, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm6
-; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX512VL-NEXT: vmovd %r9d, %xmm7
-; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512VL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: cvt_8i16_to_8f32:
+; ALL: # BB#0:
+; ALL-NEXT: vpextrq $1, %xmm0, %rdx
+; ALL-NEXT: movq %rdx, %r8
+; ALL-NEXT: movq %rdx, %r10
+; ALL-NEXT: movswl %dx, %r9d
+; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
+; ALL-NEXT: shrl $16, %edx
+; ALL-NEXT: shrq $32, %r8
+; ALL-NEXT: shrq $48, %r10
+; ALL-NEXT: vmovq %xmm0, %rdi
+; ALL-NEXT: movq %rdi, %rax
+; ALL-NEXT: movq %rdi, %rsi
+; ALL-NEXT: movswl %di, %ecx
+; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
+; ALL-NEXT: shrl $16, %edi
+; ALL-NEXT: shrq $32, %rax
+; ALL-NEXT: shrq $48, %rsi
+; ALL-NEXT: movswl %si, %esi
+; ALL-NEXT: vmovd %esi, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: cwtl
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: movswl %di, %eax
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: vmovd %ecx, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: movswl %r10w, %eax
+; ALL-NEXT: vmovd %eax, %xmm4
+; ALL-NEXT: vcvtph2ps %xmm4, %xmm4
+; ALL-NEXT: movswl %r8w, %eax
+; ALL-NEXT: vmovd %eax, %xmm5
+; ALL-NEXT: vcvtph2ps %xmm5, %xmm5
+; ALL-NEXT: movswl %dx, %eax
+; ALL-NEXT: vmovd %eax, %xmm6
+; ALL-NEXT: vcvtph2ps %xmm6, %xmm6
+; ALL-NEXT: vmovd %r9d, %xmm7
+; ALL-NEXT: vcvtph2ps %xmm7, %xmm7
+; ALL-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; ALL-NEXT: retq
 %1 = bitcast <8 x i16> %a0 to <8 x half>
 %2 = fpext <8 x half> %1 to <8 x float>
 ret <8 x float> %2
@@ -664,98 +492,98 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
 ;
 ; AVX512F-LABEL: cvt_16i16_to_16f32:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm10
 ; AVX512F-NEXT: vmovq %xmm0, %rax
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $48, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm2
+; AVX512F-NEXT: vmovd %ecx, %xmm8
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $32, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm3
+; AVX512F-NEXT: vmovd %ecx, %xmm9
 ; AVX512F-NEXT: movswl %ax, %ecx
 ; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
 ; AVX512F-NEXT: shrl $16, %eax
 ; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm4
+; AVX512F-NEXT: vmovd %eax, %xmm11
 ; AVX512F-NEXT: vpextrq $1, %xmm0, %rax
-; AVX512F-NEXT: vmovd %ecx, %xmm0
+; AVX512F-NEXT: vmovd %ecx, %xmm12
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $48, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm5
+; AVX512F-NEXT: vmovd %ecx, %xmm13
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $32, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm6
+; AVX512F-NEXT: vmovd %ecx, %xmm14
 ; AVX512F-NEXT: movswl %ax, %ecx
 ; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
 ; AVX512F-NEXT: shrl $16, %eax
 ; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm7
-; AVX512F-NEXT: vmovq %xmm1, %rax
-; AVX512F-NEXT: vmovd %ecx, %xmm8
+; AVX512F-NEXT: vmovd %eax, %xmm15
+; AVX512F-NEXT: vmovq %xmm10, %rax
+; AVX512F-NEXT: vmovd %ecx, %xmm2
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $48, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm9
+; AVX512F-NEXT: vmovd %ecx, %xmm3
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $32, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm10
+; AVX512F-NEXT: vmovd %ecx, %xmm1
 ; AVX512F-NEXT: movswl %ax, %ecx
 ; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
 ; AVX512F-NEXT: shrl $16, %eax
 ; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm11
-; AVX512F-NEXT: vpextrq $1, %xmm1, %rax
-; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vmovd %eax, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm10, %rax
+; AVX512F-NEXT: vmovd %ecx, %xmm10
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $48, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm12
+; AVX512F-NEXT: vmovd %ecx, %xmm5
 ; AVX512F-NEXT: movq %rax, %rcx
 ; AVX512F-NEXT: shrq $32, %rcx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm13
+; AVX512F-NEXT: vmovd %ecx, %xmm6
 ; AVX512F-NEXT: movl %eax, %ecx
 ; AVX512F-NEXT: shrl $16, %ecx
 ; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm14
+; AVX512F-NEXT: vmovd %ecx, %xmm7
 ; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm15
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm16
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5
-; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6
-; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7
-; AVX512F-NEXT: vcvtph2ps %ymm8, %zmm8
-; AVX512F-NEXT: vcvtph2ps %ymm9, %zmm9
-; AVX512F-NEXT: vcvtph2ps %ymm10, %zmm10
-; AVX512F-NEXT: vcvtph2ps %ymm11, %zmm11
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: vcvtph2ps %ymm12, %zmm12
-; AVX512F-NEXT: vcvtph2ps %ymm13, %zmm13
-; AVX512F-NEXT: vcvtph2ps %ymm14, %zmm14
-; AVX512F-NEXT: vcvtph2ps %ymm15, %zmm15
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm15[0],xmm14[0],xmm15[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm13[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm10[0],xmm1[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm9[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm8[0],xmm7[0],xmm8[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm6[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[0]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm16[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm8, %xmm8
+; AVX512F-NEXT: vcvtph2ps %xmm9, %xmm9
+; AVX512F-NEXT: vcvtph2ps %xmm11, %xmm11
+; AVX512F-NEXT: vcvtph2ps %xmm12, %xmm12
+; AVX512F-NEXT: vcvtph2ps %xmm13, %xmm13
+; AVX512F-NEXT: vcvtph2ps %xmm14, %xmm14
+; AVX512F-NEXT: vcvtph2ps %xmm15, %xmm15
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512F-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512F-NEXT: vcvtph2ps %xmm10, %xmm10
+; AVX512F-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512F-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512F-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[0]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1],xmm1[0],xmm4[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
+; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm15[0],xmm2[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm14[0],xmm1[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[0]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm12[0],xmm11[0],xmm12[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
+; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: cvt_16i16_to_16f32:
@@ -863,35 +691,12 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
 ;
 define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
-; AVX1-LABEL: load_cvt_i16_to_f32:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_i16_to_f32:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_i16_to_f32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_i16_to_f32:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_i16_to_f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: retq
 %1 = load i16, i16* %a0
 %2 = bitcast i16 %1 to half
 %3 = fpext half %2 to float
@@ -899,82 +704,24 @@ define float @load_cvt_i16_to_f32(i16* %a0) nounwind {
 }
 define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) nounwind {
-; AVX1-LABEL: load_cvt_4i16_to_4f32:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl 6(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: movswl 4(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT: movswl 2(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm3
-; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_4i16_to_4f32:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl 6(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: movswl 4(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: movswl 2(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_4i16_to_4f32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl 6(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: movswl 4(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
-; AVX512F-NEXT: movswl 2(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_4i16_to_4f32:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl 6(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: movswl 4(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: movswl 2(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_4i16_to_4f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl 6(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl 4(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: movswl 2(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: retq
 %1 = load <4 x i16>, <4 x i16>* %a0
 %2 = bitcast <4 x i16> %1 to <4 x half>
 %3 = fpext <4 x half> %2 to <4 x float>
@@ -1046,19 +793,18 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
 ; AVX512F-NEXT: shrq $48, %rdx
 ; AVX512F-NEXT: movswl %dx, %edx
 ; AVX512F-NEXT: vmovd %edx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: movswl %cx, %ecx
 ; AVX512F-NEXT: vmovd %ecx, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: vmovd %esi, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: load_cvt_8i16_to_4f32:
@@ -1096,145 +842,40 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
 }
 define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) nounwind {
-; AVX1-LABEL: load_cvt_8i16_to_8f32:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl 6(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: movswl 4(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT: movswl 2(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm3
-; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX1-NEXT: movswl 14(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm4
-; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX1-NEXT: movswl 12(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm5
-; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX1-NEXT: movswl 8(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm6
-; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX1-NEXT: movswl 10(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm7
-; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_8i16_to_8f32:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl 6(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: movswl 4(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: movswl 2(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX2-NEXT: movswl 14(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm4
-; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX2-NEXT: movswl 12(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm5
-; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX2-NEXT: movswl 8(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm6
-; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX2-NEXT: movswl 10(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm7
-; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_8i16_to_8f32:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl 6(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: movswl 4(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
-; AVX512F-NEXT: movswl 2(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: movswl 14(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm4
-; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4
-; AVX512F-NEXT: movswl 12(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm5
-; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5
-; AVX512F-NEXT: movswl 8(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm6
-; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6
-; AVX512F-NEXT: movswl 10(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm7
-; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_8i16_to_8f32:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl 6(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: movswl 4(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: movswl 2(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX512VL-NEXT: movswl 14(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm4
-; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX512VL-NEXT: movswl 12(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm5
-; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX512VL-NEXT: movswl 8(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm6
-; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX512VL-NEXT: movswl 10(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm7
-; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX512VL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512VL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_8i16_to_8f32:
+; ALL: # BB#0:
+; ALL-NEXT: movswl 6(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl 4(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: movswl 2(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: movswl 14(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm4
+; ALL-NEXT: vcvtph2ps %xmm4, %xmm4
+; ALL-NEXT: movswl 12(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm5
+; ALL-NEXT: vcvtph2ps %xmm5, %xmm5
+; ALL-NEXT: movswl 8(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm6
+; ALL-NEXT: vcvtph2ps %xmm6, %xmm6
+; ALL-NEXT: movswl 10(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm7
+; ALL-NEXT: vcvtph2ps %xmm7, %xmm7
+; ALL-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
+; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; ALL-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; ALL-NEXT: retq
 %1 = load <8 x i16>, <8 x i16>* %a0
 %2 = bitcast <8 x i16> %1 to <8 x half>
 %3 = fpext <8 x half> %2 to <8 x float>
@@ -1378,65 +1019,65 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ; AVX512F: # BB#0:
 ; AVX512F-NEXT: movswl 6(%rdi), %eax
 ; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm16
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm8
 ; AVX512F-NEXT: movswl 4(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm17
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm9
 ; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm10
 ; AVX512F-NEXT: movswl 2(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm11
 ; AVX512F-NEXT: movswl 14(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm4
-; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm12
 ; AVX512F-NEXT: movswl 12(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm5
-; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm13
 ; AVX512F-NEXT: movswl 8(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm6
-; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm14
 ; AVX512F-NEXT: movswl 10(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm7
-; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm15
 ; AVX512F-NEXT: movswl 22(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm8
-; AVX512F-NEXT: vcvtph2ps %ymm8, %zmm8
+; AVX512F-NEXT: vmovd %eax, %xmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: movswl 20(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm9
-; AVX512F-NEXT: vcvtph2ps %ymm9, %zmm9
+; AVX512F-NEXT: vmovd %eax, %xmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: movswl 16(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm10
-; AVX512F-NEXT: vcvtph2ps %ymm10, %zmm10
+; AVX512F-NEXT: vmovd %eax, %xmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: movswl 18(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm11
-; AVX512F-NEXT: vcvtph2ps %ymm11, %zmm11
+; AVX512F-NEXT: vmovd %eax, %xmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: movswl 30(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm12
-; AVX512F-NEXT: vcvtph2ps %ymm12, %zmm12
+; AVX512F-NEXT: vmovd %eax, %xmm4
+; AVX512F-NEXT: vcvtph2ps %xmm4, %xmm4
 ; AVX512F-NEXT: movswl 28(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm13
-; AVX512F-NEXT: vcvtph2ps %ymm13, %zmm13
+; AVX512F-NEXT: vmovd %eax, %xmm5
+; AVX512F-NEXT: vcvtph2ps %xmm5, %xmm5
 ; AVX512F-NEXT: movswl 24(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm14
-; AVX512F-NEXT: vcvtph2ps %ymm14, %zmm14
+; AVX512F-NEXT: vmovd %eax, %xmm6
+; AVX512F-NEXT: vcvtph2ps %xmm6, %xmm6
 ; AVX512F-NEXT: movswl 26(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm15
-; AVX512F-NEXT: vcvtph2ps %ymm15, %zmm15
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm14[0],xmm15[0],xmm14[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm13[0],xmm0[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[0]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0],xmm11[0],xmm10[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm9[0],xmm1[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm6[0],xmm7[0],xmm6[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
+; AVX512F-NEXT: vmovd %eax, %xmm7
+; AVX512F-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
 ; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm17[0],xmm2[3]
-; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm16[0]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512F-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
+; AVX512F-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
 ; AVX512F-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-NEXT: retq
@@ -1518,38 +1159,13 @@ define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) nounwind {
 ;
 define double @cvt_i16_to_f64(i16 %a0) nounwind {
-; AVX1-LABEL: cvt_i16_to_f64:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl %di, %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: cvt_i16_to_f64:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl %di, %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: cvt_i16_to_f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl %di, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: cvt_i16_to_f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl %di, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: cvt_i16_to_f64:
+; ALL: # BB#0:
+; ALL-NEXT: movswl %di, %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
 %1 = bitcast i16 %a0 to half
 %2 = fpext half %1 to double
 ret double %2
@@ -1599,13 +1215,12 @@ define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrl $16, %eax
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: vmovd %ecx, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: cvt_2i16_to_2f64:
@@ -1701,15 +1316,15 @@ define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrl $16, %edx
 ; AVX512F-NEXT: movswl %dx, %edx
 ; AVX512F-NEXT: vmovd %edx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: vmovd %esi, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: movswl %cx, %ecx
 ; AVX512F-NEXT: vmovd %ecx, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
@@ -1791,13 +1406,12 @@ define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrl $16, %eax
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: vmovd %ecx, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
 ; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: cvt_8i16_to_2f64:
@@ -1892,15 +1506,15 @@ define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
 ; AVX512F-NEXT: shrl $16, %edx
 ; AVX512F-NEXT: movswl %dx, %edx
 ; AVX512F-NEXT: vmovd %edx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
+; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
 ; AVX512F-NEXT: vmovd %esi, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
+; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1
 ; AVX512F-NEXT: movswl %cx, %ecx
 ; AVX512F-NEXT: vmovd %ecx, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
+; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2
 ; AVX512F-NEXT: cwtl
 ; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
+; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3
 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
@@ -1950,25 +1564,25 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX1-LABEL: cvt_8i16_to_8f64:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vmovq %xmm0, %rdx
-; AVX1-NEXT: movq %rdx, %r8
+; AVX1-NEXT: movq %rdx, %r9
 ; AVX1-NEXT: movl %edx, %r10d
-; AVX1-NEXT: movswl %dx, %r9d
+; AVX1-NEXT: movswl %dx, %r8d
 ; AVX1-NEXT: shrq $48, %rdx
-; AVX1-NEXT: shrq $32, %r8
+; AVX1-NEXT: shrq $32, %r9
 ; AVX1-NEXT: shrl $16, %r10d
 ; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX1-NEXT: movq %rdi, %rax
-; AVX1-NEXT: movl %edi, %esi
+; AVX1-NEXT: movq %rdi, %rsi
+; AVX1-NEXT: movl %edi, %eax
 ; AVX1-NEXT: movswl %di, %ecx
 ; AVX1-NEXT: shrq $48, %rdi
-; AVX1-NEXT: shrq $32, %rax
-; AVX1-NEXT: shrl $16, %esi
-; AVX1-NEXT: movswl %si, %esi
-; AVX1-NEXT: vmovd %esi, %xmm0
+; AVX1-NEXT: shrq $32, %rsi
+; AVX1-NEXT: shrl $16, %eax
+; AVX1-NEXT: cwtl
+; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
 ; AVX1-NEXT: vmovd %ecx, %xmm0
 ; AVX1-NEXT: vcvtph2ps %xmm0, %xmm2
-; AVX1-NEXT: cwtl
+; AVX1-NEXT: movswl %si, %eax
 ; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vcvtph2ps %xmm0, %xmm3
 ; AVX1-NEXT: movswl %di, %eax
@@ -1977,9 +1591,9 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX1-NEXT: movswl %r10w, %eax
 ; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: vmovd %r9d, %xmm5
+; AVX1-NEXT: vmovd %r8d, %xmm5
 ; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX1-NEXT: movswl %r8w, %eax
+; AVX1-NEXT: movswl %r9w, %eax
 ; AVX1-NEXT: vmovd %eax, %xmm6
 ; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
 ; AVX1-NEXT: movswl %dx, %eax
@@ -2004,25 +1618,25 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-LABEL: cvt_8i16_to_8f64:
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vmovq %xmm0, %rdx
-; AVX2-NEXT: movq %rdx, %r8
+; AVX2-NEXT: movq %rdx, %r9
 ; AVX2-NEXT: movl %edx, %r10d
-; AVX2-NEXT: movswl %dx, %r9d
+; AVX2-NEXT: movswl %dx, %r8d
 ; AVX2-NEXT: shrq $48, %rdx
-; AVX2-NEXT: shrq $32, %r8
+; AVX2-NEXT: shrq $32, %r9
 ; AVX2-NEXT: shrl $16, %r10d
 ; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
-; AVX2-NEXT: movq %rdi, %rax
-; AVX2-NEXT: movl %edi, %esi
+; AVX2-NEXT: movq %rdi, %rsi
+; AVX2-NEXT: movl %edi, %eax
 ; AVX2-NEXT: movswl %di, %ecx
 ; AVX2-NEXT: shrq $48, %rdi
-; AVX2-NEXT: shrq $32, %rax
-; AVX2-NEXT: shrl $16, %esi
-; AVX2-NEXT: movswl %si, %esi
-; AVX2-NEXT: vmovd %esi, %xmm0
+; AVX2-NEXT: shrq $32, %rsi
+; AVX2-NEXT: shrl $16, %eax
+; AVX2-NEXT: cwtl
+; AVX2-NEXT: vmovd %eax, %xmm0
 ; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
 ; AVX2-NEXT: vmovd %ecx, %xmm0
 ; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
-; AVX2-NEXT: cwtl
+; AVX2-NEXT: movswl %si, %eax
 ; AVX2-NEXT: vmovd %eax, %xmm0
 ; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
 ; AVX2-NEXT: movswl %di, %eax
@@ -2031,9 +1645,9 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-NEXT: movswl %r10w, %eax
 ; AVX2-NEXT: vmovd %eax, %xmm0
 ; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: vmovd %r9d, %xmm5
+; AVX2-NEXT: vmovd %r8d, %xmm5
 ; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX2-NEXT: movswl %r8w, %eax
+; AVX2-NEXT: movswl %r9w, %eax
 ; AVX2-NEXT: vmovd %eax, %xmm6
 ; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
 ; AVX2-NEXT: movswl %dx, %eax
@@ -2055,115 +1669,60 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
 ; AVX2-NEXT: retq
 ;
-; AVX512F-LABEL: cvt_8i16_to_8f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512F-NEXT: movq %rdx, %r8
-; AVX512F-NEXT: movl %edx, %r9d
-; AVX512F-NEXT: movswl %dx, %r10d
-; AVX512F-NEXT: shrq $48, %rdx
-; AVX512F-NEXT: shrq $32, %r8
-; AVX512F-NEXT: shrl $16, %r9d
-; AVX512F-NEXT: vmovq %xmm0, %rdi
-; AVX512F-NEXT: movq %rdi, %rax
-; AVX512F-NEXT: movl %edi, %ecx
-; AVX512F-NEXT: movswl %di, %esi
-; AVX512F-NEXT: shrq $48, %rdi
-; AVX512F-NEXT: shrq $32, %rax
-; AVX512F-NEXT: shrl $16, %ecx
-; AVX512F-NEXT: movswl %cx, %ecx
-; AVX512F-NEXT: vmovd %ecx, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: vmovd %esi, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: cwtl
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
-; AVX512F-NEXT: movswl %di, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: movswl %r9w, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm4
-; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4
-; AVX512F-NEXT: vmovd %r10d, %xmm5
-; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5
-; AVX512F-NEXT: movswl %r8w, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm6
-; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6
-; AVX512F-NEXT: movswl %dx, %eax
-; AVX512F-NEXT: vmovd %eax, %xmm7
-; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7
-; AVX512F-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
-; AVX512F-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; AVX512F-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
-; AVX512F-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
-; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: cvt_8i16_to_8f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512VL-NEXT: movq %rdx, %r8
-; AVX512VL-NEXT: movl %edx, %r10d
-; AVX512VL-NEXT: movswl %dx, %r9d
-; AVX512VL-NEXT: shrq $48, %rdx
-; AVX512VL-NEXT: shrq $32, %r8
-; AVX512VL-NEXT: shrl $16, %r10d
-; AVX512VL-NEXT: vmovq %xmm0, %rdi
-; AVX512VL-NEXT: movq %rdi, %rax
-; AVX512VL-NEXT: movl %edi, %esi
-; AVX512VL-NEXT: movswl %di, %ecx
-; AVX512VL-NEXT: shrq $48, %rdi
-; AVX512VL-NEXT: shrq $32, %rax
-; AVX512VL-NEXT: shrl $16, %esi
-; AVX512VL-NEXT: movswl %si, %esi
-; AVX512VL-NEXT: vmovd %esi, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %ecx, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: cwtl
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: movswl %di, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX512VL-NEXT: movswl %r10w, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm4
-; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4
-; AVX512VL-NEXT: vmovd %r9d, %xmm5
-; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5
-; AVX512VL-NEXT: movswl %r8w, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm6
-; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6
-; AVX512VL-NEXT: movswl %dx, %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm7
-; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7
-; AVX512VL-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
-; AVX512VL-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; AVX512VL-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
-; AVX512VL-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0]
-; AVX512VL-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
-; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512VL-NEXT: retq
+; AVX512-LABEL: cvt_8i16_to_8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512-NEXT: movq %rdx, %r9
+; AVX512-NEXT: movl %edx, %r10d
+; AVX512-NEXT: movswl %dx, %r8d
+; AVX512-NEXT: shrq $48, %rdx
+; AVX512-NEXT: shrq $32, %r9
+; AVX512-NEXT: shrl $16, %r10d
+; AVX512-NEXT: vmovq %xmm0, %rdi
+; AVX512-NEXT: movq %rdi, %rsi
+; AVX512-NEXT: movl %edi, %eax
+; AVX512-NEXT: movswl %di, %ecx
+; AVX512-NEXT: shrq $48, %rdi
+; AVX512-NEXT: shrq $32, %rsi
+; AVX512-NEXT: shrl $16, %eax
+; AVX512-NEXT: cwtl
+; AVX512-NEXT: vmovd %eax, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovd %ecx, %xmm1
+; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT: movswl %si, %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT: movswl %di, %eax
+; AVX512-NEXT: vmovd %eax, %xmm3
+; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT: movswl %r10w, %eax
+; AVX512-NEXT: vmovd %eax, %xmm4
+; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT: vmovd %r8d, %xmm5
+; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT: movswl %r9w, %eax
+; AVX512-NEXT: vmovd %eax, %xmm6
+; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
+; AVX512-NEXT: movswl %dx, %eax
+; AVX512-NEXT: vmovd %eax, %xmm7
+; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
+; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
+; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
+; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
+; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm4 = xmm5[0],xmm4[0]
+; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
+; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
+; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512-NEXT: retq
 %1 = bitcast <8 x i16> %a0 to <8 x half>
 %2 = fpext <8 x half> %1 to <8 x double>
 ret <8 x double> %2
@@ -2174,38 +1733,13 @@ define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
 ;
 define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
-; AVX1-LABEL: load_cvt_i16_to_f64:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_i16_to_f64:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_i16_to_f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_i16_to_f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_i16_to_f64:
+; ALL: # BB#0:
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
 %1 = load i16, i16* %a0
 %2 = bitcast i16 %1 to half
 %3 = fpext half %2 to double
@@ -2213,58 +1747,18 @@ define double @load_cvt_i16_to_f64(i16* %a0) nounwind {
 }
 define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
-; AVX1-LABEL: load_cvt_2i16_to_2f64:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: movswl 2(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_2i16_to_2f64:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: movswl 2(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_2i16_to_2f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: movswl 2(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_2i16_to_2f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: movswl 2(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_2i16_to_2f64:
+; ALL: # BB#0:
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl 2(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; ALL-NEXT: retq
 %1 = load <2 x i16>, <2 x i16>* %a0
 %2 = bitcast <2 x i16> %1 to <2 x half>
 %3 = fpext <2 x half> %2 to <2 x double>
@@ -2272,97 +1766,28 @@ define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) nounwind {
 }
 define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) nounwind {
-; AVX1-LABEL: load_cvt_4i16_to_4f64:
-; AVX1: # BB#0:
-; AVX1-NEXT: movswl (%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm0
-; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX1-NEXT: movswl 2(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX1-NEXT: movswl 4(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX1-NEXT: movswl 6(%rdi), %eax
-; AVX1-NEXT: vmovd %eax, %xmm3
-; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX1-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_cvt_4i16_to_4f64:
-; AVX2: # BB#0:
-; AVX2-NEXT: movswl (%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm0
-; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX2-NEXT: movswl 2(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
-; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX2-NEXT: movswl 4(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX2-NEXT: movswl 6(%rdi), %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX2-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: load_cvt_4i16_to_4f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: movswl (%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm0
-; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0
-; AVX512F-NEXT: movswl 2(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm1
-; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1
-; AVX512F-NEXT: movswl 4(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm2
-; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2
-; AVX512F-NEXT: movswl 6(%rdi), %eax
-; AVX512F-NEXT: vmovd %eax, %xmm3
-; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3
-; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: load_cvt_4i16_to_4f64:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm0
-; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
-; AVX512VL-NEXT: movswl 2(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm1
-; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1
-; AVX512VL-NEXT: movswl 4(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm2
-; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2
-; AVX512VL-NEXT: movswl 6(%rdi), %eax
-; AVX512VL-NEXT: vmovd %eax, %xmm3
-; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3
-; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
-; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX512VL-NEXT: retq
+; ALL-LABEL: load_cvt_4i16_to_4f64:
+; ALL: # BB#0:
+; ALL-NEXT: movswl (%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm0
+; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
+; ALL-NEXT: movswl 2(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm1
+; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
+; ALL-NEXT: movswl 4(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm2
+; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
+; ALL-NEXT: movswl 6(%rdi), %eax
+; ALL-NEXT: vmovd %eax, %xmm3
+; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
+; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
+; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
+; ALL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
+; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; ALL-NEXT: retq
 %1 = load <4 x i16>, <4 x i16>* %a0
 %2 = bitcast <4 x i16> %1 to <4 x half>
 %3 = fpext <4 x half> %2 to <4 x double>
@@ -2439,15 +1864,15 @@ define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) nounwind
{ ; AVX512F-NEXT: shrl $16, %edx ; AVX512F-NEXT: movswl %dx, %edx ; AVX512F-NEXT: vmovd %edx, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 +; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %esi, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 +; AVX512F-NEXT: vcvtph2ps %xmm1, %xmm1 ; AVX512F-NEXT: movswl %cx, %ecx ; AVX512F-NEXT: vmovd %ecx, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 +; AVX512F-NEXT: vcvtph2ps %xmm2, %xmm2 ; AVX512F-NEXT: cwtl ; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 +; AVX512F-NEXT: vcvtph2ps %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 ; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] @@ -2579,91 +2004,48 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind { ; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 ; AVX2-NEXT: retq ; -; AVX512F-LABEL: load_cvt_8i16_to_8f64: -; AVX512F: # BB#0: -; AVX512F-NEXT: movswl (%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm0 -; AVX512F-NEXT: vcvtph2ps %ymm0, %zmm0 -; AVX512F-NEXT: movswl 2(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm1 -; AVX512F-NEXT: vcvtph2ps %ymm1, %zmm1 -; AVX512F-NEXT: movswl 4(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm2 -; AVX512F-NEXT: vcvtph2ps %ymm2, %zmm2 -; AVX512F-NEXT: movswl 6(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vcvtph2ps %ymm3, %zmm3 -; AVX512F-NEXT: movswl 8(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm4 -; AVX512F-NEXT: vcvtph2ps %ymm4, %zmm4 -; AVX512F-NEXT: movswl 10(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm5 -; AVX512F-NEXT: vcvtph2ps %ymm5, %zmm5 -; AVX512F-NEXT: movswl 12(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm6 -; AVX512F-NEXT: vcvtph2ps %ymm6, %zmm6 -; AVX512F-NEXT: movswl 14(%rdi), %eax -; AVX512F-NEXT: vmovd %eax, %xmm7 -; AVX512F-NEXT: vcvtph2ps %ymm7, %zmm7 -; AVX512F-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512F-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512F-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512F-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512F-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512F-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512F-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512F-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512F-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512F-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: load_cvt_8i16_to_8f64: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: movswl (%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm0 -; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0 -; AVX512VL-NEXT: movswl 2(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm1 -; AVX512VL-NEXT: vcvtph2ps %xmm1, %xmm1 -; AVX512VL-NEXT: movswl 4(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm2 -; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm2 -; AVX512VL-NEXT: movswl 6(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vcvtph2ps %xmm3, %xmm3 -; AVX512VL-NEXT: movswl 8(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm4 -; AVX512VL-NEXT: vcvtph2ps %xmm4, %xmm4 -; AVX512VL-NEXT: movswl 10(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm5 -; AVX512VL-NEXT: vcvtph2ps %xmm5, %xmm5 -; AVX512VL-NEXT: movswl 12(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm6 -; AVX512VL-NEXT: vcvtph2ps %xmm6, %xmm6 -; AVX512VL-NEXT: movswl 
14(%rdi), %eax -; AVX512VL-NEXT: vmovd %eax, %xmm7 -; AVX512VL-NEXT: vcvtph2ps %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 -; AVX512VL-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] -; AVX512VL-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 -; AVX512VL-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 -; AVX512VL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 -; AVX512VL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] -; AVX512VL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 -; AVX512VL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512VL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 -; AVX512VL-NEXT: retq +; AVX512-LABEL: load_cvt_8i16_to_8f64: +; AVX512: # BB#0: +; AVX512-NEXT: movswl (%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm0 +; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0 +; AVX512-NEXT: movswl 2(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm1 +; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1 +; AVX512-NEXT: movswl 4(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm2 +; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2 +; AVX512-NEXT: movswl 6(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3 +; AVX512-NEXT: movswl 8(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm4 +; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4 +; AVX512-NEXT: movswl 10(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm5 +; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5 +; AVX512-NEXT: movswl 12(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm6 +; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6 +; AVX512-NEXT: movswl 14(%rdi), %eax +; AVX512-NEXT: vmovd %eax, %xmm7 +; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7 +; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0] +; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5 +; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0] +; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 +; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 +; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 +; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0 +; AVX512-NEXT: retq %1 = load <8 x i16>, <8 x i16>* %a0 %2 = bitcast <8 x i16> %1 to <8 x half> %3 = fpext <8 x half> %2 to <8 x double> @@ -2675,138 +2057,41 @@ define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) nounwind { ; define i16 @cvt_f32_to_i16(float %a0) nounwind { -; AVX1-LABEL: cvt_f32_to_i16: -; AVX1: # BB#0: -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_f32_to_i16: -; AVX2: # BB#0: -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_f32_to_i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> -; AVX512F-NEXT: vzeroupper -; 
AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_f32_to_i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_f32_to_i16: +; ALL: # BB#0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill> +; ALL-NEXT: retq %1 = fptrunc float %a0 to half %2 = bitcast half %1 to i16 ret i16 %2 } define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind { -; AVX1-LABEL: cvt_4f32_to_4i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %eax, %edx -; AVX1-NEXT: shlq $32, %rdx -; AVX1-NEXT: orq %rcx, %rdx -; AVX1-NEXT: vmovq %rdx, %xmm0 -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_4f32_to_4i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: movzwl %cx, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %edx -; AVX2-NEXT: movzwl %dx, %edx -; AVX2-NEXT: orl %eax, %edx -; AVX2-NEXT: shlq $32, %rdx -; AVX2-NEXT: orq %rcx, %rdx -; AVX2-NEXT: vmovq %rdx, %xmm0 -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_4f32_to_4i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx -; AVX512F-NEXT: orl %eax, %edx -; AVX512F-NEXT: shlq $32, %rdx -; AVX512F-NEXT: orq %rcx, %rdx -; AVX512F-NEXT: vmovq %rdx, %xmm0 -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_4f32_to_4i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, 
%xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %eax, %edx -; AVX512VL-NEXT: shlq $32, %rdx -; AVX512VL-NEXT: orq %rcx, %rdx -; AVX512VL-NEXT: vmovq %rdx, %xmm0 -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_4f32_to_4i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: movzwl %cx, %ecx +; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %edx +; ALL-NEXT: movzwl %dx, %edx +; ALL-NEXT: orl %eax, %edx +; ALL-NEXT: shlq $32, %rdx +; ALL-NEXT: orq %rcx, %rdx +; ALL-NEXT: vmovq %rdx, %xmm0 +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> ret <4 x i16> %2 @@ -2865,29 +2150,27 @@ define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind { ; ; AVX512F-LABEL: cvt_4f32_to_8i16_undef: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_4f32_to_8i16_undef: @@ -2974,29 +2257,27 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind { ; ; AVX512F-LABEL: cvt_4f32_to_8i16_zero: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; 
AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: cvt_4f32_to_8i16_zero: @@ -3033,194 +2314,52 @@ define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind { } define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind { -; AVX1-LABEL: cvt_8f32_to_8i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: shll $16, %eax -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %ecx -; AVX1-NEXT: orl %eax, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: shll $16, %edx -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: movzwl %ax, %eax -; AVX1-NEXT: orl %edx, %eax -; AVX1-NEXT: shlq $32, %rax -; AVX1-NEXT: orq %rcx, %rax -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: shll $16, %ecx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: movzwl %dx, %edx -; AVX1-NEXT: orl %ecx, %edx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: shll $16, %ecx -; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movzwl %si, %esi -; AVX1-NEXT: orl %ecx, %esi -; AVX1-NEXT: shlq $32, %rsi -; AVX1-NEXT: orq %rdx, %rsi -; AVX1-NEXT: vmovq %rsi, %xmm0 -; AVX1-NEXT: vmovq %rax, %xmm1 -; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX1-NEXT: vzeroupper -; AVX1-NEXT: retq -; -; AVX2-LABEL: cvt_8f32_to_8i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: shll $16, %eax -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: movzwl %cx, %ecx -; AVX2-NEXT: orl %eax, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: shll $16, %edx -; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: movzwl %ax, %eax -; AVX2-NEXT: orl %edx, %eax -; AVX2-NEXT: shlq $32, %rax -; AVX2-NEXT: orq %rcx, %rax -; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX2-NEXT: vmovshdup {{.*#+}} 
xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: shll $16, %ecx -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: movzwl %dx, %edx -; AVX2-NEXT: orl %ecx, %edx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: shll $16, %ecx -; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: movzwl %si, %esi -; AVX2-NEXT: orl %ecx, %esi -; AVX2-NEXT: shlq $32, %rsi -; AVX2-NEXT: orq %rdx, %rsi -; AVX2-NEXT: vmovq %rsi, %xmm0 -; AVX2-NEXT: vmovq %rax, %xmm1 -; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX2-NEXT: vzeroupper -; AVX2-NEXT: retq -; -; AVX512F-LABEL: cvt_8f32_to_8i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx -; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %edx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: shll $16, %eax -; AVX512F-NEXT: orl %edx, %eax -; AVX512F-NEXT: shlq $32, %rax -; AVX512F-NEXT: orq %rcx, %rax -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: movzwl %cx, %ecx -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %edx -; AVX512F-NEXT: shll $16, %edx -; AVX512F-NEXT: orl %ecx, %edx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: movzwl %cx, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %esi -; AVX512F-NEXT: shll $16, %esi -; AVX512F-NEXT: orl %ecx, %esi -; AVX512F-NEXT: shlq $32, %rsi -; AVX512F-NEXT: orq %rdx, %rsi -; AVX512F-NEXT: vmovq %rsi, %xmm0 -; AVX512F-NEXT: vmovq %rax, %xmm1 -; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_8f32_to_8i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: shll $16, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: movzwl %cx, %ecx -; AVX512VL-NEXT: orl %eax, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: shll $16, %edx -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: movzwl %ax, %eax -; AVX512VL-NEXT: orl %edx, %eax -; AVX512VL-NEXT: shlq $32, %rax -; AVX512VL-NEXT: orq %rcx, %rax -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512VL-NEXT: vmovshdup 
{{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: shll $16, %ecx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: movzwl %dx, %edx -; AVX512VL-NEXT: orl %ecx, %edx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: shll $16, %ecx -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %esi -; AVX512VL-NEXT: movzwl %si, %esi -; AVX512VL-NEXT: orl %ecx, %esi -; AVX512VL-NEXT: shlq $32, %rsi -; AVX512VL-NEXT: orq %rdx, %rsi -; AVX512VL-NEXT: vmovq %rsi, %xmm0 -; AVX512VL-NEXT: vmovq %rax, %xmm1 -; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; AVX512VL-NEXT: vzeroupper -; AVX512VL-NEXT: retq +; ALL-LABEL: cvt_8f32_to_8i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: shll $16, %eax +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: movzwl %cx, %ecx +; ALL-NEXT: orl %eax, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: shll $16, %edx +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: movzwl %ax, %eax +; ALL-NEXT: orl %edx, %eax +; ALL-NEXT: shlq $32, %rax +; ALL-NEXT: orq %rcx, %rax +; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0 +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: shll $16, %ecx +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: movzwl %dx, %edx +; ALL-NEXT: orl %ecx, %edx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: shll $16, %ecx +; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %esi +; ALL-NEXT: movzwl %si, %esi +; ALL-NEXT: orl %ecx, %esi +; ALL-NEXT: shlq $32, %rsi +; ALL-NEXT: orq %rdx, %rsi +; ALL-NEXT: vmovq %rsi, %xmm0 +; ALL-NEXT: vmovq %rax, %xmm1 +; ALL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; ALL-NEXT: vzeroupper +; ALL-NEXT: retq %1 = fptrunc <8 x float> %a0 to <8 x half> %2 = bitcast <8 x half> %1 to <8 x i16> ret <8 x i16> %2 @@ -3361,141 +2500,73 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind { ; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX2-NEXT: retq ; -; AVX512F-LABEL: cvt_16f32_to_16i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm1 -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm2 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: 
vcvtps2ph $4, %zmm2, %ymm1 -; AVX512F-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2 -; AVX512F-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm2, %eax -; AVX512F-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %eax, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm0 -; AVX512F-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: cvt_16f32_to_16i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm1 -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm2 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm1 -; AVX512VL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm1 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = 
xmm2[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX512VL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm2, %eax -; AVX512VL-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %eax, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; AVX512VL-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX512VL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 -; AVX512VL-NEXT: retq +; AVX512-LABEL: cvt_16f32_to_16i16: +; AVX512: # BB#0: +; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm1 +; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1 +; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2 +; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm2, %eax +; AVX512-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vmovd %eax, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm1, %eax +; AVX512-NEXT: vextractf128 $1, 
%ymm0, %xmm1 +; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0 +; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3] +; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1 +; AVX512-NEXT: vmovd %xmm0, %eax +; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 +; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 +; AVX512-NEXT: retq %1 = fptrunc <16 x float> %a0 to <16 x half> %2 = bitcast <16 x half> %1 to <16 x i16> ret <16 x i16> %2 @@ -3506,35 +2577,12 @@ define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind { ; define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind { -; AVX1-LABEL: store_cvt_f32_to_i16: -; AVX1: # BB#0: -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %eax -; AVX1-NEXT: movw %ax, (%rdi) -; AVX1-NEXT: retq -; -; AVX2-LABEL: store_cvt_f32_to_i16: -; AVX2: # BB#0: -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %eax -; AVX2-NEXT: movw %ax, (%rdi) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: store_cvt_f32_to_i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %eax -; AVX512F-NEXT: movw %ax, (%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_f32_to_i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %eax -; AVX512VL-NEXT: movw %ax, (%rdi) -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_f32_to_i16: +; ALL: # BB#0: +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %eax +; ALL-NEXT: movw %ax, (%rdi) +; ALL-NEXT: retq %1 = fptrunc float %a0 to half %2 = bitcast half %1 to i16 store i16 %2, i16* %a1 @@ -3542,83 +2590,24 @@ define void @store_cvt_f32_to_i16(float %a0, i16* %a1) nounwind { } define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) nounwind { -; AVX1-LABEL: store_cvt_4f32_to_4i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %eax -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %edx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movw %si, (%rdi) -; AVX1-NEXT: movw %dx, 6(%rdi) -; AVX1-NEXT: movw %cx, 4(%rdi) -; AVX1-NEXT: movw %ax, 2(%rdi) -; AVX1-NEXT: retq -; -; AVX2-LABEL: store_cvt_4f32_to_4i16: -; AVX2: # BB#0: -; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %eax -; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %ecx -; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX2-NEXT: vcvtps2ph $4, 
%xmm1, %xmm1 -; AVX2-NEXT: vmovd %xmm1, %edx -; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX2-NEXT: vmovd %xmm0, %esi -; AVX2-NEXT: movw %si, (%rdi) -; AVX2-NEXT: movw %dx, 6(%rdi) -; AVX2-NEXT: movw %cx, 4(%rdi) -; AVX2-NEXT: movw %ax, 2(%rdi) -; AVX2-NEXT: retq -; -; AVX512F-LABEL: store_cvt_4f32_to_4i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %edx -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 -; AVX512F-NEXT: vmovd %xmm0, %esi -; AVX512F-NEXT: movw %si, (%rdi) -; AVX512F-NEXT: movw %dx, 6(%rdi) -; AVX512F-NEXT: movw %cx, 4(%rdi) -; AVX512F-NEXT: movw %ax, 2(%rdi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: store_cvt_4f32_to_4i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %eax -; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %ecx -; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX512VL-NEXT: vmovd %xmm1, %edx -; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX512VL-NEXT: vmovd %xmm0, %esi -; AVX512VL-NEXT: movw %si, (%rdi) -; AVX512VL-NEXT: movw %dx, 6(%rdi) -; AVX512VL-NEXT: movw %cx, 4(%rdi) -; AVX512VL-NEXT: movw %ax, 2(%rdi) -; AVX512VL-NEXT: retq +; ALL-LABEL: store_cvt_4f32_to_4i16: +; ALL: # BB#0: +; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %eax +; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %ecx +; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; ALL-NEXT: vmovd %xmm1, %edx +; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0 +; ALL-NEXT: vmovd %xmm0, %esi +; ALL-NEXT: movw %si, (%rdi) +; ALL-NEXT: movw %dx, 6(%rdi) +; ALL-NEXT: movw %cx, 4(%rdi) +; ALL-NEXT: movw %ax, 2(%rdi) +; ALL-NEXT: retq %1 = fptrunc <4 x float> %a0 to <4 x half> %2 = bitcast <4 x half> %1 to <4 x i16> store <4 x i16> %2, <4 x i16>* %a1 @@ -3680,30 +2669,28 @@ define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) nounw ; ; AVX512F-LABEL: store_cvt_4f32_to_8i16_undef: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: 
vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] ; AVX512F-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: store_cvt_4f32_to_8i16_undef: @@ -3794,30 +2781,28 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi ; ; AVX512F-LABEL: store_cvt_4f32_to_8i16_zero: ; AVX512F: # BB#0: -; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def> -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm1 -; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax ; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 +; AVX512F-NEXT: vmovd %xmm1, %eax +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %ecx -; AVX512F-NEXT: shll $16, %ecx +; AVX512F-NEXT: movzwl %cx, %ecx ; AVX512F-NEXT: orl %eax, %ecx -; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1 +; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] +; AVX512F-NEXT: vcvtps2ph $4, %xmm1, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, %eax -; AVX512F-NEXT: movzwl %ax, %eax -; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3] -; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0 +; AVX512F-NEXT: shll $16, %eax +; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; AVX512F-NEXT: vcvtps2ph $4, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %edx -; AVX512F-NEXT: shll $16, %edx +; AVX512F-NEXT: movzwl %dx, %edx ; AVX512F-NEXT: orl %eax, %edx ; AVX512F-NEXT: shlq $32, %rdx ; AVX512F-NEXT: orq %rcx, %rdx ; AVX512F-NEXT: vmovq %rdx, %xmm0 ; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero ; AVX512F-NEXT: vmovdqa %xmm0, (%rdi) -; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: store_cvt_4f32_to_8i16_zero: @@ -3856,150 +2841,41 @@ define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) nounwi } define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) nounwind { -; AVX1-LABEL: store_cvt_8f32_to_8i16: -; AVX1: # BB#0: -; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r8d -; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r9d -; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovd %xmm1, %r10d -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %r11d -; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %eax -; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3] -; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2 -; AVX1-NEXT: vmovd %xmm2, %ecx -; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %edx -; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0 -; AVX1-NEXT: vmovd %xmm0, %esi -; AVX1-NEXT: movw %si, 8(%rdi) 
-; AVX1-NEXT: movw %dx, (%rdi)
-; AVX1-NEXT: movw %cx, 14(%rdi)
-; AVX1-NEXT: movw %ax, 12(%rdi)
-; AVX1-NEXT: movw %r11w, 10(%rdi)
-; AVX1-NEXT: movw %r10w, 6(%rdi)
-; AVX1-NEXT: movw %r9w, 4(%rdi)
-; AVX1-NEXT: movw %r8w, 2(%rdi)
-; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: store_cvt_8f32_to_8i16:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT: vmovd %xmm1, %r8d
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT: vmovd %xmm1, %r9d
-; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX2-NEXT: vmovd %xmm1, %r10d
-; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT: vmovd %xmm2, %r11d
-; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT: vmovd %xmm2, %eax
-; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX2-NEXT: vmovd %xmm2, %ecx
-; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %edx
-; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0
-; AVX2-NEXT: vmovd %xmm0, %esi
-; AVX2-NEXT: movw %si, 8(%rdi)
-; AVX2-NEXT: movw %dx, (%rdi)
-; AVX2-NEXT: movw %cx, 14(%rdi)
-; AVX2-NEXT: movw %ax, 12(%rdi)
-; AVX2-NEXT: movw %r11w, 10(%rdi)
-; AVX2-NEXT: movw %r10w, 6(%rdi)
-; AVX2-NEXT: movw %r9w, 4(%rdi)
-; AVX2-NEXT: movw %r8w, 2(%rdi)
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
-;
-; AVX512F-LABEL: store_cvt_8f32_to_8i16:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1
-; AVX512F-NEXT: vmovd %xmm1, %r8d
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1
-; AVX512F-NEXT: vmovd %xmm1, %r9d
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1
-; AVX512F-NEXT: vmovd %xmm1, %r10d
-; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: vmovd %xmm2, %r11d
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: vmovd %xmm2, %eax
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: vmovd %xmm2, %ecx
-; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0
-; AVX512F-NEXT: vmovd %xmm0, %edx
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm0
-; AVX512F-NEXT: vmovd %xmm0, %esi
-; AVX512F-NEXT: movw %si, 8(%rdi)
-; AVX512F-NEXT: movw %dx, (%rdi)
-; AVX512F-NEXT: movw %cx, 14(%rdi)
-; AVX512F-NEXT: movw %ax, 12(%rdi)
-; AVX512F-NEXT: movw %r11w, 10(%rdi)
-; AVX512F-NEXT: movw %r10w, 6(%rdi)
-; AVX512F-NEXT: movw %r9w, 4(%rdi)
-; AVX512F-NEXT: movw %r8w, 2(%rdi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: store_cvt_8f32_to_8i16:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovd %xmm1, %r8d
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovd %xmm1, %r9d
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VL-NEXT: vmovd %xmm1, %r10d
-; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovd %xmm2, %r11d
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovd %xmm2, %eax
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: vmovd %xmm2, %ecx
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %edx
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm0
-; AVX512VL-NEXT: vmovd %xmm0, %esi
-; AVX512VL-NEXT: movw %si, 8(%rdi)
-; AVX512VL-NEXT: movw %dx, (%rdi)
-; AVX512VL-NEXT: movw %cx, 14(%rdi)
-; AVX512VL-NEXT: movw %ax, 12(%rdi)
-; AVX512VL-NEXT: movw %r11w, 10(%rdi)
-; AVX512VL-NEXT: movw %r10w, 6(%rdi)
-; AVX512VL-NEXT: movw %r9w, 4(%rdi)
-; AVX512VL-NEXT: movw %r8w, 2(%rdi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
+; ALL-LABEL: store_cvt_8f32_to_8i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %r8d
+; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %r9d
+; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; ALL-NEXT: vmovd %xmm1, %r10d
+; ALL-NEXT: vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; ALL-NEXT: vmovd %xmm2, %r11d
+; ALL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; ALL-NEXT: vmovd %xmm2, %eax
+; ALL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; ALL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; ALL-NEXT: vmovd %xmm2, %ecx
+; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %edx
+; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm0
+; ALL-NEXT: vmovd %xmm0, %esi
+; ALL-NEXT: movw %si, 8(%rdi)
+; ALL-NEXT: movw %dx, (%rdi)
+; ALL-NEXT: movw %cx, 14(%rdi)
+; ALL-NEXT: movw %ax, 12(%rdi)
+; ALL-NEXT: movw %r11w, 10(%rdi)
+; ALL-NEXT: movw %r10w, 6(%rdi)
+; ALL-NEXT: movw %r9w, 4(%rdi)
+; ALL-NEXT: movw %r8w, 2(%rdi)
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
store <8 x i16> %2, <8 x i16>* %a1
@@ -4141,141 +3017,73 @@ define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) nounwin
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512F-LABEL: store_cvt_16f32_to_16i16:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; AVX512F-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm4
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm4
-; AVX512F-NEXT: movw %ax, 24(%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm4
-; AVX512F-NEXT: movw %ax, 16(%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm4
-; AVX512F-NEXT: movw %ax, 8(%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4
-; AVX512F-NEXT: movw %ax, (%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4
-; AVX512F-NEXT: movw %ax, 30(%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm4, %ymm4
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3
-; AVX512F-NEXT: movw %ax, 28(%rdi)
-; AVX512F-NEXT: vmovd %xmm3, %eax
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3
-; AVX512F-NEXT: movw %ax, 26(%rdi)
-; AVX512F-NEXT: vmovd %xmm3, %eax
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3
-; AVX512F-NEXT: movw %ax, 22(%rdi)
-; AVX512F-NEXT: vmovd %xmm3, %eax
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm3, %ymm3
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm0, %ymm0
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: movw %ax, 20(%rdi)
-; AVX512F-NEXT: vmovd %xmm2, %eax
-; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: movw %ax, 18(%rdi)
-; AVX512F-NEXT: vmovd %xmm2, %eax
-; AVX512F-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm2, %ymm2
-; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512F-NEXT: vcvtps2ph $4, %zmm1, %ymm1
-; AVX512F-NEXT: movw %ax, 14(%rdi)
-; AVX512F-NEXT: vmovd %xmm1, %eax
-; AVX512F-NEXT: movw %ax, 12(%rdi)
-; AVX512F-NEXT: vmovd %xmm2, %eax
-; AVX512F-NEXT: movw %ax, 10(%rdi)
-; AVX512F-NEXT: vmovd %xmm0, %eax
-; AVX512F-NEXT: movw %ax, 6(%rdi)
-; AVX512F-NEXT: vmovd %xmm3, %eax
-; AVX512F-NEXT: movw %ax, 4(%rdi)
-; AVX512F-NEXT: vmovd %xmm4, %eax
-; AVX512F-NEXT: movw %ax, 2(%rdi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: store_cvt_16f32_to_16i16:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vextractf64x4 $1, %zmm0, %ymm2
-; AVX512VL-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm4
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm4
-; AVX512VL-NEXT: movw %ax, 24(%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm4
-; AVX512VL-NEXT: movw %ax, 16(%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm4
-; AVX512VL-NEXT: movw %ax, 8(%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4
-; AVX512VL-NEXT: movw %ax, (%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4
-; AVX512VL-NEXT: movw %ax, 30(%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm4, %xmm4
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3
-; AVX512VL-NEXT: movw %ax, 28(%rdi)
-; AVX512VL-NEXT: vmovd %xmm3, %eax
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3
-; AVX512VL-NEXT: movw %ax, 26(%rdi)
-; AVX512VL-NEXT: vmovd %xmm3, %eax
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3
-; AVX512VL-NEXT: movw %ax, 22(%rdi)
-; AVX512VL-NEXT: vmovd %xmm3, %eax
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm3, %xmm3
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: movw %ax, 20(%rdi)
-; AVX512VL-NEXT: vmovd %xmm2, %eax
-; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: movw %ax, 18(%rdi)
-; AVX512VL-NEXT: vmovd %xmm2, %eax
-; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm2, %xmm2
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512VL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
-; AVX512VL-NEXT: movw %ax, 14(%rdi)
-; AVX512VL-NEXT: vmovd %xmm1, %eax
-; AVX512VL-NEXT: movw %ax, 12(%rdi)
-; AVX512VL-NEXT: vmovd %xmm2, %eax
-; AVX512VL-NEXT: movw %ax, 10(%rdi)
-; AVX512VL-NEXT: vmovd %xmm0, %eax
-; AVX512VL-NEXT: movw %ax, 6(%rdi)
-; AVX512VL-NEXT: vmovd %xmm3, %eax
-; AVX512VL-NEXT: movw %ax, 4(%rdi)
-; AVX512VL-NEXT: vmovd %xmm4, %eax
-; AVX512VL-NEXT: movw %ax, 2(%rdi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
+; AVX512-LABEL: store_cvt_16f32_to_16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2
+; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm4
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm4
+; AVX512-NEXT: movw %ax, 24(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm4
+; AVX512-NEXT: movw %ax, 16(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm4
+; AVX512-NEXT: movw %ax, 8(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: movw %ax, (%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: movw %ax, 30(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 28(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 26(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: movw %ax, 22(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
+; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: movw %ax, 20(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: movw %ax, 18(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
+; AVX512-NEXT: movw %ax, 14(%rdi)
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movw %ax, 12(%rdi)
+; AVX512-NEXT: vmovd %xmm2, %eax
+; AVX512-NEXT: movw %ax, 10(%rdi)
+; AVX512-NEXT: vmovd %xmm0, %eax
+; AVX512-NEXT: movw %ax, 6(%rdi)
+; AVX512-NEXT: vmovd %xmm3, %eax
+; AVX512-NEXT: movw %ax, 4(%rdi)
+; AVX512-NEXT: vmovd %xmm4, %eax
+; AVX512-NEXT: movw %ax, 2(%rdi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
store <16 x i16> %2, <16 x i16>* %a1