; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2-SLOW %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2-FAST %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2-FAST %s

; These patterns are produced by the LoopVectorizer for interleaved loads of
; i16 elements with an interleave factor (stride) of 6.
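;
; A minimal sketch, assuming a hypothetical array-of-structs input (the struct,
; function, and variable names below are illustrative only, not taken from any
; real workload), of the kind of scalar loop the LoopVectorizer can turn into
; the wide <6*VF x i16> load plus stride-6 shufflevectors tested below:
;
;   #include <stdint.h>
;   struct S { uint16_t f0, f1, f2, f3, f4, f5; };  /* six i16 fields per element */
;   void split(const struct S *in, uint16_t *o0, uint16_t *o1, uint16_t *o2,
;              uint16_t *o3, uint16_t *o4, uint16_t *o5, int n) {
;     for (int i = 0; i < n; ++i) {      /* one interleaved group of 6 per iteration */
;       o0[i] = in[i].f0;
;       o1[i] = in[i].f1;
;       o2[i] = in[i].f2;
;       o3[i] = in[i].f3;
;       o4[i] = in[i].f4;
;       o5[i] = in[i].f5;
;     }
;   }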

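; VF=2: deinterleave one <12 x i16> load into six <2 x i16> results (stride 6).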
define void @vf2(<12 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1, <2 x i16>* %out.vec2, <2 x i16>* %out.vec3, <2 x i16>* %out.vec4, <2 x i16>* %out.vec5) nounwind {
; AVX2-SLOW-LABEL: vf2:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpbroadcastw 4(%rdi), %xmm4
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpbroadcastw 20(%rdi), %xmm6
; AVX2-SLOW-NEXT:    vpbroadcastw 8(%rdi), %xmm7
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX2-SLOW-NEXT:    vpsrlq $48, %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-SLOW-NEXT:    vmovd %xmm3, (%rsi)
; AVX2-SLOW-NEXT:    vmovd %xmm2, (%rdx)
; AVX2-SLOW-NEXT:    vmovd %xmm4, (%rcx)
; AVX2-SLOW-NEXT:    vmovd %xmm5, (%r8)
; AVX2-SLOW-NEXT:    vmovd %xmm6, (%r9)
; AVX2-SLOW-NEXT:    vmovd %xmm0, (%rax)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf2:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,3,2,3]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[0,2,2,3,4,5,6,7]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
; AVX2-FAST-NEXT:    vpbroadcastw 4(%rdi), %xmm4
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[12,13,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpbroadcastw 20(%rdi), %xmm6
; AVX2-FAST-NEXT:    vpbroadcastw 8(%rdi), %xmm7
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
; AVX2-FAST-NEXT:    vpsrlq $48, %xmm1, %xmm1
; AVX2-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX2-FAST-NEXT:    vmovd %xmm3, (%rsi)
; AVX2-FAST-NEXT:    vmovd %xmm2, (%rdx)
; AVX2-FAST-NEXT:    vmovd %xmm4, (%rcx)
; AVX2-FAST-NEXT:    vmovd %xmm5, (%r8)
; AVX2-FAST-NEXT:    vmovd %xmm6, (%r9)
; AVX2-FAST-NEXT:    vmovd %xmm0, (%rax)
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <12 x i16>, <12 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 0, i32 6>
  %strided.vec1 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 1, i32 7>
  %strided.vec2 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 2, i32 8>
  %strided.vec3 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 3, i32 9>
  %strided.vec4 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 4, i32 10>
  %strided.vec5 = shufflevector <12 x i16> %wide.vec, <12 x i16> poison, <2 x i32> <i32 5, i32 11>

  store <2 x i16> %strided.vec0, <2 x i16>* %out.vec0, align 32
  store <2 x i16> %strided.vec1, <2 x i16>* %out.vec1, align 32
  store <2 x i16> %strided.vec2, <2 x i16>* %out.vec2, align 32
  store <2 x i16> %strided.vec3, <2 x i16>* %out.vec3, align 32
  store <2 x i16> %strided.vec4, <2 x i16>* %out.vec4, align 32
  store <2 x i16> %strided.vec5, <2 x i16>* %out.vec5, align 32

  ret void
}

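; VF=4: deinterleave one <24 x i16> load into six <4 x i16> results (stride 6).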
define void @vf4(<24 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3, <4 x i16>* %out.vec4, <4 x i16>* %out.vec5) nounwind {
; AVX2-SLOW-LABEL: vf4:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpsrld $16, %xmm1, %xmm4
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[0,1,0,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,7,6,7]
; AVX2-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm2[3],xmm4[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vmovq %xmm3, (%rsi)
; AVX2-SLOW-NEXT:    vmovq %xmm4, (%rdx)
; AVX2-SLOW-NEXT:    vmovq %xmm6, (%rcx)
; AVX2-SLOW-NEXT:    vmovq %xmm5, (%r8)
; AVX2-SLOW-NEXT:    vmovq %xmm1, (%r9)
; AVX2-SLOW-NEXT:    vmovq %xmm0, (%rax)
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf4:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm0[0],xmm2[1],xmm0[2,3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2],xmm3[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,12,13,8,9,4,5,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpsrld $16, %xmm1, %xmm4
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,u,2,3,14,15,12,13,14,15]
; AVX2-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm2[3],xmm4[4,5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm1[0],xmm5[1,2],xmm1[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[4,5,0,1,12,13,8,9,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[6,7,2,3,14,15,10,11,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[8,9,4,5,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[10,11,6,7,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vmovq %xmm3, (%rsi)
; AVX2-FAST-NEXT:    vmovq %xmm4, (%rdx)
; AVX2-FAST-NEXT:    vmovq %xmm6, (%rcx)
; AVX2-FAST-NEXT:    vmovq %xmm5, (%r8)
; AVX2-FAST-NEXT:    vmovq %xmm1, (%r9)
; AVX2-FAST-NEXT:    vmovq %xmm0, (%rax)
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <24 x i16>, <24 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 0, i32 6, i32 12, i32 18>
  %strided.vec1 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 1, i32 7, i32 13, i32 19>
  %strided.vec2 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 2, i32 8, i32 14, i32 20>
  %strided.vec3 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 3, i32 9, i32 15, i32 21>
  %strided.vec4 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 4, i32 10, i32 16, i32 22>
  %strided.vec5 = shufflevector <24 x i16> %wide.vec, <24 x i16> poison, <4 x i32> <i32 5, i32 11, i32 17, i32 23>

  store <4 x i16> %strided.vec0, <4 x i16>* %out.vec0, align 32
  store <4 x i16> %strided.vec1, <4 x i16>* %out.vec1, align 32
  store <4 x i16> %strided.vec2, <4 x i16>* %out.vec2, align 32
  store <4 x i16> %strided.vec3, <4 x i16>* %out.vec3, align 32
  store <4 x i16> %strided.vec4, <4 x i16>* %out.vec4, align 32
  store <4 x i16> %strided.vec5, <4 x i16>* %out.vec5, align 32

  ret void
}

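; VF=8: deinterleave one <48 x i16> load into six <8 x i16> results (stride 6).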
define void @vf8(<48 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1, <8 x i16>* %out.vec2, <8 x i16>* %out.vec3, <8 x i16>* %out.vec4, <8 x i16>* %out.vec5) nounwind {
; AVX2-SLOW-LABEL: vf8:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
; AVX2-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
; AVX2-SLOW-NEXT:    vpslld $16, %xmm0, %xmm2
; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
; AVX2-SLOW-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm8[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm7
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm7[0,2,0,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1],xmm1[2],xmm6[3],xmm1[4,5],xmm6[6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm1[0,1,2],xmm2[3]
; AVX2-SLOW-NEXT:    vpbroadcastw 74(%rdi), %xmm1
; AVX2-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2],xmm5[3],xmm6[4,5],xmm5[6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm5[0,1,2],xmm1[3]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,2,0,4,5,6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[0,0,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0],xmm2[1,2],xmm7[3],xmm2[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm8[0,1],xmm0[2],xmm8[3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[3,1,2,1,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm1[1,2],xmm5[3],xmm1[4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[2,2,2,2,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2,3]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3,4],xmm0[5,6,7]
; AVX2-SLOW-NEXT:    vmovdqa %xmm9, (%rsi)
; AVX2-SLOW-NEXT:    vmovdqa %xmm10, (%rdx)
; AVX2-SLOW-NEXT:    vmovdqa %xmm2, (%rcx)
; AVX2-SLOW-NEXT:    vmovdqa %xmm1, (%r8)
; AVX2-SLOW-NEXT:    vmovdqa %xmm5, (%r9)
; AVX2-SLOW-NEXT:    vmovdqa %xmm0, (%rax)
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf8:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm2
; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
; AVX2-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
; AVX2-FAST-NEXT:    vpslld $16, %xmm0, %xmm3
; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %xmm10
; AVX2-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm10[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,0,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1],xmm3[2],xmm6[3],xmm3[4,5],xmm6[6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm8 = xmm3[0,1,2],xmm8[3]
; AVX2-FAST-NEXT:    vpbroadcastw 74(%rdi), %xmm6
; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm7[2],xmm5[3],xmm7[4,5],xmm5[6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm9 = xmm5[0,1,2],xmm6[3]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[2,1,2,3]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[2,1,2,0,4,5,6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,3,2,1]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1],xmm0[2],xmm10[3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,u,u,u,u,u,u,0,1,12,13,8,9]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5,6,7]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[3,1,2,1,4,5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1,2],xmm3[3],xmm6[4,5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,2,3,14,15,10,11]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[2,2,2,2,4,5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm10[1],xmm0[2,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,u,u,u,u,u,u,4,5,0,1,12,13]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,u,u,u,6,7,2,3,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4],xmm0[5,6,7]
; AVX2-FAST-NEXT:    vmovdqa %xmm8, (%rsi)
; AVX2-FAST-NEXT:    vmovdqa %xmm9, (%rdx)
; AVX2-FAST-NEXT:    vmovdqa %xmm1, (%rcx)
; AVX2-FAST-NEXT:    vmovdqa %xmm3, (%r8)
; AVX2-FAST-NEXT:    vmovdqa %xmm5, (%r9)
; AVX2-FAST-NEXT:    vmovdqa %xmm0, (%rax)
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <48 x i16>, <48 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42>
  %strided.vec1 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43>
  %strided.vec2 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44>
  %strided.vec3 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45>
  %strided.vec4 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46>
  %strided.vec5 = shufflevector <48 x i16> %wide.vec, <48 x i16> poison, <8 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47>

  store <8 x i16> %strided.vec0, <8 x i16>* %out.vec0, align 32
  store <8 x i16> %strided.vec1, <8 x i16>* %out.vec1, align 32
  store <8 x i16> %strided.vec2, <8 x i16>* %out.vec2, align 32
  store <8 x i16> %strided.vec3, <8 x i16>* %out.vec3, align 32
  store <8 x i16> %strided.vec4, <8 x i16>* %out.vec4, align 32
  store <8 x i16> %strided.vec5, <8 x i16>* %out.vec5, align 32

  ret void
}

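; VF=16: deinterleave one <96 x i16> load into six <16 x i16> results (stride 6).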
define void @vf16(<96 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2, <16 x i16>* %out.vec3, <16 x i16>* %out.vec4, <16 x i16>* %out.vec5) nounwind {
; AVX2-SLOW-LABEL: vf16:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm13
; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm14
; AVX2-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
; AVX2-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm5
; AVX2-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm15
; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm0
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[2,2,2,2,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm9
; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm2[2,3],ymm5[2,3]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm12[0,2,2,1,4,6,6,5]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm7[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
; AVX2-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm2[0,1],ymm5[0,1]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[0,3,2,3,4,7,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,2,4,5,6,6]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm11[2],ymm2[3,4,5,6],ymm11[7],ymm2[8,9],ymm11[10],ymm2[11,12,13,14],ymm11[15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5,6],ymm14[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm3
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,2,0,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3],xmm4[4,5],xmm1[6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[2,1,2,1,6,5,6,5]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm5[1,3,2,3,4,5,6,7,9,11,10,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1],ymm1[2],ymm4[3,4,5,6],ymm1[7],ymm4[8,9],ymm1[10],ymm4[11,12,13,14],ymm1[15]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5],ymm3[6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm11[2],ymm15[3,4],ymm11[5],ymm15[6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,6,4]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm0[0,0,0,0,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm10[1,1,0,3,5,5,4,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,4,6,7,8,9,10,11,12,12,14,15]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm12[0,3,2,3,4,7,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[0,0,2,3,4,5,6,7,8,8,10,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1,2,3,4],ymm4[5,6],ymm8[7],ymm4[8],ymm8[9,10,11,12],ymm4[13,14],ymm8[15]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[2,1,2,3]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm5
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,3,2,1]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[0,0,2,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,6,6,6]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[2,1,2,0,4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3,4],xmm4[5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm9[0,1,3,3,4,5,6,7,8,9,11,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,5,5,5,8,9,10,11,13,13,13,13]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3,4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9,10,11,12],ymm1[13,14],ymm3[15]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[0,1,3,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm13[2],ymm14[3,4],ymm13[5],ymm14[6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[2,2,2,2,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm0[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm10[2,1,2,1,6,5,6,5]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm3[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[0,1,0,3,4,5,4,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm5[2,2,2,2,4,5,6,7,10,10,10,10,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,6,8,9,10,11,12,13,12,14]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1,2,3,4],ymm4[5],ymm6[6,7],ymm4[8],ymm6[9,10,11,12],ymm4[13],ymm6[14,15]
; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm2, %ymm4, %ymm2
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0],ymm15[1],ymm11[2,3],ymm15[4],ymm11[5,6],ymm15[7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,3,3,4,5,7,7]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[3,3,3,3,4,5,6,7,11,11,11,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5,5,7,8,9,10,11,12,13,13,15]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2,3,4],ymm3[5],ymm5[6,7],ymm3[8],ymm5[9,10,11,12],ymm3[13],ymm5[14,15]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm1
; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm0, %ymm3, %ymm0
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,1,0,2,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,6,6,6]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5,6,7]
; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,1,3,4,5,6,7]
; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT:    vmovaps %ymm1, (%rsi)
; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX2-SLOW-NEXT:    vmovaps %ymm1, (%rdx)
; AVX2-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
; AVX2-SLOW-NEXT:    vmovdqa %ymm9, (%r8)
; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%r9)
; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: vf16:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa (%rdi), %ymm14
; AVX2-FAST-NEXT:    vmovdqa 32(%rdi), %ymm15
; AVX2-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
; AVX2-FAST-NEXT:    vmovdqa 96(%rdi), %ymm5
; AVX2-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT:    vmovdqa 128(%rdi), %ymm13
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm0
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm9
; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm2[2,3],ymm5[2,3]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm10[2,1,2,1,6,5,6,5]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,12,13,u,u,u,u,16,17,u,u,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm2[0,1],ymm5[0,1]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm7[0,3,2,3,4,7,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,u,u,16,17,20,21,u,u,22,23,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4,5,6],ymm12[7],ymm2[8,9],ymm12[10],ymm2[11,12,13,14],ymm12[15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5,6],ymm15[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm12[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm3
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,0,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3],xmm4[4,5],xmm1[6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5],ymm1[6,7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm11[1,1,1,1,4,5,6,7,9,9,9,9,12,13,14,15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,u,u,18,19,22,23,u,u,22,23,u,u,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1],ymm1[2],ymm4[3,4,5,6],ymm1[7],ymm4[8,9],ymm1[10],ymm4[11,12,13,14],ymm1[15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm12[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5],ymm3[6,7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm13[2],ymm11[3,4],ymm13[5],ymm11[6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,6,5,6,4]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm7[4,5,u,u,u,u,u,u,u,u,0,1,12,13,u,u,20,21,u,u,u,u,u,u,u,u,16,17,28,29,u,u]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm10[0,3,2,3,4,7,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm9[u,u,0,1,4,5,6,7,8,9,u,u,u,u,8,9,u,u,16,17,20,21,22,23,24,25,u,u,u,u,24,25]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1,2,3,4],ymm4[5,6],ymm8[7],ymm4[8],ymm8[9,10,11,12],ymm4[13,14],ymm8[15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm8[2,1,2,3]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[2,1,2,0,4,5,6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm2
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1,2],xmm3[3],xmm6[4,5,6,7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm6
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm4[0,1,2],ymm6[3,4,5,6,7],ymm4[8,9,10],ymm6[11,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm6[4,5,6,7]
; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[6,7,u,u,u,u,u,u,u,u,2,3,14,15,u,u,22,23,u,u,u,u,u,u,u,u,18,19,30,31,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm9[u,u,2,3,6,7,6,7,10,11,u,u,u,u,10,11,u,u,18,19,22,23,22,23,26,27,u,u,u,u,26,27]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3,4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9,10,11,12],ymm1[13,14],ymm3[15]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[3,1,2,1,4,5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4],xmm1[5,6,7]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm7[2,1,2,1,6,5,6,5]
; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,6,6,6,8,9,10,11,14,14,14,14]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm10[0,1,0,3,4,5,4,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[u,u,4,5,4,5,4,5,8,9,u,u,8,9,12,13,u,u,20,21,20,21,20,21,24,25,u,u,24,25,28,29]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1,2,3,4],ymm1[5],ymm3[6,7],ymm1[8],ymm3[9,10,11,12],ymm1[13],ymm3[14,15]
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0,1],ymm14[2],ymm15[3,4],ymm14[5],ymm15[6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[2,2,2,2,4,5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2,3],xmm5[4],xmm6[5,6,7]
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm5, %ymm1, %ymm1
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6],ymm11[7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[2,3,u,u,u,u,u,u,u,u,14,15,u,u,u,u,18,19,u,u,u,u,u,u,u,u,30,31,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,6,7,6,7,6,7,8,9,u,u,10,11,14,15,u,u,22,23,22,23,22,23,24,25,u,u,26,27,30,31]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4],ymm0[5],ymm2[6,7],ymm0[8],ymm2[9,10,11,12],ymm0[13],ymm2[14,15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3],xmm2[4],xmm3[5,6,7]
; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm3
; AVX2-FAST-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[0,3,2,1]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT:    vmovaps %ymm2, (%rsi)
; AVX2-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
; AVX2-FAST-NEXT:    vmovaps %ymm2, (%rdx)
; AVX2-FAST-NEXT:    vmovdqa %ymm8, (%rcx)
; AVX2-FAST-NEXT:    vmovdqa %ymm9, (%r8)
; AVX2-FAST-NEXT:    vmovdqa %ymm1, (%r9)
; AVX2-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
; AVX2-FAST-NEXT:    vmovdqa %ymm0, (%rax)
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %wide.vec = load <96 x i16>, <96 x i16>* %in.vec, align 32

  %strided.vec0 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90>
  %strided.vec1 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91>
  %strided.vec2 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 2, i32 8, i32 14, i32 20, i32 26, i32 32, i32 38, i32 44, i32 50, i32 56, i32 62, i32 68, i32 74, i32 80, i32 86, i32 92>
  %strided.vec3 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 3, i32 9, i32 15, i32 21, i32 27, i32 33, i32 39, i32 45, i32 51, i32 57, i32 63, i32 69, i32 75, i32 81, i32 87, i32 93>
  %strided.vec4 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 4, i32 10, i32 16, i32 22, i32 28, i32 34, i32 40, i32 46, i32 52, i32 58, i32 64, i32 70, i32 76, i32 82, i32 88, i32 94>
  %strided.vec5 = shufflevector <96 x i16> %wide.vec, <96 x i16> poison, <16 x i32> <i32 5, i32 11, i32 17, i32 23, i32 29, i32 35, i32 41, i32 47, i32 53, i32 59, i32 65, i32 71, i32 77, i32 83, i32 89, i32 95>

  store <16 x i16> %strided.vec0, <16 x i16>* %out.vec0, align 32
  store <16 x i16> %strided.vec1, <16 x i16>* %out.vec1, align 32
  store <16 x i16> %strided.vec2, <16 x i16>* %out.vec2, align 32
  store <16 x i16> %strided.vec3, <16 x i16>* %out.vec3, align 32
  store <16 x i16> %strided.vec4, <16 x i16>* %out.vec4, align 32
  store <16 x i16> %strided.vec5, <16 x i16>* %out.vec5, align 32

  ret void
}
