; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (mul x, 0) -> 0
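; The product is known all-zeros, so both targets materialize it with a single register xor.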
define <4 x i32> @combine_vec_mul_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; fold (mul x, 1) -> x
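; The identity multiply folds away completely, leaving a bare return on both targets.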
define <4 x i32> @combine_vec_mul_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_one:
; SSE:       # %bb.0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_one:
; AVX:       # %bb.0:
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (mul x, -1) -> 0-x
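; Negation avoids the multiply entirely; codegen subtracts x from a zeroed register.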
define <4 x i32> @combine_vec_mul_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negone:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negone:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; fold (mul x, (1 << c)) -> x << c
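; The uniform *2 case is strength-reduced one step further, to an add of x with itself.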
define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %1
}

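; Non-uniform power-of-two elements become a variable shift (vpsllvd) on AVX2; SSE4.1
; has no per-element 32-bit shift, so the pmulld is kept.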
define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_pow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 1, i32 2, i32 4, i32 16>
  ret <4 x i32> %1
}

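; For 64-bit elements SSE must shift the two lanes of each register separately and
; recombine them with a blend, while AVX2 handles all four lanes with a single vpsllvq.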
define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_pow2c:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $4, %xmm2
; SSE-NEXT:    psllq $2, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    paddq %xmm0, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_pow2c:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = mul <4 x i64> %x, <i64 1, i64 2, i64 4, i64 16>
  ret <4 x i64> %1
}

; fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
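; The uniform *-2 case lowers to an add (the shift) followed by a subtract from zero.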
define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2a:
; SSE:       # %bb.0:
; SSE-NEXT:    paddd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    psubd %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2a:
; AVX:       # %bb.0:
; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -2, i32 -2, i32 -2, i32 -2>
  ret <4 x i32> %1
}

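; With non-uniform negated powers of two, both targets keep a single constant pmulld
; rather than shifting and negating per element.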
define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_negpow2b:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2b:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
  ret <4 x i32> %1
}

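; Neither target has a vector 64-bit multiply here, so the constant multiply is expanded
; into 32-bit pmuludq partial products that are shifted and added back together.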
define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_negpow2c:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    pmuludq %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm0, %xmm4
; SSE-NEXT:    psrlq $32, %xmm4
; SSE-NEXT:    movdqa {{.*#+}} xmm5 = [18446744073709551615,18446744073709551614]
; SSE-NEXT:    pmuludq %xmm5, %xmm4
; SSE-NEXT:    paddq %xmm3, %xmm4
; SSE-NEXT:    psllq $32, %xmm4
; SSE-NEXT:    pmuludq %xmm5, %xmm0
; SSE-NEXT:    paddq %xmm4, %xmm0
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $32, %xmm3
; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [18446744073709551612,18446744073709551600]
; SSE-NEXT:    pmuludq %xmm4, %xmm3
; SSE-NEXT:    paddq %xmm2, %xmm3
; SSE-NEXT:    psllq $32, %xmm3
; SSE-NEXT:    pmuludq %xmm4, %xmm1
; SSE-NEXT:    paddq %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_negpow2c:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm1
; AVX-NEXT:    vpsrlq $32, %ymm0, %ymm2
; AVX-NEXT:    vmovdqa {{.*#+}} ymm3 = [18446744073709551615,18446744073709551614,18446744073709551612,18446744073709551600]
; AVX-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
; AVX-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
; AVX-NEXT:    vpsllq $32, %ymm1, %ymm1
; AVX-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
; AVX-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = mul <4 x i64> %x, <i64 -1, i64 -2, i64 -4, i64 -16>
  ret <4 x i64> %1
}

; (mul (shl X, c1), c2) -> (mul X, c2 << c1)
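; The shift folds into the multiply, leaving a single multiply by (c2 << c1).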
define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_shl_const:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, <i32 1, i32 3, i32 5, i32 7>
  ret <4 x i32> %2
}

; (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one use.
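; On AVX2 the sunk shift stays a cheap vpsllvd; SSE4.1 lacks a variable 32-bit shift, so
; the shift amounts reappear as a second constant pmulld.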
define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse0:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm1, %xmm0
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, %y
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_oneuse1:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm1, %xmm0
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_oneuse1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %y, %1
  ret <4 x i32> %2
}

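; When the shifted value has additional uses it is computed once and reused, instead of
; being folded into the multiply.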
define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse0:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pmulld %xmm0, %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, %y
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_mul_shl_multiuse1:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    pmulld %xmm0, %xmm1
; SSE-NEXT:    paddd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_shl_multiuse1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %y, %1
  %3 = add <4 x i32> %1, %2
  ret <4 x i32> %3
}

; fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2)
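; Distributing the multiply leaves one constant multiply followed by one constant add.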

define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
; SSE-LABEL: combine_vec_mul_add:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_mul_add:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
  %2 = mul <4 x i32> %1, <i32 4, i32 6, i32 2, i32 0>
  ret <4 x i32> %2
}

; TODO: fold Y = sra(X, size(X)-1); mul(or(Y, 1), X) -> (abs X)
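; Neither pattern below is recognized as abs yet, so the full multiply expansion is
; still emitted.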

define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
; SSE-LABEL: combine_mul_to_abs_v16i8:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpgtb %xmm0, %xmm2
; SSE-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT:    pmullw %xmm0, %xmm2
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm0, %xmm2
; SSE-NEXT:    pmullw %xmm3, %xmm1
; SSE-NEXT:    pand %xmm0, %xmm1
; SSE-NEXT:    packuswb %xmm2, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_to_abs_v16i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX-NEXT:    vpmullw %ymm0, %ymm1, %ymm0
; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %s = ashr <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %o = or <16 x i8> %s, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %m = mul <16 x i8> %o, %x
  ret <16 x i8> %m
}

define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
; SSE-LABEL: combine_mul_to_abs_v2i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlq $32, %xmm2
; SSE-NEXT:    pmuludq %xmm1, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm3
; SSE-NEXT:    psrlq $32, %xmm3
; SSE-NEXT:    pmuludq %xmm0, %xmm3
; SSE-NEXT:    paddq %xmm2, %xmm3
; SSE-NEXT:    psllq $32, %xmm3
; SSE-NEXT:    pmuludq %xmm1, %xmm0
; SSE-NEXT:    paddq %xmm3, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_to_abs_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm2
; AVX-NEXT:    vpmuludq %xmm1, %xmm2, %xmm2
; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
; AVX-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
; AVX-NEXT:    vpsllq $32, %xmm2, %xmm2
; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = ashr <2 x i64> %x, <i64 63, i64 63>
  %o = or <2 x i64> %s, <i64 1, i64 1>
  %m = mul <2 x i64> %x, %o
  ret <2 x i64> %m
}

; 'Quadratic Reciprocity' - and(mul(x,x),2) -> 0
; (a square is always 0 or 1 mod 4, so bit[1] of mul(x,x) is known zero)

define i64 @combine_mul_self_knownbits(i64 %x) {
; SSE-LABEL: combine_mul_self_knownbits:
; SSE:       # %bb.0:
; SSE-NEXT:    xorl %eax, %eax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_knownbits:
; AVX:       # %bb.0:
; AVX-NEXT:    xorl %eax, %eax
; AVX-NEXT:    retq
  %1 = mul i64 %x, %x
  %2 = and i64 %1, 2
  ret i64 %2
}

define <4 x i32> @combine_mul_self_knownbits_vector(<4 x i32> %x) {
; SSE-LABEL: combine_mul_self_knownbits_vector:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_knownbits_vector:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, %x
  %2 = and <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

; mul(x,x) - bit[1] is known zero, but when the other bits are demanded the source must
; not be undef; only the frozen vector version below can drop the 'and'.

define i64 @combine_mul_self_demandedbits(i64 %x) {
; SSE-LABEL: combine_mul_self_demandedbits:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    imulq %rdi, %rax
; SSE-NEXT:    andq $-3, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_demandedbits:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    imulq %rdi, %rax
; AVX-NEXT:    andq $-3, %rax
; AVX-NEXT:    retq
  %1 = mul i64 %x, %x
  %2 = and i64 %1, -3
  ret i64 %2
}

define <4 x i32> @combine_mul_self_demandedbits_vector(<4 x i32> %x) {
; SSE-LABEL: combine_mul_self_demandedbits_vector:
; SSE:       # %bb.0:
; SSE-NEXT:    pmulld %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_self_demandedbits_vector:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = freeze <4 x i32> %x
  %2 = mul <4 x i32> %1, %1
  %3 = and <4 x i32> %2, <i32 -3, i32 -3, i32 -3, i32 -3>
  ret <4 x i32> %3
}

; PR59217 - Reuse umul_lohi/smul_lohi node
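; mulq/imulq compute both halves of the full product in rdx:rax, so the separate low
; multiply reuses the same instruction instead of emitting a second one.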

define i64 @combine_mul_umul_lohi_i64(i64 %a, i64 %b) {
; SSE-LABEL: combine_mul_umul_lohi_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    mulq %rsi
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_umul_lohi_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    mulq %rsi
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %a128 = zext i64 %a to i128
  %b128 = zext i64 %b to i128
  %m128 = mul nuw i128 %a128, %b128
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %a, %b
  %r = xor i64 %lo, %hi
  ret i64 %r
}

define i64 @combine_mul_smul_lohi_commute_i64(i64 %a, i64 %b) {
; SSE-LABEL: combine_mul_smul_lohi_commute_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    imulq %rsi
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_smul_lohi_commute_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    imulq %rsi
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %a128 = sext i64 %a to i128
  %b128 = sext i64 %b to i128
  %m128 = mul nsw i128 %a128, %b128
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %b, %a
  %r = xor i64 %lo, %hi
  ret i64 %r
}

define i64 @combine_mul_umul_lohi_const_i64(i64 %h) {
; SSE-LABEL: combine_mul_umul_lohi_const_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
; SSE-NEXT:    mulq %rcx
; SSE-NEXT:    xorq %rdx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_umul_lohi_const_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    movabsq $-4265267296055464877, %rcx # imm = 0xC4CEB9FE1A85EC53
; AVX-NEXT:    mulq %rcx
; AVX-NEXT:    xorq %rdx, %rax
; AVX-NEXT:    retq
  %h128 = zext i64 %h to i128
  %m128 = mul nuw i128 %h128, 14181476777654086739
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %h, 14181476777654086739
  %r = xor i64 %lo, %hi
  ret i64 %r
}

define i64 @combine_mul_smul_lohi_const_i64(i64 %h) {
; SSE-LABEL: combine_mul_smul_lohi_const_i64:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %rax
; SSE-NEXT:    movq %rdi, %rcx
; SSE-NEXT:    sarq $63, %rcx
; SSE-NEXT:    movabsq $-4265267296055464877, %rsi # imm = 0xC4CEB9FE1A85EC53
; SSE-NEXT:    mulq %rsi
; SSE-NEXT:    imulq %rsi, %rcx
; SSE-NEXT:    addq %rdx, %rcx
; SSE-NEXT:    xorq %rcx, %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_mul_smul_lohi_const_i64:
; AVX:       # %bb.0:
; AVX-NEXT:    movq %rdi, %rax
; AVX-NEXT:    movq %rdi, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    movabsq $-4265267296055464877, %rsi # imm = 0xC4CEB9FE1A85EC53
; AVX-NEXT:    mulq %rsi
; AVX-NEXT:    imulq %rsi, %rcx
; AVX-NEXT:    addq %rdx, %rcx
; AVX-NEXT:    xorq %rcx, %rax
; AVX-NEXT:    retq
  %h128 = sext i64 %h to i128
  %m128 = mul nsw i128 %h128, 14181476777654086739
  %hi128 = lshr i128 %m128, 64
  %hi = trunc i128 %hi128 to i64
  %lo = mul i64 %h, 14181476777654086739
  %r = xor i64 %lo, %hi
  ret i64 %r
}

; This would infinite loop because DAGCombiner wants to turn this into a shift,
; but x86 lowering wants to avoid non-uniform vector shift amounts.

define <16 x i8> @PR35579(<16 x i8> %x) {
; SSE-LABEL: PR35579:
; SSE:       # %bb.0:
; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT:    pand %xmm2, %xmm0
; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    pand %xmm2, %xmm1
; SSE-NEXT:    packuswb %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: PR35579:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %r = mul <16 x i8> %x, <i8 0, i8 1, i8 2, i8 1, i8 4, i8 1, i8 2, i8 1, i8 8, i8 1, i8 2, i8 1, i8 4, i8 1, i8 2, i8 1>
  ret <16 x i8> %r
}

; OSS Fuzz: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15429
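; The power-of-two multiply lowers as in the pow2 cases above, and the constant then
; overwrites element 0.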
define <4 x i64> @fuzz15429(<4 x i64> %InVec) {
; SSE-LABEL: fuzz15429:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    psllq $3, %xmm2
; SSE-NEXT:    psllq $2, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    paddq %xmm0, %xmm0
; SSE-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; SSE-NEXT:    pinsrq $0, %rax, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: fuzz15429:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
; AVX-NEXT:    vmovq %rax, %xmm1
; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; AVX-NEXT:    retq
  %mul = mul <4 x i64> %InVec, <i64 1, i64 2, i64 4, i64 8>
  %I = insertelement <4 x i64> %mul, i64 9223372036854775807, i64 0
  ret <4 x i64> %I
}
