// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,SIGNED
// RUN: %clang_cc1 -ffixed-point -triple x86_64-unknown-linux-gnu -fpadding-on-unsigned-fixed-point -S -emit-llvm %s -o - | FileCheck %s --check-prefixes=CHECK,UNSIGNED

// _Accum operands (integral + fractional bits) used by the tests below.
short _Accum sa;
_Accum a, a2, a3, a4;
long _Accum la;
unsigned short _Accum usa;
unsigned _Accum ua;
unsigned long _Accum ula;

// _Fract operands (purely fractional types).
short _Fract sf;
_Fract f;
long _Fract lf;
unsigned short _Fract usf;
unsigned _Fract uf;
unsigned long _Fract ulf;

// Saturating variants: results must clamp to the type's range instead of wrapping.
_Sat short _Accum sa_sat;
_Sat _Accum a_sat;
_Sat long _Accum la_sat;
_Sat unsigned short _Accum usa_sat;
_Sat unsigned _Accum ua_sat;
_Sat unsigned long _Accum ula_sat;
_Sat unsigned _Fract uf_sat;

// Plain integer/boolean operands for mixed int * fixed-point multiplications.
int i;
unsigned int ui;
_Bool b;

// CHECK-LABEL: @smul_sasasa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// CHECK-NEXT:    store i16 [[TMP2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// Same type on both sides (scale 7): a single llvm.smul.fix.i16, no resizing.
void smul_sasasa() {
  sa = sa * sa;
}

// CHECK-LABEL: @smul_asaa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// CHECK-NEXT:    store i32 [[TMP2]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// short _Accum operand is sext'd to i32 and upscaled by 8 to reach the
// _Accum scale of 15 before the i32 multiply.
void smul_asaa() {
  a = sa * a;
}

// CHECK-LABEL: @smul_sasasf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* @sf, align 1
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP1]] to i16
// CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// CHECK-NEXT:    store i16 [[TMP2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// short _Fract shares scale 7 with short _Accum, so only a sext i8->i16 is
// needed; no upscale shift.
void smul_sasasf() {
  sa = sa * sf;
}

// CHECK-LABEL: @smul_sasaf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* @f, align 2
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i24
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i24
// CHECK-NEXT:    [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i24 [[TMP2]], 8
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// Mixed scales (7 vs 15) force a widened i24 multiply at scale 15; the result
// is downscaled (ashr 8) and truncated back to short _Accum.
void smul_sasaf() {
  sa = sa * f;
}

// CHECK-LABEL: @smul_aasf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* @sf, align 1
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i8 [[TMP1]] to i32
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
// CHECK-NEXT:    store i32 [[TMP2]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// short _Fract is widened to i32 and upscaled by 8 to the _Accum scale of 15;
// the multiply then happens directly in i32.
void smul_aasf() {
  a = a * sf;
}

// CHECK-LABEL: @smul_aalf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @lf, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP0]] to i48
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i48 [[RESIZE]], 16
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i48
// CHECK-NEXT:    [[TMP2:%.*]] = call i48 @llvm.smul.fix.i48(i48 [[UPSCALE]], i48 [[RESIZE1]], i32 31)
// CHECK-NEXT:    [[DOWNSCALE:%.*]] = ashr i48 [[TMP2]], 16
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i48 [[DOWNSCALE]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// _Accum * long _Fract: computed in i48 at the long _Fract scale of 31, then
// downscaled by 16 and truncated back to the i32 result type.
void smul_aalf() {
  a = a * lf;
}

// SIGNED-LABEL: @smul_sasausa(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i17
// SIGNED-NEXT:    [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @smul_sasausa(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @sa, align 2
// UNSIGNED-NEXT:    ret void
//
// Signed * unsigned short _Accum. Without padding (SIGNED) the common type
// needs an extra bit (i17) to hold the unsigned scale-8 operand; with
// -fpadding-on-unsigned-fixed-point (UNSIGNED) both are scale 7 in i16.
void smul_sasausa() {
  sa = sa * usa;
}

// SIGNED-LABEL: @smul_asaua(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i33
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i33 [[RESIZE]], 9
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i33
// SIGNED-NEXT:    [[TMP2:%.*]] = call i33 @llvm.smul.fix.i33(i33 [[UPSCALE]], i33 [[RESIZE1]], i32 16)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i33 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i33 [[DOWNSCALE]] to i32
// SIGNED-NEXT:    store i32 [[RESIZE2]], i32* @a, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @smul_asaua(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// UNSIGNED-NEXT:    store i32 [[TMP2]], i32* @a, align 4
// UNSIGNED-NEXT:    ret void
//
// SIGNED: common type is i33 at scale 16 (unsigned _Accum has no padding),
// with a downscale by 1 for the signed _Accum result. UNSIGNED: padding bit
// makes both scale 15, so a plain i32 smul.fix suffices.
void smul_asaua() {
  a = sa * ua;
}

// SIGNED-LABEL: @smul_sasausf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i17
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i17 [[RESIZE]], 1
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i8 [[TMP1]] to i17
// SIGNED-NEXT:    [[TMP2:%.*]] = call i17 @llvm.smul.fix.i17(i17 [[UPSCALE]], i17 [[RESIZE1]], i32 8)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i17 [[TMP2]], 1
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i17 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @smul_sasausf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @sa, align 2
// UNSIGNED-NEXT:    ret void
//
// SIGNED: i17 intermediate at scale 8 (unsigned short _Fract is scale 8).
// UNSIGNED: padding makes usf scale 7, matching short _Accum directly.
void smul_sasausf() {
  sa = sa * usf;
}

// SIGNED-LABEL: @smul_sasaulf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ulf, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i41
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i41 [[RESIZE]], 25
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i41
// SIGNED-NEXT:    [[TMP2:%.*]] = call i41 @llvm.smul.fix.i41(i41 [[UPSCALE]], i41 [[RESIZE1]], i32 32)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i41 [[TMP2]], 25
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i41 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @smul_sasaulf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ulf, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 24
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 31)
// UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = ashr i40 [[TMP2]], 24
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[DOWNSCALE]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// UNSIGNED-NEXT:    ret void
//
// SIGNED: i41 intermediate at the unsigned long _Fract scale of 32;
// UNSIGNED: the padding bit drops that to i40/scale 31. Both downscale and
// truncate back to short _Accum.
void smul_sasaulf() {
  sa = sa * ulf;
}

// CHECK-LABEL: @smul_aaaaa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @a2, align 4
// CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP0]], i32 [[TMP1]], i32 15)
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* @a3, align 4
// CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP2]], i32 [[TMP3]], i32 15)
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* @a4, align 4
// CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[TMP4]], i32 [[TMP5]], i32 15)
// CHECK-NEXT:    store i32 [[TMP6]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// Chained multiplication is left-associative: three separate smul.fix.i32
// calls, each feeding the next, all at scale 15.
void smul_aaaaa() {
  a = a * a2 * a3 * a4;
}


// SIGNED-LABEL: @umul_usausausa(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
// SIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @umul_usausausa(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// Unsigned * unsigned. SIGNED: llvm.umul.fix at scale 8. UNSIGNED: the
// padding bit makes the value effectively signed-representable, so the
// signed intrinsic at scale 7 is used instead.
void umul_usausausa() {
  usa = usa * usa;
}

// SIGNED-LABEL: @umul_uausaua(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// SIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.umul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 16)
// SIGNED-NEXT:    store i32 [[TMP2]], i32* @ua, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @umul_uausaua(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
// UNSIGNED-NEXT:    store i32 [[TMP2]], i32* @ua, align 4
// UNSIGNED-NEXT:    ret void
//
// unsigned short _Accum is zext'd and upscaled by 8 to the unsigned _Accum
// scale. SIGNED uses umul.fix (scale 16); UNSIGNED uses smul.fix (scale 15).
void umul_uausaua() {
  ua = usa * ua;
}

// SIGNED-LABEL: @umul_usausausf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// SIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 8)
// SIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @umul_usausausf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
// UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// usf shares the scale of usa in both modes, so only a zext i8->i16 is
// needed; the intrinsic choice (umul vs smul) again depends on padding.
void umul_usausausf() {
  usa = usa * usf;
}

// SIGNED-LABEL: @umul_usausauf(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf, align 2
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
// SIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.umul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 16)
// SIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @umul_usausauf(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf, align 2
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
// UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// Mixed unsigned scales require an i24 intermediate; note the downscale uses
// lshr (not ashr) because the common type is unsigned in both modes.
void umul_usausauf() {
  usa = usa * uf;
}


// CHECK-LABEL: @int_sasai(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT:    [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// int operand is converted to fixed point: sext to i39 and shl by the
// short _Accum scale (7), then multiplied and truncated back to i16.
void int_sasai() {
  sa = sa * i;
}

// CHECK-LABEL: @int_sasaui(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// unsigned int needs one more bit than int (i40 vs i39) and is zext'd
// before the scale-7 upscale.
void int_sasaui() {
  sa = sa * ui;
}

// SIGNED-LABEL: @int_usausai(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @int_usausai(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i39 @llvm.smul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// unsigned short _Accum * signed int: the signed int forces the signed
// intrinsic in both modes; only the width/scale differ (i40/8 vs i39/7).
void int_usausai() {
  usa = usa * i;
}

// SIGNED-LABEL: @int_usausaui(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ui, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT:    [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @int_usausaui(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ui, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i39
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// Both operands unsigned, so llvm.umul.fix is used in both modes
// (i40/scale 8 without padding, i39/scale 7 with padding).
void int_usausaui() {
  usa = usa * ui;
}

// CHECK-LABEL: @int_lflfui(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @lf, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i64
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i64 [[RESIZE1]], 31
// CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.smul.fix.i64(i64 [[RESIZE]], i64 [[UPSCALE]], i32 31)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], i32* @lf, align 4
// CHECK-NEXT:    ret void
//
// unsigned int converted against long _Fract (scale 31): zext to i64 and
// shl by 31, multiply, then truncate back to i32.
void int_lflfui() {
  lf = lf * ui;
}

// CHECK-LABEL: @int_aab(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* @b, align 1
// CHECK-NEXT:    [[TOBOOL:%.*]] = trunc i8 [[TMP1]] to i1
// CHECK-NEXT:    [[CONV:%.*]] = zext i1 [[TOBOOL]] to i32
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[CONV]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE1]], 15
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[RESIZE]], i47 [[UPSCALE]], i32 15)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// _Bool first undergoes the usual conversion to int (trunc i8->i1, zext to
// i32) and is then handled exactly like an int operand.
void int_aab() {
  a = a * b;
}

// CHECK-LABEL: @int_aia(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @i, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i32 [[TMP0]] to i47
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i47 [[RESIZE]], 15
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i47
// CHECK-NEXT:    [[TMP2:%.*]] = call i47 @llvm.smul.fix.i47(i47 [[UPSCALE]], i47 [[RESIZE1]], i32 15)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i47 [[TMP2]] to i32
// CHECK-NEXT:    store i32 [[RESIZE2]], i32* @a, align 4
// CHECK-NEXT:    ret void
//
// Same as the fixed*int case but with the int on the left-hand side; the
// upscale is applied to the first intrinsic operand instead of the second.
void int_aia() {
  a = i * a;
}

// SIGNED-LABEL: @int_usauiusa(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ui, align 4
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 8
// SIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i40
// SIGNED-NEXT:    [[TMP2:%.*]] = call i40 @llvm.umul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 8)
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @int_usauiusa(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ui, align 4
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i32 [[TMP0]] to i39
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE]], 7
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i39
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i39 @llvm.umul.fix.i39(i39 [[UPSCALE]], i39 [[RESIZE1]], i32 7)
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[TMP2]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
// UNSIGNED-NEXT:    ret void
//
// unsigned int * unsigned short _Accum: fully unsigned, so llvm.umul.fix in
// both modes; widths/scales again track the padding setting.
void int_usauiusa() {
  usa = ui * usa;
}

// CHECK-LABEL: @int_sauisa(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[RESIZE:%.*]] = zext i32 [[TMP0]] to i40
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE]], 7
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i16 [[TMP1]] to i40
// CHECK-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.i40(i40 [[UPSCALE]], i40 [[RESIZE1]], i32 7)
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[TMP2]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa, align 2
// CHECK-NEXT:    ret void
//
// unsigned int * signed short _Accum: the signed fixed-point operand makes
// the result type signed, so both modes emit the same smul.fix.i40 sequence.
void int_sauisa() {
  sa = ui * sa;
}


// CHECK-LABEL: @sat_sassasas(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// CHECK-NEXT:    store i16 [[TMP2]], i16* @sa_sat, align 2
// CHECK-NEXT:    ret void
//
// Saturating result type at matching width/scale maps directly onto the
// .sat variant of the intrinsic; no extra clamping code is needed.
void sat_sassasas() {
  sa_sat = sa * sa_sat;
}

// SIGNED-LABEL: @sat_usasusausas(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 8)
// SIGNED-NEXT:    store i16 [[TMP2]], i16* @usa_sat, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @sat_usasusausas(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @usa_sat, align 2
// UNSIGNED-NEXT:    ret void
//
// SIGNED: umul.fix.sat at scale 8. UNSIGNED: smul.fix.sat at scale 7, then
// a trunc/zext pair to clear the padding bit of the stored result.
void sat_usasusausas() {
  usa_sat = usa * usa_sat;
}

// SIGNED-LABEL: @sat_uasuausas(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// SIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.umul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 16)
// SIGNED-NEXT:    store i32 [[TMP2]], i32* @ua_sat, align 4
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @sat_uasuausas(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
// UNSIGNED-NEXT:    store i32 [[RESIZE2]], i32* @ua_sat, align 4
// UNSIGNED-NEXT:    ret void
//
// Saturating unsigned _Accum result: operand widening as in the
// non-saturating case, plus the padding-bit clear (trunc i32->i31->zext)
// in UNSIGNED mode.
void sat_uasuausas() {
  ua_sat = ua * usa_sat;
}

// CHECK-LABEL: @sat_sassasi(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i39
// CHECK-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// CHECK-NEXT:    [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], -32768
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i39 -32768, i39 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa_sat, align 2
// CHECK-NEXT:    ret void
//
// Saturating fixed * int: the wide (i39) intrinsic cannot saturate to the
// narrow result by itself, so explicit icmp/select clamps to
// [-32768, 32767] follow before the truncation.
void sat_sassasi() {
  sa_sat = sa_sat * i;
}

// CHECK-LABEL: @sat_sassasui(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* @sa_sat, align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* @ui, align 4
// CHECK-NEXT:    [[RESIZE:%.*]] = sext i16 [[TMP0]] to i40
// CHECK-NEXT:    [[RESIZE1:%.*]] = zext i32 [[TMP1]] to i40
// CHECK-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 7
// CHECK-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 7)
// CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 32767
// CHECK-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i40 32767, i40 [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], -32768
// CHECK-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i40 -32768, i40 [[SATMAX]]
// CHECK-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// CHECK-NEXT:    store i16 [[RESIZE2]], i16* @sa_sat, align 2
// CHECK-NEXT:    ret void
//
// As above but with unsigned int, hence the i40 common width; same explicit
// clamp to the short _Accum range after the saturating multiply.
void sat_sassasui() {
  sa_sat = sa_sat * ui;
}

// SIGNED-LABEL: @sat_ufsufsufs(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
// SIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 16)
// SIGNED-NEXT:    store i16 [[TMP2]], i16* @uf_sat, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @sat_ufsufsufs(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 15)
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @uf_sat, align 2
// UNSIGNED-NEXT:    ret void
//
// Saturating unsigned _Fract: SIGNED uses umul.fix.sat at scale 16;
// UNSIGNED uses smul.fix.sat at scale 15 plus the padding-bit clear.
void sat_ufsufsufs() {
  uf_sat = uf_sat * uf_sat;
}

// SIGNED-LABEL: @sat_usasusasi(
// SIGNED-NEXT:  entry:
// SIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
// SIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// SIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i40
// SIGNED-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i40
// SIGNED-NEXT:    [[UPSCALE:%.*]] = shl i40 [[RESIZE1]], 8
// SIGNED-NEXT:    [[TMP2:%.*]] = call i40 @llvm.smul.fix.sat.i40(i40 [[RESIZE]], i40 [[UPSCALE]], i32 8)
// SIGNED-NEXT:    [[TMP3:%.*]] = icmp sgt i40 [[TMP2]], 65535
// SIGNED-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i40 65535, i40 [[TMP2]]
// SIGNED-NEXT:    [[TMP4:%.*]] = icmp slt i40 [[SATMAX]], 0
// SIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i40 0, i40 [[SATMAX]]
// SIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i40 [[SATMIN]] to i16
// SIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
// SIGNED-NEXT:    ret void
//
// UNSIGNED-LABEL: @sat_usasusasi(
// UNSIGNED-NEXT:  entry:
// UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa_sat, align 2
// UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @i, align 4
// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i39
// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = sext i32 [[TMP1]] to i39
// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i39 [[RESIZE1]], 7
// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i39 @llvm.smul.fix.sat.i39(i39 [[RESIZE]], i39 [[UPSCALE]], i32 7)
// UNSIGNED-NEXT:    [[TMP3:%.*]] = icmp sgt i39 [[TMP2]], 32767
// UNSIGNED-NEXT:    [[SATMAX:%.*]] = select i1 [[TMP3]], i39 32767, i39 [[TMP2]]
// UNSIGNED-NEXT:    [[TMP4:%.*]] = icmp slt i39 [[SATMAX]], 0
// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP4]], i39 0, i39 [[SATMAX]]
// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i39 [[SATMIN]] to i16
// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
// UNSIGNED-NEXT:    ret void
//
// Saturating unsigned fixed * signed int: signed saturating multiply in a
// wide type, then explicit clamping to [0, max] (65535 or 32767 depending
// on the padding mode) before truncating to the result.
void sat_usasusasi() {
  usa_sat = usa_sat * i;
}
