; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-none-eabi | FileCheck %s

define <4 x i16> @usra_v4i16(<8 x i8> %0) {
; CHECK-LABEL: usra_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.8b, v0.8b, #7
; CHECK-NEXT:    usra v0.4h, v0.4h, #7
; CHECK-NEXT:    ret
; A (lshr x, c) feeding an OR with x, even across a bitcast that changes the
; lane width, should be selected as a single USRA (unsigned shift right and
; accumulate), as shown by the CHECK lines above.
  %2 = lshr <8 x i8> %0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  %3 = bitcast <8 x i8> %2 to <4 x i16>
  %4 = lshr <4 x i16> %3, <i16 7, i16 7, i16 7, i16 7>
  %5 = or <4 x i16> %4, %3
  ret <4 x i16> %5
}

define <4 x i32> @usra_v4i32(<8 x i16> %0) {
; CHECK-LABEL: usra_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.8h, v0.8h, #15
; CHECK-NEXT:    usra v0.4s, v0.4s, #15
; CHECK-NEXT:    ret
; Same pattern as @usra_v4i16 at i16->i32 width: the second lshr+or pair is
; expected to fold into USRA despite the intervening bitcast.
  %2 = lshr <8 x i16> %0, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %3 = bitcast <8 x i16> %2 to <4 x i32>
  %4 = lshr <4 x i32> %3, <i32 15, i32 15, i32 15, i32 15>
  %5 = or <4 x i32> %4, %3
  ret <4 x i32> %5
}

define <2 x i64> @usra_v2i64(<4 x i32> %0) {
; CHECK-LABEL: usra_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.4s, v0.4s, #31
; CHECK-NEXT:    usra v0.2d, v0.2d, #31
; CHECK-NEXT:    ret
; Same pattern at i32->i64 width: lshr+or after a bitcast folds into USRA.
  %2 = lshr <4 x i32> %0, <i32 31, i32 31, i32 31, i32 31>
  %3 = bitcast <4 x i32> %2 to <2 x i64>
  %4 = lshr <2 x i64> %3, <i64 31, i64 31>
  %5 = or <2 x i64> %4, %3
  ret <2 x i64> %5
}

define <1 x i64> @usra_v1i64(<2 x i32> %0) {
; CHECK-LABEL: usra_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v0.2s, v0.2s, #31
; CHECK-NEXT:    usra d0, d0, #31
; CHECK-NEXT:    ret
; Single-element (scalar d-register) variant: the lshr+or on <1 x i64> is
; still expected to select the scalar USRA form.
  %2 = lshr <2 x i32> %0, <i32 31, i32 31>
  %3 = bitcast <2 x i32> %2 to <1 x i64>
  %4 = lshr <1 x i64> %3, <i64 31>
  %5 = or <1 x i64> %4, %3
  ret <1 x i64> %5
}

define <4 x i16> @ssra_v4i16(<4 x i16> %0) {
; CHECK-LABEL: ssra_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4h, v0.4h, #15
; CHECK-NEXT:    bic v0.4h, #64, lsl #8
; CHECK-NEXT:    ssra v1.4h, v0.4h, #14
; CHECK-NEXT:    fmov d0, d1
; CHECK-NEXT:    ret
  ; Clear bit 14 of each lane (49151 = 0xBFFF),
  ; e.g. 0b1111111111111111 -> 0b1011111111111111.
  %2 = and <4 x i16> %0, <i16 49151, i16 49151,i16 49151,i16 49151>
  ; Isolate the sign bit into bit 0; the upper 15 bits are zero.
  %3 = lshr <4 x i16> %0, <i16 15, i16 15, i16 15, i16 15>
  ; Arithmetic shift by 14: bit 0 of the result comes from bit 14, which
  ; was cleared above, so bit 0 is always zero here.
  %4 = ashr <4 x i16> %2, <i16 14, i16 14, i16 14, i16 14>
  ; %3 and %4 have no set bits in common, so this OR behaves like an add,
  ; allowing SSRA (signed shift right and accumulate) to be formed.
  %5 = or <4 x i16> %3, %4
  ret <4 x i16> %5
}

define <4 x i32> @ssra_v4i32(<4 x i32> %0) {
; CHECK-LABEL: ssra_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4s, v0.4s, #31
; CHECK-NEXT:    bic v0.4s, #64, lsl #24
; CHECK-NEXT:    ssra v1.4s, v0.4s, #30
; CHECK-NEXT:    mov v0.16b, v1.16b
; CHECK-NEXT:    ret
  ; Clear bit 30 of each lane (3221225471 = 0xBFFFFFFF).
  %2 = and <4 x i32> %0, <i32 3221225471, i32 3221225471,i32 3221225471,i32 3221225471>
  ; Isolate the sign bit into bit 0; the upper 31 bits are zero.
  %3 = lshr <4 x i32> %0, <i32 31, i32 31, i32 31, i32 31>
  ; Arithmetic shift by 30: bit 0 comes from the cleared bit 30, so bit 0
  ; is always zero and cannot collide with %3.
  %4 = ashr <4 x i32> %2, <i32 30, i32 30, i32 30, i32 30>
  ; Disjoint bits make this OR an add in disguise, enabling SSRA.
  %5 = or <4 x i32> %3, %4
  ret <4 x i32> %5
}

define <1 x i64> @ssra_v1i64(<2 x i32> %0) {
; CHECK-LABEL: ssra_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr d1, d0, #63
; CHECK-NEXT:    bic v0.2s, #64, lsl #24
; CHECK-NEXT:    ssra d1, d0, #62
; CHECK-NEXT:    fmov d0, d1
; CHECK-NEXT:    ret
; Scalar d-register SSRA variant across a bitcast. The i32-lane mask
; (0xBFFFFFFF) clears bit 62 of the i64 view, so the ashr-by-62 result has
; bit 0 always zero and is disjoint from the lshr-by-63 result, making the
; OR accumulate-like.
  %2 = and <2 x i32> %0, <i32 3221225471, i32 3221225471>
  %3 = bitcast <2 x i32> %2 to <1 x i64>
  %4 = lshr <1 x i64> %3, <i64 63>
  %5 = ashr <1 x i64> %3, <i64 62>
  %6 = or <1 x i64> %4, %5
  ret <1 x i64> %6
}

define <2 x i64> @ssra_v2i64(<4 x i32> %0) {
; CHECK-LABEL: ssra_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.2d, v0.2d, #63
; CHECK-NEXT:    bic v0.4s, #64, lsl #24
; CHECK-NEXT:    ssra v1.2d, v0.2d, #62
; CHECK-NEXT:    mov v0.16b, v1.16b
; CHECK-NEXT:    ret
; Same as @ssra_v1i64 but on a full 128-bit vector: masking the i32 lanes
; with 0xBFFFFFFF clears bit 62 of each i64 lane, so the ashr and lshr
; results are bitwise disjoint and the OR can be selected as SSRA.
  %2 = and <4 x i32> %0, <i32 3221225471, i32 3221225471,i32 3221225471,i32 3221225471>
  %3 = bitcast <4 x i32> %2 to <2 x i64>
  %4 = lshr <2 x i64> %3, <i64 63, i64 63>
  %5 = ashr <2 x i64> %3, <i64 62, i64 62>
  %6 = or <2 x i64> %4, %5
  ret <2 x i64> %6
}

; Expected to deduce that the movi produces a vector of small integers
; and turn USHR+ORR into USRA.
define <8 x i16> @usra_with_movi_v8i16(<16 x i8> %0, <16 x i8> %1) {
; CHECK-LABEL: usra_with_movi_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.16b, #1
; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    usra v0.8h, v0.8h, #7
; CHECK-NEXT:    ret
; The zext of an i1 compare yields lanes of 0 or 1 (materialized via movi+and
; in the CHECK lines), so the shifted value and the original cannot overlap
; and the lshr+or still folds into USRA.
  %3 = icmp eq <16 x i8> %0, %1
  %4 = zext <16 x i1> %3 to <16 x i8>
  %5 = bitcast <16 x i8> %4 to <8 x i16>
  %6 = lshr <8 x i16> %5, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
  %7 = or <8 x i16> %6, %5
  ret <8 x i16> %7
}

; Expected to deduce that the movi produces a vector of small integers
; and turn USHR+ORR into USRA.
define <4 x i32> @usra_with_movi_v4i32(<16 x i8> %0, <16 x i8> %1) {
; CHECK-LABEL: usra_with_movi_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.16b, #1
; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    usra v0.4s, v0.4s, #15
; CHECK-NEXT:    ret
; Same as @usra_with_movi_v8i16 but reinterpreted as i32 lanes; the
; lshr-by-15 + or pair is still expected to become a single USRA.
  %3 = icmp eq <16 x i8> %0, %1
  %4 = zext <16 x i1> %3 to <16 x i8>
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  %6 = lshr <4 x i32> %5, <i32 15, i32 15, i32 15, i32 15>
  %7 = or <4 x i32> %6, %5
  ret <4 x i32> %7
}

; Expected to deduce that the movi produces a vector of small integers
; and turn USHR+ORR into USRA.
define <2 x i64> @usra_with_movi_v2i64(<16 x i8> %0, <16 x i8> %1) {
; CHECK-LABEL: usra_with_movi_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.16b, #1
; CHECK-NEXT:    cmeq v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    usra v0.2d, v0.2d, #31
; CHECK-NEXT:    ret
; Same as @usra_with_movi_v8i16 but reinterpreted as i64 lanes; the
; lshr-by-31 + or pair is still expected to become a single USRA.
  %3 = icmp eq <16 x i8> %0, %1
  %4 = zext <16 x i1> %3 to <16 x i8>
  %5 = bitcast <16 x i8> %4 to <2 x i64>
  %6 = lshr <2 x i64> %5, <i64 31, i64 31>
  %7 = or <2 x i64> %6, %5
  ret <2 x i64> %7
}
