; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
;
; Check that ld1r* instruction is generated to splat scalar during load,
; rather than mov from scalar to vector register (which would require the vector unit).
;
; one-off: ld1r_stack checks that ld1rb works with stack objects.
;
; Test axes:
;   types = [i8, i16, i32, i64, half, float, double]
;   methods = [direct load, gep upper bound - 1, gep out of range x {neg,pos}, sext..., zext..., unpacked_floats...]
;

@g8 = external global i8

; One-off test for splatted value coming from stack load.
; One-off: splat of an i8 reloaded from a stack slot. The interesting check is
; the final addressing mode [sp, #14]: the frame index and the +2 gep offset
; are folded directly into the ld1rb immediate.
; NOTE(review): the gep points 2 bytes past the 1-byte alloca; the test only
; exercises address folding, not the loaded value.
define <vscale x 16 x i8> @ld1r_stack() {
; CHECK-LABEL: ld1r_stack:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    adrp x8, :got:g8
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ldr x8, [x8, :got_lo12:g8]
; CHECK-NEXT:    ldrb w8, [x8]
; CHECK-NEXT:    strb w8, [sp, #12]
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [sp, #14]
; CHECK-NEXT:    add sp, sp, #16
; CHECK-NEXT:    ret
  %valp = alloca i8
  %valp2  = load volatile i8, ptr @g8
  store volatile i8 %valp2, ptr %valp
  %valp3 = getelementptr i8, ptr %valp, i32 2
  %val = load i8, ptr %valp3
  %1 = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
  %2 = shufflevector <vscale x 16 x i8> %1, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ret <vscale x 16 x i8> %2
}

; i8 splat, direct load: folds to ld1rb with no offset.
define <vscale x 16 x i8> @ld1rb(ptr %valp) {
; CHECK-LABEL: ld1rb:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
  %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ret <vscale x 16 x i8> %shf
}

; i8 splat, gep at the upper bound of the immediate (#63): offset is folded
; into the ld1rb addressing mode.
define <vscale x 16 x i8> @ld1rb_gep(ptr %valp) {
; CHECK-LABEL: ld1rb_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0, #63]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i8, ptr %valp, i32 63
  %val = load i8, ptr %valp2
  %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
  %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ret <vscale x 16 x i8> %shf
}

; i8 splat, gep one past the immediate range (#64): the address must be
; materialized with a separate add.
define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rb_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #64
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i8, ptr %valp, i32 64
  %val = load i8, ptr %valp2
  %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
  %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ret <vscale x 16 x i8> %shf
}

; i8 splat, negative gep (-1): below the immediate range, so a sub is emitted.
define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rb_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #1
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i8, ptr %valp, i32 -1
  %val = load i8, ptr %valp2
  %ins = insertelement <vscale x 16 x i8> undef, i8 %val, i32 0
  %shf = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  ret <vscale x 16 x i8> %shf
}

; i8 -> i16 zero-extending splat: ld1rb loading bytes into .h elements.
define <vscale x 8 x i16> @ld1rb_i8_i16_zext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i16_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rb { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = zext i8 %val to i16
  %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i8 -> i16 sign-extending splat: selects the signed variant ld1rsb.
define <vscale x 8 x i16> @ld1rb_i8_i16_sext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i16_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rsb { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = sext i8 %val to i16
  %ins = insertelement <vscale x 8 x i16> undef, i16 %ext, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i8 -> i32 zero-extending splat into .s elements.
define <vscale x 4 x i32> @ld1rb_i8_i32_zext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i32_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rb { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = zext i8 %val to i32
  %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i8 -> i32 sign-extending splat into .s elements (ld1rsb).
define <vscale x 4 x i32> @ld1rb_i8_i32_sext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i32_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rsb { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = sext i8 %val to i32
  %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i8 -> i64 zero-extending splat into .d elements.
define <vscale x 2 x i64> @ld1rb_i8_i64_zext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rb { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = zext i8 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i8 -> i64 sign-extending splat into .d elements (ld1rsb).
define <vscale x 2 x i64> @ld1rb_i8_i64_sext(ptr %valp) {
; CHECK-LABEL: ld1rb_i8_i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i8, ptr %valp
  %ext = sext i8 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i16 splat, direct load: ld1rh with no offset.
define <vscale x 8 x i16> @ld1rh(ptr %valp) {
; CHECK-LABEL: ld1rh:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i16, ptr %valp
  %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i16 splat, gep at the upper bound (element 63 = byte offset #126, folded).
define <vscale x 8 x i16> @ld1rh_gep(ptr %valp) {
; CHECK-LABEL: ld1rh_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i16, ptr %valp, i32 63
  %val = load i16, ptr %valp2
  %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i16 splat, element 64 (byte 128) is out of immediate range: separate add.
define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rh_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #128
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i16, ptr %valp, i32 64
  %val = load i16, ptr %valp2
  %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i16 splat, negative gep (-1 element = -2 bytes): materialized with sub.
define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rh_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #2
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i16, ptr %valp, i32 -1
  %val = load i16, ptr %valp2
  %ins = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
  %shf = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x i16> %shf
}

; i16 -> i32 zero-extending splat: ld1rh into .s elements.
define <vscale x 4 x i32> @ld1rh_i16_i32_zext(ptr %valp) {
; CHECK-LABEL: ld1rh_i16_i32_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i16, ptr %valp
  %ext = zext i16 %val to i32
  %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i16 -> i32 sign-extending splat: selects ld1rsh.
define <vscale x 4 x i32> @ld1rh_i16_i32_sext(ptr %valp) {
; CHECK-LABEL: ld1rh_i16_i32_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rsh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i16, ptr %valp
  %ext = sext i16 %val to i32
  %ins = insertelement <vscale x 4 x i32> undef, i32 %ext, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i16 -> i64 zero-extending splat: ld1rh into .d elements.
define <vscale x 2 x i64> @ld1rh_i16_i64_zext(ptr %valp) {
; CHECK-LABEL: ld1rh_i16_i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i16, ptr %valp
  %ext = zext i16 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i16 -> i64 sign-extending splat: ld1rsh into .d elements.
define <vscale x 2 x i64> @ld1rh_i16_i64_sext(ptr %valp) {
; CHECK-LABEL: ld1rh_i16_i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rsh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i16, ptr %valp
  %ext = sext i16 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i32 splat, direct load: ld1rw with no offset.
define <vscale x 4 x i32> @ld1rw(ptr %valp) {
; CHECK-LABEL: ld1rw:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valp
  %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i32 splat, gep at the upper bound (element 63 = byte offset #252, folded).
define <vscale x 4 x i32> @ld1rw_gep(ptr %valp) {
; CHECK-LABEL: ld1rw_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i32, ptr %valp, i32 63
  %val = load i32, ptr %valp2
  %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i32 splat, element 64 (byte 256) is out of immediate range: separate add.
define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rw_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #256
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i32, ptr %valp, i32 64
  %val = load i32, ptr %valp2
  %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i32 splat, negative gep (-1 element = -4 bytes): materialized with sub.
define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rw_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #4
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i32, ptr %valp, i32 -1
  %val = load i32, ptr %valp2
  %ins = insertelement <vscale x 4 x i32> undef, i32 %val, i32 0
  %shf = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x i32> %shf
}

; i32 -> i64 zero-extending splat: ld1rw into .d elements.
define <vscale x 2 x i64> @ld1rw_i32_i64_zext(ptr %valp) {
; CHECK-LABEL: ld1rw_i32_i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valp
  %ext = zext i32 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i32 -> i64 sign-extending splat: selects ld1rsw.
define <vscale x 2 x i64> @ld1rw_i32_i64_sext(ptr %valp) {
; CHECK-LABEL: ld1rw_i32_i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rsw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i32, ptr %valp
  %ext = sext i32 %val to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i64 splat, direct load: ld1rd with no offset.
define <vscale x 2 x i64> @ld1rd(ptr %valp) {
; CHECK-LABEL: ld1rd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load i64, ptr %valp
  %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i64 splat, gep at the upper bound (element 63 = byte offset #504, folded).
define <vscale x 2 x i64> @ld1rd_gep(ptr %valp) {
; CHECK-LABEL: ld1rd_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i64, ptr %valp, i32 63
  %val = load i64, ptr %valp2
  %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i64 splat, element 64 (byte 512) is out of immediate range: separate add.
define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rd_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #512
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i64, ptr %valp, i32 64
  %val = load i64, ptr %valp2
  %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; i64 splat, negative gep (-1 element = -8 bytes): materialized with sub.
define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rd_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #8
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr i64, ptr %valp, i32 -1
  %val = load i64, ptr %valp2
  %ins = insertelement <vscale x 2 x i64> undef, i64 %val, i32 0
  %shf = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x i64> %shf
}

; half splat, direct load: same ld1rh selection as the integer i16 case.
define <vscale x 8 x half> @ld1rh_half(ptr %valp) {
; CHECK-LABEL: ld1rh_half:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load half, ptr %valp
  %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %shf
}

; half splat, gep at the upper bound (byte offset #126, folded).
define <vscale x 8 x half> @ld1rh_half_gep(ptr %valp) {
; CHECK-LABEL: ld1rh_half_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0, #126]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 63
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %shf
}

; half splat, byte 128 is out of immediate range: separate add.
define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rh_half_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #128
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 64
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %shf
}

; half splat, negative gep: materialized with sub.
define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rh_half_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #2
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 -1
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 8 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 8 x half> %ins, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %shf
}

; Unpacked half splat (nxv4f16 held in .s containers): ld1rh with a .s
; element suffix.
define <vscale x 4 x half> @ld1rh_half_unpacked4(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load half, ptr %valp
  %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x half> %shf
}

; Unpacked half splat, gep at the upper bound (#126, folded).
define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0, #126]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 63
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x half> %shf
}

; Unpacked half splat, byte 128 is out of immediate range: separate add.
define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #128
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 64
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x half> %shf
}

; Unpacked half splat, negative gep: materialized with sub.
define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #2
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 -1
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 4 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 4 x half> %ins, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x half> %shf
}

; Unpacked half splat (nxv2f16 held in .d containers): ld1rh with a .d
; element suffix.
define <vscale x 2 x half> @ld1rh_half_unpacked2(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load half, ptr %valp
  %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x half> %shf
}

; Unpacked half splat, gep at the upper bound (#126, folded).
define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0, #126]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 63
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x half> %shf
}

; Unpacked half splat, byte 128 is out of immediate range: separate add.
define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #128
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 64
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x half> %shf
}

; Unpacked half splat, negative gep: materialized with sub.
define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #2
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr half, ptr %valp, i32 -1
  %val = load half, ptr %valp2
  %ins = insertelement <vscale x 2 x half> undef, half %val, i32 0
  %shf = shufflevector <vscale x 2 x half> %ins, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x half> %shf
}

; float splat, direct load: same ld1rw selection as the integer i32 case.
define <vscale x 4 x float> @ld1rw_float(ptr %valp) {
; CHECK-LABEL: ld1rw_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load float, ptr %valp
  %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x float> %shf
}

; float splat, gep at the upper bound (#252, folded).
define <vscale x 4 x float> @ld1rw_float_gep(ptr %valp) {
; CHECK-LABEL: ld1rw_float_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0, #252]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 63
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x float> %shf
}

; float splat, byte 256 is out of immediate range: separate add.
define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rw_float_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #256
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 64
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x float> %shf
}

; float splat, negative gep: materialized with sub.
define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rw_float_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #4
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 -1
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 4 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
  ret <vscale x 4 x float> %shf
}

; Unpacked float splat (nxv2f32 held in .d containers): ld1rw with a .d
; element suffix.
define <vscale x 2 x float> @ld1rw_float_unpacked2(ptr %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load float, ptr %valp
  %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x float> %shf
}

; Unpacked float splat, gep at the upper bound (#252, folded).
define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(ptr %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0, #252]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 63
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x float> %shf
}

; Unpacked float splat, byte 256 is out of immediate range: separate add.
define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #256
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 64
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x float> %shf
}

; Unpacked float splat, negative gep: materialized with sub.
define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #4
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr float, ptr %valp, i32 -1
  %val = load float, ptr %valp2
  %ins = insertelement <vscale x 2 x float> undef, float %val, i32 0
  %shf = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x float> %shf
}

; double splat, direct load: same ld1rd selection as the integer i64 case.
define <vscale x 2 x double> @ld1rd_double(ptr %valp) {
; CHECK-LABEL: ld1rd_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %val = load double, ptr %valp
  %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
  %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x double> %shf
}

; double splat, gep at the upper bound (#504, folded).
define <vscale x 2 x double> @ld1rd_double_gep(ptr %valp) {
; CHECK-LABEL: ld1rd_double_gep:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0, #504]
; CHECK-NEXT:    ret
  %valp2 = getelementptr double, ptr %valp, i32 63
  %val = load double, ptr %valp2
  %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
  %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x double> %shf
}

; double splat, byte 512 is out of immediate range: separate add.
define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(ptr %valp) {
; CHECK-LABEL: ld1rd_double_gep_out_of_range_up:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #512
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr double, ptr %valp, i32 64
  %val = load double, ptr %valp2
  %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
  %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x double> %shf
}

; double splat, negative gep: materialized with sub.
define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(ptr %valp) {
; CHECK-LABEL: ld1rd_double_gep_out_of_range_down:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub x8, x0, #8
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT:    ret
  %valp2 = getelementptr double, ptr %valp, i32 -1
  %val = load double, ptr %valp2
  %ins = insertelement <vscale x 2 x double> undef, double %val, i32 0
  %shf = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
  ret <vscale x 2 x double> %shf
}

; dupq.lane with lane 0 of a loaded fixed 128-bit vector: folds to the
; load-replicate-quadword instruction ld1rqd.
define <vscale x 2 x double> @dupq_ld1rqd_f64(ptr %a) {
; CHECK-LABEL: dupq_ld1rqd_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <2 x double>, ptr %a
  %2 = tail call fast <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> %1, i64 0)
  %3 = tail call fast <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> %2, i64 0)
  ret <vscale x 2 x double> %3
}

; As above for f32: ld1rqw on .s elements.
define <vscale x 4 x float> @dupq_ld1rqw_f32(ptr %a) {
; CHECK-LABEL: dupq_ld1rqw_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <4 x float>, ptr %a
  %2 = tail call fast <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %1, i64 0)
  %3 = tail call fast <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %2, i64 0)
  ret <vscale x 4 x float> %3
}

; As above for f16: ld1rqh on .h elements.
define <vscale x 8 x half> @dupq_ld1rqh_f16(ptr %a) {
; CHECK-LABEL: dupq_ld1rqh_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <8 x half>, ptr %a
  %2 = tail call fast <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> undef, <8 x half> %1, i64 0)
  %3 = tail call fast <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %2, i64 0)
  ret <vscale x 8 x half> %3
}

; As above for bf16. NOTE(review): attribute set #0 is declared outside this
; chunk — presumably it enables +bf16; confirm against the full file.
define <vscale x 8 x bfloat> @dupq_ld1rqh_bf16(ptr %a) #0 {
; CHECK-LABEL: dupq_ld1rqh_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <8 x bfloat>, ptr %a
  %2 = tail call fast <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> %1, i64 0)
  %3 = tail call fast <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat> %2, i64 0)
  ret <vscale x 8 x bfloat> %3
}

; dupq.lane lane-0 of a loaded <2 x i64>: folds to ld1rqd on .d elements.
define <vscale x 2 x i64> @dupq_ld1rqd_i64(ptr %a) #0 {
; CHECK-LABEL: dupq_ld1rqd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    ld1rqd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <2 x i64>, ptr %a
  %2 = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> %1, i64 0)
  %3 = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> %2, i64 0)
  ret <vscale x 2 x i64> %3
}

; As above for i32: ld1rqw on .s elements.
define <vscale x 4 x i32> @dupq_ld1rqw_i32(ptr %a) #0 {
; CHECK-LABEL: dupq_ld1rqw_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1rqw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <4 x i32>, ptr %a
  %2 = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> %1, i64 0)
  %3 = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> %2, i64 0)
  ret <vscale x 4 x i32> %3
}

; dupq.lane lane-0 of a loaded <8 x i16>: folds to ld1rqh on .h elements.
; Renamed from dupq_ld1rqw_i16: the element type is i16, so the selected
; instruction is ld1rqh (halfword), not ld1rqw — the old name contradicted
; the CHECK lines and the sibling naming scheme (dupq_ld1rq<sz>_<ty>).
define <vscale x 8 x i16> @dupq_ld1rqh_i16(ptr %a) #0 {
; CHECK-LABEL: dupq_ld1rqh_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    ld1rqh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <8 x i16>, ptr %a
  %2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> undef, <8 x i16> %1, i64 0)
  %3 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> %2, i64 0)
  ret <vscale x 8 x i16> %3
}

; dupq.lane lane-0 of a loaded <16 x i8>: folds to ld1rqb on .b elements.
; Renamed from dupq_ld1rqw_i8: the element type is i8, so the selected
; instruction is ld1rqb (byte), not ld1rqw — the old name contradicted the
; CHECK lines and the sibling naming scheme (dupq_ld1rq<sz>_<ty>).
define <vscale x 16 x i8> @dupq_ld1rqb_i8(ptr %a) #0 {
; CHECK-LABEL: dupq_ld1rqb_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ld1rqb { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
  %1 = load <16 x i8>, ptr %a
  %2 = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> undef, <16 x i8> %1, i64 0)
  %3 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> %2, i64 0)
  ret <vscale x 16 x i8> %3
}

;
;
; Tests for dup:
;
; Positive tests:
; * dup with passthru=undef or passthru=zero.
; * sign/zero extending.
; * unpacked types.
;
; Negative tests:
; * dup with passthru as a parameter.
;
;

; sve.dup of a loaded i8 with an undef passthru folds to a predicated ld1rb.
define <vscale x 16 x i8> @dup_ld1rb_i8_passthruundef_nxv16i8(<vscale x 16 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rb_i8_passthruundef_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %ld)
    ret <vscale x 16 x i8> %res
}
; sve.dup of a loaded i16 with an undef passthru folds to ld1rh.
define <vscale x 8 x i16> @dup_ld1rh_i16_passthruundef_nxv8i16(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_i16_passthruundef_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ld)
    ret <vscale x 8 x i16> %res
}
; sext(load i8) splatted to nxv8i16: folds to the sign-extending ld1rsb.
define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_sext(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_i8_passthruundef_nxv8i16_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsb { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = sext i8 %ld to i16
    %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
    ret <vscale x 8 x i16> %res
}
; zext(load i8) splatted to nxv8i16: folds to the zero-extending ld1rb (.h).
define <vscale x 8 x i16> @dup_ld1rh_i8_passthruundef_nxv8i16_zext(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_i8_passthruundef_nxv8i16_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rb { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = zext i8 %ld to i16
    %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, i16 %ext)
    ret <vscale x 8 x i16> %res
}
; sve.dup of a loaded i32 with an undef passthru folds to ld1rw.
define <vscale x 4 x i32> @dup_ld1rs_i32_passthruundef_nxv4i32(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i32, ptr %addr
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ld)
    ret <vscale x 4 x i32> %res
}
; sext(load i8) splatted to nxv4i32: folds to ld1rsb (.s).
define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv4i32_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsb { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = sext i8 %ld to i32
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
    ret <vscale x 4 x i32> %res
}
; zext(load i8) splatted to nxv4i32: folds to ld1rb (.s).
define <vscale x 4 x i32> @dup_ld1rs_i8_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv4i32_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rb { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = zext i8 %ld to i32
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
    ret <vscale x 4 x i32> %res
}
; sext(load i16) splatted to nxv4i32: folds to ld1rsh (.s).
define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_sext(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv4i32_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %ext = sext i16 %ld to i32
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
    ret <vscale x 4 x i32> %res
}
; zext(load i16) splatted to nxv4i32: folds to ld1rh (.s).
define <vscale x 4 x i32> @dup_ld1rs_i16_passthruundef_nxv4i32_zext(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv4i32_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %ext = zext i16 %ld to i32
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, i32 %ext)
    ret <vscale x 4 x i32> %res
}
; sve.dup of a loaded i64 with an undef passthru folds to ld1rd.
define <vscale x 2 x i64> @dup_ld1rd_i64_passthruundef_nxv2i64(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rd_i64_passthruundef_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i64, ptr %addr
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ld)
    ret <vscale x 2 x i64> %res
}
; sext(load i8) splatted to nxv2i64: folds to ld1rsb (.d).
define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv2i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = sext i8 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; zext(load i8) splatted to nxv2i64: folds to ld1rb (.d).
define <vscale x 2 x i64> @dup_ld1rs_i8_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i8_passthruundef_nxv2i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rb { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %ext = zext i8 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; sext(load i16) splatted to nxv2i64: folds to ld1rsh (.d).
define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv2i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %ext = sext i16 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; zext(load i16) splatted to nxv2i64: folds to ld1rh (.d).
define <vscale x 2 x i64> @dup_ld1rs_i16_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i16_passthruundef_nxv2i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %ext = zext i16 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; sext(load i32) splatted to nxv2i64: folds to ld1rsw (.d).
define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_sext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv2i64_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rsw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i32, ptr %addr
    %ext = sext i32 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; zext(load i32) splatted to nxv2i64: folds to ld1rw (.d).
define <vscale x 2 x i64> @dup_ld1rs_i32_passthruundef_nxv2i64_zext(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i32_passthruundef_nxv2i64_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i32, ptr %addr
    %ext = zext i32 %ld to i64
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
    ret <vscale x 2 x i64> %res
}
; sve.dup of a loaded half with an undef passthru folds to ld1rh.
define <vscale x 8 x half> @dup_ld1rh_half_passthruundef_nxv8f16(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_half_passthruundef_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, half %ld)
    ret <vscale x 8 x half> %res
}
; sve.dup of a loaded float with an undef passthru folds to ld1rw.
define <vscale x 4 x float> @dup_ld1rs_float_passthruundef_nxv4f32(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_float_passthruundef_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load float, ptr %addr
    %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, float %ld)
    ret <vscale x 4 x float> %res
}
; sve.dup of a loaded double with an undef passthru folds to ld1rd.
define <vscale x 2 x double> @dup_ld1rd_double_passthruundef_nxv2f64(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rd_double_passthruundef_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load double, ptr %addr
    %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, double %ld)
    ret <vscale x 2 x double> %res
}
; Unpacked nxv4f16 (half elements in .s containers): still folds to ld1rh.
define <vscale x 4 x half> @dup_ld1rh_half_passthruundef_nxv4f16(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_half_passthruundef_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x i1> %pg, half %ld)
    ret <vscale x 4 x half> %res
}
; Zero passthru matches ld1r's zeroing predication (p0/z), so ld1rb is used.
define <vscale x 16 x i8> @dup_ld1rb_i8_passthruzero_nxv16i8(<vscale x 16 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rb_i8_passthruzero_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %ld)
    ret <vscale x 16 x i8> %res
}
; Zero passthru folds to zeroing-predicated ld1rh.
define <vscale x 8 x i16> @dup_ld1rh_i16_passthruzero_nxv8i16(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_i16_passthruzero_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 %ld)
    ret <vscale x 8 x i16> %res
}
; Zero passthru folds to zeroing-predicated ld1rw.
define <vscale x 4 x i32> @dup_ld1rs_i32_passthruzero_nxv4i32(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_i32_passthruzero_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i32, ptr %addr
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 %ld)
    ret <vscale x 4 x i32> %res
}
; Zero passthru folds to zeroing-predicated ld1rd.
define <vscale x 2 x i64> @dup_ld1rd_i64_passthruzero_nxv2i64(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rd_i64_passthruzero_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load i64, ptr %addr
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 %ld)
    ret <vscale x 2 x i64> %res
}
; Zero passthru with a half splat folds to zeroing-predicated ld1rh.
define <vscale x 8 x half> @dup_ld1rh_half_passthruzero_nxv8f16(<vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %pg, half %ld)
    ret <vscale x 8 x half> %res
}
; Zero passthru with a float splat folds to zeroing-predicated ld1rw.
define <vscale x 4 x float> @dup_ld1rs_float_passthruzero_nxv4f32(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_float_passthruzero_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load float, ptr %addr
    %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %pg, float %ld)
    ret <vscale x 4 x float> %res
}
; Zero passthru with a double splat folds to zeroing-predicated ld1rd.
define <vscale x 2 x double> @dup_ld1rd_double_passthruzero_nxv2f64(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rd_double_passthruzero_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load double, ptr %addr
    %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %pg, double %ld)
    ret <vscale x 2 x double> %res
}
; Unpacked nxv4f16 with zero passthru: ld1rh into .s containers.
define <vscale x 4 x half> @dup_ld1rh_half_passthruzero_nxv4f16(<vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half> zeroinitializer, <vscale x 4 x i1> %pg, half %ld)
    ret <vscale x 4 x half> %res
}
; Unpacked nxv2f16 with zero passthru: ld1rh into .d containers.
define <vscale x 2 x half> @dup_ld1rh_half_passthruzero_nxv2f16(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rh_half_passthruzero_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 2 x half> @llvm.aarch64.sve.dup.nxv2f16(<vscale x 2 x half> zeroinitializer, <vscale x 2 x i1> %pg, half %ld)
    ret <vscale x 2 x half> %res
}
; Unpacked nxv2f32 with zero passthru: ld1rw into .d containers.
define <vscale x 2 x float> @dup_ld1rs_float_passthruzero_nxv2f32(<vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: dup_ld1rs_float_passthruzero_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
    %ld = load float, ptr %addr
    %res = call <vscale x 2 x float> @llvm.aarch64.sve.dup.nxv2f32(<vscale x 2 x float> zeroinitializer, <vscale x 2 x i1> %pg, float %ld)
    ret <vscale x 2 x float> %res
}
; Negative test: a non-undef, non-zero passthru cannot use ld1r's zeroing
; predication; a scalar load + merging mov is emitted instead.
define <vscale x 16 x i8> @negtest_dup_ld1rb_i8_passthru_nxv16i8(<vscale x 16 x i8> %pt, <vscale x 16 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rb_i8_passthru_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrb w8, [x0]
; CHECK-NEXT:    mov z0.b, p0/m, w8
; CHECK-NEXT:    ret
    %ld = load i8, ptr %addr
    %res = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %pt, <vscale x 16 x i1> %pg, i8 %ld)
    ret <vscale x 16 x i8> %res
}
; Negative test: passthru parameter blocks ld1rh; scalar load + merging mov.
define <vscale x 8 x i16> @negtest_dup_ld1rh_i16_passthru_nxv8i16(<vscale x 8 x i16> %pt, <vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rh_i16_passthru_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrh w8, [x0]
; CHECK-NEXT:    mov z0.h, p0/m, w8
; CHECK-NEXT:    ret
    %ld = load i16, ptr %addr
    %res = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %pt, <vscale x 8 x i1> %pg, i16 %ld)
    ret <vscale x 8 x i16> %res
}
; Negative test: passthru parameter blocks ld1rw; scalar load + merging mov.
define <vscale x 4 x i32> @negtest_dup_ld1rs_i32_passthru_nxv4i32(<vscale x 4 x i32> %pt, <vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rs_i32_passthru_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr w8, [x0]
; CHECK-NEXT:    mov z0.s, p0/m, w8
; CHECK-NEXT:    ret
    %ld = load i32, ptr %addr
    %res = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %pt, <vscale x 4 x i1> %pg, i32 %ld)
    ret <vscale x 4 x i32> %res
}
; Negative test: passthru parameter blocks ld1rd; scalar load + merging mov.
define <vscale x 2 x i64> @negtest_dup_ld1rd_i64_passthru_nxv2i64(<vscale x 2 x i64> %pt, <vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rd_i64_passthru_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr x8, [x0]
; CHECK-NEXT:    mov z0.d, p0/m, x8
; CHECK-NEXT:    ret
    %ld = load i64, ptr %addr
    %res = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %pt, <vscale x 2 x i1> %pg, i64 %ld)
    ret <vscale x 2 x i64> %res
}
; Negative test: passthru parameter blocks ld1rh; FP scalar load + merging mov.
define <vscale x 8 x half> @negtest_dup_ld1rh_half_passthru_nxv8f16(<vscale x 8 x half> %pt, <vscale x 8 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rh_half_passthru_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr h1, [x0]
; CHECK-NEXT:    mov z0.h, p0/m, h1
; CHECK-NEXT:    ret
    %ld = load half, ptr %addr
    %res = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %pt, <vscale x 8 x i1> %pg, half %ld)
    ret <vscale x 8 x half> %res
}
; Negative test: passthru parameter blocks ld1rw; FP scalar load + merging mov.
define <vscale x 4 x float> @negtest_dup_ld1rs_float_passthru_nxv4f32(<vscale x 4 x float> %pt, <vscale x 4 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rs_float_passthru_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr s1, [x0]
; CHECK-NEXT:    mov z0.s, p0/m, s1
; CHECK-NEXT:    ret
    %ld = load float, ptr %addr
    %res = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %pt, <vscale x 4 x i1> %pg, float %ld)
    ret <vscale x 4 x float> %res
}
; Negative test: passthru parameter blocks ld1rd; FP scalar load + merging mov.
define <vscale x 2 x double> @negtest_dup_ld1rd_double_passthru_nxv2f64(<vscale x 2 x double> %pt, <vscale x 2 x i1> %pg, ptr %addr) {
; CHECK-LABEL: negtest_dup_ld1rd_double_passthru_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr d1, [x0]
; CHECK-NEXT:    mov z0.d, p0/m, d1
; CHECK-NEXT:    ret
    %ld = load double, ptr %addr
    %res = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %pt, <vscale x 2 x i1> %pg, double %ld)
    ret <vscale x 2 x double> %res
}


; Check that a load consumed by a scalable splat prefers a replicating load
; over a pre-indexed load.
; Converted from legacy typed pointers (i8*, <vscale x 2 x i64>*) to opaque
; 'ptr', consistent with the rest of this file; typed-pointer syntax is
; rejected by modern IR parsers. Codegen (and the CHECK lines) is unchanged.
define ptr @avoid_preindex_load(ptr %src, ptr %out) {
; CHECK-LABEL: avoid_preindex_load:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    add x8, x0, #1
; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0, #1]
; CHECK-NEXT:    mov x0, x8
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %src, i64 1
  %tmp = load i8, ptr %ptr, align 4
  %ext = sext i8 %tmp to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  store <vscale x 2 x i64> %dup, ptr %out
  ret ptr %ptr
}

; Check that a load consumed by a scalable splat prefers a replicating
; load over a pre-indexed load.
; Converted from legacy typed pointers to opaque 'ptr', consistent with the
; rest of this file; codegen and CHECK lines are unchanged.
define ptr @avoid_preindex_load_dup(ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
; CHECK-LABEL: avoid_preindex_load_dup:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #1
; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0, #1]
; CHECK-NEXT:    mov x0, x8
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %src, i64 1
  %tmp = load i8, ptr %ptr, align 4
  %ext = sext i8 %tmp to i64
  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, i64 %ext)
  store <vscale x 2 x i64> %dup, ptr %out
  ret ptr %ptr
}

; Same as avoid_preindex_load_dup, but with zero passthru.
; Converted from legacy typed pointers to opaque 'ptr', consistent with the
; rest of this file; codegen and CHECK lines are unchanged.
define ptr @avoid_preindex_load_dup_passthru_zero(ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
; CHECK-LABEL: avoid_preindex_load_dup_passthru_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add x8, x0, #1
; CHECK-NEXT:    ld1rsb { z0.d }, p0/z, [x0, #1]
; CHECK-NEXT:    mov x0, x8
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %src, i64 1
  %tmp = load i8, ptr %ptr, align 4
  %ext = sext i8 %tmp to i64
  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 %ext)
  store <vscale x 2 x i64> %dup, ptr %out
  ret ptr %ptr
}

; If a dup has a non-undef passthru, stick with the pre-indexed load.
; Converted from legacy typed pointers to opaque 'ptr', consistent with the
; rest of this file; codegen and CHECK lines are unchanged.
define ptr @preindex_load_dup_passthru(<vscale x 2 x i64> %passthru, ptr %src, <vscale x 2 x i1> %pg, ptr %out) {
; CHECK-LABEL: preindex_load_dup_passthru:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrsb x8, [x0, #1]!
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    mov z0.d, p0/m, x8
; CHECK-NEXT:    st1d { z0.d }, p1, [x1]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %src, i64 1
  %tmp = load i8, ptr %ptr, align 4
  %ext = sext i8 %tmp to i64
  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %passthru, <vscale x 2 x i1> %pg, i64 %ext)
  store <vscale x 2 x i64> %dup, ptr %out
  ret ptr %ptr
}

; Show that a second user of the load prevents the replicating load
; check which would ordinarily inhibit indexed loads from firing.
; Converted from legacy typed pointers to opaque 'ptr', consistent with the
; rest of this file; codegen and CHECK lines are unchanged.
define ptr @preidx8sext64_instead_of_ld1r(ptr %src, ptr %out, ptr %dst) {
; CHECK-LABEL: preidx8sext64_instead_of_ld1r:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldrsb x8, [x0, #1]!
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z0.d, x8
; CHECK-NEXT:    st1d { z0.d }, p0, [x1]
; CHECK-NEXT:    str x8, [x2]
; CHECK-NEXT:    ret
  %ptr = getelementptr inbounds i8, ptr %src, i64 1
  %tmp = load i8, ptr %ptr, align 4
  %ext = sext i8 %tmp to i64
  %ins = insertelement <vscale x 2 x i64> undef, i64 %ext, i32 0
  %dup = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  store <vscale x 2 x i64> %dup, ptr %out
  store i64 %ext, ptr %dst
  ret ptr %ptr
}


declare <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half>, i64)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.dupq.lane.nxv8bf16(<vscale x 8 x bfloat>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double>, i64)

declare <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double>, <2 x double>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64>, <2 x i64>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8>, <16 x i8>, i64)
declare <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat>, <8 x bfloat>, i64)

declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)
declare <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)
declare <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
declare <vscale x 4 x half> @llvm.aarch64.sve.dup.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, half)
declare <vscale x 2 x half> @llvm.aarch64.sve.dup.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, half)
declare <vscale x 2 x float> @llvm.aarch64.sve.dup.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, float)


attributes #0 = { "target-features"="+sve,+bf16" }
