# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
# RUN: llc -mtriple aarch64 -O2 -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT
...
---
name:            same_reg
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; Every G_BUILD_VECTOR source is the same register, so the lowering
    ; combine should form a G_DUP, which then selects to DUPv8i8gpr.

    ; LOWER-LABEL: name: same_reg
    ; LOWER: liveins: $d0
    ; LOWER: %r:_(s8) = G_IMPLICIT_DEF
    ; LOWER: %build_vector:_(<8 x s8>) = G_DUP %r(s8)
    ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: same_reg
    ; SELECT: liveins: $d0
    ; SELECT: %r:gpr32 = IMPLICIT_DEF
    ; SELECT: %build_vector:fpr64 = DUPv8i8gpr %r
    ; SELECT: $d0 = COPY %build_vector
    ; SELECT: RET_ReallyLR implicit $d0
    %r:_(s8) = G_IMPLICIT_DEF
    %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
    $d0 = COPY %build_vector(<8 x s8>)
    RET_ReallyLR implicit $d0

...
---
name:            dont_combine_different_reg
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $w0, $w1
    ; The G_BUILD_VECTOR sources are two different registers, so it must not
    ; be combined into a G_DUP; selection builds the vector lane by lane.

    ; LOWER-LABEL: name: dont_combine_different_reg
    ; LOWER: liveins: $d0, $w0, $w1
    ; LOWER: %r:_(s32) = COPY $w0
    ; LOWER: %q:_(s32) = COPY $w1
    ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %r(s32), %q(s32)
    ; LOWER: $d0 = COPY %build_vector(<2 x s32>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: dont_combine_different_reg
    ; SELECT: liveins: $d0, $w0, $w1
    ; SELECT: %r:gpr32all = COPY $w0
    ; SELECT: %q:gpr32 = COPY $w1
    ; SELECT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; SELECT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %r, %subreg.ssub
    ; SELECT: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, %q
    ; SELECT: %build_vector:fpr64 = COPY [[INSvi32gpr]].dsub
    ; SELECT: $d0 = COPY %build_vector
    ; SELECT: RET_ReallyLR implicit $d0
    %r:_(s32) = COPY $w0
    %q:_(s32) = COPY $w1
    %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %r, %q
    $d0 = COPY %build_vector(<2 x s32>)
    RET_ReallyLR implicit $d0

...
---
name:            dont_combine_zero
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; Don't combine with 0. We want to avoid blocking immAllZerosV selection
    ; patterns. The splat-of-zero then selects via MOVIv2d_ns 0.

    ; LOWER-LABEL: name: dont_combine_zero
    ; LOWER: liveins: $d0
    ; LOWER: %r:_(s8) = G_CONSTANT i8 0
    ; LOWER: %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8)
    ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: dont_combine_zero
    ; SELECT: liveins: $d0
    ; SELECT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
    ; SELECT: %build_vector:fpr64 = COPY [[MOVIv2d_ns]].dsub
    ; SELECT: $d0 = COPY %build_vector
    ; SELECT: RET_ReallyLR implicit $d0
    %r:_(s8) = G_CONSTANT i8 0
    %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
    $d0 = COPY %build_vector(<8 x s8>)
    RET_ReallyLR implicit $d0

...
---
name:            dont_combine_all_ones
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; Don't combine with -1. We want to avoid blocking immAllOnesV selection
    ; patterns. Here the all-ones splat is materialized as a constant-pool
    ; load (ADRP + LDRDui).

    ; LOWER-LABEL: name: dont_combine_all_ones
    ; LOWER: liveins: $d0
    ; LOWER: %r:_(s8) = G_CONSTANT i8 -1
    ; LOWER: %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8), %r(s8)
    ; LOWER: $d0 = COPY %build_vector(<8 x s8>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: dont_combine_all_ones
    ; SELECT: liveins: $d0
    ; SELECT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
    ; SELECT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0
    ; SELECT: $d0 = COPY [[LDRDui]]
    ; SELECT: RET_ReallyLR implicit $d0
    %r:_(s8) = G_CONSTANT i8 -1
    %build_vector:_(<8 x s8>) = G_BUILD_VECTOR %r, %r, %r, %r, %r, %r, %r, %r
    $d0 = COPY %build_vector(<8 x s8>)
    RET_ReallyLR implicit $d0

...
---
name:            all_zeros_pat_example
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0
    ; G_SUB of an all-zeros G_BUILD_VECTOR is a negate: we should get a
    ; NEGv2i32 here, which is why the zero splat must not become a G_DUP.

    ; LOWER-LABEL: name: all_zeros_pat_example
    ; LOWER: liveins: $d0
    ; LOWER: %v:_(<2 x s32>) = COPY $d0
    ; LOWER: %cst:_(s32) = G_CONSTANT i32 0
    ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst(s32), %cst(s32)
    ; LOWER: %sub:_(<2 x s32>) = G_SUB %build_vector, %v
    ; LOWER: $d0 = COPY %sub(<2 x s32>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: all_zeros_pat_example
    ; SELECT: liveins: $d0
    ; SELECT: %v:fpr64 = COPY $d0
    ; SELECT: %sub:fpr64 = NEGv2i32 %v
    ; SELECT: $d0 = COPY %sub
    ; SELECT: RET_ReallyLR implicit $d0
    %v:_(<2 x s32>) = COPY $d0
    %cst:_(s32) = G_CONSTANT i32 0
    %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst, %cst
    %sub:_(<2 x s32>) = G_SUB %build_vector, %v
    $d0 = COPY %sub(<2 x s32>)
    RET_ReallyLR implicit $d0

...
---
name:            all_ones_pat_example
legalized:       true
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $d0, $d1
    ; G_XOR with an all-ones vector is a NOT, so %v1 & ~%v0 should select to
    ; a BICv8i8 here — which is why the -1 splat must not become a G_DUP.

    ; LOWER-LABEL: name: all_ones_pat_example
    ; LOWER: liveins: $d0, $d1
    ; LOWER: %v0:_(<2 x s32>) = COPY $d0
    ; LOWER: %v1:_(<2 x s32>) = COPY $d1
    ; LOWER: %cst:_(s32) = G_CONSTANT i32 -1
    ; LOWER: %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst(s32), %cst(s32)
    ; LOWER: %xor:_(<2 x s32>) = G_XOR %v0, %build_vector
    ; LOWER: %and:_(<2 x s32>) = G_AND %v1, %xor
    ; LOWER: $d0 = COPY %and(<2 x s32>)
    ; LOWER: RET_ReallyLR implicit $d0
    ; SELECT-LABEL: name: all_ones_pat_example
    ; SELECT: liveins: $d0, $d1
    ; SELECT: %v0:fpr64 = COPY $d0
    ; SELECT: %v1:fpr64 = COPY $d1
    ; SELECT: %and:fpr64 = BICv8i8 %v1, %v0
    ; SELECT: $d0 = COPY %and
    ; SELECT: RET_ReallyLR implicit $d0
    %v0:_(<2 x s32>) = COPY $d0
    %v1:_(<2 x s32>) = COPY $d1
    %cst:_(s32) = G_CONSTANT i32 -1
    %build_vector:_(<2 x s32>) = G_BUILD_VECTOR %cst, %cst
    %xor:_(<2 x s32>) = G_XOR %v0, %build_vector
    %and:_(<2 x s32>) = G_AND %v1, %xor
    $d0 = COPY %and(<2 x s32>)
    RET_ReallyLR implicit $d0
