# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=arm64-apple-ios -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=CHECK-NOLSE
# RUN: llc -mtriple=arm64-apple-ios -mcpu=apple-a13 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=CHECK-LSE

---
# Legalization of a 128-bit G_ATOMIC_CMPXCHG_WITH_SUCCESS:
#  - without LSE it lowers to CMP_SWAP_128_ACQUIRE (LL/SC expansion later)
#  - with LSE (apple-a13) it selects the CASPAX pair instruction
name:            compare_swap_128
alignment:       4
tracksRegLiveness: true
body:             |
  bb.1:
    liveins: $x0, $x1, $x2, $x3, $x4

    ; CHECK-NOLSE-LABEL: name: compare_swap_128
    ; CHECK-NOLSE: liveins: $x0, $x1, $x2, $x3, $x4
    ; CHECK-NOLSE: [[COPY:%[0-9]+]]:gpr64(p0) = COPY $x0
    ; CHECK-NOLSE: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-NOLSE: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK-NOLSE: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK-NOLSE: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
    ; CHECK-NOLSE: [[COPY5:%[0-9]+]]:gpr64(s64) = COPY [[COPY1]](s64)
    ; CHECK-NOLSE: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
    ; CHECK-NOLSE: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
    ; CHECK-NOLSE: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
    ; CHECK-NOLSE: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32common = CMP_SWAP_128_ACQUIRE [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
    ; CHECK-NOLSE: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
    ; CHECK-NOLSE: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
    ; CHECK-NOLSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
    ; CHECK-NOLSE: RET_ReallyLR
    ; CHECK-LSE-LABEL: name: compare_swap_128
    ; CHECK-LSE: liveins: $x0, $x1, $x2, $x3, $x4
    ; CHECK-LSE: [[COPY:%[0-9]+]]:gpr64sp(p0) = COPY $x0
    ; CHECK-LSE: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
    ; CHECK-LSE: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
    ; CHECK-LSE: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
    ; CHECK-LSE: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
    ; CHECK-LSE: [[REG_SEQUENCE:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY1]](s64), %subreg.sube64, [[COPY2]](s64), %subreg.subo64
    ; CHECK-LSE: [[REG_SEQUENCE1:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY3]](s64), %subreg.sube64, [[COPY4]](s64), %subreg.subo64
    ; CHECK-LSE: [[CASPAX:%[0-9]+]]:xseqpairsclass(s128) = CASPAX [[REG_SEQUENCE]](s128), [[REG_SEQUENCE1]](s128), [[COPY]](p0) :: (load store acquire acquire (s128))
    ; CHECK-LSE: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
    ; CHECK-LSE: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
    ; CHECK-LSE: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
    ; CHECK-LSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
    ; CHECK-LSE: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %3:_(s64) = COPY $x1
    %4:_(s64) = COPY $x2
    %1:_(s128) = G_MERGE_VALUES %3(s64), %4(s64)
    %5:_(s64) = COPY $x3
    %6:_(s64) = COPY $x4
    %2:_(s128) = G_MERGE_VALUES %5(s64), %6(s64)
    %7:_(s128), %8:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0(p0), %1, %2 :: (load store acquire acquire (s128))
    G_STORE %7(s128), %0(p0) :: (store (s128))
    RET_ReallyLR

...
