/*******************************************************************************
 Copyright (c) 2023 Arm  Corporation All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   * Neither the name of Arm Corporation nor the names of its contributors
     may be used to endorse or promote products derived from this software
     without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/

.arch armv8-a+sve+crypto

#define VECTOR_LEN 32

.section .data

.align    8
.type    snow3g_invSR_SQ, %object
snow3g_invSR_SQ:
.byte   0xC2, 0xA6, 0x8F, 0x0A, 0x0D, 0xBE, 0xA7, 0x08
.byte   0x1D, 0x99, 0x45, 0x59, 0x13, 0xD2, 0x11, 0x9F
.byte   0xAE, 0xE6, 0xD4, 0xA4, 0x92, 0x8D, 0x58, 0xC1
.byte   0xD0, 0x97, 0xC8, 0x84, 0x9D, 0x4F, 0xBC, 0x3B
.byte   0x2D, 0xEB, 0x27, 0x53, 0x72, 0x4E, 0xE3, 0xEE
.byte   0xDA, 0x7F, 0xAA, 0x4D, 0x5C, 0x2F, 0x44, 0xDB
.byte   0x3E, 0x3A, 0x67, 0xC5, 0xC3, 0x6A, 0x16, 0x4C
.byte   0x38, 0xCC, 0xD7, 0xDD, 0x70, 0x62, 0xF2, 0x19
.byte   0x10, 0x09, 0x98, 0x4B, 0x61, 0xC9, 0x86, 0x03
.byte   0xA8, 0x6B, 0x5A, 0x33, 0x6E, 0x54, 0x5D, 0x8C
.byte   0x41, 0x1A, 0xF7, 0xF6, 0x82, 0xC6, 0xF8, 0x80
.byte   0xC0, 0xC7, 0xFE, 0xB3, 0x65, 0x2C, 0x7B, 0xBA
.byte   0xB4, 0xFC, 0x2A, 0x22, 0x0C, 0x73, 0xF5, 0x5F
.byte   0x64, 0x68, 0x2E, 0x94, 0xB2, 0x24, 0x35, 0x14
.byte   0x78, 0xFB, 0xBF, 0x48, 0xDE, 0xED, 0x43, 0x07
.byte   0xB6, 0x32, 0xE4, 0xBD, 0x74, 0x7D, 0x57, 0x46
.byte   0x3C, 0x37, 0xC4, 0xB7, 0x51, 0x8A, 0xF3, 0x55
.byte   0x6C, 0xCF, 0x79, 0xAB, 0x77, 0xA3, 0xE1, 0x93
.byte   0xD5, 0x6D, 0x81, 0x5B, 0x2B, 0x9A, 0x7E, 0x8B
.byte   0x04, 0xB5, 0x85, 0xD3, 0x91, 0xA1, 0x47, 0x52
.byte   0xA5, 0xEC, 0xD6, 0xBB, 0x20, 0x87, 0x26, 0xF0
.byte   0xAF, 0x4A, 0x89, 0xF4, 0xCE, 0x25, 0xCB, 0x50
.byte   0x00, 0x3F, 0xD9, 0x42, 0x90, 0x21, 0x3D, 0xA9
.byte   0xE7, 0x29, 0x01, 0xF1, 0x36, 0x5E, 0xFA, 0xCD
.byte   0xE5, 0x31, 0x1B, 0x05, 0xFD, 0x9E, 0xA0, 0x76
.byte   0x30, 0xB1, 0x75, 0xB0, 0x9B, 0x56, 0xEA, 0x1C
.byte   0xEF, 0x06, 0x69, 0x7A, 0x95, 0x88, 0x15, 0xFF
.byte   0xCA, 0xAC, 0x0E, 0x23, 0xD8, 0x0F, 0x28, 0x0B
.byte   0x18, 0xF9, 0x63, 0x1E, 0x83, 0x66, 0x39, 0x9C
.byte   0xE2, 0x49, 0x1F, 0xE8, 0xD1, 0x34, 0x7C, 0xA2
.byte   0xB9, 0xE0, 0x02, 0x12, 0xE9, 0xDF, 0xAD, 0x71
.byte   0x96, 0x8E, 0x6F, 0xB8, 0x40, 0x60, 0x17, 0xDC
.size    snow3g_invSR_SQ,.-snow3g_invSR_SQ

.align    8
.type    snow3g_MULa, %object
snow3g_MULa:
.byte   0x00, 0x13, 0x26, 0x35, 0x4C, 0x5F, 0x6A, 0x79
.byte   0x98, 0x8B, 0xBE, 0xAD, 0xD4, 0xC7, 0xF2, 0xE1
.byte   0x00, 0xCF, 0x37, 0xF8, 0x6E, 0xA1, 0x59, 0x96
.byte   0xDC, 0x13, 0xEB, 0x24, 0xB2, 0x7D, 0x85, 0x4A
.byte   0x00, 0x9F, 0x97, 0x08, 0x87, 0x18, 0x10, 0x8F
.byte   0xA7, 0x38, 0x30, 0xAF, 0x20, 0xBF, 0xB7, 0x28
.byte   0x00, 0xE1, 0x6B, 0x8A, 0xD6, 0x37, 0xBD, 0x5C
.byte   0x05, 0xE4, 0x6E, 0x8F, 0xD3, 0x32, 0xB8, 0x59
.byte   0x00, 0x99, 0x9B, 0x02, 0x9F, 0x06, 0x04, 0x9D
.byte   0x97, 0x0E, 0x0C, 0x95, 0x08, 0x91, 0x93, 0x0A
.byte   0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77
.byte   0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF
.byte   0x00, 0xE7, 0x67, 0x80, 0xCE, 0x29, 0xA9, 0x4E
.byte   0x35, 0xD2, 0x52, 0xB5, 0xFB, 0x1C, 0x9C, 0x7B
.byte   0x00, 0x0A, 0x14, 0x1E, 0x28, 0x22, 0x3C, 0x36
.byte   0x50, 0x5A, 0x44, 0x4E, 0x78, 0x72, 0x6C, 0x66
.size    snow3g_MULa,.-snow3g_MULa

.align    8
.type    snow3g_DIVa, %object
snow3g_DIVa:
.byte   0x00, 0xCD, 0x33, 0xFE, 0x66, 0xAB, 0x55, 0x98
.byte   0xCC, 0x01, 0xFF, 0x32, 0xAA, 0x67, 0x99, 0x54
.byte   0x00, 0x40, 0x80, 0xC0, 0xA9, 0xE9, 0x29, 0x69
.byte   0xFB, 0xBB, 0x7B, 0x3B, 0x52, 0x12, 0xD2, 0x92
.byte   0x00, 0x0F, 0x1E, 0x11, 0x3C, 0x33, 0x22, 0x2D
.byte   0x78, 0x77, 0x66, 0x69, 0x44, 0x4B, 0x5A, 0x55
.byte   0x00, 0x18, 0x30, 0x28, 0x60, 0x78, 0x50, 0x48
.byte   0xC0, 0xD8, 0xF0, 0xE8, 0xA0, 0xB8, 0x90, 0x88
.byte   0x00, 0x31, 0x62, 0x53, 0xC4, 0xF5, 0xA6, 0x97
.byte   0x21, 0x10, 0x43, 0x72, 0xE5, 0xD4, 0x87, 0xB6
.byte   0x00, 0x5F, 0xBE, 0xE1, 0xD5, 0x8A, 0x6B, 0x34
.byte   0x03, 0x5C, 0xBD, 0xE2, 0xD6, 0x89, 0x68, 0x37
.byte   0x00, 0xF0, 0x49, 0xB9, 0x92, 0x62, 0xDB, 0x2B
.byte   0x8D, 0x7D, 0xC4, 0x34, 0x1F, 0xEF, 0x56, 0xA6
.byte   0x00, 0x29, 0x52, 0x7B, 0xA4, 0x8D, 0xF6, 0xDF
.byte   0xE1, 0xC8, 0xB3, 0x9A, 0x45, 0x6C, 0x17, 0x3E
.size    snow3g_DIVa,.-snow3g_DIVa

.align    6
.type    n_inv_aes_shift_row, %object
n_inv_aes_shift_row:
.byte    0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
.byte    0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
.byte    0x10, 0x1d, 0x1a, 0x17, 0x14, 0x11, 0x1e, 0x1b
.byte    0x18, 0x15, 0x12, 0x1f, 0x1c, 0x19, 0x16, 0x13
.size    n_inv_aes_shift_row,.-n_inv_aes_shift_row

.align    6
.type    ror8, %object
ror8:
.word    0x00030201, 0x04070605, 0x080b0a09, 0x0c0f0e0d
.word    0x10131211, 0x14171615, 0x181b1a19, 0x1c1f1e1d
.size    ror8,.-ror8

.align    6
.type    gather_clear_mask_mul, %object
gather_clear_mask_mul:
.byte   0x03, 0x07, 0x0b, 0x0f, 0x13, 0x17, 0x1b, 0x1f
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.size    gather_clear_mask_mul,.-gather_clear_mask_mul

.align    6
.type    gather_clear_mask_div, %object
gather_clear_mask_div:
.byte   0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x1c
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.byte   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
.size    gather_clear_mask_div,.-gather_clear_mask_div

.align    6
.type    iv_swap_mask, %object
iv_swap_mask:
.quad    0x0405060700010203, 0x0c0d0e0f08090a0b
.size    iv_swap_mask,.-iv_swap_mask

.section .text

#define START_FUNC(fn) .globl fn; \
        .type fn, %function; \
        .align 6; \
        fn:

#define END_FUNC(fn) .size fn,.-fn

.macro declare_register name:req, reg:req
.ifdef def_\name
    .unreq    \name
.endif
    .set def_\name, 0
    \name    .req    \reg
.endm

declare_register LFSR_S0, z12
declare_register LFSR_S1, z13
declare_register LFSR_S2, z14
declare_register LFSR_S3, z15
declare_register LFSR_S4, z16
declare_register LFSR_S5, z17
declare_register LFSR_S6,  z18
declare_register LFSR_S7,  z19
declare_register LFSR_S8,  z20
declare_register LFSR_S9,  z21
declare_register LFSR_S10, z22
declare_register LFSR_S11, z23
declare_register LFSR_S12, z24
declare_register LFSR_S13, z25
declare_register LFSR_S14, z26
declare_register LFSR_S15, z27
declare_register FSM_R1,  z28
declare_register FSM_R2,  z29
declare_register FSM_R3,  z30
declare_register zINV_SHIFT_ROW, z31
declare_register zTMP0, z0
declare_register zTMP1, z1
declare_register zTMP2, z2
declare_register zTMP3, z3
declare_register zTMP4, z4
declare_register zTMP5, z5
declare_register zTMP6, z6
declare_register zTMP7, z7
declare_register zTMP8, z8
declare_register zTMP9, z9
declare_register zTMP10, z10
declare_register zTMP11, z11
declare_register vTMP0, v0
declare_register vTMP1, v1
declare_register vTMP2, v2
declare_register vTMP3, v3
declare_register vTMP4, v4
declare_register vTMP5, v5
declare_register vTMP6, v6
declare_register vTMP7, v7
declare_register vTMP8, v8
declare_register vTMP9, v9
declare_register vTMP10, v10
declare_register vTMP11, v11
declare_register xTMP0, x13
declare_register xTMP1, x14
declare_register xTMP2, x15
declare_register xTMP3, x16
declare_register xTMP4, x17
declare_register xTMP5, x18
declare_register xTMP6, x19
declare_register xTMP7, x20
declare_register xTMP8, x21
declare_register xTMP9, x22
declare_register xTMP10, x23
declare_register xTMP11, x24
declare_register xTMP12, x25
declare_register xTMP13, x26
declare_register xTMP14, x27
declare_register xTMP15, x28
declare_register xTMP16, x9
declare_register xTMP17, x10
declare_register xTMP18, x11
declare_register xTMP19, x12

declare_register wTMP15, w28
declare_register wTMP16, w9
declare_register wTMP17, w10
declare_register wTMP18, w11
declare_register wTMP19, w12
declare_register PRED8,   p3
declare_register PRED32,  p4
declare_register PRED32_HALF1, p5
declare_register PRED32_HALF2, p6
declare_register pTMP0, p2

.macro FUNC_SCALAR_SAVE
    stp x19, x20, [sp, -80]!
    stp x21, x22, [sp, 16]
    stp x23, x24, [sp, 32]
    stp x25, x26, [sp, 48]
    stp x27, x28, [sp, 64]
.endm

.macro FUNC_SCALAR_RESTORE
    ldp x21, x22, [sp, 16]
    ldp x23, x24, [sp, 32]
    ldp x25, x26, [sp, 48]
    ldp x27, x28, [sp, 64]
    ldp x19, x20, [sp], 80
.endm

.macro FUNC_VECTOR_SAVE
    stp d8, d9, [sp, -64]!
    stp d10, d11, [sp, 16]
    stp d12, d13, [sp, 32]
    stp d14, d15, [sp, 48]
.endm

.macro FUNC_VECTOR_RESTORE
    ldp d10, d11, [sp, 16]
    ldp d12, d13, [sp, 32]
    ldp d14, d15, [sp, 48]
    ldp d8, d9, [sp], 64
.endm

/*
 * S1_BOX_8_SVE256()
 *
 * params
 *     \x - input value
 *     \rslt - returen value
 * uses
 *     zTMP0-2
 */
.macro S1_BOX_8_SVE256 x, rslt
    tbl zTMP0.B, \x\().B, zINV_SHIFT_ROW.B
    compact zTMP1.S, PRED32_HALF2, zTMP0.S
    movi vTMP2.16B, #0
    aese vTMP0.16B, vTMP2.16B
    aesmc vTMP0.16B, vTMP0.16B
    aese VTMP1.16B, vTMP2.16B
    aesmc vTMP1.16B, vTMP1.16B
    insr zTMP1.D, X0
    insr zTMP1.D, X0
    mov \rslt\().S, PRED32_HALF1/M, zTMP0.S
    mov \rslt\().S, PRED32_HALF2/M, zTMP1.S
.endm

/*
 * LOOKUP_32X8BIT_SVE256()
 *
 * params
 *     \index  - input value
 *     \lookup - lookup table
 *     \rslt   - return value
 * uses
 *     zTMP0-2
 */
.macro LOOKUP_32X8BIT_SVE256 index, lookup, rslt
    mov zTMP0.B, PRED8/Z, #32

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #0, MUL VL]
    tbl \rslt\().B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #1, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #2, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #3, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #4, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #5, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #6, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D

    ld1b {zTMP1.B}, PRED8/Z, [\lookup\(), #7, MUL VL]
    tbl zTMP2.B, {zTMP1.B}, \index\().B
    sub \index\().B, \index\().B, zTMP0.B
    eor \rslt\().D, \rslt\().D, zTMP2.D
.endm

/*
 * S2_MIXC_FIXUP_8_SVE256()
 *
 * params
 *     \no_mixc  - input value
 *     \mixc     - lookup table
 *     \rslt     - return value
 * uses
 *     xTMP0, zTMP0-2
 */
.macro S2_MIXC_FIXUP_8_SVE256 no_mixc, mixc, rslt
    // PAT = CMLT(NO_MIXC);
    cmplt pTMP0.B, PRED8/Z, \no_mixc\().B, #0
    mov zTMP1.B, pTMP0/Z, #0xFF
    // PAT_SHUF = TBL(PAT, ROR8);
    adrp xTMP0, ror8
    add xTMP0, xTMP0, #:lo12:ror8
    ld1b {zTMP0.B}, PRED8/Z, [xTMP0]
    tbl zTMP2.B, {zTMP1.B}, zTMP0.B
    // RSLT = MIXC ^ (0X72 AND (PAT ^ PAT_SHUF))
    eor zTMP1.D, zTMP1.D, zTMP2.D
    mov zTMP2.B, PRED8/Z, #0x72
    and zTMP0.D, zTMP2.D, zTMP1.D
    eor \rslt\().D, zTMP0.D, \mixc\().D
.endm

/*
 * S2_BOX_8_SVE256()
 *
 * params
 *     \x    - input value
 *     \rslt - return value
 * uses
 *     xTMP0, zTMP0-4
 */
.macro S2_BOX_8_SVE256 x, rslt
    // NEW_X = TBL(LOOKUP(X, snow3g_invSR_SQ), inv_aes_shift_row);
    adrp xTMP0, snow3g_invSR_SQ
    add xTMP0, xTMP0, #:lo12:snow3g_invSR_SQ
    LOOKUP_32X8BIT_SVE256 \x\(), xTMP0, \rslt\()
    tbl zTMP3.B, \rslt\().B, zINV_SHIFT_ROW.B
    compact zTMP1.S, PRED32_HALF2, zTMP3.S
    // NOMIXC = AESE(NEW_X, 0)
    movi vTMP2.16B, #0
    aese vTMP3.16B, vTMP2.16B
    aese vTMP1.16B, vTMP2.16B
    // MIXC = AESMC(NOMIXC)
    aesmc vTMP4.16B, vTMP3.16B
    aesmc vTMP0.16B, vTMP1.16B
    insr zTMP1.D, X0
    insr zTMP1.D, X0
    insr zTMP0.D, X0
    insr zTMP0.D, X0
    mov zTMP3.S, PRED32_HALF2/M, zTMP1.S
    mov zTMP4.S, PRED32_HALF2/M, zTMP0.S
    // S2_MIXC_FIXUP(NOMIXC, MIXC)
    S2_MIXC_FIXUP_8_SVE256 zTMP3, zTMP4, \rslt\()
.endm

/*
 * MUL_DIV_A_8_SVE256()
 *
 * params
 *     \S    - input value, S0 or S11
 *     \rslt - return value
 * uses
 *     xTMP0, zTMP0-4
 */
.macro MUL_DIV_A_8_SVE256 MUL_OR_DIV S, rslt
    // L = S0,3 & 0x0F
    mov zTMP0.B, PRED8/Z, #0x0F
.ifc \MUL_OR_DIV, MUL
    adrp xTMP0, gather_clear_mask_mul
    add xTMP0, xTMP0, #:lo12:gather_clear_mask_mul
.else
    adrp xTMP0, gather_clear_mask_div
    add xTMP0, xTMP0, #:lo12:gather_clear_mask_div
.endif
    ld1b {zTMP1.B}, PRED8/Z, [xTMP0]

    // TL = TBL8(MULa_B0, L) || TBL8(MULa_B1, L) || TBL8(MULa_B2, L) || TBL8(MULa_B3, L)
    tbl zTMP1.B, \S\().B, zTMP1.B
    and zTMP0.D, zTMP1.D, zTMP0.D
.ifc \MUL_OR_DIV, MUL
    adrp xTMP0, snow3g_MULa
    add xTMP0, xTMP0, #:lo12:snow3g_MULa
.else
    adrp xTMP0, snow3g_DIVa
    add xTMP0, xTMP0, #:lo12:snow3g_DIVa
.endif
    ld1 {vTMP2.16b,vTMP3.16b},[xTMP0],#32
    tbl zTMP2.B, zTMP2.B, zTMP0.B
    tbl zTMP3.B, zTMP3.B, zTMP0.B
    zip1 zTMP2.B, zTMP2.B, zTMP3.B
    ld1 {vTMP3.16b,vTMP4.16b},[xTMP0],#32
    tbl zTMP3.B, zTMP3.B, zTMP0.B
    tbl zTMP4.B, zTMP4.B, zTMP0.B
    zip1 zTMP3.B, zTMP3.B, zTMP4.B
    zip1 zTMP0.H, zTMP2.H, zTMP3.H

    // H = S0,3 & 0xF0
    lsr zTMP1.B, PRED8/M, zTMP1.B, #4

    // TH = TBL8(MULa_B4, H) || TBL8(MULa_B5, H) || TBL8(MULa_B6, H) || TBL8(MULa_B7, H)
    ld1 {vTMP2.16b,vTMP3.16b},[xTMP0],#32
    tbl zTMP2.B, zTMP2.B, zTMP1.B
    tbl zTMP3.B, zTMP3.B, zTMP1.B
    zip1 zTMP2.B, zTMP2.B, zTMP3.B
    ld1 {vTMP3.16b,vTMP4.16b},[xTMP0]
    tbl zTMP3.B, zTMP3.B, zTMP1.B
    tbl zTMP4.B, zTMP4.B, zTMP1.B
    zip1 zTMP3.B, zTMP3.B, zTMP4.B
    zip1 zTMP1.H, zTMP2.H, zTMP3.H

    // RSLT = TL ^ TH
    eor \rslt\().D, zTMP1.D, zTMP0.D
.endm

/*
 * CLOCK_FSM_8_SVE256()
 *
 * params
 *     \F - input value
 * uses
 *     xTMP0, zTMP0-5
 */
.macro CLOCK_FSM_8_SVE256 F
    // F = (S15 + R1) ^ R2
    // R = R2 + (R3 ^ S5)
    add \F\().S, LFSR_S15.S, FSM_R1.S
    eor zTMP5.D, LFSR_S5.D, FSM_R3.D
    eor \F\().D, \F\().D, FSM_R2.D
    add zTMP5.S, zTMP5.S, FSM_R2.S
    // R3 = S2(R2);
    S2_BOX_8_SVE256 FSM_R2, FSM_R3
    // R2 = S1(R1);
    S1_BOX_8_SVE256 FSM_R1, FSM_R2
    // R1 = R;
    mov FSM_R1.D, zTMP5.D
.endm

/*
 * SHIFT_LFSR_8_SVE256()
 *
 * uses
 *     zTMP0-2
 */
.macro SHIFT_LFSR_8_SVE256 S15
    mov zTMP0.D, LFSR_S4.D
    mov zTMP1.D, LFSR_S8.D
    mov zTMP2.D, LFSR_S12.D

    mov LFSR_S0.D, LFSR_S1.D
    mov LFSR_S4.D, LFSR_S5.D
    mov LFSR_S8.D, LFSR_S9.D
    mov LFSR_S12.D, LFSR_S13.D

    mov LFSR_S1.D, LFSR_S2.D
    mov LFSR_S5.D, LFSR_S6.D
    mov LFSR_S9.D, LFSR_S10.D
    mov LFSR_S13.D, LFSR_S14.D

    mov LFSR_S2.D, LFSR_S3.D
    mov LFSR_S6.D, LFSR_S7.D
    mov LFSR_S10.D, LFSR_S11.D
    mov LFSR_S14.D, LFSR_S15.D

    mov LFSR_S3.D, zTMP0.D
    mov LFSR_S7.D, zTMP1.D
    mov LFSR_S11.D, zTMP2.D
    mov LFSR_S15.D, \S15\().D
.endm

/*
 * CLOCK_LFSR_8_SVE256()
 *
 * uses
 *     xTMP0, zTMP0-6
 */
.macro CLOCK_LFSR_8_SVE256
    // V = (S0 << 8) ^ MULa(S0) ^ S2 ^ (S11 >> 8) ^ DIVa(S11)
    MUL_DIV_A_8_SVE256 MUL LFSR_S0, zTMP5
    MUL_DIV_A_8_SVE256 DIV LFSR_S11, zTMP6
    eor zTMP5.D, zTMP5.D, zTMP6.D
    lsl zTMP3.S, LFSR_S0.S, #8
    lsr zTMP1.S, LFSR_S11.S, #8
    eor zTMP3.D, zTMP3.D, zTMP1.D
    eor zTMP5.D, zTMP5.D, LFSR_S2.D
    eor zTMP3.D, zTMP3.D, zTMP5.D
    SHIFT_LFSR_8_SVE256 zTMP3
.endm

/*
 * SNOW3G_KEYSTREAM_8_4_SVE256()
 *
 * params
 *     \KEY - output keystream
 * uses
 *     xTMP0, zTMP0-6
 */
.macro SNOW3G_KEYSTREAM_8_4_SVE256 KEY
    CLOCK_FSM_8_SVE256 \KEY\()
    eor \KEY\().D, \KEY\().D, LFSR_S0.D
    CLOCK_LFSR_8_SVE256
.endm

/*
 * INTERLEAVE_IV_KEY_8()
 *
 * uses
 *     xTMP0, zTMP0-3 when SWAP == 0
 *     xTMP0, zTMP0-4 when SWAP == 1
 */
.macro INTERLEAVE_IV_KEY_8 SWAP RSLT0, RSLT1, RSLT2, RSLT3, \
                                ADDR1, ADDR2, ADDR3, ADDR4, \
                                ADDR5, ADDR6, ADDR7, ADDR8
    ld1 {v\RSLT0\().4S}, [\ADDR1\()]
    ld1 {vTMP0.4S}, [\ADDR2\()]
    ld1 {v\RSLT1\().4S}, [\ADDR3\()]
    ld1 {vTMP1.4S}, [\ADDR4\()]
    ld1 {v\RSLT2\().4S}, [\ADDR5\()]
    ld1 {vTMP2.4S}, [\ADDR6\()]
    ld1 {v\RSLT3\().4S}, [\ADDR7\()]
    ld1 {vTMP3.4S}, [\ADDR8\()]
.if \SWAP == 1
    adrp xTMP0, iv_swap_mask
    add xTMP0, xTMP0, #:lo12:iv_swap_mask
    ld1 {vTMP4.4S}, [xTMP0]
    tbl vTMP0.16B, {vTMP0.16B}, vTMP4.16B
    tbl vTMP1.16B, {vTMP1.16B}, vTMP4.16B
    tbl vTMP2.16B, {vTMP2.16B}, vTMP4.16B
    tbl vTMP3.16B, {vTMP3.16B}, vTMP4.16B
    tbl v\RSLT0\().16B, {v\RSLT0\().16B}, vTMP4.16B
    tbl v\RSLT1\().16B, {v\RSLT1\().16B}, vTMP4.16B
    tbl v\RSLT2\().16B, {v\RSLT2\().16B}, vTMP4.16B
    tbl v\RSLT3\().16B, {v\RSLT3\().16B}, vTMP4.16B
.endif
    zip1 z\RSLT0\().S, z\RSLT0\().S, zTMP0.S
    zip1 z\RSLT1\().S, z\RSLT1\().S, zTMP1.S
    zip1 z\RSLT2\().S, z\RSLT2\().S, zTMP2.S
    zip1 z\RSLT3\().S, z\RSLT3\().S, zTMP3.S

    zip1 zTMP0.D, z\RSLT0\().D, z\RSLT1\().D
    zip2 zTMP1.D, z\RSLT0\().D, z\RSLT1\().D
    zip1 zTMP2.D, z\RSLT2\().D, z\RSLT3\().D
    zip2 zTMP3.D, z\RSLT2\().D, z\RSLT3\().D

    compact z\RSLT1\().S, PRED32_HALF2, zTMP0.S
    mov z\RSLT1\().S, PRED32_HALF2/M, zTMP2.S
    insr zTMP2.D, x0
    insr zTMP2.D, x0
    sel z\RSLT0\().S, PRED32_HALF1, zTMP0.S, zTMP2.S

    compact z\RSLT3\().S, PRED32_HALF2, zTMP1.S
    mov z\RSLT3\().S, PRED32_HALF2/M, zTMP3.S
    insr zTMP3.D, x0
    insr zTMP3.D, x0
    sel z\RSLT2\().S, PRED32_HALF1, zTMP1.S, zTMP3.S
.endm

/*
 * SNOW3G_INITIALIZE_8_SVE256_FIRST()
 *
 * uses
 *     zTMP0-8
 */
.macro SNOW3G_INITIALIZE_8_SVE256_FIRST KEYADDR1 KEYADDR2 KEYADDR3 KEYADDR4 \
                                     KEYADDR5 KEYADDR6 KEYADDR7 KEYADDR8 \
                                     IVADDR1 IVADDR2 IVADDR3 IVADDR4 \
                                     IVADDR5 IVADDR6 IVADDR7 IVADDR8
    INTERLEAVE_IV_KEY_8 0, 4, 5, 6, 7, \
                        \KEYADDR1\(), \KEYADDR2\(), \KEYADDR3\(), \KEYADDR4\(), \
                        \KEYADDR5\(), \KEYADDR6\(), \KEYADDR7\(), \KEYADDR8\()
    mov LFSR_S4.D, zTMP4.D
    mov LFSR_S5.D, zTMP5.D
    mov LFSR_S6.D, zTMP6.D
    mov LFSR_S7.D, zTMP7.D
    mov LFSR_S12.D, zTMP4.D
    mov LFSR_S13.D, zTMP5.D
    mov LFSR_S14.D, zTMP6.D
    mov LFSR_S15.D, zTMP7.D
    not LFSR_S0.S, PRED32/M, zTMP4.S
    not LFSR_S1.S, PRED32/M, zTMP5.S
    not LFSR_S2.S, PRED32/M, zTMP6.S
    not LFSR_S3.S, PRED32/M, zTMP7.S
    mov LFSR_S8.D, LFSR_S0.D
    mov LFSR_S9.D, LFSR_S1.D
    mov LFSR_S10.D, LFSR_S2.D
    mov LFSR_S11.D, LFSR_S3.D

    INTERLEAVE_IV_KEY_8 1, 5, 6, 7, 8, \
                        \IVADDR1\(), \IVADDR2\(), \IVADDR3\(), \IVADDR4\(), \
                        \IVADDR5\(), \IVADDR6\(), \IVADDR7\(), \IVADDR8\()

    eor LFSR_S15.D, LFSR_S15.D, zTMP8.D
    eor LFSR_S12.D, LFSR_S12.D, zTMP7.D
    eor LFSR_S10.D, LFSR_S10.D, zTMP6.D
    eor LFSR_S9.D, LFSR_S9.D, zTMP5.D

    mov FSM_R1.B, PRED8/Z, #0
    mov FSM_R2.B, PRED8/Z, #0
    mov FSM_R3.B, PRED8/Z, #0
.endm

/*
 * SNOW3G_INITIALIZE_8_SVE256_SECOND()
 *
 * uses
 *     xTMP0, zTMP0-7
 */
.macro SNOW3G_INITIALIZE_8_SVE256_SECOND
.rept 32
    CLOCK_FSM_8_SVE256 zTMP7
    CLOCK_LFSR_8_SVE256
    eor LFSR_S15.D, LFSR_S15.D, zTMP7.D
.endr
    CLOCK_FSM_8_SVE256 zTMP7
    CLOCK_LFSR_8_SVE256
.endm

/*
 * SNOW3G_LOAD_CTX_8_SVE256()
 *
 */
.macro SNOW3G_LOAD_CTX_8_SVE256 ctx_addr
    ld1b {LFSR_S0.B}, PRED8/Z, [\ctx_addr\(), #0, MUL VL]
    ld1b {LFSR_S1.B}, PRED8/Z, [\ctx_addr\(), #1, MUL VL]
    ld1b {LFSR_S2.B}, PRED8/Z, [\ctx_addr\(), #2, MUL VL]
    ld1b {LFSR_S3.B}, PRED8/Z, [\ctx_addr\(), #3, MUL VL]
    ld1b {LFSR_S4.B}, PRED8/Z, [\ctx_addr\(), #4, MUL VL]
    ld1b {LFSR_S5.B}, PRED8/Z, [\ctx_addr\(), #5, MUL VL]
    ld1b {LFSR_S6.B}, PRED8/Z, [\ctx_addr\(), #6, MUL VL]
    ld1b {LFSR_S7.B}, PRED8/Z, [\ctx_addr\(), #7, MUL VL]
    add \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*8)
    ld1b {LFSR_S8.B}, PRED8/Z, [\ctx_addr\(), #0, MUL VL]
    ld1b {LFSR_S9.B}, PRED8/Z, [\ctx_addr\(), #1, MUL VL]
    ld1b {LFSR_S10.B}, PRED8/Z, [\ctx_addr\(), #2, MUL VL]
    ld1b {LFSR_S11.B}, PRED8/Z, [\ctx_addr\(), #3, MUL VL]
    ld1b {LFSR_S12.B}, PRED8/Z, [\ctx_addr\(), #4, MUL VL]
    ld1b {LFSR_S13.B}, PRED8/Z, [\ctx_addr\(), #5, MUL VL]
    ld1b {LFSR_S14.B}, PRED8/Z, [\ctx_addr\(), #6, MUL VL]
    ld1b {LFSR_S15.B}, PRED8/Z, [\ctx_addr\(), #7, MUL VL]
    add \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*8)
    ld1b {FSM_R1.B}, PRED8/Z, [\ctx_addr\(), #0, MUL VL]
    ld1b {FSM_R2.B}, PRED8/Z, [\ctx_addr\(), #1, MUL VL]
    ld1b {FSM_R3.B}, PRED8/Z, [\ctx_addr\(), #2, MUL VL]
    sub \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*16)
.endm

/*
 * SNOW3G_STORE_CTX_8_SVE256()
 *
 */
.macro SNOW3G_STORE_CTX_8_SVE256 ctx_addr
    st1b {LFSR_S0.B}, PRED8, [\ctx_addr\(), #0, MUL VL]
    st1b {LFSR_S1.B}, PRED8, [\ctx_addr\(), #1, MUL VL]
    st1b {LFSR_S2.B}, PRED8, [\ctx_addr\(), #2, MUL VL]
    st1b {LFSR_S3.B}, PRED8, [\ctx_addr\(), #3, MUL VL]
    st1b {LFSR_S4.B}, PRED8, [\ctx_addr\(), #4, MUL VL]
    st1b {LFSR_S5.B}, PRED8, [\ctx_addr\(), #5, MUL VL]
    st1b {LFSR_S6.B}, PRED8, [\ctx_addr\(), #6, MUL VL]
    st1b {LFSR_S7.B}, PRED8, [\ctx_addr\(), #7, MUL VL]
    add \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*8)
    st1b {LFSR_S8.B}, PRED8, [\ctx_addr\(), #0, MUL VL]
    st1b {LFSR_S9.B}, PRED8, [\ctx_addr\(), #1, MUL VL]
    st1b {LFSR_S10.B}, PRED8, [\ctx_addr\(), #2, MUL VL]
    st1b {LFSR_S11.B}, PRED8, [\ctx_addr\(), #3, MUL VL]
    st1b {LFSR_S12.B}, PRED8, [\ctx_addr\(), #4, MUL VL]
    st1b {LFSR_S13.B}, PRED8, [\ctx_addr\(), #5, MUL VL]
    st1b {LFSR_S14.B}, PRED8, [\ctx_addr\(), #6, MUL VL]
    st1b {LFSR_S15.B}, PRED8, [\ctx_addr\(), #7, MUL VL]
    add \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*8)
    st1b {FSM_R1.B}, PRED8, [\ctx_addr\(), #0, MUL VL]
    st1b {FSM_R2.B}, PRED8, [\ctx_addr\(), #1, MUL VL]
    st1b {FSM_R3.B}, PRED8, [\ctx_addr\(), #2, MUL VL]
    add \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*3)
    str wzr, [\ctx_addr\()]
    sub \ctx_addr\(), \ctx_addr\(), #(VECTOR_LEN*19)
.endm

/*
 * CLEAR_VECTORS_SVE256()
 *
 */
.macro CLEAR_VECTORS_SVE256
    eor zTMP0.D, zTMP0.D, zTMP0.D
    eor zTMP1.D, zTMP1.D, zTMP1.D
    eor zTMP2.D, zTMP2.D, zTMP2.D
    eor zTMP3.D, zTMP3.D, zTMP3.D
    eor zTMP4.D, zTMP4.D, zTMP4.D
    eor zTMP5.D, zTMP5.D, zTMP5.D
    eor zTMP6.D, zTMP6.D, zTMP6.D
    eor zTMP7.D, zTMP7.D, zTMP7.D
    eor zTMP8.D, zTMP8.D, zTMP8.D
    eor zTMP9.D, zTMP9.D, zTMP9.D
    eor zTMP10.D, zTMP10.D, zTMP10.D
    eor zTMP11.D, zTMP11.D, zTMP11.D
    eor LFSR_S0.D, LFSR_S0.D, LFSR_S0.D
    eor LFSR_S1.D, LFSR_S1.D, LFSR_S1.D
    eor LFSR_S2.D, LFSR_S2.D, LFSR_S2.D
    eor LFSR_S3.D, LFSR_S3.D, LFSR_S3.D
    eor LFSR_S4.D, LFSR_S4.D, LFSR_S4.D
    eor LFSR_S5.D, LFSR_S5.D, LFSR_S5.D
    eor LFSR_S6.D, LFSR_S6.D, LFSR_S6.D
    eor LFSR_S7.D, LFSR_S7.D, LFSR_S7.D
    eor LFSR_S8.D, LFSR_S8.D, LFSR_S8.D
    eor LFSR_S9.D, LFSR_S9.D, LFSR_S9.D
    eor LFSR_S10.D, LFSR_S10.D, LFSR_S10.D
    eor LFSR_S11.D, LFSR_S11.D, LFSR_S11.D
    eor LFSR_S12.D, LFSR_S12.D, LFSR_S12.D
    eor LFSR_S13.D, LFSR_S13.D, LFSR_S13.D
    eor LFSR_S14.D, LFSR_S14.D, LFSR_S14.D
    eor LFSR_S15.D, LFSR_S15.D, LFSR_S15.D
    eor FSM_R1.D, FSM_R1.D, FSM_R1.D
    eor FSM_R2.D, FSM_R2.D, FSM_R2.D
    eor FSM_R3.D, FSM_R3.D, FSM_R3.D
.endm
/*
 * snow3g_f8_8_buffer_initialize_aarch64_sve256_asm(
 *              void *ctx,
 *              snow3g_key_schedule_t **pKeySched,
 *              void **pIV)
 */
START_FUNC(snow3g_f8_8_buffer_initialize_aarch64_sve256_asm)
    FUNC_SCALAR_SAVE
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]
    mov xTMP17, x1
    mov xTMP18, x2

    ldp xTMP0, xTMP1, [xTMP17], #16
    ldp xTMP2, xTMP3, [xTMP17], #16
    ldp xTMP4, xTMP5, [xTMP17], #16
    ldp xTMP6, xTMP7, [xTMP17]
    ldp xTMP8, xTMP9, [xTMP18], #16
    ldp xTMP10, xTMP11, [xTMP18], #16
    ldp xTMP12, xTMP13, [xTMP18], #16
    ldp xTMP14, xTMP15, [xTMP18]

    SNOW3G_INITIALIZE_8_SVE256_FIRST xTMP0 xTMP1 xTMP2 xTMP3 xTMP4 xTMP5 xTMP6 xTMP7\
                                  xTMP8 xTMP9 xTMP10 xTMP11 xTMP12 xTMP13 xTMP14 xTMP15
    SNOW3G_INITIALIZE_8_SVE256_SECOND
    SNOW3G_STORE_CTX_8_SVE256 x0

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
    ret
END_FUNC(snow3g_f8_8_buffer_initialize_aarch64_sve256_asm)

#ifndef GATHER_SCATTER_IMPL
/*
 * snow3g_f8_8_buffer_stream_aarch64_sve256_asm(void *ctx,
 *                                           void **in,
 *                                           void **out,
 *                                           uint32_t lengthInBytes)
 *
 */
START_FUNC(snow3g_f8_8_buffer_stream_aarch64_sve256_asm)
    FUNC_SCALAR_SAVE
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]

    mov xTMP17, x1
    mov xTMP18, x2
    mov xTMP19, x3

    SNOW3G_LOAD_CTX_8_SVE256 x0
    ldp xTMP16, xTMP1, [xTMP17], #16
    ldp xTMP2, xTMP3, [xTMP17], #16
    ldp xTMP4, xTMP5, [xTMP17], #16
    ldp xTMP6, xTMP7, [xTMP17]
    ldp xTMP8, xTMP9, [xTMP18], #16
    ldp xTMP10, xTMP11, [xTMP18], #16
    ldp xTMP12, xTMP13, [xTMP18], #16
    ldp xTMP14, xTMP15, [xTMP18]

    cmp xTMP19, #16
    b.lt GEN8

GEN16_LOOP:
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP10
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP11
    zip1 zTMP0.S, zTMP8.S, zTMP9.S
    zip2 zTMP1.S, zTMP8.S, zTMP9.S
    zip1 zTMP2.S, zTMP10.S, zTMP11.S
    zip2 zTMP3.S, zTMP10.S, zTMP11.S
    zip1 zTMP8.D, zTMP0.D, zTMP2.D
    zip2 zTMP9.D, zTMP0.D, zTMP2.D
    zip1 zTMP10.D, zTMP1.D, zTMP3.D
    zip2 zTMP11.D, zTMP1.D, zTMP3.D
    revb zTMP8.S, PRED32/M, zTMP8.S
    revb zTMP9.S, PRED32/M, zTMP9.S
    revb zTMP10.S, PRED32/M, zTMP10.S
    revb zTMP11.S, PRED32/M, zTMP11.S

    ld1 {vTMP0.4S}, [xTMP16], #16
    ld1 {vTMP4.4S}, [xTMP1], #16
    ld1 {vTMP1.4S}, [xTMP2], #16
    ld1 {vTMP5.4S}, [xTMP3], #16
    ld1 {vTMP2.4S}, [xTMP4], #16
    ld1 {vTMP6.4S}, [xTMP5], #16
    ld1 {vTMP3.4S}, [xTMP6], #16
    ld1 {vTMP7.4S}, [xTMP7], #16
    insr zTMP4.D, x0
    insr zTMP5.D, x0
    insr zTMP6.D, x0
    insr zTMP7.D, x0
    insr zTMP4.D, x0
    insr zTMP5.D, x0
    insr zTMP6.D, x0
    insr zTMP7.D, x0
    mov zTMP0.S, PRED32_HALF2/M, zTMP4.S
    mov zTMP1.S, PRED32_HALF2/M, zTMP5.S
    mov zTMP2.S, PRED32_HALF2/M, zTMP6.S
    mov zTMP3.S, PRED32_HALF2/M, zTMP7.S
    eor zTMP0.D, zTMP0.D, zTMP8.D
    eor zTMP1.D, zTMP1.D, zTMP9.D
    eor zTMP2.D, zTMP2.D, zTMP10.D
    eor zTMP3.D, zTMP3.D, zTMP11.D

    compact zTMP4.S, PRED32_HALF2, zTMP0.S
    compact zTMP5.S, PRED32_HALF2, zTMP1.S
    compact zTMP6.S, PRED32_HALF2, zTMP2.S
    compact zTMP7.S, PRED32_HALF2, zTMP3.S

    st1 {vTMP0.4S}, [xTMP8], #16
    st1 {vTMP4.4S}, [xTMP9], #16
    st1 {vTMP1.4S}, [xTMP10], #16
    st1 {vTMP5.4S}, [xTMP11], #16
    st1 {vTMP2.4S}, [xTMP12], #16
    st1 {vTMP6.4S}, [xTMP13], #16
    st1 {vTMP3.4S}, [xTMP14], #16
    st1 {vTMP7.4S}, [xTMP15], #16

    sub xTMP19, xTMP19, #16
    cmp xTMP19, #16
    b.ge GEN16_LOOP

GEN8:
    cmp xTMP19, #8
    b.lt GEN4
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    zip1 zTMP10.S, zTMP8.S, zTMP9.S
    zip2 zTMP11.S, zTMP8.S, zTMP9.S
    revb zTMP10.S, PRED32/M, zTMP10.S
    revb zTMP11.S, PRED32/M, zTMP11.S

    ld1 {vTMP0.D}[0], [xTMP16], #8
    ld1 {vTMP0.D}[1], [xTMP1], #8
    ld1 {vTMP1.D}[0], [xTMP2], #8
    ld1 {vTMP1.D}[1], [xTMP3], #8
    ld1 {vTMP2.D}[0], [xTMP4], #8
    ld1 {vTMP2.D}[1], [xTMP5], #8
    ld1 {vTMP3.D}[0], [xTMP6], #8
    ld1 {vTMP3.D}[1], [xTMP7], #8

    compact zTMP4.S, PRED32_HALF2, zTMP10.S
    compact zTMP5.S, PRED32_HALF2, zTMP11.S

    eor vTMP0.16B, vTMP0.16B, vTMP10.16B
    eor vTMP1.16B, vTMP1.16B, vTMP4.16B
    eor vTMP2.16B, vTMP2.16B, vTMP11.16B
    eor vTMP3.16B, vTMP3.16B, vTMP5.16B

    st1 {vTMP0.D}[0], [xTMP8], #8
    st1 {vTMP0.D}[1], [xTMP9], #8
    st1 {vTMP1.D}[0], [xTMP10], #8
    st1 {vTMP1.D}[1], [xTMP11], #8
    st1 {vTMP2.D}[0], [xTMP12], #8
    st1 {vTMP2.D}[1], [xTMP13], #8
    st1 {vTMP3.D}[0], [xTMP14], #8
    st1 {vTMP3.D}[1], [xTMP15], #8

    sub xTMP19, xTMP19, #8

GEN4:
    cmp xTMP19, #4
    b.lt FINISH
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    revb zTMP8.S, PRED32/M, zTMP8.S

    ld1 {vTMP0.S}[0], [xTMP16], #4
    ld1 {vTMP0.S}[1], [xTMP1], #4
    ld1 {vTMP0.S}[2], [xTMP2], #4
    ld1 {vTMP0.S}[3], [xTMP3], #4
    ld1 {vTMP1.S}[0], [xTMP4], #4
    ld1 {vTMP1.S}[1], [xTMP5], #4
    ld1 {vTMP1.S}[2], [xTMP6], #4
    ld1 {vTMP1.S}[3], [xTMP7], #4

    compact zTMP4.S, PRED32_HALF2, zTMP8.S

    eor vTMP0.16B, vTMP0.16B, vTMP8.16B
    eor vTMP1.16B, vTMP1.16B, vTMP4.16B

    st1 {vTMP0.S}[0], [xTMP8], #4
    st1 {vTMP0.S}[1], [xTMP9], #4
    st1 {vTMP0.S}[2], [xTMP10], #4
    st1 {vTMP0.S}[3], [xTMP11], #4
    st1 {vTMP1.S}[0], [xTMP12], #4
    st1 {vTMP1.S}[1], [xTMP13], #4
    st1 {vTMP1.S}[2], [xTMP14], #4
    st1 {vTMP1.S}[3], [xTMP15], #4

FINISH:
    SNOW3G_STORE_CTX_8_SVE256 x0
    mov xTMP17, x1
    mov xTMP18, x2
    stp xTMP16, xTMP1, [xTMP17], #16
    stp xTMP2, xTMP3, [xTMP17], #16
    stp xTMP4, xTMP5, [xTMP17], #16
    stp xTMP6, xTMP7, [xTMP17]
    stp xTMP8, xTMP9, [xTMP18], #16
    stp xTMP10, xTMP11, [xTMP18], #16
    stp xTMP12, xTMP13, [xTMP18], #16
    stp xTMP14, xTMP15, [xTMP18]

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
    ret
END_FUNC(snow3g_f8_8_buffer_stream_aarch64_sve256_asm)

#else

/*
 * snow3g_f8_8_buffer_stream_aarch64_sve256_asm(void *ctx,
 *                                           void **in,
 *                                           void **out,
 *                                           uint32_t lengthInBytes)
 *
 * NOTE: This implementation uses SVE gather loads and scatter stores,
 *       but its performance is 10% worse than the implementation using
 *       contiguous loads and stores.
 */
START_FUNC(snow3g_f8_8_buffer_stream_aarch64_sve256_asm)
    // Fast exit: nothing to process for zero-length input.
    cbz x3, FINISH_GS
    FUNC_SCALAR_SAVE
    // PRED8/PRED32: all-true byte/word predicates.
    // PRED32_HALF1 selects the low 4 words of a 256-bit vector,
    // PRED32_HALF2 (its complement) selects the high 4 words.
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE
    // Load the inverse AES ShiftRows permutation table used by the
    // SNOW3G keystream macros.
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]

    // Load the LFSR/FSM state of all 8 lanes from ctx (x0).
    SNOW3G_LOAD_CTX_8_SVE256 x0
    // Pack the low 32 bits of the 8 input pointers in[0..7] into
    // zTMP10.S (uzp1 keeps the even, i.e. low, words of each pair);
    // these become UXTW offsets for the gather loads below.
    ld1d {zTMP10.D}, PRED32/Z, [x1]
    ld1d {zTMP0.D}, PRED32/Z, [x1, #1, MUL VL]
    uzp1 zTMP10.S, zTMP10.S, zTMP0.S
    // Likewise for the 8 output pointers out[0..7] -> scatter offsets.
    ld1d {zTMP11.D}, PRED32/Z, [x2]
    ld1d {zTMP0.D}, PRED32/Z, [x2, #1, MUL VL]
    uzp1 zTMP11.S, zTMP11.S, zTMP0.S

    // Gather/scatter bases: in[0] and out[0] with their low 32 bits
    // cleared (BFM inserts bits [31:0] of XZR).
    // NOTE(review): this assumes all 8 in (resp. out) buffers share the
    // same upper 32 address bits as buffer 0 -- confirm the callers
    // guarantee this.
    ldr xTMP18, [x1]
    ldr xTMP19, [x2]
    bfm xTMP18, XZR, #0, #31
    bfm xTMP19, XZR, #0, #31
    // xTMP17 = bytes generated so far.
    mov xTMP17, #0

GEN4_LOOP:
    // Produce 4 keystream bytes per lane, byte-swapped to big endian.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    revb zTMP8.S, PRED32/M, zTMP8.S

    // out[i] = in[i] XOR keystream, one 32-bit word per lane.
    ld1w {zTMP9.S}, PRED32/Z, [xTMP18, zTMP10.S, UXTW]
    eor zTMP9.D, zTMP9.D, zTMP8.D
    st1w {zTMP9.S}, PRED32, [xTMP19, zTMP11.S, UXTW]
    add xTMP18, xTMP18, #4
    add xTMP19, xTMP19, #4
    add xTMP17, xTMP17, #4
    cmp xTMP17, x3
    b.lt GEN4_LOOP

    // Write the updated LFSR/FSM state back to ctx.
    SNOW3G_STORE_CTX_8_SVE256 x0

    // Advance the 8 stored in/out pointers by lengthInBytes so the
    // caller can stream subsequent chunks.
    // NOTE(review): x3 carries a uint32_t argument; this relies on the
    // upper 32 bits of x3 being zero -- confirm against callers.
    cpy zTMP9.D, PRED32/M, x3
    ld1d {zTMP10.D}, PRED32/Z, [x1]
    ld1d {zTMP11.D}, PRED32/Z, [x1, #1, MUL VL]
    add zTMP10.D, zTMP10.D, zTMP9.D
    add zTMP11.D, zTMP11.D, zTMP9.D
    st1d {zTMP10.D}, PRED32, [x1]
    st1d {zTMP11.D}, PRED32, [x1, #1, MUL VL]

    ld1d {zTMP10.D}, PRED32/Z, [x2]
    ld1d {zTMP11.D}, PRED32/Z, [x2, #1, MUL VL]
    add zTMP10.D, zTMP10.D, zTMP9.D
    add zTMP11.D, zTMP11.D, zTMP9.D
    st1d {zTMP10.D}, PRED32, [x2]
    st1d {zTMP11.D}, PRED32, [x2, #1, MUL VL]

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
FINISH_GS:
    ret
END_FUNC(snow3g_f8_8_buffer_stream_aarch64_sve256_asm)

#endif

/* OUT = IN XOR KEY
 * Use this macro to emit the final output bytes when LEN is less
 * than 16 (handles tails of 8/4/2/1 bytes in turn).
 * Clobbers: vTMP0, and shifts consumed bytes out of KEY */
.macro X_BYTE_STREAM IN, OUT, KEY, LEN
    // 8-byte step: XOR and store one doubleword, then shift the high
    // keystream doubleword down so the unused bytes stay in KEY.D[0].
    cmp \LEN\(), #8
    b.lt 4f
    ld1 {vTMP0.D}[0], [\IN\()], #8
    eor vTMP0.16B, vTMP0.16B, \KEY\().16B
    st1 {vTMP0.D}[0], [\OUT\()], #8
    mov \KEY\().D[0], \KEY\().D[1]
    sub \LEN\(), \LEN\(), #8
4:
    // 4-byte step; shift the next keystream word down afterwards.
    cmp \LEN\(), #4
    b.lt 2f
    ld1 {vTMP0.S}[0], [\IN\()], #4
    eor vTMP0.16B, vTMP0.16B, \KEY\().16B
    st1 {vTMP0.S}[0], [\OUT\()], #4
    mov \KEY\().S[0], \KEY\().S[1]
    sub \LEN\(), \LEN\(), #4
2:
    // 2-byte step; shift the next keystream halfword down afterwards.
    cmp \LEN\(), #2
    b.lt 1f
    ld1 {vTMP0.H}[0], [\IN\()], #2
    eor vTMP0.16B, vTMP0.16B, \KEY\().16B
    st1 {vTMP0.H}[0], [\OUT\()], #2
    mov \KEY\().H[0], \KEY\().H[1]
    sub \LEN\(), \LEN\(), #2
1:
    // Final single byte, if any.
    cmp \LEN\(), #1
    b.lt 0f
    ld1 {vTMP0.B}[0], [\IN\()], #1
    eor vTMP0.16B, vTMP0.16B, \KEY\().16B
    st1 {vTMP0.B}[0], [\OUT\()], #1
0:
.endm

/*
 * GEN_1_TO_8_LANES LANE_NR, SUFFIX
 *
 * Produce cipher output for the LANE_NR longest lanes, 16 bytes per
 * lane per iteration, until lane LANE_NR has fewer than 16 bytes
 * remaining; then emit that lane's sub-16-byte tail via X_BYTE_STREAM.
 * Lanes must be pre-sorted by decreasing length
 * (lane1.len >= lane2.len >= ... >= lane8.len), and the macro is
 * instantiated with LANE_NR = 8 down to 1 so lanes drop out one at a
 * time as they finish.
 *
 * On entry and on exit zTMP8..zTMP11 hold 16 bytes of big-endian
 * keystream per lane (two lanes per 256-bit register: even-indexed
 * lane in the low 128 bits, odd-indexed lane in the high 128 bits).
 * x4 points to the array of per-lane lengths; LEN (wTMP18) counts the
 * keystream bytes consumed once the current 16-byte block is used.
 * SUFFIX keeps the generated labels unique per call site.
 */
.macro GEN_1_TO_8_LANES LANE_NR SUFFIX
    // CURR_LEN and LEFT_LEN deliberately alias the same register:
    // LEFT_LEN is only computed after CURR_LEN's last use.
    CURR_LEN .req wTMP17
    LEFT_LEN .req wTMP17
    LEN .req wTMP18
    IN1 .req xTMP1
    IN2 .req xTMP2
    IN3 .req xTMP3
    IN4 .req xTMP4
    IN5 .req xTMP5
    IN6 .req xTMP6
    IN7 .req xTMP7
    IN8 .req xTMP8
    OUT1 .req xTMP9
    OUT2 .req xTMP10
    OUT3 .req xTMP11
    OUT4 .req xTMP12
    OUT5 .req xTMP13
    OUT6 .req xTMP14
    OUT7 .req xTMP15
    OUT8 .req xTMP16
    // lanes are sorted by length decrease
    // lane1.len >= lane2.len >= .... lane8.len
    // load length
    ldr CURR_LEN, [x4, #(4*(\LANE_NR\()-1))]
GEN_\LANE_NR\()LANES\SUFFIX\():
    // Stop producing full blocks once this lane's total length is less
    // than the bytes that would be consumed after this block.
    cmp CURR_LEN, LEN
    b.lt FINISH_\LANE_NR\()TH\SUFFIX\()

    // load 16byte x LANE_NR input
    ld1 {vTMP0.4S}, [IN1], #16
.ifge \LANE_NR - 2
    ld1 {vTMP4.4S}, [IN2], #16
.ifge \LANE_NR - 3
    ld1 {vTMP1.4S}, [IN3], #16
.ifge \LANE_NR - 4
    ld1 {vTMP5.4S}, [IN4], #16
.ifge \LANE_NR - 5
    ld1 {vTMP2.4S}, [IN5], #16
.ifge \LANE_NR - 6
    ld1 {vTMP6.4S}, [IN6], #16
.ifge \LANE_NR - 7
    ld1 {vTMP3.4S}, [IN7], #16
.ifge \LANE_NR - 8
    ld1 {vTMP7.4S}, [IN8], #16
.endif
.endif
.endif
.endif
.endif
.endif
.endif

    // merge 16byte x LANE_NR input into at most 4 SVE registers
    // Two INSRs shift each even-lane NEON load (low 128 bits) up into
    // the high 128 bits of its SVE register; the x0 value inserted at
    // the bottom is a don't-care, since the merging MOVs below keep
    // the low half from zTMP0..zTMP3 (odd lanes) and only take the
    // high half (PRED32_HALF2) from zTMP4..zTMP7 (even lanes).
.rept 2
.ifge \LANE_NR - 2
    insr zTMP4.D, x0
.ifge \LANE_NR - 4
    insr zTMP5.D, x0
.ifge \LANE_NR - 6
    insr zTMP6.D, x0
.ifge \LANE_NR - 8
    insr zTMP7.D, x0
.endif
.endif
.endif
.endr
.ifge \LANE_NR - 2
    mov zTMP0.S, PRED32_HALF2/M, zTMP4.S
.ifge \LANE_NR - 4
    mov zTMP1.S, PRED32_HALF2/M, zTMP5.S
.ifge \LANE_NR - 6
    mov zTMP2.S, PRED32_HALF2/M, zTMP6.S
.ifge \LANE_NR - 8
    mov zTMP3.S, PRED32_HALF2/M, zTMP7.S
.endif
.endif
.endif
.endif

    // XOR with generated keystream
    eor zTMP0.D, zTMP0.D, zTMP8.D
.ifge \LANE_NR - 3
    eor zTMP1.D, zTMP1.D, zTMP9.D
.ifge \LANE_NR - 5
    eor zTMP2.D, zTMP2.D, zTMP10.D
.ifge \LANE_NR - 7
    eor zTMP3.D, zTMP3.D, zTMP11.D
.endif
.endif
.endif

    // compact SVE register into NEON register for store
    // (COMPACT moves the active high 128 bits down to the low 128
    // bits so st1 can store the even lanes' results).
.ifge \LANE_NR - 2
    compact zTMP4.S, PRED32_HALF2, zTMP0.S
.ifge \LANE_NR - 4
    compact zTMP5.S, PRED32_HALF2, zTMP1.S
.ifge \LANE_NR - 6
    compact zTMP6.S, PRED32_HALF2, zTMP2.S
.ifge \LANE_NR - 8
    compact zTMP7.S, PRED32_HALF2, zTMP3.S
.endif
.endif
.endif
.endif

    // store to 16byte x LANE_NR output
    st1 {vTMP0.4S}, [OUT1], #16
.ifge \LANE_NR - 2
    st1 {vTMP4.4S}, [OUT2], #16
.ifge \LANE_NR - 3
    st1 {vTMP1.4S}, [OUT3], #16
.ifge \LANE_NR - 4
    st1 {vTMP5.4S}, [OUT4], #16
.ifge \LANE_NR - 5
    st1 {vTMP2.4S}, [OUT5], #16
.ifge \LANE_NR - 6
    st1 {vTMP6.4S}, [OUT6], #16
.ifge \LANE_NR - 7
    st1 {vTMP3.4S}, [OUT7], #16
.ifge \LANE_NR - 8
    st1 {vTMP7.4S}, [OUT8], #16
.endif
.endif
.endif
.endif
.endif
.endif
.endif

    // update number of generated output
    add LEN, LEN, #16

    // generate 16byte x 8lanes keystream
    // Four 4-byte rounds are interleaved with ZIPs so that each of
    // zTMP8..zTMP11 holds 16 contiguous keystream bytes for two lanes,
    // then byte-swapped to big endian.  Registers not needed for the
    // remaining lanes are skipped.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP10
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP11
    zip1 zTMP0.S, zTMP8.S, zTMP9.S
    zip2 zTMP1.S, zTMP8.S, zTMP9.S
    zip1 zTMP2.S, zTMP10.S, zTMP11.S
    zip2 zTMP3.S, zTMP10.S, zTMP11.S
    zip1 zTMP8.D, zTMP0.D, zTMP2.D
.ifge \LANE_NR - 3
    zip2 zTMP9.D, zTMP0.D, zTMP2.D
.ifge \LANE_NR - 5
    zip1 zTMP10.D, zTMP1.D, zTMP3.D
.ifge \LANE_NR - 7
    zip2 zTMP11.D, zTMP1.D, zTMP3.D
.endif
.endif
.endif

    revb zTMP8.S, PRED32/M, zTMP8.S
.ifge \LANE_NR - 3
    revb zTMP9.S, PRED32/M, zTMP9.S
.ifge \LANE_NR - 5
    revb zTMP10.S, PRED32/M, zTMP10.S
.ifge \LANE_NR - 7
    revb zTMP11.S, PRED32/M, zTMP11.S
.endif
.endif
.endif
    b GEN_\LANE_NR\()LANES\SUFFIX\()

FINISH_\LANE_NR\()TH\SUFFIX\():
    // Tail bytes still owed to this lane from the current keystream
    // block: CURR_LEN - (LEN - 16), in the range [0, 16).
    add CURR_LEN, CURR_LEN, 16
    sub LEFT_LEN, CURR_LEN, LEN
    // Even-numbered lanes live in the high 128 bits of their
    // keystream register and must be compacted down first.
.if \LANE_NR == 8
    compact zTMP1.S, PRED32_HALF2, zTMP11.S
    X_BYTE_STREAM IN8, OUT8, vTMP1, LEFT_LEN
.endif
.if \LANE_NR == 7
    X_BYTE_STREAM IN7, OUT7, vTMP11, LEFT_LEN
.endif
.if \LANE_NR == 6
    compact zTMP1.S, PRED32_HALF2, zTMP10.S
    X_BYTE_STREAM IN6, OUT6, vTMP1, LEFT_LEN
.endif
.if \LANE_NR == 5
    X_BYTE_STREAM IN5, OUT5, vTMP10, LEFT_LEN
.endif
.if \LANE_NR == 4
    compact zTMP1.S, PRED32_HALF2, zTMP9.S
    X_BYTE_STREAM IN4, OUT4, vTMP1, LEFT_LEN
.endif
.if \LANE_NR == 3
    X_BYTE_STREAM IN3, OUT3, vTMP9, LEFT_LEN
.endif
.if \LANE_NR == 2
    compact zTMP1.S, PRED32_HALF2, zTMP8.S
    X_BYTE_STREAM IN2, OUT2, vTMP1, LEFT_LEN
.endif
.if \LANE_NR == 1
    X_BYTE_STREAM IN1, OUT1, vTMP8, LEFT_LEN
.endif
.endm

/*
 * snow3g_f8_8_buffer_aarch64_sve256_asm(void *key,
 *                                    void **iv,
 *                                    void **in,
 *                                    void **out,
 *                                    uint32_t lengthInBytes[])
 *
 */
START_FUNC(snow3g_f8_8_buffer_aarch64_sve256_asm)
    FUNC_SCALAR_SAVE
    // All-true byte/word predicates, plus low-half (first 4 words) /
    // high-half (last 4 words) predicates for a 256-bit vector.
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE

    // Inverse AES ShiftRows table for the keystream macros.
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]
    // key
    mov xTMP17, x0
    // iv
    mov xTMP18, x1
    ldp xTMP8, xTMP9, [xTMP18], #16
    ldp xTMP10, xTMP11, [xTMP18], #16
    ldp xTMP12, xTMP13, [xTMP18], #16
    ldp xTMP14, xTMP15, [xTMP18]

    // Initialize all 8 lanes: the single key (xTMP17) is replicated
    // for every lane; each lane gets its own IV.
    SNOW3G_INITIALIZE_8_SVE256_FIRST xTMP17 xTMP17 xTMP17 xTMP17 xTMP17 xTMP17 xTMP17 xTMP17\
                                  xTMP8 xTMP9 xTMP10 xTMP11 xTMP12 xTMP13 xTMP14 xTMP15
    SNOW3G_INITIALIZE_8_SVE256_SECOND

    mov xTMP17, x2
    mov xTMP18, x3

    // in
    ldp xTMP1, xTMP2, [xTMP17], #16
    ldp xTMP3, xTMP4, [xTMP17], #16
    ldp xTMP5, xTMP6, [xTMP17], #16
    ldp xTMP7, xTMP8, [xTMP17]
    // out
    ldp xTMP9, xTMP10, [xTMP18], #16
    ldp xTMP11, xTMP12, [xTMP18], #16
    ldp xTMP13, xTMP14, [xTMP18], #16
    ldp xTMP15, xTMP16, [xTMP18]

    // LEN (wTMP18) = keystream bytes consumed once the first 16-byte
    // block is used; see GEN_1_TO_8_LANES.
    mov wTMP18, #16

    // Generate the first 16 keystream bytes per lane and transpose
    // (ZIPs) so each of zTMP8..zTMP11 holds 16 contiguous big-endian
    // bytes for two lanes.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP10
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP11
    zip1 zTMP0.S, zTMP8.S, zTMP9.S
    zip2 zTMP1.S, zTMP8.S, zTMP9.S
    zip1 zTMP2.S, zTMP10.S, zTMP11.S
    zip2 zTMP3.S, zTMP10.S, zTMP11.S
    zip1 zTMP8.D, zTMP0.D, zTMP2.D
    zip2 zTMP9.D, zTMP0.D, zTMP2.D
    zip1 zTMP10.D, zTMP1.D, zTMP3.D
    zip2 zTMP11.D, zTMP1.D, zTMP3.D
    revb zTMP8.S, PRED32/M, zTMP8.S
    revb zTMP9.S, PRED32/M, zTMP9.S
    revb zTMP10.S, PRED32/M, zTMP10.S
    revb zTMP11.S, PRED32/M, zTMP11.S

    // Drain lanes from longest to shortest; lengths in x4 are expected
    // sorted in decreasing order (see GEN_1_TO_8_LANES).
    GEN_1_TO_8_LANES 8 _SINGLE_KEY
    GEN_1_TO_8_LANES 7 _SINGLE_KEY
    GEN_1_TO_8_LANES 6 _SINGLE_KEY
    GEN_1_TO_8_LANES 5 _SINGLE_KEY
    GEN_1_TO_8_LANES 4 _SINGLE_KEY
    GEN_1_TO_8_LANES 3 _SINGLE_KEY
    GEN_1_TO_8_LANES 2 _SINGLE_KEY
    GEN_1_TO_8_LANES 1 _SINGLE_KEY

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
    ret
END_FUNC(snow3g_f8_8_buffer_aarch64_sve256_asm)

/*
 * snow3g_f8_8_buffer_multikey_aarch64_sve256_asm(void **key,
 *                                             void **iv,
 *                                             void **in,
 *                                             void **out,
 *                                             uint32_t lengthInBytes[])
 *
 */
START_FUNC(snow3g_f8_8_buffer_multikey_aarch64_sve256_asm)
    FUNC_SCALAR_SAVE
    // All-true byte/word predicates, plus low-half/high-half word
    // predicates for a 256-bit vector.
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE

    // Inverse AES ShiftRows table for the keystream macros.
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]
    // key
    mov xTMP17, x0
    // iv
    mov xTMP18, x1

    // key
    ldp xTMP0, xTMP1, [xTMP17], #16
    ldp xTMP2, xTMP3, [xTMP17], #16
    ldp xTMP4, xTMP5, [xTMP17], #16
    ldp xTMP6, xTMP7, [xTMP17]
    // iv
    ldp xTMP8, xTMP9, [xTMP18], #16
    ldp xTMP10, xTMP11, [xTMP18], #16
    ldp xTMP12, xTMP13, [xTMP18], #16
    ldp xTMP14, xTMP15, [xTMP18]

    // Initialize all 8 lanes, each with its own key and IV (contrast
    // with the single-key variant, which replicates one key).
    SNOW3G_INITIALIZE_8_SVE256_FIRST xTMP0 xTMP1 xTMP2 xTMP3 xTMP4 xTMP5 xTMP6 xTMP7\
                                  xTMP8 xTMP9 xTMP10 xTMP11 xTMP12 xTMP13 xTMP14 xTMP15
    SNOW3G_INITIALIZE_8_SVE256_SECOND

    mov xTMP17, x2
    mov xTMP18, x3

    // in
    ldp xTMP1, xTMP2, [xTMP17], #16
    ldp xTMP3, xTMP4, [xTMP17], #16
    ldp xTMP5, xTMP6, [xTMP17], #16
    ldp xTMP7, xTMP8, [xTMP17]
    // out
    ldp xTMP9, xTMP10, [xTMP18], #16
    ldp xTMP11, xTMP12, [xTMP18], #16
    ldp xTMP13, xTMP14, [xTMP18], #16
    ldp xTMP15, xTMP16, [xTMP18]

    // LEN (wTMP18) = keystream bytes consumed once the first 16-byte
    // block is used; see GEN_1_TO_8_LANES.
    mov wTMP18, #16

    // Generate the first 16 keystream bytes per lane and transpose
    // (ZIPs) so each of zTMP8..zTMP11 holds 16 contiguous big-endian
    // bytes for two lanes.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP10
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP11
    zip1 zTMP0.S, zTMP8.S, zTMP9.S
    zip2 zTMP1.S, zTMP8.S, zTMP9.S
    zip1 zTMP2.S, zTMP10.S, zTMP11.S
    zip2 zTMP3.S, zTMP10.S, zTMP11.S
    zip1 zTMP8.D, zTMP0.D, zTMP2.D
    zip2 zTMP9.D, zTMP0.D, zTMP2.D
    zip1 zTMP10.D, zTMP1.D, zTMP3.D
    zip2 zTMP11.D, zTMP1.D, zTMP3.D
    revb zTMP8.S, PRED32/M, zTMP8.S
    revb zTMP9.S, PRED32/M, zTMP9.S
    revb zTMP10.S, PRED32/M, zTMP10.S
    revb zTMP11.S, PRED32/M, zTMP11.S

    // Drain lanes from longest to shortest; lengths in x4 are expected
    // sorted in decreasing order (see GEN_1_TO_8_LANES).
    GEN_1_TO_8_LANES 8 _MULTI_KEY
    GEN_1_TO_8_LANES 7 _MULTI_KEY
    GEN_1_TO_8_LANES 6 _MULTI_KEY
    GEN_1_TO_8_LANES 5 _MULTI_KEY
    GEN_1_TO_8_LANES 4 _MULTI_KEY
    GEN_1_TO_8_LANES 3 _MULTI_KEY
    GEN_1_TO_8_LANES 2 _MULTI_KEY
    GEN_1_TO_8_LANES 1 _MULTI_KEY

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
    ret
END_FUNC(snow3g_f8_8_buffer_multikey_aarch64_sve256_asm)

/*
 * snow3g_f9_8_buffer_keystream_aarch64_sve256_asm(void *pCtx,
 *                                              uint32_t* ks)
 *
 */
START_FUNC(snow3g_f9_8_buffer_keystream_aarch64_sve256_asm)
    FUNC_SCALAR_SAVE
    // All-true byte/word predicates, plus low-half/high-half word
    // predicates for a 256-bit vector.
    ptrue PRED8.B, ALL
    ptrue PRED32.S, ALL
    ptrue PRED32_HALF1.S, VL4
    not PRED32_HALF2.B, PRED32/Z, PRED32_HALF1.B
    FUNC_VECTOR_SAVE
    // Inverse AES ShiftRows table for the keystream macros.
    adrp xTMP0, n_inv_aes_shift_row
    add xTMP0, xTMP0, #:lo12:n_inv_aes_shift_row
    ld1b {zINV_SHIFT_ROW.B}, PRED8/Z, [xTMP0]

    // Per-buffer output pointers: ks (x1) is laid out as 8 consecutive
    // 5-word (20-byte) keystream regions, one per buffer.
    mov xTMP8, x1
    add xTMP9, xTMP8, #20
    add xTMP10, xTMP9, #20
    add xTMP11, xTMP10, #20
    add xTMP12, xTMP11, #20
    add xTMP13, xTMP12, #20
    add xTMP14, xTMP13, #20
    add xTMP15, xTMP14, #20

    // Load the LFSR/FSM state of all 8 lanes from pCtx (x0).
    SNOW3G_LOAD_CTX_8_SVE256 x0

    // Generate 4 keystream words per lane and transpose (ZIPs) so each
    // of zTMP8..zTMP11 holds the 4 words of two lanes: even lane in
    // the low 128 bits, odd lane in the high 128 bits.  The odd lanes
    // are compacted down into zTMP4..zTMP7 for the NEON stores.
    // NOTE: unlike the f8 paths, no REVB byte-swap is applied here.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP9
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP10
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP11
    zip1 zTMP0.S, zTMP8.S, zTMP9.S
    zip2 zTMP1.S, zTMP8.S, zTMP9.S
    zip1 zTMP2.S, zTMP10.S, zTMP11.S
    zip2 zTMP3.S, zTMP10.S, zTMP11.S
    zip1 zTMP8.D, zTMP0.D, zTMP2.D
    zip2 zTMP9.D, zTMP0.D, zTMP2.D
    zip1 zTMP10.D, zTMP1.D, zTMP3.D
    zip2 zTMP11.D, zTMP1.D, zTMP3.D
    compact zTMP4.S, PRED32_HALF2, zTMP8.S
    compact zTMP5.S, PRED32_HALF2, zTMP9.S
    compact zTMP6.S, PRED32_HALF2, zTMP10.S
    compact zTMP7.S, PRED32_HALF2, zTMP11.S

    // Store keystream words ks[0..3] for each of the 8 buffers.
    st1 {vTMP8.4S}, [xTMP8], #16
    st1 {vTMP4.4S}, [xTMP9], #16
    st1 {vTMP9.4S}, [xTMP10], #16
    st1 {vTMP5.4S}, [xTMP11], #16
    st1 {vTMP10.4S}, [xTMP12], #16
    st1 {vTMP6.4S}, [xTMP13], #16
    st1 {vTMP11.4S}, [xTMP14], #16
    st1 {vTMP7.4S}, [xTMP15], #16

    // Fifth keystream word per lane: one more round, lanes 0..3 in the
    // low half (vTMP8), lanes 4..7 compacted into vTMP4.
    SNOW3G_KEYSTREAM_8_4_SVE256 zTMP8
    compact zTMP4.S, PRED32_HALF2, zTMP8.S

    // Store ks[4] for each of the 8 buffers.
    st1 {vTMP8.S}[0], [xTMP8], #4
    st1 {vTMP8.S}[1], [xTMP9], #4
    st1 {vTMP8.S}[2], [xTMP10], #4
    st1 {vTMP8.S}[3], [xTMP11], #4
    st1 {vTMP4.S}[0], [xTMP12], #4
    st1 {vTMP4.S}[1], [xTMP13], #4
    st1 {vTMP4.S}[2], [xTMP14], #4
    st1 {vTMP4.S}[3], [xTMP15], #4

    FUNC_VECTOR_RESTORE
    FUNC_SCALAR_RESTORE
    ret
END_FUNC(snow3g_f9_8_buffer_keystream_aarch64_sve256_asm)