/*******************************************************************************
 Copyright (c) 2021-2023 Arm  Corporation All rights reserved.

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   * Neither the name of Arm Corporation nor the names of its contributors
     may be used to endorse or promote products derived from this software
     without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/

#include "zuc_sbox.inc"
#include "zuc_common.inc"

#ifndef	ZUC_CIPHER_4
#define	ZUC_CIPHER_4		asm_ZucCipher_4_aarch64
#define	ZUC128_INIT_4		asm_ZucInitialization_4_aarch64
#define	ZUC256_INIT			asm_Zuc256Initialization_aarch64
#define	ZUC256_INIT_4		asm_Zuc256Initialization_4_aarch64
#define	ZUC_KEYGEN16B_4		asm_ZucGenKeystream16B_4_aarch64
#define	ZUC_KEYGEN8B_4		asm_ZucGenKeystream8B_4_aarch64
#define	ZUC_KEYGEN4B_4		asm_ZucGenKeystream4B_4_aarch64
#define	ZUC_EIA3ROUND16B	asm_Eia3Round16B_aarch64
#define	ZUC_EIA3REMAINDER	asm_Eia3Remainder_aarch64
#define	ZUC_XORKEYSTREAM16B	asm_XorKeyStream16B_aarch64
#define	ZUC128_AUTH_4		asm_ZucAuth_4_aarch64
#define	ZUC256_AUTH_4		asm_Zuc256Auth_4_aarch64
#endif

#define	IMB_FEATURE_PMULL	(1ULL << 34)

.arch armv8-a+crypto

.section .data

// Constants to be used to initialize the LFSR registers
// This table contains four different sets of constants:
// 0-63 bytes: Encryption
// 64-127 bytes: Authentication with tag size = 4
// 128-191 bytes: Authentication with tag size = 8
// 192-255 bytes: Authentication with tag size = 16
.align	8
.type	EK256_d64, %object
// ZUC-256 d_i constants (7-bit values from the ZUC-256 specification),
// pre-shifted into bits 22:16 of each 32-bit word so they can be OR-ed
// directly into the assembled LFSR words.  Four 64-byte sets, selected
// by (tag-size dependent) offset in ZUC256_INIT / ZUC_INIT_4:
//   +0 encryption, +64 tag=4B, +128 tag=8B, +192 tag=16B.
EK256_d64:
.word	0x00220000, 0x002F0000, 0x00240000, 0x002A0000
.word	0x006D0000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00520000, 0x00100000, 0x00300000
.word	0x00220000, 0x002F0000, 0x00250000, 0x002A0000
.word	0x006D0000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00520000, 0x00100000, 0x00300000
.word	0x00230000, 0x002F0000, 0x00240000, 0x002A0000
.word	0x006D0000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00520000, 0x00100000, 0x00300000
.word	0x00230000, 0x002F0000, 0x00250000, 0x002A0000
.word	0x006D0000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00400000, 0x00400000, 0x00400000
.word	0x00400000, 0x00520000, 0x00100000, 0x00300000
.size	EK256_d64,.-EK256_d64

.align	6
.type	Ek_d, %object
// ZUC-128 15-bit d_i constants, pre-shifted into bits 22:8 of each
// 32-bit word: s_i = k_i(30:23) | d_i(22:8) | iv_i(7:0).  One entry per
// LFSR register s0..s15 (consumed 4-at-a-time by INIT_LFSR_128).
Ek_d:
.word	0x0044D700, 0x0026BC00, 0x00626B00, 0x00135E00
.word	0x00578900, 0x0035E200, 0x00713500, 0x0009AF00
.word	0x004D7800, 0x002F1300, 0x006BC400, 0x001AF100
.word	0x005E2600, 0x003C4D00, 0x00789A00, 0x0047AC00
.size	Ek_d,.-Ek_d

.align	6
.type	shuf_mask_key, %object
// TBL shuffle masks: place key byte k_i into the top byte (bits 31:24)
// of 32-bit word i, all other bytes forced to zero (index 0xFF).
// Rows are consumed 16 bytes at a time by INIT_LFSR_128.
shuf_mask_key:
.word	0x00FFFFFF, 0x01FFFFFF, 0x02FFFFFF, 0x03FFFFFF
.word	0x04FFFFFF, 0x05FFFFFF, 0x06FFFFFF, 0x07FFFFFF
.word	0x08FFFFFF, 0x09FFFFFF, 0x0AFFFFFF, 0x0BFFFFFF
.word	0x0CFFFFFF, 0x0DFFFFFF, 0x0EFFFFFF, 0x0FFFFFFF
.size	shuf_mask_key,.-shuf_mask_key

.align	6
.type	shuf_mask_iv, %object
// TBL shuffle masks: place IV byte iv_i into the low byte (bits 7:0)
// of 32-bit word i, all other bytes forced to zero (index 0xFF).
shuf_mask_iv:
.word	0xFFFFFF00, 0xFFFFFF01, 0xFFFFFF02, 0xFFFFFF03
.word	0xFFFFFF04, 0xFFFFFF05, 0xFFFFFF06, 0xFFFFFF07
.word	0xFFFFFF08, 0xFFFFFF09, 0xFFFFFF0A, 0xFFFFFF0B
.word	0xFFFFFF0C, 0xFFFFFF0D, 0xFFFFFF0E, 0xFFFFFF0F
.size	shuf_mask_iv,.-shuf_mask_iv

.align	4
.type	KS_reorder, %object
// TBL byte-index table used by the EIA3 ROUND macro (loaded into
// vKS_RO) to rearrange keystream words into the overlapping
// [hi:lo lo:?? ...] pattern needed by the carry-less multiplies
// (see the "KS bits [...]" comments in ROUND).
KS_reorder:
.quad	0x0302010007060504, 0x070605040b0a0908
.size	KS_reorder,.-KS_reorder

.text
#define	OFS_R1	(16*16)
#define	OFS_R2	(OFS_R1 + 16)
#define	OFS_X0	(OFS_R2 + 16)
#define	OFS_X1	(OFS_X0 + 16)
#define	OFS_X2	(OFS_X1 + 16)

.altmacro

// Global register aliases (declare_register comes from an included file).
// xTMP is callee-saved x23; it is preserved by FUNC_SAVE/FUNC_RESTORE.
declare_register	xTMP,	x23
/* v0-v15 are assigned to LFSR0-15, should not be reused in CIPHERNx4B_4 */
/* v24-v26 are assigned to BRCX0-2, could be reused in CIPHERNx4B_4 */
declare_register	vBRCX0,	v24
declare_register	vBRCX1,	v25
declare_register	vBRCX2,	v26
/* v27-v28 are assigned to FR1-2, should not be reused in CIPHERNx4B_4 */
// vFR1/vFR2 hold the ZUC F-function memory words R1/R2 for all 4 lanes;
// qFR1/qFR2 are the same registers viewed as 128-bit q regs for ldr/str.
declare_register	vFR1,	v27
declare_register	vFR2,	v28
declare_register	qFR1,	q27
declare_register	qFR2,	q28

//
// FUNC_SAVE: build a 160-byte frame and save every AAPCS64 callee-saved
// register this file may clobber: x29/x30 at [sp,0], the low 64 bits of
// d8-d15 (all the ABI requires), then x19-x28.
// Offsets must stay in lock-step with FUNC_RESTORE below.
//
.macro	FUNC_SAVE
	stp	x29, x30, [sp, -160]!
	stp	d8, d9, [sp, 16]
	stp	d10, d11, [sp, 32]
	stp	d12, d13, [sp, 48]
	stp	d14, d15, [sp, 64]
	stp	x19, x20, [sp, 80]
	stp	x21, x22, [sp, 96]
	stp	x23, x24, [sp, 112]
	stp	x25, x26, [sp, 128]
	stp	x27, x28, [sp, 144]
.endm

//
// FUNC_RESTORE: reload the registers saved by FUNC_SAVE (same offsets)
// and pop the 160-byte frame.  Must exactly mirror FUNC_SAVE.
//
.macro	FUNC_RESTORE
	ldp	d8, d9,[sp, 16]
	ldp	d10, d11, [sp, 32]
	ldp	d12, d13, [sp, 48]
	ldp	d14, d15, [sp, 64]
	ldp	x19, x20, [sp, 80]
	ldp	x21, x22, [sp, 96]
	ldp	x23, x24, [sp, 112]
	ldp	x25, x26, [sp, 128]
	ldp	x27, x28, [sp, 144]
	ldp	x29, x30, [sp],160
.endm

//
// Initialize LFSR registers for a single lane, for ZUC-128
//
// This macro initializes 4 LFSR registers at a time.
// so it needs to be called four times.
//
// From spec, s_i (LFSR) registers need to be loaded as follows:
//
// For 0 <= i <= 15, let s_i= k_i || d_i || iv_i.
// Where k_i is each byte of the key, d_i is a 15-bit constant
// and iv_i is each byte of the IV.
//
// [in] KEY/IV        - v register NUMBERS holding 16 key / 16 IV bytes
// [in] SHUF_KEY      - v register with shuf_mask_key slice
// [in] SHUF_IV       - v register with shuf_mask_iv slice
// [in] EKD_MASK      - v register with Ek_d slice (d_i in bits 22:8)
// [out] LFSR         - v register NUMBER receiving 4 assembled s_i words
// [clobbered] XTMP   - v register used as scratch
//
.macro	INIT_LFSR_128 KEY, IV, SHUF_KEY, SHUF_IV, EKD_MASK, LFSR, XTMP
	tbl		v\LFSR\().16b, {v\KEY\().16b}, \SHUF_KEY\().16b    // k_i -> byte 3 of each word
	ushr	v\LFSR\().4s, v\LFSR\().4s, #1                     // k_i into bits 30:23
	tbl		\XTMP\().16b, {v\IV\().16b}, \SHUF_IV\().16b       // iv_i -> byte 0 of each word
	eor		v\LFSR\().16b, v\LFSR\().16b, \XTMP\().16b         // merge iv_i (disjoint bits)
	eor		v\LFSR\().16b, v\LFSR\().16b, \EKD_MASK\().16b     // merge d_i (disjoint bits)
.endm

// rot_mod32: vOUT = vIN rotated left by ROTATE within each 32-bit lane
// (ushr supplies the wrapped-around high bits, sli inserts the shifted
// low bits on top without disturbing them).
// NOTE(review): the vTMP parameter is never used in the body — kept only
// for call-site compatibility; confirm callers before removing it.
.macro	rot_mod32 vOUT, vIN, ROTATE, vTMP
	ushr	\vOUT\().4s, \vIN\().4s, 32-\ROTATE
	sli		\vOUT\().4s, \vIN\().4s, \ROTATE
.endm

// TRANSPOSE4_U32: 4x4 transpose of 32-bit elements across four v regs
// (register NUMBERS, not names).  On exit V_n holds element n of all
// four input vectors.  T_0..T_3 are scratch register numbers.
.macro	TRANSPOSE4_U32 V_0, V_1, V_2, V_3, T_0, T_1, T_2, T_3
	zip1	v\T_0\().4s, v\V_0\().4s, v\V_1\().4s    // T_0 = {b1 a1 b0 a0}
	zip2	v\T_1\().4s, v\V_0\().4s, v\V_1\().4s    // T_1 = {b3 a3 b2 a2}
	zip1	v\T_2\().4s, v\V_2\().4s, v\V_3\().4s    // T_2 = {d1 c1 d0 c0}
	zip2	v\T_3\().4s, v\V_2\().4s, v\V_3\().4s    // T_3 = {d3 c3 d2 c2}

	zip1	v\V_0\().2d, v\T_0\().2d, v\T_2\().2d    // V_0 = {d0 c0 b0 a0}
	zip2	v\V_1\().2d, v\T_0\().2d, v\T_2\().2d    // V_1 = {d1 c1 b1 a1}
	zip1	v\V_2\().2d, v\T_1\().2d, v\T_3\().2d    // V_2 = {d2 c2 b2 a2}
	zip2	v\V_3\().2d, v\T_1\().2d, v\T_3\().2d    // V_3 = {d3 c3 b3 a3}
.endm

// Thin wrappers that take register NUMBERS / computed offsets, so that
// .altmacro %expr arguments can be spliced in from .rept loops.

// vd = (v<n> >> rot) per 32-bit lane
.macro	USHR_4S vd, n, rot
	ushr	\vd\().4s, v\n\().4s, #\rot\()
.endm

// store q<i> at [addrreg + offset]
.macro	STR_Q i, addrreg, offset
	str		q\i\(), [\addrreg\(), #\offset\()]
.endm

// load q<i> from [addrreg + offset]
.macro	LDR_Q i, addrreg, offset
	ldr		q\i\(), [\addrreg\(), #\offset\()]
.endm

// vd = even 16-bit halfwords of v<n> (lo) interleaved with v<m>
.macro	TRN1_8H vd, n, m
	trn1	\vd\().8h, v\n\().8h, v\m\().8h
.endm

//
// bits_reorg4: ZUC bit-reorganization for 4 lanes at once.
// Builds BRC_X0..X2 (vBRCX0-2) — and optionally BRC_X3 — where each
// 32-bit word is (high 16 bits of one s_i || low 16 bits of another),
// picked from the LFSR registers v0-v15.  ROUND_NUM rotates the
// s_i -> v-register mapping instead of physically shifting the LFSR.
// [in] ROUND_NUM - virtual shift count (mod 16)
// [in] OUTPUT_X3 - when 1, also produce BRC_X3 into v\X3
// [clobbered] v20-v22 (and v29 when OUTPUT_X3 == 1)
//
.macro	bits_reorg4 ROUND_NUM, OUTPUT_X3=0, X3
	USHR_4S	v20, %((15 + \ROUND_NUM) % 16), 15     // s15 high bits -> low halfword
	USHR_4S	v21, %((9 + \ROUND_NUM) % 16), 15      // s9  high bits
	USHR_4S	v22, %((5 + \ROUND_NUM) % 16), 15      // s5  high bits
.if \OUTPUT_X3 == 1
	USHR_4S	v29, %((0 + \ROUND_NUM) % 16), 15      // s0  high bits
.endif

	TRN1_8H	vBRCX0, %((14 + \ROUND_NUM) % 16), 20  // X0 = s15H || s14L
	TRN1_8H	vBRCX1, 21, %((11 + \ROUND_NUM) % 16)  // X1 = s11L || s9H
	TRN1_8H	vBRCX2, 22, %((7 + \ROUND_NUM) % 16)   // X2 = s7L  || s5H
.if \OUTPUT_X3 == 1
	TRN1_8H	v\X3\(), 29, %((2 + \ROUND_NUM) % 16)     // BRC_X3
.endif
.endm

//
// nonlin_fun4: ZUC nonlinear function F for 4 lanes.
//   W  = (BRC_X0 ^ F_R1) + F_R2              (only when OUTPUT_W == 1)
//   W1 = F_R1 + BRC_X1,  W2 = F_R2 ^ BRC_X2
//   F_R1 = S(L1(W1L || W2H)),  F_R2 = S(L2(W2L || W1H))
// where L1(P)=P^rot2^rot10^rot18^rot24 and L2(Q)=Q^rot8^rot14^rot22^rot30.
// [in]  OUTPUT_W  - when 1, write W to \V_W
// [out] V_W       - v register receiving W
// In/out: vFR1/vFR2 (v27/v28) are replaced with the new R1/R2.
// [clobbered] v20-v26, v31 (v24-v26 alias vBRCX0-2, consumed here).
//
.macro	nonlin_fun4 OUTPUT_W=0, V_W
.if \OUTPUT_W == 1
	eor		\V_W\().16b, vBRCX0.16b, vFR1.16b
	add		\V_W\().4s, \V_W\().4s, vFR2.4s    // W = (BRC_X0 ^ F_R1) + F_R2
.endif
	add		v23.4s, vFR1.4s, vBRCX1.4s         // W1 = F_R1 + BRC_X1
	eor		v24.16b, vFR2.16b, vBRCX2.16b      // W2 = F_R2 ^ BRC_X2

	ushr	v25.4s, v23.4s, #16
	ushr	v26.4s, v24.4s, #16
	trn1	v21.8h, v25.8h, v24.8h             // W1L || W2H
	trn1	v20.8h, v26.8h, v23.8h             // W2L || W1H

	// Rotations for L1 (on v20 = P) and L2 (on v21 = Q), each built as
	// ushr + sli = rotate-left within 32-bit lanes.
	ushr	v26.4s, v21.4s, 32-8
	ushr	v23.4s, v20.4s, 32-10
	ushr	v27.4s, v21.4s, 32-14
	ushr	v22.4s, v20.4s, 32-2
	ushr	v28.4s, v21.4s, 32-22
	ushr	v24.4s, v20.4s, 32-18
	ushr	v31.4s, v21.4s, 32-30
	ushr	v25.4s, v20.4s, 32-24
	sli		v26.4s, v21.4s, 8
	sli		v23.4s, v20.4s, 10
	sli		v27.4s, v21.4s, 14
	sli		v22.4s, v20.4s, 2
	sli		v28.4s, v21.4s, 22
	sli		v24.4s, v20.4s, 18
	sli		v31.4s, v21.4s, 30
	sli		v25.4s, v20.4s, 24
	eor		v26.16b, v26.16b, v27.16b
	eor		v22.16b, v22.16b, v23.16b
	eor		v28.16b, v28.16b, v31.16b
	eor		v24.16b, v24.16b, v25.16b
	eor		v21.16b, v21.16b, v26.16b
	eor		v20.16b, v20.16b, v22.16b
	eor		v21.16b, v21.16b, v28.16b          // v21 = V = L2(Q)
	eor		v20.16b, v20.16b, v24.16b          // v20 = U = L1(P)

	// shuffle U and V to have all S0 lookups in v20 and all S1 lookups in v21
	// Compress all S0 and S1 input values in each register
	trn1	v23.16b, v21.16b, v20.16b
	trn2	v22.16b, v21.16b, v20.16b

	// Compute S0 and S1 values (macros from zuc_sbox.inc)
	S0_compute_NEON	v22, v20, v21
	S1_compute_NEON	v23, v20, v21, v31
	// Need to shuffle back v20 & v21 before storing output
	// (revert what was done before S0 and S1 computations)
	trn1	vFR2.16b, v23.16b, v22.16b
	trn2	vFR1.16b, v23.16b, v22.16b
.endm

// add_mod31()
//     add two 32-bit args and reduce mod (2^31-1)
//     d   - in/out v register NUMBER (accumulator)
//     n   - in     v register NUMBER (addend)
//     TMP - clobbered v register NUMBER
//     Reduction: fold the carry-out (bit 31) back into bit 0 and clear
//     bit 31 (bic of 0x80<<24 per lane).
.macro	add_mod31 d, n, TMP
	add		v\d\().4s, v\d\().4s, v\n\().4s
	ushr	v\TMP\().4s, v\d\().4s, #31
	bic		v\d\().4s, #0x80, LSL #24
	add		v\d\().4s, v\d\().4s, v\TMP\().4s
.endm

// rot_mod31()
//     rotate (mult by pow of 2) 32-bit arg and reduce mod (2^31-1)
//     ARG  - in  v register NUMBER (31-bit value per lane)
//     DST  - out v register NUMBER
//     BITS - rotate amount; equals multiplying by 2^BITS mod (2^31-1)
.macro	rot_mod31 ARG, DST, BITS
	ushr	v\DST\().4s, v\ARG\().4s, (31 - \BITS)
	sli		v\DST\().4s, v\ARG\().4s, \BITS
	bic		v\DST\().4s, #0x80, LSL #24        // keep result in 31 bits
.endm

// lfsr_updt4: one LFSR step for 4 lanes.  The new s16 is written over
// the register currently acting as s0 (i.e. v[(0+ROUND_NUM)%16]),
// which implements the shift virtually.
// [in] ROUND_NUM - virtual shift count (mod 16)
// [in] W_ADD     - when 1, also add (W>>1) held in v\V_W into the feedback
// TMP vreg: v20-v26, v31
.macro	lfsr_updt4 ROUND_NUM, W_ADD=0, V_W
	// Calculate LFSR feedback
	// s0 = w>>1 + 2^15*s15 + 2^17*s13 + 2^21*s10 + 2^20*s4 + (1+2^8)*s0 mod (2^31-1);

	rot_mod31	%((0 + \ROUND_NUM) % 16), 20, 8      // 2^8 * s0
	rot_mod31	%((4 + \ROUND_NUM) % 16), 21, 20     // 2^20 * s4
	rot_mod31	%((10 + \ROUND_NUM) % 16), 22, 21    // 2^21 * s10
	rot_mod31	%((13 + \ROUND_NUM) % 16), 23, 17    // 2^17 * s13
	rot_mod31	%((15 + \ROUND_NUM) % 16), 24, 15    // 2^15 * s15
	add_mod31	20, 21, 25
	add_mod31	22, 23, 26
	add_mod31	%((0 + \ROUND_NUM) % 16), 24, 31     // accumulate into s0 slot
	add_mod31	20, 22, 25
	add_mod31	%((0 + \ROUND_NUM) % 16), 20, 31

.if \W_ADD == 1
	add_mod31	%((0 + \ROUND_NUM) % 16), \V_W, 26   // + (W >> 1) during init
.endif
.endm

// load_key_iv: for one lane, dereference the key pointer at pKe[off]
// into q<i> and load the lane's IV into q<j>.
// NOTE(review): pKe holds an array of 8-byte key POINTERS (stride 8),
// while pIv appears to be a flat buffer with 32-byte IV stride
// (\off*4 with off stepping by 8) — confirm against the caller's layout.
// [clobbered] x8
.macro	load_key_iv i, j, pKe, pIv, off
	ldr		x8, [pKe, \off]
	ldr		q\i, [x8]
	ldr		q\j, [pIv, \off*4]
.endm

// str_vi: store q<i> into the state at byte offset 4*off + 16*i
// (per-lane sub-offset 4*off within the i-th 16-byte state slot).
.macro	str_vi i, pState, off
	str		q\i, [\pState, 4*\off + 16*\i]
.endm

//
// Initialize LFSR registers for a single lane, for ZUC-256
// [in] KEY  - Key pointer (x register; 32 key bytes)
// [in] IV   - IV pointer (x register; 25 IV bytes, byte 24 split in nibbles)
// [out] LFSR0_3   - v register to contain initialized LFSR regs 0-3
// [out] LFSR4_7   - v register to contain initialized LFSR regs 4-7
// [out] LFSR8_11  - v register to contain initialized LFSR regs 8-11
// [out] LFSR12_15 - v register to contain initialized LFSR regs 12-15
// [clobbered] vKEY1, used to load key0 - key15
// [clobbered] vKEY2, used to load key16 - key31
// [clobbered] vTMP temporary register
// [clobbered] xTP temporary register
// [clobbered] wTP temporary register
// [in] CONSTANTS Address to constants (EK256_d64 set); advanced by 48 bytes
//
// NOTE(review): bare parameter names (xTP, IV, vKEY1 at the `and` below)
// rely on .altmacro name substitution being active (.altmacro, above).
//
.macro	INIT_LFSR_256 KEY, IV, LFSR0_3, LFSR4_7, LFSR8_11, LFSR12_15, \
		vKEY1, vKEY2, vTMP, xTP, wTP, CONSTANTS
	ld1		{\vKEY1\().16b, \vKEY2\().16b}, [\KEY]

	// s0 - s3
	eor		\LFSR0_3\().16b, \LFSR0_3\().16b, \LFSR0_3\().16b
	ins		\LFSR0_3\().B[3], \vKEY1\().B[0]                    // s0
	ins		\LFSR0_3\().B[7], \vKEY1\().B[1]                    // s1
	ins		\LFSR0_3\().B[11], \vKEY1\().B[2]                   // s2
	ins		\LFSR0_3\().B[15], \vKEY1\().B[3]                   // s3

	ushr	\LFSR0_3\().4s, \LFSR0_3\().4s, #1

	ld1		{\vTMP\().16b}, [\CONSTANTS], #16
	orr		\LFSR0_3\().16b, \LFSR0_3\().16b, \vTMP\().16b      // s0 - s3

	ins		\LFSR0_3\().B[1], \vKEY2\().B[5]                    // s0   k21
	ins		\LFSR0_3\().B[0], \vKEY2\().B[0]                    // s0   k16

	ins		\LFSR0_3\().B[5], \vKEY2\().B[6]                    // s1   k22
	ins		\LFSR0_3\().B[4], \vKEY2\().B[1]                    // s1   k17

	ins		\LFSR0_3\().B[9], \vKEY2\().B[7]                    // s2   k23
	ins		\LFSR0_3\().B[8], \vKEY2\().B[2]                    // s2   k18

	ins		\LFSR0_3\().B[13], \vKEY2\().B[8]                   // s3   k24
	ins		\LFSR0_3\().B[12], \vKEY2\().B[3]                   // s3   k19

	// s4 - s7
	mov		xTP, IV                                             // xTP = IV + 0
	eor		\LFSR4_7\().16b, \LFSR4_7\().16b, \LFSR4_7\().16b
	ins		\LFSR4_7\().B[3], \vKEY1\().B[4]                    // s4
	ld1		{\LFSR4_7\().B}[7], [xTP]                           // s5
	add		xTP, xTP, #1                                        // xTP = IV + 1
	ld1		{\LFSR4_7\().B}[11], [xTP]                          // s6
	add		xTP, xTP, #9                                        // xTP = IV + 10
	ld1		{\LFSR4_7\().B}[15], [xTP]                          // s7
	add		xTP, xTP, #-8                                       // xTP = IV + 2

	ushr	\LFSR4_7\().4s, \LFSR4_7\().4s, #1

	ins		\LFSR4_7\().B[1],\vKEY2\().B[9]                     // s4    k25
	ins		\LFSR4_7\().B[0],\vKEY2\().B[4]                     // s4    k20

	ins		\LFSR4_7\().B[5],\vKEY1\().B[5]                     // s5    k5
	ins		\LFSR4_7\().B[4],\vKEY2\().B[10]                    // s5    k26

	ins		\LFSR4_7\().B[9],\vKEY1\().B[6]                     // s6    k6
	ins		\LFSR4_7\().B[8],\vKEY2\().B[11]                    // s6    k27

	ins		\LFSR4_7\().B[13],\vKEY1\().B[7]                    // s7    k7
	ld1		{\LFSR4_7\().B}[12], [xTP]                          // s7
	add		xTP, xTP, #15                                       // xTP = IV + 17

	ld1		{\vTMP\().16b}, [\CONSTANTS], #16
	orr		\LFSR4_7\().16b, \LFSR4_7\().16b, \vTMP\().16b      // s4 - s7

	// IV bytes 17-19 are 6-bit values: mask them to 6 bits (0x3f)
	// after shifting into position.
	eor		\vTMP\().16b, \vTMP\().16b, \vTMP\().16b
	ld1		{\vTMP\().B}[6], [xTP]
	add		xTP, xTP, #1                                        // xTP = IV + 18
	ld1		{\vTMP\().B}[10], [xTP]
	add		xTP, xTP, #1                                        // xTP = IV + 19
	ld1		{\vTMP\().B}[14], [xTP]
	add	xTP, xTP, #-14                                          // xTP = IV + 5
	// LFSR8_11 = 0x003f0000 0x003f0000 0x003f0000 0x003f0000
	movi	\LFSR8_11\().4s, 0x3f, lsl 16
	and		\vTMP\().16b, \vTMP\().16b, \LFSR8_11\().16b

	orr		\LFSR4_7\().16b, \LFSR4_7\().16b, \vTMP\().16b

	// s8 - s11
	eor		\LFSR8_11\().16b, \LFSR8_11\().16b, \LFSR8_11\().16b
	ins		\LFSR8_11\().b[3], \vKEY1\().b[8]                   // s8
	ins		\LFSR8_11\().b[7], \vKEY1\().b[9]                   // s9
	ld1		{\LFSR8_11\().b}[11], [xTP]                         // s10
	add		xTP, xTP, #-2                                       // xTP = IV + 3
	ins		\LFSR8_11\().b[15], \vKEY1\().b[11]                 // s11

	ushr	\LFSR8_11\().4s, \LFSR8_11\().4s, #1

	ld1		{\LFSR8_11\().b}[1], [xTP]                          // s8
	add		xTP, xTP, #8                                        // xTP = IV + 11
	ld1		{\LFSR8_11\().b}[0], [xTP]                          // s8
	add		xTP, xTP, #1                                        // xTP = IV + 12

	ld1		{\LFSR8_11\().b}[5], [xTP]                          // s9
	add		xTP, xTP, #-8                                       // xTP = IV + 4
	ld1		{\LFSR8_11\().b}[4], [xTP]                          // s9
	add		xTP, xTP, #2                                        // xTP = IV + 6

	ins		\LFSR8_11\().b[9], \vKEY1\().b[10]                  // s10     k10
	ins		\LFSR8_11\().b[8], \vKEY2\().b[12]                  // s10     k28

	ld1		{\LFSR8_11\().b}[13], [xTP]                         // s11
	add		xTP, xTP, #7                                        // xTP = IV + 13
	ld1		{\LFSR8_11\().b}[12], [xTP]                         // s11
	add		xTP, xTP, #7                                        // xTP = IV + 20

	ld1		{\vTMP\().16b}, [\CONSTANTS], #16
	orr		\LFSR8_11\().16b, \LFSR8_11\().16b, \vTMP\().16b    // s8 - s11

	// IV bytes 20-23 are 6-bit values: mask to 6 bits, as above.
	eor		\vTMP\().16b, \vTMP\().16b, \vTMP\().16b
	ld1		{\vTMP\().B}[2], [xTP]
	add		xTP, xTP, #1                                        // xTP = IV + 21
	ld1		{\vTMP\().B}[6], [xTP]
	add		xTP, xTP, #1                                        // xTP = IV + 22
	ld1		{\vTMP\().B}[10], [xTP]
	add		xTP, xTP, #1                                        // xTP = IV + 23
	ld1		{\vTMP\().B}[14], [xTP]
	add		xTP, xTP, #-16                                      // xTP = IV + 7
	// LFSR12_15 = 0x003f0000 0x003f0000 0x003f0000 0x003f0000
	movi	\LFSR12_15\().4s, 0x3f, lsl 16
	and		\vTMP\().16b, \vTMP\().16b, \LFSR12_15\().16b

	orr		\LFSR8_11\().16b, \LFSR8_11\().16b, \vTMP\().16b

	// s12 - s15
	eor		\LFSR12_15\().16b, \LFSR12_15\().16b, \LFSR12_15\().16b
	ins		\LFSR12_15\().b[3], \vKEY1\().b[12]                 // s12
	ins		\LFSR12_15\().b[7], \vKEY1\().b[13]                 // s13
	ins		\LFSR12_15\().b[11], \vKEY1\().b[14]                // s14
	ins		\LFSR12_15\().b[15], \vKEY1\().b[15]                // s15

	ushr	\LFSR12_15\().4s, \LFSR12_15\().4s, #1

	ld1		{\LFSR12_15\().b}[1], [xTP]                         // s12
	add		xTP, xTP, #7                                        // xTP = IV + 14
	ld1		{\LFSR12_15\().b}[0], [xTP]                         // s12
	add		xTP, xTP, #1                                        // xTP = IV + 15

	ld1		{\LFSR12_15\().b}[5], [xTP]                         // s13
	add		xTP, xTP, #-7                                       // xTP = IV + 8
	ld1		{\LFSR12_15\().b}[4], [xTP]                         // s13
	add		xTP, xTP, #8                                        // xTP = IV + 16

	ld1		{\LFSR12_15\().b}[9], [xTP]                         // s14
	add		xTP, xTP, #-7                                       // xTP = IV + 9
	ld1		{\LFSR12_15\().b}[8], [xTP]                         // s14
	add		xTP, xTP, #15                                       // xTP = IV + 24

	ins		\LFSR12_15\().b[13], \vKEY2\().b[14]                // s15    k30
	ins		\LFSR12_15\().b[12], \vKEY2\().b[13]                // s15    k29

	ld1		{\vTMP\().16b}, [\CONSTANTS]
	orr		\LFSR12_15\().16b, \LFSR12_15\().16b, \vTMP\().16b  // s12 - s15

	eor		\vTMP\().16b, \vTMP\().16b, \vTMP\().16b
	ld1		{\vTMP\().b}[2], [xTP]
	// vKEY1(released) = 0x003f0000 0x003f0000 0x003f0000 0x003f0000
	movi	\vKEY1\().4s, 0x3f, lsl 16
	and		\vTMP\().16b, \vTMP\().16b, vKEY1\().16b

	// IV byte 24 contributes two 4-bit halves (to s14 and s15 words)
	umov	\wTP, \vKEY2\().b[15]
	lsr		\wTP, \wTP, #4
	lsl		\wTP, \wTP, #16                                     // high nibble of k31
	ins		\vTMP\().s[2], \wTP

	umov	\wTP, \vKEY2\().b[15]
	lsl		\wTP, \wTP, #28
	lsr		\wTP, \wTP, #12                                     // low nibble of k31
	ins		\vTMP\().s[3], \wTP

	orr		\LFSR12_15\().16b, \LFSR12_15\().16b, \vTMP\().16b
.endm

//
// ZUC256_INIT: single-lane ZUC-256 initialization.  The macro name is
// remapped by the #define above to asm_Zuc256Initialization_aarch64.
// x0 = key ptr, x1 = IV ptr, x2 = state ptr, x3 = tag size selector.
// The EK256_d64 constant set is chosen as (ctz(tag_sz)-1)*64 — so
// tag_sz is presumably a power of two; confirm the caller's encoding.
// BITS_REORG / NONLIN_FUNC / LFSR_UPDT and the fR1/fR2/BRC_Xn/xW
// registers are presumably provided by zuc_common.inc — not visible here.
// NOTE(review): xW is x18, the AAPCS64 platform register — verify this
// is acceptable for the targeted platforms.
//
.macro	ZUC256_INIT
	declare_register	pKe,	x0
	declare_register	pIv,	x1
	declare_register	pState,	x2
	declare_register	tag_sz,	x3
	declare_register	xW,	x18

	// save clobbered register
	FUNC_SAVE

	adrp	xTMP, EK256_d64
	add		xTMP, xTMP, #:lo12:EK256_d64
	rbit	tag_sz, tag_sz
	clz		tag_sz, tag_sz                 // rbit+clz = count trailing zeros
	sub		tag_sz, tag_sz, #1
	lsl		tag_sz, tag_sz, #6             // * 64 (size of one constant set)
	add		x13, xTMP, tag_sz

	// Expand key
	INIT_LFSR_256	x0, x1, v0, v1, v2, v3, v4, v5, v6, x11, w11, x13
	st1		{v0.16b, v1.16b, v2.16b, v3.16b}, [pState]

	// Set R1 and R2 to zero
	eor		fR1, fR1, fR1
	eor		fR2, fR2, fR2

.set	counter, 0
.rept	32
	BITS_REORG	counter

	NONLIN_FUNC	1
	lsr		xW, xW, #1                     // init rounds feed W>>1 into LFSR

	LFSR_UPDT	counter
.set	counter, (counter+1)
.endr

	// And once more, initial round from keygen phase = 33 times
	BITS_REORG	0
	NONLIN_FUNC	0
	eor		xW, xW, xW

	LFSR_UPDT	0

	// Save ZUC's state variables
	str		fR1, [pState, 16*4]
	str		fR2, [pState, 17*4]
	str		BRC_X0, [pState, 18*4]
	str		BRC_X1, [pState, 19*4]
	str		BRC_X2, [pState, 20*4]
	str		BRC_X3, [pState, 21*4]

	// Restore clobbered register
	FUNC_RESTORE
.endm

.altmacro
//
// ZUC_INIT_4: initialize ZUC state for 4 lanes in parallel.
// x0 = array of 4 key pointers, x1 = IV buffer, x2 = 4-lane state,
// x3 = tag size selector (ZUC-256 only; see ZUC256_INIT for encoding).
// [in] KEY_SIZE - 128 or 256, selects the key-expansion path.
// After expansion the per-lane LFSR words are transposed so each of
// v0-v15 holds s_i for all 4 lanes; runs the 32 init rounds + 1 extra,
// then stores LFSR state and R1/R2 at OFS_R1/OFS_R2.
//
.macro	ZUC_INIT_4 KEY_SIZE
	declare_register	pKe,	x0
	declare_register	pIv,	x1
	declare_register	pState,	x2
	declare_register	tag_sz,	x3 // Only used in ZUC-256

	FUNC_SAVE

.if \KEY_SIZE == 128
	// Load key and IVs to v16-v23
.set	off, 0
.set	i, 16
.set	j, 20
.rept	4
	load_key_iv		%i, %j, pKe, pIv, off
.set	off, (off + 8)
.set	i, (i + 1)
.set	j, (j + 1)
.endr

	// Initialize all LFSR registers
.set	off, 0
.set	idx_off, 0
.rept	4
	adrp	xTMP, shuf_mask_key
	ldr		q24, [xTMP, #:lo12:shuf_mask_key + off]
	adrp	xTMP, shuf_mask_iv
	ldr		q25, [xTMP, #:lo12:shuf_mask_iv + off]
	adrp	xTMP, Ek_d
	ldr		q26, [xTMP, #:lo12:Ek_d + off]

.set	idx, idx_off
.set	i, 16
.set	j, 20
.rept	4
	INIT_LFSR_128	%i, %j, v24, v25, v26, %idx, v27
.set	idx, (idx + 1)
.set	i, (i + 1)
.set	j, (j + 1)
.endr

.set	id0, idx_off
.set	id1, (id0 + 1)
.set	id2, (id1 + 1)
.set	id3, (id2 + 1)

	// store 4xLFSR registers in memory (reordering first,
	// so all SX registers are together)
	TRANSPOSE4_U32	%id0, %id1, %id2, %id3, 27, 28, 29, 31

.set	off, (off + 16)
.set	idx_off, (idx_off + 4)
.endr
.else // KEY_SIZE == 256
	// Get pointer to constants (depending on tag size, this will point at
	// constants for encryption, authentication with 4-byte, 8-byte or 16-byte tags)
	adrp	xTMP, EK256_d64
	add		xTMP, xTMP, #:lo12:EK256_d64
	rbit	tag_sz, tag_sz
	clz		tag_sz, tag_sz
	sub		tag_sz, tag_sz, #1
	lsl		tag_sz, tag_sz, #6
	add		x13, xTMP, tag_sz

	// Initialize all LFSR registers
.set	off, 0
.rept	4
	// Load key and IV for each packet
	ldr		x5, [pKe, off]
	add		x6, pIv, off*4

	// restore x14 (INIT_LFSR_256 advances the constants pointer)
	mov		x14, x13

	// Initialize S0-15 for each packet
	INIT_LFSR_256	x5, x6, v0, v1, v2, v3, v4, v5, v6, x11, w11, x14

.irp	idx,0,1,2,3
	str		q\idx, [pState, 64*\idx + 2*off]
.endr

.set	off, (off + 8)
.endr

	// Read, transpose and store, so all S_X from the 4 packets are in the same register
.set	idx_off, 0
.rept	4

.set	idx, idx_off
.rept	4
	LDR_Q	%idx, pState, %(16*idx)
.set	idx, (idx + 1)
.endr

.set	id0, idx_off
.set	id1, (id0 + 1)
.set	id2, (id1 + 1)
.set	id3, (id2 + 1)
	TRANSPOSE4_U32	%id0, %id1, %id2, %id3, 27, 28, 29, 31

.set	idx_off, (idx_off + 4)
.endr

.endif // KEY_SIZE == 256

	// Zero out R1, R2(only lower 128bits)
	eor		vFR1.16b, vFR1.16b, vFR1.16b
	eor		vFR2.16b, vFR2.16b, vFR2.16b

.set	init_round_num, 0
.rept	32
	// Shift LFSR 32-times, update state variables
	bits_reorg4	init_round_num, 0, no_reg
	nonlin_fun4	1, v29
	ushr	v29.4s, v29.4s, #1             // Shift out LSB of W
	lfsr_updt4	init_round_num, 1, 29       // W (v0) used in LFSR update - not set to zero
.set	init_round_num, (init_round_num + 1)
.endr

2:	// NOTE(review): this numeric label appears unreferenced — confirm leftover
	// And once more, initial round from keygen phase = 33 times
	bits_reorg4	0, 0, no_reg
	nonlin_fun4	0, no_reg
	lfsr_updt4	0, 0, no_reg

	STORE_LFSR_LIST	pState, 0

	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]

	FUNC_RESTORE

	ret
.endm

//
// simd_load_16: load SIZE (0..16) bytes from ADDR into vector DST,
// zeroing all remaining bytes of DST.
//
// [out] DST  - v register receiving the data
// [in]  ADDR - x register holding the source address (not modified)
// [in]  SIZE - x register holding the byte count, 0 <= SIZE <= 16
// [clobbered] xTMP, condition flags
//
// Fixes: A64 has no "test" mnemonic — use tst (alias of ANDS with XZR).
// Labels are GAS numeric locals (1..15 = exact-size entry points,
// 20 = partial path, 21 = done), matching the style of simd_store_16
// and allowing the macro to be expanded more than once per file.
// Each _size_N case loads its top bytes and falls through into the
// smaller cases below it; sizes 8/4/2 finish with a single D/S/H
// lane load of the low bytes.
//
.macro	simd_load_16 DST, ADDR, SIZE
	tst		\SIZE, #16
	b.eq	20f
	ld1		{\DST\().16b}, [\ADDR]
	b		21f

20:	// fewer than 16 bytes: zero DST, then dispatch on the exact size
	eor		\DST\().16b, \DST\().16b, \DST\().16b
	cbz		\SIZE, 21f
	cmp		\SIZE, 1
	b.eq	1f
	cmp		\SIZE, 2
	b.eq	2f
	cmp		\SIZE, 3
	b.eq	3f
	cmp		\SIZE, 4
	b.eq	4f
	cmp		\SIZE, 5
	b.eq	5f
	cmp		\SIZE, 6
	b.eq	6f
	cmp		\SIZE, 7
	b.eq	7f
	cmp		\SIZE, 8
	b.eq	8f
	cmp		\SIZE, 9
	b.eq	9f
	cmp		\SIZE, 10
	b.eq	10f
	cmp		\SIZE, 11
	b.eq	11f
	cmp		\SIZE, 12
	b.eq	12f
	cmp		\SIZE, 13
	b.eq	13f
	cmp		\SIZE, 14
	b.eq	14f
15:	// size == 15 (fall-through from the cmp chain)
	add		xTMP, \ADDR, 14
	ld1		{\DST\().B}[14], [xTMP]
14:
	add		xTMP, \ADDR, 13
	ld1		{\DST\().B}[13], [xTMP]
13:
	add		xTMP, \ADDR, 12
	ld1		{\DST\().B}[12], [xTMP]
12:
	add		xTMP, \ADDR, 11
	ld1		{\DST\().B}[11], [xTMP]
11:
	add		xTMP, \ADDR, 10
	ld1		{\DST\().B}[10], [xTMP]
10:
	add		xTMP, \ADDR, 9
	ld1		{\DST\().B}[9], [xTMP]
9:
	add		xTMP, \ADDR, 8
	ld1		{\DST\().B}[8], [xTMP]
8:	// low 8 bytes in one doubleword lane load
	ld1		{\DST\().D}[0], [\ADDR]
	b		21f
7:
	add		xTMP, \ADDR, 6
	ld1		{\DST\().B}[6], [xTMP]
6:
	add		xTMP, \ADDR, 5
	ld1		{\DST\().B}[5], [xTMP]
5:
	add		xTMP, \ADDR, 4
	ld1		{\DST\().B}[4], [xTMP]
4:	// low 4 bytes in one word lane load
	ld1		{\DST\().S}[0], [\ADDR]
	b		21f
3:
	add		xTMP, \ADDR, 2
	ld1		{\DST\().B}[2], [xTMP]
2:	// low 2 bytes in one halfword lane load
	ld1		{\DST\().H}[0], [\ADDR]
	b		21f
1:
	ld1		{\DST\().B}[0], [\ADDR]
21:
.endm

//
// simd_store_16: store the low SIZE (0..16) bytes of vector SRC to
// DST + OFFSET, decomposing SIZE into 16/8/4/2/1-byte stores.
// [in] DST    - x register, destination base address
// [in/out] SRC - v register; shifted right as bytes are consumed
//                (destroyed unless SIZE is 0 or 16)
// [in] SIZE   - x register, byte count (bits tested individually)
// [in] OFFSET - byte offset added to DST
// [clobbered] x11, x12, condition flags
//
.macro	simd_store_16 DST, SRC, SIZE, OFFSET

	mov		x11, \OFFSET
	tst		\SIZE, 16
	b.eq	1f
	add		x12, \DST, x11
	st1		{\SRC\().16b}, [x12]
	b		2f
1:
	tst		\SIZE, 8
	b.eq	1f
	add		x12, \DST, x11
	st1		{\SRC\().D}[0], [x12]
	ext		\SRC\().16b, \SRC\().16b, \SRC\().16b, #8    // consume 8 bytes
	add		x11, x11, #8
1:
	tst		\SIZE, 4
	b.eq	1f
	add		x12, \DST, x11
	st1		{\SRC\().S}[0], [x12]
	ushr	\SRC\().2d, \SRC\().2d, #32                  // consume 4 bytes
	add		x11, x11, #4
1:
	tst		\SIZE, 2
	b.eq	1f
	add		x12, \DST, x11
	st1		{\SRC\().H}[0], [x12]
	ushr	\SRC\().2d, \SRC\().2d, #16                  // consume 2 bytes
	add		x11, x11, #2
1:
	tst		\SIZE, 1
	b.eq	2f
	add		x12, \DST, x11
	st1		{\SRC\().B}[0], [x12]
2:
.endm

// eor_vi: v<i> = v<j> ^ vX (i/j are register NUMBERS, vX a register name)
.macro	eor_vi i, j, vX
	eor		v\i\().16b, v\j\().16b, \vX\().16b
.endm

//
// CIPHERNx4B_4: generate NROUNDS*4 keystream bytes per lane (into
// v16-v19 via v(N+15)), XOR with the input buffers and write out.
// [in] INITIAL_ROUND - starting virtual LFSR round number
// [in] OFFSET        - byte offset into the in/out buffers
// [in] LAST_CALL     - when 1, per-lane remaining lengths are taken from
//                      v30 lanes and partial stores are done via
//                      simd_store_16; otherwise full 16B stores.
// Input pointers: x19-x22 (note lane order 1,2,3,0 in the loads below);
// output pointers: x26-x29.  Uses v0-v15 (LFSR), vFR1/vFR2, v20-v31.
// NOTE(review): TMP1/TMP2 are defined here but not referenced in this
// macro body — possibly leftovers; confirm before removing.
//
.macro	CIPHERNx4B_4 NROUNDS, INITIAL_ROUND, OFFSET, LAST_CALL
	#define	TMP1	x8
	#define	TMP2	x9

	// Generate N*4B of keystream in N rounds
.set	N, 1
.set	round, (\INITIAL_ROUND + N)
.rept	\NROUNDS
	bits_reorg4	round, 1, %(N+15)
	nonlin_fun4	1, v29
	// OFS_XR XOR W (v0)
	eor_vi		%(N+15), %(N+15), v29
	lfsr_updt4	round, 0, no_reg
.set	N, (N + 1)
.set	round, (round + 1)
.endr

	TRANSPOSE4_U32	16, 17, 18, 19, 20, 21, 22, 23

	// XOR Input buffer with keystream in rounds of 16B

	rev32	v16.16b, v16.16b
	ldr		q24, [x20, \OFFSET]
	rev32	v17.16b, v17.16b
	ldr		q25, [x21, \OFFSET]
	rev32	v18.16b, v18.16b
	ldr		q26, [x22, \OFFSET]
	rev32	v19.16b, v19.16b
	ldr		q31, [x19, \OFFSET]

	eor		v16.16b, v16.16b, v24.16b
	eor		v17.16b, v17.16b, v25.16b
	eor		v18.16b, v18.16b, v26.16b
	eor		v19.16b, v19.16b, v31.16b

.if \LAST_CALL == 1
	// v30 lanes hold per-lane byte counts for the final partial store
	umov			w25, v30.s[0]
	simd_store_16	x26, v16, x25, \OFFSET
	umov			w25, v30.s[1]
	simd_store_16	x27, v17, x25, \OFFSET
	umov			w25, v30.s[2]
	simd_store_16	x28, v18, x25, \OFFSET
	umov			w25, v30.s[3]
	simd_store_16	x29, v19, x25, \OFFSET
.else
	str		q16, [x26, \OFFSET]
	str		q17, [x27, \OFFSET]
	str		q18, [x28, \OFFSET]
	str		q19, [x29, \OFFSET]
.endif
.endm

// STORE_LFSR_LIST: store v0-v15 to STATE in LFSR order, starting from
// the register that is virtually s0 after NUM_ROUNDS shifts, so the
// state in memory is always canonical (s0 first).
.macro	STORE_LFSR_LIST STATE, NUM_ROUNDS
.set	round_num, \NUM_ROUNDS
.set	offset, 0
.rept	16
	STR_Q	%((round_num) % 16), \STATE\(), offset
.set	round_num, (round_num + 1)
.set	offset, (offset + 16)
.endr
.endm

// LOAD_LFSR_LIST: inverse of STORE_LFSR_LIST — load canonical state
// from STATE into v0-v15 with the mapping rotated by NUM_ROUNDS.
.macro	LOAD_LFSR_LIST STATE, NUM_ROUNDS
.set	round_num, \NUM_ROUNDS
.set	offset, 0
.rept	16
	LDR_Q	%((round_num) % 16), \STATE\(), offset
.set	round_num, (round_num + 1)
.set	offset, (offset + 16)
.endr
.endm

// store_vi_to_keyaddr: scatter the four 32-bit lanes of v<i> to the
// four per-lane keystream addresses (lane n -> addr(n+1)).
.macro	store_vi_to_keyaddr i, addr1, addr2, addr3, addr4
	st1		{v\i\().S}[0], [\addr1]
	st1		{v\i\().S}[1], [\addr2]
	st1		{v\i\().S}[2], [\addr3]
	st1		{v\i\().S}[3], [\addr4]
.endm

//
// KEYGEN_4_AARCH64: generate NUM_ROUNDS*4 keystream bytes per lane.
// x0 = 4-lane state, x1 = array of 4 keystream output pointers.
// Loads state (LFSR + R1/R2), runs NUM_ROUNDS keystream rounds,
// scatters the keystream to the 4 output buffers (vectorized 16B path
// when NUM_ROUNDS == 4, 4B-at-a-time otherwise) and stores the
// advanced state back.  No ret — presumably emitted at the
// instantiation site.
//
.macro	KEYGEN_4_AARCH64 NUM_ROUNDS
	declare_register	pState,	x0
	declare_register	pKS,	x1

	FUNC_SAVE

	ldr		qFR1, [pState, #OFS_R1]
	ldr		qFR2, [pState, #OFS_R2]
	LOAD_LFSR_LIST	pState, 0

	// Generate N*4B of keystream in N rounds
.set	N, 1
.rept	\NUM_ROUNDS
	bits_reorg4	N, 1, %(N+15)
	nonlin_fun4	1, v29
	// OFS_XR XOR W (v0)
	eor_vi		%(N+15), %(N+15), v29
	lfsr_updt4	N, 0, no_reg
.set	N, (N + 1)
.endr

	// Per-lane keystream destination pointers
	ldp		x10, x11, [pKS]
	ldp		x12, x13, [pKS, 16]

.if \NUM_ROUNDS == 4
	TRANSPOSE4_U32 16, 17, 18, 19, 20, 21, 22, 23
	st1		{v16.16b}, [x10]
	st1		{v17.16b}, [x11]
	st1		{v18.16b}, [x12]
	st1		{v19.16b}, [x13]
.else
.set	idx, 1
.rept	\NUM_ROUNDS
	store_vi_to_keyaddr	%(idx+15), x10, x11, x12, x13
	add		x10, x10, #4
	add		x11, x11, #4
	add		x12, x12, #4
	add		x13, x13, #4
.set	idx, (idx + 1)
.endr
.endif

	// Re-canonicalize state after NUM_ROUNDS virtual shifts
	STORE_LFSR_LIST	pState, \NUM_ROUNDS

	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]

	FUNC_RESTORE
.endm

//
// ROUND: EIA3 tag update for one lane over 16 input bytes.
// Reads 16 bytes from pIN<LANE> (advancing it), bit-reverses them, and
// accumulates DATA x KS carry-less products into the tag.
// [in] LANE         - lane index 0..3 (selects pIN/wT/xT registers,
//                     declared by the enclosing function)
// [in] vKS0/vKS1    - keystream words for this chunk (vKS1 destroyed
//                     on the 16-byte tag path)
// [in] PMULL_ENABLE - 1: use pmull/pmull2; 0: EMULATE_PMULL[2]
//                     (defined elsewhere, for cores without PMULL)
// [in] TAG_SZ       - 4, 8 or 16 byte tag
// [in] T_IN_REG     - 0: tag loaded from/stored to [pT]; 1: tag kept
//                     in wT<LANE>/xT<LANE> (4/8B tags only)
// Requires vKS_RO preloaded with KS_reorder.  Clobbers v0-v4 (TAG_SZ>4),
// v24-v30, and wT/xT.
//
.macro	ROUND LANE vKS0 vKS1 PMULL_ENABLE TAG_SZ T_IN_REG
	// read 16 bytes and reverse bits
	ld1		{v30.16b}, [pIN\LANE\()]
	add		pIN\LANE\(), pIN\LANE\(), 16
	rbit	v30.16b, v30.16b

	ext		v24.16b, \vKS0\().16b, \vKS1\().16b, #8
	tbl		\vKS0\().16b, {\vKS0\().16b}, vKS_RO.16b // KS bits [ 63:32  31:0    95:64   63:32]
	tbl		v24.16b, {v24.16b}, vKS_RO.16b           // KS bits [127:96  95:64  159:128 127:96]

	// - set up DATA: spread the 4 data words into 64-bit lanes
	eor		v25.16b, v25.16b, v25.16b
	eor		v26.16b, v26.16b, v26.16b
	zip1	v25.4s, v30.4s, v25.4s
	zip2	v26.4s, v30.4s, v26.4s

	// - carry-less multiplication
.if PMULL_ENABLE == 1
	pmull	v27.1q, v25.1d, \vKS0\().1d
	pmull2	v28.1q, v25.2d, \vKS0\().2d
	pmull	v29.1q, v26.1d, v24.1d
	pmull2	v30.1q, v26.2d, v24.2d
.else
	EMULATE_PMULL	v27, v25, \vKS0\()
	EMULATE_PMULL2	v28, v25, \vKS0\()
	EMULATE_PMULL	v29, v26, v24
	EMULATE_PMULL2	v30, v26, v24
.endif

	// - xor the results from 4 32-bit words together
	eor		v27.16b, v27.16b, v28.16b
	eor		v29.16b, v29.16b, v30.16b
	eor		v27.16b, v27.16b, v29.16b          // word 0 of the tag update

.if TAG_SZ != 4 // TAG_SZ == 8 or 16
	// second tag word: same data against KS shifted by one word
	tbl		v0.16b, {\vKS1\().16b}, vKS_RO.16b   // KS bits [191:160 159:128 223:192 191:160]
	dup		v28.2D, \vKS0\().D[1]                // KS bits [ 95:64   63:32   95:64   63:32 ]
	dup		v29.2D, v24.D[0]                     // KS bits [127:96   95:64  127:96   95:64 ]
	dup		v30.2D, v24.D[1]                     // KS bits [159:128 127:96  159:128 127:96 ]
	dup		v1.2D, v0.D[0]                       // KS bits [191:160 159:128 191:160 159:128]
.if PMULL_ENABLE == 1
	pmull	v28.1q, v25.1d, v28.1d
	pmull2	v29.1q, v25.2d, v29.2d
	pmull	v2.1q, v26.1d, v30.1d
	pmull2	v3.1q, v26.2d, v1.2d
.else // PMULL_ENABLE == 0
	EMULATE_PMULL	v28, v25, v28
	EMULATE_PMULL2	v29, v25, v29
	EMULATE_PMULL	v2, v26, v30
	EMULATE_PMULL2	v3, v26, v1
.endif // PMULL_ENABLE
	// - xor the results from 4 32-bit words together
	eor		v28.16b, v28.16b, v29.16b
	eor		v2.16b, v2.16b, v3.16b
	eor		v28.16b, v28.16b, v2.16b           // word 1 of the tag update

.if TAG_SZ == 16
	// words 2 and 3 of the tag update (KS shifted by 2 and 3 words)
	dup		\vKS1\().2D, \vKS1\().D[1]           // KS bits [255:224 223:192 255:224 223:192]
	rev64	\vKS1\().4s, \vKS1\().4s
.if PMULL_ENABLE == 1
	pmull	v29.1q, v25.1d, v24.1d
	pmull2	v2.1q, v25.2d, v24.2d
	pmull	v3.1q, v26.1d, v0.1d
	pmull2	v4.1q, v26.2d, v0.2d
.else // PMULL_ENABLE == 0
	EMULATE_PMULL	v29, v25, v24
	EMULATE_PMULL2	v2, v25, v24
	EMULATE_PMULL	v3, v26, v0
	EMULATE_PMULL2	v4, v26, v0
.endif // PMULL_ENABLE
	eor		v29.16b, v29.16b, v2.16b
	eor		v3.16b, v3.16b, v4.16b
	eor		v29.16b, v29.16b, v3.16b
	dup		v0.2D, v0.D[1]
.if PMULL_ENABLE == 1
	pmull	v30.1q, v25.1d, v30.1d
	pmull2	v1.1q, v25.2d, v1.2d
	pmull	v0.1q, v26.1d, v0.1d
	pmull2	\vKS1\().1q, v26.2d, \vKS1\().2d
.else // PMULL_ENABLE == 0
	EMULATE_PMULL	v30, v25, v30
	EMULATE_PMULL2	v1, v25, v1
	EMULATE_PMULL	v0, v26, v0
	EMULATE_PMULL2	\vKS1\(), v26, \vKS1\()
.endif // PMULL_ENABLE
	eor		v30.16b, v30.16b, v1.16b
	eor		v0.16b, v0.16b, \vKS1\().16b
	eor		v30.16b, v30.16b, v0.16b
.endif // TAG_SZ == 16
.endif // TAG_SZ == 8 or 16

.if TAG_SZ == 4
	// - update T
.if T_IN_REG == 0
	ldr		wT\LANE\(), [pT, #(4*\LANE\())]
.endif // T_IN_REG == 0
	mov		wT, v27.s[1]
	eor		wT\LANE\(), wT, wT\LANE\()
.if T_IN_REG == 0
	str		wT\LANE\(), [pT, #(4*\LANE\())]
.endif // T_IN_REG == 0
.else // TAG_SZ == 8 or 16
.if TAG_SZ == 8
.if T_IN_REG == 0
	ldr		xT\LANE\(), [pT, #(8*\LANE\())]
.endif // T_IN_REG == 0
	zip1	v27.4s, v27.4s, v28.4s
	mov		xT, v27.d[1]
	eor		xT\LANE\(), xT, xT\LANE\()
.if T_IN_REG == 0
	str		xT\LANE\(), [pT, #(8*\LANE\())]
.endif // T_IN_REG == 0
.else // TAG_SZ == 16
	/*
	 * For 128bit tag, T could not restore in assigned vector register
	 * because vector registers are used out.
	 */
	ldr		q24, [pT, #(16*\LANE\())]
	zip1	v27.4s, v27.4s, v28.4s
	zip1	v29.4s, v29.4s, v30.4s
	zip2	v27.2d, v27.2d, v29.2d
	eor		v24.16b, v24.16b, v27.16b
	str		q24, [pT, #(16*\LANE\())]
.endif
.endif // TAG_SZ == 8 or 16
.endm

//
// ZUC_AUTH_4LANE: run the EIA3 ROUND for all 4 lanes, spilling the
// vector registers that ROUND clobbers but the keystream pipeline
// still needs (q27/q28 = vFR1/vFR2, plus v0-v4 for larger tags).
// KS0_n/KS1_n: per-lane keystream register pairs.
// NOTE(review): scratch is written at [sp, #0..#96] without adjusting
// sp — this assumes the enclosing function has reserved spill space at
// the current sp (FUNC_SAVE's own frame stores x29/x30 at [sp, #0]);
// confirm the caller sub's sp before invoking this macro.
//
.macro	ZUC_AUTH_4LANE KS0_0 KS1_0 KS0_1 KS1_1 KS0_2 KS1_2 KS0_3 KS1_3 PMULL_ENABLE TAG_SZ
	stp		q27, q28, [sp]
.if TAG_SZ != 4
	stp		q0, q1, [sp, #32]
	stp		q2, q3, [sp, #64]
.if TAG_SZ == 16
	str		q4, [sp, #96]
.endif
.endif
	ROUND	0 KS0_0 KS1_0 PMULL_ENABLE TAG_SZ 1
	ROUND	1 KS0_1 KS1_1 PMULL_ENABLE TAG_SZ 1
	ROUND	2 KS0_2 KS1_2 PMULL_ENABLE TAG_SZ 1
	ROUND	3 KS0_3 KS1_3 PMULL_ENABLE TAG_SZ 1
.if TAG_SZ != 4
	ldp		q0, q1, [sp, #32]
	ldp		q2, q3, [sp, #64]
.if TAG_SZ == 16
	ldr		q4, [sp, #96]
.endif
.endif
	ldp		q27, q28, [sp]
.endm

/*
 * GENKS_AND_AUTH: for each of N4 blocks, generate 16 fresh keystream bytes
 * per lane and run the 4-lane authentication update over them.
 *
 *   N4     - number of 16-byte blocks to process
 *   KEYLEN - 128 or 256. Only KEYLEN == 256 dispatches on tagSize (4/8/16);
 *            for KEYLEN == 128 only the 4-byte tag path is emitted.
 *
 * OFFSET tracks the LFSR round index across blocks. It starts at 4 because
 * rounds 1-4 were consumed by the initial keystream generation in
 * _ZUC_AUTH_4 (this macro's only caller in this file).
 */
.macro	GENKS_AND_AUTH N4 KEYLEN
.set	OFFSET, 4
.rept	\N4
	// Generate N*4B of keystream in N rounds
.set	N, 1
.rept	4
	bits_reorg4	%(N+OFFSET), 1, %(N+15)
	nonlin_fun4	1, v29
	// OFS_XR XOR W (v0)
	eor_vi		%(N+15), %(N+15), v29
	lfsr_updt4	%(N+OFFSET), 0, no_reg
.set	N, (N + 1)
.endr // rept 4

	// Slide each lane's 32-byte keystream window: the previous 16 bytes are
	// moved to offset 16 and the fresh (transposed) 16 bytes go to offset 0.
	ldr		q20, [pKS0]
	ldr		q21, [pKS1]
	ldr		q22, [pKS2]
	ldr		q23, [pKS3]
	TRANSPOSE4_U32	16, 17, 18, 19, 24, 25, 26, 29
	str		q16, [pKS0]
	str		q17, [pKS1]
	str		q18, [pKS2]
	str		q19, [pKS3]
	str		q20, [pKS0, #16]
	str		q21, [pKS1, #16]
	str		q22, [pKS2, #16]
	str		q23, [pKS3, #16]

	/* ZUC authentication part
	 * - 4x32 data bits
	 * - set up KS
	 */
	adrp	xTMP, KS_reorder
	add		xTMP, xTMP, #:lo12:KS_reorder
	ld1		{vKS_RO.16b}, [xTMP]           // load the KS_reorder pattern into vKS_RO

	cmp		pmullEnable, 0
	b.ne	1f                             // take the hardware-PMULL variants below

	// Emulated-PMULL variants, selected by tag size
.if KEYLEN == 256
	cmp		tagSize, 8
	b.eq	8f
	b.gt	16f
.endif

	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 0, 4
	b		2f
8:	// 8-byte tag, emulated PMULL
	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 0, 8
	b		2f
16:	// 16-byte tag, emulated PMULL
	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 0, 16
	b		2f
1:	// Hardware-PMULL variants, selected by tag size

.if KEYLEN == 256
	cmp		tagSize, 8
	b.eq	8f
	b.gt	16f
.endif

	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 1, 4
	b		2f
8:	// 8-byte tag, hardware PMULL
	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 1, 8
	b		2f
16:	// 16-byte tag, hardware PMULL
	ZUC_AUTH_4LANE	v20, v16, v21, v17, v22, v18, v23, v19, 1, 16
	b		2f
2:

.set	OFFSET, (OFFSET + 4)               // next block consumes the next 4 LFSR rounds
.endr // rept N4
.endm

/*
 * _ZUC_AUTH_4: 4-lane ZUC EIA3 authentication body, shared by the 128-bit
 * and 256-bit key entry points (KEYLEN selects which tag sizes are emitted).
 *
 * Register arguments (see declare_register list below):
 *   x0 - pState      ZUC state: LFSR plus FSM registers (OFS_R1 / OFS_R2)
 *   x1 - pT          per-lane tags: 4B, 8B or 16B entries depending on tagSize
 *   x2 - pIn         array of 4 per-lane data pointers (written back on exit)
 *   x3 - rounds      number of 16-byte rounds of data to authenticate
 *   x4 - pKeyStream  array of 4 per-lane keystream buffer pointers
 *   x5 - tagSize     tag size in bytes, only consulted when KEYLEN == 256
 *                    (KEYLEN == 128 always takes the 4-byte tag path)
 *
 * NOTE(review): pKS3 lives in x18, the AAPCS64 platform register - assumed
 * acceptable on this target. Confirm FUNC_SAVE/FUNC_RESTORE handling.
 */
.macro	_ZUC_AUTH_4 KEYLEN
	declare_register	pState,	x0
	declare_register	pT,	x1
	declare_register	pIn,	x2
	declare_register	rounds,	x3
	declare_register	pKeyStream,	x4
	declare_register	tagSize,	x5
	declare_register	pmullEnable,	x24
	declare_register	wT,	w6
	declare_register	wT0,	w7
	declare_register	wT1,	w8
	declare_register	wT2,	w9
	declare_register	wT3,	w10
	declare_register	xT,	x6
	declare_register	xT0,	x7
	declare_register	xT1,	x8
	declare_register	xT2,	x9
	declare_register	xT3,	x10
	declare_register	pIN0,	x11
	declare_register	pIN1,	x12
	declare_register	pIN2,	x13
	declare_register	pIN3,	x14
	declare_register	pKS0,	x15
	declare_register	pKS1,	x16
	declare_register	pKS2,	x17
	declare_register	pKS3,	x18
	declare_register	vKS_RO,	v31

	FUNC_SAVE
	sub		sp, sp, 112                    // scratch area used by ZUC_AUTH_4LANE spills

	// - save clobbered registers
	mov		x19, x0
	mov		x20, x1
	mov		x21, x2

	// - tell if pmull is supported
	bl		cpu_feature_detect
	and		pmullEnable, x0, IMB_FEATURE_PMULL // x24 nonzero iff PMULL is available

	// - restore registers
	mov		x0, x19
	mov		x1, x20
	mov		x2, x21

	ldr		qFR1, [pState, #OFS_R1]        // load FSM registers R1/R2
	ldr		qFR2, [pState, #OFS_R2]
	LOAD_LFSR_LIST	pState, 0

.if KEYLEN == 256
	// 256-bit keys: load the tags according to tagSize.
	// 16-byte tags stay in memory and are updated in place by ROUND.
	cmp		tagSize, 8
	b.eq	8f
	b.cc	4f
	b		16f
8:	// 8-byte tags kept in xT0..xT3
	ldp		xT0, xT1, [pT]
	ldp		xT2, xT3, [pT, 16]
	b		16f
.endif
4:	// 4-byte tags kept in wT0..wT3 (the only path for KEYLEN == 128)
	ldp		wT0, wT1, [pT]
	ldp		wT2, wT3, [pT, 8]
16:	// common setup: per-lane data and keystream buffer pointers
	ldp		pIN0, pIN1, [pIn]
	ldp		pIN2, pIN3, [pIn, 16]
	ldp		pKS0, pKS1, [pKeyStream]
	ldp		pKS2, pKS3, [pKeyStream, 16]

	// Generate N*4B of keystream in N rounds
	// (primes each lane's keystream buffer with its first 16 bytes)
.set	N, 1
.rept	4
	bits_reorg4	N, 1, %(N+15)
	nonlin_fun4	1, v29
	// OFS_XR XOR W (v0)
	eor_vi		%(N+15), %(N+15), v29
	lfsr_updt4	N, 0, no_reg
.set	N, (N + 1)
.endr

	TRANSPOSE4_U32	16, 17, 18, 19, 20, 21, 22, 23 // lane-interleaved words to per-lane order
	str		q16, [pKS0]
	str		q17, [pKS1]
	str		q18, [pKS2]
	str		q19, [pKS3]

auth_4_loop_zuc\KEYLEN:
	// Main loop: 4 rounds (16 bytes of data per lane) per iteration
	cmp		rounds, #4
	b.lt	exit_auth_4_loop_zuc\KEYLEN

	GENKS_AND_AUTH	4 \KEYLEN

	sub		rounds, rounds, 4
	b		auth_4_loop_zuc\KEYLEN

exit_auth_4_loop_zuc\KEYLEN:
	// Tail dispatch: rounds is 0..3 here, so one of these always matches
	cmp		rounds, #3
	b.eq	auth_3_zuc\KEYLEN
	cmp		rounds, #2
	b.eq	auth_2_zuc\KEYLEN
	cmp		rounds, #1
	b.eq	auth_1_zuc\KEYLEN
	cmp		rounds, #0
	b.eq	auth_0_zuc\KEYLEN

auth_3_zuc\KEYLEN:
	GENKS_AND_AUTH	3 \KEYLEN
	// NOTE(review): the 2nd STORE_LFSR_LIST argument appears to be the
	// accumulated LFSR shift (4 initial rounds + n tail rounds) - confirm
	// against the STORE_LFSR_LIST definition
	STORE_LFSR_LIST	pState, (4 + 3 * 4)
	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]
	b		auth_finish_zuc\KEYLEN
auth_2_zuc\KEYLEN:
	GENKS_AND_AUTH	2 \KEYLEN
	STORE_LFSR_LIST	pState, (4 + 2 * 4)
	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]
	b		auth_finish_zuc\KEYLEN
auth_1_zuc\KEYLEN:
	GENKS_AND_AUTH	1 \KEYLEN
	STORE_LFSR_LIST	pState, (4 + 1 * 4)
	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]
	b		auth_finish_zuc\KEYLEN
auth_0_zuc\KEYLEN:
	STORE_LFSR_LIST	pState, (4 + 0 * 4)
	str		qFR1, [pState, #OFS_R1]
	str		qFR2, [pState, #OFS_R2]
	b		auth_finish_zuc\KEYLEN

auth_finish_zuc\KEYLEN:
.if KEYLEN == 256
	// Write tags back (16-byte tags were already updated in memory)
	cmp		tagSize, 8
	b.eq	8f
	b.cc	4f
	b		16f
8:	// 8-byte tags from xT0..xT3
	stp		xT0, xT1, [pT]
	stp		xT2, xT3, [pT, 16]
	b		16f
.endif
4:	// 4-byte tags from wT0..wT3
	stp		wT0, wT1, [pT]
	stp		wT2, wT3, [pT, 8]
16:	// write back per-lane data pointers
	stp		pIN0, pIN1, [pIn]
	stp		pIN2, pIN3, [pIn, 16]

	add		sp, sp, 112
	FUNC_RESTORE
	ret
.endm

/*
 * extern void asm_Zuc256Initialization_aarch64(uint8_t* pKey, uint8_t* pIV, uint32_t * pState, const unsigned tag_sz)
 * param[in]:
 *      x0 - pKey
 *      x1 - pIV
 *      x2 - pState
 *      x3 - tag_sz
 */
START_FUNC(ZUC256_INIT)

	// Delegates to the ZUC256_INIT macro body (pulled in via the .inc
	// includes). Arguments are passed through in x0-x3 as documented above.
	ZUC256_INIT
	ret

END_FUNC(ZUC256_INIT)

/*
 * uint32_t asm_Eia3Round16B_aarch64(uint32_t *T, const void *KS, const void *DATA)
 * Updates authentication tag T based on keystream KS and DATA.
 * - it processes 16 bytes of DATA
 * - reads data in 16 byte chunks and bit reverses them
 * - reads and re-arranges KS
 * - employs clmul for the XOR & ROL part
 * - copies top 16 bytes of KS to bottom (for the next round)
 *
 *      x0 - T
 *      x1 - KS
 *      x2 - DATA
 *      x3 - TAG_SZ (tag size in bytes: 4, 8 or 16)
 */
START_FUNC(ZUC_EIA3ROUND16B)
	declare_register	pT,	x0
	declare_register	KS,	x1
	declare_register	pIN0,	x2
	declare_register	TAG_SZ,	x3             // tag size in bytes: 4, 8 or 16
	declare_register	wT,	w4
	declare_register	xT,	x4
	declare_register	wT0,	w5
	declare_register	xT0,	x5
	declare_register	vKS_RO,	v31

	FUNC_SAVE

	// - save clobbered registers
	// (cpu_feature_detect clobbers the argument registers x0-x2)
	mov		x19, x0
	mov		x20, x1
	mov		x21, x2

	// - tell if pmull is supported
	bl		cpu_feature_detect
	ands	x24, x0, IMB_FEATURE_PMULL     // also sets Z for the b.eq below

	// - restore clobbered registers
	mov		x0, x19
	mov		x1, x20
	mov		x2, x21

	adrp	xTMP, KS_reorder
	add		xTMP, xTMP, #:lo12:KS_reorder
	ld1		{vKS_RO.16b}, [xTMP]           // load the KS_reorder pattern into vKS_RO

	// NZCV from the "ands" above is still live: mov/adrp/add/ld1 do not
	// touch the flags. Taken when the PMULL feature bit is clear.
	b.eq	PMULL_DISABLE

PMULL_ENABLE:
	// Hardware PMULL path, dispatched on tag size
	cmp		TAG_SZ, #8
	b.eq	TAG_8B_PMULL
	b.cc	TAG_4B_PMULL
TAG_16B_PMULL:	// fall-through: TAG_SZ above 8
	ld1		{v14.16b, v15.16b}, [KS]       // 32-byte keystream window for this round
	ROUND	0, v14, v15, 1, 16, 0
	b		ROUND_END
TAG_8B_PMULL:
	ld1		{v14.16b, v15.16b}, [KS]
	ROUND	0, v14, v15, 1, 8, 0
	b		ROUND_END
TAG_4B_PMULL:
	ld1		{v14.16b, v15.16b}, [KS]
	ROUND	0, v14, v15, 1, 4, 0
	b		ROUND_END

PMULL_DISABLE:
	// Emulated PMULL path, dispatched on tag size
	cmp		TAG_SZ, #8
	b.eq	TAG_8B_EMUL_PMULL
	b.cc	TAG_4B_EMUL_PMULL
TAG_16B_EMUL_PMULL:	// fall-through: TAG_SZ above 8
	ld1		{v14.16b, v15.16b}, [KS]
	ROUND	0, v14, v15, 0, 16, 0
	b		ROUND_END
TAG_8B_EMUL_PMULL:
	ld1		{v14.16b, v15.16b}, [KS]
	ROUND	0, v14, v15, 0, 8, 0
	b		ROUND_END
TAG_4B_EMUL_PMULL:
	ld1		{v14.16b, v15.16b}, [KS]
	ROUND	0, v14, v15, 0, 4, 0
	b		ROUND_END

ROUND_END:

	FUNC_RESTORE

	ret

END_FUNC(ZUC_EIA3ROUND16B)

/*
 * uint32_t asm_Eia3Remainder_aarch64(const void *ks, const void *data, uint64_t n_bits)
 * Returns authentication update value to be XOR'ed with current authentication tag
 *
 *      x0 - KS
 *      x1 - DATA
 *      x2 - N_BITS
 */
START_FUNC(ZUC_EIA3REMAINDER)

	declare_register	KS,	x3             // note: arg arrives in x0, moved to x3 below
	declare_register	DATA,	x1
	declare_register	N_BITS,	x2

	FUNC_SAVE

	eor		v7.16b, v7.16b, v7.16b         // v7 = running tag-update accumulator

	// x0-x2 are clobbered by cpu_feature_detect - stash them
	mov		x19, x0
	mov		x20, x1
	mov		x21, x2

	bl		cpu_feature_detect
	ands	x24, x0, IMB_FEATURE_PMULL     // x24 nonzero iff PMULL is supported

	mov		x3, x19                        // KS
	mov		x1, x20                        // DATA
	mov		x2, x21                        // N_BITS

Eia3Rounds_dq_end:	// NOTE(review): label appears unreferenced in this file chunk
.rept	3
	// Process up to three full 32-bit data words. Each word carry-less
	// multiplies the bit-reversed data against a sliding 64-bit keystream
	// window - the clmul implements the XOR and ROL accumulation, the same
	// trick used by asm_Eia3Round16B above.
	cmp		N_BITS, #32
	b.cc	Eia3Rounds_dw_end

	// swap dwords in KS
	ld1		{v1.8b}, [KS]
	add		KS, KS, #4                     // keystream window slides by 4 bytes
	rev64	v1.4s, v1.4s

	// bit-reverse 4 bytes of data
	eor		v0.16b, v0.16b, v0.16b
	ld1		{v0.s}[0], [DATA]
	add		DATA, DATA, #4
	rbit	v0.16b, v0.16b

	// rol & xor
	cbz		x24, 1f                        // no PMULL support: use emulation
	pmull	v0.1q, v0.1d, v1.1d
	b		2f
1:
	EMULATE_PMULL	v0, v0, v1
2:
	eor		v7.16b, v0.16b, v7.16b         // accumulate into v7

	sub		N_BITS, N_BITS, #32
.endr
Eia3Rounds_dw_end:
	mov		w0, v7.s[1]                    // return value: accumulated update word
	cbz		N_BITS, Eia3Rounds_byte_loop_end

	// Remaining bits: switch to a scalar loop over one 64-bit keystream
	// window. NOTE(review): a single 64-bit window can only supply rotated
	// 32-bit KS words for a sub-32-bit remainder - confirm the n_bits
	// contract with callers.
	ldr		KS, [KS]

Eia3Rounds_byte_loop:
	cbz		N_BITS, Eia3Rounds_byte_loop_end
	cmp		N_BITS, #8
	b.cc	Eia3Rounds_byte_partial

	ldrb	w4, [DATA]                     // full byte of data
	sub		N_BITS, N_BITS, #8
	b		Eia3Rounds_byte_read

Eia3Rounds_byte_partial:
	// process remaining bits (up to 7)
	adr		xTMP, bit_mask_table
	ldrb	w5, [xTMP, N_BITS]             // mask keeping the top N_BITS bits of the byte
	ldrb	w4, [DATA]
	and		w4, w4, w5
	eor		N_BITS, N_BITS, N_BITS         // N_BITS = 0 so the loop exits next pass

Eia3Rounds_byte_read:
	// For every set data bit (MSB first), XOR in the low 32 bits of the
	// keystream window, then rotate the window left by one bit.
.set	DATATEST, 0x80
.rept	8
	tst		x4, DATATEST
	csel	x5, KS, xzr, ne
	eor		w0, w0, w5
	ror		KS, KS, #63                    // rotate right by 63 = rotate left by 1
.set	DATATEST, (DATATEST >> 1)
.endr
	add		DATA, DATA, #1
	b		Eia3Rounds_byte_loop

Eia3Rounds_byte_loop_end:

	FUNC_RESTORE

	ret

END_FUNC(ZUC_EIA3REMAINDER)

// bit_mask_table[n]: byte mask keeping the n most-significant bits (n = 0..7).
// Indexed with the residual bit count in Eia3Rounds_byte_partial.
bit_mask_table:
	.byte	0x00	// 0 bits
	.byte	0x80	// 1 bit
	.byte	0xc0	// 2 bits
	.byte	0xe0	// 3 bits
	.byte	0xf0	// 4 bits
	.byte	0xf8	// 5 bits
	.byte	0xfc	// 6 bits
	.byte	0xfe	// 7 bits

// 4-lane ZUC initialization entry point, 128-bit keys.
// NOTE(review): no explicit ret here - the ZUC_INIT_4 macro body (defined in
// the .inc includes, not visible in this chunk) presumably returns; confirm.
START_FUNC(ZUC128_INIT_4)

	ZUC_INIT_4 128

END_FUNC(ZUC128_INIT_4)

// 4-lane ZUC initialization entry point, 256-bit keys.
// NOTE(review): no explicit ret here - the ZUC_INIT_4 macro body (defined in
// the .inc includes, not visible in this chunk) presumably returns; confirm.
START_FUNC(ZUC256_INIT_4)

	ZUC_INIT_4 256

END_FUNC(ZUC256_INIT_4)

/*
 * ZUC_CIPHER_4: process min_len bytes of 4 buffers in parallel with ZUC
 * (keystream generation and XOR are done inside CIPHERNx4B_4).
 *
 *   x0 - pState   4-lane ZUC state: LFSR plus FSM registers (OFS_R1/OFS_R2)
 *   x1 - pIn      array of 4 input pointers (advanced by buf_idx on exit)
 *   x2 - pOut     array of 4 output pointers (advanced by buf_idx on exit)
 *   x3 - lengths  4 x 32-bit remaining lengths, updated in place on exit
 *                 (UINT16_MAX marks a NULL/invalid lane)
 *   w4 - min_len  number of bytes to process in this call
 */
START_FUNC(ZUC_CIPHER_4)

	declare_register	pState,	x0
	declare_register	pIn,	x1
	declare_register	pOut,	x2
	declare_register	lengths,	x3
	declare_register	min_len,	w4
	declare_register	buf_idx,	x5

	cbz		min_len, exit_cipher           // nothing to do

	FUNC_SAVE

	// Per-lane input pointers (x20, x21, x22, x19) and output pointers
	// (x26-x29). NOTE(review): presumably consumed by CIPHERNx4B_4, whose
	// definition is not visible in this chunk.
	ldp		x20, x21, [pIn, #0]
	ldp		x22, x19, [pIn, #16]

	ldp		x26, x27, [pOut, #0]
	ldp		x28, x29, [pOut, #16]

	// Convert all lengths from UINT16_MAX (indicating that lane is not valid) to min length
	dup		v0.4s, min_len
	ld1		{v1.4s}, [lengths]
	cmeq	v2.4s, v2.4s, v2.4s        // Get all ff's in v register (v2 == v2 is always true)
	cmeq	v3.4s, v1.4s, v2.4s        // Mask with FFFF in NULL jobs

	and		v4.16b, v3.16b, v0.16b     // Length of valid job in all NULL jobs
	eor		v2.16b, v2.16b, v3.16b     // Mask with 0000 in NULL jobs
	and		v1.16b, v1.16b, v2.16b     // Zero out lengths of NULL jobs
	orr		v1.16b, v1.16b, v4.16b     // v1 contains updated lengths

	// Round up to nearest multiple of 4 bytes
	movi	v5.4s, #0x3
	mov		w6, 0xfffffffc
	dup		v6.4s, w6
	add		v0.4s, v0.4s, v5.4s
	and		v0.16b, v0.16b, v6.16b

	// Calculate remaining bytes to encrypt after function call
	sub		v2.4s, v1.4s, v0.4s
	eor		v3.16b, v3.16b, v3.16b
	cmgt	v4.4s, v2.4s, v3.4s         // Mask with FFFF in lengths > 0
	and		v2.16b, v2.16b, v4.16b      // Set to zero the lengths of the lanes which are going to be completed (lengths < 0)
	st1		{v2.4s}, [lengths]          // Update in memory the final updated lengths

	/* Calculate number of bytes to encrypt after rounds of 16 bytes (up to 15 bytes),
	 * for each lane, and store it in stack to be used in the last round
	 */
	sub		v1.4s, v1.4s, v2.4s         // Bytes to encrypt in all lanes
	movi	v5.4s, #0xf
	and		v1.16b, v1.16b, v5.16b      // Number of final bytes (up to 15 bytes) for each lane
	cmeq	v2.4s, v1.4s, v3.4s         // Mask with FFFF in lengths == 0
	movi	v5.4s, #0x10
	and		v2.16b, v2.16b, v5.16b      // 16 in positions where lengths was 0
	orr		v30.16b, v1.16b, v2.16b     // Number of final bytes (up to 16 bytes) for each lane

	eor		buf_idx, buf_idx, buf_idx   // buf_idx = 0 (bytes processed so far)

	LOAD_LFSR_LIST	pState, 0

	ldr		qFR1, [pState, #OFS_R1]     // load FSM registers R1/R2
	ldr		qFR2, [pState, #OFS_R2]

loop_cipher64:
	// Main loop: 64 bytes per lane per iteration (4 passes of 4x4B rounds)
	cmp		min_len, #64
	b.lt	exit_loop_cipher64

.set	round_off, 0
.rept	4
	CIPHERNx4B_4	4, round_off, buf_idx, 0

	add		buf_idx, buf_idx, #16
	sub		min_len, min_len, #16
.set	round_off, (round_off + 4)
.endr
	b		loop_cipher64
exit_loop_cipher64:
	// Check if there are more bytes left to encrypt
	add		w6, min_len, 3
	lsr		w6, w6, #2            // number of rounds left (round up length to nearest multiple of 4B)
	cbz		w6, store_lfsr_and_exit

	// Branch tree dispatching on w6 = 1..16 remaining 4-byte rounds
	cmp		w6, 8
	b.eq	_num_final_rounds_is_8
	b.lo	_final_rounds_is_1_7

	// Final blocks 9-16
	cmp		w6, 12
	b.eq	_num_final_rounds_is_12
	b.hi	_final_rounds_is_13_16

	// Final blocks 9-11
	cmp		w6, 10
	b.eq	_num_final_rounds_is_10
	b.lo	_num_final_rounds_is_9
	b.hi	_num_final_rounds_is_11

_final_rounds_is_13_16:
	cmp		w6, 16
	b.eq	_num_final_rounds_is_16
	cmp		w6, 14
	b.eq	_num_final_rounds_is_14
	b.lo	_num_final_rounds_is_13
	b.hi	_num_final_rounds_is_15

_final_rounds_is_1_7:
	cmp		w6, 4
	b.eq	_num_final_rounds_is_4
	b.lt	_final_rounds_is_1_3

	// Final blocks 5-7
	cmp		w6, 6
	b.eq	_num_final_rounds_is_6
	b.lo	_num_final_rounds_is_5
	b.hi	_num_final_rounds_is_7

_final_rounds_is_1_3:
	cmp		w6, 2
	b.eq	_num_final_rounds_is_2
	b.hi	_num_final_rounds_is_3

	// 1-4 rounds: single partial pass (last arg 1 = final/partial handling)
.irp	I,1,2,3,4
_num_final_rounds_is_\I:
	CIPHERNx4B_4	\I, 0, buf_idx, 1
	STORE_LFSR_LIST	pState, \I
	add		buf_idx, buf_idx, \I * 4
	b		exit_final_rounds
.endr

	// 5-8 rounds: one full 16-byte pass, then a partial pass
.irp	I,5,6,7,8
_num_final_rounds_is_\I:
	CIPHERNx4B_4	4, 0, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	(\I-4), 4, buf_idx, 1
	add		buf_idx, buf_idx, ((\I-4)*4)
	STORE_LFSR_LIST	pState, \I
	b		exit_final_rounds
.endr

	// 9-12 rounds: two full passes, then a partial pass
.irp	I,9,10,11,12
_num_final_rounds_is_\I:
	CIPHERNx4B_4	4, 0, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	4, 4, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	(\I-8), 8, buf_idx, 1
	add		buf_idx, buf_idx, ((\I-8)*4)
	STORE_LFSR_LIST	pState, \I
	b		exit_final_rounds
.endr

	// 13-16 rounds: three full passes, then a partial pass
.irp	I,13,14,15,16
_num_final_rounds_is_\I:
	CIPHERNx4B_4	4, 0, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	4, 4, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	4, 8, buf_idx, 0
	add		buf_idx, buf_idx, #16
	CIPHERNx4B_4	(\I-12), 12, buf_idx, 1
	add		buf_idx, buf_idx, ((\I-12)*4)
	STORE_LFSR_LIST	pState, \I
	b		exit_final_rounds
.endr

store_lfsr_and_exit:
	STORE_LFSR_LIST	pState, 0
exit_final_rounds:
	str		qFR1, [pState, #OFS_R1]        // persist FSM registers R1/R2
	str		qFR2, [pState, #OFS_R2]
	// update in/out pointers
	dup		v0.2d, buf_idx                 // advance all 8 pointers by bytes processed
	ld1		{v1.2d, v2.2d}, [pIn]
	add		v1.2d, v1.2d, v0.2d
	add		v2.2d, v2.2d, v0.2d
	st1		{v1.2d, v2.2d}, [pIn]
	ld1		{v1.2d, v2.2d}, [pOut]
	add		v1.2d, v1.2d, v0.2d
	add		v2.2d, v2.2d, v0.2d
	st1		{v1.2d, v2.2d}, [pOut]

	FUNC_RESTORE

exit_cipher:
	ret

END_FUNC(ZUC_CIPHER_4)

/*
 * ZUC_XORKEYSTREAM16B: out[0..15] = in[0..15] XOR byteswap32(ks[0..15])
 *   x0 - pIn, x1 - pOut, x2 - pKS
 * rev32 byte-swaps each 32-bit keystream word before the XOR.
 * NOTE(review): presumably converts the big-endian words ZUC produces to
 * memory order - confirm against the keystream generation code.
 */
START_FUNC(ZUC_XORKEYSTREAM16B)

	declare_register	pIn,	x0
	declare_register	pOut,	x1
	declare_register	pKS,	x2
	declare_register	XKEY,	v0
	declare_register	XIN,	v1

	ld1		{XKEY.16b}, [pKS]
	rev32	XKEY.16b, XKEY.16b             // byte-swap each 32-bit keystream word
	ld1		{XIN.16b}, [pIn]
	eor		XKEY.16b, XKEY.16b, XIN.16b
	st1		{XKEY.16b}, [pOut]

	ret

END_FUNC(ZUC_XORKEYSTREAM16B)

// Generate 16 bytes of keystream per lane (4 lanes). The KEYGEN_4_AARCH64
// argument is the number of 4-byte keystream words, matching the 16B/8B/4B
// entry points below.
START_FUNC(ZUC_KEYGEN16B_4)

	KEYGEN_4_AARCH64 4

	ret

END_FUNC(ZUC_KEYGEN16B_4)

// Generate 8 bytes (2 keystream words) per lane, 4 lanes.
START_FUNC(ZUC_KEYGEN8B_4)

	KEYGEN_4_AARCH64 2

	ret

END_FUNC(ZUC_KEYGEN8B_4)

// Generate 4 bytes (1 keystream word) per lane, 4 lanes.
START_FUNC(ZUC_KEYGEN4B_4)

	KEYGEN_4_AARCH64 1

	ret

END_FUNC(ZUC_KEYGEN4B_4)

// 4-lane ZUC EIA3 authentication, 128-bit keys.
// Body is the _ZUC_AUTH_4 macro above, which contains the ret.
START_FUNC(ZUC128_AUTH_4)
	_ZUC_AUTH_4 128
END_FUNC(ZUC128_AUTH_4)

// 4-lane ZUC EIA3 authentication, 256-bit keys (4/8/16-byte tags).
// Body is the _ZUC_AUTH_4 macro above, which contains the ret.
START_FUNC(ZUC256_AUTH_4)
	_ZUC_AUTH_4 256
END_FUNC(ZUC256_AUTH_4)
