#ifdef __aarch64__
    .text
    .align  5
    //.p2align 5,,15
    .global Conv8x8
#ifndef __APPLE__
    .type   Conv8x8, %function
#endif


// Zero the 16 accumulator registers v16 ~ v31.
// The FMLA loop in Conv8x8 only ever accumulates into these registers, so
// they MUST start at zero before the first iteration.
// "movi vN.4s, #0" would also work; dup from wzr (the architectural zero
// register) is used instead to avoid spending immediate encodings.
// NOTE: v15 is deliberately NOT zeroed here — it is a callee-saved weight
// register that gets loaded inside the loop before its first use.
.macro INIT_ZERO
    dup v16.4s, wzr
    dup v17.4s, wzr
    dup v18.4s, wzr
    dup v19.4s, wzr
    dup v20.4s, wzr
    dup v21.4s, wzr
    dup v22.4s, wzr
    dup v23.4s, wzr
    dup v24.4s, wzr
    dup v25.4s, wzr
    dup v26.4s, wzr
    dup v27.4s, wzr
    dup v28.4s, wzr
    dup v29.4s, wzr
    dup v30.4s, wzr
    dup v31.4s, wzr
.endm


// void Conv8x8(input, weight, output, bias, step, offset);
// x0: input, x1: weight, x2: output, x3: bias,
// x4: step (loop iteration count), x5: offset (output row stride in bytes)
// Accumulator layout: v16/v17 = pixel 0 (output channels 0-3 / 4-7),
// v18/v19 = pixel 1, ..., v30/v31 = pixel 7.
Conv8x8:
    // registers v8 ~ v15 must be preserved by a callee across subroutine calls,
    // according to
    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#simd-and-floating-point-registers
    // x19 ~ x29 would also need preserving, but this kernel does not touch them.
    sub sp, sp, #128
    // performance between storing 4 registers at once and storing them
    // separately on in-order cores has not been measured yet
    st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp], #64
    st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp], #64

    // Accumulators MUST start at zero: the loop below only ever FMLAs into
    // v16 ~ v31, and those registers are caller-clobbered under AAPCS64,
    // so they hold garbage on entry.
    INIT_ZERO

    Conv8x8Loop:
        // load input: pixels 0-3, 4 input channels each
        ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64

        // load weight: input channels 0-1 x output channels 0-7
        ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [x1], #64

        // indirect gemm begin
        fmla v16.4s, v8.4s, v0.4s[0]
        fmla v18.4s, v8.4s, v1.4s[0]
        fmla v20.4s, v8.4s, v2.4s[0]
        fmla v22.4s, v8.4s, v3.4s[0]

        // load input season two: pixels 4-7
        // input cache should be refreshed after loading
        // ATTENTION: prefetching ahead is preferred, but advancing too much
        // may lead to invalid prefetching
        ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [x0], #64
        prfm pldl1keep, [x0, #64]
        prfm pldl1keep, [x0, #128]

        fmla v24.4s, v8.4s, v4.4s[0]
        fmla v26.4s, v8.4s, v5.4s[0]
        fmla v28.4s, v8.4s, v6.4s[0]
        fmla v30.4s, v8.4s, v7.4s[0]

        // load weight season two: input channels 2-3 x output channels 0-7
        // same prefetch caveat as above
        ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [x1], #64
        prfm pldl1keep, [x1, #64]
        prfm pldl1keep, [x1, #128]

        fmla v17.4s, v9.4s, v0.4s[0]
        fmla v19.4s, v9.4s, v1.4s[0]
        fmla v21.4s, v9.4s, v2.4s[0]
        fmla v23.4s, v9.4s, v3.4s[0]
        fmla v25.4s, v9.4s, v4.4s[0]
        fmla v27.4s, v9.4s, v5.4s[0]
        fmla v29.4s, v9.4s, v6.4s[0]
        fmla v31.4s, v9.4s, v7.4s[0]

        fmla v16.4s, v10.4s, v0.4s[1]
        fmla v18.4s, v10.4s, v1.4s[1]
        fmla v20.4s, v10.4s, v2.4s[1]
        fmla v22.4s, v10.4s, v3.4s[1]
        fmla v24.4s, v10.4s, v4.4s[1]
        fmla v26.4s, v10.4s, v5.4s[1]
        fmla v28.4s, v10.4s, v6.4s[1]
        fmla v30.4s, v10.4s, v7.4s[1]

        fmla v17.4s, v11.4s, v0.4s[1]
        fmla v19.4s, v11.4s, v1.4s[1]
        fmla v21.4s, v11.4s, v2.4s[1]
        fmla v23.4s, v11.4s, v3.4s[1]
        fmla v25.4s, v11.4s, v4.4s[1]
        fmla v27.4s, v11.4s, v5.4s[1]
        fmla v29.4s, v11.4s, v6.4s[1]
        fmla v31.4s, v11.4s, v7.4s[1]

        fmla v16.4s, v12.4s, v0.4s[2]
        fmla v18.4s, v12.4s, v1.4s[2]
        fmla v20.4s, v12.4s, v2.4s[2]
        fmla v22.4s, v12.4s, v3.4s[2]
        fmla v24.4s, v12.4s, v4.4s[2]
        fmla v26.4s, v12.4s, v5.4s[2]
        fmla v28.4s, v12.4s, v6.4s[2]
        fmla v30.4s, v12.4s, v7.4s[2]

        fmla v17.4s, v13.4s, v0.4s[2]
        fmla v19.4s, v13.4s, v1.4s[2]
        fmla v21.4s, v13.4s, v2.4s[2]
        fmla v23.4s, v13.4s, v3.4s[2]
        fmla v25.4s, v13.4s, v4.4s[2]
        fmla v27.4s, v13.4s, v5.4s[2]
        fmla v29.4s, v13.4s, v6.4s[2]
        fmla v31.4s, v13.4s, v7.4s[2]

        fmla v16.4s, v14.4s, v0.4s[3]
        fmla v18.4s, v14.4s, v1.4s[3]
        fmla v20.4s, v14.4s, v2.4s[3]
        fmla v22.4s, v14.4s, v3.4s[3]
        fmla v24.4s, v14.4s, v4.4s[3]
        fmla v26.4s, v14.4s, v5.4s[3]
        fmla v28.4s, v14.4s, v6.4s[3]
        fmla v30.4s, v14.4s, v7.4s[3]

        fmla v17.4s, v15.4s, v0.4s[3]
        fmla v19.4s, v15.4s, v1.4s[3]
        fmla v21.4s, v15.4s, v2.4s[3]
        fmla v23.4s, v15.4s, v3.4s[3]
        fmla v25.4s, v15.4s, v4.4s[3]
        fmla v27.4s, v15.4s, v5.4s[3]
        fmla v29.4s, v15.4s, v6.4s[3]
        fmla v31.4s, v15.4s, v7.4s[3]

        subs x4, x4, #1
        bne Conv8x8Loop

    // load bias: one fp32 per output channel -> v0 = channels 0-3,
    // v1 = channels 4-7. The same two vectors apply to every pixel,
    // so even accumulators get v0 and odd accumulators get v1.
    // (v2 ~ v15 hold stale loop data here and must NOT be used.)
    ld1 {v0.4s, v1.4s}, [x3]

    fadd v16.4s, v16.4s, v0.4s
    fadd v17.4s, v17.4s, v1.4s
    fadd v18.4s, v18.4s, v0.4s
    fadd v19.4s, v19.4s, v1.4s
    fadd v20.4s, v20.4s, v0.4s
    fadd v21.4s, v21.4s, v1.4s
    fadd v22.4s, v22.4s, v0.4s
    fadd v23.4s, v23.4s, v1.4s
    fadd v24.4s, v24.4s, v0.4s
    fadd v25.4s, v25.4s, v1.4s
    fadd v26.4s, v26.4s, v0.4s
    fadd v27.4s, v27.4s, v1.4s
    fadd v28.4s, v28.4s, v0.4s
    fadd v29.4s, v29.4s, v1.4s
    fadd v30.4s, v30.4s, v0.4s
    fadd v31.4s, v31.4s, v1.4s

    // prefetching is not preferred while writing results in spite of cache
    // misses; you could try prfm pstl2strm, but almost no benefit was observed.
    // x4 is guaranteed ZERO here (the subs loop counted it down), so the row
    // stride comes from x5 ("offset") — presumably the byte gap between
    // consecutive output rows; confirm against the caller.
    st1 {v16.4s, v17.4s}, [x2], #32
    add x2, x2, x5
    st1 {v18.4s, v19.4s}, [x2], #32
    add x2, x2, x5
    st1 {v20.4s, v21.4s}, [x2], #32
    add x2, x2, x5
    st1 {v22.4s, v23.4s}, [x2], #32
    add x2, x2, x5
    st1 {v24.4s, v25.4s}, [x2], #32
    add x2, x2, x5
    st1 {v26.4s, v27.4s}, [x2], #32
    add x2, x2, x5
    st1 {v28.4s, v29.4s}, [x2], #32
    add x2, x2, x5
    st1 {v30.4s, v31.4s}, [x2], #32

    // do not forget to restore the callee-saved NEON registers
    sub sp, sp, #128
    ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [sp], #64
    ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [sp], #64
    ret
#endif
