#include <machine/asmdefs.h>

ASM_HEADER

FNDEFN_BEGIN(AdaptiveEqPreamble_7_asm)

        /*
         * AdaptiveEqPreamble_7_asm — preamble-phase adaptive update of a 7-tap
         * complex equalizer (coefficients in vr5/vr7, input samples in vr0/vr2).
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample pointer: advanced 8 bytes/iteration, rewound by
         *         28 after the loop, samples stored back "shifted by 7";
         *         set to r12+7 at exit (presumably the updated count returned
         *         to the caller — confirm)
         *   r9  - coefficient buffer pointer (loaded, updated, stored back)
         *   r10 - pointer read with luhu each iteration; pre-adjusted by parity
         *   r11 - energy_fixed scale on entry (broadcast into vr3), then zeroed
         *         and reused to clear accumulators via ctsr writes to sr72-75
         *   r12 - eq_count (parity selects 3 vs 4 loop iterations)
         *   jt0 - return address (ji %jt0,8)
         * NOTE(review): sr72..75 appear to be the acc0..acc3 load/control
         * registers and sr5 the counter consumed by `loop 0,%lc1` — confirm
         * against the ISA manual.
         */

        /* load coeffs   |   create vector (vr3) of energy_fixed:
         * slil moves energy_fixed into r11's upper half, ctsr loads it into
         * acc0, rbroad replicates acc0 across vr3; r11 is then zeroed so the
         * later ctsr writes clear the accumulators. */
        lr              %vr5,%r9,0              ||      slil            %r11,%r11,16
        lr              %vr7,%r9,32             ||      ctsr            %r11,72
        rbroad          %vr3,%ac0,0             ||      lir             %r11,0

	/*
	 * check if eq_count is even or odd
	 * if even, set loop counter to 4
	 * if odd, set loop counter to 3, increment r8 and r10 appropriately
	 */
        andi            %r13,%r12,1
        cmpei           0,%cf0,%r13,0
        jc              1,%cf0,EVEN_7
ODD_7:
        lir             %r15,3
        addi            %r10,%r10,-2
        addi            %r8,%r8,4
        j               END_7
EVEN_7:
        lir             %r15,4
        addi            %r10,%r10,-4
END_7:
        ctsr            %r15,5                  /* sr5 <- iteration count for %lc1 */

/* load inputs   |   set acc0, acc1 to 0 (r11 == 0 here) */
        lr              %vr0,%r8,0              ||      ctsr            %r11,72
        lr              %vr2,%r8,32             ||      ctsr            %r11,73

/* set acc2, acc3 to 0 */
        ctsr            %r11,74
        ctsr            %r11,75

/* separate real/imaginary for coeffs, set mask for upper 16 bits */
        rshf1   %vr7,%vr5,7                     ||      lirl            %r13,0xFFFF0000


LOOP_PREAMBLE_7:
        /* separate real/imag for inputs */
        rshf1   %vr2,%vr0,7                     ||      lir             %r14,0

        /* perform real multiply and reduce (buff_LE * w_LE) */
        /* R*R, I*I   |   advance input ptr; fetch sample via luhu into r14 */
        rmulreds %ac1,%vr0,%vr5                 ||      addil           %r8,%r8,8
        rmulreds %ac2,%vr2,%vr7                 ||      luhu            %r14,%r10,4
        /* R*I, I*R   |   read back scalar reductions from acc1/acc2 (sr73/74) */
        rmulreds %ac0,%vr0,%vr7                 ||      cfsr            %r3,73
        rmulreds %ac0,%vr2,%vr5                 ||      cfsr            %r15,74

        /* broadcast ei (vr6) and shift in zero  |   calculate er */
        rbroad          %vr6,%ac0,0             ||      subf            %r15,%r15,%r3
        rshift          %vr6,%ac3,0             ||      and             %r15,%r15,%r13

        /* compute real/imaginary taps */
        /* vr1 <- R*I, vr6 <- I*I   |   fold r14 (luhu sample) into er, load it into acc0 */
        rmul            %vr1,%vr0,%vr6          ||      subf            %r15,%r15,%r14
        rmul            %vr6,%vr2,%vr6          ||      ctsr            %r15,72

        /* compute er (vr4), with zero shifted in at end  |  re-clear accumulators */
        rbroad          %vr4,%ac0,0
        rshift          %vr4,%ac3,0             ||      ctsr            %r11,72

        /* vr6 <- R*R - I*I, vr1 <- I*R + R*I */
        rmsub           %vr6,%vr0,%vr4          ||      ctsr            %r11,73
        rmac            %vr1,%vr2,%vr4          ||      ctsr            %r11,74

        /* multiply reals by energy fixed and add to real filter coeffs  |   load next input */
        rmacs           %vr5,%vr6,%vr3          ||      lr              %vr0,%r8,0

        /* multiply imags by energy fixed and sub from imag filter coeffs  |   load next input */
        rmuls           %vr4,%vr1,%vr3          ||      lr              %vr2,%r8,32
        rsubfs          %vr7,%vr4,%vr7          ||      loop            0,%lc1,LOOP_PREAMBLE_7

        /* shuffle back to interleaved real/imag format  |  rewind input ptr to window start */
        rshf            %vr7,%vr5,7             ||      addil           %r8,%r8,-28

        /* store inputs back (shifted by 7); second slot sets r8 = eq_count + 7 */
        str             %vr0,%r8,0
        str             %vr2,%r8,32             ||	addil           %r8,%r12,7

        /* store final coeffs and return */
        str             %vr5,%r9,0
        str             %vr7,%r9,32             ||      ji		%jt0,8


FNDEFN_END(AdaptiveEqPreamble_7_asm)


FNDEFN_BEGIN(AdaptiveEqPreamble_11_asm)

        /*
         * AdaptiveEqPreamble_11_asm — preamble-phase adaptive update for an
         * 11-tap complex equalizer. Same kernel as AdaptiveEqPreamble_7_asm,
         * with 5/6 loop iterations and an 11-sample buffer shift.
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample pointer: advanced 8 bytes/iteration, rewound by
         *         44 after the loop, samples stored back "shifted by 11";
         *         set to r12+11 at exit (presumably returned — confirm)
         *   r9  - coefficient buffer pointer (loaded, updated, stored back)
         *   r10 - pointer read with luhu each iteration; pre-adjusted by parity
         *   r11 - energy_fixed scale on entry (broadcast into vr3), then zeroed
         *         and reused to clear accumulators via ctsr writes to sr72-75
         *   r12 - eq_count (parity selects 5 vs 6 loop iterations)
         *   jt0 - return address
         * NOTE(review): sr72..75 appear to map to acc0..acc3 and sr5 to the
         * `loop 0,%lc1` counter — confirm against the ISA manual.
         */

        /* load coeffs   |   create vector (vr3) of energy_fixed */
        lr              %vr5,%r9,0              ||      slil            %r11,%r11,16
        lr              %vr7,%r9,32             ||      ctsr            %r11,72
        rbroad          %vr3,%ac0,0             ||      lir             %r11,0

/* check if eq_count is even or odd */
/* if even, set loop counter to 6 */
/* if odd, set loop counter to 5, increment r8 and r10 appropriately */
        andi            %r13,%r12,1
        cmpei           0,%cf0,%r13,0
        /* even-case count (6) is set in the branch slot; ODD_11 overwrites it */
        jc              0,%cf0,ODD_11           ||      lir             %r15,6
EVEN_11:
        addi            %r10,%r10,-4            ||      j               END_11
ODD_11:
        lir             %r15,5
        addi            %r10,%r10,-2
        addi            %r8,%r8,4
END_11:
        ctsr            %r15,5                  /* sr5 <- iteration count for %lc1 */

        /* load inputs   |   set acc0, acc1 to 0 (r11 == 0 here) */
        lr              %vr0,%r8,0              ||      ctsr            %r11,72
        lr              %vr2,%r8,32             ||      ctsr            %r11,73

        /* set acc2, acc3 to 0 */
        ctsr            %r11,74
        ctsr            %r11,75

/* separate real/imaginary for coeffs, set mask for upper 16 bits */
        rshf1   %vr7,%vr5,7                     ||      lirl            %r13,0xFFFF0000


LOOP_PREAMBLE_11:
        /* separate real/imag for inputs */
        rshf1   %vr2,%vr0,7                     ||      lir             %r14,0

        /* perform real multiply and reduce (buff_LE * w_LE) */
        /* R*R, I*I   |   advance input ptr; fetch sample via luhu into r14 */
        rmulreds %ac1,%vr0,%vr5                 ||      addil           %r8,%r8,8
        rmulreds %ac2,%vr2,%vr7                 ||      luhu            %r14,%r10,4
        /* R*I, I*R   |   read back scalar reductions from acc1/acc2 (sr73/74) */
        rmulreds %ac0,%vr0,%vr7                 ||      cfsr            %r3,73
        rmulreds %ac0,%vr2,%vr5                 ||      cfsr            %r15,74

        /* broadcast ei (vr6) and shift in zero  |   calculate er */
        rbroad          %vr6,%ac0,0             ||      subf            %r15,%r15,%r3
        rshift          %vr6,%ac3,0             ||      and             %r15,%r15,%r13

        /* compute real/imaginary taps */
        /* vr1 <- R*I, vr6 <- I*I   |   fold r14 (luhu sample) into er, load it into acc0 */
        rmul            %vr1,%vr0,%vr6          ||      subf            %r15,%r15,%r14
        rmul            %vr6,%vr2,%vr6          ||      ctsr            %r15,72

        /* compute er (vr4), with zero shifted in at end  |  re-clear accumulators */
        rbroad          %vr4,%ac0,0
        rshift          %vr4,%ac3,0             ||      ctsr            %r11,72

        /* vr6 <- R*R - I*I, vr1 <- I*R + R*I */
        rmsub           %vr6,%vr0,%vr4          ||      ctsr            %r11,73
        rmac            %vr1,%vr2,%vr4          ||      ctsr            %r11,74

        /* multiply reals by energy fixed and add to real filter coeffs  |   load next input */
        rmacs           %vr5,%vr6,%vr3          ||      lr              %vr0,%r8,0

        /* multiply imags by energy fixed and sub from imag filter coeffs  |   load next input */
        rmuls           %vr4,%vr1,%vr3          ||      lr              %vr2,%r8,32
        rsubfs          %vr7,%vr4,%vr7          ||      loop            0,%lc1,LOOP_PREAMBLE_11

        /* shuffle back to interleaved real/imag format  |  rewind input ptr to window start */
        rshf            %vr7,%vr5,7             ||      addil           %r8,%r8,-44

        /* store inputs back (shifted by 11); second slot sets r8 = eq_count + 11 */
        str             %vr0,%r8,0
        str             %vr2,%r8,32             ||	addil           %r8,%r12,11

        /* store final coeffs and return */
        str             %vr5,%r9,0
        str             %vr7,%r9,32             ||      ji		%jt0,8

FNDEFN_END(AdaptiveEqPreamble_11_asm)


FNDEFN_BEGIN(AdaptiveEqPreamble_11_4_asm)

        /*
         * AdaptiveEqPreamble_11_4_asm — preamble-phase adaptive update for an
         * 11-tap complex equalizer, processed in wider chunks than the _11
         * variant (input pointer advances 16 bytes/iteration, luhu offset 8;
         * the "_4" presumably refers to this 4-at-a-time stride — confirm).
         * Loop count is chosen from eq_count % 4 (2 or 3 iterations) with
         * matching r8/r10 pre-adjustments.
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample pointer: advanced 16 bytes/iteration, rewound
         *         by 44 after the loop, samples stored back "shifted by 11";
         *         set to r12+11 at exit (presumably returned — confirm)
         *   r9  - coefficient buffer pointer (loaded, updated, stored back)
         *   r10 - pointer read with luhu each iteration; adjusted by remainder
         *   r11 - energy_fixed scale on entry (broadcast into vr3), then zeroed
         *         and reused to clear accumulators via ctsr writes to sr72-75
         *   r12 - eq_count (r12 & 3 selects the REM_* entry adjustment)
         *   jt0 - return address
         * NOTE(review): sr72..75 appear to map to acc0..acc3 and sr5 to the
         * `loop 0,%lc1` counter — confirm against the ISA manual.
         */

        /* load coeffs   |   create vector (vr3) of energy_fixed */
        lr              %vr5,%r9,0              ||      slil            %r11,%r11,16
        lr              %vr7,%r9,32             ||      ctsr            %r11,72
        rbroad          %vr3,%ac0,0             ||      lir             %r11,0

/* check if eq_count % 4 is 0 or 1 or 2 or 3; REM_1 is the fall-through case
 * and its loop count (2) is set in the slot of the last conditional branch */
        andi            %r13,%r12,3
        cmpei           0,%cf0,%r13,2
        jc              1,%cf0,REM_2            ||      cmpei           0,%cf0,%r13,3
        jc              1,%cf0,REM_3            ||      cmpei           0,%cf0,%r13,0
        jc              1,%cf0,REM_0            ||      lir             %r15,2

REM_1:
        addi            %r10,%r10,-2
        addil           %r8,%r8,12              ||      j               END_REM
REM_2:
        lir             %r15,3
        addi            %r10,%r10,-4
        addil           %r8,%r8,8               ||      j               END_REM
REM_3:
        lir             %r15,3
        addi            %r10,%r10,-6
        addi            %r8,%r8,4               ||      j               END_REM
REM_0:
        lir             %r15,3
        addi            %r10,%r10,-8
END_REM:
        ctsr            %r15,5                  /* sr5 <- iteration count for %lc1 */

        /* load inputs   |   set acc0, acc1 to 0 (r11 == 0 here) */
        lr              %vr0,%r8,0              ||      ctsr            %r11,72
        lr              %vr2,%r8,32             ||      ctsr            %r11,73

        /* set acc2, acc3 to 0 */
        ctsr            %r11,74
        ctsr            %r11,75

/* separate real/imaginary for coeffs, set mask for upper 16 bits */
        rshf1   %vr7,%vr5,7                     ||      lirl            %r13,0xFFFF0000


LOOP_PREAMBLE_11_4:
        /* separate real/imag for inputs */
        rshf1   %vr2,%vr0,7                     ||      lir             %r14,0

        /* perform real multiply and reduce (buff_LE * w_LE) */
        /* R*R, I*I   |   advance input ptr; fetch sample via luhu into r14 */
        rmulreds %ac1,%vr0,%vr5                 ||      addil           %r8,%r8,16
        rmulreds %ac2,%vr2,%vr7                 ||      luhu            %r14,%r10,8
        /* R*I, I*R   |   read back scalar reductions from acc1/acc2 (sr73/74) */
        rmulreds %ac0,%vr0,%vr7                 ||      cfsr            %r3,73
        rmulreds %ac0,%vr2,%vr5                 ||      cfsr            %r15,74

        /* broadcast ei (vr6) and shift in zero  |   calculate er */
        rbroad          %vr6,%ac0,0             ||      subf            %r15,%r15,%r3
        rshift          %vr6,%ac3,0             ||      and             %r15,%r15,%r13

        /* compute real/imaginary taps */
        /* vr1 <- R*I, vr6 <- I*I   |   fold r14 (luhu sample) into er, load it into acc0 */
        rmul            %vr1,%vr0,%vr6          ||      subf            %r15,%r15,%r14
        rmul            %vr6,%vr2,%vr6          ||      ctsr            %r15,72

        /* compute er (vr4), with zero shifted in at end  |  re-clear accumulators */
        rbroad          %vr4,%ac0,0
        rshift          %vr4,%ac3,0             ||      ctsr            %r11,72

        /* vr6 <- R*R - I*I, vr1 <- I*R + R*I */
        rmsub           %vr6,%vr0,%vr4          ||      ctsr            %r11,73
        rmac            %vr1,%vr2,%vr4          ||      ctsr            %r11,74

        /* multiply reals by energy fixed and add to real filter coeffs  |   load next input */
        rmacs           %vr5,%vr6,%vr3          ||      lr              %vr0,%r8,0

        /* multiply imags by energy fixed and sub from imag filter coeffs  |   load next input */
        rmuls           %vr4,%vr1,%vr3          ||      lr              %vr2,%r8,32
        rsubfs          %vr7,%vr4,%vr7          ||      loop            0,%lc1,LOOP_PREAMBLE_11_4

        /* shuffle back to interleaved real/imag format  |  rewind input ptr to window start */
        rshf            %vr7,%vr5,7             ||      addil           %r8,%r8,-44

        /* store inputs back (shifted by 11); second slot sets r8 = eq_count + 11 */
        str             %vr0,%r8,0
        str             %vr2,%r8,32             ||	addil           %r8,%r12,11

        /* store final coeffs and return */
        str             %vr5,%r9,0
        str             %vr7,%r9,32             ||      ji		%jt0,8

FNDEFN_END(AdaptiveEqPreamble_11_4_asm)


FNDEFN_BEGIN(AdaptiveEqData_8_asm)

        /*
         * AdaptiveEqData_8_asm — data-phase filtering pass with an 8-tap
         * complex equalizer: 8 complex multiply-reduce iterations into ac0,
         * then the shifted input window is stored back and one output vector
         * (vr4) is written to *r10.
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample buffer (vr0..vr3 loaded; vr0/vr2 stored back)
         *   r9  - coefficient buffer (vr5 at offset 0, vr7 at offset 32)
         *   r10 - output buffer (vr4 stored at offset 0)
         *   jt0 - return address (ji %jt0,8)
         * NOTE(review): `lrr` appears to be a secondary/rotating load priming
         * vr1/vr3 for the rxmulreds* pipeline — confirm against the ISA manual.
         */

        /* load coeffs, set loop to 8 */
        lr              %vr5,%r9,0              ||        lir             %r14,8
        lr              %vr7,%r9,32             ||        ctsr            %r14,5

        /* load inputs */
        lr              %vr0,%r8,0
        lrr             %vr1,%r8,32
        lr              %vr2,%r8,32
        lrr             %vr3,%r8,64

LOOP_DATA_8:
        /* one complex multiply-reduce step: low/high halves of vr0*vr5 and
         * vr2*vr7 accumulated into ac0 */
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      loop            0,%lc1,LOOP_DATA_8

        /* final shift, store inputs back (shifted by 8) */
        rshift          %vr4,%ac0,2             ||      str             %vr0,%r8,0
        str             %vr2,%r8,32

        /* store output and return */
        str             %vr4,%r10,0             ||      ji		%jt0,8

FNDEFN_END(AdaptiveEqData_8_asm)


FNDEFN_BEGIN(AdaptiveEqData_11_asm)

        /*
         * AdaptiveEqData_11_asm — data-phase filtering pass with an 11-tap
         * complex equalizer: 7 looped + 4 unrolled multiply-reduce iterations
         * (11 total). Produces two output vectors: vr4 is stored to r10+0
         * mid-stream and to r10+32 at the end. The input window is shifted by
         * 11 samples (44 bytes) by copying from r8+44 back to r8 via vr6/vr7
         * (note: vr7 is reused as a copy temp after the coefficients are done).
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample buffer
         *   r9  - coefficient buffer (vr5/vr7)
         *   r10 - output buffer (stores at offsets 0 and 32)
         *   r12 - scratch: r8 + 44, source of the shifted-window copy
         *   r13 - zero, used to clear acc1 via ctsr sr73
         *   jt0 - return address
         * NOTE(review): `lrr` appears to be a secondary/rotating load priming
         * vr1/vr3 for the rxmulreds* pipeline — confirm against the ISA manual.
         */

        /* load coeffs, set loop to 7 */
        lr              %vr5,%r9,0              ||        lir             %r14,7
        lr              %vr7,%r9,32             ||        ctsr            %r14,5

        /* load inputs   |   clear acc1 (r13 == 0) */
        lr              %vr0,%r8,0              ||        lir             %r13,0
        lrr             %vr1,%r8,32             ||        ctsr            %r13,73
        lr              %vr2,%r8,32
        lrr             %vr3,%r8,64

        /* loop 7 times */
LOOP_DATA_11:
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      loop            0,%lc1,LOOP_DATA_11

        /* 4 more iterations (unrolled); slots refill vr1/vr3, store the first
         * output vector, and compute r12 = r8 + 44 for the window shift */
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7          ||      lrr             %vr1,%r8,64
        rxmulredsh2n    %ac0,%vr2,%vr7
        rxmulredsl2     1,%ac0,%vr0,%vr5        ||      lrr             %vr3,%r8,96
        rxmulredsh2n    %ac0,%vr0,%vr5          ||      str             %vr4,%r10,0
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      addil           %r12,%r8,44

        /* final shifts, store inputs back (shifted by 11)
         * NOTE(review): %ac1 is shifted twice by 6 below while ac2/ac3 are
         * never read — verify this is intentional (e.g. a cumulative shift)
         * and not a typo for ac2/ac3. */
        rshift          %vr4,%ac0,2             ||      lr              %vr6,%r12,0
        rshift          %vr4,%ac1,2             ||      lr              %vr7,%r12,32
        rshift          %vr4,%ac1,6             ||      str             %vr6,%r8,0
        rshift          %vr4,%ac1,6             ||      str             %vr7,%r8,32

        /* store second output vector and return */
        str             %vr4,%r10,32            ||      ji		%jt0,8

FNDEFN_END(AdaptiveEqData_11_asm)


FNDEFN_BEGIN(AdaptiveEqData_16_asm)

        /*
         * AdaptiveEqData_16_asm — data-phase filtering pass with a 16-tap
         * complex equalizer: 7 looped + 2 unrolled + 7 looped multiply-reduce
         * iterations (16 total). The first output vector (vr4) is stored to
         * r10+0 between the two loops, the second to r10+32 at the end, and
         * the input window is stored back shifted by 16.
         *
         * Register roles (as evidenced by the code below; confirm with caller):
         *   r8  - input sample buffer (vr0..vr3 loaded; vr0/vr2 stored back)
         *   r9  - coefficient buffer (vr5/vr7)
         *   r10 - output buffer (stores at offsets 0 and 32)
         *   r13 - zero, used to clear acc1 via ctsr sr73
         *   r14 - loop count 7; still live at the second `ctsr %r14,5`, which
         *         re-arms the counter for LOOP_DATA_16_B
         *   jt0 - return address
         * NOTE(review): `lrr` appears to be a secondary/rotating load priming
         * vr1/vr3 for the rxmulreds* pipeline — confirm against the ISA manual.
         */

        /* load coeffs, set loop to 7 */
        lr              %vr5,%r9,0              ||        lir             %r14,7
        lr              %vr7,%r9,32             ||        ctsr            %r14,5

        /* load inputs   |   clear acc1 (r13 == 0) */
        lr              %vr0,%r8,0              ||        lir             %r13,0
        lrr             %vr1,%r8,32             ||        ctsr            %r13,73
        lr              %vr2,%r8,32
        lrr             %vr3,%r8,64

        /* loop 7 times */
LOOP_DATA_16_A:
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      loop            0,%lc1,LOOP_DATA_16_A

        /* 2 more iterations (unrolled); slots refill vr1/vr3 and store the
         * first output vector */
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7          ||      lrr             %vr1,%r8,64
        rxmulredsh2n    %ac0,%vr2,%vr7

        rxmulredsl2     1,%ac0,%vr0,%vr5        ||      lrr             %vr3,%r8,96
        rxmulredsh2n    %ac0,%vr0,%vr5          ||      str             %vr4,%r10,0
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      ctsr            %r14,5

        /* loop 7 times */
LOOP_DATA_16_B:
        rxmulredsl2     1,%ac0,%vr0,%vr5
        rxmulredsh2n    %ac0,%vr0,%vr5
        rxmulredsl      %ac0,%vr2,%vr7
        rxmulredsh2n    %ac0,%vr2,%vr7          ||      loop            0,%lc1,LOOP_DATA_16_B

        /* final shift, store inputs back (shifted by 16) */
        rshift          %vr4,%ac0,2             ||      str             %vr0,%r8,0
        str             %vr2,%r8,32

        /* store second output vector and return */
        str             %vr4,%r10,32            ||      ji		%jt0,8

FNDEFN_END(AdaptiveEqData_16_asm)
