/**
 * Copyright (c) 2018-2022, NXOS Development Team
 * SPDX-License-Identifier: Apache-2.0
 *
 * Contains: memory ops by arch
 *
 * Change Logs:
 * Date           Author            Notes
 * 2022-07-30     GuEe-GUI          Init
 */

.text

#define __ASSEMBLY__
#include <regs.h>
#include <base/defines.h>

/* void *NX_MemSet(void *dest, NX_U8 value, NX_Size sz)
 *
 * Fills sz bytes at dest with value and returns dest (a0 untouched).
 * RV64-only: uses sd stores and a 32-bit shift in the value broadcast.
 * Register roles: t0 = running store pointer, a1 = fill value
 * (broadcast to all 8 bytes for the word loop), a2 = remaining byte
 * count, a3/a4/a5 = scratch.
 */
NX_FUNC(NX_MemSet)
    /* Preserve return value: a0 must still hold dest at ret */
    move    t0, a0

    /* Defer to byte-oriented fill for small sizes (sz < 16) */
    sltiu   a3, a2, 16
    bnez    a3, 4f

    /*
     * Round to nearest XLEN-aligned address
     * greater than or equal to start address
     */
    addi    a3, t0, REGSZ-1
    andi    a3, a3, ~(REGSZ-1)
    beq     a3, t0, 2f  /* Skip if already aligned */
    /* Handle initial misalignment (a4 = number of leading bytes) */
    sub     a4, a3, t0
1:
    sb      a1, 0(t0)
    addi    t0, t0, 1
    bltu    t0, a3, 1b
    sub     a2, a2, a4  /* Update count */

2:  /* Duff's device with 32 XLEN stores per iteration */
    /* Broadcast value into all bytes (e.g. 0xab -> 0xabab...ab) */
    andi    a1, a1, 0xff
    slli    a3, a1, 8
    or      a1, a3, a1
    slli    a3, a1, 16
    or      a1, a3, a1
    slli    a3, a1, 32  /* RV64-only step */
    or      a1, a3, a1

    /* Calculate end address of the word-aligned region */
    andi    a4, a2, ~(REGSZ-1)
    add     a3, t0, a4

    andi    a4, a4, 31*REGSZ  /* Calculate remainder (words mod 32) */
    beqz    a4, 3f            /* Shortcut if no remainder */
    neg     a4, a4
    addi    a4, a4, 32*REGSZ  /* Calculate initial offset */

    /* Adjust start address with offset so the first (partial) pass
     * through the unrolled body still ends exactly at a3 */
    sub     t0, t0, a4

    /* Jump into loop body */
    /* Assumes 32-bit instruction lengths: each sd covers REGSZ (8)
     * bytes with one 4-byte instruction, hence byte offset / 2.
     * Compressed (2-byte) encodings here would break this. */
    la      a5, 3f
    srli    a4, a4, 1
    add     a5, a5, a4
    jr      a5
3:
    sd      a1,        0(t0)
    sd      a1,    REGSZ(t0)
    sd      a1,  2*REGSZ(t0)
    sd      a1,  3*REGSZ(t0)
    sd      a1,  4*REGSZ(t0)
    sd      a1,  5*REGSZ(t0)
    sd      a1,  6*REGSZ(t0)
    sd      a1,  7*REGSZ(t0)
    sd      a1,  8*REGSZ(t0)
    sd      a1,  9*REGSZ(t0)
    sd      a1, 10*REGSZ(t0)
    sd      a1, 11*REGSZ(t0)
    sd      a1, 12*REGSZ(t0)
    sd      a1, 13*REGSZ(t0)
    sd      a1, 14*REGSZ(t0)
    sd      a1, 15*REGSZ(t0)
    sd      a1, 16*REGSZ(t0)
    sd      a1, 17*REGSZ(t0)
    sd      a1, 18*REGSZ(t0)
    sd      a1, 19*REGSZ(t0)
    sd      a1, 20*REGSZ(t0)
    sd      a1, 21*REGSZ(t0)
    sd      a1, 22*REGSZ(t0)
    sd      a1, 23*REGSZ(t0)
    sd      a1, 24*REGSZ(t0)
    sd      a1, 25*REGSZ(t0)
    sd      a1, 26*REGSZ(t0)
    sd      a1, 27*REGSZ(t0)
    sd      a1, 28*REGSZ(t0)
    sd      a1, 29*REGSZ(t0)
    sd      a1, 30*REGSZ(t0)
    sd      a1, 31*REGSZ(t0)
    addi    t0, t0, 32*REGSZ
    bltu    t0, a3, 3b
    andi    a2, a2, REGSZ-1  /* Update count */

4:
    /* Handle trailing misalignment (up to REGSZ-1 leftover bytes;
     * sb stores only the low byte, so the broadcast is harmless) */
    beqz    a2, 6f
    add     a3, t0, a2
5:
    sb      a1, 0(t0)
    addi    t0, t0, 1
    bltu    t0, a3, 5b
6:
    ret
NX_ENDFUNC(NX_MemSet)

/* void *NX_MemCopy(void *dest, const void *src, NX_Size sz)
 *
 * Copies sz bytes from src to dest, strictly front-to-back, and
 * returns dest (a0 untouched).  Assumes non-overlapping regions —
 * a forward copy corrupts data when dest overlaps past src; use
 * NX_MemMove for potentially overlapping regions.
 * Register roles: t6 = running dest pointer, a1 = running src
 * pointer, a2 = remaining byte count.  RV64-only (ld/sd).
 */
NX_FUNC(NX_MemCopy)
    /* Preserve return value */
    move    t6, a0

    /* Defer to byte-oriented copy for small sizes (sz < 128) */
    sltiu   a3, a2, 128
    bnez    a3, 4f
    /* Use word-oriented copy only if low-order bits match —
     * otherwise src and dest can never be co-aligned to REGSZ */
    andi    a3, t6, REGSZ-1
    andi    a4, a1, REGSZ-1
    bne     a3, a4, 4f

    beqz    a3, 2f  /* Skip if already aligned */
    /*
     * Round to nearest double word-aligned address
     * greater than or equal to start address
     */
    andi    a3, a1, ~(REGSZ-1)
    addi    a3, a3, REGSZ
    /* Handle initial misalignment (a4 = number of leading bytes) */
    sub     a4, a3, a1
1:
    lb      a5, 0(a1)
    addi    a1, a1, 1
    sb      a5, 0(t6)
    addi    t6, t6, 1
    bltu    a1, a3, 1b
    sub     a2, a2, a4  /* Update count */

2:
    /* Bulk copy in chunks of 16 XLEN words; a3 = src end address
     * of the chunked region */
    andi    a4, a2, ~((16*REGSZ)-1)
    beqz    a4, 4f
    add     a3, a1, a4
3:
    /* Two batches of loads-then-stores so up to 10 loads are in
     * flight before their matching stores */
    ld      a4,       0(a1)
    ld      a5,   REGSZ(a1)
    ld      a6, 2*REGSZ(a1)
    ld      a7, 3*REGSZ(a1)
    ld      t0, 4*REGSZ(a1)
    ld      t1, 5*REGSZ(a1)
    ld      t2, 6*REGSZ(a1)
    ld      t3, 7*REGSZ(a1)
    ld      t4, 8*REGSZ(a1)
    ld      t5, 9*REGSZ(a1)
    sd      a4,       0(t6)
    sd      a5,   REGSZ(t6)
    sd      a6, 2*REGSZ(t6)
    sd      a7, 3*REGSZ(t6)
    sd      t0, 4*REGSZ(t6)
    sd      t1, 5*REGSZ(t6)
    sd      t2, 6*REGSZ(t6)
    sd      t3, 7*REGSZ(t6)
    sd      t4, 8*REGSZ(t6)
    sd      t5, 9*REGSZ(t6)
    ld      a4, 10*REGSZ(a1)
    ld      a5, 11*REGSZ(a1)
    ld      a6, 12*REGSZ(a1)
    ld      a7, 13*REGSZ(a1)
    ld      t0, 14*REGSZ(a1)
    ld      t1, 15*REGSZ(a1)
    addi    a1, a1, 16*REGSZ  /* Advance src; offsets below pre-bump */
    sd      a4, 10*REGSZ(t6)
    sd      a5, 11*REGSZ(t6)
    sd      a6, 12*REGSZ(t6)
    sd      a7, 13*REGSZ(t6)
    sd      t0, 14*REGSZ(t6)
    sd      t1, 15*REGSZ(t6)
    addi    t6, t6, 16*REGSZ
    bltu    a1, a3, 3b
    andi    a2, a2, (16*REGSZ)-1  /* Update count */

4:
    /* Handle trailing misalignment */
    beqz    a2, 6f
    add     a3, a1, a2

    /* Use word-oriented copy if co-aligned to word boundary.
     * Folding the end address (a3) into the check also guarantees
     * the remaining count is a multiple of 4, so the lw/sw loop
     * terminates exactly at a3 without overshooting. */
    or      a5, a1, t6
    or      a5, a5, a3
    andi    a5, a5, 3
    bnez    a5, 5f
7:
    lw      a4, 0(a1)
    addi    a1, a1, 4
    sw      a4, 0(t6)
    addi    t6, t6, 4
    bltu    a1, a3, 7b

    ret

5:
    /* Fallback: plain byte copy of the tail */
    lb      a4, 0(a1)
    addi    a1, a1, 1
    sb      a4, 0(t6)
    addi    t6, t6, 1
    bltu    a1, a3, 5b
6:
    ret
NX_ENDFUNC(NX_MemCopy)

/* void *NX_MemMove(void *dest, const void *src, NX_Size n)
 *
 * Overlap-safe copy: copies forward when dest < src and backward
 * when src < dest, so either overlap direction is handled.
 * RV64-only (ld/sd).
 */
NX_FUNC(NX_MemMove)
    /*
     * Returns
     *   a0 - dest
     *
     * Parameters
     *   a0 - Inclusive first byte of dest
     *   a1 - Inclusive first byte of src
     *   a2 - Length of copy n
     *
     * Because the return matches the parameter register a0,
     * we will not clobber or modify that register.
     *
     * Note: This currently only works on little-endian.
     * To port to big-endian, reverse the direction of shifts
     * in the 2 misaligned fixup copy loops.
     */

    /* Return if nothing to do (same pointers, or zero length) */
    beq     a0, a1, ReturnFromMemMove
    beqz    a2, ReturnFromMemMove

    /*
     * Register Uses
     *      Forward Copy: a1 - Index counter of src
     *      Reverse Copy: a4 - Index counter of src
     *      Forward Copy: t3 - Index counter of dest
     *      Reverse Copy: t4 - Index counter of dest
     *   Both Copy Modes: t5 - Inclusive first multibyte/aligned of dest
     *   Both Copy Modes: t6 - Non-Inclusive last multibyte/aligned of dest
     *   Both Copy Modes: t0 - Link / Temporary for load-store
     *   Both Copy Modes: t1 - Temporary for load-store
     *   Both Copy Modes: t2 - Temporary for load-store
     *   Both Copy Modes: a5 - dest to src alignment offset
     *   Both Copy Modes: a6 - Shift amount
     *   Both Copy Modes: a7 - Inverse Shift amount
     *   Both Copy Modes: a2 - Alternate breakpoint for unrolled loops
     */

    /*
     * Solve for some register values now.
     * Byte copy does not need t5 or t6.
     */
    mv      t3, a0
    add     t4, a0, a2  /* One past the last dest byte */
    add     a4, a1, a2  /* One past the last src byte */

    /*
     * Byte copy if copying less than (2 * REGSZ) bytes. This can
     * cause problems with the bulk copy implementation and is
     * small enough not to bother.
     */
    andi    t0, a2, -(2 * REGSZ)
    beqz    t0, ByteCopy

    /*
     * Now solve for t5 and t6.
     */
    andi    t5, t3, -REGSZ
    andi    t6, t4, -REGSZ
    /*
     * If dest(Register t3) rounded down to the nearest naturally
     * aligned REGSZ address, does not equal dest, then add REGSZ
     * to find the low-bound of REGSZ alignment in the dest memory
     * region.  Note that this could overshoot the dest memory
     * region if n is less than REGSZ.  This is one reason why
     * we always byte copy if n is less than REGSZ.
     * Otherwise, dest is already naturally aligned to REGSZ.
     */
    beq     t5, t3, 1f
    addi    t5, t5, REGSZ
1:

    /*
     * If the dest and src are co-aligned to REGSZ, then there is
     * no need for the full rigmarole of a full misaligned fixup copy.
     * Instead, do a simpler co-aligned copy.
     */
    xor     t0, a0, a1
    andi    t1, t0, (REGSZ - 1)
    beqz    t1, CoalignedCopy
    /* Fall through to misaligned fixup copy */

MisalignedFixupCopy:
    bltu    a1, a0, MisalignedFixupCopyReverse

MisalignedFixupCopyForward:
    /* jal with t0 as link: the helper returns via jalr 0(t0) */
    jal     t0, ByteCopyUntilAlignedForward

    andi    a5, a1, (REGSZ - 1) /* Find the alignment offset of src (a1) */
    slli    a6, a5, 3           /* Multiply by 8 to convert that to bits to shift */
    sub     a5, a1, t3          /* Find the difference between src and dest */
    andi    a1, a1, -REGSZ      /* Align the src pointer */
    addi    a2, t6, REGSZ       /* The other breakpoint for the unrolled loop */

    /*
     * Compute The Inverse Shift
     * a7 = XLEN - a6 = XLEN + -a6
     * 2s complement negation to find the negative: -a6 = ~a6 + 1
     * Add that to XLEN.  XLEN = REGSZ * 8.
     */
    not     a7, a6
    addi    a7, a7, (REGSZ * 8 + 1)

    /*
     * Fix Misalignment Copy Loop - Forward
     * load_val0 = load_ptr[0];
     * do {
     *  load_val1 = load_ptr[1];
     *  store_ptr += 2;
     *  store_ptr[0 - 2] = (load_val0 >> {a6}) | (load_val1 << {a7});
     *
     *  if (store_ptr == {a2})
     *      break;
     *
     *  load_val0 = load_ptr[2];
     *  load_ptr += 2;
     *  store_ptr[1 - 2] = (load_val1 >> {a6}) | (load_val0 << {a7});
     *
     * } while (store_ptr != store_ptr_end);
     * store_ptr = store_ptr_end;
     */

    ld      t0, (0 * REGSZ)(a1)
1:
    ld      t1, (1 * REGSZ)(a1)
    addi    t3, t3, (2 * REGSZ)
    srl     t0, t0, a6
    sll     t2, t1, a7
    or      t2, t0, t2
    sd      t2, ((0 * REGSZ) - (2 * REGSZ))(t3)

    beq     t3, a2, 2f

    ld      t0, (2 * REGSZ)(a1)
    addi    a1, a1, (2 * REGSZ)
    srl     t1, t1, a6
    sll     t2, t0, a7
    or      t2, t1, t2
    sd      t2, ((1 * REGSZ) - (2 * REGSZ))(t3)

    bne     t3, t6, 1b
    2:
    mv      t3, t6          /* Fix the dest pointer in case the loop was broken */

    add     a1, t3, a5      /* Restore the src pointer */
    j       ByteCopyForward /* Copy any remaining bytes */

MisalignedFixupCopyReverse:
    /* jal with t0 as link: the helper returns via jalr 0(t0) */
    jal     t0, ByteCopyUntilAlignedReverse

    andi    a5, a4, (REGSZ - 1) /* Find the alignment offset of src (a4) */
    slli    a6, a5, 3           /* Multiply by 8 to convert that to bits to shift */
    sub     a5, a4, t4          /* Find the difference between src and dest */
    andi    a4, a4, -REGSZ      /* Align the src pointer */
    addi    a2, t5, -REGSZ      /* The other breakpoint for the unrolled loop */

    /*
     * Compute The Inverse Shift
     * a7 = XLEN - a6 = XLEN + -a6
     * 2s complement negation to find the negative: -a6 = ~a6 + 1
     * Add that to XLEN.  XLEN = REGSZ * 8.
     */
    not     a7, a6
    addi    a7, a7, (REGSZ * 8 + 1)

    /*
     * Fix Misalignment Copy Loop - Reverse
     * load_val1 = load_ptr[0];
     * do {
     *  load_val0 = load_ptr[-1];
     *  store_ptr -= 2;
     *  store_ptr[1] = (load_val0 >> {a6}) | (load_val1 << {a7});
     *
     *  if (store_ptr == {a2})
     *      break;
     *
     *  load_val1 = load_ptr[-2];
     *  load_ptr -= 2;
     *  store_ptr[0] = (load_val1 >> {a6}) | (load_val0 << {a7});
     *
     * } while (store_ptr != store_ptr_end);
     * store_ptr = store_ptr_end;
     */

    ld      t1, ( 0 * REGSZ)(a4)
    1:
    ld      t0, (-1 * REGSZ)(a4)
    addi    t4, t4, (-2 * REGSZ)
    sll     t1, t1, a7
    srl     t2, t0, a6
    or      t2, t1, t2
    sd      t2, ( 1 * REGSZ)(t4)

    beq     t4, a2, 2f

    ld      t1, (-2 * REGSZ)(a4)
    addi    a4, a4, (-2 * REGSZ)
    sll     t0, t0, a7
    srl     t2, t1, a6
    or      t2, t0, t2
    sd      t2, ( 0 * REGSZ)(t4)

    bne     t4, t5, 1b
2:
    mv      t4, t5          /* Fix the dest pointer in case the loop was broken */

    add     a4, t4, a5      /* Restore the src pointer */
    j       ByteCopyReverse /* Copy any remaining bytes */

/*
 * Simple copy loops for REGSZ co-aligned memory locations.
 * These also make calls to do byte copies for any unaligned
 * data at their terminations.
 */
CoalignedCopy:
    bltu    a1, a0, CoalignedCopyReverse

CoalignedCopyForward:
    jal     t0, ByteCopyUntilAlignedForward

    1:
    ld      t1, ( 0 * REGSZ)(a1)
    addi    a1, a1, REGSZ
    addi    t3, t3, REGSZ
    sd      t1, (-1 * REGSZ)(t3)
    bne     t3, t6, 1b

    j       ByteCopyForward /* Copy any remaining bytes */

CoalignedCopyReverse:
    jal     t0, ByteCopyUntilAlignedReverse

1:
    ld      t1, (-1 * REGSZ)(a4)
    addi    a4, a4, -REGSZ
    addi    t4, t4, -REGSZ
    sd      t1, ( 0 * REGSZ)(t4)
    bne     t4, t5, 1b

    j       ByteCopyReverse /* Copy any remaining bytes */

/*
 * These are basically sub-functions within the function.  They
 * are used to byte copy until the dest pointer is in alignment.
 * At which point, a bulk copy method can be used by the
 * calling code.  These work on the same registers as the bulk
 * copy loops.  Therefore, the register values can be picked
 * up from where they were left and we avoid code duplication
 * without any overhead except the call in and return jumps.
 */
ByteCopyUntilAlignedForward:
    beq     t3, t5, 2f
1:
    lb      t1,  0(a1)
    addi    a1, a1, 1
    addi    t3, t3, 1
    sb      t1, -1(t3)
    bne     t3, t5, 1b
2:
    jalr    zero, 0x0(t0) /* Return to multibyte copy loop */

ByteCopyUntilAlignedReverse:
    beq     t4, t6, 2f
1:
    lb      t1, -1(a4)
    addi    a4, a4, -1
    addi    t4, t4, -1
    sb      t1,  0(t4)
    bne     t4, t6, 1b
2:
    jalr    zero, 0x0(t0) /* Return to multibyte copy loop */

/*
 * Simple byte copy loops.
 * These will byte copy until they reach the end of data to copy.
 * At that point, they will call to return from memmove.
 */
ByteCopy:
    bltu    a1, a0, ByteCopyReverse

ByteCopyForward:
    beq     t3, t4, 2f
1:
    lb      t1,  0(a1)
    addi    a1, a1, 1
    addi    t3, t3, 1
    sb      t1, -1(t3)
    bne     t3, t4, 1b
    2:
    ret

ByteCopyReverse:
    beq     t4, t3, 2f
1:
    lb      t1, -1(a4)
    addi    a4, a4, -1
    addi    t4, t4, -1
    sb      t1,  0(t4)
    bne     t4, t3, 1b
2:
    /* Fall through to the common return */

ReturnFromMemMove:
    ret
NX_ENDFUNC(NX_MemMove)
