#include <seminix/linkage.h>
#include <seminix/tcb.h>
#include <seminix/smp.h>
#include <seminix/signal.h>
#include <seminix/param.h>
#include <asm/ptrace.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/traps.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was loaded onto the CPU
 * the most recently, or NULL if kernel mode NEON has been performed after that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to tcb from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with local_bh_disable() unless softirqs are already masked.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if the task's fpsimd_cpu field contains the
 *   id of the current CPU and the CPU's fpsimd_last_state per-cpu variable
 *   points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
/*
 * Per-CPU record of whose userland FPSIMD state currently occupies this
 * CPU's FPSIMD registers (item (b) in the big comment above).
 */
struct fpsimd_last_state_struct {
    struct user_fpsimd_state *st;   /* NULL once kernel-mode NEON clobbers the regs */
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

/* Default VL for tasks that don't set it explicitly: */
static int sve_default_vl = -1;

/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);

/*
 * Call __sve_free() directly only if you know task can't be scheduled
 * or preempted.
 */
/*
 * Free @task's SVE register buffer and drop the stale pointer.
 * Call directly only when the task cannot be scheduled or preempted.
 */
static void __sve_free(struct tcb *task)
{
    void *state = task->thread.sve_state;

    task->thread.sve_state = NULL;
    kfree(state);
}

/*
 * Checked variant of __sve_free(): freeing the buffer while TIF_SVE is
 * still set would leave a dangling sve_state pointer in active use.
 */
static void sve_free(struct tcb *task)
{
    WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
    __sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 *
 * Softirqs (and preemption) must be disabled.
 */
static void task_fpsimd_load(void)
{
    /* Caller contract: no interrupts, hence no preemption either. */
    WARN_ON(!irqs_disabled());

    /*
     * With TIF_SVE set, the authoritative copy is the full Z/P/FFR
     * image in thread.sve_state (plus FPSR/FPCR kept in
     * uw.fpsimd_state); otherwise only the V-register view in
     * uw.fpsimd_state is valid.  sve_load_state() takes VQ - 1.
     */
    if (system_supports_sve() && test_thread_flag(TIF_SVE))
        sve_load_state(sve_pffr(&current->thread),
                   &current->thread.uw.fpsimd_state.fpsr,
                   sve_vq_from_vl(current->thread.sve_vl) - 1);
    else
        fpsimd_load_state(&current->thread.uw.fpsimd_state);
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 *
 * Softirqs (and preemption) must be disabled.
 */
void fpsimd_save(void)
{
    struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
    /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

    /* Caller contract: interrupts (and thus preemption) disabled. */
    WARN_ON(!irqs_disabled());

    /*
     * If TIF_FOREIGN_FPSTATE is set, the registers do not hold the
     * current task's most recent state, so there is nothing to save.
     */
    if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
        if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
            if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
                /*
                 * Can't save the user regs, so current would
                 * re-enter user with corrupt state.
                 * There's no way to recover, so kill it:
                 */
                force_signal_inject(SIGKILL, SI_KERNEL, 0);
                return;
            }

            sve_save_state(sve_pffr(&current->thread), &st->fpsr);
        } else
            fpsimd_save_state(st);
    }
}

/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa).  This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */

/*
 * Map a vector-quadword count to its (reversed) bit index in sve_vq_map,
 * so find_next_bit() walks from larger towards smaller VQ values.
 */
static unsigned int vq_to_bit(unsigned int vq)
{
    unsigned int bit = SVE_VQ_MAX - vq;

    return bit;
}

/* Inverse of vq_to_bit(), clamping nonsense indices to the smallest VQ. */
static unsigned int bit_to_vq(unsigned int bit)
{
    if (WARN_ON(bit >= SVE_VQ_MAX))
        bit = SVE_VQ_MAX - 1;

    return SVE_VQ_MAX - bit;
}

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
    int bit;
    int max_vl = sve_max_vl;

    if (WARN_ON(!sve_vl_valid(vl)))
        vl = SVE_VL_MIN;

    if (WARN_ON(!sve_vl_valid(max_vl)))
        max_vl = SVE_VL_MIN;

    if (vl > (unsigned int)max_vl)
        vl = max_vl;

    bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
                vq_to_bit(sve_vq_from_vl(vl)));
    return sve_vl_from_vq(bit_to_vq(bit));
}

static int __init sve_sysctl_init(void) { return 0; }

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
    (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
/*
 * Seed task->thread.sve_state from the FPSIMD view in
 * task->thread.uw.fpsimd_state: each V-register becomes the low 128
 * bits of the corresponding Z-register slot.
 *
 * Caller guarantees: task is non-runnable or is current with softirqs
 * and preemption disabled; sve_state points to a buffer of at least
 * sve_state_size(task) bytes; uw.fpsimd_state is up to date.
 */
static void fpsimd_to_sve(struct tcb *task)
{
    struct user_fpsimd_state const *src = &task->thread.uw.fpsimd_state;
    void *dst = task->thread.sve_state;
    unsigned int vq, reg;

    if (!system_supports_sve())
        return;

    vq = sve_vq_from_vl(task->thread.sve_vl);
    for (reg = 0; reg < 32; ++reg)
        memcpy(ZREG(dst, vq, reg), &src->vregs[reg],
               sizeof(src->vregs[reg]));
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * softirqs (and preemption) must be disabled.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
/*
 * Extract the FPSIMD view from task->thread.sve_state into
 * task->thread.uw.fpsimd_state: the low 128 bits of each Z-register
 * slot become the corresponding V-register.
 *
 * Caller guarantees mirror those of fpsimd_to_sve(), with
 * thread.sve_state being the up-to-date side.
 */
static void sve_to_fpsimd(struct tcb *task)
{
    struct user_fpsimd_state *dst = &task->thread.uw.fpsimd_state;
    void const *src = task->thread.sve_state;
    unsigned int vq, reg;

    if (!system_supports_sve())
        return;

    vq = sve_vq_from_vl(task->thread.sve_vl);
    for (reg = 0; reg < 32; ++reg)
        memcpy(&dst->vregs[reg], ZREG(src, vq, reg),
               sizeof(dst->vregs[reg]));
}

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
    /* Even if we chose not to use SVE, the hardware could still trap: */
    if (unlikely(!system_supports_sve())) {
        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
        return;
    }

    /* Allocate thread.sve_state before it is populated below. */
    sve_alloc(current);

    /*
     * Flush any live FPSIMD registers to memory, then widen that
     * FPSIMD image into the freshly allocated SVE buffer.
     */
    fpsimd_save();
    fpsimd_to_sve(current);

    /* Force ret_to_user to reload the registers: */
    fpsimd_flush_task_state(current);
    set_thread_flag(TIF_FOREIGN_FPSTATE);

    /*
     * TIF_SVE must have been clear, otherwise ret_to_user would have
     * already disabled the SVE trap and we could not have gotten here.
     */
    if (test_and_set_thread_flag(TIF_SVE))
        WARN_ON(1); /* SVE access shouldn't have trapped */
}

/*
 * Trapped FP/ASIMD access.
 */
asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
    /* TODO: implement lazy context saving/restoring */
    /* Until then this trap is never expected; warn if it fires. */
    WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
/*
 * Deliver SIGFPE for a trapped floating-point exception, mapping the
 * highest-priority trapped-exception flag in the ESR to an si_code.
 * FPEXC_IDF (input denormal) has no FPE_* equivalent and FPE_FLTUNK is
 * reported when no recognised flag is set or the flags are invalid.
 */
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
    /* Priority-ordered: first matching flag wins, as before. */
    static const struct {
        unsigned int mask;
        unsigned int code;
    } fp_exc_map[] = {
        { FPEXC_IOF, FPE_FLTINV },
        { FPEXC_DZF, FPE_FLTDIV },
        { FPEXC_OFF, FPE_FLTOVF },
        { FPEXC_UFF, FPE_FLTUND },
        { FPEXC_IXF, FPE_FLTRES },
    };
    unsigned int si_code = FPE_FLTUNK;
    unsigned int i;

    /* The individual trap flags are only valid when TFV is set. */
    if (esr & ESR_ELx_FP_EXC_TFV) {
        for (i = 0; i < sizeof(fp_exc_map) / sizeof(fp_exc_map[0]); i++) {
            if (esr & fp_exc_map[i].mask) {
                si_code = fp_exc_map[i].code;
                break;
            }
        }
    }

    send_sig_fault(SIGFPE, si_code,
               (void __user *)instruction_pointer(regs),
               current);
}

void fpsimd_thread_switch(struct tcb *next)
{
    bool wrong_task, wrong_cpu;

    if (!system_supports_fpsimd())
        return;

    /* Save unsaved fpsimd state, if any: */
    fpsimd_save();

    /*
     * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
     * state.  For kernel threads, FPSIMD registers are never loaded
     * and wrong_task and wrong_cpu will always be true.
     */
    wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
                    &next->thread.uw.fpsimd_state;
    wrong_cpu = next->thread.fpsimd_cpu != (unsigned int)smp_processor_id();

    /*
     * The registers hold next's state only if this CPU both last ran
     * next AND has not loaded anyone else's state since (see the
     * two-way handshake described at the top of this file).
     */
    update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
                   wrong_task || wrong_cpu);
}

void fpsimd_flush_thread(void)
{
    int vl, supported_vl;

    if (!system_supports_fpsimd())
        return;

    /* Wipe the FPSIMD view and invalidate any live register copy. */
    memset(&current->thread.uw.fpsimd_state, 0,
           sizeof(current->thread.uw.fpsimd_state));
    fpsimd_flush_task_state(current);

    if (system_supports_sve()) {
        /*
         * TIF_SVE must be cleared before sve_free(), which WARNs if
         * it is still set when the buffer is released.
         */
        clear_thread_flag(TIF_SVE);
        sve_free(current);

        /*
         * Reset the task vector length as required.
         * This is where we ensure that all user tasks have a valid
         * vector length configured: no kernel task can become a user
         * task without an exec and hence a call to this function.
         * By the time the first call to this function is made, all
         * early hardware probing is complete, so sve_default_vl
         * should be valid.
         * If a bug causes this to go wrong, we make some noise and
         * try to fudge thread.sve_vl to a safe value here.
         */
        vl = current->thread.sve_vl_onexec ?
            (int)current->thread.sve_vl_onexec : sve_default_vl;

        if (WARN_ON(!sve_vl_valid(vl)))
            vl = SVE_VL_MIN;

        supported_vl = find_supported_vector_length(vl);
        if (WARN_ON(supported_vl != vl))
            vl = supported_vl;

        current->thread.sve_vl = vl;

        /*
         * If the task is not set to inherit, ensure that the vector
         * length will be reset by a subsequent exec:
         */
        if (!test_thread_flag(TIF_SVE_VL_INHERIT))
            current->thread.sve_vl_onexec = 0;
    }

    /* Force ret_to_user to reload the (now zeroed) state. */
    set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'
 */
/*
 * Save current's userland FPSIMD state to memory; a no-op both when the
 * hardware lacks FPSIMD and when the registers don't belong to current
 * (fpsimd_save() checks TIF_FOREIGN_FPSTATE itself).
 */
void fpsimd_preserve_current_state(void)
{
    if (system_supports_fpsimd())
        fpsimd_save();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
/*
 * As fpsimd_preserve_current_state(), but additionally rebuild
 * current->thread.uw.fpsimd_state from the SVE image so that it can be
 * copied to the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
    fpsimd_preserve_current_state();

    if (!system_supports_sve())
        return;

    if (test_thread_flag(TIF_SVE))
        sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu
 * Preemption must be disabled when calling this function.
 */
/*
 * Record that this CPU's FPSIMD registers now hold current's state:
 * point the per-cpu fpsimd_last_state at current and stamp current with
 * this CPU's id.  Preemption must be disabled by the caller.
 */
void fpsimd_bind_task_to_cpu(void)
{
    struct fpsimd_last_state_struct *last =
        this_cpu_ptr(&fpsimd_last_state);

    last->st = &current->thread.uw.fpsimd_state;
    current->thread.fpsimd_cpu = smp_processor_id();

    if (!system_supports_sve())
        return;

    /*
     * Toggle SVE trapping for userspace if needed.
     * Serialised by exception return to user.
     */
    if (test_thread_flag(TIF_SVE))
        sve_user_enable();
    else
        sve_user_disable();
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
{
    struct fpsimd_last_state_struct *last =
        this_cpu_ptr(&fpsimd_last_state);

    WARN_ON(!irqs_disabled());

    last->st = st;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'
 */
void fpsimd_restore_current_state(void)
{
    if (!system_supports_fpsimd())
        return;

    /*
     * Interrupts off so that the test-and-clear, register load and
     * per-cpu binding happen atomically with respect to preemption
     * and interrupt-context FPSIMD use.
     */
    local_irq_disable();
    if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
        task_fpsimd_load();
        fpsimd_bind_task_to_cpu();
    }
    local_irq_enable();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
    if (!system_supports_fpsimd())
        return;

    /*
     * Install the new FPSIMD image; if the task is in SVE mode, also
     * propagate it into the SVE view so both stay consistent.
     */
    current->thread.uw.fpsimd_state = *state;
    if (system_supports_sve() && test_thread_flag(TIF_SVE))
        fpsimd_to_sve(current);

    /* Load the new state and bind it to this CPU before clearing
     * TIF_FOREIGN_FPSTATE, so the registers are authoritative again. */
    task_fpsimd_load();
    fpsimd_bind_task_to_cpu();

    clear_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 */
void fpsimd_flush_task_state(struct tcb *t)
{
    /*
     * CONFIG_NR_CPUS is never a valid CPU id, so any subsequent
     * comparison of fpsimd_cpu against smp_processor_id() fails,
     * invalidating every live register copy of t's state.
     */
    t->thread.fpsimd_cpu = CONFIG_NR_CPUS;
}

void fpsimd_flush_cpu_state(void)
{
    /*
     * Forget whose state this CPU's registers hold, and mark the
     * current task's register contents as foreign so they are
     * reloaded from memory before the next return to userland.
     */
    __this_cpu_write(fpsimd_last_state.st, NULL);
    set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
    /* Purely informational: boot continues either way. */
    if (!(elf_hwcap & HWCAP_FP))
        pr_notice("Floating-point is not implemented\n");

    if (!(elf_hwcap & HWCAP_ASIMD))
        pr_notice("Advanced SIMD is not implemented\n");

    /* Currently a stub returning 0 (no sysctl in this configuration). */
    return sve_sysctl_init();
}
core_initcall(fpsimd_init);
