/*----------------------------------------------------------------------------
 * Tencent is pleased to support the open source community by making TencentOS
 * available.
 *
 * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
 * If you have downloaded a copy of the TencentOS binary from Tencent, please
 * note that the TencentOS binary is licensed under the BSD 3-Clause License.
 *
 * If you have downloaded a copy of the TencentOS source code from Tencent,
 * please note that TencentOS source code is licensed under the BSD 3-Clause
 * License, except for the third-party components listed below which are
 * subject to different license terms. Your integration of TencentOS into your
 * own projects may require compliance with the BSD 3-Clause License, as well
 * as the other licenses applicable to the third-party components included
 * within TencentOS.
 *---------------------------------------------------------------------------*/

#include "tos_k.h"

/**
 * @brief Initialize the kernel.
 *
 * Brings up the ready queue, the default memory heap (when enabled),
 * the per-core idle tasks, soft timers, power management and tickless
 * support, in that order. Stops at the first failing step.
 *
 * @return K_ERR_NONE on success, otherwise the error of the failing step.
 */
__PRO__ __API__ k_err_t tos_knl_init(void)
{
    k_err_t rc;

    readyqueue_init();

#if TOS_CFG_MMHEAP_EN > 0
#if TOS_CFG_MMHEAP_DEFAULT_POOL_EN > 0u
    rc = mmheap_init_with_pool(k_mmheap_default_pool, TOS_CFG_MMHEAP_DEFAULT_POOL_SIZE);
#else
    rc = mmheap_init();
#endif
    if (rc != K_ERR_NONE) {
        return rc;
    }
#endif

    rc = knl_idle_init();
    if (rc != K_ERR_NONE) {
        return rc;
    }

#if TOS_CFG_TIMER_EN > 0
    rc = soft_timer_init();
    if (rc != K_ERR_NONE) {
        return rc;
    }
#endif

#if TOS_CFG_PWR_MGR_EN > 0U
    pm_init();
#endif

#if TOS_CFG_TICKLESS_EN > 0u
    tickless_init();
#endif

    return K_ERR_NONE;
}

// Pro-CPU dispatcher. The function may be called in ISR and task context!
// Drains the SMP message FIFOs to learn which App cores need a new task,
// picks the highest-priority ready tasks, hands them out to those cores,
// and schedules any leftover task on the Pro CPU itself.
__STATIC__ __PRO__ __KNL__ void knl_do_sched()
{
    uint8_t i, j;
    int8_t k;
    uint32_t msg;
    k_task_t* tl[TOS_CFG_SMP_CPU_MAX];
    uint32_t csch = 0; // bitmask of cores that need a task scheduled

    memset(tl, 0, sizeof(tl));

    // Drain every App core's messages to collect scheduling requests.
    for (i = 0; i < TOS_CFG_SMP_CPU_MAX; i++) {
        if (i == TOS_CFG_SMP_PROCPU_INDEX) {
            continue;
        }
        while (smp_try_pop_msg(i, &msg) == K_ERR_NONE) { // try to recv all
            if (smp_get_msg_type(msg) == K_SMP_MSG_TYPE_TASKRDY) { // Task switched, implies that the core needs to be scheduled
                // The finished task becomes schedulable again.
                task_state_reset_scheduled((k_task_t*)smp_get_msg_ptr(msg));
                csch |= (uint32_t)1 << i;
            } else if (smp_get_msg_type(msg) == K_SMP_MSG_TYPE_SIMPLE) {
                if (smp_get_msg_simple_type(msg) & K_SMP_MSG_SIMPLE_SCHREQ) {
                    csch |= (uint32_t)1 << i;
                }
            }
        }
    }

    // The task previously running on the Pro CPU itself is free again.
    if (k_prev_task[TOS_CFG_SMP_PROCPU_INDEX] != K_NULL && k_prev_task[TOS_CFG_SMP_PROCPU_INDEX] != k_curr_task[TOS_CFG_SMP_PROCPU_INDEX]) {
        task_state_reset_scheduled(k_prev_task[TOS_CFG_SMP_PROCPU_INDEX]); // reset the prev task on PROCPU
        k_prev_task[TOS_CFG_SMP_PROCPU_INDEX] = K_NULL;
    }

    // NOTE: the old `k = __builtin_popcount(csch) + 1;` pre-count was dead
    // code (k is reassigned below before any read) and has been removed.
    j = readyqueue_highest_ready_task_get_n(tl, TOS_CFG_SMP_CPU_MAX);

    // Hand the picked tasks out to the requesting App cores.
    for (i = 0, k = 0; i < TOS_CFG_SMP_CPU_MAX && k < j; i++) {
        if (i != TOS_CFG_SMP_PROCPU_INDEX && (csch & ((uint32_t)1 << i))) {
            task_state_set_scheduled(tl[k]);
            if (smp_try_push_msg(i, smp_msg_taskptr(tl[k])) != K_ERR_NONE) {
                // FIFO full: roll back the state so the task can be
                // scheduled in a later round.
                task_state_reset_scheduled(tl[k]);
                continue;
            }
            k++;
        }
    }

    if (k < j) { // Task left, schedule on self
        k_next_task[TOS_CFG_SMP_PROCPU_INDEX] = tl[k];
#if TOS_CFG_SMP_EN > 0u
        task_state_set_scheduled(tl[k]);
#endif
    } else {
        // Current task could be scheduled on other cores, so we'd free it
        // k_next_task[TOS_CFG_SMP_PROCPU_INDEX] = &k_idle_task[TOS_CFG_SMP_PROCPU_INDEX];
    }
}

// App-CPU scheduling service. The function may be called in ISR and task context!
// Reports the task that just finished running back to the Pro CPU (or
// requests a schedule), then receives the next task to run, if any.
__STATIC__ __KNL__ void knl_worker_srv()
{
    uint8_t i;
    uint32_t msg;

    i = cpu_get_index();

    if (k_prev_task[i] != K_NULL && k_prev_task[i] != k_curr_task[i]) { // A task has been run
        if (smp_try_push_msg(TOS_CFG_SMP_PROCPU_INDEX, smp_msg_taskrdy(k_prev_task[i])) != K_ERR_NONE) { // Notify the Pro CPU that the task is done and can be scheduled
            // FIFO full: keep k_prev_task non-NULL so the notification is
            // retried on the next call, and skip fetching a new task.
            return;
        }
        k_prev_task[i] = K_NULL;
    } else {
        smp_try_push_msg(TOS_CFG_SMP_PROCPU_INDEX, smp_msg_simple(K_SMP_MSG_SIMPLE_SCHREQ)); // Request to schedule (best effort; failure intentionally ignored)
    }

    if (k_next_task[i] == k_curr_task[i] || knl_is_idle(k_next_task[i])) { // Do not recv new task if task scheduled last time is not run
        while (smp_try_pop_msg(TOS_CFG_SMP_PROCPU_INDEX, &msg) == K_ERR_NONE) {
            if (smp_get_msg_type(msg) == K_SMP_MSG_TYPE_TASKSCH) {
                k_next_task[i] = (k_task_t*)smp_get_msg_ptr(msg);
                break; // Only one task to be run at a time, keep other msg in the fifo since we cannot peek
            }

            if (smp_get_msg_type(msg) == K_SMP_MSG_TYPE_SIMPLE) {
                if (smp_get_msg_simple_type(msg) & K_SMP_MSG_SIMPLE_FORCEIDLE) {
                    // Pro CPU explicitly forced this core to idle.
                    k_next_task[i] = &k_idle_task[i];
                    break;
                }
            }
        }
    }
}

/**
 * @brief Enter an interrupt context.
 *
 * Bumps the per-core IRQ nesting counter; saturates at
 * K_NESTING_LIMIT_IRQ instead of wrapping. No-op before kernel start.
 */
__API__ void tos_knl_irq_enter(void)
{
    uint8_t core;

    if (!tos_knl_is_running()) {
        return;
    }

    core = cpu_get_index();

    // Refuse to count past the limit so the counter never overflows.
    if (unlikely(k_irq_nest_cnt[core] >= K_NESTING_LIMIT_IRQ)) {
        return;
    }

    k_irq_nest_cnt[core]++;
}

/**
 * @brief Leave an interrupt context.
 *
 * Decrements the per-core IRQ nesting counter; when the outermost ISR
 * exits (and the scheduler is not locked), performs this core's
 * scheduling work — dispatcher on the Pro CPU, worker service on an App
 * CPU — and switches context if a different task was picked.
 *
 * Fix: removed the unused local `j` (declared and initialized, never read).
 */
__API__ void tos_knl_irq_leave(void)
{
    uint8_t i;
    TOS_CPU_CPSR_ALLOC();

    if (!tos_knl_is_running()) {
        return;
    }

    i = cpu_get_index();

    TOS_CPU_INT_DISABLE();
    if (!knl_is_inirq()) {
        // Unbalanced leave (no matching tos_knl_irq_enter) — ignore.
        TOS_CPU_INT_ENABLE();
        return;
    }

    --k_irq_nest_cnt[i];

    if (knl_is_inirq()) { // Still in irq (nested), defer scheduling
        TOS_CPU_INT_ENABLE();
        return;
    }

    if (knl_is_sched_locked()) {
        TOS_CPU_INT_ENABLE();
        return;
    }

    // check is current task deleted or suspended
    // If it can no longer run and nothing else was picked, force idle.
    if (task_state_is_deleted(k_curr_task[i]) || task_state_is_suspended(k_curr_task[i]) || task_state_is_sleeping(k_curr_task[i]) || task_state_is_pending(k_curr_task[i])) {
        if (k_next_task[i] == k_curr_task[i]) {
            k_next_task[i] = &k_idle_task[i]; // Force idle in the next tick
        }
    }

    if (i == TOS_CFG_SMP_PROCPU_INDEX) { // On Pro CPU, do sched
        tos_knl_sched_lock();
        knl_do_sched();
        tos_knl_sched_unlock();
    } else { // On App CPU, recv task and run
        knl_worker_srv();
    }

    if (knl_is_self(k_next_task[i])) {
        TOS_CPU_INT_ENABLE();
        return; // No need to switch context
    }

    cpu_irq_context_switch();
    TOS_CPU_INT_ENABLE();
}

/**
 * @brief Lock the scheduler for the calling core (recursive).
 *
 * Spins until the scheduler lock is free (owner sentinel 0xff) or is
 * already owned by this core, then records this core as owner and bumps
 * the nesting count. Interrupts are disabled only while the global SMP
 * lock is held, so other ISRs can run between spin iterations.
 *
 * @return K_ERR_NONE on success;
 *         K_ERR_KNL_NOT_RUNNING before kernel start;
 *         K_ERR_LOCK_NESTING_OVERFLOW if the nesting limit is reached.
 */
__API__ k_err_t tos_knl_sched_lock(void)
{
    // #if TOS_CFG_SMP_EN > 0u
    //     return smp_global_lock(TOS_SMP_GLOCK_SCHED);
    // #else
    TOS_CPU_CPSR_ALLOC();

    // TOS_IN_IRQ_CHECK();

    if (!tos_knl_is_running()) {
        return K_ERR_KNL_NOT_RUNNING;
    }

    // NOTE(review): this limit check reads the shared nesting counter
    // before the global lock is taken; benign for the owning core, but
    // racy against another core mid-update — confirm intended.
    if (k_sched_lock_nest_cnt >= K_NESTING_LIMIT_SCHED_LOCK) {
        return K_ERR_LOCK_NESTING_OVERFLOW;
    }

    while (1) {
        TOS_CPU_INT_DISABLE();
        smp_global_lock(TOS_SMP_GLOCK_SCHED);
        // 0xff means "no owner"; the owning core may re-enter.
        if (k_sched_lock_core == cpu_get_index() || k_sched_lock_core == 0xff)
            break;
        smp_global_unlock(TOS_SMP_GLOCK_SCHED);
        TOS_CPU_INT_ENABLE();
    }
    k_sched_lock_core = cpu_get_index();
    ++k_sched_lock_nest_cnt;
    smp_global_unlock(TOS_SMP_GLOCK_SCHED);
    TOS_CPU_INT_ENABLE();
    return K_ERR_NONE;
    // #endif
}

/**
 * @brief Unlock the scheduler previously locked by tos_knl_sched_lock().
 *
 * Only the owning core may unlock. When the nesting count drops to zero
 * the owner is reset to the 0xff "no owner" sentinel.
 *
 * @return K_ERR_NONE on success;
 *         K_ERR_KNL_NOT_RUNNING before kernel start;
 *         K_ERR_SCHED_NOT_LOCKED if the scheduler is not locked;
 *         K_ERR_SCHED_LOCKED_BY_OTHER if another core holds the lock.
 */
__API__ k_err_t tos_knl_sched_unlock(void)
{
    // #if TOS_CFG_SMP_EN > 0u
    //     return smp_global_unlock(TOS_SMP_GLOCK_SCHED);
    // #else
    TOS_CPU_CPSR_ALLOC();

    // TOS_IN_IRQ_CHECK();

    if (!tos_knl_is_running()) {
        return K_ERR_KNL_NOT_RUNNING;
    }

    if (!knl_is_sched_locked()) {
        return K_ERR_SCHED_NOT_LOCKED;
    }

    TOS_CPU_INT_DISABLE();
    smp_global_lock(TOS_SMP_GLOCK_SCHED);
    if (k_sched_lock_core != cpu_get_index()) {
        smp_global_unlock(TOS_SMP_GLOCK_SCHED);
        TOS_CPU_INT_ENABLE();
        return K_ERR_SCHED_LOCKED_BY_OTHER; // You can't unlock the sched for other core! This should not be happening but just in case...
    }

    --k_sched_lock_nest_cnt;
    if (k_sched_lock_nest_cnt == 0) {
        k_sched_lock_core = 0xff; // release ownership
    }
    smp_global_unlock(TOS_SMP_GLOCK_SCHED);
    TOS_CPU_INT_ENABLE();

    // knl_sched();
    return K_ERR_NONE;
    // #endif
}

/**
 * @brief Start the kernel; intended to be called once on every core.
 *
 * App (worker) cores: initialize their CPU, busy-wait until the Pro CPU
 * publishes KNL_STATE_RUNNING, then start scheduling. The Pro CPU seeds
 * every core with its idle task, initializes its CPU, publishes the
 * RUNNING state and starts scheduling itself.
 *
 * @return K_ERR_NONE, or K_ERR_KNL_RUNNING if already started.
 */
__TRS__ __API__ k_err_t tos_knl_start(void)
{
    uint8_t i;
    if (cpu_get_index() != TOS_CFG_SMP_PROCPU_INDEX) { // Worker core
        cpu_init();

        // NOTE(review): this spin assumes k_knl_state is re-read from
        // memory each iteration (e.g. declared volatile) — confirm.
        while (k_knl_state != KNL_STATE_RUNNING) {
            // Wait for the kernel to start
        }

        cpu_sched_start();
        return K_ERR_NONE;
    }

    if (unlikely(tos_knl_is_running())) {
        return K_ERR_KNL_RUNNING;
    }

    // k_next_task = readyqueue_highest_ready_task_get();
    // k_curr_task = k_next_task;
    // knl_do_sched(); // Tasks are sent to other cores, but they'd need to init

    // Run idle task first; real tasks arrive through scheduling later.
    for (i = 0; i < TOS_CFG_SMP_CPU_MAX; i++) {
        k_next_task[i] = &k_idle_task[i];
        k_curr_task[i] = k_next_task[i];
        k_prev_task[i] = k_curr_task[i];
    }

    cpu_init();

    k_knl_state = KNL_STATE_RUNNING;

    cpu_sched_start();

    return K_ERR_NONE;
}

/**
 * @brief Whether the kernel has been started.
 *
 * @return Non-zero if the kernel state is KNL_STATE_RUNNING, 0 otherwise.
 */
__API__ int tos_knl_is_running(void)
{
    return (k_knl_state == KNL_STATE_RUNNING) ? 1 : 0;
}

#if TOS_CFG_TICKLESS_EN > 0u

/**
 * @brief Get the remain ticks of the first oncoming task.
 *
 * @return The remaining ticks until the first oncoming task is to be
 *         scheduled; when soft timers are enabled, the earlier of the
 *         next tick expiry and the next soft-timer expiry.
 */
__KNL__ k_tick_t knl_next_expires_get(void)
{
    k_tick_t tick_next_expires;
#if TOS_CFG_TIMER_EN > 0u
    k_tick_t timer_next_expires;
#endif

    tick_next_expires = tick_next_expires_get();

#if TOS_CFG_TIMER_EN > 0u
    // Single conditional block (was split across two identical #ifs).
    timer_next_expires = soft_timer_next_expires_get();
    return tick_next_expires < timer_next_expires ? tick_next_expires : timer_next_expires;
#else
    return tick_next_expires;
#endif
}

#endif

// Voluntary reschedule point. Non ISR context only!
// Runs the dispatcher on the Pro CPU or the worker service on an App CPU,
// then switches this core to k_next_task. If the picked task is the
// caller itself, idle is forced so the yield request is always honored.
__KNL__ void knl_sched(void)
{
    TOS_CPU_CPSR_ALLOC();

    if (unlikely(!tos_knl_is_running())) {
        return;
    }

    if (knl_is_inirq()) {
        // ISR path performs scheduling in tos_knl_irq_leave() instead.
        return;
    }
    // if (knl_is_sched_locked()) {
    //     return;
    // }

    TOS_CPU_INT_DISABLE();
    // k_next_task = readyqueue_highest_ready_task_get();
    // if (knl_is_self(k_next_task)) {
    //     TOS_CPU_INT_ENABLE();
    //     return;
    // }

    // NOTE(review): TOS_ON_PROCPU pairs with the `else` below, so it
    // presumably expands to an if on the current core index — confirm
    // against the macro's definition.
    TOS_ON_PROCPU
    {
        tos_knl_sched_lock();
        knl_do_sched();
        tos_knl_sched_unlock();
    }
    else
    {
        knl_worker_srv();
    }

    // We call this function for a task to actively yield the CPU,
    // so no matter what is on the CPU, we should switch it off to
    // satisfy the task's request, even if the CPU will be in idle.

    if (knl_is_self(k_next_task[cpu_get_index()])) {
        k_next_task[cpu_get_index()] = &k_idle_task[cpu_get_index()]; // Force IDLE!
    }

    cpu_context_switch();
    TOS_CPU_INT_ENABLE();
}

/**
 * @brief Whether the scheduler is currently locked (by any core).
 *
 * @return Non-zero if the scheduler lock nesting count is non-zero.
 */
__KNL__ int knl_is_sched_locked(void)
{
    return (k_sched_lock_nest_cnt != 0u) ? 1 : 0;
}

/**
 * @brief Whether the calling core is currently inside an interrupt.
 *
 * @return Non-zero if this core's IRQ nesting count is non-zero.
 */
__TRS__ __KNL__ int knl_is_inirq(void)
{
    uint8_t core = cpu_get_index();
    return k_irq_nest_cnt[core] > 0u;
}

/**
 * @brief Whether a task is one of the per-core idle tasks.
 *
 * A task is an idle task iff it lives inside the k_idle_task[] array
 * (one idle task per core), regardless of which core it belongs to.
 *
 * @param task The task to test.
 * @return Non-zero if the task is an idle task.
 */
__TRS__ __KNL__ int knl_is_idle(k_task_t* task)
{
    return (task >= &k_idle_task[0]) && (task < &k_idle_task[TOS_CFG_SMP_CPU_MAX]);
}

/**
 * @brief Whether a task is the one currently running on the calling core.
 *
 * @param task The task to test.
 * @return Non-zero if the task is this core's current task.
 */
__TRS__ __KNL__ int knl_is_self(k_task_t* task)
{
    return (k_curr_task[cpu_get_index()] == task);
}

/**
 * @brief Entry function of every idle task; never returns.
 *
 * On the Pro CPU it reaps dynamically-created tasks pending deletion
 * (when enabled); on all cores it drives the power manager (when enabled).
 *
 * @param arg Unused.
 */
__STATIC__ void knl_idle_entry(void* arg)
{
    (void)arg; // unused

    for (;;) {
#if TOS_CFG_OBJ_DYNAMIC_CREATE_EN > 0u
        TOS_ON_PROCPU
        {
            task_free_all();
        }
#endif

#if TOS_CFG_PWR_MGR_EN > 0u
        pm_power_manager();
#endif
    }
}

/**
 * @brief Create the idle task for every core.
 *
 * @return K_ERR_NONE on success, otherwise the first task-creation error.
 */
__PRO__ __KNL__ k_err_t knl_idle_init(void)
{
    uint8_t core;
    k_err_t err;

    // One idle task per core, all at idle priority; bail on first failure.
    for (core = 0; core < TOS_CFG_SMP_CPU_MAX; core++) {
        err = tos_task_create(&k_idle_task[core], "idle",
            knl_idle_entry, K_NULL,
            K_TASK_PRIO_IDLE,
            k_idle_task_stk[core],
            TOS_CFG_IDLE_TASK_STK_SIZE,
            0);
        if (err != K_ERR_NONE) {
            return err;
        }
    }

    return K_ERR_NONE;
}
