/*
 * Copyright (c) 2024 iSOFT INFRASTRUCTURE SOFTWARE CO., LTD.
 * easyAda is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#include <arch/utils.h>
#include <stdlib/string.h>
#include <tools/list.h>
#include <core/schedule.h>
#include <core/domain.h>
#include <core/spinlock.h>
#include <core/timer.h>

/* Per-core scheduler state: ready queues + priority bitmaps, sorted pend
 * queue, current/idle task pointers, pending action and lock (see below). */
schedule_data_t     sd[CONFIG_SMP_CORES];
/* Map a task to its slot in the flat ready-queue array: queues are grouped
 * by domain, with CONFIG_MAX_PRIORITY slots per domain. */
static unsigned int get_readyqueue_index(tcb_t *tcb)
{
    unsigned int base = task_get_domain(tcb) * CONFIG_MAX_PRIORITY;

    return base + task_get_priority(tcb);
}

/*
 * Return the highest ready priority (smallest number) inside domain 'dom'.
 * lvl1bit[dom] carries one bit per lvl2 word; clz() locates the left-most
 * set bit, so bit 0 corresponds to the most urgent priority.
 */
static unsigned int bitmap_first_1(const schedule_data_t *sd, dom_t dom)
{
    const unsigned int words_per_dom = (CONFIG_MAX_PRIORITY + ULONG_BITS - 1) / ULONG_BITS;
    unsigned int       word;
    unsigned int       bit;

    assert(dom < domScheduleLength);

    word = clz(sd->lvl1bit[dom]);
    bit  = clz(sd->lvl2bit[words_per_dom * dom + word]);

    return word * ULONG_BITS + bit;
}

/* Clear the ready bit for (dom, prio); when the whole lvl2 word drains,
 * clear the summary bit in lvl1 too. */
static inline void bitmap_clean(schedule_data_t *sd, dom_t dom, unsigned long prio)
{
    const unsigned int words_per_dom = (CONFIG_MAX_PRIORITY + ULONG_BITS - 1) / ULONG_BITS;
    unsigned int       word;
    unsigned int       bit;

    assert(dom < domScheduleLength);
    assert(prio < sizeof(sd->lvl2bit[0]) * 8);

    word = prio / ULONG_BITS + words_per_dom * dom;
    bit  = prio % ULONG_BITS;

    sd->lvl2bit[word] &= ~(ULONG_LEFT_MOST_BIT >> bit);

    if (sd->lvl2bit[word] == 0) {
        sd->lvl1bit[dom] &= ~(ULONG_LEFT_MOST_BIT >> (word - words_per_dom * dom));
    }
}

/* Reset both bitmap levels: no priority is marked ready. */
static inline void bitmap_init(schedule_data_t *sd)
{
    for (int i = 0; i < CONFIG_DOM_NUMBER; i++) {
        sd->lvl1bit[i] = 0;
    }
    (void)memset(sd->lvl2bit, 0, sizeof(sd->lvl2bit));
}

/* Remove 'tcb' from its ready queue; drop the bitmap bit when it was the
 * last runnable task at that (domain, priority). */
static void readyqueue_deque(schedule_data_t *sd, tcb_t *tcb)
{
    unsigned int slot;

    list_remove(&tcb->runlist);
    slot = get_readyqueue_index(tcb);
    if (list_is_empty(&sd->readyqueue[slot])) {
        bitmap_clean(sd, tcb->domain, tcb->priority);
    }
}

/* Append a READY task to its queue; publish the priority bit when the
 * queue transitions from empty to non-empty. */
static void readyqueue_append(schedule_data_t *sd, tcb_t *tcb)
{
    unsigned int slot = get_readyqueue_index(tcb);

    assert(tcb->state == TASK_READY);

    if (list_is_empty(&sd->readyqueue[slot])) {
        bitmap_set(sd, tcb->domain, tcb->priority);
    }
    list_append(&sd->readyqueue[slot], &tcb->runlist);
}

/* Pop and return the most urgent READY task of domain 'dom'.
 * Caller guarantees the domain has at least one runnable task. */
static tcb_t *readyqueue_next(schedule_data_t *sd, dom_t dom)
{
    unsigned int prio = bitmap_first_1(sd, dom);

    assert(prio < CONFIG_MAX_PRIORITY);

    list_t *head = &sd->readyqueue[CONFIG_MAX_PRIORITY * dom + prio];
    tcb_t  *task = container_of(head->next, tcb_t, runlist);

    readyqueue_deque(sd, task);

    return task;
}

/* Remove a task from the pend queue; 'sd' is kept for interface symmetry
 * with the other queue helpers. */
static inline void pendqueue_deque(schedule_data_t *sd, tcb_t *task)
{
    (void)sd;
    list_remove(&task->runlist);
}

/* Keep the pend queue sorted by wake-up time: insert 'tcb' in front of the
 * first task that wakes strictly later, or at the tail otherwise. */
static void pendqueue_enque(schedule_data_t *sd, tcb_t *tcb)
{
    tcb_t  *it;
    list_t *anchor = sd->pendqueue.prev; /* default: append at tail */

    list_foreach(it, &sd->pendqueue, tcb_t, runlist)
    {
        if (it->timeslice.readytime.time > tcb->timeslice.readytime.time) {
            anchor = it->runlist.prev;
            break;
        }
    }
    list_insert(anchor, &tcb->runlist);
}

/*
 * Move expired sleeping tasks from the head of the (time-sorted) pend queue
 * onto the ready queues. The queue is ordered by wake-up time, so the scan
 * stops at the first task whose deadline has not arrived. If a woken task
 * belongs to the running domain and outranks (or ties) the current task,
 * request a new scheduling decision.
 *
 * 'currenttime' is unused; expiry is re-read from the timer directly.
 */
void schedule_wakeup(unsigned int currenttime)
{
    (void)currenttime;
    tcb_t           *cur;
    schedule_data_t *sd = current_schedule_data();

    while (!list_is_empty(&sd->pendqueue)) {
        cur = container_of(sd->pendqueue.next, tcb_t, runlist);
#ifdef CONFIG_KERNEL_MCS
        /* MCS build: wake-up time comes from the scheduling context refill. */
        if (refill_head(cur->tcbSchedContext)->rTime <= mcs_timer_current()) {
            if (cur->domain == curDomainId && task_get_priority(cur) <= task_get_priority(current())) {
                sd->action = ACTION_CHOOSE_NEW;
            }
            list_remove(&cur->runlist);
            cur->state = TASK_READY;
            readyqueue_append(sd, cur);
        } else {
            break;
        }

#else

        /* Non-MCS build: wake-up time is the per-task readytime stamp. */
        if (timer_expired(cur->timeslice.readytime, cur->affinity)) {
            if (cur->domain == curDomainId && task_get_priority(cur) <= task_get_priority(current())) {
                sd->action = ACTION_CHOOSE_NEW;
            }
            list_remove(&cur->runlist);
            cur->state = TASK_READY;
            readyqueue_append(sd, cur);
        } else {
            /* Queue is time-sorted: nothing further can be expired. */
            break;
        }
#endif
    }
}
/*
 * Scan the current domain's ready queues that are strictly more urgent than
 * 'task' (lower index == higher priority) and deque tasks whose timer has
 * expired and whose priority beats the currently running task.
 *
 * Fixes vs. the original:
 *  - list_is_empty() was handed readyqueue[i].next (a pointer into the first
 *    node) instead of the queue head, so the emptiness test was wrong;
 *  - the while loop never advanced when the head task failed the expiry or
 *    priority checks, spinning forever on the same element — break instead;
 *  - removal goes through readyqueue_deque() so the priority bitmap is
 *    cleared when a queue drains (a bare list_remove left a stale bit that
 *    would make readyqueue_next() pick an empty queue).
 *
 * 'currenttime' is unused; expiry is re-read from the timer directly.
 */
void schedule_readyque_wakeup(unsigned int currenttime, tcb_t *task)
{
    (void)currenttime;
    tcb_t           *cur;
    int              i;
    schedule_data_t *schedule_data = current_schedule_data();

    for (i = curDomainId * CONFIG_MAX_PRIORITY; i < task->priority + curDomainId * CONFIG_MAX_PRIORITY; i++) {
        while (!list_is_empty(&schedule_data->readyqueue[i])) {
            cur = container_of(schedule_data->readyqueue[i].next, tcb_t, runlist);
            if (timer_expired(cur->timeslice.readytime, cur->affinity) &&
                cur->priority < schedule_data->pcurrent->priority) {
                readyqueue_deque(schedule_data, cur);
            } else {
                /* Head task is not removable: stop scanning this queue. */
                break;
            }
        }
    }
}

/*
 * Place 'task' on the queue matching its state:
 *  TASK_READY -> ready queue (readytime refreshed first when not expired),
 *  TASK_SLEEP -> time-sorted pend queue (MCS build uses its own enqueue),
 *  TASK_IDLE  -> nowhere (idle is tracked via sd->pidle),
 *  otherwise  -> not schedulable: drop the reference the queue would have
 *                held (taken by schedule_attach()).
 *
 * Fix: the sanity-check dprintf passed the whole readytime struct to %u;
 * it now prints the .time member, matching the other readytime uses.
 */
void task_enque(schedule_data_t *sd, tcb_t *task)
{
#ifdef CONFIG_KERNEL_MCS
    /* Mixed criticality: tasks at or above the system level also join the
     * critical queue and revert to their backup priority. */
    if (task->critical_level >= sd->sys_critical_level) {
        criticalqueue_enque(sd, task);
        task_set_priority(task, task->backup_priority);
    }
#endif
    switch (task->state) {
        case TASK_READY:
            /* NOTE(review): refreshes readytime when it has NOT expired —
             * confirm this inverted condition is intentional. */
            if (!timer_expired(task->timeslice.readytime, task->affinity)) {
                task->timeslice.readytime = timer_current();
            }

            readyqueue_append(sd, task);
            break;

        case TASK_SLEEP:
#ifdef CONFIG_KERNEL_MCS
            mcs_pendqueue_enque(sd, task);
#else
            /* A sleeping task must still have a future wake-up time. */
            if (timer_expired(task->timeslice.readytime, task->affinity)) {
                dprintf("current time: %u, sleep task's readytime: %u.\n", timer_current().time,
                        task->timeslice.readytime.time);
                assert(0);
            }
            pendqueue_enque(sd, task);
#endif
#ifdef SC_DEBUG
            dprintf("%s pendqueue_enque!\n", task->name);
#endif

            break;
        case TASK_IDLE:
            break;

        default:
            (void)tcb_put(task);
            break;
    }
}
/* Hand a task to the scheduler of its affine core. Takes a reference on
 * the task for as long as it sits on a scheduler queue. */
void schedule_attach(tcb_t *task)
{
    unsigned int core_id = task_get_affinity(task);

    assert(core_id < CONFIG_SMP_CORES);

    schedule_data_t *sd  = get_schedule_data(core_id);
    dom_t            dom = task_get_domain(task);

    spin_lock(&sd->lock);

    assert(list_is_empty(&task->runlist) && task != current() && task != sd->pidle);

    /* A ready task of the running domain that outranks (or ties) the
     * current task forces a new scheduling decision. */
    if (dom == curDomainId && task_get_state(task) == TASK_READY &&
        task_get_priority(task) <= task_get_priority(current())) {
        sd->action = ACTION_CHOOSE_NEW;
    }

    tcb_get(task);
    task_enque(sd, task);

    spin_unlock(&sd->lock);
}
/* Take a task off its core's scheduler, releasing the queue's reference
 * (unless the task is the one currently running). */
void schedule_detach(tcb_t *task)
{
    unsigned int     core_id = task_get_affinity(task);
    schedule_data_t *sd      = get_schedule_data(core_id);

    spin_lock(&sd->lock);
    assert(core_id < CONFIG_SMP_CORES);

    if (task == current()) {
        /* Detaching the running task: pick a successor, or idle if none. */
        sd->action = readyqueue_is_empty(sd, curDomainId) ? ACTION_RUN_IDLE : ACTION_CHOOSE_NEW;
    } else if (task->state == TASK_READY) {
        readyqueue_deque(sd, task);
        sd->action = readyqueue_is_empty(sd, curDomainId) ? ACTION_RUN_CURRENT : ACTION_CHOOSE_NEW;
        (void)tcb_put(task);
    } else {
        pendqueue_deque(sd, task);
        (void)tcb_put(task);
    }

    spin_unlock(&sd->lock);
}

/*
 * Initialize this core's scheduler state: empty all ready queues and the
 * pend queue, clear the bitmaps, and install 'idle' as both the idle task
 * and the initial current task (with a reference held on it).
 *
 * Fix: the loop index was a signed int compared against a size_t bound
 * (sizeof expression), a signed/unsigned comparison; it is now size_t and
 * uses the `sizeof a / sizeof a[0]` idiom.
 */
void schedule_init(tcb_t *idle)
{
    int core_id = task_get_affinity(idle);

    assert(core_id == current_cpu());

    schedule_data_t *sd = current_schedule_data();
    sd->pidle           = idle;

    for (size_t i = 0; i < sizeof(sd->readyqueue) / sizeof(sd->readyqueue[0]); i++) {
        list_init(&sd->readyqueue[i]);
    }
    spin_init_unlocked(&sd->lock);
    list_init(&sd->pendqueue);
    bitmap_init(sd);

    tcb_get(sd->pidle); /* scheduler keeps a reference to the idle task */
    sd->pcurrent = sd->pidle;
    sd->action   = ACTION_RUN_CURRENT;
}

/*
 * Core scheduling decision. Interprets sd->action:
 *   ACTION_CHOOSE_NEW  - pop the best ready task, requeue the previous one;
 *   ACTION_RUN_CURRENT - keep running the previous task;
 *   ACTION_RUN_IDLE    - switch to the idle task;
 *   anything else      - sd->action holds a specific tcb_t* to run next.
 * Afterwards it programs the next timer interrupt from the chosen task's
 * budget, the domain time slice and the earliest pend-queue wake-up.
 */
void schedule(void)
{
#if SCHEDULE_TEST
    count_start();
#endif
    tcb_t           *prev = current();
    tcb_t           *next;
    schedule_data_t *sd = current_schedule_data();

    unsigned int next_interrupt;

#ifdef SC_DEBUG
    dprintf("core:%d,sd->action:%d\n", current_cpu(), sd->action);
#endif    // SC_DEBUG

#ifdef CONFIG_KERNEL_MCS

#else
    /* Non-MCS: stamp the outgoing READY task so requeue ordering is fresh. */
    spin_lock(&sd->lock);
    if (task_get_state(prev) == TASK_READY) {
        task_set_readytime(prev, timer_current());
    }
#endif

#ifdef CONFIG_KERNEL_MCS
#ifdef SC_DEBUG
    dprintf("first check critical point:%u\n", check_critical_point(sd));
#endif
    /* At an exact criticality switch point: boost the first critical task's
     * priority and re-insert it into whichever queue it currently sits on. */
    if (check_critical_point(sd) >= 0 && check_critical_point(sd) == mcs_timer_current()) {
#ifdef SC_DEBUG
        dprintf("current is critical point!\n");
#endif    // SC_DEBUG
        next = get_critical_list_firsttask(sd);

        if (TASK_READY == next->state) {
            readyqueue_deque(sd, next);
        } else if (TASK_SLEEP == next->state) {
            pendqueue_deque(sd, next);
        }

        sd->action = ACTION_CHOOSE_NEW;

        task_set_priority(next, next->mcs_priority[next->critical_level]);

#ifdef SC_DEBUG
        dprintf("%s raise priority: %d to %d\n", next->name, next->backup_priority,
                next->mcs_priority[next->critical_level]);
#endif
        if (TASK_READY == next->state) {
            readyqueue_append(sd, next);
        } else if (TASK_SLEEP == next->state) {
            mcs_pendqueue_enque(sd, next);
        }
    }

    {
#endif
        if (sd->action == ACTION_CHOOSE_NEW) {
            next = readyqueue_next(sd, curDomainId);
            /* Requeue prev only if it still belongs to this core. */
            if (prev->affinity == current_cpu()) {
                task_enque(sd, prev);
            }
            if (readyqueue_is_empty(sd, curDomainId)) {
                sd->action = ACTION_RUN_CURRENT;
            }
        } else if (sd->action == ACTION_RUN_CURRENT) {
            next = prev;
        } else if (sd->action == ACTION_RUN_IDLE) {
            next = sd->pidle;
            task_enque(sd, prev);
            sd->action = ACTION_RUN_CURRENT;
        } else {
            /* Any other value is a direct tcb_t* target to switch to. */
            next = sd->action;
            if (next != prev) {
                task_enque(sd, prev);
            }
            if (!readyqueue_is_empty(sd, curDomainId)) {
                sd->action = ACTION_CHOOSE_NEW;
            }
        }

#ifdef CONFIG_KERNEL_MCS
        criticalqueue_deque(next);
    }
#endif
#ifdef SC_DEBUG
    if (next->state != TASK_IDLE && prev->state != TASK_IDLE && (next != prev)) {
        dprintf("core:%d,sd->action:%d,prev->name:%s,next->name:%s\n", current_cpu(), sd->action, prev->name,
                next->name);
    }
#endif    // SC_DEBUG

    sd->pcurrent = next;
    if (next != prev) {
        task_switch_prepare(next, prev);
    }

    spin_unlock(&sd->lock);
#ifdef CONFIG_KERNEL_MCS
    switchSchedContext(next, current_cpu());
    next_interrupt = refill_head(next->tcbSchedContext)->rAmount;

#if CONFIG_DOM_NUMBER > 1
    next_interrupt = MIN(next_interrupt, curDomainTime);
#endif

    /* Clamp the interrupt to the earliest relevant pend-queue wake-up. */
    if (!list_is_empty(&sd->pendqueue)) {
#ifdef CONFIG_KERNEL_MCS
        /* NOTE(review): 'cur' is not declared anywhere in this function —
         * this branch cannot compile as-is under CONFIG_KERNEL_MCS; verify
         * against the build configuration actually used. */
        cur = container_of(&sd->pendqueue, tcb_t, runlist);
        do {
            cur = container_of(cur->runlist.next, tcb_t, runlist);
#ifdef SC_DEBUG
            dprintf("pendqueue task name:%s,it's ready time is %u\n", cur->name,
                    refill_head(cur->tcbSchedContext)->rTime);
#endif    // SC_DEBUG
#if CONFIG_DOM_NUMBER > 1
            if (cur->domain == curDomainId) {
                next_interrupt = MIN(next_interrupt, refill_head(cur->tcbSchedContext)->rTime - timer_current().time);
                break;
            }
#else     // CONFIG_DOM_NUMBER
            next_interrupt = MIN(next_interrupt, refill_head(cur->tcbSchedContext)->rTime - timer_current().time);
            break;
#endif    // CONFIG_DOM_NUMBER
        } while (cur->runlist.next != &sd->pendqueue);

#else
        /* NOTE(review): dead branch (nested !MCS inside the MCS-only region)
         * and the 'break' below is not inside a loop — fix before enabling. */
        tcb_t *cur     = container_of(sd->pendqueue.next, tcb_t, runlist);
        next_interrupt = MIN(next_interrupt, cur->timeslice.readytime.time - timer_current().time);
        break;
#endif    // CONFIG_KERNEL_MCS
    }
#ifdef SC_DEBUG
    dprintf("next_interrupt:%d\n", next_interrupt);
#endif    // SC_DEBUG

#ifdef CONFIG_KERNEL_MCS
    /* Also wake up exactly at the next criticality switch point. */
    if (check_critical_point(sd) > (long)mcs_timer_current()) {
#ifdef SC_DEBUG
        dprintf("critical_point:%u\n", check_critical_point(sd));
        dprintf("next_interrupt for mcs:%d\n", check_critical_point(sd) - mcs_timer_current());
#endif    // SC_DEBUG
        next_interrupt = MIN(next_interrupt, check_critical_point(sd) - mcs_timer_current());
    }

#endif    // CONFIG_KERNEL_MCS
    timer_next(next_interrupt);
#else
    /* Non-MCS: budget is the task's time slice (ms -> us), bounded by the
     * domain slice and the head of the time-sorted pend queue. */
    next_interrupt = next->timeslice.timeslice * 1000;

    if (domScheduleLength > 1U) {
        next_interrupt = MIN(next_interrupt, +curDomainTime * 1000);
    }

    if (!pendqueue_is_empty(sd)) {
        tcb_t *cur     = container_of(sd->pendqueue.next, tcb_t, runlist);
        next_interrupt = MIN(next_interrupt, cur->timeslice.readytime.time - timer_current().time);
    }
    timer_next(next_interrupt);
#endif
#if SCHEDULE_TEST
    count_end();
#endif
}

/* Accept a task migrated to this core: route it by state, force a new
 * scheduling decision, and drop the sender's reference. */
void schedule_accept_task(tcb_t *task)
{
    schedule_data_t *_sd = current_schedule_data();

    switch (task->state) {
        case TASK_READY:
            schedule_attach(task);
            break;
        case TASK_SLEEP:
            pendqueue_enque(_sd, task);
            break;
        case TASK_DIED: /* fallthrough: dead tasks are not queued */
        default:
            break;
    }

    _sd->action = ACTION_CHOOSE_NEW;
    (void)tcb_put(task);
}

/* Force this core's next scheduling action; only the RUN_IDLE and
 * CHOOSE_NEW sentinels may be requested from outside. */
void schedule_action_change(tcb_t *action)
{
    assert(action == ACTION_RUN_IDLE || action == ACTION_CHOOSE_NEW);

    current_schedule_data()->action = action;
}
