#include "sched.h"

#ifndef CFG_CORE_NUM 
#define CFG_CORE_NUM 1
#endif  //CFG_CORE_NUM

#define FINE_SCHED_LOCK_IDLE 0xf000

// #define CFG_ONE_SCHED_LOCK
static unsigned fine_sched_lock[CFG_CORE_NUM];


/**
 * @brief Acquire admission to a core's schedule data.
 *
 * Everybody is expected to request an admission for operating
 * schedule data (such as the ready queue) before the actual
 * operation on that data.
 *
 * @param self   id of the requesting core, stored as the lock owner
 * @param target core whose schedule data will be operated on
 *
 * Please ensure that interrupt is safe!
 */
void request_sched_data(int self, int target)
{
#ifdef CFG_ONE_SCHED_LOCK
    target = 0;
#endif
    for (;;)
    {
        // spin until the slot flips from IDLE to our owner id
        if (arch_compare_and_swap(&fine_sched_lock[target],
                                  FINE_SCHED_LOCK_IDLE,
                                  self))
        {
            break;
        }
    }
}

/**
 * @brief Release the schedule-data admission previously taken with
 * request_sched_data().
 *
 * @param self   id of the core that currently owns the lock
 * @param target core whose schedule data lock is being released
 */
void release_sched_data(int self, int target)
{
#ifdef CFG_ONE_SCHED_LOCK
    target = 0;
#endif
    for (;;)
    {
        // swap our owner id back to IDLE; loops only while the CAS fails
        if (arch_compare_and_swap(&fine_sched_lock[target],
                                  self,
                                  FINE_SCHED_LOCK_IDLE))
        {
            break;
        }
    }
}


extern struct AstralScheduler __start_scheduler;
extern struct AstralScheduler __stop_scheduler;

/**
 * @brief Look up an installed scheduler by its id.
 *
 * Walks the linker-collected scheduler section between
 * __start_scheduler and __stop_scheduler.
 *
 * @param id scheduler id to search for
 * @return the matching scheduler, or NULL if no scheduler has this id
 */
AstralScheduler get_sheduler_id(int id)
{
    AstralScheduler it = &__start_scheduler;

    while (it < &__stop_scheduler)
    {
        if (it->id == id)
        {
            return it;
        }
        it++;
    }

    return NULL;
}

/** The following functions are defined by the 
 *  framework of schedule:
 *  1. add ready task to corresponding ready Q.
 *  2. remove ready task from corresponding ready Q.
 *  3. pick a task from corresponding ready Q regardless of current running task.
 *  4. pick a task to run under the priority comparison of current task.
 */


/**
 * @brief Dispatch a task to a core's ready queue.
 *
 * Chooses a target core, hands the task to its scheduler's madd_ready()
 * under the schedule-data lock, and pokes the target core to reschedule
 * when it is not the local one.
 *
 * @param task the task to enqueue; task->core is updated to the
 *             selected target core
 */
void add_ready_task(AstralTask task)
{
    AstralScheduler as = get_sheduler_id(task->method);

    int core = arch_get_cpuid();
    int targ = 0;

    // NOTE(review): when affinity < CFG_CORE_NUM the task always lands
    // on core 0 instead of task->affinity — confirm this is intended.
    if (task->affinity >= CFG_CORE_NUM)
    {
        // no valid affinity: spread tasks across the online cores
        targ = task->id % arch_actual_core_num();
    }

    if (!arch_is_core_online(targ))
    {
        kprint("core %d offline, dispatch task to core %d\n", targ, core);
        targ = core;
    }

    task->core = targ;

    if (task->status == FINISH)
    {
        // BUGFIX: format string lacked the %s for task->name, which is
        // undefined behavior for a printf-style varargs call.
        kerror("put Finished task %s to q\n", task->name);
    }

    request_sched_data(core, targ);
    as->madd_ready(task, targ);
    if (targ != core)
    {
        // remote queue grew: ask the target core to reschedule
        mark_need_resched(targ);
    }
    release_sched_data(core, targ);
}

/**
 * @brief Remove a task from the ready queue via its scheduler's
 * mremove_ready() hook, under the local core's schedule-data lock.
 *
 * NOTE(review): the lock is taken on the calling core even though the
 * task may have been dispatched to task->core — confirm callers only
 * remove tasks queued on the current core.
 *
 * @param task the task to take off the ready queue
 */
void remove_ready_task(AstralTask task)
{
    int core = arch_get_cpuid();
    AstralScheduler as = get_sheduler_id(task->method);

    request_sched_data(core, core);
    as->mremove_ready(task);
    release_sched_data(core, core);
}

/**
 * @brief Pick a runnable task for the current core regardless of the
 * currently running task.
 *
 * Installed schedulers are probed in section order; the first one that
 * yields a task wins.
 *
 * @return the selected task, or NULL when every ready queue is empty
 */
AstralTask pick_next_task()
{
    int core = arch_get_cpuid();
    AstralTask picked = NULL;

    request_sched_data(core, core);
    for (AstralScheduler as = &__start_scheduler;
         !picked && as < &__stop_scheduler;
         as++)
    {
        picked = as->mpick_next(core);
    }
    release_sched_data(core, core);

    return picked;
}

/**
 * @brief This function implements preemption and Round-Robin
 *
 * @note !!! Please do not !!! manually add curr to ready queue!
 * @note re-queue action is done at context switch.
 * @param curr current running task
 * @return AstralTask the next task
 */
/**
 * @brief This function implements preemption and Round-Robin.
 *
 * Probes each installed scheduler's mtry_next() with the current task
 * so the scheduler can compare priorities; the first non-NULL answer
 * is taken.
 *
 * @note !!! Please do not !!! manually add curr to the ready queue!
 * @note re-queue action is done at context switch.
 * @param curr current running task
 * @return AstralTask the next task, or NULL when nothing is runnable
 */
AstralTask try_next_task(AstralTask curr)
{
    int core = arch_get_cpuid();
    AstralTask chosen = NULL;

    request_sched_data(core, core);
    for (AstralScheduler as = &__start_scheduler;
         !chosen && as < &__stop_scheduler;
         as++)
    {
        chosen = as->mtry_next(core, curr);
    }
    release_sched_data(core, core);

    return chosen;
}



/** The following functions are the actual schedule entry points
 *  of various schedule application.
 *
 *  There are only 4 kinds of different schedule requirements:
 *  1. call style blocking/enterance
 *  2. call style preemption
 *  3. interrupt style preemption
 *  4. tick style preemption
 *
 *  Actually, tick style preemption is a special case of interrupt style.
 *  In order to support time-slice schedule algorithm, we special handle it.
 *
 *  So we can find that the exit of interrupt is of vital importance.
 *  It is the last chance for executing preemption.
 * 
 *  DY Young, @Xi'An, CN 2024
 */


AstralTask gCurrTask[CFG_CORE_NUM];
AstralTask gNextTask[CFG_CORE_NUM];

/**
 * @brief Return the task currently running on the calling core.
 *
 * @return gCurrTask entry of this core (NULL before the first switch)
 */
AstralTask get_curr_task()
{
    return gCurrTask[arch_get_cpuid()];
}

#define _SYSTEM_SP_

/**
 * @brief Final stage of a context switch, running on the system stack:
 * re-queue the outgoing task if it is still runnable, then resume the
 * task previously stored in gNextTask for this core.
 *
 * Must only be called after arch_save_curr_context() has moved
 * execution off the outgoing task's stack. Never returns to the caller.
 */
static void _SYSTEM_SP_ 
set_ready_and_switch_in()
{
    int core = arch_get_cpuid();
    AstralTask curr = gCurrTask[core];
    // status >= READY means the task was not blocked/finished: it must
    // go back onto a ready queue so it can run again later.
    if(curr && (curr->status >= READY))
    {
        if(curr->status == BLOCKED)
        {
            // BLOCKED should sort below READY; reaching here with
            // BLOCKED means the status ordering is broken — halt.
            kerror("status error\n");
            while(1){}
        }
        // NOTE(review): curr becomes visible to other cores via
        // add_ready_task() before its status is set to READY — confirm
        // pickers tolerate observing the pre-update status.
        add_ready_task(curr);
        curr->status = READY;
    }
    if(curr)
    {
        // mark the outgoing context as fully saved and hand-over safe
        curr->released = True;
    }

    // make the previously chosen task current and jump into it
    curr = gNextTask[core];
    gCurrTask[core] = curr;
    curr->run_times++;
    curr->status = RUNNING;
    // does not return: execution continues on the new task's stack
    arch_switch_to_task(curr->ksp);
}

/**
 * @brief call style blocking or enterance
 * 
 * This function is expected to be called when:
 * 1. a thread wants to block itself (such as fail to obtain resources).
 * 2. a thread goes to the end of its life cycle.
 * 3. the whole system actives the schedule system for the first time.
 */
void schedule()
{
    // it is critical to keep irq safe
    FORBIT_LOCAL_INT(irqstatus);

    int core = arch_get_cpuid();
    if (is_sched_disabled(core))
    {
        RESUME_LOCAL_INT(irqstatus);
        // if in spinlock context, return directly
        return;
    }

    AstralTask curr = gCurrTask[core];
    AstralTask next = pick_next_task();

    // No runnable task yet: WINK briefly opens the irq window so an
    // interrupt can make a task ready, then retry the pick.
    while(!next)
    {
        next = pick_next_task();
        WINK_LOCAL_INT(irqstatus);
    }

    if(curr == next)
    {
        // nothing better to run; stay on the current task
        RESUME_LOCAL_INT(irqstatus);
        return;
    }

    gNextTask[core] = next;
 
    if(curr)
    {
        // current task's context is saved on its stack
        // and the sp is switched to system stack.

        // ATTENTION PLEASE Here!
        // ret should be used tightly after getting assigned
        int ret = arch_save_curr_context(&curr->ksp);
        if(!ret)
        {
            // ret == 0: we were switched back in and are resuming here;
            // just restore interrupts and keep running.
            RESUME_LOCAL_INT(irqstatus);
            return;
        }
    }

    // we are working on system stack!
    // all of local variables above are invalid!
    set_ready_and_switch_in();
}

/**
 * @brief tick interrupt style preemption
 *
 * This function is expected to be called when:
 * 1. core gets interrupted by tick irq.
 *
 * This is the time slice checker but do not select the next task.
 * If current task uses out of its slices,
 * we just mark a reschedule flag, and do the actual schedule at
 * the exit of interrupt handling process.
 *
 * What's important!
 * The actual round-robin algorithm implementation
 * is placed at schedule_irq_reschedule which actually
 * is a preemption checker and rr checker.
 */
/**
 * @brief Tick-interrupt time-slice accounting for the current core.
 *
 * Decrements the running task's remaining slice; when it hits zero the
 * task is marked SLICEUP, a reschedule is requested (acted on at irq
 * exit by schedule_irq_reschedule), and the slice budget is refilled.
 */
static void schedule_tick()
{
    int core = arch_get_cpuid();
    AstralTask curr = gCurrTask[core];

    // BUGFIX: before the first schedule() activates a task,
    // gCurrTask[core] is NULL — guard against dereferencing it here.
    if(curr && (curr->status >= READY))
    {
        curr->slice_work--;
        if(curr->slice_work == 0)
        {
            curr->status = SLICEUP;
            mark_need_resched(core);
            curr->slice_work = curr->slice;
        }
    }
}

/**
 * @brief interrupt style preemption
 *
 * This function is expected to be called when:
 * 1. exiting interrupt handling process.
 *
 * If preemption occurs return ture to indicate the caller.
 *
 * The preemption and rr check is implemented by a detail
 * scheduler who provides an api named try_next_task.
 */
/**
 * @brief Preemption / round-robin check at interrupt exit.
 *
 * Consumes the need-resched flag for this core and asks the schedulers
 * for a better task via try_next_task().
 *
 * @return True when a switch is required (gNextTask is set and the
 *         caller must perform the context switch); False otherwise.
 */
boolean schedule_irq_reschedule()
{
    int core = arch_get_cpuid();

    if(!need_reschedule(core))
    {
        return False;
    }

    if(is_sched_disabled(core))
    {
        // in spinlock critical section: defer to the unlock path
        return False;
    }

    clean_need_resched(core);

    AstralTask curr = gCurrTask[core];
    AstralTask next = try_next_task(curr);

    // BUGFIX: try_next_task() may legitimately return NULL (all ready
    // queues empty — other call sites loop on !next). Previously NULL
    // was stored into gNextTask and True returned, crashing the switch.
    if(!next)
    {
        return False;
    }

    if (curr == next)
    {
        // current task keeps the CPU for another round
        curr->status = RUNNING2;
        curr->run_times++;
        return False;
    }

    gNextTask[core] = next;

    return True;
}

/**
 * @brief call style preemption
 *
 * This function is expected to be called when:
 * 1. a thread resumes other threads.
 * 2. a thread releases block-style resources.
 * 3. a thread exits from a spinlock section.
 *
 * The caller should ensure that current excution
 * environment is a thread execution context.
 */
void schedule_cond()
{
    // keep the whole check-and-switch sequence interrupt safe
    FORBIT_LOCAL_INT(irqstatus);

    int core = arch_get_cpuid();
    AstralTask curr = gCurrTask[core];

    if(is_sched_disabled(core))
    {
        // in spinlock critical section
        // defer schedule to the exit of unlock
        curr->status = RUNNING1;
        RESUME_LOCAL_INT(irqstatus);
        return;
    }

    // No candidate yet: WINK briefly opens the irq window so an
    // interrupt can make a task ready, then retry.
    AstralTask next = try_next_task(curr);
    while(!next)
    {
        next = try_next_task(curr);
        WINK_LOCAL_INT(irqstatus);
    }

    if(next == curr)
    {
        // current task remains the best choice; keep running it
        curr->status = RUNNING;
        RESUME_LOCAL_INT(irqstatus);
        return;
    }

    gNextTask[core] = next;

    // non-zero on the save path; zero when this task is resumed later
    int ret = arch_save_curr_context(&curr->ksp);
    if(!ret)
    {
        RESUME_LOCAL_INT(irqstatus);
        return;
    }


    // we are working on system stack!
    // all of local variables above are invalid!
    set_ready_and_switch_in();
}

/**
 * @brief Change status and execute schedule as need
 *
 * The execution is interrupt safe.
 */
boolean state_change(AstralTask task, AstralTaskStatus status, boolean do_sched)
{
    FORBIT_LOCAL_INT(irqstatus);

    // NOTE(review): the do_sched parameter is currently unused — confirm
    // whether the schedule()/schedule_cond() calls below should honor it.
    switch (status)
    {
    case CREATED:
        task->status = CREATED;
        break;
    case RESUMED:
        /**
         * @brief a resumed task re-enters a ready queue
         * TODO: if the task is not in current core, signal other core
         */
        add_ready_task(task);
        task->status = RESUMED;
        task->released = True;
        break;
    case PREEMPTED:
        // call-style preemption: may switch away and resume here later
        task->status = PREEMPTED;
        schedule_cond();
        break;
    case BLOCKED:
        task->status = BLOCKED;
        schedule();
        break;
    case FINISH:
        // a finished task switches away for good; the RESUME below runs
        // only for the other cases / after being switched back in
        task->status = FINISH;
        schedule();
        break;
    default:
        break;
    }

    RESUME_LOCAL_INT(irqstatus);

    return True;
}

/**
 * @brief Synchronize task status around an IRQ-driven context switch.
 *
 * Re-queues the outgoing task when it is still runnable, then waits
 * until the incoming task's context has been released by the core that
 * last ran it before marking it RUNNING.
 *
 * @param curr outgoing task (may be NULL before the first switch)
 * @param next incoming task (must not be NULL)
 */
void shedule_irq_sync_status(AstralTask curr, AstralTask next)
{
    if(curr)
    {
        if(curr->status >= READY)
        {
            curr->status = READY;
            add_ready_task(curr);
        }
        // BUGFIX: this write used to sit outside the NULL check and
        // dereferenced curr when no task had run on this core yet.
        curr->released = True;
    }

    // spin until the previous owner has fully saved next's context
    while (!next->released)
    {
    }
    next->status = RUNNING;
    next->released = False;
    next->run_times++;
}

/**
 * @brief Initialize the schedule framework.
 *
 * Resets the per-core schedule-data locks, runs every installed
 * scheduler's minit() hook, and finally registers the tick handler.
 */
void shed_init()
{
    // BUGFIX: initialize the locks first. The static array zero-fills,
    // not FINE_SCHED_LOCK_IDLE, so any request_sched_data() issued from
    // a minit() hook (or an early tick) would spin forever.
    for(int i=0; i<CFG_CORE_NUM; i++)
    {
        fine_sched_lock[i] = FINE_SCHED_LOCK_IDLE;
    }

    for (AstralScheduler s = &__start_scheduler; s < &__stop_scheduler; s++)
    {
        if (s->minit)
        {
            s->minit();
        }
        kprint("active scheduler id %d\n",s->id);
    }
    int count = &__stop_scheduler - &__start_scheduler;
    kprint("Total %d scheduler installed\n", count);

    // BUGFIX: register the tick handler last, after locks and
    // schedulers are ready, so an early tick cannot race the init.
    arch_set_tick_sched(schedule_tick);
}


/**
 * @brief Per-core idle loop: keep local interrupts enabled and spin
 * until an interrupt delivers work.
 */
void core_idle()
{
    for (;;)
    {
        arch_enable_local_irq(0x0);
    }
}