#include <linux/sched.h>
#include <linux/sched/debug.h>
// #include <linux/interrupt.h>
#include <linux/smp.h>
#include <trace/events/sched.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/context_tracking.h>
#include <linux/rseq.h>

#include "inc/sched.h"

#include "_core_/switch.c"
#include "_core_/pick.c"
#include "_core_/work.c"
#include "_core_/block_task.c"
#include "_core_/smp.c"

#include "_core_/init.c"
#include "_core_/pi.c"
#include "_core_/rq.c"
#include "_core_/task.c"
#include "_core_/wake_q.c"
#include "_core_/wakeup.c"

/*
 * Constants for the sched_mode argument of __schedule().
 *
 * The mode argument allows RT enabled kernels to differentiate a
 * preemption from blocking on an 'sleeping' spin/rwlock.
 */
#define SM_IDLE (-1)
#define SM_NONE 0
#define SM_PREEMPT 1
#define SM_RTLOCK_WAIT 2

/*
 * __schedule() is the main scheduler function: picks the next runnable
 * task on this CPU's runqueue and context-switches to it.
 *
 * @sched_mode: one of SM_IDLE / SM_NONE / SM_PREEMPT / SM_RTLOCK_WAIT,
 *              telling us how we got here (idle loop, voluntary schedule,
 *              preemption, or blocking on a sleeping rt-lock).
 *
 * Must be called with preemption disabled.
 *
 * Fix vs previous revision: `preempt` was initialized at its declaration
 * to `sched_mode > SM_NONE` and then unconditionally overwritten below
 * with a different predicate — a dead store that contradicted the real
 * definition. The initializer is removed; behavior is unchanged.
 */
static void __sched __schedule(int sched_mode)
{
    struct task_struct *prev, *next;
    struct rq_flags rf;
    struct rq *rq;
    bool preempt;
    unsigned int prev_state;
    int cpu;

    cpu = smp_processor_id();
    rq = cpu_rq(cpu);
    prev = rq->curr;

    /* Task state changes only considers SM_PREEMPT as preemption */
    preempt = sched_mode == SM_PREEMPT;
    /*
     * We must load prev->state once (task_struct::state is volatile), such
     * that we form a control dependency vs deactivate_task() below.
     */
    prev_state = READ_ONCE(prev->__state);

    local_irq_disable();

    rq_lock(rq, &rf);

    if (sched_mode == SM_IDLE)
    {
        /* SCX must consult the BPF scheduler to tell if rq is empty */
        if (!rq->nr_running && !scx_enabled())
        {
            /* Idle and nothing runnable: keep running the idle task. */
            next = prev;
            goto picked;
        }
    }
    else if (!preempt && prev_state)
    {
        /* Voluntary sleep: take prev off the runqueue if still eligible. */
        try_to_block_task(rq, prev, prev_state);
    }

    next = pick_next_task(rq, prev, &rf);

picked:
    clear_tsk_need_resched(prev);

    if (likely(prev != next))
    {
        RCU_INIT_POINTER(rq->curr, next);

        /* Also unlocks the rq: */
        rq = context_switch(rq, prev, next, &rf);
    }
    else
    {
        /* Same task picked again; just drop the lock and re-enable IRQs. */
        rq_unpin_lock(rq, &rf);
        raw_spin_rq_unlock_irq(rq);
    }
}

/*
 * Run __schedule() at least once with preemption disabled, and keep
 * rescheduling for as long as TIF_NEED_RESCHED remains set when we
 * come back out.
 */
static __always_inline void __schedule_loop(int sched_mode)
{
    for (;;)
    {
        preempt_disable();
        __schedule(sched_mode);
        sched_preempt_enable_no_resched();
        if (!need_resched())
            break;
    }
}

/*
 * Mark the task currently running on @rq as needing to reschedule,
 * using thread flag @tif (TIF_NEED_RESCHED or TIF_NEED_RESCHED_LAZY).
 * Only the local-CPU case actually sets the flag here; a remote rq is
 * left untouched.
 */
static void __resched_curr(struct rq *rq, int tif)
{
    struct task_struct *curr = rq->curr;
    struct thread_info *ti = task_thread_info(curr);

    /*
     * Always immediately preempt the idle task; no point in delaying doing
     * actual work.
     */
    if (tif == TIF_NEED_RESCHED_LAZY && is_idle_task(curr))
        tif = TIF_NEED_RESCHED;

    /* Remote CPU: nothing to do here. */
    if (cpu_of(rq) != smp_processor_id())
        return;

    set_ti_thread_flag(ti, tif);
    if (tif == TIF_NEED_RESCHED)
        set_preempt_need_resched();
}

/********************************************************************************/
/*
 * schedule - the main entry point for giving up the CPU voluntarily.
 *
 * If the caller is about to sleep, flush any pending submit work first,
 * then run the schedule loop until need_resched() clears.
 */
void __sched schedule(void)
{
    struct task_struct *p = current;

    if (!task_is_running(p))
        sched_submit_work(p);

    __schedule_loop(SM_NONE);
}

/*
 * schedule_preempt_disabled - schedule() for callers that hold a
 * preempt count.
 *
 * NOTE(review): mainline drops the preempt count around the call
 * (sched_preempt_enable_no_resched() before, preempt_disable() after);
 * here schedule() is invoked directly — confirm this simplification is
 * intentional for this codebase.
 */
void __sched schedule_preempt_disabled(void)
{
    schedule();
}

/*
 * schedule_idle - called from the idle loop to pick real work.
 *
 * Runs __schedule(SM_IDLE) at least once and repeats while
 * need_resched() remains set. Unlike schedule(), no preempt-count
 * manipulation and no submit-work flush are done here.
 */
void schedule_idle(void)
{
    for (;;)
    {
        __schedule(SM_IDLE);
        if (!need_resched())
            break;
    }
}

/*
 * schedule_none - run the schedule loop with SM_NONE, without the
 * submit-work flush that schedule() performs.
 */
void schedule_none(void)
{
    __schedule_loop(SM_NONE);
}

/*
 * resched_curr - request an immediate reschedule of @rq's current task
 * by setting TIF_NEED_RESCHED.
 */
void resched_curr(struct rq *rq)
{
    __resched_curr(rq, TIF_NEED_RESCHED);
}

void resched_curr_lazy(struct rq *rq)
{
    __resched_curr(rq, TIF_NEED_RESCHED); // TODO
}

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 *
 * Finishes the context switch started on another task's behalf, then
 * re-enables preemption (the fork path left it disabled).
 */
void schedule_tail(struct task_struct *prev)
{
    finish_task_switch(prev);
    preempt_enable();
}

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void sched_tick(void)
{
    struct task_struct *donor;
    struct rq *rq;
    struct rq_flags rf;
    int cpu;

    cpu = smp_processor_id();
    rq = cpu_rq(cpu);

    rq_lock(rq, &rf);

    donor = rq->donor;

    update_rq_clock(rq);

    donor->sched_class->task_tick(rq, donor, 0);

    rq_unlock(rq, &rf);
}

/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note, that this is called and return with IRQs disabled. This will
 * protect us against recursive calling from IRQ contexts.
 */
/*
 * This is the entry point to schedule() from kernel preemption
 * off of IRQ context.
 * Note, that this is called and return with IRQs disabled. This will
 * protect us against recursive calling from IRQ contexts.
 */
void __sched preempt_schedule_irq(void)
{
    enum ctx_state prev_state;

    /* Catch callers which need to be fixed */
    BUG_ON(preempt_count() || !irqs_disabled());

    prev_state = exception_enter();

    do
    {
        /*
         * Disable preemption before re-enabling IRQs so that a nested
         * interrupt cannot reschedule underneath us; __schedule() is
         * entered with IRQs on, and we re-disable them before checking
         * need_resched() again.
         */
        preempt_disable();
        local_irq_enable();
        __schedule(SM_PREEMPT);
        local_irq_disable();
        sched_preempt_enable_no_resched();
    } while (need_resched());

    exception_exit(prev_state);
}

/*
 * sched_task_dead - final schedule of an exiting task; never returns.
 *
 * Marks the task TASK_DEAD and schedules away one last time. The
 * ordering matters: the special state must be set before __schedule()
 * so that finish_task_switch() on the next task drops the final
 * task_struct reference.
 */
void __noreturn sched_task_dead(void)
{
    struct task_struct *tsk = current;

    /* Causes final put_task_struct in finish_task_switch(): */
    __set_special_state(tsk, TASK_DEAD);

    /* Tell freezer to ignore us: */
    tsk->flags |= PF_NOFREEZE;

    __schedule(SM_NONE);
    BUG();

    /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
    for (;;)
        cpu_relax();
}

/*
 * preempt_schedule_irq_no_switch - pick, but do not switch to, the next
 * task for this CPU.
 *
 * Returns the picked task when it differs from the current one, or NULL
 * when there is nothing to reschedule to (no resched pending, or the
 * pick came back with the current task).
 */
__sched struct task_struct *preempt_schedule_irq_no_switch(void)
{
    struct task_struct *curr, *picked;
    struct rq_flags rf;
    struct rq *rq;

    if (!need_resched())
        return NULL;

    rq = cpu_rq(smp_processor_id());
    curr = rq->curr;

    rq_lock(rq, &rf);
    picked = pick_next_task(rq, curr, &rf);
    rq_unlock(rq, &rf);

    return picked == curr ? NULL : picked;
}
