#include "../inc/sched.h"

#include <linux/lockdep.h>
#include <trace/events/sched.h>

#ifdef CONFIG_SMP
/*
 * Run the sched_class::task_woken() callback for @p, if its class
 * provides one.  Called once @p has been made runnable so the class
 * can do SMP follow-up work.  Caller holds rq->lock, pinned via @rf.
 */
static inline void task_woken(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
    if (p->sched_class->task_woken)
    {
        /*
         * Our task @p is fully woken up and running; so it's safe to
         * drop the rq->lock, hereafter rq is only used for statistics.
         *
         * NOTE(review): the unpin/repin bracket suggests the callback
         * is allowed to drop and re-take rq->lock — confirm against
         * rq_pin_lock()/rq_unpin_lock() semantics.
         */
        rq_unpin_lock(rq, rf);
        p->sched_class->task_woken(rq, p);
        rq_repin_lock(rq, rf);
    }
}
#else
/* !CONFIG_SMP: no cross-CPU follow-up work exists; empty stub. */
static inline void task_woken(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
}
#endif

/*
 * Wakeup-statistics hook, called at the end of try_to_wake_up() on a
 * successful wakeup.  Intentionally a no-op in this build: no schedstat
 * accounting is wired up here.
 */
static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
}

/*
 * Try to hand the wakeup of @p off to @cpu's wake list instead of
 * activating it directly under the rq lock.
 *
 * This stub never takes the wakelist path (always returns false), so
 * all callers fall back to the direct rq-locked activation.
 */
static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
    return false;
}

/*
 * Mark the task runnable.
 */
/*
 * Mark the task runnable.
 *
 * WRITE_ONCE() prevents store tearing, since p->__state is also read
 * without holding locks elsewhere in the wakeup path.
 */
static inline void ttwu_do_wakeup(struct task_struct *p)
{
    WRITE_ONCE(p->__state, TASK_RUNNING);
}

/*
 * Enqueue a sleeping @p on @rq and complete its wakeup.
 *
 * Translates the relevant WF_* wake flags into ENQUEUE_* flags,
 * activates the task, checks whether it should preempt the currently
 * running task, marks it TASK_RUNNING and finally runs the
 * sched-class task_woken() hook.
 *
 * Caller holds rq->lock (pinned via @rf) and has already called
 * update_rq_clock() — hence ENQUEUE_NOCLOCK.
 */
static void ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
                             struct rq_flags *rf)
{
    int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;

    if (wake_flags & WF_RQ_SELECTED)
        en_flags |= ENQUEUE_RQ_SELECTED;
    if (wake_flags & WF_MIGRATED)
        en_flags |= ENQUEUE_MIGRATED;

    activate_task(rq, p, en_flags);
    wakeup_preempt(rq, p, wake_flags);

    ttwu_do_wakeup(p);

    task_woken(rq, p, rf);
}

/*
 * Queue @p for wakeup on @cpu: either via the remote wake list, or by
 * activating it directly under @cpu's runqueue lock.
 */
static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
    struct rq_flags rf;
    struct rq *rq;

    /* Fast path: hand the wakeup off to @cpu's wake list if possible. */
    if (ttwu_queue_wakelist(p, cpu, wake_flags))
        return;

    /* Slow path: take @cpu's rq lock and activate @p ourselves. */
    rq = cpu_rq(cpu);
    rq_lock(rq, &rf);
    update_rq_clock(rq);
    ttwu_do_activate(rq, p, wake_flags, &rf);
    rq_unlock(rq, &rf);
}

/*
 * Invoked from try_to_wake_up() to check whether the task can be woken up.
 *
 * The caller holds p::pi_lock if p != current or has preemption
 * disabled when p == current.
 *
 * The rules of saved_state:
 *
 *   The related locking code always holds p::pi_lock when updating
 *   p::saved_state, which means the code is fully serialized in both cases.
 *
 *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
 *   No other bits set. This allows to distinguish all wakeup scenarios.
 *
 *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
 *   allows us to prevent early wakeup of tasks before they can be run on
 *   asymmetric ISA architectures (eg ARMv9).
 */
static inline bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
{
    int match = __task_state_match(p, state);

    /*
     * Any match — direct (> 0) or via saved_state (< 0) — counts as
     * success from the waker's point of view.
     */
    *success = match != 0;

    /*
     * Saved state preserves the task state across blocking on
     * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
     * set p::saved_state to TASK_RUNNING, but do not wake the task
     * because it waits for a lock wakeup or __thaw_task(). Also
     * indicate success because from the regular waker's point of
     * view this has succeeded.
     *
     * After acquiring the lock the task will restore p::__state
     * from p::saved_state which ensures that the regular
     * wakeup is not lost. The restore will also set
     * p::saved_state to TASK_RUNNING so any further tests will
     * not result in false positives vs. @success
     */
    if (match < 0)
        p->saved_state = TASK_RUNNING;

    /* Only a direct state match requires an actual wakeup. */
    return match > 0;
}

/*
 * Consider @p being inside a wait loop:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *
 *      if (CONDITION)
 *         break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * between set_current_state() and schedule(). In this case @p is still
 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
 * an atomic manner.
 *
 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
 * then schedule() must still happen and p->state can be changed to
 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
 * need to do a full wakeup with enqueue.
 *
 * Returns: 1 when the wakeup is done,
 *          0 otherwise.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
    struct rq_flags rf;
    struct rq *rq;
    int ret = 0;

    /* Serialize against schedule() by taking task_rq(p)->lock. */
    rq = __task_rq_lock(p, &rf);
    if (task_on_rq_queued(p))
    {
        update_rq_clock(rq);
        /*
         * NOTE(review): sched_delayed tasks appear queued but their
         * dequeue was deferred; the ENQUEUE_DELAYED enqueue presumably
         * completes the wakeup — confirm against delayed-dequeue
         * semantics.
         */
        if (p->se.sched_delayed)
            enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
        if (!task_on_cpu(rq, p))
        {
            /*
             * When on_rq && !on_cpu the task is preempted, see if
             * it should preempt the task that is current now.
             */
            wakeup_preempt(rq, p, wake_flags);
        }
        /* Task is still queued: just flip it back to TASK_RUNNING. */
        ttwu_do_wakeup(p);
        ret = 1;
    }
    __task_rq_unlock(rq, &rf);

    return ret;
}

#ifdef CONFIG_SMP
/*
 * Select the CPU on which the freshly-woken @p should be enqueued.
 *
 * Returns the target CPU, or -1 when the wakeup was handed off to a
 * remote CPU's wake list (ttwu_queue_wakelist()) and the caller has
 * nothing further to do.  May set WF_MIGRATED in *@wake_flags when the
 * task moves to a different CPU.
 */
static inline int select_task_cpu(struct task_struct *p, int cpu, int *wake_flags)
{
    /*
     * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
     * == 0), which means we need to do an enqueue, change p->state to
     * TASK_WAKING such that we can unlock p->pi_lock before doing the
     * enqueue, such as ttwu_queue_wakelist().
     */
    WRITE_ONCE(p->__state, TASK_WAKING);

    /*
     * If the owning (remote) CPU is still in the middle of schedule() with
     * this task as prev, consider queueing p on the remote CPUs wake_list
     * which potentially sends an IPI instead of spinning on p->on_cpu to
     * let the waker make forward progress. This is safe because IRQs are
     * disabled and the IPI will deliver after on_cpu is cleared.
     *
     * Ensure we load task_cpu(p) after p->on_cpu:
     *
     * set_task_cpu(p, cpu);
     *   STORE p->cpu = @cpu
     * __schedule() (switch to task 'p')
     *   LOCK rq->lock
     *   smp_mb__after_spin_lock()		smp_cond_load_acquire(&p->on_cpu)
     *   STORE p->on_cpu = 1		LOAD p->cpu
     *
     * to ensure we observe the correct CPU on which the task is currently
     * scheduling.
     */
    if (smp_load_acquire(&p->on_cpu) &&
        ttwu_queue_wakelist(p, task_cpu(p), *wake_flags))
        return -1;

    /*
     * If the owning (remote) CPU is still in the middle of schedule() with
     * this task as prev, wait until it's done referencing the task.
     *
     * Pairs with the smp_store_release() in finish_task().
     *
     * This ensures that tasks getting woken will be fully ordered against
     * their previous state and preserve Program Order.
     */
    smp_cond_load_acquire(&p->on_cpu, !VAL);

    cpu = select_task_rq(p, p->wake_cpu, wake_flags);
    if (task_cpu(p) != cpu)
    {
        /* Crossing CPUs: flag the migration for the enqueue path. */
        *wake_flags |= WF_MIGRATED;

        set_task_cpu(p, cpu);
    }

    return cpu;
}
#else
/*
 * UP: there is only one CPU the task can run on; the @cpu hint and
 * @wake_flags are ignored and no TASK_WAKING transition is needed.
 */
static inline int select_task_cpu(struct task_struct *p, int cpu, int *wake_flags)
{
    return task_cpu(p);
}
#endif

/************************************************************************/

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
    struct rq_flags rf;
    struct rq *rq;
    int wake_flags = WF_FORK;

    /* pi_lock stabilizes p->__state and the CPU selection below. */
    raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
    /* A new task starts life runnable; there is no sleep to undo. */
    WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
    /*
     * Fork balancing, do it here and not earlier because:
     *  - cpus_ptr can change in the fork path
     *  - any previously selected CPU might disappear through hotplug
     *
     * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
     * as we're not fully set-up yet.
     */
    p->recent_used_cpu = task_cpu(p);
    rseq_migrate(p);
    __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
#endif
    rq = __task_rq_lock(p, &rf);
    update_rq_clock(rq);

    /* First-ever enqueue: ENQUEUE_INITIAL, clock already updated. */
    activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
    trace_sched_wakeup_new(p);
    /* The new task may deserve to preempt whatever runs on rq now. */
    wakeup_preempt(rq, p, wake_flags);

    task_woken(rq, p, &rf);

    task_rq_unlock(rq, p, &rf);
}

/*
 * try_to_wake_up - attempt to wake up a task
 * @p:          the task to be woken
 * @state:      the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Conditionally wake up @p if its state matches @state.  Either flips
 * a still-queued task back to TASK_RUNNING, or performs a full wakeup
 * with CPU selection and enqueue.
 *
 * Return: 1 if the wakeup succeeded from the waker's point of view
 * (including the saved_state case, see ttwu_state_match()), 0 if @p's
 * state did not match @state.
 */
int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
    int cpu, success = 0;

    wake_flags |= WF_TTWU;

    if (p == current)
    {
        /*
         * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
         * == smp_processor_id()'. Together this means we can special
         * case the whole 'p->on_rq && ttwu_runnable()' case below
         * without taking any locks.
         *
         * Specifically, given current runs ttwu() we must be before
         * schedule()'s block_task(), as such this must not observe
         * sched_delayed.
         *
         * In particular:
         *  - we rely on Program-Order guarantees for all the ordering,
         *  - we're serialized against set_special_state() by virtue of
         *    it disabling IRQs (this allows not taking ->pi_lock).
         */
        WARN_ON_ONCE(p->se.sched_delayed);
        /*
         * BUGFIX: the condition was inverted — ttwu_do_wakeup() must
         * run when the state MATCHES (a real wakeup is needed), not
         * when the match fails; compare the !match early-exit in the
         * locked path below.
         */
        if (ttwu_state_match(p, state, &success))
        {
            ttwu_do_wakeup(p);
        }
    }
    else
    {
        /*
         * If we are going to wake up a thread waiting for CONDITION we
         * need to ensure that CONDITION=1 done by the caller can not be
         * reordered with p->state check below. This pairs with smp_store_mb()
         * in set_current_state() that the waiting thread does.
         */
        unsigned long flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);

        /*
         * One-shot block: every 'break' below means the wakeup has
         * been fully handled (or needs no further work).
         */
        do
        {
            smp_mb__after_spinlock();
            if (!ttwu_state_match(p, state, &success))
                break;

            /*
             * Ensure we load p->on_rq _after_ p->state, otherwise it would
             * be possible to, falsely, observe p->on_rq == 0 and get stuck
             * in smp_cond_load_acquire() below.
             *
             * sched_ttwu_pending()			try_to_wake_up()
             *   STORE p->on_rq = 1			  LOAD p->state
             *   UNLOCK rq->lock
             *
             * __schedule() (switch to task 'p')
             *   LOCK rq->lock			  smp_rmb();
             *   smp_mb__after_spinlock();
             *   UNLOCK rq->lock
             *
             * [task p]
             *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
             *
             * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
             * __schedule().  See the comment for smp_mb__after_spinlock().
             *
             * A similar smp_rmb() lives in __task_needs_rq_lock().
             */
            smp_rmb();
            if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
                break;

            /*
             * BUGFIX: seed CPU selection with the task's current CPU;
             * 'cpu' was previously passed here while uninitialized.
             */
            cpu = select_task_cpu(p, task_cpu(p), &wake_flags);
            if (cpu < 0)
                break;

            ttwu_queue(p, cpu, wake_flags);
        } while (0);

        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
    }

    if (success)
        ttwu_stat(p, task_cpu(p), wake_flags);

    return success;
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * This function executes a full memory barrier before accessing the task state.
 */
int wake_up_process(struct task_struct *p)
{
    /* Wake @p from any TASK_NORMAL sleep state; see try_to_wake_up(). */
    int woken = try_to_wake_up(p, TASK_NORMAL, 0);

    return woken;
}

/* Like wake_up_process(), but only wakes @p if its state is in @state. */
int wake_up_state(struct task_struct *p, unsigned int state)
{
    int woken = try_to_wake_up(p, state, 0);

    return woken;
}
