#include "../inc/sched.h"

#include <linux/lockdep.h>
#include <trace/events/sched.h>

/*
 * Wakeup-statistics accounting hook. Stubbed out in this build: no
 * schedstat bookkeeping is performed. Kept so try_to_wake_up() retains
 * the upstream call structure.
 */
static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
}

/*
 * Try to hand the wakeup of @p to @cpu via a remote wake list.
 *
 * This stub never queues remotely and always returns false, so callers
 * (ttwu_queue()) unconditionally fall back to the locked enqueue path.
 */
static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
    return false;
}

/*
 * Mark the task runnable.
 *
 * Must be the final step of a wakeup: once p->__state reads TASK_RUNNING
 * the task is observably awake. The WRITE_ONCE() pairs with the
 * READ_ONCE(p->__state) in __task_state_match().
 */
static inline void ttwu_do_wakeup(struct task_struct *p)
{
    WRITE_ONCE(p->__state, TASK_RUNNING);
}

/*
 * Enqueue @p on @rq and complete the wakeup: activate the task, let it
 * contend for preemption of the current task, then mark it TASK_RUNNING.
 *
 * Caller holds rq->lock with the clock already updated, hence
 * ENQUEUE_NOCLOCK.
 */
static void ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
                             struct rq_flags *rf)
{
    activate_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
    wakeup_preempt(rq, p, wake_flags);
    ttwu_do_wakeup(p);
}

/*
 * Final stage of try_to_wake_up(): place @p on @cpu's runqueue.
 *
 * First offer the wakeup to the remote wake-list fast path; if that
 * declines, take the runqueue lock and activate the task directly.
 */
static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
    struct rq_flags rf;
    struct rq *rq;

    if (ttwu_queue_wakelist(p, cpu, wake_flags))
        return;

    rq = cpu_rq(cpu);
    rq_lock(rq, &rf);
    update_rq_clock(rq);
    ttwu_do_activate(rq, p, wake_flags, &rf);
    rq_unlock(rq, &rf);
}

/*
 * Match @p's state against the waker's @state mask.
 *
 * Returns  1 if p->__state matches (regular wakeup applies),
 *         -1 if only p->saved_state matches (task is parked on an RT
 *            lock / frozen; see ttwu_state_match()),
 *          0 if neither matches.
 */
static inline int __task_state_match(struct task_struct *p, unsigned int state)
{
    if (state & READ_ONCE(p->__state))
        return 1;

    return (state & READ_ONCE(p->saved_state)) ? -1 : 0;
}

/*
 * Invoked from try_to_wake_up() to check whether the task can be woken up.
 *
 * The caller holds p::pi_lock if p != current or has preemption
 * disabled when p == current.
 *
 * The rules of saved_state:
 *
 *   The related locking code always holds p::pi_lock when updating
 *   p::saved_state, which means the code is fully serialized in both cases.
 *
 *   For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT.
 *   No other bits set. This allows to distinguish all wakeup scenarios.
 *
 *   For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This
 *   allows us to prevent early wakeup of tasks before they can be run on
 *   asymmetric ISA architectures (eg ARMv9).
 */
static inline bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
{
    int match = __task_state_match(p, state);

    /* Report success for either a __state or a saved_state match. */
    *success = (match != 0);

    /*
     * Saved state preserves the task state across blocking on
     * an RT lock or TASK_FREEZABLE tasks.  If the state matches,
     * set p::saved_state to TASK_RUNNING, but do not wake the task
     * because it waits for a lock wakeup or __thaw_task(). Also
     * indicate success because from the regular waker's point of
     * view this has succeeded.
     *
     * After acquiring the lock the task will restore p::__state
     * from p::saved_state which ensures that the regular
     * wakeup is not lost. The restore will also set
     * p::saved_state to TASK_RUNNING so any further tests will
     * not result in false positives vs. @success
     */
    if (match < 0)
        p->saved_state = TASK_RUNNING;

    /* Only a direct __state match warrants an actual wakeup. */
    return match > 0;
}

/************************************************************************/

/*
 * Consider @p being inside a wait loop:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *
 *      if (CONDITION)
 *         break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * between set_current_state() and schedule(). In this case @p is still
 * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
 * an atomic manner.
 *
 * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
 * then schedule() must still happen and p->state can be changed to
 * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
 * need to do a full wakeup with enqueue.
 *
 * Returns: %true when the wakeup is done,
 *          %false otherwise.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
    struct rq_flags rf;
    struct rq *rq;
    int woken = 0;

    rq = __task_rq_lock(p, &rf);
    if (task_on_rq_queued(p)) {
        update_rq_clock(rq);
        /* A delayed-dequeue task must be requeued before it can run. */
        if (p->se.sched_delayed)
            enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
        /*
         * When on_rq && !on_cpu the task is preempted, see if
         * it should preempt the task that is current now.
         */
        if (!task_on_cpu(rq, p))
            wakeup_preempt(rq, p, wake_flags);
        ttwu_do_wakeup(p);
        woken = 1;
    }
    __task_rq_unlock(rq, &rf);

    return woken;
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
    struct rq_flags rf;
    struct rq *rq;
    int wake_flags = WF_FORK;

    /* pi_lock serializes against concurrent wakeups / state updates. */
    raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
    /* The child has never run; it can be marked runnable before enqueue. */
    WRITE_ONCE(p->__state, TASK_RUNNING);
#ifdef CONFIG_SMP
    /*
     * Fork balancing, do it here and not earlier because:
     *  - cpus_ptr can change in the fork path
     *  - any previously selected CPU might disappear through hotplug
     *
     * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
     * as we're not fully set-up yet.
     */
    /* Seed the wake-affinity hint with the CPU the child was forked on. */
    p->recent_used_cpu = task_cpu(p);
    rseq_migrate(p);
    __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags));
#endif
    rq = __task_rq_lock(p, &rf);
    update_rq_clock(rq);

    /* First-ever enqueue: ENQUEUE_INITIAL lets the class place the task. */
    activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
    trace_sched_wakeup_new(p);
    /* The child may be entitled to preempt whoever is running here. */
    wakeup_preempt(rq, p, wake_flags);
#ifdef CONFIG_SMP
    if (p->sched_class->task_woken)
    {
        /*
         * Nothing relies on rq->lock after this, so it's fine to
         * drop it.
         */
        rq_unpin_lock(rq, &rf);
        p->sched_class->task_woken(rq, p);
        rq_repin_lock(rq, &rf);
    }
#endif
    task_rq_unlock(rq, p, &rf);
}

/*
 * try_to_wake_up - attempt to wake up a task
 * @p: the thread to be awakened
 * @state: mask of task states that can be woken (e.g. TASK_NORMAL)
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Conceptually: if (@p->state & @state), set the task to TASK_RUNNING and,
 * unless it is already runnable, enqueue it on a runqueue.
 *
 * Return: 1 if @p's state matched @state and a wakeup was performed (or the
 * wakeup is guaranteed by a pending saved_state/lock wakeup), 0 otherwise.
 */
int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
    int cpu, success = 0;

    wake_flags |= WF_TTWU;

    if (p == current) {
        /*
         * We're waking current: it is already on a runqueue and cannot
         * be migrated out from under us, so no locks or enqueue are
         * needed — just match the state and flip __state back to
         * TASK_RUNNING. (An empty branch here would silently turn a
         * self-wakeup into a lost wakeup.)
         */
        if (ttwu_state_match(p, state, &success))
            ttwu_do_wakeup(p);
        goto out;
    }

    {
        /*
         * If we are going to wake up a thread waiting for CONDITION we
         * need to ensure that CONDITION=1 done by the caller can not be
         * reordered with p->state check below. This pairs with smp_store_mb()
         * in set_current_state() that the waiting thread does.
         */
        unsigned long flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        /* Upgrade the lock's acquire to the full barrier promised above. */
        smp_mb__after_spinlock();

        do {
            if (!ttwu_state_match(p, state, &success))
                break;

            /* Still queued? Then a lightweight wakeup suffices. */
            if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
                break;

            /* Fully blocked: do the complete enqueue-based wakeup. */
            cpu = task_cpu(p);
            ttwu_queue(p, cpu, wake_flags);
        } while (0);

        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
    }

out:
    if (success)
        ttwu_stat(p, task_cpu(p), wake_flags);

    return success;
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * This function executes a full memory barrier before accessing the task state.
 */
int wake_up_process(struct task_struct *p)
{
    /* TASK_NORMAL: wake from either interruptible or uninterruptible sleep. */
    return try_to_wake_up(p, TASK_NORMAL, 0);
}

void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
{
    struct task_struct *donor = rq->donor;

    if (p->sched_class == donor->sched_class)
        donor->sched_class->wakeup_preempt(rq, p, flags);
    else if (sched_class_above(p->sched_class, donor->sched_class))
        resched_curr(rq);

    /*
     * A queue event has occurred, and we're going to schedule.  In
     * this case, we can save a useless back to back clock update.
     */
    if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
        rq_clock_skip_update(rq);
}

/**
 * wake_up_state - Wake up a task if it is in one of the given states
 * @p: The process to be woken up.
 * @state: Mask of task states that may be woken.
 *
 * Return: 1 if @p was woken up, 0 otherwise.
 */
int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
