#include <linux/hrtimer.h>

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * Wait for the thread to block in any of the states set in @match_state.
 * If it changes, i.e. @p might have woken up, then return zero.  When we
 * succeed in waiting for @p to be off its CPU, we return a positive number
 * (its total switch count).  If a second call a short while later returns the
 * same number, the caller can be sure that @p has remained unscheduled the
 * whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
    int running, queued, match;
    struct rq_flags rf;
    unsigned long ncsw;
    struct rq *rq;

    for (;;)
    {
        /*
         * We do the initial early heuristics without holding
         * any task-queue locks at all. We'll only try to get
         * the runqueue lock when things look like they will
         * work out!
         */
        rq = task_rq(p);

        /*
         * If the task is actively running on another CPU
         * still, just relax and busy-wait without holding
         * any locks.
         *
         * NOTE! Since we don't hold any locks, it's not
         * even sure that "rq" stays as the right runqueue!
         * But we don't care, since "task_on_cpu()" will
         * return false if the runqueue has changed and p
         * is actually now running somewhere else!
         */
        while (task_on_cpu(rq, p))
        {
            /*
             * Bail immediately if the task's state no longer
             * matches what the caller asked to wait for --
             * it may have woken up while we were spinning.
             */
            if (!task_state_match(p, match_state))
                return 0;
            cpu_relax();
        }

        /*
         * Ok, time to look more closely! We need the rq
         * lock now, to be *sure*. If we're wrong, we'll
         * just go back and repeat.
         */
        rq = task_rq_lock(p, &rf);
        /*
         * If task is sched_delayed, force dequeue it, to avoid always
         * hitting the tick timeout in the queued case
         */
        if (p->se.sched_delayed)
            dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED);
        trace_sched_wait_task(p);
        /* Snapshot the task's scheduling status while holding the rq lock. */
        running = task_on_cpu(rq, p);
        queued = task_on_rq_queued(p);
        ncsw = 0;
        if ((match = __task_state_match(p, match_state)))
        {
            /*
             * When matching on p->saved_state, consider this task
             * still queued so it will wait.
             */
            if (match < 0)
                queued = 1;
            /*
             * Tag the voluntary context-switch count with the MSB so
             * the result is guaranteed non-zero even when nvcsw == 0,
             * keeping it distinguishable from the "state changed"
             * zero return.
             */
            ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
        }
        task_rq_unlock(rq, p, &rf);

        /*
         * If it changed from the expected state, bail out now.
         */
        if (unlikely(!ncsw))
            break;

        /*
         * Was it really running after all now that we
         * checked with the proper locks actually held?
         *
         * Oops. Go back and try again..
         */
        if (unlikely(running))
        {
            cpu_relax();
            continue;
        }

        /*
         * It's not enough that it's not actively running,
         * it must be off the runqueue _entirely_, and not
         * preempted!
         *
         * So if it was still runnable (but just not actively
         * running right now), it's preempted, and we should
         * yield - it could be a while.
         */
        if (unlikely(queued))
        {
            /* Sleep for roughly one scheduler tick before retrying. */
            ktime_t to = NSEC_PER_SEC / HZ;

            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
            continue;
        }

        /*
         * Ahh, all good. It wasn't running, and it wasn't
         * runnable, which means that it will never become
         * running in the future either. We're all done!
         */
        break;
    }

    /* Non-zero (MSB-tagged switch count) on success, 0 if the state changed. */
    return ncsw;
}
