// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

#include <linux/wait.h>
#include <linux/list.h>
#include <linux/sched.h>

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
                            int nr_exclusive, int wake_flags, uintptr_t key)
{
    wait_queue_entry_t *wq, *tmp;

    /* Empty queue: every requested exclusive slot is left unused. */
    if (list_empty(&wq_head->head))
        return nr_exclusive;

    list_for_each_entry_safe(wq, tmp, &wq_head->head, entry)
    {
        /* Snapshot flags first: wq->func() may remove/free the entry. */
        unsigned wq_flags = wq->flags;
        int woken;

        woken = wq->func(wq, mode, wake_flags, key);
        if (woken < 0)
            break;
        /* Only a successful wake of an exclusive waiter consumes a slot. */
        if (woken && (wq_flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
            break;
    }

    /* Remaining (unconsumed) exclusive count. */
    return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
                                 int nr_exclusive, int wake_flags, uintptr_t key)
{
    unsigned long irq_flags;
    int left;

    /* Serialize against concurrent add/remove/wakeup on this queue. */
    spin_lock_irqsave(&wq_head->lock, irq_flags);
    left = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key);
    spin_unlock_irqrestore(&wq_head->lock, irq_flags);

    /* Convert "slots left over" into "exclusive tasks actually woken". */
    return nr_exclusive - left;
}

static inline void __remove_wait_queue(struct wait_queue_head *wq_head,
                                       struct wait_queue_entry *wq_entry)
{
    /* Unlink and reinitialize the node so the entry can be queued again. */
    struct list_head *node = &wq_entry->entry;

    list_del_init(node);
}

/*
 * Initialize a wait queue head: lock plus an empty waiter list.
 *
 * @name and @key exist for interface compatibility with lockdep-enabled
 * builds; this implementation does not use them.  Cast them to void so
 * the compiler does not warn about unused parameters.
 */
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name,
                           struct lock_class_key *key)
{
    (void)name;
    (void)key;

    spin_lock_init(&wq_head->lock);
    INIT_LIST_HEAD(&wq_head->head);
}

/*
 * Queue @wq_entry, keeping all WQ_FLAG_PRIORITY entries at the front:
 * the new entry is inserted right after the last priority entry (or at
 * the head when there are none).  Caller must hold wq_head->lock.
 */
void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
    struct list_head *insert_after = &wq_head->head;
    struct wait_queue_entry *pos;

    list_for_each_entry(pos, &wq_head->head, entry)
    {
        if (!(pos->flags & WQ_FLAG_PRIORITY))
            break;
        insert_after = &pos->entry;
    }

    list_add(&wq_entry->entry, insert_after);
}

/*
 * Add a non-exclusive waiter to @wq_head.  A second call for an entry
 * that is already queued (->added set) is a silent no-op.
 */
void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
    unsigned long irq_flags;

    if (wq_entry->added)
        return;

    /* This entry point always queues non-exclusively. */
    wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&wq_head->lock, irq_flags);
    __add_wait_queue(wq_head, wq_entry);
    spin_unlock_irqrestore(&wq_head->lock, irq_flags);
    wq_entry->added = 1;
}

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
    unsigned long flags;

    spin_lock_irqsave(&wq_head->lock, flags);
    __remove_wait_queue(wq_head, wq_entry);
    spin_unlock_irqrestore(&wq_head->lock, flags);
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.
 *
 * Return: the number of exclusive tasks that were woken.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
              int nr_exclusive, uintptr_t key)
{
    int woken;

    /* No wake flags for the plain wakeup path. */
    woken = __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);

    return woken;
}

/*
 * Initialize a wait queue entry for the current task with a custom
 * wakeup callback.  The entry starts off no queue (->added clear).
 */
void __init_wait_func(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
    wq_entry->flags = 0;
    wq_entry->private = current;
    wq_entry->func = func;
    wq_entry->added = 0;
    INIT_LIST_HEAD(&wq_entry->entry);
}

/*
 * Wakeup callback for waiters parked via wait_woken(): publish
 * WQ_FLAG_WOKEN so the sleeper sees it, then perform the default wakeup.
 * The barrier must come before the flag store — see the ordering diagram
 * above wait_woken().
 */
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, uintptr_t key)
{
    /* Pairs with the smp_store_mb() in wait_woken(). */
    smp_mb(); /* C */
    wq_entry->flags |= WQ_FLAG_WOKEN;

    return default_wake_function(wq_entry, mode, sync, key);
}

/*
 * Default wakeup callback: wake whatever task was stashed in ->private
 * by __init_wait_func().  Forwards try_to_wake_up()'s result (nonzero if
 * the task was actually woken).
 */
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
                          uintptr_t key)
{
    void *task = curr->private;

    return try_to_wake_up(task, mode, wake_flags);
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
/*
 * Sleep (in @mode) until woken_wake_function() sets WQ_FLAG_WOKEN on
 * @wq_entry or @timeout expires; clears the flag before returning so the
 * entry can be reused on the next loop iteration.  Returns the timeout
 * value handed back by schedule_timeout().  See the diagram above for
 * the A/B/C barrier pairing with woken_wake_function().
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
    /*
     * The below executes an smp_mb(), which matches with the full barrier
     * executed by the try_to_wake_up() in woken_wake_function() such that
     * either we see the store to wq_entry->flags in woken_wake_function()
     * or woken_wake_function() sees our store to current->state.
     */
    if (!(wq_entry->flags & WQ_FLAG_WOKEN))
    {
        /* Skip sleeping entirely if the wakeup already happened. */
        set_current_state(mode); /* A */
        timeout = schedule_timeout(timeout);
        __set_current_state(TASK_RUNNING);
    }

    /*
     * The below executes an smp_mb(), which matches with the smp_mb() (C)
     * in woken_wake_function() such that either we see the wait condition
     * being true or the store to wq_entry->flags in woken_wake_function()
     * follows ours in the coherence order.
     */
    smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

    return timeout;
}
