/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <seminix/sched.h>
#include <seminix/sched/signal.h>
#include <seminix/sched/task.h>
#include <seminix/kthread.h>
#include <seminix/completion.h>
#include <seminix/slab.h>
#include <seminix/spinlock.h>
#include <seminix/uaccess.h>
#include <seminix/cpumask.h>

/* Protects kthread_create_list. */
static DEFINE_SPINLOCK(kthread_create_lock);
/* Pending creation requests, serviced FIFO by kthreadd(). */
static LIST_HEAD(kthread_create_list);
/* The kthreadd daemon task; woken whenever a request is queued. */
struct task_struct *kthreadd_task;

/*
 * One queued thread-creation request, allocated by ____kthread_create()
 * and handed to kthreadd via kthread_create_list.
 */
struct kthread_create_info {
    /* Information passed to kthread() from kthreadd. */
    int (*threadfn)(void *data);
    void *data;

    /* Result passed back to kthread_create() from kthreadd. */
    struct task_struct *result;      /* new task or ERR_PTR() */
    struct completion *done;         /* creator's on-stack completion;
                                      * xchg'd to NULL by whichever side
                                      * wins the SIGKILL race */

    struct list_head list;           /* link on kthread_create_list */
};

/* Per-kthread state, reachable from a task via to_kthread(). */
struct kthread {
    unsigned long flags;        /* bitmap of KTHREAD_BITS */
    unsigned int cpu;           /* bound CPU for KTHREAD_IS_PER_CPU threads */
    void *data;                 /* threadfn argument, see kthread_data() */
    struct completion parked;   /* completed each time the thread parks */
    struct completion exited;   /* completed on exit (via ->vfork_done) */
};

/* Bit numbers used in struct kthread::flags. */
enum KTHREAD_BITS {
    KTHREAD_IS_PER_CPU = 0,     /* re-bind to ->cpu on unpark */
    KTHREAD_SHOULD_STOP,        /* set by kthread_stop() */
    KTHREAD_SHOULD_PARK,        /* set by kthread_park() */
};

/* Attach @kthread as current's per-kthread state (see to_kthread()). */
static inline void set_kthread_struct(void *kthread)
{
    /*
     * We abuse ->set_child_tid to avoid the new member and because it
     * can't be wrongly copied by copy_process(). We also rely on fact
     * that the caller can't exec, so PF_KTHREAD can't be cleared.
     */
    current->set_child_tid = (__force void __user *)kthread;
}

/*
 * Fetch @k's struct kthread stashed in ->set_child_tid by
 * set_kthread_struct().  May return NULL (see free_kthread_struct()).
 * Only valid for PF_KTHREAD tasks, hence the WARN.
 */
static inline struct kthread *to_kthread(struct task_struct *k)
{
    WARN_ON(!(k->flags & PF_KTHREAD));
    return (__force void *)k->set_child_tid;
}

/* Release @k's struct kthread, if it ever had one. */
void free_kthread_struct(struct task_struct *k)
{
    /*
     * to_kthread() can yield NULL when this kthread was created by
     * kernel_thread() directly, or when the kmalloc() in kthread()
     * failed; kfree() tolerates NULL so no explicit check is needed.
     */
    kfree(to_kthread(k));
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
    return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
    return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
    return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
    struct kthread *kthread = to_kthread(task);
    void *data = NULL;

    /*
     * Fault-tolerant read: if ->data (or kthread itself) is not a
     * valid kernel pointer, the probe fails and data stays NULL, so
     * the return value of probe_kernel_read() needs no checking.
     */
    probe_kernel_read(&data, &kthread->data, sizeof(data));
    return data;
}

/*
 * Core of kthread_parkme(): loop in TASK_PARKED, signalling ->parked
 * each time around, until KTHREAD_SHOULD_PARK is cleared by
 * kthread_unpark().
 */
static void __kthread_parkme(struct kthread *self)
{
    for (;;) {
        /*
         * TASK_PARKED is a special state; we must serialize against
         * possible pending wakeups to avoid store-store collisions on
         * task->state.
         *
         * Such a collision might possibly result in the task state
         * changing from TASK_PARKED and us failing the
         * wait_task_inactive() in kthread_park().
         */
        set_special_state(TASK_PARKED);
        if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
            break;

        /* Tell kthread_park() we are about to schedule() out parked. */
        complete(&self->parked);
        schedule();
    }
    __set_current_state(TASK_RUNNING);
}

/* Park the calling kthread; returns once kthread_unpark() is called. */
void kthread_parkme(void)
{
    struct kthread *self = to_kthread(current);

    __kthread_parkme(self);
}

/*
 * Entry point for every kthread spawned by kthreadd.  @_create points
 * at the kthread_create_info queued by ____kthread_create().  Sets up
 * the per-thread struct kthread, reports the result back through
 * create->done, then sleeps until woken (or told to stop/park) before
 * running the requested threadfn.
 */
static int kthread(void *_create)
{
    /* Copy data: it's on kthread's stack */
    struct kthread_create_info *create = _create;
    int (*threadfn)(void *data) = create->threadfn;
    void *data = create->data;
    struct completion *done;
    struct kthread *self;
    int ret;

    self = kzalloc(sizeof(*self), GFP_KERNEL);
    set_kthread_struct(self);

    /* If user was SIGKILLed, I release the structure. */
    done = xchg(&create->done, NULL);
    if (!done) {
        /* Creator already gave up; nobody else will free @create. */
        kfree(create);
        do_exit(-EINTR);
    }

    if (!self) {
        create->result = ERR_PTR(-ENOMEM);
        complete(done);
        do_exit(-ENOMEM);
    }

    self->data = data;
    init_completion(&self->exited);
    init_completion(&self->parked);
    /* kthread_stop() waits on ->exited through ->vfork_done. */
    current->vfork_done = &self->exited;

    /* OK, tell user we're spawned, wait for stop or wakeup */
    /*
     * State is set before complete(done) so a wakeup issued by the
     * creator right after the completion cannot be lost before the
     * schedule() below.
     */
    __set_current_state(TASK_UNINTERRUPTIBLE);
    create->result = current;
    complete(done);
    schedule();

    ret = -EINTR;
    if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
        /* Honour a park request issued before the first wakeup. */
        __kthread_parkme(self);
        ret = threadfn(data);
    }
    do_exit(ret);
}

/*
 * Runs in kthreadd context: actually spawn the kernel thread described
 * by @create, reporting spawn failure back to the waiting creator.
 * On success the new thread itself completes the handshake in kthread().
 */
static void create_kthread(struct kthread_create_info *create)
{
    int pid;

    /* We want our own signal handler (we take no signals by default). */
    pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
    if (pid < 0) {
        /* If user was SIGKILLed, I release the structure. */
        struct completion *done = xchg(&create->done, NULL);

        if (!done) {
            kfree(create);
            return;
        }
        create->result = ERR_PTR(pid);
        complete(done);
    }
}

/*
 * ____kthread_create - queue a creation request and wait for the result
 * @threadfn: function the new thread will run.
 * @data: argument passed to @threadfn.
 * @namefmt: printf-style name for the new thread.
 * @args: va_list matching @namefmt.
 *
 * Queues a request on kthread_create_list, wakes kthreadd and waits
 * (killably) for the new thread to report back.  Returns the new
 * task_struct in TASK_UNINTERRUPTIBLE state, or an ERR_PTR() on
 * allocation failure / SIGKILL / spawn failure.
 */
static __printf(3, 0)
struct task_struct *____kthread_create(int (*threadfn)(void *data),
    void *data, const char namefmt[], va_list args)
{
    /*
     * This completion lives on the stack, so it must be declared with
     * the _ONSTACK variant: that gives lockdep a per-instance class
     * instead of one static class shared across all callers.
     */
    DECLARE_COMPLETION_ONSTACK(done);
    struct task_struct *task;
    struct kthread_create_info *create = kmalloc(sizeof(*create), GFP_KERNEL);

    if (!create)
        return ERR_PTR(-ENOMEM);
    create->threadfn = threadfn;
    create->data = data;
    create->done = &done;

    /* Publish the request and kick kthreadd to service it. */
    spin_lock(&kthread_create_lock);
    list_add_tail(&create->list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    /*
     * Wait for completion in killable state, for I might be chosen by
     * the OOM killer while kthreadd is trying to allocate memory for
     * new kernel thread.
     */
    if (unlikely(wait_for_completion_killable(&done))) {
        /*
         * If I was SIGKILLed before kthreadd (or new kernel thread)
         * calls complete(), leave the cleanup of this structure to
         * that thread.
         */
        if (xchg(&create->done, NULL))
            return ERR_PTR(-EINTR);
        /*
         * kthreadd (or new kernel thread) will call complete()
         * shortly.
         */
        wait_for_completion(&done);
    }
    task = create->result;
    if (!IS_ERR(task)) {
        static const struct sched_param param = { .sched_priority = 0 };
        char name[TASK_COMM_LEN];

        /*
         * task is already visible to other tasks, so updating
         * COMM must be protected.
         */
        vsnprintf(name, sizeof(name), namefmt, args);
        set_task_comm(task, name);
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties.
         */
        sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(task, cpu_all_mask);
    }
    kfree(create);
    return task;
}

/*
 * Variadic front-end to ____kthread_create(): formats @namefmt with
 * the trailing arguments and returns the new task (or an ERR_PTR()).
 */
struct task_struct *__kthread_create(int (*threadfn)(void *data),
                       void *data, const char namefmt[], ...)
{
    va_list args;
    struct task_struct *task;

    va_start(args, namefmt);
    task = ____kthread_create(threadfn, data, namefmt, args);
    va_end(args);
    return task;
}

/*
 * Force @p's CPU affinity to @mask and forbid later userspace changes
 * (PF_NO_SETAFFINITY).  @p must currently be blocked in @state; if
 * wait_task_inactive() cannot confirm that, warn and do nothing.
 */
static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
    unsigned long flags;

    if (!wait_task_inactive(p, state)) {
        WARN_ON(1);
        return;
    }

    /* It's safe because the task is inactive. */
    raw_spin_lock_irqsave(&p->pi_lock, flags);
    do_set_cpus_allowed(p, mask);
    p->flags |= PF_NO_SETAFFINITY;
    raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

/* Single-CPU convenience wrapper around __kthread_bind_mask(). */
static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
    __kthread_bind_mask(p, cpumask_of(cpu), state);
}

/*
 * Bind a just-created (still TASK_UNINTERRUPTIBLE) kthread to the CPUs
 * in @mask.  See kthread_bind() for the single-CPU variant.
 */
void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
    __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
    __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                      void *data, unsigned int cpu,
                      const char *namefmt)
{
    struct task_struct *p;

    p = __kthread_create(threadfn, data, namefmt, cpu);
    if (IS_ERR(p))
        return p;
    kthread_bind(p, cpu);
    /* CPU hotplug need to bind once again when unparking the thread. */
    set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
    to_kthread(p)->cpu = cpu;
    return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it so
 * it leaves __kthread_parkme().  If the thread is marked percpu then
 * it is bound to the cpu again first.
 */
void kthread_unpark(struct task_struct *k)
{
    struct kthread *kthread = to_kthread(k);

    /*
     * Newly created kthread was parked when the CPU was offline.
     * The binding was lost and we need to set it again.
     */
    if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
        __kthread_bind(k, kthread->cpu, TASK_PARKED);

    clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
    /*
     * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
     */
    wake_up_state(k, TASK_PARKED);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
    struct kthread *kthread = to_kthread(k);

    /* An exiting thread can never park. */
    if (WARN_ON(k->flags & PF_EXITING))
        return -ENOSYS;

    /* Double-park is a caller bug. */
    if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
        return -EBUSY;

    set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
    if (k != current) {
        wake_up_process(k);
        /*
         * Wait for __kthread_parkme() to complete(), this means we
         * _will_ have TASK_PARKED and are about to call schedule().
         */
        wait_for_completion(&kthread->parked);
        /*
         * Now wait for that schedule() to complete and the task to
         * get scheduled out.
         */
        WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
    }

    return 0;
}

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    /* Pin @k so ->exit_code stays readable after the thread exits. */
    get_task_struct(k);
    kthread = to_kthread(k);
    set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
    /* Unpark first so a parked thread can observe SHOULD_STOP. */
    kthread_unpark(k);
    wake_up_process(k);
    /* ->exited is completed through ->vfork_done when kthread() exits. */
    wait_for_completion(&kthread->exited);
    ret = k->exit_code;
    put_task_struct(k);
    return ret;
}

/*
 * Main loop of the kthreadd daemon: sleep until ____kthread_create()
 * queues a request and wakes us, then spawn every pending request.
 * Never returns in practice.
 */
int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);

    for (;;) {
        /*
         * Set state before checking the list so a wakeup from a
         * concurrent enqueue is not lost between test and schedule().
         */
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                        struct kthread_create_info, list);
            list_del_init(&create->list);
            /* Drop the lock while spawning; it can sleep/allocate. */
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}
