#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/sched/task.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/slab.h>     /* kzalloc_node()/kfree(); <linux/stdlib.h> does not exist */
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/bug.h>
#include <linux/smpboot.h>
#include <linux/kthread.h>

#include "inc/smpboot.h"

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

/* Per-thread bookkeeping handed to smpboot_thread_fn() as kthread data.
 * Allocated in __smpboot_create_thread(), freed by the thread itself on
 * kthread_should_stop(). */
struct smpboot_thread_data
{
    unsigned int cpu;              /* CPU this thread is bound to */
    unsigned int status;           /* HP_THREAD_* lifecycle state */
    struct smp_hotplug_thread *ht; /* owning hotplug thread descriptor */
};

/* Lifecycle states kept in smpboot_thread_data::status. */
enum
{
    HP_THREAD_NONE = 0,  /* freshly created, ht->setup() not yet run */
    HP_THREAD_ACTIVE,    /* setup()/unpark() done, thread is servicing */
    HP_THREAD_PARKED,    /* ht->park() done, thread sits in kthread_parkme() */
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Always returns 0; the thread exits via the kthread_should_stop()
 * path after running cleanup and freeing its data.
 */
static int smpboot_thread_fn(void *data)
{
    struct smpboot_thread_data *td = data;
    struct smp_hotplug_thread *ht = td->ht;

    while (1)
    {
        /* Go INTERRUPTIBLE *before* testing stop/park so that a wakeup
         * arriving between the test and schedule() is not lost. */
        set_current_state(TASK_INTERRUPTIBLE);
        /* Keep us on this CPU so the smp_processor_id() checks below
         * cannot be invalidated by migration. */
        preempt_disable();
        if (kthread_should_stop())
        {
            __set_current_state(TASK_RUNNING);
            preempt_enable();
            /* cleanup must mirror setup: only run it if setup ran */
            if (ht->cleanup && td->status != HP_THREAD_NONE)
                ht->cleanup(td->cpu, cpu_online(td->cpu));
            /* The thread owns its data block; nothing references it
             * after this point. */
            kfree(td);
            return 0;
        }

        if (kthread_should_park())
        {
            __set_current_state(TASK_RUNNING);
            preempt_enable();
            /* Only call park() on an ACTIVE->PARKED transition */
            if (ht->park && td->status == HP_THREAD_ACTIVE)
            {
                BUG_ON(td->cpu != smp_processor_id());
                ht->park(td->cpu);
                td->status = HP_THREAD_PARKED;
            }
            kthread_parkme();
            /* We might have been woken for stop */
            continue;
        }

        BUG_ON(td->cpu != smp_processor_id());

        /* Check for state change setup: run the transition callback with
         * preemption enabled, then loop back to re-check stop/park. */
        switch (td->status)
        {
        case HP_THREAD_NONE:
            /* First pass on this CPU: run one-time setup */
            __set_current_state(TASK_RUNNING);
            preempt_enable();
            if (ht->setup)
                ht->setup(td->cpu);
            td->status = HP_THREAD_ACTIVE;
            continue;

        case HP_THREAD_PARKED:
            /* Returning from kthread_parkme(): undo the park() work */
            __set_current_state(TASK_RUNNING);
            preempt_enable();
            if (ht->unpark)
                ht->unpark(td->cpu);
            td->status = HP_THREAD_ACTIVE;
            continue;
        }

        if (!ht->thread_should_run(td->cpu))
        {
            /* Nothing to do: sleep in the INTERRUPTIBLE state set above.
             * no_resched variant because schedule() follows immediately. */
            preempt_enable_no_resched();
            schedule();
        }
        else
        {
            __set_current_state(TASK_RUNNING);
            preempt_enable();
            ht->thread_fn(td->cpu);
        }
    }

    return 0;
}

/**
 * __smpboot_create_thread - create the per-cpu thread of @ht for @cpu
 * @ht:		Hotplug thread descriptor
 * @cpu:	CPU for which the thread is created
 *
 * Idempotent: returns 0 immediately if the per-cpu slot is already
 * populated. On success the created thread is parked and published in
 * ht->store with a task reference held. Returns -ENOMEM or the
 * kthread_create_on_cpu() error on failure.
 */
static int __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
    struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
    struct smpboot_thread_data *td;

    if (tsk)
        return 0;

    /* Allocate on the CPU's home node; freed by smpboot_thread_fn()
     * when the thread is stopped. */
    td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
    if (!td)
        return -ENOMEM;
    td->cpu = cpu;
    td->ht = ht;

    tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
                                ht->thread_comm);
    if (IS_ERR(tsk))
    {
        /* Thread never ran, so ownership of td stays here */
        kfree(td);
        return PTR_ERR(tsk);
    }
    kthread_set_per_cpu(tsk, cpu);
    /*
     * Park the thread so that it could start right on the CPU
     * when it is available.
     */
    kthread_park(tsk);
    /* Reference dropped by kthread_stop_put() in smpboot_destroy_threads() */
    get_task_struct(tsk);
    *per_cpu_ptr(ht->store, cpu) = tsk;
    if (ht->create)
    {
        /*
         * Make sure that the task has actually scheduled out
         * into park position, before calling the create
         * callback. At least the migration thread callback
         * requires that the task is off the runqueue.
         */
        if (!wait_task_inactive(tsk, TASK_PARKED))
            WARN_ON(1);
        else
            ht->create(cpu);
    }
    return 0;
}

/**
 * smpboot_destroy_threads - stop and release all per-cpu threads of @ht
 * @ht:	Hotplug thread descriptor
 *
 * Walks every possible CPU (parked threads of offline CPUs must be
 * reaped too), stops each thread, drops the reference taken at
 * creation time and clears the per-cpu slot.
 */
static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
    unsigned int cpu;

    /* We need to destroy also the parked threads of offline cpus */
    for_each_possible_cpu(cpu)
    {
        struct task_struct **slot = per_cpu_ptr(ht->store, cpu);

        if (!*slot)
            continue;

        kthread_stop_put(*slot);
        *slot = NULL;
    }
}

/**
 * smpboot_unpark_thread - unpark the per-cpu thread of @ht for @cpu
 * @ht:		Hotplug thread descriptor
 * @cpu:	CPU whose thread is unparked
 *
 * Self-parking threads manage their own park/unpark transitions, so
 * they are left alone. The NULL check guards against a per-cpu slot
 * that was never populated (thread creation failed or never ran for
 * this cpu), mirroring smpboot_park_thread().
 */
static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
    struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

    if (tsk && !ht->selfparking)
        kthread_unpark(tsk);
}

/**
 * smpboot_park_thread - park the per-cpu thread of @ht for @cpu
 * @ht:		Hotplug thread descriptor
 * @cpu:	CPU whose thread is parked
 *
 * No-op when the per-cpu slot is empty or when the thread parks
 * itself (selfparking).
 */
static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
    struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

    if (!tsk || ht->selfparking)
        return;

    kthread_park(tsk);
}

/**
 * __do_create_thread - create and unpark @plug_thread's thread on all
 *			online cpus
 * @plug_thread:	Hotplug thread descriptor
 *
 * On the first creation failure all threads created so far are torn
 * down and the error is returned. Returns 0 on success.
 * Caller must hold smpboot_threads_lock.
 */
static inline int __do_create_thread(struct smp_hotplug_thread *plug_thread)
{
    int cpu;
    int err;

    for_each_online_cpu(cpu)
    {
        err = __smpboot_create_thread(plug_thread, cpu);
        if (err)
        {
            /* Roll back everything created so far */
            smpboot_destroy_threads(plug_thread);
            return err;
        }
        smpboot_unpark_thread(plug_thread, cpu);
    }

    return 0;
}

/* *************************************************************** */

/**
 * smpboot_create_threads - create all registered per-cpu threads for @cpu
 * @cpu:	CPU for which the threads are created
 *
 * Walks the list of registered hotplug thread descriptors and creates
 * the (parked) thread of each one for @cpu. Stops at the first
 * failure and returns that error; 0 on success.
 */
int smpboot_create_threads(unsigned int cpu)
{
    struct smp_hotplug_thread *ht;
    int ret = 0;

    mutex_lock(&smpboot_threads_lock);
    list_for_each_entry(ht, &hotplug_threads, list)
    {
        ret = __smpboot_create_thread(ht, cpu);
        if (ret)
            goto unlock;
    }
unlock:
    mutex_unlock(&smpboot_threads_lock);

    return ret;
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related
 * 					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus, then adds the
 * descriptor to the registry so CPU hotplug events reach it.
 * Returns 0 on success, a negative error otherwise.
 *
 * NOTE(review): upstream holds cpus_read_lock() around this sequence
 * to fence against concurrent hotplug — confirm whether callers here
 * provide that guarantee.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
    int ret;

    mutex_lock(&smpboot_threads_lock);
    ret = __do_create_thread(plug_thread);
    if (!ret)
        list_add(&plug_thread->list, &hotplug_threads);
    mutex_unlock(&smpboot_threads_lock);

    return ret;
}

int smpboot_unpark_threads(unsigned int cpu)
{
    struct smp_hotplug_thread *cur;

    mutex_lock(&smpboot_threads_lock);
    list_for_each_entry(cur, &hotplug_threads, list)
        smpboot_unpark_thread(cur, cpu);
    mutex_unlock(&smpboot_threads_lock);

    return 0;
}

int smpboot_park_threads(unsigned int cpu)
{
    struct smp_hotplug_thread *cur;

    mutex_lock(&smpboot_threads_lock);
    list_for_each_entry_reverse(cur, &hotplug_threads, list)
        smpboot_park_thread(cur, cpu);
    mutex_unlock(&smpboot_threads_lock);

    return 0;
}
