#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pid.h>
#include <linux/stdlib.h>
#include <linux/refcount.h>
#include <linux/sched/signal.h>
#include <linux/list.h>
#include <linux/sched/task_stack.h>

#include "inc/struct_alloc.h"

int __weak arch_dup_task_struct(struct task_struct *dst,
                                struct task_struct *src)
{
    *dst = *src;
    return 0;
}

static int dup_task_struct(struct task_struct *orig, int node, struct task_struct **p)
{
    static struct task_struct *tsk;
    int err = 0;

    tsk = alloc_task_struct_node(node);
    if (!tsk)
    {
        err = -ENOMEM;
    }
    else
    {
        arch_dup_task_struct(tsk, orig);

        err = alloc_thread_stack_node(tsk, node);
        if (err)
        {
            release_task_struct(tsk);
            tsk = NULL;
        }
    }

    *p = tsk;

    return err;
}

static inline void task_struct_setup(struct task_struct *p, struct kernel_clone_args *args)
{
    /*
     * One for the user space visible state that goes away when reaped.
     * One for the scheduler.
     */
    refcount_set(&p->rcu_users, 2);
    /* One for the rcu users */
    refcount_set(&p->usage, 1);

    p->flags &= ~PF_KTHREAD;
    if (args->kthread)
        p->flags |= PF_KTHREAD;
    if (args->name)
        __strncpy(p->name, args->name, sizeof(p->name));

    raw_spin_lock_init(&p->pi_lock);
    p->pi_waiters = RB_ROOT_CACHED;
    set_task_stack_end_magic(p);
    p->nivcsw = p->nvcsw = 0;
}

__weak int copy_extra(struct task_struct *orig, struct task_struct *dst, unsigned int flags)
{
    return 0;
}

static int do_copy_others(struct task_struct *orig, struct task_struct *p, struct kernel_clone_args *args)
{
    int err;

#ifdef CONFIG_SMP
	if (orig->cpus_ptr == &orig->cpus_mask)
		p->cpus_ptr = &p->cpus_mask;
#endif

    err = copy_thread(p, args);

    err = sched_fork(args->flags, p);

    err = copy_extra(orig, p, args->flags);

    return err;
}

static int do_pid_alloc(struct pid *pid, struct task_struct *p, struct kernel_clone_args *args)
{
    int err = 0;

    return err;
}

int copy_process(struct pid *pid,
                 int trace,
                 int node,
                 struct kernel_clone_args *args,
                 struct task_struct **tsk)
{
    struct task_struct *p;
    struct task_struct *cur = current;
    const u64 clone_flags = args->flags;
    int err = 0;

    err = dup_task_struct(cur, node, &p);
    if (err == 0)
    {
        task_struct_setup(p, args);

        err = do_copy_others(cur, p, args);
        if (err)
        {
            pr_todo();
        }
        else
        {
            err = do_pid_alloc(pid, p, args);

            /* CLONE_PARENT re-uses the old parent */
            if (clone_flags & (CLONE_PARENT | CLONE_THREAD))
            {
                pr_todo();
            }
            else
            {
                p->exit_signal = args->exit_signal;
            }

            if (thread_group_leader(p))
            {
                list_add_tail_rcu(&p->tasks, &init_task.tasks);
            }
            else
            {
                pr_todo();
            }

            *tsk = p;
        }
    }

    return err;
}
