#include <seminix/syscall.h>
#include <seminix/param.h>
#include <seminix/slab.h>
#include <seminix/init.h>
#include <seminix/smp.h>
#include <seminix/tcb.h>
#include <cap/cap.h>
#include <cap/cnode.h>
#include <cap/thread.h>
#include <cap/frame.h>
#include <cap/vspace.h>

/* Slab cache backing every cap_thread_t allocation in this file. */
static struct kmem_cache *cap_thread_cache;

/*
 * tcb_cap_init - create the thread-capability slab cache at boot.
 *
 * SLAB_PANIC: the kernel cannot operate without thread capabilities,
 * so a failure to create the cache panics instead of returning an error.
 */
static __init int tcb_cap_init(void)
{
    cap_thread_cache = KMEM_CACHE(cap_thread, SLAB_PANIC);

    return 0;
}
userver_initcall(tcb_cap_init)

/*
 * tcb_cap_create - allocate a fresh thread capability plus its backing tcb.
 *
 * Returns the generic cap reference on success, or ERR_PTR(-SERRNO_ENOMEM)
 * when either the capability object or the tcb itself cannot be allocated.
 */
static cap_t *tcb_cap_create(seminix_object_t *object)
{
    cap_thread_t *ct;
    struct tcb *task;

    ct = kmem_cache_alloc(cap_thread_cache, GFP_ZERO);
    if (ct == NULL)
        return ERR_PTR(-SERRNO_ENOMEM);

    task = tcb_struct_create();
    if (task == NULL) {
        /* undo the cap allocation; nothing else was touched yet */
        kmem_cache_free(cap_thread_cache, ct);
        return ERR_PTR(-SERRNO_ENOMEM);
    }

    ct->task = task;
    return CAP_REF(ct);
}

/*
 * tcb_cap_dup - duplicate a thread capability.
 *
 * The duplicate shares the underlying task pointer with the source cap.
 * NOTE(review): no extra task reference is taken here; presumably only the
 * original cap owns the task reference (tcb_cap_delete drops it, and
 * tcb_cap_revoke does not) — confirm against the cap derivation rules.
 */
static cap_t *tcb_cap_dup(cap_t *cap)
{
    cap_thread_t *dup;

    BUG_ON(cap_get_cap_type(cap) != cap_thread_cap);

    dup = kmem_cache_alloc(cap_thread_cache, GFP_ZERO);
    if (dup == NULL)
        return ERR_PTR(-SERRNO_ENOMEM);

    dup->task = CAP_THREAD_PTR(cap)->task;

    return CAP_REF(dup);
}

/*
 * tcb_cap_revoke - free a (childless) derived thread capability.
 *
 * NOTE(review): unlike tcb_cap_delete, this does NOT drop a task reference;
 * presumably revoked caps are dup'd borrowers of the original's reference —
 * confirm the revoke/delete pairing.
 */
static void tcb_cap_revoke(cap_t *cap)
{
    BUG_ON(cap_get_cap_type(cap) != cap_thread_cap);
    BUG_ON(!list_empty(&cap->child));

    kmem_cache_free(cap_thread_cache, cap);
}

/*
 * tcb_cap_delete - destroy a thread capability and drop its task reference.
 *
 * Requires the cap to have no derived children (BUG_ON otherwise), so any
 * duplicates sharing the task pointer are gone before the reference is put.
 */
static void tcb_cap_delete(cap_t *cap)
{
    cap_thread_t *cap_thread = CAP_THREAD_PTR(cap);

    BUG_ON(cap_get_cap_type(cap) != cap_thread_cap);
    BUG_ON(!list_empty(&cap->child));

    put_task_struct(cap_thread->task);
    kmem_cache_free(cap_thread_cache, cap_thread);
}

/* Operation table wiring thread capabilities into the generic cap layer. */
const struct cap_ops thread_cap_ops __ro_after_init = {
    .cap_create = tcb_cap_create,
    .cap_dup = tcb_cap_dup,
    .cap_revoke = tcb_cap_revoke,
    .cap_delete = tcb_cap_delete,
};

/*
 * thread_suspend - request that @tsk stop running.
 *
 * No-op when the task is already stopped or a stop request is pending;
 * otherwise sets TIF_NEED_STOPED and kicks the task's CPU so the flag is
 * noticed promptly.
 */
void thread_suspend(struct tcb *tsk)
{
    if (tsk->state == TASK_STOPED)
        return;
    if (test_tsk_thread_flag(tsk, TIF_NEED_STOPED))
        return;

    set_tsk_thread_flag(tsk, TIF_NEED_STOPED);
    kick_process(tsk);
}

/*
 * thread_resume - wake a stopped task.
 *
 * Returns true if the task was in TASK_STOPED and has been woken,
 * false (without side effects) for any other state.
 */
bool thread_resume(struct tcb *tsk)
{
    bool stopped = (tsk->state == TASK_STOPED);

    if (stopped)
        wake_up_process(tsk);

    return stopped;
}

/*
 * _do_copy_thread - cross-CPU helper for tcb_wake_up_new.
 *
 * Runs on the CPU of the task being copied: duplicates the thread context
 * described by @info (a struct copy_thread_param) and makes the new task
 * runnable.
 */
static void _do_copy_thread(void *info)
{
    struct copy_thread_param *param = info;

    copy_thread(param);
    wake_up_new_task(param->new_tsk);
}

/*
 * tcb_wake_up_new - start a configured tcb by copying context from another.
 *
 * @new_tcb:  cap slot of the thread to start
 * @copy_tcb: cap slot of the thread whose context is copied
 * @pc/@reg0/@stack/@flags: initial user context for the new thread
 *
 * Optional TID handshakes read a user-pointer value off the top of @stack
 * (TCB_WAKEUP_PARENT_GETTID also writes the new tid back and pops the slot).
 *
 * Fixes vs. previous version:
 *  - fault paths no longer leak the new_cap/copy_cap references
 *    (every exit now goes through cnode_capput via the cleanup labels);
 *  - fault paths return -SERRNO_EFAULT, matching the file's errno style
 *    (was raw -EFAULT);
 *  - the cross-CPU call now waits for completion: @param lives on this
 *    stack frame, so an asynchronous (wait=false) call could let
 *    _do_copy_thread read a dead frame on the remote CPU.
 */
SYSCALL_DEFINE6(tcb_wake_up_new, int, new_tcb, int, copy_tcb,
    unsigned long, pc, unsigned long, reg0,
    unsigned long, stack,  unsigned long, flags)
{
    int ret;
    unsigned long val;
    cap_t *new_cap, *copy_cap;
    unsigned long __user *tidptr;
    struct copy_thread_param param = { 0 };

    new_cap = cnode_capget(new_tcb, cap_thread_cap);
    if (IS_ERR(new_cap))
        return PTR_ERR(new_cap);
    copy_cap = cnode_capget(copy_tcb, cap_thread_cap);
    if (IS_ERR(copy_cap)) {
        ret = PTR_ERR(copy_cap);
        goto put_new;
    }

    param.new_tsk = CAP_THREAD_PTR(new_cap)->task;
    param.copy_tsk = CAP_THREAD_PTR(copy_cap)->task;
    param.pc = pc;
    param.reg0 = reg0;
    param.stack = stack;
    param.flags = flags;

    if (flags & TCB_WAKEUP_PARENT_GETTID) {
        ret = -SERRNO_EFAULT;
        tidptr = (unsigned long __user *)(stack - sizeof (unsigned long));
        if (get_user(val, tidptr))
            goto put_copy;
        if (put_user(0, tidptr))
            goto put_copy;
        param.parent_tidptr = (unsigned long __user *)val;
        /* hand the parent the new thread's tid */
        if (put_user(task_tid_nr(param.new_tsk), param.parent_tidptr))
            goto put_copy;
        stack -= sizeof (unsigned long);
    }
    if (flags & (TCB_WAKEUP_CHILD_SETTID | TCB_WAKEUP_SETTLS)) {
        ret = -SERRNO_EFAULT;
        tidptr = (unsigned long __user *)(stack - sizeof (unsigned long));
        if (get_user(val, tidptr))
            goto put_copy;
        if (put_user(0, tidptr))
            goto put_copy;
        param.child_tidptr = (unsigned long __user *)val;
    }

    param.new_tsk->tid_address = (flags & TCB_WAKEUP_CHILD_SETTID) ? param.child_tidptr : NULL;

    /* wait=true: param is on this stack and must outlive the remote call */
    ret = smp_call_function_single(task_cpu(param.copy_tsk), _do_copy_thread, &param, true);
    if (ret)
        ret = -SERRNO_EINVAL;
put_copy:
    cnode_capput(copy_cap);
put_new:
    cnode_capput(new_cap);
    return ret;
}

/*
 * tcb_configure - attach kernel objects (cnode, vspace, ipc buffer, ...) to a tcb.
 *
 * @tcb:       cap slot of the thread to configure
 * @op:        bitmask of TCB_CONFIG_* selecting which fields of @tcbconfig apply
 * @tcbconfig: user-supplied configuration (cap slots for the objects)
 *
 * Fix vs. previous version: a failure inside one config op (e.g. -SERRNO_EEXIST
 * for an already-set cnode, or -SERRNO_EILLEGAL for a bad ipc frame) used to
 * fall through and keep applying the remaining ops, returning an error while
 * having partially configured the tcb. Now the first failure aborts the call.
 */
SYSCALL_DEFINE3(tcb_configure, int, tcb, int, op, tcb_config_t __user *, tcbconfig)
{
    int ret = 0;
    struct tcb *tsk;
    tcb_config_t tc;
    cap_t *cap, *cnode_cap;
    cap_thread_t *cap_thread;

    if (copy_from_user(&tc, tcbconfig, sizeof (tcb_config_t)))
        return -SERRNO_EFAULT;

    cap = cnode_capget(tcb, cap_thread_cap);
    if (IS_ERR(cap))
        return PTR_ERR(cap);

    cap_thread = CAP_THREAD_PTR(cap);
    tsk = cap_thread->task;

    if (op & TCB_CONFIG_CNODE) {
        cnode_cap = cnode_capget(tc.cnode, cap_cnode_cap);
        if (IS_ERR(cnode_cap)) {
            ret = PTR_ERR(cnode_cap);
            goto put_tcb;
        }
        if (tsk->cap_cnode) {
            /* cnode may only be set once */
            cnode_capput(cnode_cap);
            ret = -SERRNO_EEXIST;
            goto put_tcb;
        }
        tsk->cap_cnode = CAP_CNODE_PTR(cnode_cap);
        cnode_capput(cnode_cap);
    }
    if (op & TCB_CONFIG_VSPACE) {
        cap_t *mm_cap;

        mm_cap = cnode_capget(tc.vspace, cap_vspace_cap);
        if (IS_ERR(mm_cap)) {
            ret = PTR_ERR(mm_cap);
            goto put_tcb;
        }
        mm_set_task(CAP_VSPACE_PTR(mm_cap)->mm, tsk);
        cnode_capput(mm_cap);
        // TODO debug: hard-coded scheduling defaults until priority syscall lands
        tsk->prio = DEFAULT_PRIO;
        tsk->mcprio = DEFAULT_PRIO;
        tsk->policy = SEMINIX_SCHED_NORMAL;
        sched_new(tsk, TASK_NEW);
        sched_set_affinity(tsk, 7);
    }
    if (op & TCB_CONFIG_EPFAULT) {
        // TODO: fault endpoint binding not implemented yet
    }
    if (op & TCB_CONFIG_IPCBUF) {
        cap_t *frame_cap;
        frame_t *frame;

        frame_cap = cnode_capget(tc.ipcbuf, cap_frame_cap);
        if (IS_ERR(frame_cap)) {
            ret = PTR_ERR(frame_cap);
            goto put_tcb;
        }
        if (tsk->ipc_buffer) {
            /* ipc buffer may only be set once */
            cnode_capput(frame_cap);
            ret = -SERRNO_EEXIST;
            goto put_tcb;
        }
        frame = CAP_FRAME_PTR(frame_cap)->frame;
        /* the ipc buffer must be exactly one plain page */
        if (frame->frame_type != FRAMETYPE_PAGE ||
            frame->page.nr_pages != 1) {
            cnode_capput(frame_cap);
            ret = -SERRNO_EILLEGAL;
            goto put_tcb;
        }
        tsk->ipc_buffer = page_to_virt(frame->page.pages[0]);
        cnode_capput(frame_cap);
    }
    if (op & TCB_CONFIG_RLIMIT) {
        // TODO: resource limits not implemented yet
    }
put_tcb:
    cnode_capput(cap);
    return ret;
}

/*
 * tcb_priority - adjust scheduling parameters of a tcb.
 *
 * TODO: not implemented yet — all arguments are ignored and the call
 * unconditionally reports success.
 */
SYSCALL_DEFINE4(tcb_priority, int, tcb, int, auth_tcb, int, op,
    tcb_sched_params_t __user *, sched_params)
{
    return 0;
}

/*
 * tcb_control - suspend or resume the thread behind a thread capability.
 *
 * @tcb:  cap slot of the target thread
 * @op:   TCB_CONTROL_SUSPEND or TCB_CONTROL_RESUME
 * @val2: currently unused
 *
 * Fix vs. previous version: an unknown @op used to panic() the kernel —
 * a userspace-reachable DoS. It now returns -SERRNO_EINVAL and still
 * releases the capability reference.
 */
SYSCALL_DEFINE3(tcb_control, int, tcb, int, op, unsigned long, val2)
{
    int ret = 0;
    cap_t *cap;

    cap = cnode_capget(tcb, cap_thread_cap);
    if (IS_ERR(cap))
        return PTR_ERR(cap);

    switch (op) {
    case TCB_CONTROL_SUSPEND:
        thread_suspend(CAP_THREAD_PTR(cap)->task);
        break;
    case TCB_CONTROL_RESUME:
        /* thread_resume()'s "was stopped" result is deliberately not
         * propagated: resuming a non-stopped thread stays a silent no-op. */
        thread_resume(CAP_THREAD_PTR(cap)->task);
        break;
    default:
        ret = -SERRNO_EINVAL;
        break;
    }
    cnode_capput(cap);
    return ret;
}

/*
 * tcb_yield - voluntarily give up the CPU.
 *
 * Thin wrapper over the scheduler's yield(); always succeeds.
 */
SYSCALL_DEFINE0(tcb_yield)
{
    yield();
    return 0;
}
