#include <arch.h>
#include <paging.h>
#include <conio.h>
#include <context.h>
#include <paging.h>
#include <list.h>
#include <string.h>
#include <schedule.h>

#include <sys/kmalloc.h>
#include <sys/task.h>
#include <sys/thread.h>
#include <sys/unistd.h>

extern void *kern_stack;	/* in start.asm */
extern task_t kern_task;

static thread_t idle_thread;
static thread_t *zombie;
thread_t *curth = &idle_thread;
static list_t threads_list;
static int nthreads;

static thread_t *thread_alloc(task_t *);
static void thread_dealloc(thread_t *);

/*
 * Hand out the next thread ID.
 * todo: use the rand func for generating tid
 */
static int thread_tid(void)
{
    int tid = nthreads;

    nthreads++;
    return tid;
}

/*
 * Allocate and zero a thread_t, link it into the global thread
 * list and into the owner task's thread list, and assign a tid.
 * Returns NULL if task is NULL or kmalloc fails.
 */
static thread_t *thread_alloc(task_t * task)
{
    thread_t *th = NULL;

    if (task != NULL)
	th = (thread_t *) kmalloc(sizeof(thread_t), GFP_KERNEL);
    if (th == NULL)
	return NULL;

    bzero(th, sizeof(thread_t));
    th->owner = task;
    th->tid = thread_tid();
    list_init(&th->mutexes);

    /* make the new thread visible globally and to its task */
    list_insert(&threads_list, &th->link);
    list_insert(&task->threads, &th->task_link);
    task->nthreads++;

    return th;
}

/*
 * Create a new thread within the specified task.
 *
 * The new thread starts suspended; its kernel stack is cloned
 * from the calling thread's so that it resumes at the same point.
 * For user tasks, *thp is probed for writability (with NULL)
 * before any resources are committed, then filled in on success.
 *
 * Returns 0 on success, -1 on invalid task, bad user pointer,
 * or resource exhaustion.
 */
int thread_create(task_t * task, thread_t ** thp)
{
    thread_t *nth;

    sched_lock();
    if (!task_valid(task)) {
	sched_unlock();
	return -1;
    }

    if ((curtask->flags & TASK_SYSTEM) == 0) {
	/* probe the user pointer before allocating anything */
	nth = NULL;
	if (copyout(&nth, thp, sizeof(nth))) {
	    sched_unlock();
	    return -1;
	}
    }

    nth = thread_alloc(task);	/* alloc thread_t struct */
    if (nth == NULL) {
	sched_unlock();
	return -1;
    }

    if (context_init(&nth->ctx) < 0) {
	/*
	 * Fully undo thread_alloc(): the thread was already
	 * linked into threads_list and task->threads, so it
	 * must be unlinked before being freed, or both lists
	 * would keep pointers into freed memory.
	 */
	list_remove(&nth->task_link);
	list_remove(&nth->link);
	task->nthreads--;
	kfree(nth);
	sched_unlock();
	return -1;
    }
    /* copying stack from current to new thread */
    memcpy(nth->ctx.stack0, curth->ctx.stack0, KSTACK_SIZE);
    nth->state = TH_SUSPEND;
    nth->suspend_cnt = task->suspend_cnt + 1;
    sched_start(nth, curth->base_prio, SCHED_RR);
    strcpy(nth->name, "*noname");

    if (curtask->flags & TASK_SYSTEM)
	*thp = nth;
    else
	copyout(&nth, thp, sizeof(nth));

    sched_unlock();

    return 0;
}

/*
 * Create and immediately start a kernel thread running kentry
 * at the given priority. Must be called with the scheduler
 * locked (asserted below).
 *
 * Note: the thread is always owned by kern_task; the task
 * parameter is kept for interface compatibility.
 *
 * Returns the new thread, or NULL on failure.
 */
thread_t *kthread_create(task_t * task, void (*kentry), int prio)
{
    thread_t *nth;

    assert(curth->sched_lock > 0);

    if ((nth = thread_alloc(&kern_task)) == NULL)
	return NULL;

    /*
     * Original code had this setup inside the NULL-check block,
     * after a return: it was unreachable and kernel threads were
     * started with an uninitialized context.
     */
    if (context_setup(&nth->ctx, KERNEL_THREAD, kentry) < 0) {
	thread_destroy(nth);
	return NULL;
    }

    sched_start(nth, prio, SCHED_FF);
    nth->suspend_cnt = 1;
    sched_resume(nth);

    return (nth);
}

/*
 * Terminate a kernel thread: stop it in the scheduler and
 * release its resources. Only valid for system-task threads.
 */
void kthread_terminate(thread_t * th)
{
    assert(th != NULL);
    assert(th->owner->flags & TASK_SYSTEM);

    sched_lock();
    /* TODO: mutex_cancel(th) / timer_cancel(th) once implemented */
    sched_stop(th);
    thread_dealloc(th);
    sched_unlock();
}

/*
 * func loads ustack/entry of thread
 */
/*
 * Load the entry point and (optional) user stack into a thread's
 * context. A NULL ustack sets up a kernel-only context.
 * Returns 0 on success, -1 if th is not a valid thread.
 */
int thread_load(thread_t * th, void (*entry), void *ustack)
{
    int s;

    sched_lock();
    if (!thread_valid(th)) {
	sched_unlock();
	return -1;
    }

    /* block interrupts while the context is being rewritten */
    s = splhi();
    context_setup(&th->ctx, ustack, entry);
    splx(s);

    sched_unlock();
    return 0;
}

/* Return the currently running thread. */
thread_t *thread_self(void)
{
    return curth;
}

/* Voluntarily give up the CPU to another runnable thread. */
void thread_yield(void)
{
    sched_yield();
}

/*
 * permanently stop execution of the specified thread.
 * If given thread is a current thread, this routine
 * never returns.
 */
/*
 * permanently stop execution of the specified thread.
 * If given thread is a current thread, this routine
 * never returns.
 * Returns -1 if th is not a valid thread, 0 otherwise.
 */
int thread_terminate(thread_t * th)
{
    int rc = -1;

    sched_lock();
    if (thread_valid(th)) {
	thread_destroy(th);
	rc = 0;
    }
    sched_unlock();

    return rc;
}

/*
 * thread_destroy-- the internal version of thread_terminate.
 */
/*
 * thread_destroy-- the internal version of thread_terminate.
 *
 * Cancels any message the thread is blocked on, removes it from
 * the scheduler, then releases its memory (possibly deferred if
 * th is the current thread -- see thread_dealloc()).
 */
void thread_destroy(thread_t * th)
{
    msg_cancel(th);
    /*mutex_cancel(th);
       timer_cancel(th); */
    sched_stop(th);
    thread_dealloc(th);
}

/*
 * Deallocate a thread.
 *
 * We can not release the context of the "current" thread
 * because our thread switching always requires the current
 * context. So, the resource deallocation is deferred until
 * another thread calls thread_deallocate() later.
 */
/*
 * Deallocate a thread.
 *
 * We can not release the context of the "current" thread
 * because our thread switching always requires the current
 * context. So, the resource deallocation is deferred until
 * another thread calls thread_deallocate() later.
 */
static void thread_dealloc(thread_t * th)
{
    /* Unlink from the task's thread list and the global list. */
    list_remove(&th->task_link);
    list_remove(&th->link);
    th->owner->nthreads--;

    if (zombie != NULL) {
	/*
	 * deallocate a zombie thread which
	 * was killed in previous request.
	 */
	assert(zombie != curth);
	context_destroy(&zombie->ctx);
	zombie->ctx.kstack = NULL;
	kfree(zombie);
	zombie = NULL;
    }

    if (th == curth) {
	/*
	 * Enter zombie state and wait for
	 * somebody to be killed us.
	 */
	zombie = th;
	return;
    }

    /*
     * Release the kernel stack pages.
     * NOTE(review): the argument order (end address first,
     * base second) looks inverted for a typical
     * free_pages(start, end) API -- confirm against the
     * free_pages() declaration in paging.h.
     */
    free_pages((uint32_t) th->ctx.kstack + KSTACK_SIZE,
	       (uint32_t) th->ctx.kstack);
    th->ctx.kstack = NULL;
    kfree(th);
}

/*
 * suspend thread.
 *
 * A thread can be suspended any number of times.
 * And, it does not start to run again unless the thread
 * is resumed by the same count of suspend request.
 */
/*
 * suspend thread.
 *
 * A thread can be suspended any number of times.
 * And, it does not start to run again unless the thread
 * is resumed by the same count of suspend request.
 * Returns -1 if th is not a valid thread, 0 otherwise.
 */
int thread_suspend(thread_t * th)
{
    sched_lock();
    if (!thread_valid(th)) {
	sched_unlock();
	return -1;
    }

    th->suspend_cnt++;
    if (th->suspend_cnt == 1)	/* first suspend actually stops it */
	sched_suspend(th);

    sched_unlock();
    return 0;
}

/*
 * Resume thread.
 *
 * A thread does not begin to run, unless both thread
 * suspend count and task suspend count are set to 0.
 */
/*
 * Resume thread.
 *
 * A thread does not begin to run, unless both thread
 * suspend count and task suspend count are set to 0.
 * Returns -1 if th is invalid or was not suspended.
 */
int thread_resume(thread_t * th)
{
    assert(th != curth);

    sched_lock();
    /* reject unknown threads and threads that are not suspended */
    if (!thread_valid(th) || th->suspend_cnt == 0) {
	sched_unlock();
	return -1;
    }

    th->suspend_cnt--;
    if (th->suspend_cnt == 0 && th->owner->suspend_cnt == 0)
	sched_resume(th);

    sched_unlock();
    return 0;
}

/*
 * Idle loop: halt the CPU until the next interrupt, then
 * offer the CPU to any runnable thread. Never returns.
 */
void thread_idle(void)
{
    for (;;) {
	cpu_idle();
	sched_yield();
    }
}

/*
 * Check whether th is a live thread, i.e. present in the
 * global thread list. Returns 1 if found, 0 otherwise.
 */
int thread_valid(thread_t * th)
{
    list_t *node;

    for (node = list_first(&threads_list); node != &threads_list;
	 node = list_next(node)) {
	if (list_entry(node, thread_t, link) == th)
	    return 1;
    }

    return 0;
}

/*
 * The first thread in the system is created here by hand.
 * This thread will become an idle thread when thread_idle()
 * is called later in main().
 */
/*
 * The first thread in the system is created here by hand.
 * This thread will become an idle thread when thread_idle()
 * is called later in main().
 */
void thread_init(void)
{
    kprintf("init threads...");
    list_init(&threads_list);

    /* build the idle thread's context by hand */
    if (context_init(&idle_thread.ctx) < 0)
	kprintf("context_init(): failed\n");
    if (context_setup(&idle_thread.ctx, KERNEL_THREAD, thread_idle) < 0)
	kprintf("context_setup(): failed\n");

    /* adding to scheduler & init sched params */
    sched_start(&idle_thread, PRIO_IDLE, SCHED_FF);

    idle_thread.tid = 0;	/* Thread ID */
    idle_thread.owner = &kern_task;
    idle_thread.state = TH_RUN;
    idle_thread.sched_lock = 1;
    list_init(&idle_thread.mutexes);

    /* register with the global and kernel-task thread lists */
    list_insert(&threads_list, &idle_thread.link);
    list_insert(&kern_task.threads, &idle_thread.task_link);
    kern_task.nthreads = 1;
    nthreads++;

    kprintf("Ok.\n");
}
