#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
#include <setjmp.h>
#include <assert.h>
#include <signal.h>
#include <string.h>
#include <sched.h>
#include <sys/types.h>

#include "user_threads_export.h"
#include "user_mutex_export.h"
#include "user_mutex.h"		/* uthread_mutex_t */
#include "user_threads.h"

/* Join life-cycle of a thread; transitions happen in uthread_join(),
 * do_exit(), do_vampires() and wake_up_from_join(). */
enum join_state {
	JOIN_NORMAL,		/* neither joining nor joined */
	JOIN_JOINED,		/* some other thread has joined this one */
	JOIN_JOINER,		/* this thread is blocked joining another */
	JOIN_DOUBLE,		/* both JOINED and JOINER at once */
	/* About three things I was absolutely positive. First, Edward was a
	 * vampire. Second, there was a part of him, and I didn't know how
	 * dominant that part might be, that thirsted for my blood. And third,
	 * I was unconditionally and irrevocably in love with him. */
	JOIN_VAMPIRE,		/* exited while still joining another thread */
	/* A joined vampire holds a kung-fu deathgrip on its joiner */
	JOIN_KUNGFU_DEATHGRIP	/* a vampire that is itself joined */
};

/* Per-thread control block; one static slot per possible thread. */
struct uthread {
	unsigned long magic1;	/* 0xDEAFD00D - corruption canary (checked by rq_integrity) */
	int state;		/* UTHREAD_RUNNING / UTHREAD_SUSPENDED / UTHREAD_ZOMBIE */
	int prio;		/* scales the time slice in wind_alarm() */
	thread_id id;		/* index of this slot in uthread_map */
	bool active;		/* slot in use (true until joined/deactivated) */
	int retval;		/* exit value, delivered to the joiner */

	sigjmp_buf jmp;		/* saved context for context_switch() */
	sigjmp_buf jmp_creator;	/* saved context at this thread's stack base,
				 * used to carve the NEXT thread's stack */
	struct uthread * next;	/* circular run-queue link */

	UTHREAD_START_ROUTINE start_routine;	/* thread entry point */
	void * arg;				/* argument passed to it */

	enum join_state join_state;
	thread_id joined_id;	/* id of the thread joining us, or -1 */
	int * join_ret;		/* where to store the joinee's retval while we block */

	/* When this thread does not wait for any mutex, this is NULL */
	uthread_mutex_t mutex_waiting_for;

	/* A list of all the mutexes that this thread is currently locking. The
	 * data type of this list is 'uthread_mutex_t'.
	 * The order in the list is undefined.
	 * Note: there can't be two identical elements in this list. */
	struct list * mutexes_locking;
	unsigned long magic2;	/* 0xBEEFBABE - trailing corruption canary */
};

/* Why a sigsetjmp in context_switch()/uthread_create() returned; the
 * value travels through siglongjmp (must be non-zero, hence = 1). */
enum sched_reason {
	REASON_SCHED = 1, 	/* regular scheduling */
	REASON_NEW,		/* creating a new thread */
	REASON_NEW_RETURN	/* finished creating a new thread */
};

/* Amusingly enough, uthread is _not_ thread safe */
static struct uthread uthread_map[NR_THREADS];	/* all thread slots, indexed by id */
static struct uthread * curr;			/* currently running thread */
static struct sigaction oldact;			/* SIGALRM action saved by set_handler() */
static unsigned long next_id;			/* lowest inactive slot, handed out next */

/* some yucky static vars for thread creation */
static struct uthread * __new__ = NULL; /* green thread being created */
static struct uthread * __old__ = NULL; /* green thread creating */
static struct uthread * __chg__ = NULL; /* helper green thread */

/* == Important Internal Functions == */
/* SIGALRM handler - main scheduling function */
static void schedule(int signal);
/* context switch */
static int context_switch(struct uthread * to,
                          enum sched_reason reason);
/* allocate stack for new thread */
static void creator_loop(struct uthread * new);
/* spawn a new thread and prepare to run it */
static void spawn_thread(struct uthread * new);

/* == Silly Helper Functions == */
static void deactivate_uthread(thread_id thread);
static void wind_alarm();
static int set_handler();
static int previous_id(struct uthread * thread, bool adding);
static void remove_from_rq(struct uthread * thread);
static void add_to_rq(struct uthread * thread);
static void do_exit(struct uthread * thread, int retval);
static bool is_vampire(struct uthread *);
static void do_vampires(struct uthread *);
static void wake_up_from_join(struct uthread *, int);
#ifndef NDEBUG
static bool is_thread_id_valid(thread_id id);
static bool rq_integrity();
#endif

/* Initialize the library: register the caller as thread 0, install the
 * SIGALRM scheduler and start the preemption timer.
 * Returns UTHREAD_SUCCESS, or UTHREAD_FAIL on allocation/handler error. */
int uthread_init()
{
	int ret = UTHREAD_FAIL;

	/* Zero the whole table up front. (A dead store to
	 * uthread_map[0].mutexes_locking used to precede this memset;
	 * it was removed - the memset wiped it anyway.) */
	memset(uthread_map, 0, sizeof(uthread_map));
	uthread_map[0].state = UTHREAD_RUNNING;
	uthread_map[0].prio = UTHREAD_MIN_PRIO;
	uthread_map[0].id = 0;
	uthread_map[0].active = true;
	uthread_map[0].next = uthread_map;	/* run queue of one: self-loop */
	uthread_map[0].join_state = JOIN_NORMAL;
	uthread_map[0].joined_id = -1;
	uthread_map[0].mutexes_locking = list_init();
	uthread_map[0].magic1 = 0xDEAFD00D;
	uthread_map[0].magic2 = 0xBEEFBABE;
	if (NULL == uthread_map[0].mutexes_locking) {
		goto err;
	}
	uthread_map[0].mutex_waiting_for = NULL;
	curr = uthread_map;
	next_id = 1;

	if (set_handler()) {
		goto err;
	}

	/* Pre-record thread 0's jmp_creator so the first uthread_create()
	 * can carve a fresh stack above ours (see creator_loop). */
	creator_loop(curr);

	wind_alarm();
	ret = UTHREAD_SUCCESS;
	goto out;

err:
	if (NULL != uthread_map[0].mutexes_locking) {
		list_destroy(uthread_map[0].mutexes_locking);
		uthread_map[0].mutexes_locking = NULL;
	}
out:
	return ret;

}

/* Create a new green thread that runs start_routine(arg).
 * Returns the new thread's id, or UTHREAD_FAIL if the table is full or
 * allocation fails. */
thread_id uthread_create(UTHREAD_START_ROUTINE start_routine, void * arg)
{
	struct uthread * new = uthread_map + next_id;
	struct uthread * tmp = NULL;
	thread_id ret = UTHREAD_FAIL;
	int switch_ret = -1;
	lock_t lock;

	int prio = 1;	/* every thread currently gets the same priority */

	if ( (prio<UTHREAD_MIN_PRIO) || (prio>UTHREAD_MAX_PRIO) ) {
		goto err;
	}

	if (next_id >= NR_THREADS) {
		goto err;	/* thread table is full */
	}

	assert(next_id < NR_THREADS);
	assert((new - uthread_map) > 0);

	new->join_state = JOIN_NORMAL;
	new->joined_id = -1;
	new->join_ret = NULL;
	new->mutexes_locking = NULL;
	new->retval = 0;
	new->magic1 = 0xDEAFD00D;
	new->magic2 = 0xBEEFBABE;

	mask_interrupts(&lock);
	assert(rq_integrity());

	new->id = next_id;
	new->prio = prio;
	new->active = true;
	new->state = UTHREAD_RUNNING;
	new->mutexes_locking = list_init();
	if (NULL == new->mutexes_locking) {
		/* BUGFIX: roll back the 'active' flag. Without this the
		 * slot stayed marked active on allocation failure and was
		 * leaked - the next_id scan below would never hand it out
		 * again. */
		new->active = false;
		goto out;
	}
	new->mutex_waiting_for = NULL;
	new->start_routine = start_routine;
	new->arg = arg;
	__new__ = new;
	__old__ = curr;
	assert(new->id - 1 >= 0);
	/* the previous slot's thread acts as the creator helper: its
	 * jmp_creator sits at the right stack depth to carve a fresh
	 * stack for 'new' (see creator_loop) */
	__chg__ = uthread_map + new->id - 1;

	if (__chg__->state == UTHREAD_RUNNING) {
		/* slightly faster than add_to_rq */
		tmp = __chg__->next;
		__chg__->next = new;
		new->next = tmp;
	} else {
		add_to_rq(new);
	}

	/* lowest inactive slot becomes the next id to hand out.
	 * BUGFIX: bound the scan - when the table is completely full it
	 * used to read past uthread_map[NR_THREADS-1]. */
	for (next_id = 1;
	     (next_id < NR_THREADS) && uthread_map[next_id].active;
	     ++next_id);

	/* nastiness to make sure the stack is alloc'd correctly */
	if (!(switch_ret = sigsetjmp(__old__->jmp, 1))) {
		siglongjmp(__chg__->jmp_creator, REASON_NEW);
	}
	assert(switch_ret == REASON_NEW_RETURN);

	ret = new->id;
out:
	assert(rq_integrity());
	unmask_interrupts(&lock);
err:
	return ret;
}

/* Terminate the calling thread with 'retval'.
 * do_exit() marks us zombie and removes us from the run queue (and, for
 * thread 0, calls exit()); the final yield hands the CPU to the next
 * runnable thread and never returns here. */
void uthread_exit(int retval)
{
	lock_t lock;
	assert(rq_integrity());
	mask_interrupts(&lock);
	do_exit(curr, retval);
	assert(rq_integrity());
	unmask_interrupts(&lock);
	uthread_yield();
}

/* Return the id of the calling thread. */
thread_id uthread_self()
{
	assert(rq_integrity());
	return curr->id;
}

/* Wait for 'thread' to terminate; on success store its return value in
 * *thread_return (if non-NULL) and reap it.
 * Returns UTHREAD_SUCCESS, UTHREAD_INVALID for a bad/inactive id, or
 * UTHREAD_FAIL (self-join, or the target's join state forbids another
 * joiner). */
int uthread_join(thread_id thread, int * thread_return)
{
	int ret = UTHREAD_FAIL;
	lock_t lock;
	assert(rq_integrity());
	mask_interrupts(&lock);

	if (thread == uthread_self()) {
		ret = UTHREAD_FAIL;	/* a thread cannot join itself */
		goto err;
	}

	if ((thread < 0) || (thread >= NR_THREADS)
	    || (!uthread_map[thread].active)) {
		ret = UTHREAD_INVALID;
		goto err;
	}

	/* fast path: target already exited and nobody else joined it -
	 * just collect the return value and free the slot */
	if ((uthread_map[thread].state == UTHREAD_ZOMBIE) &&
	    (uthread_map[thread].join_state == JOIN_NORMAL)) {
		if (thread_return)
			*thread_return = uthread_map[thread].retval;
		deactivate_uthread(thread);
		goto was_zombie;
	}

	/* mark the target as joined (it may already be a joiner itself,
	 * or a vampire - see enum join_state) */
	switch(uthread_map[thread].join_state) {
	case JOIN_NORMAL:
		uthread_map[thread].join_state = JOIN_JOINED;
		break;
	case JOIN_JOINER:
		uthread_map[thread].join_state = JOIN_DOUBLE;
		break;
	case JOIN_VAMPIRE:
		uthread_map[thread].join_state = JOIN_KUNGFU_DEATHGRIP;
		break;
	default:
		ret = UTHREAD_FAIL;	/* target already has a joiner */
		goto err;
	}

	/* record ourselves as the joiner and block */
	uthread_map[thread].joined_id = curr->id;
	curr->join_ret = thread_return;
	switch(curr->join_state) {
	case JOIN_JOINED:
		curr->join_state = JOIN_DOUBLE;
		break;
	case JOIN_NORMAL:
		curr->join_state = JOIN_JOINER;
		break;
	default:
		assert(false);
		break;
	}
	suspend_uthread();
	unmask_interrupts(&lock);
	/* we resume here after wake_up_from_join(); spin until the target
	 * is really gone (or has become a reapable zombie) */
	while ((uthread_map[thread].active) &&
	       ((uthread_map[thread].state != UTHREAD_ZOMBIE) ||
	        (is_vampire(uthread_map + thread)))) {
		if (curr->next == curr) {
			sched_yield();	/* alone on the rq: yield the OS thread */
		} else {
			uthread_yield();
		}
	}
	mask_interrupts(&lock);
was_zombie:
	ret = UTHREAD_SUCCESS;
	assert(!uthread_map[thread].active);
err:
	assert(rq_integrity());
	unmask_interrupts(&lock);
	return ret;
}

/* Cancel 'thread', making it exit with UTHREAD_CANCELLED.
 * Cancelling yourself is equivalent to uthread_exit(UTHREAD_CANCELLED)
 * and does not return. Returns UTHREAD_SUCCESS, UTHREAD_INVALID for a
 * bad/inactive id, or UTHREAD_FAIL if the target is already a zombie. */
int uthread_cancel(thread_id thread)
{
	lock_t lock;
	struct uthread * uthread = NULL;
	int ret = UTHREAD_FAIL;
	assert(rq_integrity());
	if (thread == curr->id)
		uthread_exit(UTHREAD_CANCELLED);	/* does not return */
	mask_interrupts(&lock);
	if ((thread < 0) || (thread >= NR_THREADS) ||
		(!uthread_map[thread].active)) {

		ret = UTHREAD_INVALID;
		goto err;
	}
	if (uthread_map[thread].state == UTHREAD_ZOMBIE) {
		ret = UTHREAD_FAIL;	/* already dead */
		goto err;
	}
	uthread = uthread_map + thread;
	do_exit(uthread, UTHREAD_CANCELLED);
	assert(uthread->state == UTHREAD_ZOMBIE);
	ret = UTHREAD_SUCCESS;
err:
	assert(rq_integrity());
	unmask_interrupts(&lock);
	return ret;
}

/* Voluntarily give up the CPU: raise SIGALRM so the schedule() handler
 * runs and switches to the next runnable thread. */
int uthread_yield()
{
	assert(rq_integrity());
	kill(getpid(), SIGALRM);
	return UTHREAD_SUCCESS;
}

/* "Disable preemption": cancel the pending SIGALRM timer, saving the
 * remaining microseconds in *p so unmask_interrupts() can restore it. */
void mask_interrupts(lock_t * p)
{
	assert(p != NULL);
	*p = ualarm(0,0);
}

/* Re-enable preemption: re-arm the timer with the remaining time that
 * mask_interrupts() saved in *p (a no-op if that was 0). */
void unmask_interrupts(lock_t * p)
{
	assert(p != NULL);
	ualarm(*p, 0);
}

/* Move a suspended thread back onto the run queue and mark it running.
 * Out-of-range or inactive ids are silently ignored. */
void wake_up_uthread(thread_id id)
{
	lock_t lock = 0;
	struct uthread * this = NULL;
	mask_interrupts(&lock);
	/* validate id BEFORE forming the pointer: uthread_map + id is
	 * undefined for an out-of-range id */
	if ((id < 0) || (id >= NR_THREADS)) {
		goto end;
	}
	this = uthread_map + id;
	if (!this->active) {
		goto end;
	}
	/* XXX: thread1 join thread2, thread1 cancelled? */
	/* BUGFIX: this assert used `=` (assignment), which silently forced
	 * the state to SUSPENDED in debug builds and checked nothing in
	 * release builds; it must be a comparison. */
	assert(this->state == UTHREAD_SUSPENDED);
	add_to_rq(this);
	this->state = UTHREAD_RUNNING;
end:
	unmask_interrupts(&lock);
	return;
}

/* Take the calling thread off the run queue and mark it suspended.
 * The thread keeps running until it yields; wake_up_uthread() undoes
 * this. */
void suspend_uthread()
{
	lock_t lock;
	mask_interrupts(&lock);
	struct uthread * this = curr;
	assert(this->state == UTHREAD_RUNNING);
	remove_from_rq(this);
	this->state = UTHREAD_SUSPENDED;
	unmask_interrupts(&lock);
}

/* Free a thread slot so uthread_create() can reuse it; keep next_id
 * pointing at the lowest free slot. */
static void deactivate_uthread(thread_id th)
{
	assert(is_thread_id_valid(th));
	uthread_map[th].active = false;
	if (next_id > th)
		next_id = th;	/* hand out the lowest free slot first */
}

/* Save the current thread's context and jump to 'to', delivering
 * 'reason' as the sigsetjmp return value on the other side. Returns
 * (in the ORIGINAL thread, once somebody switches back to it) the
 * reason that thread was resumed with. */
static int context_switch(struct uthread * to, enum sched_reason reason)
{
	int ret = -1;
	/* 0xDEAFD00D doubles as a poison value here: a 'to' pointer equal
	 * to the magic word would indicate corruption */
	assert(to != (void *)0xDEAFD00D);
	if ((ret = sigsetjmp(curr->jmp, 1)) == 0) {
		curr = to;
		siglongjmp(curr->jmp, reason);
	}
	assert(curr != (void *)0xDEAFD00D);
	return ret;
}

/* (Re)arm the preemption timer for the current thread; a higher prio
 * multiplies the base time slice into a longer quantum. */
static void wind_alarm()
{
	ualarm(curr->prio * UTHREAD_TIME_SLICE, 0);
}


/* SIGALRM handler: switch to the next runnable thread, then re-arm the
 * timer for the (possibly new) current thread. */
static void schedule(int signum)
{
	int ret = -1;
	(void)signum;	/* required by the sa_handler signature */
	assert(curr != NULL);
	if (curr->next == curr) {
		goto out;	/* only one runnable thread - nothing to do */
	}
	ret = context_switch(curr->next, REASON_SCHED);
	/* a regular switch must resume us with REASON_SCHED
	 * (the old `if (ret != X) assert(ret == X)` wrapper was redundant) */
	assert(ret == REASON_SCHED);
	(void)ret;	/* unused when NDEBUG strips the assert */
out:
	wind_alarm();
	return;
}

/* Install schedule() as the SIGALRM handler, saving the previous
 * action in 'oldact'. Returns 0 on success, non-zero on failure. */
int set_handler()
{
	struct sigaction act;
	/* BUGFIX: check the query call - on failure 'act' would be used
	 * uninitialized */
	if (sigaction(SIGALRM, NULL, &act) != 0) {
		return -1;
	}
	act.sa_handler = schedule;
	return sigaction(SIGALRM, &act, &oldact);
}

/* we alloc the new stack for the thread on the stack, a preset number of bytes
 * forward. So, to avoid stack collisons, we need to make sure each thread main
 * is called from the base of the previous stack (otherwise the stacks are not
 * all the same size and may overlap).
 * This loop and the chg_jmpbuf take care of that dirty business. */
static void creator_loop(struct uthread * new)
{
	/* This frame IS the stack allocation: every call to creator_loop
	 * pushes one UTHREAD_DEF_STACK_SIZE region, and new->jmp_creator
	 * remembers a point at its base. */
	volatile char alloc_array[UTHREAD_DEF_STACK_SIZE];
	/* Don't optimize my array away! I need it =) */
	alloc_array[UTHREAD_DEF_STACK_SIZE-1] = 'Z' + alloc_array[0];

	/* note that sigsetjmp returns (at least) twice */
	if (sigsetjmp(new->jmp_creator, 1)) {
		/* if we're here, we need to spawn a new green thread:
		 * uthread_create() longjmp'd to us with REASON_NEW */
		spawn_thread(__new__);
		siglongjmp(__old__->jmp, REASON_NEW_RETURN);
	}

}

/* Carve out a stack for 'new' (via the nested creator_loop frame) and
 * record its entry point in new->jmp. When the scheduler later jumps
 * to new->jmp, the new thread starts: re-arm the alarm, run the start
 * routine, and exit with whatever it returns. */
static void spawn_thread(struct uthread * new)
{
	/* stack is alloc'd in here */
	creator_loop(new);

	if (sigsetjmp(new->jmp, 1)) {
		/* first actual run of the new thread */
		wind_alarm();
		uthread_exit(curr->start_routine(curr->arg));
	}
}

/* previous _RUNNING_ */
/* Find the id of the run-queue thread whose 'next' should point at
 * 'thread'.
 * adding == false: 'thread' is already on the ring; walk until we find
 * its predecessor (NOTE(review): loops forever if 'thread' is NOT on
 * the ring - callers must guarantee membership).
 * adding == true: find an insertion point for 'thread'. The first loop
 * advances to the highest-id runner (the wrap point of the id-ordered
 * ring); the second loop then searches forward, guarded by 'max' to
 * avoid going around twice. */
static int previous_id(struct uthread * thread, bool adding)
{
	struct uthread * i = NULL;
	thread_id max;
	if (curr == curr->next) {
		/* ring of one: curr is everybody's predecessor */
		i = curr;
		goto out;
	}
	if (!adding) {
		for (i = curr; i->next != thread; i = i->next);
	} else {
		for (i = curr; (i->next->id > i->id); i = i->next);
		for (max = i->id;
		     ((i->next->id < thread->id) &&
		      (i->id != max));
		      i = i->next);
	}
out:
	return i->id;
}

/* Link 'thread' into the circular run queue after the predecessor that
 * previous_id() picks. Interrupts must be masked by the caller. */
static void add_to_rq(struct uthread * thread)
{
	struct uthread * prev = uthread_map + previous_id(thread, true);
	thread->next = prev->next;
	prev->next = thread;
}

static void remove_from_rq(struct uthread * thread)
{
	struct uthread * prev = uthread_map + previous_id(thread, false);
	if (thread->next == thread) {
		return; /* can't remove last thread... */
	}
	assert(prev->next == thread);
	prev->next = thread->next;
	/* thread->next = NULL; */
}

/* A vampire is a thread that exited while it was still joining another
 * thread (plain VAMPIRE, or KUNGFU_DEATHGRIP when it is also joined). */
static bool is_vampire(struct uthread * vampire)
{
	assert(vampire->active);
	switch (vampire->join_state) {
	case JOIN_VAMPIRE:
	case JOIN_KUNGFU_DEATHGRIP:
		return true;
	default:
		return false;
	}
}

/* Resume 'thread', which is blocked in uthread_join(): deliver the
 * joinee's 'retval' through its saved join_ret pointer and step its
 * join_state back (JOINER -> NORMAL, DOUBLE -> JOINED). */
static void wake_up_from_join(struct uthread * thread, int retval)
{
	wake_up_uthread(thread->id);
	if (thread->join_ret)
		*thread->join_ret = retval;
	thread->join_ret = NULL;
	switch(thread->join_state) {
	case JOIN_JOINER:
		thread->join_state = JOIN_NORMAL;
		break;
	case JOIN_DOUBLE:
		thread->join_state = JOIN_JOINED;
		break;
	default:
		assert(false); /* impossible: only joiners block in join */
	}
}

/* Reap a chain of vampires: 'vampire' exited while joining somebody,
 * so walk the joined_id chain, deactivating each dead link that is
 * held in a KUNGFU_DEATHGRIP, until we reach a thread that is alive
 * (wake it with UTHREAD_CANCELLED) or a plain zombie.
 * NOTE(review): under NDEBUG the 'default' label falls through into
 * the KUNGFU_DEATHGRIP case (the assert compiles away), so unknown
 * states get deathgrip treatment - confirm this is intended. */
static void do_vampires(struct uthread * vampire)
{
	assert(is_vampire(vampire));
	struct uthread * last_vampire = NULL;
	do {
		assert(vampire->state == UTHREAD_ZOMBIE);
		last_vampire = vampire;
		switch (vampire->join_state) {
		case JOIN_VAMPIRE:
			/* end of the chain - nobody joined this vampire */
			vampire->join_state = JOIN_NORMAL;
			break;
		default:
			assert(false);
		case JOIN_KUNGFU_DEATHGRIP:
			/* reap this link and follow the grip to its joiner */
			vampire->join_state = JOIN_NORMAL;
			deactivate_uthread(vampire->id);
			vampire = uthread_map + vampire->joined_id;
			break;
		}
		last_vampire->joined_id = -1;
	} while(vampire->active && (vampire->join_state == JOIN_KUNGFU_DEATHGRIP));
	if (!is_vampire(vampire) && (vampire->state != UTHREAD_ZOMBIE)) {
		/* chain ended at a live joiner: its joinee was cancelled */
		last_vampire->join_state = JOIN_VAMPIRE;
		wake_up_from_join(vampire, UTHREAD_CANCELLED);
	}
	if (vampire->active && is_vampire(vampire)) {
		vampire->join_state = JOIN_NORMAL;
	}
}

/* Terminate 'thread' with 'retval': release its mutexes, mark it a
 * zombie, wake (or chain) its joiner, and pull it off the run queue.
 * Interrupts must be masked by the caller. Thread 0 exiting ends the
 * whole process. */
static void do_exit(struct uthread * thread, int retval)
{
	bool remove = false;
	assert(rq_integrity());
	_mutex_cleanup(thread->id);	/* drop every mutex this thread holds */
	thread->retval = retval;
	if (thread->state == UTHREAD_RUNNING)
		remove = true;
	thread->state = UTHREAD_ZOMBIE;
	if (thread->joined_id != -1) {
		struct uthread * papa = NULL;
		switch(thread->join_state) {
		case JOIN_DOUBLE:
			/* dying while both joined and joining: our joiner
			 * now holds the kung-fu deathgrip */
			thread->join_state = JOIN_KUNGFU_DEATHGRIP;
			break;
		case JOIN_JOINED:
			papa = uthread_map + thread->joined_id;
			if (is_vampire(papa)) {
				do_vampires(papa);
			} else {
				wake_up_from_join(papa, retval);
			}
			deactivate_uthread(thread->id);
			/* fall-through */
		case JOIN_NORMAL:
			thread->join_state = JOIN_NORMAL;
			break;
		case JOIN_JOINER:
		case JOIN_KUNGFU_DEATHGRIP:
		case JOIN_VAMPIRE:
			/* unpossiburu! */
			assert(false); /* vampire tried to exit */
		}
	} else if (thread->join_state == JOIN_JOINER) {
		/* died while joining somebody: become a vampire */
		thread->join_state = JOIN_VAMPIRE;
	}
	list_destroy(thread->mutexes_locking);
	/* BUGFIX: clear the pointer to the destroyed list; a zombie is
	 * still 'active', so _thread_peek_locking() could otherwise reach
	 * the freed list (matches the cleanup pattern in uthread_init). */
	thread->mutexes_locking = NULL;
	if (remove)
		remove_from_rq(thread);

	/* XXX: improve this */
	if (thread->id == 0)
		exit(retval);
}

#ifndef NDEBUG
/* Debug helper: true iff 'id' indexes an in-use thread slot. */
static bool is_thread_id_valid(thread_id id)
{
	if ((id < 0) || (id >= NR_THREADS))
		return false;
	return uthread_map[id].active;
}

/* Debug invariant checker: verifies canaries, join-state consistency,
 * and that every RUNNING thread is reachable on the circular run
 * queue (and every queued thread is non-zombie). Returns true when
 * all invariants hold; meant to be wrapped in assert(). */
static bool rq_integrity()
{
	int i = 0;
	struct uthread * th = NULL;
	assert(curr != (void *)0xDEAFD00D);
	for (i = 0; i < NR_THREADS; ++i) {
		if (uthread_map[i].active == false)
			continue;
		/* canaries around every live slot */
		assert(uthread_map[i].magic1 == 0xDEAFD00D);
		assert(uthread_map[i].magic2 == 0xBEEFBABE);
		/* a zombie may only be unjoined, a vampire, or deathgripped */
		if (uthread_map[i].state == UTHREAD_ZOMBIE)
			switch(uthread_map[i].join_state) {
			case JOIN_NORMAL:
			case JOIN_VAMPIRE:
			case JOIN_KUNGFU_DEATHGRIP:
				break;
			default:
				return false;
		}
		/* joined states must record who the joiner is */
		switch(uthread_map[i].join_state) {
		case JOIN_NORMAL:
		case JOIN_JOINER:
		case JOIN_VAMPIRE:
			break;
		default:
			if (uthread_map[i].joined_id == -1)
				return false;
		}
		if (uthread_map[i].state != UTHREAD_RUNNING)
			continue;
		/* every RUNNING thread must be reachable from curr */
		for (th = curr->next; th != curr; th = th->next) {
			assert(th->state != UTHREAD_ZOMBIE);
			if (i == th->id)
				break;
		}
		if (th->id != i)
			return false;
	}
	return true;
}
#endif /* NDEBUG */

/* Return the mutex that thread 't' is currently blocked on, or NULL
 * when it is not waiting for any mutex. */
uthread_mutex_t _thread_peek_waiting(thread_id t)
{
	assert(is_thread_id_valid(t));
	return uthread_map[t].mutex_waiting_for;
}

/* Return one mutex that thread 't' currently holds (the front of its
 * lock list), or NULL when it holds none. */
uthread_mutex_t _thread_peek_locking(thread_id t)
{
	uthread_mutex_t front = NULL;
	assert(is_thread_id_valid(t));

	if (0 != list_get_front(uthread_map[t].mutexes_locking, (void*)&front)) {
		return NULL;
	}
	return front;
}

/* Record that thread 't' is now blocked waiting for mutex 'm'. */
void _thread_mutex_wait(thread_id t, uthread_mutex_t m)
{
	assert(is_thread_id_valid(t));
	uthread_map[t].mutex_waiting_for = m;
}

/* Clear thread 't's wait record: it is no longer blocked on a mutex. */
void _thread_mutex_clear_wait(thread_id t)
{
	assert(is_thread_id_valid(t));
	uthread_map[t].mutex_waiting_for = NULL;
}

/* Record that thread 't' now holds mutex 'm'.
 * Returns 0 on success, -1 if the lock list could not be extended. */
int _thread_mutex_lock(thread_id t, uthread_mutex_t m)
{
	struct uthread * th = NULL;
	assert(is_thread_id_valid(t));
	th = uthread_map + t;

	/* a thread may only acquire the mutex it was (possibly) waiting for */
	assert((th->mutex_waiting_for == NULL) ||
	       (th->mutex_waiting_for == m));

	if (list_push_back(th->mutexes_locking, m) != 0) {
		return -1;
	}
	return 0;
}

/* Record that thread 't' released mutex 'm': remove it from the
 * thread's lock list. The mutex is expected to be on the list. */
void _thread_mutex_unlock(thread_id t, uthread_mutex_t m)
{
	assert(is_thread_id_valid(t));
	struct uthread * uthread;
	uthread = uthread_map + t;

	int tmp = list_remove_first_occurrence(uthread->mutexes_locking, m);
	/* the old `if (tmp != 0) assert(tmp == 0);` wrapper was redundant -
	 * assert the postcondition directly */
	(void)tmp;	/* unused when NDEBUG strips the assert */
	assert(tmp == 0);
}
