#include "uthread.h"
#include "mutex.h"

#define UNLOCKED 	0
#define LOCKED 	 	1

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
extern sigset_t set, old;
#else
extern volatile uthread_t curr_thread;
#endif

//Spin until the lock is acquired.
//Busy-waits on an atomic compare-and-swap that transitions the lock
//from UNLOCKED to LOCKED; returns 0 once the swap succeeds.
int uthread_spin_lock(uthread_spinlock_t *spinlock)
{
	while (!__sync_bool_compare_and_swap(&(spinlock->lock), UNLOCKED, LOCKED))
		; //lock held by someone else — keep spinning

	return 0;
}

//Attempt to acquire the lock without blocking.
//Makes a single atomic compare-and-swap attempt: returns 0 if the lock
//was free and is now held by the caller, -1 if it was already held.
int uthread_spin_trylock(uthread_spinlock_t *spinlock)
{
	return __sync_bool_compare_and_swap(&(spinlock->lock), UNLOCKED, LOCKED) ? 0 : -1;
}

//Release the spinlock.
//The original plain store (spinlock->lock = UNLOCKED) carries no memory
//ordering: the compiler/CPU may reorder critical-section writes past it,
//making them visible after the lock already appears free to a thread
//spinning in uthread_spin_lock.  __sync_lock_release writes 0 (== UNLOCKED)
//with release semantics, pairing correctly with the acquire CAS used on
//the lock path.  Always returns 0.
int uthread_spin_unlock(uthread_spinlock_t *spinlock)
{
	__sync_lock_release(&(spinlock->lock));
	return 0;
}

//Acquire the mutex atomically.
//If successful, this function returns immediately with the caller
//recorded as owner; otherwise the calling thread is moved to the
//BLOCKED state and sleeps until uthread_mutex_unlock wakes it, after
//which it loops and retries the acquisition.
//Returns 0 on success; never reports failure — it blocks instead.
int uthread_mutex_lock(uthread_mutex_t *mutex)
{
	assert(mutex);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	//kernel-level build: each kernel thread resolves its own id;
	//otherwise the scheduler exports curr_thread as a global
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);
	
	//try to obtain the lock
	while(1)
	{
			//the lock is free and I have obtained the lock!
		if (__sync_bool_compare_and_swap(&(mutex->lock), UNLOCKED, LOCKED)) 
		{
			assert(threads_table[curr_thread] != NULL);
			mutex->owner = curr_thread;
			break;
		}
			//the lock is held by someone else, so enqueue myself and sleep
		else
		{
			assert(threads_table[curr_thread] != NULL);
		
			//put myself in the hlist of this mutex
			//NOTE(review): if this thread was woken earlier and loops again,
			//it re-adds the same node here — this assumes the wakeup path
			//(or switch_thread_state) unlinked it from the list; confirm,
			//since hlist_add_head on a still-linked node corrupts the list.
			struct hlist_node & l = threads_table[curr_thread]->mutex_hlist;
			struct hlist_head & h = mutex->mutex_hlist_head;
			hlist_add_head(&l, &h);

			//I will now move to and wait in the blocked list until 
			//some other thread unlocks the mutex and wakes me up.
			//NOTE(review): between the failed CAS above and blocking here,
			//the owner could unlock and scan an empty/stale wait list (lost
			//wakeup); presumably safe only if scheduling cannot preempt this
			//window — confirm against the scheduler.
			switch_thread_state(curr_thread, BLOCKED);
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
			//kernel-level build: park in sigwait until the unlocking
			//thread sends SIGUSR2 to this kernel thread
			int signal;
			sigwait(&set, &signal);
			
			switch (signal)
			{
				case SIGUSR2:
					break;
				default:
					//no other signal in `set` is expected here
					assert(0);
					break;
			}
#else
			//user-level build: yield to the scheduler; we resume here
			//once some thread marks us RUNNABLE again
			uthread_yield();
#endif
		}
	}

	return 0;
}

/*
uthread_mutex_unlock unlocks a mutex when called by the owning thread. Calling this routine is required after a thread has finished using the protected data, so that other threads may acquire the mutex to work with that data. An error is returned if:
	- the mutex was already unlocked, or
	- the mutex is owned by another thread.
reference:
	https://computing.llnl.gov/tutorials/pthreads/
*/

//Unlock a mutex held by the calling thread.
//Returns 0 on success, -1 if the caller is not the owner or if the
//mutex was already unlocked.  If any thread is blocked on the mutex,
//the head of its wait list is made RUNNABLE (and, in the kernel-level
//build, signalled with SIGUSR2) so it can retry the acquisition.
int uthread_mutex_unlock(uthread_mutex_t *mutex)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);

	assert(mutex);
	//only the owning thread may unlock
	if ((int) (mutex->owner) != (int) curr_thread)
		return -1;
	//(the original re-asserted owner == curr_thread here; that assert was
	//dead code — the early return above already guarantees it)

	//atomically store UNLOCKED; the previous value tells us whether the
	//mutex was actually held
	if (__sync_lock_test_and_set(&(mutex->lock), UNLOCKED) == LOCKED)
	{
		mutex->owner = -1;

		//if there is some thread waiting, wake up the next thread waiting on this mutex
		if(!hlist_empty(&(mutex->mutex_hlist_head)))
		{
			//NOTE(review): the woken thread's node is not unlinked here;
			//presumably the waiter or scheduler removes it — confirm, to
			//avoid double-insertion in uthread_mutex_lock
			tcb_t * t = hlist_entry(mutex->mutex_hlist_head.first, tcb_t, mutex_hlist);
			
			switch_thread_state(t->id, RUNNABLE);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
			thr_kill(t->kernel_thread_id, SIGUSR2);
#endif
		}

#ifndef KERNEL_LEVEL_THREAD_EXTENSION
		//give the woken (or any runnable) thread a chance to run
		uthread_yield();
#endif
	}
	else
		//the mutex was already unlocked before we did anything
		return -1;
	return 0;
}

//Attempt to acquire the mutex without blocking.
//Returns 0 and records the caller as owner if the mutex was free;
//returns -1 immediately if it is already held.
int uthread_mutex_trylock(uthread_mutex_t *mutex)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);
	
	assert(mutex);

	//someone else already holds the lock — give up right away
	if (!__sync_bool_compare_and_swap(&(mutex->lock), UNLOCKED, LOCKED))
		return -1;

	//the lock was free and is now ours; record ownership
	assert(threads_table[curr_thread] != NULL);
	mutex->owner = curr_thread;
	return 0;
}

//Release *mutex and block the calling thread on *cond.
//The caller must own the mutex (asserted).  Once woken by
//uthread_cond_signal/uthread_cond_broadcast, the mutex is re-acquired
//before returning.  Returns 0.
int uthread_cond_wait(uthread_cond_t *cond, uthread_mutex_t *mutex)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);
	
	//caller must hold the mutex it is waiting against
	assert((int) (mutex->owner) == (int) curr_thread);

	//release the mutex; this must succeed since we own it
	if (__sync_bool_compare_and_swap(&(mutex->lock), LOCKED, UNLOCKED)) 
	{
		mutex->owner = -1;
		
		//I'd better go to sleep rather than spinning on the condition
		assert(threads_table[curr_thread] != NULL);
	
		//put myself in the wait list of this condition variable
		//NOTE(review): the node is not visibly unlinked on wakeup in this
		//file — presumably the signal path or scheduler removes it; confirm
		struct hlist_node & l = threads_table[curr_thread]->cond_hlist;
		struct hlist_head & h = cond->cond_hlist_head;
		hlist_add_head(&l, &h);
		//remember which mutex this condition is paired with, so
		//signal/broadcast can release and re-acquire it
		cond->mutex = mutex;

		//I will now move to and wait in the blocked list until 
		//some other thread signals the condition and wakes me up
		switch_thread_state(curr_thread, BLOCKED);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		//kernel-level build: park in sigwait until the signalling
		//thread sends SIGINT to this kernel thread
		int signal;
		sigwait(&set, &signal);
		
		switch (signal)
		{
			case SIGINT:
				break;
			default:
				//no other signal in `set` is expected here
				assert(0);
				break;
		}
#else
		//user-level build: yield until some thread marks us RUNNABLE
		uthread_yield();
#endif
	}
	else
		//mutex->lock was not LOCKED despite the ownership assert above:
		//internal state is inconsistent
		assert(0);

	//woken up: re-acquire the mutex before returning to the caller
	uthread_mutex_lock(mutex);
	
	return 0;
}

//Wake one thread blocked on *cond.
//If no thread has ever waited on the condition (cond->mutex is NULL),
//this is a no-op returning 0.  Otherwise the associated mutex — which
//the caller must hold, or the CAS below fails and asserts — is
//released, the head of the wait list is made RUNNABLE (and signalled
//with SIGINT in the kernel-level build), and the mutex is re-acquired
//before returning.  Returns 0.
int uthread_cond_signal(uthread_cond_t *cond)
{
	assert(cond);
	if(cond->mutex == NULL) //uthread_cond_wait has not been called by any thread
	{
		//with no paired mutex there can be no waiters either
		assert(hlist_empty(&(cond->cond_hlist_head)));
		return 0;
	}

	assert(cond->mutex);

	//release the mutex paired with this condition; the caller is
	//expected to hold it, so the lock must currently be LOCKED
	if (__sync_bool_compare_and_swap(&(cond->mutex->lock), LOCKED, UNLOCKED)) 
	{
		cond->mutex->owner = -1;
		
		//if there is some thread waiting on this condition, wake up the next thread waiting
		//NOTE(review): asserts non-empty — signalling with a paired mutex
		//but no waiters would abort here; confirm that is intended
		assert(!hlist_empty(&(cond->cond_hlist_head)));
		tcb_t * t = hlist_entry(cond->cond_hlist_head.first, tcb_t, cond_hlist);
		switch_thread_state(t->id, RUNNABLE);
		
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		//kernel-level build: the waiter is parked in sigwait on SIGINT
		thr_kill(t->kernel_thread_id, SIGINT);
#else
		//user-level build: yield so the woken waiter can run
		uthread_yield();
#endif
	}
	else
		//the mutex was not locked: caller violated the contract
		assert(0);
	
	//re-acquire the mutex before returning to the caller
	uthread_mutex_lock(cond->mutex);
	return 0;
}

//Wake every thread blocked on *cond.
//Same contract as uthread_cond_signal, but walks the entire wait list
//making each waiter RUNNABLE (and signalling each with SIGINT in the
//kernel-level build) before re-acquiring the mutex.  Returns 0.
int uthread_cond_broadcast(uthread_cond_t *cond)
{
	assert(cond);
	if(cond->mutex == NULL) //no thread has ever waited on this condition
	{
		//with no paired mutex there can be no waiters either
		assert(hlist_empty(&(cond->cond_hlist_head)));
		return 0;
	}

	assert(cond->mutex);
	//release the paired mutex; the caller is expected to hold it
	if (__sync_bool_compare_and_swap(&(cond->mutex->lock), LOCKED, UNLOCKED)) 
	{
		cond->mutex->owner = -1;
		
		hlist_node * p = NULL;
		//NOTE(review): asserts non-empty — broadcasting with a paired
		//mutex but no waiters would abort here; confirm that is intended
		assert(!hlist_empty(&(cond->cond_hlist_head)));
		//wake every thread waiting on this condition
		//NOTE(review): nodes are not unlinked while iterating — assumes
		//waiters/scheduler remove themselves from the list; confirm
		hlist_for_each(p, &(cond->cond_hlist_head))
		{
			tcb_t * t = hlist_entry(p, tcb_t, cond_hlist);
			switch_thread_state(t->id, RUNNABLE);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
			//each waiter is parked in sigwait on SIGINT
			thr_kill(t->kernel_thread_id, SIGINT);
#endif
		}

#ifndef KERNEL_LEVEL_THREAD_EXTENSION
		//user-level build: yield so the woken waiters can run
		uthread_yield();
#endif
	}
	else
		//the mutex was not locked: caller violated the contract
		assert(0);
	
	//re-acquire the mutex before returning to the caller
	uthread_mutex_lock(cond->mutex);
	return 0;
}

