#include "uthread.h"
#include "mutex.h"

tcb_t * threads_table[MAX_NUM_THREADS] = {NULL}; // global thread table containing the addresses of all threads' tcbs
int nr_threads = 0;  		// the number of all threads including the scheduler thread

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
sigset_t set, old;			// 'set' is the all-signals mask used for sigwait-based blocking; 'old' saves the previous mask
#else
volatile uthread_t curr_thread;		// id of the currently running thread (written by the scheduler)
unsigned nr_runnable = 0;		// number of threads currently on the runnable list
unsigned nr_blocked = 0;		// number of threads currently on the blocked list
struct list_head runnable_list_head = LIST_HEAD_INIT(runnable_list_head);	// runnable list (round-robin order)
struct list_head blocked_list_head = LIST_HEAD_INIT(blocked_list_head);		// blocked list
#endif

#ifndef KERNEL_LEVEL_THREAD_EXTENSION
/*
 * Linux hides addresses in jump buffers by xoring them with gs:0x18 and then
 * rotating them left 9.  This code does the same, so that we can put our own
 * addresses into a jump buffer.
 *
 * NB: in gcc inline assembly, "+r" marks an in/out operand ("=r" is
 * write-only, so an operand that is also read must use "+r").
 */
//Mangle a code/stack address the same way glibc's setjmp does (PTR_MANGLE):
//xor with the per-thread guard at %gs:0x18, then rotate left by 9.  This lets
//us plant our own addresses into a jump buffer.
//
//fix: the original used the "=r" (write-only) constraint for 'ret', so the
//compiler was free not to load ret's value into the register before the
//xorl that reads it; "+r" declares the operand as read AND written.
//(Also removed the stray '#warning "Linux"' that fired on every build.)
int xor_and_rol(void* p)
{
	int ret = (int)p;	//NOTE(review): assumes 32-bit pointers (ILP32 build)
#ifndef __sun__
	asm volatile ("xorl %%gs:0x18, %0;\n roll $0x9, %0;" : "+r"(ret));
#endif
	return ret;
}
#endif

//this is used to atomically increase an integer variable
//Atomically increment *t by one.
//fix: the original issued a single __sync_bool_compare_and_swap(t, *t, *t+1).
//That is not an atomic increment: the two reads of *t are plain loads, and if
//another thread changes *t between the load and the CAS, the CAS simply fails
//and the increment is silently lost.  __sync_fetch_and_add is a true atomic
//read-modify-write.
void atomic_inc(int *t)
{
	__sync_fetch_and_add(t, 1);
}

#ifndef KERNEL_LEVEL_THREAD_EXTENSION
//Switch execution from the current thread to thread 'goal'.
//setjmp saves this thread's register state and returns 0 on the direct call,
//so we immediately longjmp into the target's saved context (passing 403 as
//the marker value).  This function only "returns" later, when some other
//thread longjmps back into the context we saved here (setjmp then yields a
//nonzero value and we fall through to the return).
void switch_context(thread_t goal)
{
	if(setjmp(threads_table[curr_thread]->context) == 0)
		longjmp(threads_table[goal]->context, 403);
	return;
}

#endif

//Debug helper: dump the ids of every tcb linked on the given list.
//'type' selects which embedded list node the entries hang off of.
void print_list(struct list_head * head, int type)
{
	printf("list_head: %p\n", head);

	struct list_head * cursor = NULL;
	list_for_each(cursor, head)
	{
		tcb_t * entry = NULL;
#ifndef KERNEL_LEVEL_THREAD_EXTENSION
		if (type == RUNNABLE_BLOCKED_LIST)
			entry = list_entry(cursor, tcb_t, runnable_blocked_list);
		else
#endif
		if (type == JOINED_THREADS_LIST)
			entry = list_entry(cursor, tcb_t, joined_threads_list);
		else
			assert(0);	//unknown list type
		printf("%d ", entry->id);
	}

	printf("\n");
}

//Debug helper: dump the ids of every tcb linked on the given hash list.
//'type' selects which embedded hlist node the entries hang off of.
void print_hlist(struct hlist_head * head, int type)
{
	printf("hlist_head: %p\n", head);

	struct hlist_node * cursor = NULL;
	hlist_for_each(cursor, head)
	{
		tcb_t * entry = NULL;
		if (type == MUTEX_HLIST)
			entry = hlist_entry(cursor, tcb_t, mutex_hlist);
		else if (type == COND_HLIST)
			entry = hlist_entry(cursor, tcb_t, cond_hlist);
		else
			assert(0);	//unknown hlist type
		printf("%d ", entry->id);
	}

	printf("\n");
}


#ifndef KERNEL_LEVEL_THREAD_EXTENSION

void on_timer(int sig);
//Arm a virtual (CPU-time) interval timer: SIGVTALRM fires after micro_secs
//microseconds and then repeats at the same interval; on_timer handles it.
void set_timer(int micro_secs)
{
	struct itimerval when;

	when.it_value.tv_sec = 0;
	when.it_value.tv_usec = micro_secs;
	when.it_interval = when.it_value;	//re-fire at the same period

	signal(SIGVTALRM, on_timer);
	setitimer(ITIMER_VIRTUAL, &when, NULL);
}

//SIGVTALRM handler, invoked when the timer armed by set_timer expires.
//If an ordinary thread is running, preempt it by yielding to the scheduler
//(which will re-arm the timer when it dispatches the next thread); if the
//scheduler itself was interrupted, just re-arm the timer and return.
//Note: after a non-scheduler thread resumes from uthread_yield, it also
//falls through to set_timer, re-arming the timer for its own time slice.
void on_timer(int sig)
{
	if (curr_thread != SCHEDULER)
		uthread_yield();

	set_timer(TIME_INTERVAL);
	return;
}
#endif

//Return the next unused thread ID (0 for the first caller, then 1, 2, ...).
//fix: the original called atomic_inc(&id) and then returned 'id' via a
//separate, non-atomic read -- two concurrent callers could both observe the
//same id.  __sync_add_and_fetch performs the increment and returns the new
//value as one atomic operation.
uthread_t next_thread_id()
{
	static uthread_t id = -1;
	return __sync_add_and_fetch(&id, 1);
}


#ifdef KERNEL_LEVEL_THREAD_EXTENSION
//Kernel-level build: the kernel schedules threads for us, so this
//"scheduler" thread has nothing to do and just spins forever.
//NOTE(review): this busy-wait burns one kernel thread's CPU time;
//presumably acceptable for this project -- confirm.
void * schedule(void *)
{
	while(1)
	{
		;
	}

	return 0;	//unreachable
}
#else
//User-level build: the main scheduler loop.  Picks the next runnable thread
//round-robin and longjmps into it; control re-enters this loop whenever a
//thread switch_context()s back to the SCHEDULER (longjmp value 403).
void * schedule(void *)
{
	while(1)
	{
		//only attempt a dispatch when at least one thread is runnable
		if (nr_runnable)	
		{
			//tcb of the thread that was running when we were entered
			tcb_t * curr_tcb = threads_table[curr_thread];
			assert(curr_tcb);
			tcb_t * next_tcb = NULL;

			list_head * next = NULL;
			assert(!list_empty(&runnable_list_head));
			
			switch(threads_table[curr_thread]->state)
			{
				case BLOCKED:
				case EXIT:
					//the previous thread left the runnable list
					//(blocked or exited), so round-robin position
					//is lost: restart from the head of the
					//runnable list.
					next = runnable_list_head.next;
					assert(next);
					next_tcb = list_entry(next, tcb_t,
								runnable_blocked_list);
					break;
				case RUNNABLE:
					//the previous thread is still on the runnable
					//list: take its successor, wrapping from the
					//last entry back to the first.
					if (list_is_last(
						&(curr_tcb->runnable_blocked_list),
						&(runnable_list_head)))
					// this is the last entry of the runnable list
						next = runnable_list_head.next;
					else
						next = 
						curr_tcb->runnable_blocked_list.next;
					
					assert(next);
					next_tcb  = list_entry(next, tcb_t, 
								runnable_blocked_list);
					break;
				default:
					assert(0);	//no other state may reach the scheduler
					break;
			}

			assert(next_tcb);

			//save the scheduler's own context.  setjmp returns 0 on
			//this direct call; it returns 403 when a thread later
			//switch_context()s back to us, in which case we loop
			//around and pick another thread.
			
			int ret;
			if((ret = setjmp(threads_table[SCHEDULER]->context)) == 403)
				continue;
			
			//dispatch: mark the chosen thread current, give it a
			//fresh time slice, and jump into its saved context.
			curr_thread = next_tcb->id;
			set_timer(TIME_INTERVAL);
			longjmp(threads_table[next_tcb->id]->context, 20);
		}
	}
}
#endif

//this function is the assisted function where a thread actually starts with
//this function tries to wrapper and invoke the user-defined function and then
//get its return value before it finally explicitly call uthread_exit to terminate
void * __assisted_start(void * uthread_id)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	void * ret = threads_table[(int)uthread_id]->start(
			threads_table[(int)uthread_id]->args);
	assert(uthread_id);  //this thread is not the main thread
	uthread_exit(ret);
	return ret;
#else
	return (threads_table[(int)uthread_id]->start)(
			threads_table[(int)uthread_id]->args);
#endif
}

//Implicit-exit thunk.  Its address is planted on every thread's initial
//stack as the "return address", so when a thread's start routine executes
//'ret', control arrives here.  Under the cdecl convention the routine's
//return value is still in %eax at that moment, so we copy it out of the
//register and pass it to uthread_exit as the thread's exit code.
//NOTE(review): this assumes the compiler emits no %eax-clobbering code
//between function entry and the asm -- fragile; confirm it holds at the
//optimization level this project is built with.
void __assisted_exit(void)
{
	int exit_code = 0;
	asm volatile ("movl %%eax, %0":
			"=r"(exit_code)::"%eax");
	uthread_exit((void *)exit_code);
}


//Prepare thread id's initial stack so that when the scheduler first jumps to
//__assisted_start, it finds 'para' as its argument and 'ret_addr' as its
//return address (cdecl layout: argument above the return address).
//Each 4-byte value is written from its most significant byte at the highest
//address down to its least significant byte, i.e. stored little-endian in
//memory -- this code assumes a 32-bit x86 target.
//Resulting layout: para at stack[STACKSIZE-4..STACKSIZE-1], ret_addr at
//stack[STACKSIZE-8..STACKSIZE-5]; __init_tcb later points SP at
//&stack[STACKSIZE-8].
void __init_stack(uthread_t id, void * para, void * ret_addr)
{
	assert(threads_table[id]);
	
	int i = 1, j = 3;
	//write the 4 bytes of the parameter at the top of the stack
	for(j = 3; j >= 0; --j, ++i)
		threads_table[id]->stack[STACKSIZE-i] = *((char *)&para + j);
		
	//write the 4 bytes of the return address just below it
	for(j = 3; j >= 0; --j, ++i)
		threads_table[id]->stack[STACKSIZE-i] = *((char *)&ret_addr + j);
	
	return;
}

//Initialize the fields of a thread control block: id, state, start routine,
//list nodes, and -- for user-level threads -- a fabricated setjmp context
//whose stack and program counter make the thread begin in __assisted_start.
//Registers the tcb in threads_table[id].  Always returns 0.
int __init_tcb(tcb_t * t, uthread_t  id, void* (*start)(void *), void *args)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thread_t thread_id;
#endif
	assert(id < MAX_NUM_THREADS);
	t->id = id;
	t->state = RUNNABLE;		//ready to be scheduled
	t->attr.joinable = 1;		//all threads are created joinable by default
	
	//records the start function and function arguments 
	//so that we can invoke the user-defined function
	//when we executing a new thread.
	//Main thread is a special case because it does not need any start function,
	//so its start is passed in as NULL.
	t->start = start;
	t->args = args;

	//Init all the list heads and list nodes embedded in the tcb
	INIT_LIST_HEAD(&(t->joined_threads_list_head));
	INIT_LIST_HEAD(&(t->joined_threads_list));
	INIT_HLIST_NODE(&(t->mutex_hlist));
	INIT_HLIST_NODE(&(t->cond_hlist));

	//add this newly-created tcb into threads_table as a new entry
	threads_table[id] = t;

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	//block all signals in this thread; threads created later inherit this
	//mask, so every thread sigwait()s rather than taking async signals
	sigfillset(&set);
	sigprocmask(SIG_BLOCK, &set, &old);
#else
	//capture a context template for this thread; the SP/PC slots are
	//patched below
	setjmp(t->context);

	//initialize the stack: the argument is the thread id (used by
	//__assisted_start to find its tcb) and the fabricated return address
	//is __assisted_exit (implicit uthread_exit when start returns)
	void * __assisted_arg = (void *)id;
	__init_stack(id, __assisted_arg, (void *)__assisted_exit);
	//point the saved stack pointer at the fabricated frame
	//(addresses in jmp_buf are mangled, hence xor_and_rol)
	((unsigned *)(t->context))[_SP] = xor_and_rol(&(t->stack[STACKSIZE-8]));
#endif
	//start != NULL means this is NOT the main thread: it needs its program
	//counter aimed at __assisted_start (user-level) or an actual kernel
	//thread created (kernel-level)
	if(start)
	{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		//create a kernel-level thread and record its id in the tcb
		thr_create(&(t->stack[STACKSIZE]), STACKSIZE, __assisted_start, (void *)(t->id), 0, &thread_id);
		t->kernel_thread_id = thread_id;
#else
		//set the saved program counter (mangled like the SP above)
		((unsigned *)(t->context))[_PC] = xor_and_rol((void *)__assisted_start);
#endif
	}
	else//main thread: already running, nothing to fabricate
	{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		t->kernel_thread_id = thr_self();
#else
#endif
	}

#ifndef KERNEL_LEVEL_THREAD_EXTENSION
	INIT_LIST_HEAD(&(t->runnable_blocked_list));
	//enqueue on the runnable list -- except the scheduler itself, which is
	//never scheduled round-robin
	if(id != SCHEDULER)
	{
		list_add_tail(&(t->runnable_blocked_list), &runnable_list_head);
		nr_runnable ++;
	}
	nr_threads++;
#else
	atomic_inc(&nr_threads);
#endif
	return 0;
}

/*	Creates a new thread that will begin executing the function indicated by
	the 'start' parameter, using arguments as specified by 'args'.  The first
	parameter will be updated to hold the id of the thread that was created.
	A return value of 0 indicates success, and -1 indicates failure.

	The very first call also bootstraps the library: it builds a tcb for the
	already-running main thread and spawns the scheduler thread.

	fix: the original leaked the malloc'd tcb whenever __init_tcb reported
	failure; each failure path now frees it before returning -1. */
int uthread_create(uthread_t * t, void* (*start)(void *), void * args)
{
	int ret = 0;
	
	if (nr_threads == 0)	//first call: set up main thread + scheduler
	{
		//create the tcb describing the (already running) main thread
		tcb_t * main_thread = (tcb_t *)malloc(sizeof(tcb_t));
		if(!main_thread)
		{
			printf("Creating thread failed! Cannot allocate memory for tcb of thread\n");
			return -1;
		}
		ret = __init_tcb(main_thread, next_thread_id(), NULL, NULL);
		if (ret == -1)
		{
			free(main_thread);	//don't leak the tcb on init failure
			return -1;
		}
		
		//create the scheduler thread
		tcb_t * scheduler = (tcb_t *)malloc(sizeof(tcb_t));
		if(!scheduler)
		{
			printf("Creating thread failed! Cannot allocate memory for tcb of thread\n");
			return -1;
		}
		ret = __init_tcb(scheduler, next_thread_id(), schedule, NULL);
		if (ret == -1)
		{
			free(scheduler);	//don't leak the tcb on init failure
			return -1;
		}
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		uthread_yield();
#else
		//jump to the scheduler and wait to be scheduled back here
		switch_context(SCHEDULER);
#endif
	}
	
	//allocate and initialize the tcb of the requested thread
	tcb_t * new_thread = (tcb_t *)malloc(sizeof(tcb_t));
	if(!new_thread)
	{
		printf("Creating thread failed! Cannot allocate memory for tcb of thread\n");
		return -1;
	}
	ret = __init_tcb(new_thread, next_thread_id(), start, args);
	if (ret == -1)
	{
		free(new_thread);	//don't leak the tcb on init failure
		return -1;
	}

	//report the new thread's id to the caller
	*t = new_thread->id;
	
	return 0;
}

//Transition thread c between RUNNABLE and BLOCKED, moving its node between
//the runnable and blocked lists accordingly (user-level build only; the
//kernel-level build just records the new state).  No-op if the thread is
//already in the requested state.
void switch_thread_state(uthread_t c, state_t s)
{
#ifndef KERNEL_LEVEL_THREAD_EXTENSION
	tcb_t * tcb = threads_table[c];
	struct list_head * node = &tcb->runnable_blocked_list;

	assert(s == RUNNABLE || s == BLOCKED);
	assert(tcb->state == RUNNABLE || tcb->state == BLOCKED);

	//already in the requested state: nothing to do
	if(tcb->state == s)
		return;

	if (s == BLOCKED)
	{
		//runnable -> blocked
		list_move_tail(node, &blocked_list_head);
		nr_runnable --;
		nr_blocked ++;
	}
	else if (s == RUNNABLE)
	{
		//blocked -> runnable
		list_move_tail(node, &runnable_list_head);
		nr_runnable ++;
		nr_blocked --;
	}
	else
	{
		assert(0);	//unreachable: guarded by the assert above
	}
#endif
	threads_table[c]->state = s;
}

/*
      Waits for the thread with thread_id of t to call uthread_exit.  If the
      thread has already exited, then uthread_join should not block.  If called
      twice for the same value of t, one call should have a return value
      indicating error, and the other a return value indicating success (e.g.,
      the value 0 on success, and -1 on failure)
*/
int uthread_join(uthread_t & t, void ** status)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	//kernel-level build: no global curr_thread, so resolve it locally
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);

	// the target thread does not exist, so this thread returns immediately without blocking
	if(threads_table[t] == NULL) 
		return -1;

	//the target thread has already exited before the join operation of this thread. 
	//so we pass the exit_code to the caller and return -1 to indicate abnormal states.
	if(threads_table[t] != NULL && threads_table[t]->state == EXIT)
	{
		if(status)
			*status = threads_table[t]->exit_code;
		return -1; 
	}

	assert(threads_table[curr_thread] != NULL);

	//link the caller onto the target thread's list of joiners;
	//uthread_exit walks this list to wake us and deliver the exit code
	struct list_head & l = threads_table[curr_thread]->joined_threads_list;
	struct list_head & h = threads_table[t]->joined_threads_list_head;
	list_add_tail(&l, &h);


	//block the caller: move it from the runnable list to the blocked list
	switch_thread_state(curr_thread, BLOCKED);
	
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	int signal;
	
	//wait for SIGUSR1, which uthread_exit sends to each joiner
	//(safe because every thread blocks all signals in 'set' at init)
	sigwait(&set, &signal);
	
	switch (signal)
	{
		case SIGUSR1:
			break;
		default:
			assert(0);	//only SIGUSR1 is expected to wake a joiner
			break;
	}
#else
	//jump to the scheduler; we resume here after the target thread exits
	//and moves us back to the runnable list
	switch_context(SCHEDULER);
#endif
	//resumed: the exiting thread stored its exit code in our 'message' field
	if(status)
		*status = threads_table[curr_thread]->message;
	return 0;
}


//Return the uthread id of the calling thread.
uthread_t uthread_self()
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	//Kernel-level build: map the kernel thread id back to a uthread id by
	//scanning the whole thread table.  O(MAX_NUM_THREADS) per call -- a
	//known inefficiency, to be addressed in a future version.
	thread_t me = thr_self();
	int i = 0;

	for (i = 0; i < MAX_NUM_THREADS; i++)
	{
		if(threads_table[i] && threads_table[i]->kernel_thread_id == me)
			return (uthread_t)i;
	}

	return (uthread_t)-1;	//no matching entry found
#else
	//User-level build: the scheduler maintains curr_thread for us, so we
	//simply hand that global back to the caller.
	return curr_thread;
#endif
}

/*
      When a thread calls uthread_exit(), it should terminate, in a manner that
      is compatible with the uthread_join() call.  Furthermore, the val_ptr
      value should be made available to a thread calling uthread_join.

      An implicit call to uthread_exit should be made if a thread returns from
      its start routine.  In that case, the function's val_ptr should indicate
      the return value of the start routine.

      -from the description of this project of CSE403.
*/

//Terminate the calling thread, publishing valptr to any joiners.
//If the caller is the main thread the whole process exits (all tcbs freed).
//fix: in the main-thread cleanup loop the original decremented nr_threads
//once per table slot -- including empty (NULL) slots -- driving nr_threads
//negative; the decrement now only happens for slots that held a thread.
void uthread_exit(void * valptr)
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	//kernel-level build: no global curr_thread, so resolve it locally
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);

	//Termination of the initial thread by uthread_exit or any thread
	//termination mechanism terminates the entire process. -- from IBM
	if (curr_thread == MAIN)
	{
		//free every remaining tcb in the table before the process exits
		int i = 0;
		for(i = 0; i < MAX_NUM_THREADS; i ++)
		{
			if(threads_table[i])
			{
				free(threads_table[i]);
				threads_table[i] = NULL;
				nr_threads --;	//count only slots that held a thread
			}
		}

		exit((int)valptr);
	}

	//notify every thread that has joined on me of my exit code
	struct list_head * p = NULL;
	list_for_each(p, &(threads_table[curr_thread]->joined_threads_list_head))
	{
		tcb_t * t = list_entry(p, tcb_t, joined_threads_list);

		//deliver my exit code to the joiner
		t->message = valptr;

		//move the joiner from the blocked list back to the runnable list
		switch_thread_state(t->id, RUNNABLE);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
		//wake the joiner, which is sigwait()ing on SIGUSR1
		thr_kill(t->kernel_thread_id, SIGUSR1);
#endif
	}
	
	//unlink myself from any join list I may be on
	list_del(&(threads_table[curr_thread]->joined_threads_list));
	
	//sever my remaining list connections; an exiting thread must still be
	//on the runnable list at this point
	assert (threads_table[curr_thread]->state == RUNNABLE);
#ifndef KERNEL_LEVEL_THREAD_EXTENSION
	list_del(&(threads_table[curr_thread]->runnable_blocked_list));
	nr_runnable --;
#endif
	//mark EXIT; the tcb itself is reclaimed only when the main thread
	//terminates (no cleanup by the scheduler)
	threads_table[curr_thread]->state = EXIT;
	
	//publish the exit code for threads that join after we are gone
	threads_table[curr_thread]->exit_code = valptr;

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thr_exit(0);
#else
	switch_context(SCHEDULER);	//never returns; scheduler runs another thread
#endif
}

/*
	When called from a thread T, this function indicates to the scheduler
	that T may be de-scheduled, so that another thread may be scheduled.  If
	there is no other available thread, then T will continue running.
	Otherwise, the implementation *must* switch to another thread.
*/
void uthread_yield()
{
#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thread_t curr_thread = uthread_self();
#endif
	assert(curr_thread >= 0 && curr_thread < MAX_NUM_THREADS);
	assert(threads_table[curr_thread]);

#ifdef KERNEL_LEVEL_THREAD_EXTENSION
	thr_yield();
#else
	//nr_runnable includes the caller; with nobody else runnable there is
	//no thread to switch to, so just keep running.  Otherwise hand control
	//to the scheduler, which must pick a different thread.
	if (nr_runnable > 1)
		switch_context(SCHEDULER);
#endif
	return;
}

