#include "proc.h"
#include "slab.h"
#include "heap.h"
#include "arch/pgd.h"
#include "arch/cpu.h"
#include "arch/atomic.h"
#include "kernel/time.h"

// test in win32: map kernel primitives onto host equivalents
#ifdef TEST_DBG_WIN32
#include <stdio.h>
// BUGFIX: parenthesize the macro argument; the original expanded
// vmalloc(a + b) as malloc(a + b * PAGE_SZ)
#define vmalloc(s)	malloc((s) * PAGE_SZ)
#undef thread_switch_to
#define thread_switch_to(prev, next) do {} while (0)
#endif

// means that there is a pending
// schedule request (raised while the scheduler is paused,
// serviced later by resume_sched())
#define SCHED_FLAG_SCHED_PENDING		(1)

// scheduler data structure
// a single global instance (proc_sdata) holds all scheduler
// bookkeeping; fields are grouped by the lock that guards them
typedef struct proc_static_data
{
	// static data definition (for task)
	// guarded by task_spinlock
	uint			taskcnt;					// number of live tasks
	uint			maxtasks;					// limit (MAX_TASKS)
	listnode_t		task_list;					// all tasks, linked via task_t.ownerlist
	kmem_cache_t*	task_cache;					// slab cache for task_t objects
	avl_node_t*		task_avl_head;				// tasks indexed by pid
	spinlock_t		task_spinlock;

	// static data definition (for thread)
	// guarded by thd_spinlock
	uint			threadcnt;					// number of live threads
	uint			maxthreads;					// limit (MAX_THREADS)
	avl_node_t*		thd_avl_head;				// threads indexed by tid
	spinlock_t		thd_spinlock;

	// data defined for schedule
	// everything below is guarded by this lock
	spinlock_t		spinlock;

	uint			sched_flags;					// SCHED_FLAG_* bits
	listnode_t		other_thd_list;					// for SCHED_OTHER
	sched_node_t	pri_tbl[THD_PRIORITY_MAX];		// per-priority FIFO/RR queues
	uint			active_pri_mask[THD_PRIORITY_MAX / 32 + 1];	// bit set => queue non-empty

	// for threads that is not active
	listnode_t		interruptable_list;
	listnode_t		uninterruptable_list;
	listnode_t		creation_list;
	listnode_t		zombie_list;
}
proc_static_data_t;

// the thread currently owning the CPU; stays NULL until
// create_kernel_task() installs the first kernel thread
thread_t* current_thd = NULL;
// global preempt-disable depth; -1 means no lock is held and
// scheduling is allowed (see is_sched_paused below)
atomic_t spinlock_cnt = ATOMIC_INIT(-1);
static proc_static_data_t* proc_sdata = NULL;

#define is_sched_paused()	(atomic_read(&spinlock_cnt) > -1)
// NOTE(review): these counters are bumped without any lock here;
// callers appear expected to hold the matching spinlock -- verify
#define INC_TASK_COUNT()	do { proc_sdata->taskcnt++; } while (0)
#define INC_THREAD_COUNT()	do { proc_sdata->threadcnt++; } while (0)


// If a schedule request was deferred while the scheduler was
// paused, clear the pending flag and run the scheduler now.
static void resume_sched(void)
{
	if (!(proc_sdata->sched_flags & SCHED_FLAG_SCHED_PENDING))
		return;

	proc_sdata->sched_flags &= ~SCHED_FLAG_SCHED_PENDING;
	schedule();
}

// spinlock implementation for single core
// Raise the preempt-disable count held through "lock".
// Counters start at -1 (see ATOMIC_INIT(-1) above); the inc-and-test
// fires on the -1 -> 0 transition, i.e. the outermost acquisition,
// and only then is the global spinlock_cnt raised to pause scheduling.
void disable_preempt(spinlock_t *lock)
{
	uint flags;
	// interrupts off while both counters are updated
	local_irq_save(flags);

	if (atomic_inc_and_test((atomic_t*)lock))
		atomic_inc(&spinlock_cnt);
	local_irq_restore(flags);
}

// Drop one level of the preempt-disable count held through "lock".
// When the per-lock count falls back to -1 (outermost release) the
// global spinlock_cnt is dropped too; once that also returns to -1
// the scheduler is unpaused and any schedule request deferred while
// locked is serviced via resume_sched().
void enable_preempt(spinlock_t *lock)
{
	uint flags;
	local_irq_save(flags);

	if (atomic_add_negative(-1, (atomic_t*)lock))
	{
		// the counters must never drop below the idle value of -1
		kassert(atomic_read((atomic_t*)lock) >= -1);
		if (atomic_add_negative(-1, &spinlock_cnt))
		{
			kassert(atomic_read(&spinlock_cnt) >= -1);
			// only meaningful once a current thread exists
			if (current) resume_sched();
		}
	}
	local_irq_restore(flags);
}

// Ordering callback for the task AVL tree: tasks sort by pid.
// Returns <0, 0 or >0 in the usual comparator convention.
static int task_avl_compare(avl_node_t *fst, avl_node_t *snd)
{
	pid_t lhs = AVLNODE_ENTRY(task_t, avlnode, fst)->pid;
	pid_t rhs = AVLNODE_ENTRY(task_t, avlnode, snd)->pid;

	if (lhs == rhs)
		return 0;
	return (lhs < rhs) ? -1 : 1;
}

// Ordering callback for the thread AVL tree: threads sort by tid.
// Returns <0, 0 or >0 in the usual comparator convention.
static int thread_avl_compare(avl_node_t *fst, avl_node_t *snd)
{
	tid_t lhs = AVLNODE_ENTRY(thread_t, avlnode, fst)->tid;
	tid_t rhs = AVLNODE_ENTRY(thread_t, avlnode, snd)->tid;

	if (lhs == rhs)
		return 0;
	return (lhs < rhs) ? -1 : 1;
}

// Produce the next candidate pid (monotonic, wraps around, never 0).
// Uniqueness is NOT guaranteed here -- pid_alloc() checks the AVL tree.
// FIX: return pid_t instead of uint so the declared type matches the
// value actually produced (avoids silent truncation if pid_t widens).
static pid_t _new_pid(void)
{
	static pid_t pid_magic = 1;
	pid_t tmp = pid_magic++;
	if (!pid_magic) pid_magic = 1;	// skip 0 on wrap-around
	return tmp;
}

// Produce the next candidate tid (monotonic, wraps around, never 0).
// Uniqueness is NOT guaranteed here -- tid_alloc() checks the AVL tree.
// FIX: return tid_t instead of uint so the declared type matches the
// value actually produced (avoids silent truncation if tid_t widens).
static tid_t _new_tid(void)
{
	static tid_t tid_magic = 1;
	tid_t tmp = tid_magic++;
	if (!tid_magic) tid_magic = 1;	// skip 0 on wrap-around
	return tmp;
}

// Allocate a pid that no existing task owns.
// Keeps drawing candidates from _new_pid() until one is absent from
// the task AVL tree; the scan runs under task_spinlock.
static pid_t pid_alloc(void)
{
	pid_t candidate;

	spin_lock(&(proc_sdata->task_spinlock));

	do
	{
		candidate = _new_pid();
	}
	while (avl_find(proc_sdata->task_avl_head,
		MAKE_FIND_OBJECT(candidate, task_t, pid, avlnode),
		task_avl_compare));

	spin_unlock(&(proc_sdata->task_spinlock));
	return candidate;
}

static tid_t tid_alloc(void)
{
	tid_t now;

	// need lock
	spin_lock(&(proc_sdata->thd_spinlock));

	// make sure the pid is never used
	for (now = _new_tid();
		avl_find(proc_sdata->thd_avl_head, MAKE_FIND_OBJECT(now, thread_t, tid, avlnode),
		task_avl_compare); now = _new_tid());

	// unlock
	spin_unlock(&(proc_sdata->thd_spinlock));
	return now;
}

// Make sure "tsk" owns a page directory.
// The kernel task shares the boot-time page_root_dir; a user task
// gets a freshly allocated, zeroed page linked into the kernel
// mapping via link_task_pgd(). Returns false only when the page
// allocation fails; an already-present directory is kept as-is.
static bool create_task_page_dir(task_t *tsk, bool krnltask)
{
	if (NULL != tsk->mm.page_dir)
		return true;

	// if this is the kernel task?
	if (krnltask)
		tsk->mm.page_dir = page_root_dir;
	else
	{
		uint* page_dir = (uint*)vmalloc(1);
		if (!page_dir) return false;

		// BUGFIX: zero the whole page; the original passed
		// sizeof(PAGE_SZ), which is sizeof(int), leaving almost the
		// entire directory uninitialized
		memset(page_dir, 0, PAGE_SZ);
		link_task_pgd(page_dir);
		tsk->mm.page_dir = page_dir;
	}
	return true;
}

// Create a new (user) task.
// NOTE(review): not implemented yet -- always returns NULL; only the
// kernel task (create_kernel_task) exists so far.
task_t* create_task(void)
{
	return NULL;
}

// thread operations

// this function move the specific thread to the
// tail of current schedule list (round-robin rotation)
// this function is not locked -- the caller must hold
// proc_sdata->spinlock
static void sched_thread_move_to_tail(thread_t* thd)
{
	// nothing to rotate if the thread is not runnable or is not
	// currently linked into any schedule list
	if (THD_STAT_RUNNING != thd->state || listnode_issingle(thd->sched_list))
		return;

	listnode_del(thd->sched_list);
	if (thd->flags & THD_SCHED_POLICY_OTHER)
		listnode_add(proc_sdata->other_thd_list, thd->sched_list);
	else
	{
		// pri_tbl is indexed with inverted priority so that higher
		// thread priorities occupy lower table slots
		uint pri = THD_PRIORITY_MAX - thd->priority - 1;
		if (thd->flags & THD_SCHED_POLICY_FIFO)
			listnode_add(proc_sdata->pri_tbl[pri].fifo_list,\
				thd->sched_list);
		else if (thd->flags & THD_SCHED_POLICY_RR)
			listnode_add(proc_sdata->pri_tbl[pri].rr_list,	\
				thd->sched_list);
	}
}

// Pick the next thread to run.
// Scans active_pri_mask byte by byte, lowest bit (highest thread
// priority, see the inverted pri_tbl indexing) first; within a
// priority slot the FIFO queue beats the RR queue. SCHED_OTHER
// threads are only considered when every priority queue is empty.
// Returns NULL when nothing is runnable. Caller must hold
// proc_sdata->spinlock.
static thread_t* sched_get_pending_thread(void)
{
	uint i;
	// total number of bytes in the priority bitmask
	const uint total = (THD_PRIORITY_MAX / 32 + 1) * sizeof(uint);
	unsigned char* msk = (unsigned char*)proc_sdata->active_pri_mask;

	for (i = 0; i < total; ++i)
	{
		uint j, mskbit = 1, data = *msk++;
		if (!data) continue;	// no active priority in this byte

		for (j = 0; j < 8; ++j, mskbit <<= 1)
		{
			if (data & mskbit)
			{
				uint pri = i * 8 + j;
				sched_node_t* sched_nd = &proc_sdata->pri_tbl[pri];
				if (!listnode_isempty(sched_nd->fifo_list))
				{
					listnode_t* next = sched_nd->fifo_list.next;
					return list_entry(thread_t, sched_list, next);
				}
				else if (!listnode_isempty(sched_nd->rr_list))
				{
					listnode_t* next = sched_nd->rr_list.next;
					return list_entry(thread_t, sched_list, next);
				}
			}
		}
	}

	// looking for other list
	if (!listnode_isempty(proc_sdata->other_thd_list))
	{
		listnode_t *next = proc_sdata->other_thd_list.next;
		return list_entry(thread_t, sched_list, next);
	}
	return NULL;
}

// Does the running thread carry a pending reschedule request?
static bool need_resched(void)
{
	if (!current)
		return false;
	return (current->flags & THD_FLAG_NEED_RESCHED) ? true : false;
}

// Service a pending reschedule request, typically on the way out of
// an interrupt. If the scheduler is paused (a spinlock is held) the
// request is converted into a SCHED_PENDING flag that resume_sched()
// will honor when the last lock drops.
void handle_sched(void)
{
	if (need_resched())
	{
		if (is_sched_paused())
		{
			// clear the need_sched flag
			current->flags &= ~THD_FLAG_NEED_RESCHED;
			// and set a pending flag
			proc_sdata->sched_flags |= SCHED_FLAG_SCHED_PENDING;
			return;
		}

		// We are going to enable the interrupt, since a schedule
		// may switch to other thread, thus leads to a long time
		// before we switch back here again
		local_irq_enable();
		schedule();
	}
}

// Mark the running thread as needing a reschedule; the switch itself
// happens later in handle_sched()/schedule().
// Returns false when no thread is running yet.
bool request_resched(void)
{
	if (current)
	{
		current->flags |= THD_FLAG_NEED_RESCHED;
		return true;
	}
	return false;
}

// Pick the next runnable thread and switch to it.
// Refuses to run in interrupt context. Selection happens under the
// scheduler lock; the context switch itself happens after unlocking.
void schedule(void)
{
	thread_t *prev, *curr;

	// switching contexts from inside an ISR is not allowed
	if (in_interrupt())
		return;

	// select next thread
	spin_lock(&(proc_sdata->spinlock));

	// scheduling is a no-op until the first thread is installed
	if (!current)
	{
		spin_unlock(&(proc_sdata->spinlock));
		return;
	}

	prev = current;
	prev->flags &= ~THD_FLAG_NEED_RESCHED;

	// round-robin: requeue the running thread at the tail of its
	// list, then take the best pending thread
	sched_thread_move_to_tail(current);
	curr = sched_get_pending_thread();

	spin_unlock(&(proc_sdata->spinlock));

	// if we need schedule (nothing else runnable, or we picked
	// ourselves again: no switch)
	if (!curr || prev == curr)
		return;

	// switch the context
	++kstat.content_switch;
	thread_switch_to(&(prev->arch_data), &(curr->arch_data));
}

// Attach a scheduling policy to a thread and clamp its priority.
// SCHED_OTHER threads always get priority 0; RR/FIFO threads get a
// priority in [1, THD_PRIORITY_MAX - 1].
// this function is not locked
static void set_thread_sched_policy(thread_t* thd, uint policy, uint pri)
{
	uint effective = pri;

	if (policy == THD_SCHED_POLICY_OTHER)
		effective = 0;
	else if (effective == 0)
		effective = 1;
	else if (effective >= THD_PRIORITY_MAX)
		effective = THD_PRIORITY_MAX - 1;

	thd->flags |= policy;
	thd->priority = effective;
}

// init the thread structure: assign a fresh tid, register it in the
// thread AVL tree, park it on the creation list and attach it to its
// owning task.
// this function is not locked
// NOTE(review): on avl_insert failure the allocated tid is simply
// abandoned and nothing is rolled back -- confirm that is intended.
static bool init_thread_object(thread_t *thd, task_t *tsk)
{
	kassert(NULL != thd && NULL != tsk);

	thd->tid = tid_alloc();
	if (avl_insert(&(proc_sdata->thd_avl_head), &(thd->avlnode), thread_avl_compare))
		return false;

	// initialize the object
	listnode_add(proc_sdata->creation_list, thd->sched_list);
	thd->task = tsk;

	// initialize the object flags
	// by default, a thread is set as joinable
	thd->flags = THD_FLAG_JOINABLE;
	thd->parents = current;
	thd->priority = 0;
	thd->state = THD_STAT_UNKNOWN;
	
	// add this thread to the task
	listnode_add(tsk->thdlist, thd->ownerlist);
	++tsk->thdcnt;

	return true;
}

// Move "thd" into the schedule list that matches "newstate".
// The thread is first unlinked from whatever list it is on (clearing
// the priority mask bit when its queue pair drains), then enqueued on
// the list for the new state. "preempt" puts the thread at the head
// of its new list instead of the tail where that matters.
// Returns false for a NULL thread or an invalid transition to
// THD_STAT_RUNNING; returns true when the state was applied (or was
// already the requested one).
bool set_thread_state(thread_t *thd, uint newstate, bool preempt)
{
	if (NULL == thd)
		return false;
	if (thd->state == newstate)
		return true;

	// a thread may only become runnable with a valid policy and an
	// in-range priority
	if (THD_STAT_RUNNING == newstate
		&& (!(thd->flags & THD_SCHED_POLICY_MASK)
		|| thd->priority >= THD_PRIORITY_MAX))
		return false;

	// need lock
	// since we operate on sched_list
	spin_lock(&(proc_sdata->spinlock));
	if (!listnode_issingle(thd->sched_list))
	{
		listnode_del(thd->sched_list);
		if (thd->flags & (THD_SCHED_POLICY_RR | THD_SCHED_POLICY_FIFO))
		{
			uint pri = THD_PRIORITY_MAX - thd->priority - 1;
			if (listnode_issingle(proc_sdata->pri_tbl[pri].fifo_list)
				&& listnode_issingle(proc_sdata->pri_tbl[pri].rr_list))
			{
				// last runnable thread of this priority left:
				// clear the mask bit
				proc_sdata->active_pri_mask[pri / 32] &= ~(1 << (pri % 32));
			}
		}
	}

	thd->state = newstate;

	switch (newstate)
	{
	case THD_STAT_RUNNING: {
		if (thd->flags & (THD_SCHED_POLICY_RR | THD_SCHED_POLICY_FIFO))
		{
			uint pri = thd->priority;
			if (!pri)
			{
				// BUGFIX: release the scheduler lock before bailing
				// out; the original returned here while still
				// holding proc_sdata->spinlock
				spin_unlock(&(proc_sdata->spinlock));
				return false;
			}

			kassert(pri < THD_PRIORITY_MAX);
			pri = THD_PRIORITY_MAX - pri - 1;

			// add the thread to active list
			if (thd->flags & THD_SCHED_POLICY_FIFO)
			{
				if (preempt)
					listnode_insertfirst(proc_sdata->pri_tbl[pri].fifo_list, thd->sched_list);
				else listnode_add(proc_sdata->pri_tbl[pri].fifo_list, thd->sched_list);
			}
			else
			{
				if (preempt)
					listnode_insertfirst(proc_sdata->pri_tbl[pri].rr_list, thd->sched_list);
				else listnode_add(proc_sdata->pri_tbl[pri].rr_list, thd->sched_list);
			}
			// set this priority active
			proc_sdata->active_pri_mask[pri / 32] |= 1 << (pri % 32);
		}
		else
		{
			if (preempt)
				listnode_insertfirst(proc_sdata->other_thd_list, thd->sched_list);
			else listnode_add(proc_sdata->other_thd_list, thd->sched_list);
		}
	}	break;

	case THD_STAT_INTERRUPTABLE:
		if (preempt)
			listnode_insertfirst(proc_sdata->interruptable_list, thd->sched_list);
		else listnode_add(proc_sdata->interruptable_list, thd->sched_list);
		break;

	case THD_STAT_UNINTERRUPTABLE:
		if (preempt)
			listnode_insertfirst(proc_sdata->uninterruptable_list, thd->sched_list);
		else listnode_add(proc_sdata->uninterruptable_list, thd->sched_list);
		break;

	case THD_STAT_CREATION:
		listnode_add(proc_sdata->creation_list, thd->sched_list);
		break;

	case THD_STAT_ZOMBIE:
		listnode_add(proc_sdata->zombie_list, thd->sched_list);
		break;

	default: break;
	}

	// unlock
	spin_unlock(&(proc_sdata->spinlock));
	return true;
}

// Put the calling thread to sleep, interruptable or not, and give up
// the CPU immediately. "*thd" (when non-NULL) receives the sleeping
// thread, or NULL when no thread is running yet.
void thread_sleep(thread_t** thd, bool interruptable)
{
	thread_t* sleeper = NULL;

	if (current)
	{
		uint state = interruptable
			? THD_STAT_INTERRUPTABLE : THD_STAT_UNINTERRUPTABLE;
		sleeper = current;
		set_thread_state(sleeper, state, false);
	}
	if (thd) *thd = sleeper;

	// hand the CPU to somebody else right away
	if (sleeper) schedule();
}

// Wake a sleeping thread and make it runnable.
// "flags" selects which sleep states may be woken
// (THD_OPE_WAKEUP_INTERRUPTABLE / _UNINTERRUPTABLE), whether the
// thread jumps the queue (_PREEMPT) and whether to reschedule right
// away (_SYNCHRONOUS). Returns true when the thread is (already)
// running afterwards, false when it cannot be woken.
bool thread_wakeup(thread_t* thd, uint flags)
{
	uint state;
	bool preempt;

	if (NULL == thd || listnode_issingle(thd->sched_list))
		return false;

	state = thd->state;
	if (THD_STAT_RUNNING == state)
		return true;

	switch (state)
	{
	case THD_STAT_INTERRUPTABLE:
		if (!(flags & THD_OPE_WAKEUP_INTERRUPTABLE))
			return false;
		break;

	case THD_STAT_UNINTERRUPTABLE:
		if (!(flags & THD_OPE_WAKEUP_UNINTERRUPTABLE))
			return false;
		break;

	default:
		// any other state cannot be woken
		return false;
	}

	preempt = (flags & THD_OPE_WAKEUP_PREEMPT) ? true : false;
	set_thread_state(thd, THD_STAT_RUNNING, preempt);

	if (flags & THD_OPE_WAKEUP_SYNCHRONOUS)
		schedule();
	return true;
}

// set the detach state of a thread
// Detaching clears the joinable flag and wakes the parent (which may
// be blocked waiting to join); attaching just restores the flag.
// Returns 0 on success, ERR_BAD_PARAMETERS for a NULL thread.
int thread_setdetachstate(thread_t* thd, bool detach)
{
	if (NULL == thd)
		return ERR_BAD_PARAMETERS;

	if (detach)
	{
		thd->flags &= ~THD_FLAG_JOINABLE;
		thread_wakeup_interruptable(thd->parents);
	}
	else
		thd->flags |= THD_FLAG_JOINABLE;

	return 0;
}

// Build the very first (idle) thread on top of the static idle
// stack. The thread is detached, scheduled as SCHED_OTHER and placed
// on the run queue immediately. Returns NULL on any failure.
static thread_t* init_kernel_thread(task_t* tsk)
{
	thread_t* thd = (thread_t*)idle_thread_stack;

	if (!init_thread_object(thd, tsk))
		return NULL;
	if (0 != thread_setdetachstate(thd, true))
		return NULL;

	set_thread_sched_policy(thd, THD_SCHED_POLICY_OTHER, 0);
	if (!set_thread_state(thd, THD_STAT_RUNNING, false))
		return NULL;

	INC_THREAD_COUNT();
	return thd;
}

// Terminate the calling thread: mark it zombie, wake its parent when
// joinable, then schedule away (this call does not return for the
// exiting thread). "errcode" is currently unused.
// NOTE(review): set_thread_state() takes proc_sdata->spinlock again
// while it is already held here; with the counting single-core lock
// above the nesting is benign, but verify if spin_lock ever becomes
// a non-recursive lock.
void thread_exit(int errcode)
{
	thread_t *p, *thd = current;
	if (NULL == thd) return;

	// need lock
	spin_lock(&(proc_sdata->spinlock));
	set_thread_state(thd, THD_STAT_ZOMBIE, false);

	// wakeup the parents if needed
	if (thd->flags & THD_FLAG_JOINABLE)
	{
		p = thd->parents;
		if (p && p->state == THD_STAT_INTERRUPTABLE
			&& !listnode_issingle(p->sched_list))
			set_thread_state(p, THD_STAT_RUNNING, false);
	}
	spin_unlock(&(proc_sdata->spinlock));
	schedule();
}

// Common entry point every new thread starts in (installed by
// init_thread_arch_data in thread_create): invokes the registered
// start_routine with its argument, then exits the thread with the
// routine's return value.
static void thread_entry(void)
{
	int ret = 0;
	kassert(NULL != current);

	if (NULL == current->start_routine)
		thread_exit(ret);

	// if this is a kernel thread
	// (kernel code lives above KERNEL_BASE)
	if (((uint)current->start_routine) > KERNEL_BASE)
		ret = current->start_routine(current->arguments);
	else
	{
		// todo:
		// user space thread func
	}

	thread_exit(ret);
}

static thread_t* alloc_thread_object(void)
{
	int i;
	thread_t* ret = NULL;

	// try to allocate thread object from
	// a zombie thread object
	spin_lock(&(proc_sdata->spinlock));
	if (!listnode_isempty(proc_sdata->zombie_list))
	{
		listnode_t *node = proc_sdata->zombie_list.next;
		ret = list_entry(thread_t, sched_list, node);
	}
	spin_unlock(&(proc_sdata->spinlock));

	if (NULL == ret)
		ret = (thread_t*)vmalloc(KRNL_THREAD_STACK_PAGES);
	if (NULL == ret) return ret;

	// make sure all pages accessable
	for (i = 0; i < KRNL_THREAD_STACK_PAGES; ++i)
		((uint*)ret)[i * PAGE_SZ / sizeof(uint)] = 0;

	return ret;
}

// Create a kernel thread running "start_routine(arg)".
// The thread object is allocated (or recycled from a zombie),
// registered, and its architecture context is pointed at
// thread_entry with the stack top at the end of the allocation.
// On success 0 is returned and "*pthd" (when non-NULL) receives the
// new thread; the thread still has to be made runnable by the caller.
// NOTE(review): the threadcnt limit check is done without a lock, so
// concurrent creators can briefly exceed maxthreads -- verify.
int thread_create(thread_t** pthd, thdfunc_t start_routine, void* arg)
{
	thread_t* thd;

	if (!current) return ERR_LOGIC;

	if (NULL == start_routine)
		return ERR_BAD_PARAMETERS;

	if (proc_sdata->threadcnt >= proc_sdata->maxthreads)
		return ERR_RESLIMIT;

	thd = alloc_thread_object();
	if (NULL == thd) return ERR_NOMEMORY;

	kassert(NULL != current->task);
	if (!init_thread_object(thd, current->task))
	{
		// NOTE(review): "thd" may be a recycled zombie stack rather
		// than a fresh vmalloc block -- confirm vfree is the right
		// release path in that case
		vfree((void*)thd, KRNL_THREAD_STACK_PAGES);
		return ERR_LOGIC;
	}

	// save the address of start_routine and arg
	thd->start_routine = start_routine;
	thd->arguments = arg;

	// set the stack pointer and thread entry
	// note that thread entry is always pointed to
	// thread_entry since the real start_routine will
	// be called inside the thread_entry function
	init_thread_arch_data(thd->arch_data, \
		(((uint)thd) + (KRNL_THREAD_STACK_PAGES * PAGE_SZ)), \
		(uint)thread_entry);

	dbg_output3("thread (tid:%u) created at 0x%x\n", thd->tid, thd);
	INC_THREAD_COUNT();
	if (pthd) *pthd = thd;
	return 0;
}

// Hook the kernel task's filesystem context up: both its root and
// its working directory start at the root of the root filesystem.
static bool init_kernel_task_fs(task_t* task)
{
	task->fs_data.pwd = rootfs.root;
	task->fs_data.root = rootfs.root;
	return true;
}

// Create the kernel task and its first (idle) thread, and install
// that thread as "current". Returns 0 on success or an ERR_* code.
// NOTE(review): the error paths return without releasing the slab
// object obtained from task_cache -- possible leak, verify whether
// kmem_cache_free should be called here.
static int create_kernel_task(void)
{
	thread_t* thd;
	task_t *task = (task_t*)kmem_cache_alloc(proc_sdata->task_cache);
	if (NULL == task) return ERR_NOMEMORY;

	// use the default page dir
	if (!create_task_page_dir(task, true))
		return ERR_LOGIC;

	// init the filesystem
	if (!init_kernel_task_fs(task))
		return ERR_LOGIC;

	// activate the task
	listnode_add(proc_sdata->task_list, task->ownerlist);
	if (avl_insert(&(proc_sdata->task_avl_head), &(task->avlnode), task_avl_compare))
		return ERR_LOGIC;

	INC_TASK_COUNT();

	// init the first thread
	thd = init_kernel_thread(task);
	if (NULL == thd)
		bug("fail to create the kernel thread.\n");

	// activate current thread
	// after this, the time slice based scheduling
	// will start immediately
	current_thd = thd;

	dbg_output2("kernel task (pid/tid:[%u/%u]) created.\n",\
		get_current()->task->pid, get_current()->tid);
	return 0;
}

// Prepare a task's memory descriptor. The caller guarantees "mm" was
// already zeroed; the page directory and userspace area are set up
// lazily when first needed, only the shared kernel VMA manager is
// wired in here.
static void init_mm_stru(mm_struct_t* mm)
{
	kassert(mm != NULL);

	mm->krnlspace = &kernel_vma_mgr;
}

// Slab constructor for task_t objects: zero the object, set up its
// thread list, hand it a fresh pid and prepare its memory
// descriptor. Runs when the cache populates a new object.
static void task_stru_ctor(kmem_cache_t* mcache, void* p, size_t sz)
{
	task_t *task = (task_t*)p;

	memset(p, 0, sz);
	listnode_init(task->thdlist);
	task->pid = pid_alloc();
	init_mm_stru(&(task->mm));
}

// Slab destructor for task_t objects. A task may only be destroyed
// once every thread has left it; the object is unlinked from the
// global task list if it is still chained in.
static void task_stru_dtor(kmem_cache_t* mcache, void* p, size_t sz)
{
	task_t* task = (task_t*)p;

	kassert(listnode_isempty(task->thdlist));

	if (!listnode_issingle(task->ownerlist))
		listnode_del(task->ownerlist);

	// todo
}

// Allocate and initialize the global scheduler state (proc_sdata):
// task bookkeeping, thread bookkeeping, the priority queues and all
// of the inactive-thread lists. Called once at boot; failures here
// are fatal (kassert).
static void init_proc_sdata(void)
{
	uint i;

	// one page is assumed to be large enough for proc_static_data_t
	proc_sdata = (proc_static_data_t *)vmalloc(1);
	kassert(NULL != proc_sdata);

	init_spinlock(&(proc_sdata->spinlock));

	// static data definition (for task)
	proc_sdata->taskcnt = 0;
	proc_sdata->maxtasks = MAX_TASKS;
	listnode_init(proc_sdata->task_list);

	// slab cache sized so the objects fill two pages
	proc_sdata->task_cache = kmem_cache_create("task_cache", sizeof(task_t),
		(PAGE_SZ * 2 / sizeof(task_t)) - 1, 0,
		task_stru_ctor, task_stru_dtor);
	kassert(NULL != proc_sdata->task_cache);

	proc_sdata->task_avl_head = NULL;
	init_spinlock(&(proc_sdata->task_spinlock));

	// static data definition (for thread)
	proc_sdata->threadcnt = 0;
	proc_sdata->maxthreads = MAX_THREADS;
	proc_sdata->thd_avl_head = NULL;
	init_spinlock(&(proc_sdata->thd_spinlock));

	// initialization of static data for schedule
	proc_sdata->sched_flags = 0;
	memset(&(proc_sdata->active_pri_mask), 0,
		(THD_PRIORITY_MAX / 32 + 1) * sizeof(uint));

	listnode_init(proc_sdata->other_thd_list);
	listnode_init(proc_sdata->interruptable_list);
	listnode_init(proc_sdata->uninterruptable_list);
	listnode_init(proc_sdata->creation_list);
	listnode_init(proc_sdata->zombie_list);

	// every priority slot starts with empty FIFO and RR queues
	for (i = 0; i < THD_PRIORITY_MAX; ++i)
	{
		sched_node_t *node = &(proc_sdata->pri_tbl[i]);
		listnode_init(node->fifo_list);
		listnode_init(node->rr_list);
	}
}

#include "semaphore.h"
DECLARE_MUTEX(sem_tst);

// Scheduler smoke-test thread body: grab the test mutex, busy-spin
// for 500 jiffies counting iterations, report, release.
static int rot(void *arg)
{
	extern volatile uint jiffies;
	uint deadline;
	int spins = 0;

	sem_enter(&sem_tst);
	dbg_output1("enter thread: %x\n", current);

	deadline = jiffies + 500;
	while (jiffies < deadline)
		++spins;

	dbg_output1("%x finished, val = %u\n", current, spins);
	sem_exit(&sem_tst);
	return 0;
}

// global initialization
// Boot the process module: build the scheduler state, create the
// kernel task with its idle thread, start two test threads and
// enable the scheduler.
void global_init_proc_module(void)
{
	// BUGFIX: initialize to NULL; thread_create only writes *pthd on
	// success, and the original dereferenced the uninitialized
	// pointers below when creation failed
	thread_t *thd1 = NULL, *thd2 = NULL;

	// init the data structure for scheduler
	init_proc_sdata();

	// create the kernel task and it's thread
	if (create_kernel_task())
		bug("fail to create kernel task.\n");

	if (thread_create(&thd1, rot, NULL))
		dbg_output1("error in thread_create1\n");
	if (thread_create(&thd2, rot, NULL))
		dbg_output1("error in thread_create2\n");

	if (thd1)
	{
		set_thread_sched_policy(thd1, THD_SCHED_POLICY_OTHER, 0);
		if (!set_thread_state(thd1, THD_STAT_RUNNING, false))
			dbg_output1("error in set_thread_state1\n");
	}
	if (thd2)
	{
		set_thread_sched_policy(thd2, THD_SCHED_POLICY_OTHER, 0);
		if (!set_thread_state(thd2, THD_STAT_RUNNING, false))
			dbg_output1("error in set_thread_state2\n");
	}

	// enable the task scheduler
	sched_enable();
}

// test in win32: host-side stubs and fake kernel globals so the
// scheduler code above links and runs outside the kernel
#ifdef TEST_DBG_WIN32

#include <stdio.h>

// stub: timers are not exercised in the host test build
bool add_timer(timer_t *timer) { return true; }

kernel_stat_t kstat;
unsigned int volatile jiffies;
irq_cpu_stat_t irq_stat[NR_CPUS];
// static buffer standing in for the idle thread's kernel stack
unsigned char _idle_thread_stack[PAGE_SZ];
void* idle_thread_stack = (void*)_idle_thread_stack;

// stub: on the host, "enabling" the scheduler just runs one pass
void sched_enable(void)
{
	schedule();
}


// stub: page-directory linking is a no-op in the host test build
bool link_task_pgd(uint* tsk_pgd)
{
	return true;
}

// no real page tables exist on the host
uint* page_root_dir = NULL;

// entry point for the win32 host test run
int proc_test(void)
{
	global_init_proc_module();
	return 0;
}

#endif

/* EOF */
