#include"thread.h"
#include"string.h"
#include"memory.h"
#include"switch.h"
#include"debug.h"
#include"interrupt.h"
#include"tss.h"
#include"global.h"
#include"stdio_kernal.h"
#include"file.h"
#include"bitmap.h"

#define MAX_TASK_CNT 128
#define INIT_PID 1

list thread_ready_list;
list thread_all_list;
task_tcb* idle_task;

//pid allocator state: one bit per pid, offset by start_pid
struct pid_pool{
	bitmap pid_bitmap;	//bit i set => pid start_pid+i is in use
	pid_t  start_pid;	//lowest pid handed out (INIT_PID)
	lock pid_lock;		//serializes alloc/release across tasks
};

struct pid_pool pid_pool;

static pid_t _alloc_pid(void);

static void init_pid_pool(void);

static void make_main_thread(void);

static void make_idle_thread(void);

static void kthread(thread_func function,void* args);

static pid_t _alloc_pid(void){
	int32_t idx;	
	pid_t pid_no;
	lock_acquire(&pid_pool.pid_lock);
	idx=bitmap_scan(&pid_pool.pid_bitmap,1);
	if(idx<0){
		pid_no=-1;
		goto done;
	}
	bitmap_set(&pid_pool.pid_bitmap,idx,1);
	pid_no=(pid_t)(pid_pool.start_pid+idx);
done:
	lock_release(&pid_pool.pid_lock);
	return pid_no;
}

//idle task body: parks the cpu until the scheduler wakes it
static void idle_thread(UNUSED void* args){
	while(1){
		thread_block(TASK_BLOCKED);
		//woken by schedule(); halt with interrupts enabled so the
		//next timer tick can trigger another reschedule
		asm volatile("sti;hlt");
	}
}

static void init_pid_pool(void){
	pid_pool.start_pid=INIT_PID;
	pid_pool.pid_bitmap.bmap_bytes_len=MAX_TASK_CNT/8;
	pid_pool.pid_bitmap.bits=sys_malloc(MAX_TASK_CNT/8);
	bitmap_init(&pid_pool.pid_bitmap);
	init_lock(&pid_pool.pid_lock);
	//preserve pid 1 to init process
	ASSERT(_alloc_pid()==INIT_PID);		
}

//hand out a pid; the name "init" always maps to the reserved INIT_PID (pid 1)
pid_t alloc_pid(char* task_name){
	if(strcmp(task_name,"init")==0){
		return INIT_PID;
	}
	return _alloc_pid();
}

/*
 * Return a pid to the pool so it can be reused.
 * pids below start_pid never came from the bitmap (e.g. a failed
 * allocation returned -1), so ignore them instead of indexing the
 * bitmap with a negative offset.
 */
void release_pid(pid_t pid){
	if(pid<pid_pool.start_pid){
		return;
	}
	lock_acquire(&pid_pool.pid_lock);
	bitmap_set(&pid_pool.pid_bitmap,pid-pid_pool.start_pid,0);
	lock_release(&pid_pool.pid_lock);
}

/*
 * Fill in a fresh task_tcb from the thread descriptor: pid,
 * priority, initial status, kernel stack position, cwd and fd
 * table.  The tcb occupies one kernel page and its stack grows
 * down from the top of that page.
 */
void thread_init(thread_des* t_des,task_tcb* tcb){
	strcpy(tcb->name,t_des->name);
	tcb->pid=alloc_pid(tcb->name);
	tcb->ppid=-1;
	tcb->priority=t_des->priority;
	//a task runs for priority ticks per scheduling round
	tcb->tick=t_des->priority;
	//kernel main thread is already running when it is wrapped here
	tcb->status= !strcmp(t_des->name,"kmain") ? TASK_RUNNING : TASK_READY ;
	//kernel stack starts at the top of the tcb page
	tcb->k_stack=(uint32_t*)((uint32_t)tcb+PG_SIZE);
	//reserve space for the interrupt context and the thread context.
	//k_stack is uint32_t*, so the subtraction must be done in bytes:
	//plain "k_stack -= sizeof(...)" scales by sizeof(uint32_t) and
	//would step 4x too far down the page
	tcb->k_stack=(uint32_t*)((uint32_t)tcb->k_stack-sizeof(intr_stack));
	tcb->k_stack=(uint32_t*)((uint32_t)tcb->k_stack-sizeof(thread_stack));
	//task working directory defaults to the root inode
	tcb->cwd_i_no=0;
	//std fds are always open; the rest start closed (-1)
	tcb->fd_table[0]=stdin_no;
	tcb->fd_table[1]=stdout_no;
	tcb->fd_table[2]=stderr_no;
	uint8_t fd_idx;
	for(fd_idx=3;fd_idx<TASK_MAX_FD_CNT;fd_idx++){
		tcb->fd_table[fd_idx]=-1;
	}
	//canary for kernel stack overflow detection
	tcb->stack_magic=STACK_MAGIC;
}

/*
 * Arm a new thread's kernel stack so that switch_to lands in
 * kthread (which invokes the thread's entry function), then queue
 * the thread on the ready and all-task lists.
 */
void thread_create(thread_des* t_des,task_tcb* tcb){
	thread_stack* ctx=(thread_stack*)tcb->k_stack;
	ctx->ip=kthread;
	ctx->function=t_des->func;
	ctx->args=t_des->args;
	ASSERT(!elem_find(&thread_ready_list,&tcb->gen_tag));
	list_append(&thread_ready_list,&tcb->gen_tag);
	ASSERT(!elem_find(&thread_all_list,&tcb->all_tag));
	list_append(&thread_all_list,&tcb->all_tag);
}

/*
 * First code every new kernel thread executes (reached via the ret
 * in switch_to).  The context switch happens with interrupts
 * masked, so interrupts must be re-enabled here; otherwise the
 * timer could never preempt this thread and the scheduler could
 * not hand ticks to the other threads.
 * NOTE(review): nothing runs after function(args) returns — entry
 * functions are expected to loop or exit explicitly; confirm all
 * thread entry points do.
 */
static void kthread(thread_func function,void* args){
	intr_enable();
	function(args);
}

/*
 * Allocate one kernel page for the tcb, initialize it from t_des
 * and put the new thread on the scheduler queues.
 * Returns the new task's tcb.
 */
task_tcb* thread_start(thread_des* t_des){
	task_tcb* new_task=(task_tcb*)get_page(PG_K,1);
	//get_page returns NULL when kernel memory is exhausted;
	//thread_init would otherwise scribble through a null pointer
	ASSERT(new_task!=NULL);
	thread_init(t_des,new_task);
	thread_create(t_des,new_task);
	return new_task;
}

/*
 * Wrap the already-running kernel main flow into a tcb.  Its pcb
 * lives in the page current_thread() derives from esp, so no page
 * is allocated and it is registered only on the all-task list —
 * it is running, never "ready".
 */
static void make_main_thread(void){
	task_tcb* kmain=current_thread();
	thread_des des;
	des.name="kmain";
	des.priority=30;
	thread_init(&des,kmain);
	list_push(&thread_all_list,&kmain->all_tag);
}

static void make_idle_thread(void){
	thread_des idle_des;
	idle_des.name="idle";
	idle_des.priority=10;
	idle_des.func=(thread_func)idle_thread;
	idle_task=thread_start(&idle_des);
}

/*
 * Switch the cpu to the incoming task's address space and refresh
 * the tss kernel stack pointer used on ring transitions.
 * Kernel threads (page_dir==NULL) share the kernel page directory
 * at VK_PD_BASE.
 */
void active_page_dir(task_tcb* to){
	void* cr3;
	//cr3 page dir register must be loaded with a physical address
	cr3=vaddr2paddr((uint32_t)(to->page_dir==NULL ? (uint32_t*)VK_PD_BASE : to->page_dir));
	asm volatile("movl %0,%%cr3"::"r"(cr3));
	update_tss_esp0(to);

}

/*
 * Pick the next ready task and switch to it.
 * Reached from the timer path (time slice exhausted) and from
 * thread_block/thread_yield after they change curr's status.
 */
void schedule(void){
	task_tcb* curr;
	task_tcb* to;
	list_elem* to_tag;
	curr=current_thread();
	//task time tick is run out: refill its slice and requeue it at
	//the tail of the ready list (round robin)
       	if(curr->status==TASK_RUNNING && curr->tick==0){
		curr->status=TASK_READY;
		curr->tick=curr->priority;
		//insert current thread to thread ready list last
		ASSERT(!elem_find(&thread_ready_list,&curr->gen_tag));
		list_append(&thread_ready_list,&curr->gen_tag);
	}
	
	//ready list empty: wake the idle task so there is always
	//something runnable (unless we already are the idle task)
	if(list_empty(&thread_ready_list) && curr!=idle_task){
		thread_unblock(idle_task);
	}
	
	//ready list not empty: switch to the task at its head.
	//NOTE(review): if the list is empty and curr IS the idle task
	//we fall through and simply resume curr even though its status
	//may not be TASK_RUNNING — confirm this is intended
	if(!list_empty(&thread_ready_list)){
		to_tag=list_pop(&thread_ready_list);
		to=elem2entry(task_tcb,gen_tag,to_tag);
		to->status=TASK_RUNNING;
		active_page_dir(to);
		switch_to(curr,to);
	}
}
	
/*
 * Derive the current task's tcb from esp: each tcb sits at the
 * base of its own 4KB kernel-stack page, so masking the low 12
 * bits of esp yields the page start, i.e. the tcb address.
 */
task_tcb* current_thread(void){
	uint32_t esp;
	asm volatile("movl %%esp,%0":"=g"(esp));
	return (task_tcb*)(esp & 0xfffff000);
}

//bring up the threading subsystem: scheduler queues, pid pool,
//then the main and idle threads (idle last — it calls thread_start)
void init_kernal_thread(void){
	list_init(&thread_ready_list);
	list_init(&thread_all_list);
	init_pid_pool();
	make_main_thread();
	make_idle_thread();
}

/*
 * Block the calling thread with the given status (BLOCKED, WAITING
 * or HANGING) and hand the cpu to another task.  Execution resumes
 * past schedule() only after another thread calls thread_unblock
 * on us and the scheduler picks us again.
 * NOTE(review): old_status holds the return of intr_disable(),
 * which is an interrupt state, not a task state — the task_status
 * type looks mismatched here; confirm against interrupt.h.
 */
void thread_block(task_status ts){
	task_tcb* curr;
	ASSERT(ts==TASK_BLOCKED || ts==TASK_WAITING || ts==TASK_HANGING);
	task_status old_status=intr_disable();
	curr=current_thread();
	curr->status=ts;
	schedule();
	intr_set_status(old_status);
}

/*
 * Move a blocked/waiting/hanging thread back onto the ready list.
 * Interrupts are disabled BEFORE the status is inspected so the
 * check and the list update are atomic with respect to a timer-
 * driven schedule (the original read the status first, leaving a
 * window where it could change under us).
 */
void thread_unblock(task_tcb* thread){
	task_status old_status=intr_disable();
	ASSERT(thread->status==TASK_BLOCKED || thread->status==TASK_WAITING || thread->status==TASK_HANGING);
	thread->status=TASK_READY;
	ASSERT(!elem_find(&thread_ready_list,&thread->gen_tag));
	list_append(&thread_ready_list,&thread->gen_tag);	
	intr_set_status(old_status);
}

//voluntarily give up the cpu: requeue ourselves as ready at the
//tail of the ready list, then let the scheduler pick the next task
void thread_yield(void){
	task_status saved=intr_disable();
	task_tcb* self=current_thread();
	self->status=TASK_READY;
	ASSERT(!elem_find(&thread_ready_list,&self->gen_tag));
	list_append(&thread_ready_list,&self->gen_tag);
	schedule();
	intr_set_status(saved);
}


extern struct partition* curr_part;

/*
 * Recursively build the absolute path of the directory tree node
 * p_i_no into path_buffer: climb to the root via the ".." entry,
 * then append each component on the way back down.
 * c_i_no is the inode whose name should be looked up inside
 * directory p_i_no (-1 on the outermost call); iobuffer is a
 * sector-sized scratch buffer.
 * Returns path_buffer when a component was appended, NULL when the
 * entry lookup found nothing.
 * Fix over the original: each frame now appends its own component
 * while c_entry is still alive — the original returned a pointer
 * to the local c_entry.name, which dangles after the frame exits.
 * Assumes path_buffer starts as an empty string (callers clear it).
 */
static char* getcwd(char* path_buffer,char* iobuffer,int32_t p_i_no,int32_t c_i_no){
	struct winode* wi;
	struct dir_entry p_entry;
	struct dir_entry c_entry;

	memset(&p_entry,0,sizeof(struct dir_entry));
	memset(&c_entry,0,sizeof(struct dir_entry));
	wi=open_inode(curr_part,p_i_no);
	if(p_i_no){
		//not at the root yet: read this dir's first block to find
		//the ".." entry, then climb and emit all ancestors first
		ide_read(curr_part->disk,iobuffer,wi->pure_node.blocks[0],1);
		get_parent_dir_entry((struct dir_entry*)iobuffer,&p_entry,DIR_ENTRY_PER_SECTOR);
		ASSERT(p_entry.ft==FT_DIR);
		getcwd(path_buffer,iobuffer,p_entry.i_no,p_i_no);
	}else if(c_i_no<0){
		//the cwd is the root itself: path is just "/"
		strcat(path_buffer,"/");
	}
	//look up c_i_no's name inside this directory and append the
	//"/name" component here, while c_entry is still in scope
	get_dir_entry(curr_part,iobuffer,wi,&c_entry,c_i_no);
	close_inode(wi);
	if(c_entry.name[0]==0){
		return NULL;
	}
	strcat(path_buffer,"/");
	strcat(path_buffer,c_entry.name);
	return path_buffer;
}

/*
 * Build the absolute path of the current task's working directory
 * into path_buffer (a caller-supplied buffer of size bytes) and
 * return it.  Returns NULL if the scratch sector buffer cannot be
 * allocated.
 */
char* sys_getcwd(char* path_buffer,uint32_t size){
	char* iobuffer;
	iobuffer=sys_malloc(SECTOR_SIZE);
	if(iobuffer==NULL){
		return NULL;
	}
	//getcwd builds the path with strcat, so the caller's buffer
	//must start out as an empty string (size was previously unused
	//and the buffer was appended to uncleared)
	memset(path_buffer,0,size);
	getcwd(path_buffer,iobuffer,current_thread()->cwd_i_no,-1);
	sys_free(iobuffer);
	return path_buffer;
}

int32_t sys_chdir(const char* path){
	path_record pr;
	int32_t i_no;	
	char* filename;
	int ret=-1;

	i_no=path_parse(path,&pr);
	filename=strrchr(pr.path,'/')+1;
	if(i_no<0){
		printk("%s not exist\n",filename);
	}else if(pr.ft==FT_NORMAL){
		printk("%s is a normal file\n",filename);
	}else if(!pr.is_path_valid){
		printk("%s not invalide\n",pr.path);
	}else{
		ret=0;
		current_thread()->cwd_i_no=i_no;
	}
	close_dir(pr.pdir);
	return ret;
}

//return the pid of the calling task
pid_t sys_getpid(void){
	return current_thread()->pid;
}

/*
 * list_traversal callback: print one line of task state
 * (pid, ppid, name, status).  Always returns true so the walk
 * visits every task on the list.
 */
static bool task_info(list_elem* tag,int arg UNUSED){
	task_tcb* task=elem2entry(task_tcb,all_tag,tag);
	printk("%d    %d    %s   %d\n",task->pid,task->ppid,task->name,task->status);
	return true;
}

//print one status line per task (ps): pid, ppid, name, status
void sys_ps(void){
	list_traversal(&thread_all_list,task_info,0);
}

/*
 * Tear down a finished task: mark it dead, recycle its pid, unlink
 * it from the all-task list, free its pcb page and optionally
 * reschedule.
 * NOTE(review): when thread is the *current* task, its kernel
 * stack lives inside the page freed here and schedule() still runs
 * on it — this is safe only while that page is not re-allocated;
 * confirm callers invoke this with interrupts disabled.
 */
void thread_exit(task_tcb* thread,bool need_schedule){
	thread->status=TASK_DIED;
	release_pid(thread->pid);
	list_remove(&thread->all_tag);
	//free task pcb (one kernel page holding tcb + kernel stack)
	free_page(PG_K,thread,1);
	if(need_schedule){
		schedule();
	}
}

//list_traversal callback: stop (return false) at the task whose
//pid equals arg; keep walking otherwise
static bool pid_iterator(list_elem* tag,int arg){
	task_tcb* task=elem2entry(task_tcb,all_tag,tag);
	return task->pid!=(pid_t)arg;
}

//list_traversal callback: stop (return false) at the first task
//whose parent pid equals arg; keep walking otherwise
static bool child_iterator(list_elem* tag,int arg){
	task_tcb* task=elem2entry(task_tcb,all_tag,tag);
	return task->ppid!=(pid_t)arg;
}

//list_traversal callback: stop (return false) at the first
//TASK_HANGING child of parent pid arg; keep walking otherwise
static bool child_hanging_iterator(list_elem* tag,int arg){
	task_tcb* task=elem2entry(task_tcb,all_tag,tag);
	bool is_hanging_child = task->ppid==(pid_t)arg && task->status==TASK_HANGING;
	return !is_hanging_child;
}

//list_traversal callback: reparent every child of pid arg to the
//init process; always returns true so no child is skipped
static bool child_adopt_iterator(list_elem* tag,int arg){
	task_tcb* task=elem2entry(task_tcb,all_tag,tag);
	if(task->ppid==(pid_t)arg){
		task->ppid=INIT_PID;
	}
	return true;
}

//look up a task by pid; NULL when no task has that pid
task_tcb* pid2thread(pid_t pid){
	list_elem* hit=list_traversal(&thread_all_list,pid_iterator,pid);
	if(hit==NULL){
		return NULL;
	}
	return elem2entry(task_tcb,all_tag,hit);
}

//find parent's first child whose status is TASK_HANGING; NULL if none
task_tcb* find_hanging_child(task_tcb* parent){
	list_elem* hit=list_traversal(&thread_all_list,child_hanging_iterator,parent->pid);
	if(hit==NULL){
		return NULL;
	}
	return elem2entry(task_tcb,all_tag,hit);
}

//find parent's first child task; NULL when parent has no children
task_tcb* find_child(task_tcb* parent){
	list_elem* hit=list_traversal(&thread_all_list,child_iterator,parent->pid);
	if(hit==NULL){
		return NULL;
	}
	return elem2entry(task_tcb,all_tag,hit);
}

//hand all of parent's children over to the init process
void child_adopt(task_tcb* parent){
	list_traversal(&thread_all_list,child_adopt_iterator,parent->pid);
}
