#include <inc/types.h>
#include <inc/memlayout.h>
#include <inc/traps.h>
#include <inc/mmu.h>
#include <inc/elf.h>
#include <inc/string.h>
#include <inc/assert.h>
#include <inc/x86.h>

#include <kern/kalloc.h>
#include <kern/proc.h>
#include <kern/vm.h>
#include <kern/trap.h>
#include <kern/cpu.h>

#define DEBUG_PROC

struct ptable ptable;

uint32_t nextpid = 1;
extern pde_t *kpgdir;
extern void forkret(void);
extern void trapret(void);
void swtch(struct context **, struct context *);


//
// Initialize something about process, such as ptable.lock
//
void
proc_init(void)
{
	// DEBUG: your code here
	spin_initlock(&ptable.lock);
	// Initialize ptable in BSP
	for (uint32_t i = 0; i < NPROC; i++) {
		ptable.proc[i].state = UNUSED;
	}
}

// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0
static struct proc *
proc_alloc(void)
{
	// Claim an UNUSED slot, allocate a kernel stack and page directory,
	// and lay out the kernel stack so that the first swtch() into this
	// process lands in forkret, which then "returns" to trapret:
	//
	//   kstack+KSTACKSIZE -> [ trapframe          ] <- p->tf
	//                        [ trapret (ret addr) ]
	//                        [ context            ] <- p->context
	//
	// Returns the new EMBRYO proc, or 0 on failure.
	struct proc *p;
	char *sp;
	uint32_t i;
	// User-mode segment selectors: index<<3 with RPL = DPL_USER.
	uint16_t ds = (SEG_UDATA << 3) | DPL_USER;
	uint16_t cs = (SEG_UCODE << 3) | DPL_USER;

	spin_lock(&ptable.lock);
	// Find a free slot in the process table.
	for (i = 0; i < NPROC; i++) {
		if (UNUSED == ptable.proc[i].state)
			break;
	}
	if (NPROC == i) {
		// No available proc in ptable.
		spin_unlock(&ptable.lock);
		return 0;
	}
	p = ptable.proc + i;

	p->kstack = kalloc();
	if (0 == p->kstack) {
		// Kernel stack allocation failed.
		spin_unlock(&ptable.lock);
		return 0;
	}
	p->state = EMBRYO;
	p->pid = nextpid++;
	p->parent = NULL; // TODO: set by fork(); NULL for the init proc
	p->pgdir = kvm_init();
	if (0 == p->pgdir) {
		// Page-directory allocation failed: roll the slot back so it
		// is reusable and we do not leak the kernel stack.
		kfree(p->kstack);
		p->kstack = NULL;
		p->state = UNUSED;
		spin_unlock(&ptable.lock);
		return 0;
	}

	sp = p->kstack + KSTACKSIZE;

	// Leave room for the trap frame at the top of the kernel stack.
	sp -= sizeof(struct trapframe);
	p->tf = (struct trapframe *) sp;
	memset((void *) p->tf, 0, sizeof(struct trapframe));
	p->tf->esp = USTACKTOP;  // initial user-mode %esp
	p->tf->gs = ds;
	p->tf->fs = ds;
	p->tf->es = ds;
	p->tf->ds = ds;
	p->tf->cs = cs;
	p->tf->ss = ds;

	// Fake return address: when forkret returns, it falls into trapret,
	// which pops the trap frame and IRETs to user mode.
	sp -= sizeof(uint32_t);
	*((uint32_t *) sp) = (uint32_t) trapret;

	// Initial context for swtch(). Zero it so the callee-saved registers
	// swtch pops are defined (the old code left them as stack garbage),
	// and set eip explicitly instead of relying on eip being the last
	// field via a raw ((uint32_t *)sp)[-1] store.
	sp -= sizeof(struct context);
	p->context = (struct context *) sp;
	memset((void *) p->context, 0, sizeof(struct context));
	p->context->eip = (uint32_t) forkret;

	spin_unlock(&ptable.lock);
	return p;
}

//
// Set up the initial program binary, stack, and processor flags
// for a user process.
// This function is ONLY called during kernel initialization,
// before running the first user-mode process.
//
// This function loads all loadable segments from the ELF binary image
// into the environment's user memory, starting at the appropriate
// virtual addresses indicated in the ELF program header.
// At the same time it clears to zero any portions of these segments
// that are marked in the program header as being mapped
// but not actually present in the ELF file - i.e., the program's bss section.
//
// All this is very similar to what our boot loader does, except the boot
// loader also needs to read the code from disk.  Take a look at
// boot/bootmain.c to get ideas.
//
// Finally, this function maps one page for the program's initial stack.
//
// load_icode panics if it encounters problems.
//  - How might load_icode fail?  What might be wrong with the given input?
//
static void
ucode_load(struct proc *p, uint8_t *binary) {
	// Load an ELF image into p's address space and prepare it to run:
	// validate the ELF magic, map and copy every PT_LOAD segment to its
	// p_va, zero the bss tail (p_filesz..p_memsz), point the trap frame
	// at the entry point, and map the initial user stack below USTACKTOP.
	//
	// Panics on an invalid ELF image. Only called during kernel init,
	// before the first user process runs.
	struct Proghdr *ph, *eph;
	struct Elf *elf = (struct Elf *) binary;

	if (ELF_MAGIC != elf->e_magic) {
		panic("Invalid ELF binary!\n");
	}
	ph = (struct Proghdr *) (binary + elf->e_phoff);
	eph = ph + elf->e_phnum;

	spin_lock(&ptable.lock);
	// Switch to p's page directory so we can copy straight to the
	// segments' user virtual addresses.
	uvm_switch(p);

	for (; ph < eph; ph++) {
		if (ELF_PROG_LOAD == ph->p_type) {
			// Map memory for this segment in p's page table.
			// NOTE(review): the start is rounded down but the length is
			// still p_memsz — if region_alloc does not round the end up,
			// an unaligned p_va could leave the segment's tail unmapped.
			// Confirm region_alloc's rounding semantics.
#ifdef DEBUG_PROC
			cprintf("ucode_load: create address map for 0x%x with size of 0x%x\n",
					PGROUNDDOWN(ph->p_va), ph->p_memsz);
#endif
			region_alloc(p, (void *) PGROUNDDOWN(ph->p_va), ph->p_memsz);
			memcpy((void *) ph->p_va, (void *) binary + ph->p_offset, ph->p_filesz);
			if (ph->p_filesz < ph->p_memsz) {
				// BUG FIX: zero the bss tail in the segment's USER memory
				// at p_va + p_filesz. The old code cleared bytes inside
				// the ELF image (binary + p_offset + p_filesz), corrupting
				// the binary and leaving the bss uninitialized.
				memset((void *) (ph->p_va + ph->p_filesz),
						0, ph->p_memsz - ph->p_filesz);
			}
		}
	}
	// Start execution at the ELF entry point.
	p->tf->eip = (uintptr_t) elf->e_entry;

	// Map the program's initial stack just below USTACKTOP.
	// NOTE(review): the comment above said one page, but this maps
	// 2*PGSIZE, i.e. one extra page above USTACKTOP — confirm intent.
	region_alloc(p, (void *) (USTACKTOP - PGSIZE), 2 * PGSIZE);

	// Mark runnable only after all of its memory is in place.
	p->state = RUNNABLE;
	spin_unlock(&ptable.lock);

	// Back to the kernel page directory.
	kvm_switch();
}

//
// Allocates a new proc with proc_alloc, loads the user_hello elf
// binary into it with UCODE_LOAD.
// This function is ONLY called during kernel initialization,
// before running the first user-mode process.
// The new proc's parent ID is set to 0.
//
void
user_init(void)
{
	// Create the first user process and load the user_hello ELF image
	// into it. Only called during kernel initialization.
	struct proc *u_proc = proc_alloc();
	// BUG FIX: proc_alloc can return 0 (table full / OOM); the old code
	// passed it straight to the loader and then dereferenced it.
	if (0 == u_proc)
		panic("user_init: proc_alloc failed");
	UCODE_LOAD(u_proc, user_hello);
	u_proc->state = RUNNABLE;
}

//
// Context switch from scheduler to first proc.
//
// This function does not return.
//
void
ucode_run(void)
{
	// Enter this CPU's scheduler loop. schedule() never returns, so
	// neither does this function. (Two earlier inline scheduler drafts
	// that lived here as commented-out code have been removed; the live
	// implementation is schedule() below.)
	schedule();
}

struct proc *
thisproc(void) {
	// Snapshot thiscpu->proc with interrupts disabled so the read is
	// not torn by an interrupt-driven context switch on this CPU.
	pushcli();
	struct proc *cur = thiscpu->proc;
	popcli();
	return cur;
}

//
// Context switch from thisproc to scheduler.
//
void
sched(void)
{
	// Switch from the current process back to this CPU's scheduler.
	// Caller must hold ptable.lock and must already have moved the
	// process out of RUNNING (to RUNNABLE, SLEEPING, or ZOMBIE).
	int intena;
	struct proc *p = thisproc();

	assert(spin_holding(&ptable.lock));
	assert(p->state != RUNNING);

	// Preserve this CPU's saved interrupt-enable state across the
	// switch: intena is per-CPU, not per-process.
	intena = thiscpu->intena;
	// Reuse p instead of calling thisproc() a second time (the old code
	// paid an extra pushcli/popcli round-trip for the same pointer).
	swtch(&p->context, thiscpu->scheduler);
	thiscpu->intena = intena;
}


void
forkret(void)
{
	// First code a new process runs: proc_alloc points the initial
	// context's eip here, so the scheduler's swtch() "returns" into
	// forkret. The scheduler acquired ptable.lock before switching to
	// us, so we must release it here. Returning then pops the fake
	// return address pushed in proc_alloc and falls into trapret,
	// which IRETs to user mode.
	spin_unlock(&ptable.lock);
	return;
}

void
exit(void)
{
	// Terminate the current process (reached via sys_exit()):
	// reparent its children, mark it ZOMBIE, and switch back to the
	// scheduler, which frees orphaned ZOMBIEs (see schedule()).
	// Never returns.
	struct proc *p = thisproc();

	spin_lock(&ptable.lock);

	// Hand our children to our parent; free any that already exited.
	for (struct proc *i = ptable.proc; i < ptable.proc + NPROC; i++)
		if (i->parent == p) {
			// BUG FIX: was `i->parent == p->parent;` — a comparison
			// with no effect, so children kept a dangling parent
			// pointer to this dying proc.
			i->parent = p->parent;
			if (i->state == ZOMBIE) {
				kfree(i->kstack);
				i->kstack = NULL;
				vm_free(i->pgdir);
				i->pid = 0;
				i->state = UNUSED;
			}
		}

	// Resources (kstack, pgdir) are released later, by whoever reaps
	// this ZOMBIE — we are still running on this kstack right now.
	p->state = ZOMBIE;
	sched();
	panic("exit returned");
}

void
schedule(void)
{
	// Per-CPU scheduler loop — never returns. Round-robin scan of the
	// process table, switching into each RUNNABLE process in turn.
	// Doubles as the reaper for ZOMBIEs whose parent is gone (exit()
	// reparents children; orphaned zombies are freed here).
	struct proc *p = ptable.proc;
	struct CpuInfo *c;

	c = thiscpu;
	c->proc = NULL;
	while (1)
	{
		// Enable interrupts each pass so devices can make progress
		// (and wake sleepers) even when nothing is RUNNABLE.
		sti();
		// Wrap the scan pointer back to the start of the table.
		if (p == ptable.proc + NPROC) {
			p = ptable.proc;
		}
		spin_lock(&ptable.lock);
		if (RUNNABLE == p->state) {
			c->proc = p;
			uvm_switch(p);
			p->state = RUNNING;
			// ptable.lock is held across swtch: the process side
			// releases it (forkret / sched callers) and reacquires it
			// before switching back. swtch returns here when the
			// process yields, sleeps, or exits.
			swtch(&(c->scheduler), p->context);
			kvm_switch();
			c->proc = NULL;
		} else
			// Reap an orphaned zombie: no parent will ever collect it.
			if (ZOMBIE == p->state && NULL == p->parent) {
				kfree(p->kstack);
				p->kstack = NULL;
				vm_free(p->pgdir);
				p->pid = 0;
				p->state = UNUSED;
			}
		spin_unlock(&ptable.lock);
		p++;
	}
}

int
fork(void)
{
	// Duplicate the calling process (reached via sys_fork()).
	// Returns the child's pid to the parent, -1 on failure; the child
	// itself resumes from the copied trap frame with %eax forced to 0.
	struct proc *parent = thisproc();
	struct proc *child = proc_alloc();

	if (NULL == child)
		return -1;

	child->parent = parent;
	uvm_copy(child->pgdir, parent->pgdir);
	*child->tf = *parent->tf;
	child->tf->eax = 0;	// fork() "returns" 0 in the child

	spin_lock(&ptable.lock);
	child->state = RUNNABLE;
	spin_unlock(&ptable.lock);

	return child->pid;
}

void
yield(void)
{
	// sys_yield() call
	spin_lock(&ptable.lock);
	thisproc()->state = RUNNABLE;
	sched();
	spin_unlock(&ptable.lock);
}

void
sleep(void *chain, struct spinlock *lk)
{
	// Atomically release lk and sleep on chain; reacquires lk before
	// returning. (Removed the assert that duplicated the two explicit
	// panic checks below — asserts can be compiled out, so the panics
	// are the authoritative checks.)
	struct proc *p = thisproc();

	if (NULL == p)
		panic("sleep");

	if (NULL == lk)
		panic("sleep without lock");

	// To avoid missed wakeups, acquire ptable.lock before dropping lk:
	// wakeup() also takes ptable.lock, so it cannot run between our
	// unlock of lk and our state change + sched().
	if (lk != &ptable.lock) {
		spin_lock(&ptable.lock);
		spin_unlock(lk);
	}

	p->chain = chain;
	p->state = SLEEPING;

	sched();

	// Woken up: clear the wait channel.
	p->chain = NULL;

	// Restore the caller's lock state.
	if (lk != &ptable.lock) {
		spin_unlock(&ptable.lock);
		spin_lock(lk);
	}
}

static void
wakeup1(void *chain)
{
	struct proc *p = ptable.proc;

	// need ptable.lock to run this function
	assert(spin_holding(&ptable.lock));

	for (; p < ptable.proc + NPROC; p++)
		if (p->state == SLEEPING && p->chain == chain)
			p->state = RUNNABLE;
}

void
wakeup(void *chain)
{
	// Public entry point: take ptable.lock, then delegate the scan
	// to wakeup1().
	spin_lock(&ptable.lock);
	wakeup1(chain);
	spin_unlock(&ptable.lock);
}
