#include "kernel.h"

/* Process table and its free list; allocation guarded by proc_alloc_lock. */
volatile struct proc_t *procs;
mcs_lock_t proc_alloc_lock;
volatile proc_id_t proc_free_head;

/* Environment table and its free list; allocation guarded by env_alloc_lock. */
volatile struct env_t envs[PROCESS_MAX_COUNT];
mcs_lock_t env_alloc_lock;
volatile proc_id_t env_free_head;

/* Active scheduler operations; hooks are installed by proc_init(). */
struct scheduler_t scheduler;

/*
 * simple_wait - remove PID from its ready ring and mark it waiting.
 *
 * data: opaque scheduler state (unused by this policy).
 * Returns 0 on success, -E_BAD_PROC if PID was not PSCH_READY.
 *
 * NOTE(review): operates on the CURRENT cpu's run queue; assumes PID is
 * queued on this cpu -- confirm against callers.
 */
static int
simple_wait(void *data, proc_id_t pid)
{
	 int cpu = cur_cpu();
	 
	 /* Only a READY process can be put to sleep */
	 if (procs[pid].sch_status != PSCH_READY)
		  return -E_BAD_PROC;

	 procs[pid].sch_status = PSCH_WAIT;

	 int pos = procs[pid].sch_info.pos;
	 int next;
	 int prev;
	 
	 /* sch_info.next == pid means PID is the only member of its ring */
	 if ((next = procs[pid].sch_info.next) == pid)
	 {
		  /* Ring becomes empty: clear the head and its bitmap bit.
		     rq_bits is kept relative to rq_cur, hence the distance mask
		     (masking assumes PRIORITY_COUNT+1 is a power of two). */
		  int dis = (pos - cpus[cpu].rq_cur) & PRIORITY_COUNT;
		  cpus[cpu].rq_head[pos] = PROC_NULL;
		  cpus[cpu].rq_bits &= ~(uint32_t)(1 << dis);
	 }
	 else
	 {
		  /* Unlink PID from the doubly-linked ring */
		  prev = procs[pid].sch_info.prev;
		  procs[next].sch_info.prev = prev;
		  procs[prev].sch_info.next = next;

		  /* If PID was the head, advance the head to its successor */
		  if (cpus[cpu].rq_head[pos] == pid)
			   cpus[cpu].rq_head[pos] = next;
	 }
	 
	 return 0;
}

/*
 * simple_notify - wake PID: either cancel a pending self-wait or move
 * the process from PSCH_WAIT into a ready ring on THIS cpu.
 *
 * data: opaque scheduler state (unused by this policy).
 * Returns 0 on success, -E_BAD_PROC if PID was neither pending a wait
 * nor waiting.
 *
 * NOTE(review): inserts into the CURRENT cpu's run queue, not the cpu
 * recorded in procs[pid].cpu -- confirm this migration is intended.
 */
static int
simple_notify(void *data, proc_id_t pid)
{
	 int cpu = cur_cpu();
	 
	 if (procs[pid].action == PACT_WAIT)
	 {
		  /* Wait was requested but not yet carried out by do_schedule:
		     just cancel the request */
		  procs[pid].action = 0;
		  return 0;
	 }
	 else if (procs[pid].sch_status != PSCH_WAIT)
	 {
		  return -E_BAD_PROC;
	 }
	 procs[pid].sch_status = PSCH_READY;
	 int priority = procs[pid].priority;

	 /* Target slot = current slot + 1 + priority, wrapped over the
	    queue (masking assumes PRIORITY_COUNT+1 is a power of two) */
	 int pos =
		  (cpus[cpu].rq_cur + 1 + priority) &
		  PRIORITY_COUNT;
	 procs[pid].sch_info.pos = pos;
	 
	 if (cpus[cpu].rq_head[pos] != PROC_NULL)
	 {
		  /* Insert at the tail of the existing ring */
		  int next = cpus[cpu].rq_head[pos];
		  int prev = procs[cpus[cpu].rq_head[pos]].sch_info.prev;
		  procs[next].sch_info.prev = pid;
		  procs[prev].sch_info.next = pid;
		  procs[pid].sch_info.next = next;
		  procs[pid].sch_info.prev = prev;
	 }
	 else
	 {
		  /* Start a new one-element ring and flag the slot in the
		     relative bitmap (distance from rq_cur is 1 + priority) */
		  procs[pid].sch_info.next =
			   procs[pid].sch_info.prev = pid;
		  cpus[cpu].rq_head[pos] = pid;
		  cpus[cpu].rq_bits |= 1 << (1 + procs[pid].priority);
	 }

	 return 0;
}

/*
 * simple_do_schedule - pick the next process to run on this cpu.
 *
 * cur:  the currently running process; it is removed from the run queue
 *       and then re-queued (or parked) according to its pending action.
 * hint: scheduling hint from the caller (unused by this policy).
 *
 * Returns the pid of the next process to run, or -1 when no runnable
 * process exists (caller falls back to the idle process).
 *
 * Run-queue layout: rq_head[] holds PRIORITY_COUNT+1 circular
 * doubly-linked rings indexed by time slot; rq_cur is the active slot
 * and rq_bits is a bitmap of non-empty slots kept RELATIVE to rq_cur
 * (bit 0 == the current slot).
 */
static int
simple_do_schedule(void *data, proc_id_t cur, int hint)
{
	 int cpu = cur_cpu();

	 procs[cur].sch_status = PSCH_READY;
	 
	 /* Unlink CUR from the active ring.
	    NOTE(review): the assignment below assumes CUR is the head of
	    the rq_cur ring -- confirm that invariant holds for all callers. */
	 int pos = cpus[cpu].rq_cur;
	 if ((cpus[cpu].rq_head[pos] = procs[cur].sch_info.next)
		 == cur)
	 {
		  /* CUR was alone in the slot: mark it empty */
		  cpus[cpu].rq_head[pos] = PROC_NULL;
		  cpus[cpu].rq_bits &= ~(uint32_t)1;
	 }
	 else
	 {
		  int next = procs[cur].sch_info.next;
		  int prev = procs[cur].sch_info.prev;
		  procs[next].sch_info.prev = prev;
		  procs[prev].sch_info.next = next;
	 }

	 /* Apply any action deferred while CUR was running */
	 switch (procs[cur].action)
	 {
	 case PACT_WAIT:
		  procs[cur].action = 0;
		  procs[cur].sch_status = PSCH_WAIT_R;
		  break;
	 }

	 if (procs[cur].sch_status == PSCH_READY)
	 {
		  /* Re-queue CUR into the slot its priority maps to */
		  procs[cur].sch_info.pos =
			   pos = (cpus[cpu].rq_cur + 1 + procs[cur].priority) &
			   PRIORITY_COUNT;
		  
		  if (cpus[cpu].rq_head[pos] != PROC_NULL)
		  {
			   /* Insert at the tail of the existing ring */
			   int next = cpus[cpu].rq_head[pos];
			   int prev = procs[cpus[cpu].rq_head[pos]].sch_info.prev;
			   procs[next].sch_info.prev = cur;
			   procs[prev].sch_info.next = cur;
			   procs[cur].sch_info.next = next;
			   procs[cur].sch_info.prev = prev;
		  }
		  else
		  {
			   /* Start a new ring and flag the slot in the bitmap */
			   procs[cur].sch_info.next =
					procs[cur].sch_info.prev = cur;
			   
			   cpus[cpu].rq_head[pos] = cur;
			   cpus[cpu].rq_bits |= 1 << (1 + procs[cur].priority);
		  }
	 }
	 else if (procs[cur].sch_status == PSCH_WAIT_R)
	 {
		  /* The requested wait is now complete */
		  procs[cur].sch_status = PSCH_WAIT;
	 }

	 if (cpus[cpu].rq_bits != 0)
	 {
		  /* Advance rq_cur to the nearest non-empty slot and shift the
		     relative bitmap by the same distance so bit 0 tracks it */
		  cpus[cpu].rq_cur =
			   (cpus[cpu].rq_cur + bsf(cpus[cpu].rq_bits)) &
			   PRIORITY_COUNT;
		  cpus[cpu].rq_bits >>= bsf(cpus[cpu].rq_bits);
	 }
	 else
	 {
		  kprintf("Panic: no process on cpu %d\n", cpu);
		  return -1;
	 }

	 proc_id_t result = cpus[cpu].rq_head[cpus[cpu].rq_cur];
	 if (result == PROC_NULL)
	 {
		  return -1;
	 }
	 
	 return result;
}

/*
 * sch_wait - put PID to sleep, or record a self-wait for the caller.
 *
 * pid:   process to wait.  If it is the calling process, a PACT_WAIT
 *        action is recorded (applied at the next do_schedule) and
 *        -E_YIELD is returned so the caller yields.
 * plock: when non-zero, take procs[pid].lock around the state change.
 * lock:  optional spin lock to release after the state change (NULL ok).
 *
 * Returns 0 on success, -E_YIELD for a self-wait, or a negative error
 * from the scheduler's wait hook (e.g. -E_BAD_PROC).
 */
int
sch_wait(proc_id_t pid, int plock, spin_lock_t *lock)
{
	 int result;
	 
	 if (plock) spl_acquire(&procs[pid].lock);
	 if (pid == cur_proc_id())
	 {
		  /* We cannot dequeue ourselves while running: defer to
		     do_schedule via the action field */
		  procs[pid].action = PACT_WAIT;
		  result = -E_YIELD;
	 }
	 else
	 {
		  /* Fix: propagate the hook's status instead of discarding it
		     (mirrors sch_notify); previously errors such as
		     -E_BAD_PROC were silently reported as success. */
		  result = scheduler.wait(scheduler.data, pid);
	 }
	 if (plock) spl_release(&procs[pid].lock);

	 if (lock != NULL)
		  spl_release(lock);
	 return result;
}

/*
 * sch_notify - wake PID through the active scheduler.
 *
 * plock: when non-zero, hold procs[pid].lock across the notify call.
 * Returns the scheduler hook's status (0 or a negative error).
 */
int
sch_notify(proc_id_t pid, int plock)
{
	 int ret;

	 if (plock)
		  spl_acquire(&procs[pid].lock);

	 ret = scheduler.notify(scheduler.data, pid);

	 if (plock)
		  spl_release(&procs[pid].lock);

	 return ret;
}

/*
 * proc_init - one-time boot setup: initialize the proc/env tables and
 * free lists, create a per-cpu idle process, and install the scheduler
 * hooks.
 *
 * Returns 0 on success or a negative error from fix_addr().
 */
int
proc_init()
{
	 int i;

	 mcs_init(&proc_alloc_lock);
	 mcs_init(&env_alloc_lock);
	 /* Thread every slot onto the free lists.
	    NOTE(review): the last slot's free_next ends up as
	    PROCESS_MAX_COUNT -- confirm PROC_NULL equals that value so the
	    lists terminate correctly. */
	 for (i = 0; i != PROCESS_MAX_COUNT; ++i)
	 {
		  spl_init(&procs[i].lock);
		  procs[i].flags = 0;
		  procs[i].free_next = i + 1;

		  spl_init(&envs[i].lock);
		  envs[i].free_next = i + 1;
	 }
	 proc_free_head = 0;
	 env_free_head = 0;

	 /* Reserve a single env shared by all idle processes */
	 proc_id_t idle_env = env_free_head;
	 env_free_head = envs[env_free_head].free_next;

	 spl_init(&envs[idle_env].lock);
	 envs[idle_env].instance_count = 0;

	 /* Create idle processes for APs */
	 for (i = 0; i != sysconf.cpu_count; ++i)
	 {
		  int id = cpu_ids[i];
		  
		  /* Pop a proc slot; boot is single-threaded, so no lock here */
		  proc_id_t cur = proc_free_head;
		  proc_free_head = procs[cur].free_next;

		  cpus[id].idle_pid = cur;
		  cpus[id].vpt = PADDR(init_vpd);

		  spl_init(&procs[cur].lock);
		  procs[cur].flags = PFLAG_VALID;
		  procs[cur].vpt = PADDR(init_vpd);
		  /* Idle runs with interrupts enabled and IOPL 3 */
		  procs[cur].eflags = read_eflags() | FL_IF | FL_IOPL_3;
		  procs[cur].env = idle_env;
		  ++ envs[idle_env].instance_count;
		  procs[cur].cpu = id;
		  procs[cur].sch_info.pos = 0;
		  /* Idle starts as a one-element ring in run-queue slot 0 */
		  procs[cur].sch_info.next =
			   procs[cur].sch_info.prev = cur;
		  procs[cur].action = 0;
		  procs[cur].signal_pending = 0;
		  procs[cur].signal_flags = 0;
		  procs[cur].signal_handler = 0;
		  
		  /* Largest priority value: maps to the farthest run-queue slot */
		  procs[cur].priority = PRIORITY_COUNT - 1;
		  cpus[id].rq_cur = 0;
		  cpus[id].rq_bits = 0x00000001;
		  int j;
		  for (j = 0; j != PRIORITY_COUNT + 1; ++ j)
		  {
			   cpus[id].rq_head[j] = PROC_NULL;
		  }
		  cpus[id].rq_head[0] = cur;

		  /* Touch the stack for each proc */
		  uintptr_t s;
		  for (s = PROC_KSTACK_TOP(cur - 1) + PAGE_SIZE;
			   s != PROC_KSTACK_TOP(cur);
			   s += PAGE_SIZE)
		  {
			   ptab_entry_t pte = PTE_W;
			   int err;
			   if ((err = fix_addr((void *)s, &pte, 0)) < 0)
					return err;
		  }
		  procs[cur].ksp = KSTACK + ((cur + 1) << KSTACK_SHIFT);
	 }

	 /* Set the scheduler */
	 scheduler.do_schedule    = &simple_do_schedule;
	 scheduler.wait 		  = &simple_wait;
	 scheduler.notify		  = &simple_notify;
	 
	 return 0;
}

/*
 * do_schedule - context-switch core: save CUR's context, run the
 * scheduler policy, and load the next process's context.
 *
 * esp, eflags: in/out -- the interrupted context's stack pointer and
 *              flags; overwritten with the next process's values when a
 *              switch occurs.
 * hint:        passed through to the policy's do_schedule hook.
 *
 * Returns the page-table physical address of the next process (for the
 * caller to install), or 0 when the same process keeps running.
 */
physaddr_t
do_schedule(uint32_t *esp, uint32_t *eflags, int hint)
{
	 int cur = cur_proc_id();
	 int cpu = cur_cpu();
	 int next;

	 spl_acquire(&procs[cur].lock);
	 /* restore temp stack pointer and flag */
	 /* Save the stack pointer */
	 procs[cur].eflags = *eflags;
	 procs[cur].esp = *esp;
	 procs[cur].ksp = get_task_ksp(cpu);

	 /* Hand the proc lock from candidate to candidate.  Keep re-running
	    the policy until it yields a process with no pending action (the
	    policy applies the action as a side effect) or reports failure. */
	 int last = cur;
	 next = cur;
	 while (1)
	 {
		  next = scheduler.do_schedule
			   (scheduler.data, next, hint);

		  spl_release(&procs[last].lock);
		  if (next < 0) break;
		  last = next;
		  
		  spl_acquire(&procs[next].lock);
		  if (procs[next].action == 0)
		  {
			   break;
		  }
	 }

	 if (next < 0)
	 {
		  /* Nothing runnable: fall back to this cpu's idle process.
		     NOTE(review): idle's state is modified without holding its
		     lock -- presumably safe because idle is per-cpu; confirm. */
		  kprintf("schedular: panic, err %d\n", next);
		  next = cpus[cpu].idle_pid;

		  procs[next].sch_status = PSCH_RUN;
		  procs[next].cpu = cpu;

		  *eflags = procs[next].eflags;
		  *esp = procs[next].esp;
		  set_task_ksp(cpu, procs[next].ksp);

		  return procs[next].vpt;
	 }

	 procs[next].sch_status = PSCH_RUN;
	 procs[next].cpu = cpu;

	 if (next != cur)
	 {
		  /* set the stack pointer and flag */
		  *eflags = procs[next].eflags;
		  *esp = procs[next].esp;
		  set_task_ksp(cpu, procs[next].ksp);
	 }
	 spl_release(&procs[next].lock);
	 
	 if (next == cur)
		  return 0;
	 else return procs[next].vpt;
}

proc_id_t
env_alloc(void)
{
	 struct mcs_lock_node_t lock;
	 mcs_acquire(&env_alloc_lock, &lock);
	 if (env_free_head == PROC_NULL)
	 {
		  mcs_release(&env_alloc_lock, &lock);
		  return PROC_NULL;
	 }
	 proc_id_t result = env_free_head;
	 env_free_head = envs[result].free_next;	 
	 mcs_release(&env_alloc_lock, &lock);

	 spl_init(&envs[result].lock);
	 envs[result].instance_count = 0;

	 return result;
}

/*
 * env_free - push environment ID back onto the head of the free list.
 */
void
env_free(proc_id_t id)
{
	 struct mcs_lock_node_t node;

	 mcs_acquire(&env_alloc_lock, &node);
	 envs[id].free_next = env_free_head;
	 env_free_head = id;
	 mcs_release(&env_alloc_lock, &node);
}

int
proc_set_priority(int priority)
{
	 procs[cur_proc_id()].priority = priority;
	 return 0;
}

/*
 * proc_set_signal_handler - install the calling process's signal
 * handler entry point.  Always returns 0.
 */
int
proc_set_signal_handler(uintptr_t handler)
{
	 proc_id_t self = cur_proc_id();

	 procs[self].signal_handler = handler;
	 return 0;
}
	 
/*
 * proc_set_signal_flags - update the calling process's signal flags
 * and, when sch_count is non-zero, reload the schedule-signal counter
 * and its trigger value.  Always returns 0.
 */
int
proc_set_signal_flags(uint16_t flags, uint16_t sch_count)
{
	 /* Fix: look up the calling process once.  The original re-called
	    cur_proc_id() for every field, so a reschedule between the calls
	    could split the update across two different procs. */
	 proc_id_t pid = cur_proc_id();

	 procs[pid].signal_flags = flags;
	 if (sch_count != 0)
	 {
		  procs[pid].sch_signal_count = sch_count;
		  procs[pid].sch_signal_trigger = sch_count;
	 }
	 return 0;
}


/*
 * fork_proc - duplicate the calling process into a freshly allocated
 * proc slot and hand the child to the scheduler.
 *
 * tf: the parent's trap frame; tf->regs.eax receives -E_NO_PROC or a
 *     fix_addr() error on failure.
 *
 * NOTE(review): on success neither the parent's nor the child's
 * tf->regs.eax is set here -- confirm the caller supplies the usual
 * fork return values.
 */
void
fork_proc(struct trapframe_t *tf)
{
	 struct mcs_lock_node_t node;
	 proc_id_t cur = cur_proc_id();
	 proc_id_t npid;

	 /* Pop a slot from the proc free list */
	 mcs_acquire(&proc_alloc_lock, &node);
	 npid = proc_free_head;
	 if (npid == PROC_NULL)
	 {
		  mcs_release(&proc_alloc_lock, &node);
		  tf->regs.eax = -E_NO_PROC;
		  return;
	 }
	 proc_free_head = procs[npid].free_next;
	 mcs_release(&proc_alloc_lock, &node);

	 procs[npid].ksp = KSTACK + ((npid + 1) << KSTACK_SHIFT);
	 uintptr_t kstk_delta = procs[npid].ksp - procs[cur].ksp;
	 /* Map the child's kernel stack page by page, copying the parent's
	    stack contents as we go */
	 uintptr_t s;
	 for (s = PROC_KSTACK_TOP(npid - 1) + PAGE_SIZE;
		  s != PROC_KSTACK_TOP(npid);
		  s += PAGE_SIZE)
	 {
		  ptab_entry_t pte = PTE_W;
		  int err;
		  if ((err = fix_addr((void *)s, &pte, 0)) < 0)
		  {
			   /* Fix: return the slot to the free list instead of
			      leaking it forever (the original only cleared the
			      flags and left npid unreachable) */
			   procs[npid].flags = 0;
			   mcs_acquire(&proc_alloc_lock, &node);
			   procs[npid].free_next = proc_free_head;
			   proc_free_head = npid;
			   mcs_release(&proc_alloc_lock, &node);
			   tf->regs.eax = err;
			   return;
		  }
		  /* Copy the stack data */
		  memmove((void *)s, (void *)(s - kstk_delta), PAGE_SIZE);
	 }

	 spl_init(&procs[npid].lock);
	 procs[npid].flags = procs[cur].flags;
	 procs[npid].cpu = procs[cur].cpu;
	 
	 /* Share the parent's environment */
	 procs[npid].env = procs[cur].env;
	 spl_acquire(&envs[procs[npid].env].lock);
	 ++ envs[procs[npid].env].instance_count;
	 spl_release(&envs[procs[npid].env].lock);

	 procs[npid].eflags = procs[cur].eflags;
	 procs[npid].vpt = procs[cur].vpt;
	 /* Increase the ref count of vpt */
	 ppage_inc_ref(procs[npid].vpt);
	 
	 /* The child's saved esp points at the copied trap frame, offset by
	    the distance between the two kernel stacks */
	 procs[npid].esp = (uint32_t)tf + kstk_delta;
	 procs[npid].sch_status = PSCH_WAIT;
	 
	 procs[npid].action = 0;
	 procs[npid].signal_pending = 0;
	 procs[npid].signal_flags = procs[cur].signal_flags & ~SF_READY;
	 procs[npid].signal_handler = procs[cur].signal_handler;
	 procs[npid].sch_signal_trigger =
		  procs[npid].sch_signal_count = procs[cur].sch_signal_count;
	 procs[npid].priority = procs[cur].priority;
	 
	 /* Insert into ready cycle */
	 scheduler.notify(scheduler.data, npid);

	 return;
}
