#include "../kernel.h"

/* The scheduler interface */
/* The scheduler interface */
struct scheduler_t
{
	 /* opaque pointer to implementation-associated data; passed back
	  * as the first argument of every callback below */
	 void *data;
	 /* get the next ready env idx, args: data, hint */
	 int (*do_schedule)(void *, proc_id_t cur, int hint);
	 /* insert pid into the ready structures of the current lcpu */
	 int (*attach)(void *, proc_id_t pid);
	 /* remove pid from the ready structures of its lcpu */
	 int (*detach)(void *, proc_id_t pid);
	 /* wake pid (queue it for re-attachment on its lcpu) */
	 int (*notify)(void *, proc_id_t pid);
};

/* Global process table (storage is set up elsewhere — not in this file) */
volatile struct proc_t *procs;
/* Protects the proc free list below */
mcs_lock_t proc_alloc_lock;
/* Head of the free-proc singly-linked list (via procs[i].free_next) */
volatile proc_id_t proc_free_head;

/* Per-lcpu queue of procs woken by simple_notify(); drained by
 * simple_do_schedule() on that lcpu */
volatile struct {
	 spin_lock_t lock;
	 proc_id_t head;
} lcpu_notify_queues[LAPIC_COUNT];
	 
/* Environment (address-space) table and its free list */
volatile struct env_t envs[PROCESS_MAX_COUNT];
mcs_lock_t env_alloc_lock;
volatile proc_id_t env_free_head;

/* The active scheduler implementation (filled in by proc_init) */
struct scheduler_t scheduler;

/* Insert pid into the current lcpu's runqueue at the slot determined
 * by its priority, relative to the rotating cursor rq_cur.
 * Each slot is a circular doubly-linked list via sch_info.next/prev.
 * `data' is unused by this implementation.
 * NOTE(review): the slot index is masked with PRIORITY_COUNT, so the
 * rq_head array (PRIORITY_COUNT + 1 entries) is indexed 0..PRIORITY_COUNT
 * only if PRIORITY_COUNT is of the form 2^k - 1 — assumed, not visible
 * here; confirm against kernel.h. */
static int
simple_attach(void *data, proc_id_t pid)
{
	 int lcpu = cur_lcpu();
	 int priority = procs[pid].priority;		  
	 /* slot = cursor + 1 + priority (mod ring size): lower priority
	  * value means scheduled sooner */
	 int pos =
		  (lcpus[lcpu].rq_cur + 1 + priority) &
		  PRIORITY_COUNT;
	 procs[pid].lcpu = lcpu;
	 procs[pid].sch_info.pos = pos;
	 
	 if (lcpus[lcpu].rq_head[pos] != PROC_NULL)
	 {
		  /* Non-empty slot: splice pid in just before the head
		   * (i.e. append at the tail of the circular list) */
		  int next = lcpus[lcpu].rq_head[pos];
		  int prev = procs[lcpus[lcpu].rq_head[pos]].sch_info.prev;
		  procs[next].sch_info.prev = pid;
		  procs[prev].sch_info.next = pid;
		  procs[pid].sch_info.next = next;
		  procs[pid].sch_info.prev = prev;
	 }
	 else
	 {
		  /* Empty slot: pid becomes a singleton ring and the slot's
		   * occupancy bit (distance 1 + priority from rq_cur) is set */
		  procs[pid].sch_info.next =
			   procs[pid].sch_info.prev = pid;
		  lcpus[lcpu].rq_head[pos] = pid;
		  lcpus[lcpu].rq_bits |= 1 << (1 + priority);
	 }

	 return 0;
}

/* Remove pid from the runqueue of the lcpu it is attached to.
 * Inverse of simple_attach(): unlinks pid from its slot's circular
 * list; if pid was the only member, the slot is emptied and its
 * occupancy bit (distance of the slot from rq_cur) is cleared.
 * `data' is unused by this implementation.  Always returns 0. */
static int
simple_detach(void *data, proc_id_t pid)
{
	 int lcpu = procs[pid].lcpu;
	 int pos = procs[pid].sch_info.pos;
	 int next;
	 int prev;
	 
	 if ((next = procs[pid].sch_info.next) == pid)
	 {
		  /* pid was alone in the slot: empty it and clear the bit
		   * corresponding to the slot's distance from the cursor */
		  int dis = (pos - lcpus[lcpu].rq_cur) & PRIORITY_COUNT;
		  lcpus[lcpu].rq_head[pos] = PROC_NULL;
		  lcpus[lcpu].rq_bits &= ~(uint32_t)(1 << dis);
	 }
	 else
	 {
		  /* Unlink pid; move the slot head forward if pid was head */
		  prev = procs[pid].sch_info.prev;
		  procs[next].sch_info.prev = prev;
		  procs[prev].sch_info.next = next;

		  if (lcpus[lcpu].rq_head[pos] == pid)
			   lcpus[lcpu].rq_head[pos] = next;
	 }

	 return 0;
}

/* Wake pid.  If it has fully parked (PSCH_WAIT), enqueue it on its
 * lcpu's notify queue (a circular doubly-linked list via notify_queue)
 * to be re-attached by simple_do_schedule on that lcpu; otherwise
 * adjust sch_action so an in-flight wait request is cancelled.
 * Caller is expected to hold procs[pid].lock (see sch_notify). */
static int
simple_notify(void *data, proc_id_t pid)
{
	 if (// procs[pid].sch_wait_sem ++ == 0 &&
		 procs[pid].sch_status == PSCH_WAIT)
	 {
		  int lcpu = procs[pid].lcpu;
		  spl_acquire(&lcpu_notify_queues[lcpu].lock);
		  int head = lcpu_notify_queues[lcpu].head;
		  if (head == PROC_NULL)
		  {
			   /* Empty queue: pid becomes a singleton ring */
			   procs[pid].notify_queue.next =
					procs[pid].notify_queue.prev = pid;
			   lcpu_notify_queues[lcpu].head = pid;
		  }
		  else
		  {
			   /* Splice pid in before the head (append at tail) */
			   procs[pid].notify_queue.next = head;
			   procs[pid].notify_queue.prev = procs[head].notify_queue.prev;
			   
			   procs[procs[pid].notify_queue.prev].notify_queue.next = pid;
			   procs[procs[pid].notify_queue.next].notify_queue.prev = pid;
		  }
		  spl_release(&lcpu_notify_queues[lcpu].lock);
		  /* Mark as in-flight so it cannot be detached/killed while
		   * sitting on the notify queue */
		  procs[pid].sch_status = PSCH_NOTIFYING;
		  procs[pid].sch_action = PACT_WAIT_PRETEND;
	 }
	 else if (procs[pid].sch_action == PACT_WAIT)
	 {
		  /* Wait was committed but not yet processed: downgrade it so
		   * the proc stays runnable */
		  procs[pid].sch_action = PACT_WAIT_PRETEND;
	 }
	 else if (procs[pid].sch_action == PACT_WAIT_PRETEND)
	 {
		  /* Tentative wait: the notification cancels it outright */
		  procs[pid].sch_action = 0;
	 }

	 return 0;
}

/* Pick the next proc to run on the current lcpu.
 * Steps: (1) requeue or park `cur' (the proc being switched away
 * from, PROC_NULL if none); (2) drain this lcpu's notify queue,
 * re-attaching woken procs; (3) rotate rq_cur to the nearest
 * non-empty priority slot and return its head.
 * Returns the chosen proc id, or -1 if no proc is runnable.
 * `hint' is currently unused. */
static int
simple_do_schedule(void *data, proc_id_t cur, int hint)
{
	 int lcpu = cur_lcpu();

	 if (cur != PROC_NULL)
	 {
		  procs[cur].sch_status = PSCH_READY;
		  /* Detach the head: advance the current slot's head to cur's
		   * successor (assignment inside the condition is intentional);
		   * if cur was alone, the slot empties and bit 0 is cleared */
		  int pos = lcpus[lcpu].rq_cur;
		  if ((lcpus[lcpu].rq_head[pos] = procs[cur].sch_info.next)
			  == cur)
		  {
			   lcpus[lcpu].rq_head[pos] = PROC_NULL;
			   lcpus[lcpu].rq_bits &= ~(uint32_t)1;
		  }
		  else
		  {
			   int next = procs[cur].sch_info.next;
			   int prev = procs[cur].sch_info.prev;
			   procs[next].sch_info.prev = prev;
			   procs[prev].sch_info.next = next;
		  }
		  
		  switch (procs[cur].sch_action)
		  {
		  case PACT_WAIT:
			   /* Process wait request: park cur instead of requeueing */
			   procs[cur].sch_status = PSCH_WAIT_R;
			   procs[cur].sch_action = 0;
			   break;
		  }

		  if (procs[cur].sch_status == PSCH_READY)
		  {
			   /* Re-attach ready proc at the slot for its priority,
				* relative to the (pre-rotation) cursor */
			   procs[cur].sch_info.pos =
					pos = (lcpus[lcpu].rq_cur + 1 + procs[cur].priority) &
					PRIORITY_COUNT;
		  
			   if (lcpus[lcpu].rq_head[pos] != PROC_NULL)
			   {
					/* Splice before the head (tail of the circular list) */
					int next = lcpus[lcpu].rq_head[pos];
					int prev = procs[lcpus[lcpu].rq_head[pos]].sch_info.prev;
					procs[next].sch_info.prev = cur;
					procs[prev].sch_info.next = cur;
					procs[cur].sch_info.next = next;
					procs[cur].sch_info.prev = prev;
			   }
			   else
			   {
					procs[cur].sch_info.next =
						 procs[cur].sch_info.prev = cur;
			   
					lcpus[lcpu].rq_head[pos] = cur;
					lcpus[lcpu].rq_bits |= 1 << (1 + procs[cur].priority);
			   }
		  }
		  else if (procs[cur].sch_status == PSCH_WAIT_R)
		  {
			   /* Wait request committed: cur is now fully parked */
			   procs[cur].sch_status = PSCH_WAIT;
		  }
	 }

	 /* Get the notify queue */
	 int nq_head = lcpu_notify_queues[lcpu].head;

	 if (nq_head != PROC_NULL)
	 {
		  /* release the notify queue */
		  spl_acquire(&lcpu_notify_queues[lcpu].lock);
		  lcpu_notify_queues[lcpu].head = PROC_NULL;
		  spl_release(&lcpu_notify_queues[lcpu].lock);

		  /* attach procs in queue */
		  int nq_cur = nq_head;
		  while (nq_cur != PROC_NULL)
		  {
			   simple_attach(data, nq_cur);
			   /* FIX: was `==' — a no-op comparison, so woken procs
				* stayed in PSCH_NOTIFYING forever; must assign */
			   procs[nq_cur].sch_status = PSCH_READY;
			   
			   nq_cur = procs[nq_cur].notify_queue.next;
			   if (nq_cur == nq_head) break;
		  }
	 }

	 /* Rotate the cursor forward to the nearest occupied slot; rq_bits
	  * is a bitmap of occupied slots relative to rq_cur */
	 if (lcpus[lcpu].rq_bits != 0)
	 {
		  lcpus[lcpu].rq_cur =
			   (lcpus[lcpu].rq_cur + bsf(lcpus[lcpu].rq_bits)) &
			   PRIORITY_COUNT;
		  lcpus[lcpu].rq_bits >>= bsf(lcpus[lcpu].rq_bits);
	 }
	 else
	 {
		  /* No process on this cpu */
		  return -1;
	 }

	 proc_id_t result = lcpus[lcpu].rq_head[lcpus[lcpu].rq_cur];
	 if (result == PROC_NULL)
	 {
		  return -1;
	 }
	 return result;
}

/* Tentatively declare that the current proc intends to wait at the
 * given level.  A notification arriving before sch_wait_try() cancels
 * the intent (see simple_notify).  Always returns 0. */
int
sch_wait_pretend(int level)
{
	 int pid = cur_proc_id();
	 
	 spl_acquire(&procs[pid].lock);
	 procs[pid].wait_level = level;
	 procs[pid].sch_action = PACT_WAIT_PRETEND;
	 spl_release(&procs[pid].lock);

	 return 0;
}

/* Commit a tentative wait started by sch_wait_pretend().
 * Returns -E_YIELD (caller should yield) if the intent still stands,
 * or 0 if a notification already cancelled it. */
int
sch_wait_try(void)
{
	 int pid = cur_proc_id();
	 int result;
	 spl_acquire(&procs[pid].lock);
	 if (procs[pid].sch_action == PACT_WAIT_PRETEND)
	 {
		  procs[pid].sch_action = PACT_WAIT;
		  result = -E_YIELD;
	 }
	 else result = 0;
	 spl_release(&procs[pid].lock);
	 
	 return result;
}

/* Unconditionally request that the current proc wait at the given
 * level.  Always returns -E_YIELD; the wait takes effect at the next
 * schedule (see simple_do_schedule's PACT_WAIT handling). */
int
sch_wait(int level)
{
	 int pid = cur_proc_id();
	 spl_acquire(&procs[pid].lock);
	 procs[pid].wait_level = level;
	 procs[pid].sch_action = PACT_WAIT;
	 spl_release(&procs[pid].lock);
	 return -E_YIELD;
}

/* Wake pid if the notification's level is at least the level pid is
 * waiting at; otherwise fail with -E_BUSY (and log the refusal).
 * On success returns the scheduler notify callback's result and
 * resets pid's wait level. */
int
sch_notify(proc_id_t pid, int level)
{
	 int result;
	 spl_acquire(&procs[pid].lock);
	 if (level >= procs[pid].wait_level)
	 {
		  result =
			   scheduler.notify(scheduler.data, pid);
		  procs[pid].wait_level = 0;
	 }
	 else
	 {
		  result = -E_BUSY;
		  kprintf("notify %d (%d)=> %d(%d) failed\n", cur_proc_id(), level, pid, procs[pid].wait_level);
	 }
	 spl_release(&procs[pid].lock);
	 return result;
}

/* Detach pid from scheduling on the current lcpu.
 * Refuses (-E_INVAL) procs that are invalid, idle, already detached,
 * mid-notify, currently running, or attached to another lcpu.
 * Returns the scheduler detach callback's result when pid was on a
 * runqueue (PSCH_READY), 0 otherwise (e.g. PSCH_WAIT). */
int
sch_detach(proc_id_t pid)
{
	 if (!(procs[pid].flags & PFLAG_VALID) ||
		 (procs[pid].flags & PFLAG_IDLE) ||
		 procs[pid].sch_status == PSCH_DETACHED ||
		 procs[pid].sch_status == PSCH_NOTIFYING ||
		 procs[pid].sch_status == PSCH_RUN ||
		 procs[pid].lcpu != cur_lcpu())
		  return -E_INVAL;
	 /* FIX: result was returned uninitialized when sch_status was not
	  * PSCH_READY (e.g. a waiting proc) — undefined behavior */
	 int result = 0;
	 if (procs[pid].sch_status == PSCH_READY)
	 {
		  result = scheduler.detach(scheduler.data, pid);
	 }
	 procs[pid].sch_status = PSCH_DETACHED;
	 return result;	 
}

/* Re-attach a previously detached proc to the scheduler on the
 * current lcpu and mark it ready.  Returns -E_INVAL if pid is not in
 * the detached state, otherwise the attach callback's result. */
int
sch_attach(proc_id_t pid)
{
	 if (procs[pid].sch_status != PSCH_DETACHED)
		  return -E_INVAL;
	 int result =
		  scheduler.attach(scheduler.data, pid);
	 procs[pid].sch_status = PSCH_READY;
	 return result;	 
}

/* Boot-time initialization of process management:
 * builds the proc/env free lists, creates the shared idle environment
 * (reusing the boot page table), creates one idle proc per lcpu with
 * its own runqueue and kernel stack, and installs the "simple"
 * scheduler callbacks.  Returns 0 on success or a negative error from
 * fix_addr().  Single-threaded: runs before other cpus schedule. */
int
proc_init()
{
	 int i;

	 mcs_init(&proc_alloc_lock);
	 mcs_init(&env_alloc_lock);
	 /* Thread every slot onto its free list (free_next = i + 1) */
	 for (i = 0; i != PROCESS_MAX_COUNT; ++i)
	 {
		  spl_init(&procs[i].lock);
		  procs[i].flags = 0;
		  procs[i].free_next = i + 1;

		  spl_init(&envs[i].lock);
		  envs[i].free_next = i + 1;
	 }
	 proc_free_head = 0;
	 env_free_head = 0;

	 /* Pop env 0 as the shared idle environment; it reuses the
	  * current (boot) page table */
	 proc_id_t idle_env = env_free_head;
	 env_free_head = envs[env_free_head].free_next;

	 spl_init(&envs[idle_env].lock);
	 envs[idle_env].adm_level = 0;
	 envs[idle_env].vpt = rcr3();
	 envs[idle_env].instance_count = 0;

	 /* Fix the recv and send level? */
	 int init_ex_ap = ips_ap_alloc(0, 0);

	 /* Create idle processes for APs */
	 for (i = 0; i != sysconf.lcpu_count; ++i)
	 {
		  spl_init(&lcpu_notify_queues[i].lock);
		  lcpu_notify_queues[i].head = PROC_NULL;
		  
		  /* Pop a proc slot for this lcpu's idle proc */
		  proc_id_t cur = proc_free_head;
		  proc_free_head = procs[cur].free_next;

		  lcpus[i].idle_pid = cur;

		  spl_init(&procs[cur].lock);
		  procs[cur].flags = PFLAG_VALID | PFLAG_IDLE;
		  procs[cur].eflags = read_eflags() | FL_IF | FL_IOPL_3;
		  procs[cur].env = idle_env;
		  ++ envs[idle_env].instance_count;
		  procs[cur].lcpu = i;
		  procs[cur].sch_status = PSCH_RUN;
		  procs[cur].sch_info.pos = 0;
		  procs[cur].sch_info.next =
			   procs[cur].sch_info.prev = cur;
		  procs[cur].sch_action = PACT_WAIT_PRETEND;
		  // procs[cur].sch_wait_sem = 0;
		  procs[cur].signal_pending = 0;
		  procs[cur].signal_processing = 0;
		  procs[cur].signal_flags = 0;
		  procs[cur].signal_handler = 0;
		  procs[cur].ex_ap = init_ex_ap;
		  /* Idle runs at the lowest priority so any ready proc wins */
		  procs[cur].priority = PRIORITY_COUNT - 1;
		  /* Runqueue: slot 0 occupied by the idle proc only */
		  lcpus[i].rq_cur = 0;
		  lcpus[i].rq_bits = 0x00000001;
		  int j;
		  for (j = 0; j != PRIORITY_COUNT + 1; ++ j)
		  {
			   lcpus[i].rq_head[j] = PROC_NULL;
		  }
		  lcpus[i].rq_head[0] = cur;

		  /* Touch the stack for each proc */
		  uintptr_t s;
		  for (s = PROC_KSTACK_TOP(cur - 1) + PAGE_SIZE;
			   s != PROC_KSTACK_TOP(cur);
			   s += PAGE_SIZE)
		  {
			   ptab_entry_t pte = PTE_W;
			   int err;
			   if ((err = fix_addr((void *)s, &pte, 0)) < 0)
					return err;
		  }
		  procs[cur].ksp = KSTACK + ((cur + 1) << KSTACK_SHIFT);
	 }

	 /* Set the scheduler */
	 scheduler.do_schedule    = &simple_do_schedule;
	 scheduler.attach		  = &simple_attach;
	 scheduler.detach		  = &simple_detach;
	 scheduler.notify		  = &simple_notify;
	 
	 return 0;
}

/* Context-switch entry point.  Saves the current proc's stack
 * pointer, asks the scheduler for the next runnable proc (skipping
 * procs whose wait request became committed), falls back to the idle
 * proc when nothing is runnable, restores the chosen proc's state,
 * and latches its pending signals for delivery.
 * Locking is hand-over-hand: the candidate's lock is acquired before
 * the previous candidate's lock is released on the next iteration.
 * Returns 0 if the same proc keeps running, otherwise the physical
 * address of the next proc's page table (for the caller to load). */
physaddr_t
do_schedule(uint32_t *esp, uint32_t *eflags, int hint)
{
	 int cur = cur_proc_id();
	 int lcpu = cur_lcpu();
	 int next;
	 
	 spl_acquire(&procs[cur].lock);
	 /* restore temp stack pointer and flag */
	 /* Do not keep the flags, means all return points of scheduling
	  * are in userspace */
	 // procs[next].eflags = *eflags
	 /* Save the stack pointer */
	 procs[cur].esp = *esp;
	 procs[cur].ksp = get_task_ksp(lcpu);

	 int last = cur;
	 next = cur;
	 while (1)
	 {
		  next = scheduler.do_schedule
			   (scheduler.data, next, hint);

		  spl_release(&procs[last].lock);
		  if (next < 0) break;
		  last = next;
		  
		  spl_acquire(&procs[next].lock);
		  /* A proc whose wait request is still pending is skipped;
		   * it gets parked by the next do_schedule call above */
		  if (procs[next].sch_action != PACT_WAIT)
		  {
			   break;
		  }
	 }

	 if (next < 0)
	 {
		  /* No proc to be scheduled, select to the idle proc */
		  sch_notify(lcpus[lcpu].idle_pid, WLEVEL_SYSTEM);
		  next = scheduler.do_schedule
			   (scheduler.data, PROC_NULL, hint);

		  if (next < 0)
		  {
			   /* Even idle is missing: unrecoverable */
			   kprintf("Panic while scheduling on lcpu %d\n", lcpu);
			   monitor(NULL);
		  }
		  else
		  {
			   spl_acquire(&procs[next].lock);
		  }
	 }

	 procs[next].sch_status = PSCH_RUN;
	 procs[next].lcpu = lcpu;

	 /* set the stack pointer and flag */
	 *eflags = procs[next].eflags;
	 *esp = procs[next].esp;
	 set_task_ksp(lcpu, procs[next].ksp);

	 /* Process the schedule event: count schedules and raise the
	  * SIG_SCHEDULE signal when the configured trigger is reached */
	 if (procs[next].signal_flags & SF_SCH)
	 {
		  if (++ procs[next].sch_signal_count >= SIG_SCH_TRIGGER(procs[next].signal_flags))
		  {
			   procs[next].sch_signal_count = SIG_SCH_TRIGGER(procs[next].signal_flags);
			   procs[next].signal_pending |= SIG_SCHEDULE;
		  }
	 }

	 /* Atomically move pending signals into the processing set */
	 procs[next].signal_processing = xchg32(&procs[next].signal_pending, 0);
	 spl_release(&procs[next].lock);

	 if (next == cur)
		  return 0;
	 else return envs[procs[next].env].vpt;
}

/* Allocate an environment slot from the free list and initialize it
 * with the given page-table physical address.
 * Returns the new env id, or PROC_NULL if the list is exhausted. */
proc_id_t
env_alloc(physaddr_t vpt)
{
	 struct mcs_lock_node_t node;
	 proc_id_t id;

	 /* Pop the free-list head under the allocation lock */
	 mcs_acquire(&env_alloc_lock, &node);
	 id = env_free_head;
	 if (id != PROC_NULL)
		  env_free_head = envs[id].free_next;
	 mcs_release(&env_alloc_lock, &node);

	 if (id == PROC_NULL)
		  return PROC_NULL;

	 /* Fresh slot: no instances yet, caller-provided page table */
	 spl_init(&envs[id].lock);
	 envs[id].instance_count = 0;
	 envs[id].vpt = vpt;

	 return id;
}

/* Return an environment slot to the head of the free list. */
void
env_free(proc_id_t id)
{
	 struct mcs_lock_node_t node;

	 mcs_acquire(&env_alloc_lock, &node);
	 envs[id].free_next = env_free_head;
	 env_free_head = id;
	 mcs_release(&env_alloc_lock, &node);
}

/* Set pid's scheduling priority.  Takes effect the next time the proc
 * is (re-)attached to a runqueue.  No validation or locking here —
 * NOTE(review): priority is not range-checked against PRIORITY_COUNT;
 * confirm callers guarantee it.  Always returns 0. */
int
proc_set_priority(int pid, int priority)
{
	 procs[pid].priority = priority;
	 return 0;
}

/* Install the current proc's signal-handler entry address.
 * Always returns 0. */
int
proc_set_signal_handler(uintptr_t sh)
{
	 procs[cur_proc_id()].signal_handler = sh;
	 return 0;
}
	 
/* Replace the current proc's signal flags wholesale.
 * Always returns 0. */
int
proc_set_signal_flags(uint32_t flags)
{
	 procs[cur_proc_id()].signal_flags = flags;
	 return 0;
}

/* Post signal bits to a proc and wake it if it is waiting at user
 * level or below.  Signals to invalid procs are silently dropped.
 * Always returns 0.
 * FIX: the function was declared int but had no return statement —
 * undefined behavior if a caller reads the result. */
int proc_signal(proc_id_t proc, uint32_t sig)
{
	 spl_acquire(&procs[proc].lock);
	 if ((procs[proc].flags & PFLAG_VALID))
	 {
		  procs[proc].signal_pending |= sig;
		  /* Only interrupt waits up to WLEVEL_USER; deeper system
		   * waits are not disturbed by signals */
		  if (procs[proc].wait_level <= WLEVEL_USER) 
			   scheduler.notify(scheduler.data, proc);
	 }
	 spl_release(&procs[proc].lock);
	 return 0;
}

/* Fork proc `proc' into a new proc slot: allocates a pid, maps and
 * copies the kernel stack page-by-page, shares the environment
 * (instance_count++, vpt ref++), and clones scheduling/signal state.
 * The child starts parked (PSCH_WAIT at WLEVEL_START) until notified.
 * Returns the new pid, or a negative error (also stored in
 * tf->regs.eax) on failure.
 * NOTE(review): on fix_addr failure the pid and already-mapped stack
 * pages are leaked — acknowledged by the comment below. */
int
proc_fork(struct trapframe_t *tf, int proc)
{
	 struct mcs_lock_node_t node;
	 proc_id_t cur = cur_proc_id();
	 proc_id_t npid;

	 /* Pop a proc slot from the free list */
	 mcs_acquire(&proc_alloc_lock, &node);
	 npid = proc_free_head;
	 if (npid == PROC_NULL)
	 {
		  mcs_release(&proc_alloc_lock, &node);
		  tf->regs.eax = -E_NO_PROC;
		  return -E_NO_PROC;
	 }
	 proc_free_head = procs[npid].free_next;
	 mcs_release(&proc_alloc_lock, &node);

	 procs[npid].ksp = KSTACK + ((npid + 1) << KSTACK_SHIFT);
	 /* Offset between the child's and the caller's kernel stacks;
	  * used to translate stack addresses when copying */
	 uintptr_t kstk_delta = procs[npid].ksp - procs[cur].ksp;
	 /* Touch the kernel stack for the new proc */
	 uintptr_t s;
	 for (s = PROC_KSTACK_TOP(npid - 1) + PAGE_SIZE;
		  s != PROC_KSTACK_TOP(npid);
		  s += PAGE_SIZE)
	 {
		  ptab_entry_t pte = PTE_W;
		  int err;
		  if ((err = fix_addr((void *)s, &pte, 0)) < 0)
		  {
			   /* Clear the flag */
			   /* We should to clean work here. :( */
			   procs[npid].flags = 0;
			   tf->regs.eax = err;
			   return err;
		  }
		  /* Copy the stack data */
		  memmove((void *)s, (void *)(s - kstk_delta), PAGE_SIZE);
	 }

	 spl_init(&procs[npid].lock);
	 /* Child inherits flags except idle status */
	 procs[npid].flags = procs[proc].flags & ~PFLAG_IDLE;
	 procs[npid].lcpu = procs[proc].lcpu;
	 
	 /* Share the parent's environment */
	 procs[npid].env = procs[proc].env;
	 spl_acquire(&envs[procs[npid].env].lock);
	 ++ envs[procs[npid].env].instance_count;
	 spl_release(&envs[procs[npid].env].lock);

	 procs[npid].eflags = procs[proc].eflags;
	 /* Increase the ref count of vpt */
	 pmem_page_inc_ref(envs[procs[npid].env].vpt);
	 
	 /* Tricky here for currectly fork: the trapframe pointer is
	  * rebased onto the child's copied stack */
	 procs[npid].esp = (uint32_t)tf + kstk_delta;
	 procs[npid].sch_status = PSCH_WAIT;
	 procs[npid].wait_level = WLEVEL_START;
	 
	 procs[npid].sch_action = PACT_WAIT_PRETEND;
	 // procs[npid].sch_wait_sem = 0;
	 procs[npid].signal_pending = 0;
	 procs[npid].signal_processing = 0;
	 /* SF_READY is cleared so the child must re-arm its handler */
	 procs[npid].signal_flags = procs[proc].signal_flags & ~SF_READY;
	 procs[npid].signal_handler = procs[proc].signal_handler;
	 procs[npid].sch_signal_count = procs[proc].sch_signal_count;
	 procs[npid].priority = procs[proc].priority;

	 procs[npid].ex_ap = procs[proc].ex_ap;
	 
	 return npid;
}

/* Create a new process in a brand-new environment built around the
 * given page table.  Allocates an env and a proc, maps and copies the
 * kernel stack, and initializes scheduling/signal state from the
 * explicit arguments (unlike proc_fork, nothing is inherited).
 * The new proc starts parked (PSCH_WAIT at WLEVEL_START).
 * Returns the new pid, or a negative error (also stored in
 * tf->regs.eax) on failure.
 * NOTE(review): on fix_addr failure the pid, env, and already-mapped
 * stack pages are leaked — same as proc_fork. */
int
env_create(struct trapframe_t *tf, physaddr_t vpt,
		   uint32_t init_flags, uint32_t init_eflags, int init_priority,
		   int ex_ap)
{
	 int env = env_alloc(vpt);
	 /* FIX: env_alloc's failure result was never checked; on
	  * exhaustion the code proceeded with env == PROC_NULL */
	 if (env == PROC_NULL)
	 {
		  tf->regs.eax = -E_NO_PROC;
		  return -E_NO_PROC;
	 }
	 
	 struct mcs_lock_node_t node;
	 proc_id_t cur = cur_proc_id();
	 proc_id_t npid;

	 /* Pop a proc slot from the free list */
	 mcs_acquire(&proc_alloc_lock, &node);
	 npid = proc_free_head;
	 if (npid == PROC_NULL)
	 {
		  mcs_release(&proc_alloc_lock, &node);
		  env_free(env);
		  tf->regs.eax = -E_NO_PROC;
		  return -E_NO_PROC;
	 }
	 proc_free_head = procs[npid].free_next;
	 mcs_release(&proc_alloc_lock, &node);

	 procs[npid].ksp = KSTACK + ((npid + 1) << KSTACK_SHIFT);
	 /* Offset between the new proc's and the caller's kernel stacks */
	 uintptr_t kstk_delta = procs[npid].ksp - procs[cur].ksp;
	 /* Touch the kernel stack for the new proc */
	 uintptr_t s;
	 for (s = PROC_KSTACK_TOP(npid - 1) + PAGE_SIZE;
		  s != PROC_KSTACK_TOP(npid);
		  s += PAGE_SIZE)
	 {
		  ptab_entry_t pte = PTE_W;
		  int err;
		  if ((err = fix_addr((void *)s, &pte, 0)) < 0)
		  {
			   /* Clear the flag */
			   /* We should to clean work here. :( */
			   procs[npid].flags = 0;
			   tf->regs.eax = err;
			   return err;
		  }
		  /* Copy the stack data */
		  memmove((void *)s, (void *)(s - kstk_delta), PAGE_SIZE);
	 }

	 spl_init(&procs[npid].lock);
	 procs[npid].flags = init_flags;
	 procs[npid].lcpu = procs[cur].lcpu;
	 
	 /* Fresh env: no other instance exists yet, so no lock needed */
	 procs[npid].env = env;
	 ++ envs[env].instance_count;

	 procs[npid].eflags = init_eflags;
	 /* Increase the ref count of vpt */
	 pmem_page_inc_ref(envs[procs[npid].env].vpt);
	 
	 /* Tricky here for currectly fork: the trapframe pointer is
	  * rebased onto the new proc's copied stack */
	 procs[npid].esp = (uint32_t)tf + kstk_delta;
	 procs[npid].sch_status = PSCH_WAIT;
	 procs[npid].wait_level = WLEVEL_START;
	 
	 procs[npid].sch_action = PACT_WAIT_PRETEND;
	 // procs[npid].sch_wait_sem = 0;
	 procs[npid].signal_pending = 0;
	 procs[npid].signal_processing = 0;
	 procs[npid].signal_flags = 0;
	 procs[npid].signal_handler = 0;
	 procs[npid].sch_signal_count = 0;
	 procs[npid].priority = init_priority;
	 procs[npid].ex_ap = ex_ap;

	 return npid;
}

int
proc_kill(int proc)
{
	 /* TO FIX MORE */
	 int result = 0;
	 int env;
	 spl_acquire(&procs[proc].lock);
	 if ((procs[proc].flags & PFLAG_VALID) &&
		 !(procs[proc].flags & PFLAG_IDLE) &&
		 (procs[proc].sch_status == PSCH_DETACHED))
	 {
		  procs[proc].flags = 0;
		  env = procs[proc].env;
	 }
	 else result = -E_INVAL;
	 spl_release(&procs[proc].lock);

	 if (result == 0)
	 {
		  spl_acquire(&envs[procs[proc].env].lock);
		  -- envs[procs[proc].env].instance_count;
		  spl_release(&envs[procs[proc].env].lock);
	 }

	 return result;
}

/* Deliver an exception IPS message to pid's exception access point,
 * blocking the current proc (wait-pretend / wait-try / yield) until
 * the send can proceed, then finalizing the send after resumption.
 * `level' is the wait level for the blocking window. */
void
send_exception(int pid, uint64_t info, int level)
{
	 /* Encode (pid, EXCEPTION) into a single ips channel id */
	 int ips = ((pid << PROCESS_IPS_COUNT_SHIFT) | PROCESS_IPS_EXCEPTION);
	 sch_wait_pretend(level);
	 ips_send(procs[pid].ex_ap, ips,
			  info);
	 sch_wait_try();
	 call_yield();
	 
	 /* After resuming: abort any still-pending send attempt, then
	  * retry once without blocking */
	 ips_send_break(ips);
	 ips_send_try(ips, 0);
}
