/*
  S.M.A.C.K - An operating system kernel
  Copyright (C) 2010,2011 Mattias Holm and Kristian Rietveld
  For licensing and a full list of authors of the kernel, see the files
  COPYING and AUTHORS.
*/

#include <assert.h>
#include <process.h>
#include <interrupt.h>
#include <timer.h>
#include <string.h>
#include <arch-types.h>
#include <vm.h>
#include <sync.h>
#include <slaballoc.h>
#include <errno.h>
#include <idle.h>
#include <syscall.h> /* for proc_fill_processes() */


/* Upper bound for process identifiers (cf. next_pid()). */
#define MAX_PID 32768

/* Slab allocator backing all proc_t structures. */
slab_t proc_slab = SLAB_INITIALISER(proc_t);
/* Process 0: the kernel process, which doubles as the idle process. */
static proc_t kernel_proc = { .time_quanta = PROC_TIME_QUANTA, .pid = 0 };
/* Last pid handed out by next_pid(). */
static pid_t last_proc = 0;

/* Protects the global process list, the ready queues and the block
 * lists declared below. */
static spinlock_t plock = SPIN_INITIALISER;

// List of all processes
static proc_list_t processes = { NULL, NULL };

// Ready queues (only the normal-priority queue is used in this file;
// idle_prio_ready appears unused here — TODO confirm)
static proc_list_t normal_prio_ready = {NULL, NULL};
static proc_list_t idle_prio_ready = {NULL, NULL};

#if 0
/* To be used for MLFQ */
static proc_list_t high_prio_ready = {NULL, NULL};
static proc_list_t low_prio_ready = {NULL, NULL};
#endif

// Blocked processes are kept on one of the lists below, depending on
// why they are blocked.
static proc_list_t sleeping = { NULL, NULL };     // sleeping via proc_sleep()
static proc_list_t blocked_wait = { NULL, NULL }; // parents blocked in proc_wait()
static proc_list_t zombie_list = { NULL, NULL };  // exited, not yet reaped

/* The process currently running on the CPU; boots as the kernel process. */
proc_t *current_proc = &kernel_proc;

static inline bool
is_idle_process(proc_t *p)
{
  /* The kernel process (process 0) doubles as the idle process. */
  if (p == &kernel_proc)
    return true;

  return false;
}

proc_t*
proc_current(void)
{
  /* Return the process currently executing on the CPU. */
  return current_proc;
}

extern vm_map_t kernel_vm_map[];


/* Called from an interrupt handler */
/* Per-tick accounting, called from the timer interrupt handler.
 *
 * Charges one tick of CPU time to the running process p.  If the
 * idle/kernel process is running, try to pick a ready process and
 * preempt to it so that runnable work does not sit behind idle.
 */
void
proc_tick(proc_t *p)
{
  p->cpu_time++;

  /* Use the dedicated predicate instead of comparing against
   * &kernel_proc directly, consistent with is_idle_process(). */
  if (is_idle_process(p))
    {
      proc_t *next = proc_schedule();
      if (next)
        proc_preempt(next);
    }
}

static pid_t
next_pid(void)
{
  last_proc++;
  /* FIXME: Should have overflow protection.  After we have wrapped
   * around, we cannot just hand out the next pid anymore, but
   * actually have to check if it is in use.
   */

  return last_proc;
}

proc_t*
proc_create(proc_t *parent)
{
  /* Allocate and initialise a new process structure, assign it a
   * fresh pid and link it into the global process list.  The new
   * process is NOT placed on a ready queue here (see proc_load() and
   * proc_dup()).  Returns NULL when the slab allocation fails.
   */
  proc_t *proc = slab_alloc(&proc_slab);

  if (!proc)
    return NULL;

  memset(proc, 0, sizeof(proc_t));

  /* Set start and size of process' address space */
  proc->vm_map.start = 0x00001000;
  proc->vm_map.size = 0x7ffff000; // All user space minus null page

  /* Architecture-specific setup.
   * NOTE(review): any failure from hw_proc_create() is not checked
   * here — confirm whether it can fail. */
  hw_proc_create(proc);

  proc->name = NULL;
  proc->parent = parent;
  proc->pid = next_pid();
  proc->time_quanta = 0;
  proc->cpu_time = 0;

  /* Set working directory of process.
   * NOTE(review): if vfs_getnode("/") fails, cwd is left NULL. */
  proc->cwd = vfs_getnode("/");

  memset(proc->fd, 0, sizeof (proc->fd));
  memset(proc->directory, 0, sizeof (proc->directory));

  spin_lock(&plock);
  LIST_APPEND(processes, proc, processes);
  spin_unlock(&plock);

  return proc;
}

int
proc_load(proc_t *proc, const char *file)
{
  /* Load the executable `file` into `proc` and make the process
   * runnable.  Always returns 0.
   * NOTE(review): the result of hw_proc_load() is ignored — confirm
   * whether load errors should be propagated to the caller.
   */

  /* Do the architecture-dependent portion of the process loading */
  hw_proc_load(proc, file);

  /* The currently running process is not on a ready queue, so only
   * enqueue when loading into a different process. */
  if (proc != current_proc)
    {
      spin_lock(&plock);
      proc_enqueue(proc);
      spin_unlock(&plock);
    }

  return 0;
}

void
proc_unmap(proc_t *p)
{
  /* Release every region mapped in p's address space.  vm_unmap()
   * takes the region off the list, so repeatedly unmapping the first
   * region drains the list completely.
   */
  for (;;)
    {
      if (LIST_EMPTY(p->vm_map.region))
        break;

      vm_region_t *first = LIST_FIRST(p->vm_map.region);
      vm_unmap(&p->vm_map, (void *)first->start);
    }
}

void
proc_unload(proc_t *p)
{
  /* Tear down process p: close its files and directories, unmap its
   * address space, release architecture-specific state, remove it
   * from the global process list and free the proc structure.  After
   * this call p is a dangling pointer.
   */

  /* Close files and directories */
  for (int i = 0; i < PROC_MAX_FILES; i++)
    /* Temporary hack to e.g. not close the ramdisk while it is in use
     * (for example when fsck opens the ramdisk for reading).
     */
    if (p->fd[i] && p->fd[i]->vnode->type != VFS_DEVICE)
      vfs_close(p->fd[i]);

  for (int i = 0; i < PROC_MAX_FILES; i++)
    if (p->directory[i])
      vfs_closedir(p->directory[i]);

  proc_unmap(p);
  hw_proc_unload(p);

  spin_lock(&plock);
  LIST_REMOVE(processes, p, processes);
  spin_unlock(&plock);

  slab_free(p);
}

/* Duplicate process p (fork-style).  The child inherits the parent's
 * working directory and duplicates of all open files and directories,
 * and is placed on the ready queue.
 *
 * Returns the new child process, or NULL when no process structure
 * could be allocated.
 */
proc_t *
proc_dup(proc_t *p)
{
  proc_t *dup;

  dup = proc_create(p);
  if (!dup)
    return NULL;  /* allocation failed; previously this NULL was
                   * passed straight into hw_proc_dup() */

  hw_proc_dup(dup, p);

  dup->cwd = p->cwd;

  for (int i = 0; i < PROC_MAX_FILES; i++)
    if (p->fd[i])
      dup->fd[i] = vfs_dup(p->fd[i]);

  for (int i = 0; i < PROC_MAX_FILES; i++)
    if (p->directory[i])
      dup->directory[i] = vfs_dup(p->directory[i]);

  spin_lock(&plock);
  proc_enqueue(dup);
  spin_unlock(&plock);

  return dup;
}

void
proc_exit(proc_t *p, int exit_code)
{
  /* Terminate process p with the given exit code.  p becomes a
   * zombie on zombie_list until its parent reaps it via proc_wait().
   * If the parent is already blocked in wait(), it is resumed and
   * will run next.  When p is the current process this function does
   * not return.
   */
  proc_t *next = NULL;
  proc_t *entry;
  uint32_t flags = save_flags();

  disable_interrupts();

  spin_lock(&plock);

  /* Look for parent process in blocked_wait */
  LIST_FOREACH(entry, blocked_wait, sched)
    {
      if (entry == p->parent)
        {
          /* If the parent is indeed blocked in wait(), it will
           * be the next process to run.
           */
          next = entry;
          break;
        }
    }
  spin_unlock(&plock);

  /* FIXME: Perhaps have a zombie state? */
  p->state = PROC_BLOCKED;

  /* If we did not find a parent blocked in wait(), find another
   * process to run next.
   */
  if (!next)
    next = proc_schedule();
  else
    {
      /* Resume the blocked parent */
      proc_resume(&blocked_wait, next);

      /* Immediately take it off the run queue because it will run next */
      spin_lock(&plock);
      proc_dequeue(next);
      spin_unlock(&plock);
    }

  p->exit_code = exit_code;

  /* NOTE(review): p is typically the running process and thus not on
   * the ready queue — confirm LIST_REMOVE tolerates removing an
   * element that is not on the list. */
  spin_lock(&plock);
  proc_dequeue(p);
  LIST_APPEND(zombie_list, p, sched);
  spin_unlock(&plock);

  if (p == current_proc)
    proc_switch(next);

  /* Only reached when exiting a process other than the current one */
  restore_flags(flags);
}

int
proc_wait(proc_t *p, int *exit_code)
{
  /* Wait for any child of p to exit.  Blocks on blocked_wait until a
   * zombie child is found on zombie_list, then reaps it: the child's
   * exit code is stored in *exit_code (when non-NULL), the child is
   * freed with proc_unload(), and its pid is returned.
   *
   * NOTE(review): if a child exits after the zombie_list scan but
   * before proc_block() puts p on blocked_wait, proc_exit() will not
   * find p there — verify this lost-wakeup window cannot leave the
   * parent blocked forever.
   */
  int child_pid;
  proc_t *child = NULL;

  while (!child)
    {
      proc_t *entry;

      spin_lock(&plock);
      LIST_FOREACH(entry, zombie_list, sched)
        {
          if (entry->parent == p)
            {
              child = entry;
              break;
            }
        }
      spin_unlock(&plock);

      /* If we did not find a zombie process, block waiting for now */
      if (child == NULL)
        proc_block(&blocked_wait, p);
    }

  spin_lock(&plock);
  LIST_REMOVE(zombie_list, child, sched);
  spin_unlock(&plock);

  child_pid = child->pid;
  if (exit_code)
    *exit_code = child->exit_code;
  proc_unload(child);

  return child_pid;
}

void
proc_yield(proc_t *p)
{
  /* Voluntarily give up the CPU: mark the running process ready
   * (PROC_IDLE) and let the scheduler pick what runs next.
   */
  assert(p->state == PROC_RUNNING && "Cannot yield a non running process");

  spin_lock(&plock);
  p->state = PROC_IDLE;
  spin_unlock(&plock);

  proc_schedule_and_switch();
}

void
proc_block(proc_list_t *block_list, proc_t *p)
{
  /* Park process p on the given block list.  If p was the running
   * process, switch away afterwards; if it was merely ready (on the
   * ready queue), take it off the queue and do not switch.
   */
  bool need_switch;

  assert(p->state != PROC_BLOCKED && "Cannot block a blocked process");

  spin_lock(&plock);

  need_switch = (p->state != PROC_IDLE);
  if (!need_switch)
    proc_dequeue(p); // p sat on the ready queue, not the CPU

  p->state = PROC_BLOCKED;
  LIST_APPEND(*block_list, p, sched);

  spin_unlock(&plock);

  if (need_switch)
    proc_schedule_and_switch();
}

void
proc_resume(proc_list_t *block_list, proc_t *p)
{
  /* Move a blocked process off its block list, mark it ready
   * (PROC_IDLE) and put it back on the ready queue.
   */
  assert(p->state == PROC_BLOCKED && "Cannot resume an unblocked process");

  spin_lock(&plock);
  LIST_REMOVE(*block_list, p, sched);
  p->state = PROC_IDLE;
  proc_enqueue(p);
  spin_unlock(&plock);
}

void
proc_signal(proc_t *p, int sig)
{
  /* Deliver signal `sig` to process `p`.  Not yet implemented; this
   * is currently a no-op. */
  // TODO: Implement: should modify the process stack pointer, pc and lr in
  //       order to invoke a process signal handler.
}

/* Can only be used from SVC mode (system call invocation) */
void
proc_switch(proc_t *p)
{
  /* Context-switch from the current process to p.  The caller must
   * already have recorded the current process's new state and queue
   * membership.  This function "returns" only when the original
   * process is later switched back in.
   */
  proc_t *current;
  uint32_t flags = save_flags();

  disable_interrupts();

  current = current_proc;

  assert(p != current_proc && "Cannot switch to same process");

  /* The state of the current process has already been set to non-running.
   * Here, we switch current process to p.
   */
  current_proc = p;
  current_proc->state = PROC_RUNNING;

  cswitch(p, current);

  /* By the time cswitch "returns", "current" is running again. */
  current_proc = current;
  current_proc->state = PROC_RUNNING;

  restore_flags(flags);
}

/* Can only be used from IRQ mode (interrupt handler) */
void
proc_preempt(proc_t *p)
{
  /* Preempt the currently running process in favour of p.  The
   * preempted process is marked ready (PROC_IDLE); the register
   * state swap itself is done by hw_proc_preempt().
   * NOTE(review): the preempted process is not enqueued here —
   * confirm callers (e.g. proc_schedule()) have already done so.
   */
  uint32_t flags = save_flags();
  disable_interrupts();

  hw_proc_preempt(p);

  current_proc->state = PROC_IDLE;
  p->state = PROC_RUNNING;
  current_proc = p;

  restore_flags(flags);
}

void
proc_enqueue(proc_t *p)
{
  /* Append p to the ready queue.  The caller must hold plock (every
   * call site in this file does). */
  LIST_APPEND(normal_prio_ready, p, sched);
}

void
proc_dequeue(proc_t *p)
{
  /* Remove p from the ready queue.  The caller must hold plock (every
   * call site in this file does). */
  LIST_REMOVE(normal_prio_ready, p, sched);
}

proc_t*
proc_schedule(void)
{
  /* Pick the process that should run next:
   *  - if the current process just blocked, take the first ready
   *    process off the queue;
   *  - if the idle/kernel process is running while real work is
   *    ready, requeue the kernel process and take the first ready
   *    process;
   *  - otherwise keep running the current process.
   *
   * NOTE(review): when the current process is blocked and the ready
   * queue is empty, LIST_FIRST() yields NULL and proc_dequeue(NULL)
   * is called — confirm the kernel process is guaranteed to be on
   * the queue in that situation.
   */
  proc_t *current = NULL;
  proc_t *next = NULL;

  spin_lock(&plock);
  {
    current = proc_current();

    if (current->state == PROC_BLOCKED)
      {
        next = LIST_FIRST(normal_prio_ready);
        proc_dequeue(next);

      }
    else if (current == &kernel_proc && !LIST_EMPTY(normal_prio_ready))
      {
        proc_enqueue(current);
        next = LIST_FIRST(normal_prio_ready);
        proc_dequeue(next);
      }
    else
      next = current;
  }
  spin_unlock(&plock);

  return next;
}

/* In the event an interrupt occurs in between proc_schedule()
 * and proc_switch(), the selected next_proc can be rendered
 * invalid.  To avoid this, we disable interrupts.
 */
void
proc_schedule_and_switch(void)
{
  /* Run the scheduler and context-switch to its choice, with
   * interrupts disabled across the schedule/switch pair.
   * NOTE(review): proc_schedule() can return the current process,
   * and proc_switch() asserts against switching to self — confirm
   * callers only reach here when a different process will be chosen.
   */
  uint32_t flags = save_flags();

  disable_interrupts();

  proc_t *next_proc = proc_schedule();
  if (next_proc)
    proc_switch(next_proc);

  restore_flags(flags);
}

/* Move every process on block_list back onto the ready queue.
 *
 * Mirrors proc_resume(): each process is marked PROC_IDLE (ready)
 * before being enqueued, so its state tag matches the queue it sits
 * on.  Previously the state was left as-is, which the old comment
 * itself flagged as a problem.
 */
void
proc_unblock_list(proc_list_t *block_list)
{
  spin_lock(&plock);

  while (!LIST_EMPTY(*block_list))
    {
      proc_t *entry = LIST_FIRST(*block_list);
      LIST_REMOVE(*block_list, entry, sched);

      entry->state = PROC_IDLE;
      proc_enqueue(entry);
    }

  spin_unlock(&plock);
}

/* This function is called from an interrupt handler */
void
proc_wake(proc_t *p)
{
  /* Timer callback for proc_sleep(): move the sleeping process p
   * back onto the ready queue, then reschedule and preempt so it can
   * run promptly.
   */
  proc_t *next_proc;
  uint32_t flags;

  proc_resume(&sleeping, p);

  flags = save_flags();

  disable_interrupts();

  next_proc = proc_schedule();
  if (next_proc)
    proc_preempt(next_proc);

  restore_flags(flags);
}

void
proc_sleep(proc_t *p, int seconds)
{
  /* Put p to sleep for `seconds` seconds: register a timer callback
   * to proc_wake() and block on the sleeping list.
   *
   * NOTE(review): calling proc_wake through a cast timeout_func_t is
   * only well-defined if the signatures actually match — verify
   * timeout_func_t.  Also `seconds * 1000` can overflow int for very
   * large sleep values.
   */
  timer_register_timeout(seconds * 1000, (timeout_func_t)proc_wake, p);

  proc_block(&sleeping, p);
}

int
proc_opendir(proc_t *p, vfs_vnode_t *n)
{
  /* Open directory node n on behalf of process p.  Returns the
   * directory handle (an index into p->directory), -EMFILE when the
   * table is full, or -ENOMEM when vfs_opendir() fails.
   */
  int slot = -1;

  for (int i = 0; i < PROC_MAX_FILES && slot < 0; i++)
    if (p->directory[i] == NULL)
      slot = i;

  if (slot < 0)
    return -EMFILE;

  p->directory[slot] = vfs_opendir(n);
  if (p->directory[slot] == NULL)
    return -ENOMEM;

  return slot;
}

void
proc_closedir(proc_t *p, int handle)
{
  /* Close the directory behind `handle` and release the slot.
   * Now calls vfs_closedir() before clearing the slot, consistent
   * with proc_close() and proc_unload(); previously the slot was
   * cleared without closing, leaking the vfs directory handle.
   */
  if (p->directory[handle])
    vfs_closedir(p->directory[handle]);

  /* FIXME: Should we release the node?  We need ref counting for vfs nodes */
  p->directory[handle] = NULL;
}

int
proc_open(proc_t *p, vfs_vnode_t *n)
{
  /* Open file node n on behalf of process p.  Returns the file
   * descriptor (an index into p->fd), -EMFILE when the descriptor
   * table is full, or -ENOMEM when vfs_open() fails.
   */
  int slot = -1;

  for (int i = 0; i < PROC_MAX_FILES && slot < 0; i++)
    if (p->fd[i] == NULL)
      slot = i;

  if (slot < 0)
    return -EMFILE;

  p->fd[slot] = vfs_open(n);
  if (p->fd[slot] == NULL)
    return -ENOMEM;

  return slot;
}

void
proc_close(proc_t *p, int handle)
{
  /* Close the open file behind `handle` and release the descriptor
   * slot.  `handle` is assumed valid and open; no bounds or NULL
   * check is performed here.
   */
  vfs_close(p->fd[handle]);

  /* FIXME: Should we release the node?  We need ref counting for vfs nodes */
  p->fd[handle] = NULL;
}

int
proc_chdir(proc_t *p, const char *directory)
{
  /* Change p's working directory to `directory`.  Returns 0 on
   * success, -ENOENT when the path does not resolve, or -ENOTDIR
   * when it resolves to something that is not a directory.
   */
  vfs_vnode_t *node = vfs_getnode(directory);

  if (node == NULL)
    return -ENOENT;

  /* Only directories and mount points may become the cwd. */
  switch (node->type)
    {
    case VFS_DIR:
    case VFS_MOUNT:
      p->cwd = node;
      return 0;
    default:
      return -ENOTDIR;
    }
}

void
proc_getwd(proc_t *p, char *directory, size_t size)
{
  /* Write the path of p's working directory into `directory` (at
   * most `size` bytes); delegates to vfs_getpath(). */
  vfs_getpath(p->cwd, directory, size);
}

void
proc_fill_processes(void *args)
{
  /* System call helper: fill `args` (a struct sys_get_processes)
   * with the pid and accumulated cpu time of every process, starting
   * with the kernel/idle process.
   *
   * NOTE(review): the 32-entry cap is hard-coded here; it must match
   * the capacity of sys_get_processes.processes — confirm against
   * the struct definition.
   */
  proc_t *entry;
  struct sys_get_processes *get_proc = args;

  /* add kernel process (idle) */
  get_proc->processes[0].pid = kernel_proc.pid;
  get_proc->processes[0].cpu_time = kernel_proc.cpu_time;

  get_proc->n_processes = 1;

  spin_lock(&plock);
  LIST_FOREACH(entry, processes, processes)
    {
      get_proc->processes[get_proc->n_processes].pid = entry->pid;
      get_proc->processes[get_proc->n_processes].cpu_time = entry->cpu_time;
      get_proc->n_processes++;

      if (get_proc->n_processes >= 32)
        break;
    }
  spin_unlock(&plock);
}
