/*
 * sched.c - initializes struct for task 0
 */

#include <sched.h>

struct semaphore sem[NR_SEM];

unsigned int pending_eoi = 0;
unsigned int remaining_quantum = DEFAULT_QUANTUM;
LIST_HEAD(runqueue);
LIST_HEAD(freequeue);
LIST_HEAD(keyboardqueue);
unsigned int next_pid = 0;
/* Hands out a fresh PID by pre-incrementing the global counter. */
unsigned int get_next_pid(void)
{
  next_pid++;
  return next_pid;
}

struct protected_task_struct task[NR_TASKS]
  __attribute__((__section__(".data.task")));

void init_sems(void)
{
  int i;
  for (i = 0; i < NR_SEM; i++)
    sem[i].initialized = 0;
}

/*
 * Fills in the bookkeeping fields of a task that is about to become READY.
 * Assumes t->quantum has already been set by the caller, since it is copied
 * into the per-task statistics.
 */
void init_task_info(struct task_struct* t, unsigned int pid, unsigned int ppid)
{
  /* Identity and state. */
  t->stat = READY;
  t->pid = pid;
  t->ppid = ppid;

  /* Fresh accounting: no ticks consumed, no context switches yet. */
  t->task_stats.cs = 0;
  t->task_stats.tics = 0;
  t->task_stats.remaining_quantum = t->quantum;

  /* No keyboard read in progress. */
  t->kb_params.read_bytes = 0;
  t->kb_params.remaining_bytes = 0;
}

/*
 * Allocates NUM_PAG_DATA physical frames for a task's data pages,
 * recording them in t->data_pages.
 * Returns 1 on success, 0 if physical memory runs out — in which case
 * every frame already grabbed is released again (all-or-nothing).
 */
int alloc_task_pages(struct task_struct* t)
{
  int page;

  for (page = 0; page < NUM_PAG_DATA; page++) {
    t->data_pages[page] = alloc_frame();

    if (t->data_pages[page] < 0) {
      /* Out of frames: roll back everything allocated so far. */
      int undo;
      for (undo = page - 1; undo >= 0; undo--)
        free_frame(t->data_pages[undo]);
      return 0;
    }
  }

  return 1;
}

void init_tasks(void)
{
  int i;
  for (i = 0; i < NR_TASKS; i++) {
    list_add_tail(&task[i].t.task.list, &freequeue);
    task[i].t.task.stat = FREE;
  }
}

/* Clears the channel table of task 0 and opens its three standard
 * channels (0, 1 and 2). */
void init_task0_channels(struct task_struct* t)
{
  int ch;

  /* Start with every channel slot marked free. */
  for (ch = NR_CHANNELS - 1; ch >= 0; ch--)
    t->channels[ch].initialized = 0;

  init_open_file(t, 0, 1, O_RDONLY);
  init_open_file(t, 1, 0, O_WRONLY);
  /* NOTE(review): channel 2 (conventionally stderr) is opened O_RDONLY,
   * unlike channel 1 — confirm this is intentional and not a typo for
   * O_WRONLY. Preserved as-is. */
  init_open_file(t, 2, 0, O_RDONLY);
}

void init_task0(void)
{
  /* Initializes paging for the process 0 adress space */
  set_user_pages(&task[0].t.task); // Now also saves the pages in task[0].t.task.data_pages
  set_cr3();
  task[0].t.task.quantum = DEFAULT_QUANTUM;
  init_task_info(&task[0].t.task, 0, 0); // Set task0 with pid 0 and ppid 0
  init_task0_channels(&task[0].t.task); // Set default channels 0, 1 and 2
  list_move(&task[0].t.task.list, &runqueue); // Move task0 from freequeue to runqueue
}

/* Returns the task_struct of the running process. The task_union holding
 * the kernel stack appears to be 4 KiB-aligned (see the .data.task section
 * placement), so masking the low 12 bits of %esp yields its base, where
 * the task_struct lives. */
struct task_struct* current()
{
  unsigned long sp = 0;
  __asm__("movl %%esp, %0": "=g" (sp) );
  return (struct task_struct*)(sp & 0xFFFFF000);
}

/* Recovers the task_struct that embeds the given list node. */
struct task_struct* list_head_to_task_struct(struct list_head* l)
{
  struct task_struct *owner = list_entry(l, struct task_struct, list);
  return owner;
}

struct task_struct* get_active_task(unsigned int pid){
  int i;
  for (i = 0; i < NR_TASKS; i++)
    if (task[i].t.task.pid == pid && (task[i].t.task.stat == READY || task[i].t.task.stat == BLOCKED))
        return &task[i].t.task;

  return 0;
}  

/*
 * Performs a context switch to task t. Never returns to the caller:
 * it pivots onto t's kernel stack and iret's into t's saved context.
 * Assumes t's kernel stack already holds a saved register frame at
 * stack[KERNEL_STACK_SIZE - 16] (11 registers + the 5-word iret frame),
 * as laid out by the entry/fork code — see unblock_process, which patches
 * the saved eax at offset KERNEL_STACK_SIZE - 10 in that same frame.
 */
void task_switch(union task_union *t)
{
  int i;
  // a) Update the TSS so the next kernel entry uses t's stack top
  tss.esp0 = (DWord)&(t->stack[KERNEL_STACK_SIZE]);

  // b) Update page table: map t's data frames into the user data region
  for (i = 0; i < NUM_PAG_DATA; i++)
    set_ss_pag(PAG_LOG_INIT_DATA_P0 + i, t->task.data_pages[i]);
  set_cr3(); // Flush the TLB

  // e) Send a deferred EOI to the master PIC (port 0x20, command 0x20)
  //    before abandoning the current stack, so the timer interrupt that
  //    triggered this switch is acknowledged exactly once.
  if (pending_eoi) {
    __asm__ __volatile__(
      "movb $0x20, %al\n"
      "outb %al, $0x20\n" );
    pending_eoi = 0;
  }

  // c) Switch the system stack to the current process
  //    (-16: skip down to the base of the saved register frame)
  __asm__ __volatile__(
    "movl %0, %%esp\n" : : "g" ((unsigned long)&(t->stack[KERNEL_STACK_SIZE - 16])) );

  __asm__ __volatile__(
  // d) Restore the registers in the exact reverse order they were pushed
    "popl %ebx; \n"
    "popl %ecx; \n"
    "popl %edx; \n"
    "popl %esi; \n"
    "popl %edi; \n"
    "popl %ebp; \n"
    "popl %eax; \n"
    "popl %ds; \n"
    "popl %es; \n"
    "popl %fs; \n"
    "popl %gs \n"
  // f) IRET consumes the remaining 5-word frame (eip, cs, eflags, esp, ss)
    "iret\n" );
}

/*
 * Round-robin scheduler, called once per clock tick.
 * Charges the tick to the running process and, when the global quantum
 * expires, rotates the runqueue and switches to the new head (unless the
 * current process is the only one runnable).
 */
void scheduling() {
  struct task_struct* curr;
  struct task_struct *newt;
  
  // Consume one tick of the global CPU quantum
  remaining_quantum--;
  
  // Update the running process's statistics
  curr = current();
  curr->task_stats.tics++;
  curr->task_stats.remaining_quantum--;

  // When the quantum runs out, try to switch processes
  if (remaining_quantum == 0) {
    // If the current process is the only one on the runqueue
    // (running process sits at the head, so head == tail means alone),
    // just recharge its quantum — no context switch needed.
    if (list_is_last(&curr->list, &runqueue)) {
      remaining_quantum = curr->quantum;
      curr->task_stats.remaining_quantum = curr->quantum;
    }
    else {
      // Rotate: move the current process to the tail of the runqueue
      list_move(&curr->list, &runqueue);
      // Pick the new head of the runqueue as the next process to run
      newt = list_head_to_task_struct(list_first(&runqueue));
      // Recharge the global quantum from the incoming process
      remaining_quantum = newt->quantum;
      // Count the READY->RUN transition and reset its per-task quantum
      newt->task_stats.cs++;
      newt->task_stats.remaining_quantum = newt->quantum;
      task_switch((union task_union*)newt);
    }
  }
}


/*
 * Blocks ts: marks it BLOCKED, moves it from the runqueue onto the given
 * wait list, and switches to the new head of the runqueue.
 * Assumes the runqueue is never empty after removing ts (i.e. an
 * always-runnable task such as task0 exists — TODO confirm).
 */
void block_process(struct task_struct *ts,struct list_head *list){
  struct task_struct *tn;
  ts->stat = BLOCKED;
  list_move(&ts->list, list);
  // Dispatch the first runnable process and recharge the global quantum
  tn = list_head_to_task_struct(list_first(&runqueue));
  remaining_quantum = tn->quantum;
  tn->task_stats.cs++; // count the READY->RUN transition
  tn->task_stats.remaining_quantum = tn->quantum;
  task_switch((union task_union*)tn);
}

/*
 * Unblocks ts: moves it back onto the runqueue, marks it READY, and
 * patches the return value the process will see when it resumes.
 * stack[KERNEL_STACK_SIZE - 10] is the saved eax slot: task_switch sets
 * esp to stack[KERNEL_STACK_SIZE - 16] and eax is the 7th register popped
 * (after ebx, ecx, edx, esi, edi, ebp), i.e. at offset -16 + 6 = -10.
 */
void unblock_process(struct task_struct *ts, int return_value){
  list_move(&ts->list, &runqueue);
  ts->stat = READY;
  ((union task_union*) ts)->stack[KERNEL_STACK_SIZE - 10] = return_value;
}
