#include "include/sched.h"
#include "include/types.h"
#include "include/string.h"
#include "include/mm.h"
#include "include/segment.h"
#include "include/tss.h"
#include "include/print.h"
#include "include/mm.h"

static struct task* task_head; // head of the ready queue (doubly linked list)
static struct task* task_tail; // tail of the ready queue
struct task* current; // task currently owning the CPU
struct task* idle_task; // idle task0, do not join task ready queue

uint64_t ticks; // clock tick count, incremented once per timer interrupt in do_timer()
struct timer* timer_head; // head of the pending sleep-timer queue (doubly linked)
struct timer* timer_tail; // tail of the pending sleep-timer queue

/**
 * Simulate user process context stack pushing.
 *
 * Builds the 5-entry iretq frame (ss, rsp, flags, cs, rip) at the base of a
 * task's kernel stack, so that the first `iretq` executed on behalf of the
 * task drops it into user mode at its entry point.
 *
 * @param kstack user process kernel stack base virtual address
 */
static void fake_task_stack(unsigned long kstack) {
  /* All five frame slots are pushed with `pushq` through "m" operands, which
   * reads a full 8 bytes from each variable's address. The selectors must
   * therefore live in 64-bit variables: a uint16_t would make `pushq` drag
   * six garbage bytes from neighboring stack slots into the iretq frame. */
  unsigned long ss = USER_DS; // user mode data segment register selector
  unsigned long rsp = 0x8000000; // customized 128MB user stack bottom
  unsigned long cs = USER_CS; // user mode code segment register selector
  unsigned long rip = 0x100000; // user program starting virtual address
  unsigned long rsp_tmp; // Used to protect and restore the %rsp state before the following operations

  /* To simulate switching from kernel mode to user mode, the user program context is pushed into
   * the task kernel stack, and then the `iretq` instruction pops out the 5 context information pushed
   * from the top of the kernel stack in sequence.
   * 
   * 8B(64-bits)
   * ----------|- n-0 <- kstack base
   *    ss     |
   * ----------|- n-1
   *    rsp    |
   * ----------|- n-2
   *    flags  |
   * ----------|- n-3
   *    cs     |
   * ----------|- n-4
   *    rip    |
   * ----------|- n-5 <- kstack top (= kstack base - 8B * 5)  <- current task rsp0
   */
  /* rsp_tmp is written by the asm before being read back, so it must be an
   * output ("=m") operand — writing through an input operand is undefined.
   * `volatile` keeps the asm alive now that it has an output, and the
   * "memory" clobber tells the compiler the asm stores into the kstack page. */
  __asm__ volatile(
          "mov %%rsp, %0\n\t" /* save %rsp old value into rsp_tmp */
          "mov %5, %%rsp\n\t" /* %rsp pointer to kstack */
          "pushq %1\n\t" /* push ss to kstack, `q` informing processor to stack with 64 bit */
          "pushq %2\n\t" /* push rsp to kstack */
          "pushf\n\t" /* push flags register */
          "pushq %3\n\t" /* push cs to kstack */
          "pushq %4\n\t" /* push rip to kstack */
          "mov %0, %%rsp\n\t" /* restore rsp_tmp back into %rsp */
          : "=m"(rsp_tmp) /* index 0 (output: written before read) */
          : "m"(ss), /* index 1 */
            "m"(rsp), /* index 2 */
            "m"(cs), /* index 3 */
            "m"(rip), /* index 4 */
            "m"(kstack) /* index 5 */
          : "memory"
  );
}

/* Dump the FLAGS slot of the fake iretq frame: the 3rd qword below the
 * kstack base. e.g. 0x286 => 0010,1000,0110 — IF (bit 9) is set. */
static void print_kstack_flags(unsigned long kstack) {
  unsigned long* slot = (unsigned long*)(kstack - 3 * 8);
  print(*slot);
}

/* Dump the RIP slot of the fake iretq frame: the 5th qword below the kstack base. */
static void print_kstack_rip(unsigned long kstack) {
  unsigned long* slot = (unsigned long*)(kstack - 5 * 8);
  print(*slot);
}

/* Dump the user RSP slot of the fake iretq frame: the 2nd qword below the kstack base. */
static void print_kstack_rsp(unsigned long kstack) {
  unsigned long* slot = (unsigned long*)(kstack - 2 * 8);
  print(*slot);
}

/**
 * make process task function
 * 1.create task
 * 2.establish kernel space mapping
 * 3.establish user space mapping
 * 4.set the process kernel stack
 * 5.final append task to task queue
 * 
 * @param id task id
 * @param entry task starting virtual address [user state space]
 * @param entry_pa task starting physical address [memory space]
 */
static void make_task(unsigned long id, unsigned long entry, unsigned long entry_pa) {
  // 1.create task
  struct task* task = malloc(sizeof(struct task));
  task->id = id;
  task->state = TASK_RUNNING; /* Initial state */
  // print((unsigned long)task); /* 0xffff8880 0x0405f018 */

  // 2.establish kernel space mapping
  task->pml4 = alloc_page(); // allocate process level 4 root page table(4KB)
  // print(task->pml4); /* 0x0 0x04060000 */

  // init 0, copy the high 256 page table entries (kernel) from TASK0_PML4 to task
  // - entries 256..511 cover the kernel half of the address space; each entry is 8 bytes,
  //   hence the byte offset 8 * 256 and copy length 8 * 256
  memset(VA(task->pml4), 0, PAGE_SIZE);
  memcpy(VA(task->pml4 + 8 * 256), VA(TASK0_PML4 + 8 * 256), 8 * 256);

  // 3.establish user space mapping: 1024 page(4KB) => 4MB user code space
  // - flag 0x4 is presumably the U/S (user-accessible) page-table bit — confirm against map_range
  map_range(task->pml4, entry, entry_pa, 0x4, 1024);

  // 4.Set the process kernel stack
  // apply for a new page as the kernel stack; kstack is the base (high address,
  // one byte past the page since x86 stacks grow downward)
  task->kstack = (unsigned long)(VA(alloc_page())) + PAGE_SIZE; // kernel stack base(high address)
  // print(0x801);
  // print(task->kstack); /* 0xffff8880 0x04067000 */
  task->rsp0 = task->kstack - 8 * 5; // kernel stack top (pushed 5 items), `iretq` pop from here
  task->rip = (unsigned long)&ret_from_kernel; // The simulation point at which a process enters kernel mode for the first time

  // [Test] fake user program context: build the 5-entry iretq frame at the kstack base
  fake_task_stack(task->kstack);
  // print_kstack_flags(task->kstack); // 0x286 IF(1)

  // 5.final append to task queue (doubly linked ready queue)
  if (!task_head) { // No head
    task_head = task;
    task_tail = task;
    task->prev = NULL;
    task->next = NULL;
  } else { // Tail append
    task_tail->next = task;
    task->prev = task_tail;
    task->next = NULL;
    task_tail = task;
  }
}

static void make_idle_task() {
  idle_task = malloc(sizeof(struct task));
  idle_task->id = 0;
  idle_task->state = TASK_RUNNING;
  idle_task->pml4 = TASK0_PML4;
  idle_task->kstack = (unsigned long)&task0_stack; //stack bottom. task0_stack defined in head64.S
  idle_task->rsp0 = (unsigned long)&task0_stack; // statck top
  idle_task->rip = (unsigned long)&idle_task_entry; // task0 entry defined in head64.S
}

/* Initialize scheduling: create the two demo user tasks plus idle task0,
 * and pick the first ready task as the initial `current`. */
void sched_init() {
  // app1 and app2 deliberately share the same virtual entry address
  // (0x100000) while being backed by different physical pages: each task owns
  // its own level-4 root page table, so user address spaces are isolated —
  // which is exactly why CR3 must be switched on every task switch.
  make_task(1, 0x100000, 0xc800000); /* app1, code at physical 0xc800000 */
  make_task(2, 0x100000, 0xd000000); /* app2, code at physical 0xd000000 */
  make_idle_task(); /* default idle task0, never enters the ready queue */

  current = task_head;
}

/**
 * Pick the next runnable task and switch to it.
 *
 * Scans the ready queue front-to-back for the first TASK_RUNNING task; if
 * none is runnable, falls back to idle task0. Switching saves the current
 * task's kernel-mode %rsp and a resume label into its task struct, loads the
 * next task's saved %rsp/CR3, and `ret`s into its saved resume point.
 */
void schedule() {
  struct task* next = NULL;

  for (struct task* t = task_head; t; t = t->next) { // Find the next scheduling task
    if (t->state == TASK_RUNNING) {
      next = t;
      break;
    }
  }

  //print(0x66);
  //print((unsigned long)next);
  
  if (!next) { // Run task0 by default, when there are no user tasks
    //print(0x55);
    next = idle_task;
  }

  if (next != current) { // Task switching
    /*
    * 1.When an interrupt occurs, the process is forced to switch from user state to kernel state,
    *   and the CPU automatically saves the user state context;
    * 2.When switching processes in kernel state,
    *   the kernel state context needs to be manually saved; [Here]
    * 3.When the process obtains scheduling again, it first recovers from the kernel state context,
    *   and then returns to the user state context.
    * 
    * Kstack base                                            Process
    * +----+ n                                               
    * |    |                                                 [User State]
    * | -- | -      --------------------    Interrupt   -> ↓ -------------- ↑ <- iretq
    * |    |   <--- User State Context                       [Kernel State]
    * | -- | -      --------------------    Switch task -> ↓ -------------- ↑ <- ret
    * |    |   <--- Kernel State Context                     <Unscheduled>
    * | -- | - top
    * |    |
    * +----+ 0
    * 
    * Task switching
    *                   current [6] redirect -------------------------+
    *                      |                                          ↓
    *                      ↓             Kstack base                 next              Kstack base 
    *                   <task>    +----> +----+ n                    <task>       +--> +----+ n
    *                  +--------+ |      |    |                     +----------+  |    |    |
    *                  |  pml4  | |      |    |     [7]  CPU-cr3 <- |  pml4    |  |    |    |
    *                  | kstack |-+  +-> | -- |     [5] tss.rsp0 <- | kstack   |--+    | 1: |  <-+
    *  [1]  CPU-rsp -> |  rsp0  |----+   |    |     [3]  CPU-rsp <- |  rsp0    |-----> | -- | ↓  | -> [8] `ret` pop 1:
    *  [2] label-1: -> |  rip   | top    |    |     [4]  push 1: +- |  rip[1:] | top   |    |    |          |
    *        ↑         +--------+        +----+ 0                |  +----------+       +----+ 0  |          |
    *        |                                                   |                               |          |
    *        |                                                   +-------------------------------+          ↓
    *   [Kernel State]                                                                                [Kernel State]
    *   Process recovery point save                                                  Process recovery point recovery
    */
    // NOTE(review): after "mov %2, %%rsp" the stack pointer no longer refers to
    // this frame, yet %1/%3 are frame-relative "m" operands — this relies on the
    // compiler addressing them via a register other than %rsp, and on the split
    // asm statements below not being reordered. Fragile; confirm with -O0/-O2
    // disassembly.
    __asm__ ("mov %%rsp, %0\n\t" /* save current RSP register value */
             "movq $1f, %1\n\t" /* save process recovery point label 1: */
             "mov %2, %%rsp\n\t" /* first, RSP register to next process kstack top */
             "push %3\n\t" /* second, push 1: to kstack top */
             : "=m"(current->rsp0),"=m"(current->rip) /* Save cureent process partial context */
             : "m"(next->rsp0), "m"(next->rip)
    );

    // Interrupts from user mode will enter the kernel on the next task's kstack base
    tss.rsp0 = (unsigned long)next->kstack;
    // print(next->rip); // Due to rip=0 in app2, the redirect address is incorrect !
    // print_kstack_flags(next->kstack); // 0x286 IF(1), No Problem !
    current = next;
    //print_kstack_rip(next->kstack);
    //print_kstack_rsp(next->kstack);

    // Switch CR3 register to next process root page table, enter virtual address space for next processes
    // User space isolation and kernel space sharing for different processes
    __asm__ ("mov %0, %%cr3" : : "a"(next->pml4));
    // Pop the next process recovery point [1:] from its kernel stack
    // - first ever run: pops ret_from_kernel; otherwise pops a saved label 1:
    __asm__ ("ret");

    __asm__ ("1:"); // [Kernel State] Process recovery point
  }
}

/**
 * Clock interrupt handling function.
 *
 * Advances the tick counter, wakes tasks whose sleep timers have expired,
 * rotates the current task to the tail of the ready queue (round-robin),
 * and invokes the scheduler.
 */
void do_timer() {
  // print('T');
  //print(ticks); // 0x0 0x0

  ticks++; // clock tick count increase
  
  //print(ticks); // 0x0 0x1

  // Check for sleep expiration tasks.
  // The successor pointer is captured BEFORE a node may be freed: the old
  // for-increment `t = t->next` after free(t) was a use-after-free.
  for (struct timer* t = timer_head; t; ) {
    struct timer* succ = t->next;

    if (t->alarm <= ticks) { // when expire
      t->task->state = TASK_RUNNING; // ready again

      // unlink the expired timer from the doubly linked queue
      if (t == timer_head && t == timer_tail) { // only node
        timer_head = NULL;
        timer_tail = NULL;
      } else if (t == timer_head) { // head node
        timer_head = t->next;
        t->next->prev = NULL;
      } else if (t == timer_tail) { // tail node
        timer_tail = t->prev;
        timer_tail->next = NULL;
      } else { // interior node
        t->prev->next = t->next;
        t->next->prev = t->prev;
      }

      free(t); // Release the current task timer memory
    }

    t = succ;
  }

  // Current task moved to the end of the ready queue
  if (current != idle_task) {
    // When it is not an idle task, move the task to the end of the ready queue and wait for the next scheduling
    // - Idle task0 is not in the ready queue
    if (current != task_tail) {
      if (current->prev) {
        current->prev->next = current->next;
      }
      current->next->prev = current->prev;

      current->prev = task_tail;
      task_tail->next = current;

      if (current == task_head) {
        task_head = current->next;
      }
      task_tail = current;

      current->next = NULL;
    }
  }
  schedule();
}

/**
 * System call `sleep` implementation.
 *
 * Registers a one-shot wake-up timer for the calling task, marks the task
 * unrunnable, and yields the CPU until do_timer() sees the alarm expire.
 *
 * @param ms Sleep time ms
 * @return 0 Success
 */
int do_sleep(long ms) {
  // print('S');

  // Bind the current task to a fresh timer; the alarm is expressed in clock
  // ticks (one tick per 10 ms).
  struct timer* tm = malloc(sizeof(struct timer));
  tm->task = current;
  tm->alarm = ticks + ms / 10;
  // print(ms); // 1000 => 0x0 0x3e8
  // print(ticks); // 0x0 0x0
  // print(t->alarm); // 100 => 0x0 0x64

  // Tail-insert into the timer queue (doubly linked)
  tm->next = NULL;
  tm->prev = timer_tail;
  if (timer_tail) {
    timer_tail->next = tm;
  } else {
    timer_head = tm;
  }
  timer_tail = tm;
  
  // Give up the CPU: mark the task unrunnable so schedule() skips it
  current->state = TASK_INTERRUPTIBLE;
  //print(0x77);
  //print((unsigned int)current->state);

  // Rescheduling
  // - The kernel-state context switch BLOCKs execution at this point;
  // - when the sleep expires and the task is scheduled again, it RESUMEs
  //   from this very spot and continues on.
  schedule();

  return 0;
}