#include "proc.h"
#include "console.h"
#include "kalloc.h"
#include "spinlock.h"
#include "trap.h"
#include "string.h"
#include "vm.h"
#include "mmu.h"
#include "memlayout.h"
#include "list.h"
#include "file.h"
#include "log.h"
#include <stdint.h>

// #define PROC_TRACE

struct {
    // struct spinlock lock;
    struct spinlock waitlock;   // protects parent links (fork/wait/exit/reparent)
    struct proc proc[NPROC];    // all PCBs; each slot guarded by its own p->lock
} ptable;

// Intrusive list of schedulable processes.  Writers (add_proc /
// remove_proc) serialize on procs.lock; the scheduler walks the
// next pointers lock-free using acquire/release atomics.
struct {
    struct spinlock lock;
    list_node_t head;
} procs;

/**
 * NOTE: you need to hold p->lock to call add_proc and remove_proc.
 */

// Link p into the scheduler's process list (push-front at procs.head).
// Caller holds p->lock (see NOTE above); procs.lock serializes writers.
static void add_proc(struct proc *p) {
    acquire(&procs.lock);

#ifdef PROC_TRACE
    cprintf("add_proc: pid %d\n", p->pid);
#endif

    list_node_t *node = &p->pnode;
    list_node_t *next = procs.head.next;

    // Fully link the node first, then publish it with a release
    // store so lock-free readers (scheduler) never observe a
    // half-initialized node.
    node->next = next;
    if (next)
        next->prev = node;
    node->prev = &procs.head;
    __atomic_store_n(&procs.head.next, node, __ATOMIC_RELEASE);

    release(&procs.lock);
}

// Unlink p from the scheduler's process list.
// Caller holds p->lock; procs.lock serializes writers.  The node's
// own next pointer is left intact, so a lock-free scheduler cursor
// currently standing on this node can still advance off it.
static void remove_proc(struct proc *p) {
    acquire(&procs.lock);

#ifdef PROC_TRACE
    cprintf("remove_proc: pid %d\n", p->pid);
#endif

    list_node_t *node = &p->pnode;
    list_node_t *prev = node->prev;
    list_node_t *next = node->next;

    // fix the successor's back link, then publish the bypass with a
    // release store for lock-free readers
    if (next)
        next->prev = prev;
    __atomic_store_n(&prev->next, next, __ATOMIC_RELEASE);

    release(&procs.lock);
}

// static struct proc *initproc;

int nextpid = 1;  // NOTE(review): pids are assigned from the slot index in proc_init; nextpid appears unused in this file — confirm
void forkret();
extern void trapret();
extern void _user_trapret();
void swtch(struct context *old, struct context *new);

/*
 * Initialize the spinlock for ptable to serialize the access to ptable
 *
 * should be placed in once_init.
 */
void
proc_init()
{
    // initlock(&ptable.lock, "ptable.lock");

    for (int i = 0; i < NPROC; i++) {
        struct proc *p = ptable.proc + i;
        p->pid = i;
        p->state = UNUSED;
        initlock(&p->lock, "process lock");
        init_affinity(&p->affinity);
    }

    initlock(&procs.lock, "procs.lock");
    initlock(&ptable.waitlock, "ptable.waitlock");
    list_node_init(&procs.head);
}

/*
 * Reset a PCB to a pristine state: free its page table and kernel
 * stack (if any), clear all bookkeeping fields and reset cwd to the
 * filesystem root.
 *
 * Caller holds p->lock.
 * NOTE(review): namei("/") touches the filesystem while p->lock (a
 * spinlock) is held — confirm this cannot block in this kernel.
 */
static void proc_reset(struct proc *p) {
    p->sz = 0;

    // release the old address space, if one was attached
    if (p->pgdir)
        vm_free(p->pgdir, 0);
    p->pgdir = 0;

    if (p->kstack)
        kfree(p->kstack);
    p->kstack = 0;

    p->state = UNUSED;
    p->parent = 0;
    p->tf = 0;
    p->context = 0;
    p->chan = 0;
    p->killed = 0;
    init_affinity(&p->affinity);
    p->cwd = namei("/");

    memset(p->name, 0, sizeof(p->name));
}

/*
 * Look through the process table for an UNUSED proc.
 * If found, change state to EMBRYO and -initialize
 * state (allocate stack, clear trapframe, set context for switch...)-
 * required to run in the kernel. Otherwise return 0.
 */
static struct proc *
proc_alloc()
{

    for (int i = 0; i < NPROC; i++) {
        struct proc *p = ptable.proc + i;

        acquire(&p->lock);

        if (p->state == UNUSED) {
            proc_reset(p);
            p->state = EMBRYO;

            // release(&p->lock);
            return p;
        }

        release(&p->lock);
    }

    return 0;
}

/**
 * Carve a trapframe and a context object off the top of p->kstack
 * and point p->tf / p->context at them.  If tf (resp. ctx) is
 * non-null, its contents are copied in; otherwise the object is
 * zeroed.  The resulting kernel stack pointer is returned.
 */
static uint64_t proc_kstack_setup(
    struct proc *p,
    struct trapframe *tf,
    struct context *ctx
) {
    char *sp = p->kstack + PGSIZE;  // stack grows down from the page top

    // trapframe sits at the very top of the stack
    sp -= sizeof(struct trapframe);
    p->tf = (struct trapframe *) sp;
    if (tf)
        memcpy(p->tf, tf, sizeof(struct trapframe));
    else
        memset(p->tf, 0, sizeof(struct trapframe));

    // context object lives just below the trapframe
    sp -= sizeof(struct context);
    p->context = (struct context *) sp;
    if (ctx)
        memcpy(p->context, ctx, sizeof(struct context));
    else
        memset(p->context, 0, sizeof(struct context));

    return (uint64_t) sp;
}

/**
 * setup context for a newly created process.
 */
static void proc_context_reset(struct proc *p) {
    // the entry point for a new process is <del>forkret</del> _user_trapret
    p->context->lr = (uint64_t) /*forkret*/ _user_trapret;
    p->context->sp = (uint64_t) p->tf;
}

/**
 * Allocate and map a user stack page plus a guard page below it.
 * The stack is mapped at virtual page (vm_size + 1), the guard at
 * (vm_size + 0); the guard is kernel-only/read-only so user access
 * faults.  On success, *sp receives the kernel address of the stack
 * page and *vsp its user virtual top.  Returns 0 on success, -1 on
 * failure (with the allocated pages freed — the original leaked
 * them on every error path).
 */
int pvm_create_stack(uint64_t *pgdir, uint64_t vm_size, uint64_t *sp, uint64_t *vsp) {
    char *ustack = 0, *uguard = 0;

    if ((ustack = (char*) kalloc()) == 0 ||
        (uguard = (char*) kalloc()) == 0) {
        if (ustack)
            kfree(ustack);  // second kalloc failed; don't leak the first page
        return -1;
    }

    memset(ustack, 0, PGSIZE);
    memset(uguard, 0xcc, PGSIZE);

    if (map_region(pgdir, (void*) (uint64_t) ((vm_size + 0) * PGSIZE),
        PGSIZE, IN_USPACE(uguard), PTE_KERNEL | PTE_RO) < 0) {
        // neither page is owned by pgdir yet; free both
        kfree(ustack);
        kfree(uguard);
        return -1;
    }

    if (map_region(pgdir, (void*) (uint64_t) ((vm_size + 1) * PGSIZE),
        PGSIZE, IN_USPACE(ustack), PTE_USER | PTE_RW) < 0) {
        // uguard is now owned by pgdir and will be reclaimed when
        // the page table is torn down; only ustack must be freed here
        kfree(ustack);
        return -1;
    }

    if (sp)
        *sp = (uint64_t) ustack;
    if (vsp)
        *vsp = (vm_size + 1) * PGSIZE;

    return 0;
}

/**
 * Build a process' initial address space: a fresh page table with
 * the binary image (code + bss) mapped at virtual address 0 and a
 * user stack (plus guard page) right above it.  Results are written
 * into *info when info is non-null.  Panics on any failure.
 */
void pvm_setup(char *data, int size, pvm_info_t *info) {
    uint64_t *pgdir = pgdir_init();
    if (!pgdir)
        panic("pvm_setup: failed to allocate pgdir.");

    // code + bss mapped to virtual address 0x0
    int npage = uvm_init(pgdir, data, size);

    // user stack and guard pages above the image
    uint64_t ustack;
    if (pvm_create_stack(pgdir, npage, &ustack, 0) < 0)
        panic("pvm_setup: failed to create user stack.");

    if (!info)
        return;

    info->pgdir = pgdir;
    info->size = (npage + 2) * PGSIZE;  // image + guard + stack
    info->stack = (char *) ustack;
}

/*
 * Set up the first user process (only used once).
 * Sets the trapframe for the new process to run from the
 * beginning of the user image installed by uvm_init.
 */
void
user_init()
{
    struct proc *p;
    /**
     * for why our symbols differ from xv6, please refer
     * https://stackoverflow.com/questions/10486116/what-does-this-gcc-error-relocation-truncated-to-fit-mean
     */
    extern char _binary_obj_user_initcode_start[], _binary_obj_user_initcode_size[];
    cprintf("user_init: binary: start=%p, size=%d\n",
        _binary_obj_user_initcode_start, _binary_obj_user_initcode_size);

    // proc_alloc returns with p->lock HELD; released at the end
    if ((p = proc_alloc()) == 0)
        panic("user_init: failed to allocate PCB.");

    // initialize kernel stack (zeroed trapframe + context)
    if ((p->kstack = (char*) kalloc()) == 0)
        panic("user_init: failed to allocate kernel stack.");
    proc_kstack_setup(p, 0, 0);
    proc_context_reset(p);

    // setup virtual memory for the embedded initcode binary
    assert(p->pgdir == 0);
    pvm_info_t pvm;
    pvm_setup(_binary_obj_user_initcode_start, (uint64_t) _binary_obj_user_initcode_size, &pvm);
    p->pgdir = pvm.pgdir;
    p->tf->stack = p->sz = pvm.size;

    // mark as RUNNABLE and publish to the scheduler list
    // acquire(&p->lock);
    p->state = RUNNABLE;
    add_proc(p);
    release(&p->lock);
}

/*
 * Per-CPU process scheduler.
 * Each CPU calls scheduler() after setting itself up.
 * Scheduler never returns.  It loops, doing:
 *  - choose a process to run
 *  - swtch to start running that process
 *  - eventually that process transfers control
 *        via swtch back to the scheduler.
 */
void
scheduler()
{
    struct proc *p;
    struct cpu *c = thiscpu;
    c->proc = NULL;

    // cursor into the lock-free scheduler list; wraps back to the
    // head whenever it falls off the tail
    list_node_t *node = &procs.head;
    for (;;) {
        /* Loop over process table looking for process to run. */

        // acquire-load pairs with the release stores in add_proc /
        // remove_proc, so only fully-linked nodes are observed
        node = __atomic_load_n(&node->next, __ATOMIC_ACQUIRE);
        if (!node) {
            node = &procs.head;
            continue;
        }

        p = LIST_NODE_AS(node, struct proc *);
        // try_acquire (not acquire): skip procs busy on another CPU
        // rather than spinning on them
        if (!try_acquire(&p->lock))
            continue;

        if (p->state == RUNNABLE && test_affinity(&p->affinity)) {
#ifdef PROC_TRACE
            cprintf("cpu %d: scheduler: selected pid %d.\n", arm_cpuid(), p->pid);
#endif

            p->state = RUNNING;
            c->proc = p;

            // assert(0xffff000000080000 >= arm_get_sp());
            // assert(0xffff000000080000 >= c->scheduler.sp);
            // assert(0xffff000010000000 <= (uint64_t)p->context);
            // assert(0xffff000010000000 <= p->context->sp);
            // run p until it swtches back (via sched); p->lock stays
            // held across the switch and is still held on return
            swtch(&c->scheduler, p->context);
            // assert(0xffff000000080000 >= arm_get_sp());
            // assert(0xffff000000080000 >= c->scheduler.sp);
            // assert(0xffff000010000000 <= (uint64_t)p->context);
            // assert(0xffff000010000000 <= p->context->sp);

#ifdef PROC_TRACE
            cprintf("cpu %d: scheduler: pid %d comes back.\n", arm_cpuid(), p->pid);
#endif

            assert(holding(&p->lock));
            assert(p->state != RUNNING);

            c->proc = 0;
        }

        // drop dead/unused procs from the scheduler list; SLEEPING
        // procs already moved themselves to a sleep bucket in sleep()
        if (p->state != SLEEPING && p->state != RUNNABLE && p->state != RUNNING)
            remove_proc(p);

        release(&p->lock);
    }
}

/*
 * Enter the per-CPU scheduler.
 * Caller must hold p->lock of the current process (and no other
 * lock), with p->state already updated (RUNNABLE/SLEEPING/ZOMBIE).
 */
void sched() {
    struct cpu *c = cpus + arm_cpuid();
    struct proc *p = c->proc;

    // check p BEFORE dereferencing it — the original called
    // holding(&p->lock) first, which crashes when p is NULL
    if (!p)
        panic("sched: no running process");
    if (!holding(&p->lock))
        panic("sched: not holding p->lock");

    // save this kernel thread's callee-saved registers in a context
    // object on the current stack; the scheduler swtches back here
    struct context ctx;
    p->context = &ctx;

    // assert(0xffff000010000000 <= arm_get_sp());
    swtch(p->context, &c->scheduler);
    // assert(0xffff000010000000 <= arm_get_sp());
    // assert(0xffff000010000000 <= p->context->sp);

    // back on this process: restore its page table, since another
    // process may have run on this CPU in between
    uvm_switch(p);
}

/**
 * if there's a process running on the CPU, `sched` it.
 */
void try_yield() {
    struct cpu *c = cpus + arm_cpuid();
    struct proc *p = c->proc;
    if (!p)
        return;

    acquire(&p->lock);

    if (p->state == RUNNING) {
        p->state = RUNNABLE;
        sched();
    }

    release(&p->lock);
}

/*
 * A fork child would first swtch here, then "return" to user space.
 *
 * NOTE(review): proc_context_reset currently sets the entry point
 * to _user_trapret rather than forkret, so this path may be dead
 * code — confirm before relying on it.
 */
void
forkret()
{
    struct proc *p = thiscpu->proc;
    assert(p);
    // the scheduler acquired p->lock before swtching here
    release(&p->lock);

    // setup page table
    uvm_switch(p);
}

/*
 * Create a new process copying the current one (no copy-on-write).
 * The child's kernel stack is set up so it "returns" from the same
 * trap as the parent, with x[0] = 0.
 *
 * On success, returns the child's pid to the parent and 0 to the
 * child.  On failure, cleans up and returns -1 to the parent.
 */
int fork() {
    struct proc *p = thiscpu->proc;

    struct proc *np;
    // proc_alloc returns with np->lock HELD
    if ((np = proc_alloc()) == 0)
        return -1;

    // duplicate the address space
    np->sz = p->sz;
    np->pgdir = vm_copy(p->pgdir, 0);
    if (!np->pgdir)
        goto error;

    // fresh kernel stack carrying a copy of the parent's trapframe
    np->kstack = kalloc();
    if (!np->kstack)
        goto error;
    memset(np->kstack, 0, PGSIZE);

    proc_kstack_setup(np, p->tf, 0);
    proc_context_reset(np);
    np->tf->x[0] = 0;  // child returns 0

    // duplicate open files and cwd
    for (int i = 0; i < NOFILE; i++) {
        if (p->ofile[i])
            np->ofile[i] = filedup(p->ofile[i]);
    }
    np->cwd = idup(p->cwd);

    // single name copy (the original did strncpy then memcpy over it)
    memcpy(np->name, p->name, sizeof(np->name));
    np->affinity = p->affinity;

#ifdef PROC_TRACE
    cprintf("fork: new process %d.\n", np->pid);
#endif

    // parent links are protected by waitlock, not np->lock
    release(&np->lock);
    acquire(&ptable.waitlock);
    np->parent = p;
    release(&ptable.waitlock);

    acquire(&np->lock);
    np->state = RUNNABLE;
    add_proc(np);
    release(&np->lock);

    return np->pid;

error:
    // np->lock is STILL HELD here (proc_alloc returned with it
    // held) — the original re-acquired it, self-deadlocking.
    // Zero the freed pointers so a later proc_reset on this slot
    // does not free them a second time.
    if (np->pgdir) {
        vm_free(np->pgdir, 0);
        np->pgdir = 0;
    }
    if (np->kstack) {
        kfree(np->kstack);
        np->kstack = 0;
    }
    np->state = UNUSED;
    release(&np->lock);

    return -1;
}

static void reparent(struct proc *p) {
    struct proc *p_init = ptable.proc + 1;
    for (int i = 0; i < NPROC; i++) {
        struct proc *u = ptable.proc + i;
        if (u->parent == p) {
            u->parent = p_init;
            wakeup(p_init);  // u may be a zombie process
        }
    }
}

/*
 * Exit the current process.  Does not return.
 * An exited process remains in the zombie state
 * until its parent calls wait() to find out it exited.
 */
void
exit()
{
    struct proc *p = thiscpu->proc;

#ifdef PROC_TRACE
    cprintf("exit: pid=%d\n", p->pid);
#endif

    // close all open file descriptors
    for (int i = 0; i < NOFILE; i++) {
        struct file *f = p->ofile[i];
        if (f) {
            fileclose(f);
            p->ofile[i] = 0;
        }
    }

    // drop the cwd inode reference inside an FS transaction
    begin_op();
    iput(p->cwd);
    p->cwd = 0;
    end_op();

    // waitlock protects parent links: hand our children to init and
    // notify our parent before marking ourselves ZOMBIE
    acquire(&ptable.waitlock);
    reparent(p);
    wakeup(p->parent);
    acquire(&p->lock);
    p->state = ZOMBIE;
    release(&ptable.waitlock);

    // sched is entered holding p->lock; the scheduler releases it
    // after switching away from this (now zombie) process
    sched();

    // this function never returns.
    panic("exit returns.");
}

// One bucket of the sleep table: a lock plus the list of processes
// sleeping on channels that hash into this bucket.
struct hash_entry {
    struct spinlock lock;
    list_t list;
};

// 256 buckets, indexed by hash_key(chan).
struct hash_entry sleep_table[256];

// Fold a 64-bit channel pointer into an 8-bit sleep-table index by
// xor-ing successively wider right-shifts into the low byte.
static uint64_t hash_key(uint64_t value) {
    for (unsigned shift = 8; shift <= 32; shift <<= 1)
        value ^= value >> shift;
    return value & 0xff;
}

/*
 * Atomically release lock lk and sleep on chan.
 * Reacquires lk when awakened.
 *
 * See wakeup for some issues on lk.
 */
void
sleep(void *chan, struct spinlock *lk)
{
    struct proc *p = thiscpu->proc;
    uint64_t key = hash_key((uint64_t) chan);
    struct hash_entry *entry = sleep_table + key;

#ifdef PROC_TRACE
    cprintf("sleep: chan=%p, key=%llu, pid=%d\n", chan, key, p->pid);
#endif

    /**
     * To avoid deadlock between entry->lock and p->lock with
     * wakeup, which must acquire entry->lock first, the following
     * snippet is kept outside the critical region of p->lock that
     * spans the state change below.
     */
    acquire(&entry->lock);
    acquire(&p->lock);

    // move p from the scheduler list onto this bucket's sleep list
    remove_proc(p);
    list_node_init(&p->pnode);
    list_push_back(&entry->list, &p->pnode);

    release(&p->lock);
    release(&entry->lock);

    // lk is released only after p->lock is held: a waker that
    // observes the condition change must then wait on p->lock and
    // will see state == SLEEPING once we have switched away
    acquire(&p->lock);
    if (lk) release(lk);

    p->chan = chan;
    p->state = SLEEPING;
    sched();
    release(&p->lock);

    // awakened: re-acquire the condition lock before returning
    if (lk) acquire(lk);
}

/**
 * Wake up all processes sleeping on chan.
 *
 * NOTE: wakeup is often paired with sleep(lk), but notice that both
 * wakeup and sleep need to acquire p->lock.  Since sleep releases
 * lk before sched, deadlock would be possible if wakeup were also
 * protected by lk.  In conclusion, lk is required so that the
 * condition test and the transition to SLEEPING are atomic, but
 * wakeup itself does not need to be protected by lk.
 */
void
wakeup(void *chan)
{
    uint64_t key = hash_key((uint64_t) chan);
    struct hash_entry *entry = sleep_table + key;

#ifdef PROC_TRACE
    cprintf("wakeup: chan=%p, key=%llu\n", chan, key);
#endif

    acquire(&entry->lock);

    list_node_t *next_node;
    for (list_node_t *node = entry->list.head; node; node = next_node) {
        struct proc *p = LIST_NODE_AS(node, struct proc *);

        acquire(&p->lock);

        // read node->next under p->lock, before node may be removed
        next_node = node->next;
        if (p->state == SLEEPING && p->chan == chan) {

#ifdef PROC_TRACE
            cprintf("wakeup: pid=%d\n", p->pid);
#endif

            p->state = RUNNABLE;
            // p->chan = 0;

            // move p back from the sleep bucket to the scheduler list
            list_remove(&entry->list, node);
            add_proc(p);
        }

        release(&p->lock);
    }

    release(&entry->lock);
}

/* Give up CPU.  Safe to call even when no process is running. */
void yield() {
    try_yield();
}

/*
 * Wait for a child process to exit and return its pid.
 * Return -1 if this process has no children.
 */
int wait() {
    struct proc *p = thisproc();

    // waitlock stabilizes all parent pointers during the scan
    acquire(&ptable.waitlock);

    while (1) {
        int any = 0;  // did we see at least one child?
        for (int i = 0; i < NPROC; i++) {
            struct proc *u = ptable.proc + i;
            if (u->parent != p)
                continue;

            acquire(&u->lock);
            any = 1;

            if (u->state == ZOMBIE) {
                // got it!  Reclaim the child's slot.
                // NOTE(review): proc_reset calls namei("/") while
                // u->lock (a spinlock) is held — confirm this
                // cannot block.
                int pid = u->pid;
                proc_reset(u);
                release(&u->lock);
                release(&ptable.waitlock);
                return pid;
            }

            release(&u->lock);
        }

        if (any) {
            // children exist but none is dead yet; waitlock is
            // atomically dropped while sleeping and re-acquired on
            // wakeup (exit() signals us while holding waitlock)
            sleep(p, &ptable.waitlock);
        } else {
            release(&ptable.waitlock);
            return -1;
        }
    }
}

/*
 * Print a process listing to console.  For debugging.
 * Runs when user types ^P on console.
 * No lock to avoid wedging a stuck machine further.
 */
void procdump() {
    static char *state_name[64] = {
        [UNUSED  ] = "UNUSED",
        [EMBRYO  ] = "EMBRYO",
        [SLEEPING] = "SLEEPING",
        [RUNNABLE] = "RUNNABLE",
        [RUNNING ] = "RUNNING",
        [ZOMBIE  ] = "ZOMBIE",
    };

    cprintf("procdump: dumping all processes...\n");
    for (int i = 0; i < NPROC; i++) {
        struct proc *p = ptable.proc + i;
        if (p->state == UNUSED)
            continue;

        cprintf("- #%d: \"%s\", state=%s, chan=0x%p\n",
            i, p->name, state_name[p->state], p->chan);
    }
}

// Return the pid of the process running on this CPU.
// NOTE(review): assumes a process is currently running
// (thiscpu->proc != 0) — confirm callers never invoke this from a
// bare scheduler context.
int getpid() {
    return thiscpu->proc->pid;
}
