#include <stdint.h>
#include "types.h"
#include "mmu.h"
#include "string.h"
#include "memlayout.h"
#include "console.h"

#include "vm.h"
#include "kalloc.h"
#include "proc.h"

extern uint64_t *kpgdir;

// #define __VM_FREE_TRACE

/*
 * Given 'pgdir', a pointer to a page directory, pgdir_walk returns
 * a pointer to the page table entry (PTE) for virtual address 'va'.
 * This requires walking the four-level page table structure.
 *
 * The relevant page table page might not exist yet.
 * If this is true, and alloc == false, then pgdir_walk returns NULL.
 * Otherwise, pgdir_walk allocates a new page table page with kalloc.
 *   - If the allocation fails, pgdir_walk returns NULL.
 *   - Otherwise, the new page is cleared, and pgdir_walk returns
 *     a pointer into the new page table page.
 */

/*static*/ uint64_t *
pgdir_walk(uint64_t *pgdir, const void *va, int64_t alloc)
{
    /* Your code here. */

    uint64_t *ptr = 0;  /* pointer to current entry */
    uint64_t *current_dir = pgdir;

    /* Descend the four translation levels: 0 = top-level table, 3 = leaf. */
    for (int i = 0; i < 4; i++) {
        /* Index of 'va' within the level-i table. */
        uint64_t offset = PTX(i, va);
        ptr = current_dir + offset;

        uint64_t entry = *ptr;

        /* entry not valid. */
        if (!(entry & PTE_P)) {
            /* do not allocate */
            if (!alloc)
                return 0;

            uint64_t *new_page = 0;

            /* we do not allocate the underlying page at the leaf. */
            if (i < 3) {
                new_page = (uint64_t*) kalloc();
                if (!new_page)
                    return 0;
                    /* panic("pgdir_walk: failed to allocate new page."); */
                memset(new_page, 0, PGSIZE);
            }

            /**
             * we need to indicate that this is a table/page entry.
             * `PTE_TABLE` is the same as `PTE_PAGE`.
             *
             * NOTE(review): at the leaf (i == 3) new_page is still NULL,
             * so this installs a *valid* PTE whose address field is 0;
             * pgdir_access detects the zero address and attaches the data
             * page lazily.  Assumes IN_USPACE(NULL) yields 0 -- confirm.
             */
            uint64_t new_entry = (uint64_t) IN_USPACE(new_page) | PTE_P | PTE_TABLE;
            *ptr = entry = new_entry;
        }

        /* Follow the entry down: map its physical address back into a
         * kernel-space pointer for the next level's table. */
        current_dir = (uint64_t*) IN_KSPACE(PTE_ADDR(entry));

        /* encounter a block entry */
        /* NOTE(review): this tests descriptor bit 1 only; it assumes
         * PTE_BLOCK also has bit 1 clear (ARM block descriptors are 0b01
         * in bits[1:0]).  If PTE_BLOCK is defined as 1 the comparison can
         * never be true -- verify against mmu.h. */
        if ((entry & 2) == PTE_BLOCK)
            break;
    }

    /* Pointer to the PTE (or block entry) covering 'va'. */
    return ptr;
}

/**
 * Return a pointer to the PTE covering 'va' in 'pgdir', allocating the
 * intermediate tables and -- if the PTE's address field is still zero --
 * the underlying data page as well.
 *
 * If 'p_page' is non-NULL it receives the kernel-space pointer to the
 * data page.  Returns NULL if the walk or a page allocation fails; in
 * the latter case the PTE is left exactly as pgdir_walk produced it.
 */
uint64_t *pgdir_access(uint64_t *pgdir, uint64_t va, char **p_page) {
    uint64_t *ptr = pgdir_walk(pgdir, (void *) va, 1);
    if (!ptr)
        return 0;

    uint64_t pte = *ptr;
    char *page = (char *) PTE_ADDR(pte);
    if (!page) {
        /* Leaf PTE exists but no data page is attached yet: allocate it. */
        page = (char *) kalloc();
        /* FIX: kalloc can fail; previously the result was passed to
         * memset unchecked, dereferencing NULL on out-of-memory. */
        if (!page)
            return 0;
        memset(page, 0, PGSIZE);
        pte |= (uint64_t) IN_USPACE(page);
    } else
        /* Page already present: translate its physical address into the
         * kernel-space pointer the caller can actually use. */
        page = IN_KSPACE(page);

    if (p_page)
        *p_page = page;

    *ptr = pte;
    return ptr;
}

/*
 * Create PTEs for virtual addresses starting at va that refer to
 * physical addresses starting at pa. va and size might **NOT**
 * be page-aligned.
 * Use permission bits perm|PTE_P|PTE_TABLE|(MT_NORMAL << 2)|PTE_AF|PTE_SH for the entries.
 *
 * Hint: call pgdir_walk to get the corresponding page table entry
 */

/**
 * the target physical region is expected to be allocated in advance.
 *
 * return 0 on success, -1 on page allocation failure.
 * if a block entry is returned by `pgdir_walk`, `map_region` simply panics.
 */
/*static*/ int
map_region(uint64_t *pgdir, void *va, uint64_t size, void *pa, int64_t perm)
{
    /* 'pa' must already be expressed as a user-space physical address. */
    if (IN_USPACE(pa) != pa)
        return -1;

    /* Page-align both ends of the virtual range and the physical base. */
    uint64_t first = ROUNDDOWN((uint64_t) va, PGSIZE);
    uint64_t last = ROUNDUP((uint64_t) va + size, PGSIZE);
    uint64_t phys = ROUNDDOWN((uint64_t) pa, PGSIZE);

    /* Install one leaf PTE per page in the range. */
    for (uint64_t vaddr = first; vaddr < last; vaddr += PGSIZE, phys += PGSIZE) {
        uint64_t *pte_ptr = pgdir_walk(pgdir, (void*) vaddr, 1);
        if (!pte_ptr)
            return -1;
            /* panic("map_region: failed to allocate PTE."); */

        /* Refuse to overwrite an existing block mapping. */
        if ((*pte_ptr & 2) == PTE_BLOCK)
            panic("map_region: remap block region.");

        *pte_ptr = phys | perm | PTE_P | PTE_PAGE | (MT_NORMAL << 2) | PTE_AF | PTE_SH;
    }

    return 0;
}

/*
 * Create a deep copy of the page table 'old' rooted at 'level'
 * (0 = top-level table, 4 = a raw data page reached from a level-3
 * entry).  Every table page and every underlying data page is freshly
 * allocated with kalloc.
 *
 * Returns the new (kernel-space) page on success, or NULL on bad
 * arguments or allocation failure; on failure everything copied so far
 * is released again via vm_free/kfree.
 */
uint64_t *vm_copy(uint64_t *old, int level) {
    if (!old)
        return 0;
    if (level < 0 || level > 4)
        return 0;

    /* FIX: cast added for consistency with every other kalloc() call
     * site in this file (implicit char* -> uint64_t* otherwise). */
    uint64_t *new = (uint64_t *) kalloc();
    if (!new)
        return 0;
    /* Start from a byte-wise copy: table entries are rewritten below,
     * and at level 4 this copy *is* the duplicated data page. */
    memcpy(new, old, PGSIZE);

    uint64_t processed = 0;  /* entries successfully deep-copied so far */
    if (level < 4) {
        for (uint64_t *p = new; p != new + PGLEN; p++) {
            uint64_t entry = *p;

            if (entry & PTE_P) {
                uint64_t *dir_ptr = (uint64_t*) IN_KSPACE(PTE_ADDR(entry));
                uint64_t flags = entry & PGMASK;

                /* Recursively duplicate the child, then point our entry
                 * at the copy, preserving the original flag bits. */
                uint64_t *new_dir_ptr = vm_copy(dir_ptr, level + 1);
                if (!new_dir_ptr)
                    goto error;

                *p = ((uint64_t) IN_USPACE(new_dir_ptr)) | flags;
            }

            processed++;
        }
    }

    return new;

error:
    /* Unwind: entries in [new, new + processed) already point at freshly
     * copied children -- free those copies, then the table page itself.
     * The failing entry was never rewritten, so it is skipped. */
    for (uint64_t *p = new; p != new + processed; p++) {
        uint64_t entry = *p;

        if (entry & PTE_P) {
            uint64_t *dir_ptr = (uint64_t*) IN_KSPACE(PTE_ADDR(entry));
            vm_free(dir_ptr, level + 1);
        }
    }

    kfree(new);

    return 0;
}

/*
 * Free a page table.
 *
 * Hint: You need to free all existing PTEs for this pgdir.
 *
 * NOTE: it also frees all associated pages by kfree.
 */

void vm_free(uint64_t *pgdir, int level) {
#ifdef __VM_FREE_TRACE
    cprintf("vmfree: pgdir=%p, level=%d\n", pgdir, level);
#endif

    /* Levels 0-3 are table levels; level 4 denotes a raw data page
     * reached through a level-3 entry.  Anything else is a bad call. */
    if (level < 0 || level > 4)
        return;

    if (level < 4) {
        /* Free every present child before freeing this table page.
         * NOTE(review): a block entry (if any) would be followed here as
         * if it were a table pointer, over-recursing -- assumes user page
         * tables contain only table/page descriptors; confirm. */
        for (uint64_t *p = pgdir; p != pgdir + PGLEN; p++) {
            uint64_t entry = *p;

            if (entry & PTE_P) {
                uint64_t *dir_ptr = (uint64_t*) IN_KSPACE(PTE_ADDR(entry));
                vm_free(dir_ptr, level + 1);
            }
        }
    }

    /* NOTE(review): recursive calls already receive kernel-space
     * pointers, so this relies on IN_KSPACE being idempotent (a no-op on
     * an address already in the kernel window) -- confirm. */
    kfree(IN_KSPACE(pgdir));
}

/* Get a new page table */
/* Allocate one zeroed page to serve as a fresh top-level page table.
 * Returns NULL if the allocation fails. */
uint64_t *pgdir_init() {
    uint64_t *pgdir = (uint64_t *) kalloc();
    if (!pgdir)
        return 0;
    memset(pgdir, 0, PGSIZE);
    return pgdir;
}

/**
 * Map 'size' bytes of 'data' into 'pgdir' at user VA 'start' (must be
 * page-aligned): one freshly allocated, zero-filled page per PGSIZE
 * chunk, mapped with PTE_USER | pte_flags.
 *
 * returns the total number of pages allocated below
 * (i.e. the page index just past the mapped region).
 */
int uvmmap(uint64_t *pgdir, char *data, uint64_t start, uint64_t size, uint64_t pte_flags) {
    assert(start % PGSIZE == 0);

    int cnt = 0;
    for (uint64_t i = 0; i < size; i += PGSIZE) {
        // NOTE: binary may not be aligned on 4 bytes boundary!
        char *p;
        if ((p = (char*) kalloc()) == 0)
            panic("uvmmap: failed to allocate page to hold binary data.");
        memset(p, 0, PGSIZE);

        cnt++;
        int csize = MIN(size - i, PGSIZE);
        /* FIX: copy from the current offset within 'data'; previously
         * every page received the *first* csize bytes of the binary. */
        memcpy(p, data + i, csize);
        /* FIX: map_region can fail with -1 (PTE allocation failure);
         * the return value was silently ignored. */
        if (map_region(pgdir, (void*)(start + i), PGSIZE, IN_USPACE(p), PTE_USER | pte_flags) < 0)
            panic("uvmmap: failed to map page.");
    }

    return start / PGSIZE + cnt;
}

/*
 * Load binary code into address 0 of pgdir.
 * <del>sz must be less than a page</del>.
 * The page table entry should be set with
 * additional PTE_USER|PTE_RW|PTE_PAGE permission
 * returns the number of page allocated.
 */
/* Load 'sz' bytes of 'binary' at user VA 0 of 'pgdir' with
 * user-writable page mappings; returns the number of pages allocated. */
int uvm_init(uint64_t *pgdir, char *binary, int sz) {
    return uvmmap(pgdir, binary, /* start */ 0, (uint64_t) sz, PTE_RW);
}

/*
 * switch to the process's own page table for execution of it
 */
void
uvm_switch(struct proc *p)
{
    /* Point TTBR0 at this process's page table, expressed as a
     * user-space/physical address as the hardware expects. */
    uint64_t pgdir_base = (uint64_t) IN_USPACE(p->pgdir);
    arm_set_ttbr0(pgdir_base);
}