#include "mmu.h"
#include "fs/fat32.h"
#include "mmulayout.h"
#include "proc.h"
#include "riscv.h"
#include "stddef.h"
#include "stdio.h"
#include "string.h"
#include "type.h"
#include "utils.h"

#include "dbg.h"

extern char kernel_end[];  // end of the kernel image, provided by the linker script
extern char etext[];       // end of kernel text, provided by the linker script
extern char trampoline[];  // trampoline page (trap entry/exit code)
extern char userret[];

// Head of the kmalloc block list (address of the first block); 0 when empty.
address_t listp;

// Root of the global kernel page table, built by kvminit().
pageTable_t kernel_pagetable;
// Free physical pages are threaded through the free pages themselves:
// each free page's first word holds the pointer to the next free page.
typedef struct page {
    struct page* next;
} page;

// kmem is the (global) physical page allocator state; its spinlock
// protects both the free list and the page counter.
struct {
    spinlock_t lk;
    page* free_page_list;
    uint32_t npages;
} kmem;

// Initialize the physical page allocator: set up the kmem spinlock,
// start with an empty free list, then donate every page between the end
// of the kernel image and PA_STOP to it.
void
kinit() {
    __DEBUG_FUNC_START;
    spinlock_init(&kmem.lk, "kmem");
    kmem.npages = 0;
    kmem.free_page_list = NULL;
    kfree_pages(kernel_end, (void*)PA_STOP);
    __DEBUG_FUNC_END;
}

// Free every physical page in [pa_start, pa_end). The start address is
// rounded up to a page boundary, so only whole, page-aligned frames that
// lie entirely inside the range are released.
void
kfree_pages(void* pa_start, void* pa_end) {
    byte_t* frame = (byte_t*)PAGE_ROUND_UP((byte_t*)pa_start);
    while (frame < (byte_t*)pa_end) {
        kfree(frame);
        frame += PAGE_SIZE;
    }
}

void
kfree(void* pa) {
    page* p;
    if ((address_t)pa % PAGE_SIZE != 0 || (address_t)pa < (address_t)kernel_end || (address_t)pa >= PA_STOP) {
        panic("kfree");
    }
    // fill with junk
    memset(pa, 1, PAGE_SIZE);
    p = (page*)pa;
    spinlock_acquire(&kmem.lk);
    // __DEBUG_OUTPUT("kmem.lk.locked:%d\n", kmem.lk.locked);
    p->next = kmem.free_page_list;
    kmem.free_page_list = p;
    ++kmem.npages;
    // __DEBUG_OUTPUT("kmem.lk.locked:%d\n", kmem.lk.locked);
    spinlock_release(&kmem.lk);
}

// Allocate one zeroed physical page. Never returns NULL: when the free
// list is empty it panics (and hangs defensively in case panic returns).
//
// Fix: the original re-checked `if (p)` twice after the empty-list branch,
// but that branch never falls through (panic, then an infinite loop), so
// the guards were dead code hiding the real control flow.
void*
kalloc(void) {
    page* p;
    spinlock_acquire(&kmem.lk);
    p = kmem.free_page_list;
    if (p == NULL) {
        panic("kalloc: Memory full!\n");
        while (1)  // unreachable if panic halts; hang otherwise
            ;
    }
    kmem.free_page_list = p->next;
    --kmem.npages;
    spinlock_release(&kmem.lk);

    memset(p, 0, PAGE_SIZE);
    return (void*)p;
}

// Build the global kernel page table: map the device MMIO regions, the
// kernel text (read+execute), the remainder of physical memory
// (read+write) and the trampoline page at TRAMPOLINE.
void
kvminit(void) {
    __DEBUG_FUNC_START;
    kernel_pagetable = (pageTable_t)kalloc();
    memset(kernel_pagetable, 0, PAGE_SIZE);  // kalloc already zeroes; harmless double-clear
    // Device registers: readable and writable, never executable.
    create_pages_map(kernel_pagetable, (address_t)UART_V, (address_t)UART, PAGE_SIZE, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)CLINT_V, (address_t)CLINT, 0x10000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)PLIC_V, (address_t)PLIC, 0x4000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)PLIC_V + 0x200000, (address_t)PLIC + 0x200000, 0x4000, PTE_R | PTE_W);
#ifdef QEMU
    create_pages_map(kernel_pagetable, (address_t)VIRTIO0_V, (address_t)VIRTIO0, PAGE_SIZE, PTE_R | PTE_W);
#endif

#ifndef QEMU
    // Board (non-QEMU) platform devices.
    create_pages_map(kernel_pagetable, (address_t)GPIOHS_V, (address_t)GPIOHS, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)DMAC_V, (address_t)DMAC, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)SPI_SLAVE_V, (address_t)SPI_SLAVE, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)FPIOA_V, (address_t)FPIOA, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)SPI0_V, (address_t)SPI0, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)SPI1_V, (address_t)SPI1, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)SPI2_V, (address_t)SPI2, 0x1000, PTE_R | PTE_W);
    create_pages_map(kernel_pagetable, (address_t)SYSCTL_V, (address_t)SYSCTL, 0x1000, PTE_R | PTE_W);
#endif

    // Kernel text: read + execute, deliberately not writable.
    create_pages_map(kernel_pagetable, (address_t)KERNEL_START, (address_t)KERNEL_START, (uint64_t)etext - (uint64_t)KERNEL_START,
                     (uint64_t)(PTE_R | PTE_X));
    // Kernel data and the rest of RAM up to PA_STOP: read + write.
    create_pages_map(kernel_pagetable, (address_t)etext, (address_t)etext, (uint64_t)PA_STOP - (uint64_t)etext, (uint64_t)(PTE_R | PTE_W));
    // Trampoline code mapped at the fixed TRAMPOLINE virtual address.
    create_pages_map(kernel_pagetable, (address_t)TRAMPOLINE, (address_t)trampoline, PAGE_SIZE, (uint64_t)(PTE_R | PTE_X));

    __DEBUG_FUNC_END;
    // Some I/O mappings are intentionally not created here (2022/4/29).
}

// void kvmmap(address_t va, address_t pa, uint64_t size, uint64_t flag)
// {
//     return create_pages_map(kernel_pagetable, va, pa, size, flag);
// }

// Install mappings for [va, va+size) -> [pa, pa+size) in pageTable with
// permission bits `flag`. va and size need not be page aligned; the range
// is widened to whole pages. Panics on remap or page-table exhaustion.
//
// Fixes: (1) size == 0 made `va + size - 1` wrap, so `last` landed below
// `a` and the loop mapped pages forever — now rejected up front.
// (2) `(*pte & PTE_V) == 1` only works while PTE_V happens to be bit 0;
// test the bit for nonzero instead.
void
create_pages_map(pageTable_t pageTable, address_t va, address_t pa, uint64_t size, uint64_t flag) {
    address_t a, last;
    pte_t* pte;
    if (size == 0) {
        panic("create_pages_map: size");
    }
    a = PAGE_ROUND_DOWN(va);
    last = PAGE_ROUND_DOWN(va + size - 1);
    while (1) {
        pte = get_level0_pte(pageTable, a, 1);
        if (!pte) {
            panic("full");
        }
        if (*pte & PTE_V) {
            panic("kvmmap-remap");
        }
        *pte = GET_PPN(pa) | flag | PTE_V;
        if (a == last)
            break;
        a += PAGE_SIZE;
        pa += PAGE_SIZE;
    }
    return;
}

// Walk the three-level (Sv39-style) page table and return a pointer to
// the level-0 (leaf) PTE for va. When `alloc` is nonzero, missing
// intermediate page-table pages are allocated on the way down; otherwise
// NULL is returned for an unmapped path. Also returns NULL when an
// intermediate allocation fails.
pte_t*
get_level0_pte(pageTable_t pageTable, address_t va, int alloc) {
    // NOTE(review): bound check uses `>`; confirm MAX_VA is the largest
    // valid address (inclusive) rather than one-past-the-end, otherwise
    // this should be `>=`.
    if (va > MAX_VA) {
        panic("get_level0_pte out of bound");
    }
    pte_t* pte;
    for (int level = 2; level > 0; level--) {
        pte = &pageTable[VPN(va, level)];  // level > 0 is right. Don't change
        if (*pte & PTE_V) {
            // Descend into the existing next-level table.
            pageTable = (pageTable_t)GET_PAGETABLE(*pte);
        } else {
            if (!alloc)
                return NULL;
            pageTable = (pageTable_t)kalloc();
            if (pageTable == NULL)
                return NULL;
            memset(pageTable, 0, PAGE_SIZE);
            // Interior PTE: valid bit only, no R/W/X (points to a table).
            *pte = GET_PPN(pageTable) | PTE_V;
        }
    }
    return &pageTable[VPN(va, 0)];
}

// Switch this hart's MMU onto the global kernel page table and flush
// any stale TLB entries.
void
kvminithart() {
    __DEBUG_FUNC_START;
    w_satp(PAGE_TABLE_SHIFT(kernel_pagetable));  // load satp with the kernel root table
    sfenve_vma();                                // sfence.vma: flush the TLB
    __DEBUG_FUNC_END;
}
// translate a user virtual address to a physical address (draft below)
//  address_t uvm_trans(address_t va, pageTable_t pageTable)
//  {
//      PTE *pte;
//      address_t pa;
//      pte = get_level0_pte(pageTable, va, 0);
//      if(pte == NULL)
//          return NULL;
//      if((*pte & PTE_V) == 0)
//          return NULL;
//      if((*pte & PTE_U) == 0)
//          return NULL;
//      pa = GET_PA(va, *pte)
//      return pa;
//  }
// Translate a virtual address through `pagetable` to its physical
// address. On an unmapped va it prints a diagnostic and hangs (matching
// the original behavior, which never returned in that case).
//
// Fixes: the two error branches were duplicated and the printf lacked a
// trailing newline, so the message could stay stuck in the output buffer.
address_t
kvm_trans(address_t va, pageTable_t pagetable) {
    pte_t* pte = get_level0_pte(pagetable, va, 0);
    if (pte == NULL || (*pte & PTE_V) == 0) {
        printf("kvm_trans: unmapped va: %p\n", va);
        while (1)
            ;
    }
    return GET_PA(va, *pte);
}
// Initialize the kernel small-object heap: empty the block list and
// seed it with one page taken from the physical allocator.
void
kmm_init(void) {
    __DEBUG_FUNC_START;
    __DEBUG_OUTPUT("hartid:%d\n", r_tp());  // may need a lock
    listp = 0;
    extend_heap();
    __DEBUG_FUNC_END;
}

// Kernel heap allocator: return a block of at least `size` bytes
// (rounded up by ALIGN). When no free block fits, the heap is grown by
// exactly one page and the allocation is carved from that.
//
// Fix: extend_heap() only ever adds one page, so a request larger than
// that single fresh block made place() write past the block and corrupt
// adjacent memory. Such requests now panic instead.
void*
kmalloc(uint64_t size) {
    uint64_t newsize = ALIGN(size);
    void* goal = find_first_fit(newsize);
    if (goal == 0) {
        goal = extend_heap();  // adds exactly one page-sized free block
        if (GET_SIZE(goal) < newsize) {
            panic("kmalloc: request larger than a page\n");
        }
    }
    place(goal, newsize);
    return goal;
}

// Free a kmalloc'd block: clear its allocated bit, coalesce with
// same-page neighbors, and when the merged block covers an entire
// page-aligned page, hand the page back to the physical allocator.
void
kmfree(void* address) {
    PUT(HEAD(address), PACK(GET_SIZE(address), 0));  // mark the block free
    void* free_page = merge(address);
    ;
    if ((GET_SIZE(free_page) >= PAGE_SIZE) && (((uint64_t)free_page & (uint64_t)4095) == 0)) {
        // NOTE(review): the block appears to still be linked into the heap
        // block list when kfree() junk-fills the page, which would clobber
        // the list links — confirm the block is unlinked elsewhere first.
        // TODO (from original): unused variable
        //   pte_t* pte = get_level0_pte(kernel_pagetable, (address_t)free_page, 0);
        kfree(free_page);
#ifdef DEBUG
        printf("kmfree: free page\n");
        printf("address: %p,  size: %d", free_page, GET_SIZE(free_page));
#endif
    }
    return;
}

// Coalesce the free block `address` with its neighbors in the block
// list, but only when a neighbor lives in the SAME physical page
// (presumably because heap pages from kalloc need not be contiguous).
// Returns the address of the resulting (possibly merged) block.
// Note: `~4095` is the int -4096; converted to uint64_t it sign-extends
// to the full 64-bit page mask, which is the intended behavior.
void*
merge(void* address) {
    byte_t* prev = (byte_t*)PREV_ADDRESS(address);
    byte_t* next = (byte_t*)NEXT_ADDRESS(address);
    uint64_t size = GET_SIZE(address);
    uint64_t next_alloc, prev_alloc;
    if ((prev == 0) && (next == 0)) {
        // Sole block in the list: nothing to merge with.
    } else if ((prev == 0) && (next != 0)) {
        // Head of the list: can only absorb the following block.
        next_alloc = NEXT_ALLOC(address);
        if (next_alloc == 0) {
            if (((uint64_t)next & (~4095)) == ((uint64_t)address & (~4095))) {  // same page?
                size += GET_SIZE(next);
                PUT(HEAD(address), PACK(size, 0));
                if (NEXT_ADDRESS(next) != 0) {
                    PUT(PREV(NEXT_ADDRESS((void*)next)), address);  // fix back-pointer
                }
            }
        }
        return address;
    } else if ((prev != 0) && (next == 0)) {
        // Tail of the list: can only be absorbed into the previous block.
        prev_alloc = PREV_ALLOC(address);
        if (prev_alloc == 0) {
            if (((uint64_t)prev & (~4095)) == ((uint64_t)address & (~4095))) {
                size += GET_SIZE((void*)prev);
                PUT(HEAD((void*)prev), PACK(size, 0));
                PUT(NEXT((void*)prev), 0);  // prev becomes the new tail
                return (void*)prev;
            }
        }
        return address;
    } else {
        // Interior block: four cases on (prev free?, next free?).
        prev_alloc = PREV_ALLOC(address);
        next_alloc = NEXT_ALLOC(address);
        if ((prev_alloc == 0) && (next_alloc == 0)) {
            if (((uint64_t)prev & (~4095)) == ((uint64_t)address & (~4095))) {
                if (((uint64_t)prev & (~4095)) == ((uint64_t)next & (~4095))) {
                    // prev, this block and next all share a page: triple merge.
                    size = GET_SIZE((void*)prev) + GET_SIZE((void*)next) + size;
                    PUT(HEAD((void*)prev), PACK(size, 0));
                    PUT(NEXT((void*)prev), NEXT_ADDRESS(next));
                    if (NEXT_ADDRESS((void*)next) != 0) {
                        PUT(PREV(NEXT_ADDRESS((void*)next)), prev);
                    }
                } else {
                    // Only prev shares the page: merge backward.
                    size += GET_SIZE(prev);
                    PUT(HEAD((void*)prev), PACK(size, 0));
                    PUT(NEXT((void*)prev), next);
                    PUT(PREV((void*)next), prev);
                }
                return (void*)prev;
            } else {
                if (((uint64_t)address & (~4095)) == ((uint64_t)next & (~4095))) {
                    // Only next shares the page: merge forward.
                    size += GET_SIZE((void*)next);
                    PUT(HEAD(address), PACK(size, 0));
                    PUT(NEXT(address), NEXT_ADDRESS((void*)next));
                    if (NEXT_ADDRESS((void*)next) != 0) {
                        PUT(PREV(NEXT_ADDRESS((void*)next)), address);
                    }
                }
                return address;
            }
        } else if ((prev_alloc == 0) && (next_alloc)) {
            // Only prev is free: merge backward when in the same page.
            if (((uint64_t)prev & (~4095)) == ((uint64_t)address & (~4095))) {
                size += GET_SIZE((void*)prev);
                PUT(HEAD((void*)prev), PACK(size, 0));
                PUT(NEXT((void*)prev), next);
                PUT(PREV((void*)next), prev);
                return (void*)prev;
            }
            return address;
        } else if ((prev_alloc) && (next_alloc == 0)) {
            // Only next is free: merge forward when in the same page.
            if (((uint64_t)next & (~4095)) == ((uint64_t)address & (~4095))) {
                size += GET_SIZE((void*)next);
                PUT(HEAD(address), PACK(size, 0));
                PUT(NEXT(address), NEXT_ADDRESS((void*)next));
                if (NEXT_ADDRESS((void*)next) != 0) {
                    PUT(PREV(NEXT_ADDRESS((void*)next)), address);
                }
            }
            return address;
        }
    }
    return address;
}

void*
extend_heap(void) {
    void* next_page = kalloc();
    // create_pages_map(kernel_pagetable, (address_t)next_page, (address_t)next_page, PAGE_SIZE, PTE_R | PTE_W);
    byte_t* new = (byte_t*)next_page + TSIZE;
    PUT((void*)next_page, PACK(PAGE_SIZE, 0));
    PUT(PREV((void*)new), 0);
    PUT(NEXT((void*)new), listp);
    listp = (address_t) new;
    return (void*)new;
}
// Carve an allocation of `size` bytes out of free block `goal`.
// If the leftover space exceeds MIN_BLOCK, the block is split and the
// remainder stays free; otherwise the whole block is handed out.
void
place(void* goal, uint64_t size) {
#ifdef DEBUG
    printf("hart: %d    function: place\n", r_tp());
#endif
    uint64_t goal_size = GET_SIZE(goal);
    uint64_t free_size = goal_size - size;
    byte_t* free;
    if (free_size > MIN_BLOCK) {
        // NOTE(review): the remainder is carved from the LOW end of the
        // block (goal - free_size), which implies block payloads grow
        // downward — confirm against the HEAD/PREV/NEXT macro layout.
        free = goal - free_size;
        PUT(HEAD((void*)free), PACK(free_size, 0));
        PUT(PREV((void*)free), goal);
        PUT(NEXT((void*)free), NEXT_ADDRESS(goal));
        PUT(NEXT(goal), free);
        PUT(HEAD(goal), PACK(size, 1));  // mark the allocated part used
    } else {
        PUT(HEAD(goal), PACK(goal_size, 1));  // too small to split: allocate it all
    }
    return;
}

// First-fit search of the block list: return the first block whose size
// is at least `size`, or 0 when no block fits.
void*
find_first_fit(uint64_t size) {
    byte_t* block = (byte_t*)listp;
    while (block != 0 && GET_SIZE(block) < size) {
        block = (byte_t*)NEXT_ADDRESS(block);
    }
    return block;
}
// Allocate a process's fixed memory: trapframe page, user page table,
// per-process kernel page table and a kernel stack page, then install
// the standard mappings (trampoline + trapframe in the user table,
// kernel stack in the kernel table).
void
alloc_proc_mem(proc_t* p) {
    p->trapFrame = (trapFrame_t*)kalloc();
    p->upageTable = (pageTable_t)kalloc();
    p->kpageTable = (pageTable_t)kalloc();

    byte_t* stack = (void*)kalloc();
    memset(p->kpageTable, 0, PAGE_SIZE);  // kalloc already zeroes; double-clear is harmless
    memset(p->upageTable, 0, PAGE_SIZE);
    memset(p->trapFrame, 0, PAGE_SIZE);
    // Start the per-process kernel table as a copy of the global one.
    memmove(p->kpageTable, kernel_pagetable, PAGE_SIZE);
    create_pages_map(p->upageTable, (address_t)TRAMPOLINE, (address_t)trampoline, PAGE_SIZE, (uint64_t)(PTE_R | PTE_X));
    create_pages_map(p->upageTable, (address_t)TRAP_FRAME, (address_t)p->trapFrame, PAGE_SIZE, (uint64_t)(PTE_R | PTE_W));
    //  create_pages_map(p->upageTable, (address_t)TRAMRET, (address_t)userret, PAGE_SIZE, (uint64_t)(PTE_R | PTE_X));
    create_pages_map(p->kpageTable, (address_t)UVK_STACK, (address_t)stack, PAGE_SIZE, (uint64_t)(PTE_R | PTE_W));
}

// Load the first user program: allocate one page, map it at UVA_START in
// both the user page table (with PTE_U) and the per-process kernel page
// table (without PTE_U), then copy `size` bytes of `code` into it.
//
// Fix: only one page is mapped, but the original memmove'd `size` bytes
// unconditionally — a size above PAGE_SIZE overran the page. Reject it.
void
uvminit(pageTable_t pageTable, pageTable_t kpageTable, code_t* code, uint64_t size) {
    if (size > PAGE_SIZE) {
        panic("uvminit: more than a page\n");
    }
    void* mem = kalloc();
    memset(mem, 0, PAGE_SIZE);
    create_pages_map(pageTable, (address_t)UVA_START, (address_t)mem, PAGE_SIZE, PTE_W | PTE_R | PTE_X | PTE_U);
    create_pages_map(kpageTable, (address_t)UVA_START, (address_t)mem, PAGE_SIZE, PTE_W | PTE_R | PTE_X);
    memmove(mem, code, size);
}

// Copy `size` bytes from `src` into the kernel buffer `dst`; returns 0.
//
// Fix: the original body was empty, so any caller that used the return
// value hit undefined behavior (missing return in a non-void function).
// NOTE(review): both pointers are plain addresses here (no page table
// argument), so a direct memmove is assumed sufficient — confirm callers
// never pass a user-space va that needs translation.
int
copy_to_kernel(void* dst, void* src, size_t size) {
    memmove(dst, src, size);
    return 0;
}

// Fork-time copy of process p's memory into newp: duplicate every user
// page into a fresh frame, map it into both of newp's page tables, then
// copy the cwd, trapframe and open-file table.
void
uvmcopy(proc_t* p, proc_t* newp) {
    size_t old_size = p->size;
    for (size_t new_size = 0; new_size < old_size; new_size += PAGE_SIZE) {
        void* new_mem = kalloc();
        // NOTE(review): pte is dereferenced without a NULL / PTE_V check;
        // this assumes [0, p->size) is fully mapped in upageTable — confirm.
        pte_t* pte = get_level0_pte(p->upageTable, (address_t)new_size, 0);
        uint64_t flag = GET_FLAG(*(uint64_t*)pte);
        address_t pa = GET_PA(new_size, *(uint64_t*)pte);
        if ((pa % 4096) != 0) {
            panic("uvmcpoy: pa is not aligned\n");
            while (1)
                ;
        }
        memmove(new_mem, (void*)pa, PAGE_SIZE);
        // Kernel-side mapping must not carry PTE_U; user-side must.
        create_pages_map(newp->kpageTable, (address_t)new_size, (address_t)new_mem, PAGE_SIZE, flag & (~PTE_U));
        create_pages_map(newp->upageTable, (address_t)new_size, (address_t)new_mem, PAGE_SIZE, flag | PTE_U);
    }
    newp->cwd = p->cwd;
    memmove((void*)newp->trapFrame, (void*)p->trapFrame, PAGE_SIZE);
    memmove((void*)newp->openFiles, (void*)p->openFiles, N_OPEN_FILE * sizeof(file_t*));
}

// Load `size` bytes of a program segment from directory entry `dir`
// (starting at file offset `off`) into virtual address va, which must be
// page aligned and already mapped in pageTable. Returns 0.
//
// Fix: the original translated va once and then advanced the physical
// address linearly (pa += read_size), silently assuming the frames
// backing consecutive virtual pages are physically contiguous — the
// original's own TODO flagged this. Re-translate each page instead.
static int
loadseg(pageTable_t pageTable, address_t va, dirent_t* dir, int32_t off, size_t size) {
    if ((va % PAGE_SIZE) != 0) {
        panic("loadseg: va is not aligned\n");
        while (1)
            ;
    }
    for (size_t i = 0; i < size; i += PAGE_SIZE) {
        address_t pa = kvm_trans(va + i, pageTable);  // per-page translation
        size_t read_size = __MIN(size - i, PAGE_SIZE);
        __fat32_read(dir, 0, (void*)pa, off + i, &read_size);
    }
    return 0;
}

// Load the ELF executable at `path` into process p's kernel page table
// and point the user program counter at the ELF entry.
//
// Fixes: (1) the bad-magic printf sat AFTER panic() and an infinite
// loop, so it could never run — print first. (2) u_pc was assigned from
// the file before the magic number was validated; set it only once the
// header is known to be a real ELF image.
void
elf_load(proc_t* p, char* path) {
    elf_head_t elf_head;
    pro_head_t pro_head;
    dirent_t* dir = __fat32_name(path);
    dentry_lock(dir);
    size_t elf_head_size = sizeof(elf_head);
    size_t pro_head_size = sizeof(pro_head);
    // TODO (from original): reading straight into a stack buffer may use
    // the wrong address for the driver; a kvm_trans-based translation was
    // tried and commented out:
    // address_t temp_addr = (address_t)kvm_trans((address_t)&elf_head, getCurrentProc()->kpageTable);
    void* temp_addr = (void*)&elf_head;
    if (__fat32_read(dir, 0, (void*)temp_addr, 0, &elf_head_size) < 0) {
        panic("elf_load:read elf head wrong\n");
        while (1)
            ;
    }
    if (elf_head.magic != ELF_MAGIC) {
        printf("elf.head: %x", elf_head.magic);
        panic("elf_load: elf.magic wrong!\n");
        while (1)
            ;
    }
    p->trapFrame->u_pc = elf_head.entry;  // only after the magic check
    // TODO (from original): same address concern for pro_head:
    // temp_addr = (address_t)kvm_trans((address_t)&pro_head, getCurrentProc()->kpageTable);
    temp_addr = (void*)&pro_head;
    for (int i = 0, off = elf_head.phoff; i < elf_head.phnum; i++, off += sizeof(pro_head)) {
        if (__fat32_read(dir, 0, (void*)temp_addr, off, &pro_head_size) < 0) {
            panic("elf_load: read program head wrong\n");
            while (1)
                ;
        }
        if (pro_head.type != ELF_PROG_LOAD)
            continue;
        loadseg(p->kpageTable, pro_head.vaddr, dir, pro_head.off, pro_head.filesz);
    }
    dentry_unlock(dir);
    __fat32_put(dir);  // TODO: confirm reference-release semantics
}

// Unmap `size` bytes of page-aligned mappings starting at va, freeing
// the backing physical pages.
//
// Fix: the original freed each frame but left the leaf PTE valid, so the
// page table kept dangling pointers to junk-filled freed pages (and any
// later remap of the same va would panic in create_pages_map). Clear
// each leaf PTE after freeing its frame.
void
vmunmap(pageTable_t pageTable, address_t va, size_t size) {
    if ((size % 4096) != 0) {
        panic("vmunmap: size is not aligned\n");
    }
    if ((va % 4096) != 0) {
        panic("vmunmap: va is not aligned\n");
    }
    for (size_t i = 0; i < size; i += PAGE_SIZE) {
        address_t pa = kvm_trans(va + i, pageTable);  // hangs if unmapped
        kfree((void*)pa);
        // kvm_trans already proved this leaf exists and is valid.
        pte_t* pte = get_level0_pte(pageTable, va + i, 0);
        *pte = 0;  // drop the stale mapping
    }
}
// Recursively free the page-table pages themselves. The mapped frames
// must already have been released (e.g. via vmunmap) before calling.
void
free_pageTable(pageTable_t pageTable) {
    for (int i = 0; i < 512; i++) {
        pte_t pte = pageTable[i];
        if ((pte & PTE_V) && ((pte & (PTE_R | PTE_W | PTE_X)) == 0)) {
            // Interior entry (valid, no R/W/X): recurse into the child table.
            free_pageTable((pageTable_t)(GET_PAGETABLE(pte)));
            pageTable[i] = 0;
        } else if (pte & PTE_V) {
            // NOTE(review): a still-valid LEAF entry stops the scan of the
            // remaining entries instead of panicking (xv6's freewalk panics
            // here); any later child tables leak — confirm this is intended.
            break;
        }
    }
    kfree((void*)pageTable);
}
// Free a per-process kernel page table: release the kernel stack page,
// then decide (via a one-time probe cached in a static flag) whether the
// interior page-table pages on the UVK_STACK walk are used only by the
// stack mapping and can therefore be freed together with the root.
void
kvmfree(pageTable_t pageTable) {
    vmunmap(pageTable, UVK_STACK, PAGE_SIZE);
    // NOTE(review): `flag` is function-static, so the layout probed for
    // the FIRST process is assumed identical for every later process —
    // confirm all kernel page tables share this structure.
    static int flag = 0;
    // NOTE(review): both lookups index the ROOT table, using the level-0
    // and level-1 VPN fields of UVK_STACK as root indices; verify these
    // really reach the intended interior tables of the UVK_STACK walk.
    pageTable_t level0_pagetable = (pageTable_t)GET_PAGETABLE(pageTable[VPN(UVK_STACK, 0)]);
    pageTable_t level1_pagetable = (pageTable_t)GET_PAGETABLE(pageTable[VPN(UVK_STACK, 1)]);
    if (flag == 0) {
        flag = 1;
        int level0_page_num = 0;
        int level1_page_num = 0;
        // Count valid entries; stop as soon as a second one is seen.
        for (int i = 0; i < 512; i++) {
            if (level0_pagetable[i] & PTE_V) {
                level0_page_num++;
                if (level0_page_num > 1) {
                    break;
                }
            }
        }
        if (level0_page_num == 1) {
            // Only the stack mapping lived here: the table page can go.
            kfree((void*)level0_pagetable);
            flag = 2;
            for (int i = 0; i < 512; i++) {
                if (level1_pagetable[i] & PTE_V) {
                    level1_page_num++;
                    if (level1_page_num > 1) {
                        break;
                    }
                }
            }
        }
        if ((level1_page_num == 1) && (level0_page_num == 1)) {
            flag = 3;
            kfree((void*)level1_pagetable);
        }
    } else if (flag == 2) {
        // Probe said: level-0 table is exclusive to the stack mapping.
        kfree((void*)level0_pagetable);
    } else if (flag == 3) {
        // Probe said: both interior tables are exclusive to the stack.
        kfree((void*)level0_pagetable);
        kfree((void*)level1_pagetable);
    }

    kfree((void*)pageTable);
}
// Release all of process p's user memory and its user page table.
void
uvmfree(proc_t* p) {
    vmunmap(p->upageTable, 0, p->size);
    if (p->heap) {
        // NOTE(review): assumes the heap is exactly one page mapped just
        // above p->size — confirm against the sbrk/heap implementation.
        vmunmap(p->upageTable, p->size, PAGE_SIZE);
    }
    free_pageTable(p->upageTable);
}
