/* RISC-V kernel boot stage */
#include <context.h>
#include <os/elf.h>
#include <pgtable.h>
#include <sbi.h>

/* Signature of the loaded kernel's entry point; receives the booting hart id. */
typedef void (*kernel_entry_t)(unsigned long);

/* Kernel ELF image embedded in the boot binary by the build (start + length). */
extern unsigned char _elf_main[];
extern unsigned _length_main;
/* Bump pointer for boot-time page allocation.  Starts one page past the root
 * page directory so the first alloc_page() call does not clobber PGDIR_PA. */
uintptr_t boot_memCurr = PGDIR_PA + NORMAL_PAGE_SIZE;

/********* setup memory mapping ***********/
/* Hand out the next free boot-time page, zeroed, and advance the bump
 * pointer.  There is no free(): this allocator only serves page tables
 * during early boot. */
uintptr_t alloc_page()
{
    uintptr_t page = boot_memCurr;
    boot_memCurr += NORMAL_PAGE_SIZE;
    clear_pgdir(page);
    return page;
}

/* Install a full 1 GiB mapping into the single root-table slot *pgdir:
 * allocate a fresh second-level table, point the root entry at it, and
 * fill all 512 slots with 2 MiB leaf pages covering [pa, pa + 1 GiB).
 * The va argument is unused: callers pass a 1 GiB-aligned va and the
 * whole gigabyte is mapped unconditionally. */
void map_page(uint64_t va, uint64_t pa, PTE *pgdir)
{
    /* Second-level (vpn1) table backing this root entry. */
    uint64_t pmd_pa = alloc_page();

    /* Non-leaf root entry: PPN in bits 10+, no R/W/X permission bits. */
    *pgdir = ((pmd_pa / 4096) << 10) | _PAGE_PRESENT | _PAGE_GLOBAL |
             _PAGE_ACCESSED | _PAGE_DIRTY;

    /* 512 consecutive 2 MiB leaves, RWX, global, pre-accessed/dirtied. */
    PTE *slot = (PTE *)pmd_pa;
    for (int i = 0; i < 512; i++) {
        slot[i] = ((pa / 4096) << 10) | _PAGE_PRESENT | _PAGE_READ |
                  _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL |
                  _PAGE_ACCESSED | _PAGE_DIRTY;
        pa += LARGE_PAGE_SIZE;
    }
}

/* Switch this hart to Sv39 paging: load satp with the root page
 * directory's PPN, then invalidate any stale TLB entries. */
void enable_vm()
{
    uintptr_t root_ppn = PGDIR_PA / 4096;
    set_satp(SATP_MODE_SV39, 0, root_ppn);
    local_flush_tlb_all();
}

/* Sv-39 mode
 * 0x0000_0000_0000_0000-0x0000_003f_ffff_ffff is for user mode
 * 0xffff_ffc0_0000_0000-0xffff_ffff_ffff_ffff is for kernel mode
 */
void setup_vm()
{
    clear_pgdir(PGDIR_PA);

    /* Identity-map the boot blob's own 2 MiB page at 0x50200000 so the
     * code currently executing survives the instant paging turns on.
     * VA 0x50200000 decomposes as vpn2 = 1, vpn1 = 129. */
    uint64_t b_pa = 0x50200000;
    uint64_t first_b_pgdir = PGDIR_PA + 1 * 8;          // root slot for vpn2 = 1
    uint64_t second_b_pgdir = alloc_page() + 129 * 8;   // second-level slot for vpn1 = 129
    *(PTE *)first_b_pgdir = ((second_b_pgdir / 4096) << 10) | _PAGE_PRESENT |
                            _PAGE_GLOBAL | _PAGE_ACCESSED | _PAGE_DIRTY;
    *(PTE *)second_b_pgdir = ((b_pa / 4096) << 10) | _PAGE_PRESENT | _PAGE_READ |
                             _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL |
                             _PAGE_ACCESSED | _PAGE_DIRTY;

    /* Map the kernel half: kva = kpa + 0xffff_ffc0_0000_0000, one 1 GiB
     * root entry (filled with 2 MiB pages by map_page) per iteration.
     * The kernel window spans root entries 0x100..0x1ff, i.e. 256 GiB.
     *
     * FIX: the previous loop condition `k_va <= 0xffffffffffffffff` is
     * always true for a uint64_t, so termination relied on unsigned
     * wrap-around of k_va defeating the second condition.  A counted
     * loop performs the identical 256 iterations without depending on
     * overflow behavior. */
    uint64_t k_va = 0xffffffc000000000;
    uint64_t first_k_pgdir = PGDIR_PA + 0x100 * 8;      // root slot for vpn2 = 0x100
    for (int i = 0; i < 256; i++) {
        uint64_t k_pa = (k_va << 26) >> 26;             // strip sign-extended top bits -> physical addr
        map_page(k_va, k_pa, (PTE *)first_k_pgdir);
        k_va += 0x40000000;                             // advance one 1 GiB root entry
        first_k_pgdir += 8;
    }

    /* Turn on Sv39 translation for this hart. */
    enable_vm();
}

/* Identity translation used while loading the kernel ELF: at this point
 * the "kernel virtual address" is already usable directly, so the page
 * directory argument plays no role. */
uintptr_t directmap(uintptr_t kva, uintptr_t pgdir)
{
    (void)pgdir;    /* unused: 1:1 mapping needs no table walk */
    return kva;
}

/* Entry point of the loaded kernel ELF; published by hart 0 in boot_kernel(). */
kernel_entry_t start_kernel = NULL;

/*********** start here **************/
int boot_kernel(unsigned long mhartid)
{
    if (mhartid == 0) {
        setup_vm();
        // load kernel
        start_kernel =
            (kernel_entry_t)load_elf(_elf_main, _length_main,
                                     PGDIR_PA, directmap);
    } else {
        // TODO: what should we do for other cores?
        /* set SATP to enable sv39_mode */
        set_satp(SATP_MODE_SV39, 0, PGDIR_PA/4096);
    }
    start_kernel(mhartid);
    return 0;
}
