#include <asm/pgtable.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/stddef.h>
#include <riscv/mmu-early.h>
#include <linux/kconfig.h>
#include <riscv/csr.h>

/* Translate a physical address inside the kernel image to its virtual
 * address in the kernel mapping (valid once the MMU is enabled). */
#define kernel_mapping_pa_to_va(y) ((void *)((unsigned long)(y) + kernel_map.va_kernel_pa_offset))

/*
 * Describes where the kernel image lives in physical and virtual address
 * space, and the offsets used to translate between the two views.
 */
struct kernel_mapping
{
    unsigned long page_offset;  /* base VA of the linear mapping -- never set in this file, TODO confirm */
    unsigned long virt_addr;    /* VA the kernel is mapped at (KERNEL_LINK_ADDR + virt_offset) */
    unsigned long virt_offset;  /* extra offset applied to KERNEL_LINK_ADDR (e.g. for relocation) */
    unsigned long phys_addr;    /* physical load address of the kernel image */
    unsigned long size;         /* size of the kernel mapping in bytes */
    /* Offset between linear mapping virtual address and kernel load address */
    unsigned long va_pa_offset; /* NOTE(review): never initialized in this file */
    /* Offset between kernel mapping virtual address and kernel load address */
    unsigned long va_kernel_pa_offset;
};

/* Single global instance, populated by early_pgtable_setup(). */
struct kernel_mapping kernel_map;

/* Boot-time page tables, all page-aligned as the hardware requires.
 * trampoline_pg_dir/trampoline_pmd map only the kernel VA window used to
 * turn the MMU on; early_pg_dir is the full early address space;
 * fixmap_pte backs the fixmap region. */
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
static pmd_t trampoline_pmd[PTRS_PER_PMD] __aligned(PAGE_SIZE);
static pgd_t early_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
static pte_t fixmap_pte[PTRS_PER_PTE] __aligned(PAGE_SIZE);

/* PMD table shared by the early MMIO (PLIC/UART) mappings. */
static pmd_t io_pmd[512] __aligned(PAGE_SIZE);

/* Page-table allocation hooks; installed by pt_ops_set_early() before the
 * MMU is on and switched by pt_ops_set_fixmap() afterwards. */
static struct pt_alloc_ops pt_ops = {};

/*
 * Map the kernel image into @pgdir: kernel_map.size bytes starting at
 * kernel_map.virt_addr, backed by kernel_map.phys_addr, using PMD-sized
 * leaf mappings with PAGE_KERNEL_EXEC permissions.
 *
 * @early is currently unused; kept for interface parity with callers.
 */
static void create_kernel_page_table(pgd_t *pgdir, bool early)
{
    uintptr_t offset;

    for (offset = 0; offset < kernel_map.size; offset += PMD_SIZE)
    {
        create_pgd_mapping(&pt_ops, pgdir,
                           kernel_map.virt_addr + offset,
                           kernel_map.phys_addr + offset,
                           PMD_SIZE,
                           PAGE_KERNEL_EXEC);
    }
}

/*
 * Allocate a page-table page while the MMU is on but the final page
 * tables are not yet built.  Not implemented yet; always returns 0.
 */
static phys_addr_t alloc_pte_fixmap(uintptr_t va)
{
    return 0; // TODO
}

/*
 * Map a page-table page through the fixmap so it can be written while
 * the linear mapping does not exist yet.  Not implemented; returns NULL.
 */
static pte_t *get_pte_virt_fixmap(phys_addr_t pa)
{
    return NULL; // TODO
}

static inline phys_addr_t alloc_pte_early(uintptr_t va)
{
    /* BUG: reaching this is a programming error -- see below. */
    /*
     * We only create PMD or PGD early mappings so we
     * should never reach here with MMU disabled.
     */
    return 0;
}

/*
 * With the MMU off, page-table pages are accessed through their physical
 * addresses directly, so the "virtual" address is just @pa itself.
 */
static inline pte_t *get_pte_virt_early(phys_addr_t pa)
{
    uintptr_t addr = pa;

    return (pte_t *)addr;
}

/*
 * MMU is enabled but page table setup is not complete yet.
 * fixmap page table alloc functions must be used as a means to temporarily
 * map the allocated physical pages since the linear mapping does not exist yet.
 *
 * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
 * but it will be used as described above.
 */
/*
 * Install the fixmap-based page-table allocation hooks.
 *
 * This function itself runs with the MMU still disabled, which is why the
 * function addresses are translated with kernel_mapping_pa_to_va: the
 * stored pointers must be valid once the kernel mapping is live.  The
 * hooks are only invoked later, with the MMU on, while the final page
 * tables are being built and the linear mapping does not exist yet.
 */
static void pt_ops_set_fixmap(void)
{
    pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
    pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
}

/*
 * MMU is not enabled, the page tables are allocated directly using
 * early_pmd/pud/p4d and the address returned is the physical one.
 */
/*
 * Install the pre-MMU page-table hooks: tables are taken from statically
 * allocated early storage and addressed by their physical addresses.
 */
static void pt_ops_set_early(void)
{
    pt_ops.get_pte_virt = get_pte_virt_early;
    pt_ops.alloc_pte = alloc_pte_early;
}

/* Next-level tables the trampoline and fixmap PGD entries point at. */
#define trampoline_pgd_next ((uintptr_t)trampoline_pmd)
#define fixmap_pgd_next ((uintptr_t)fixmap_pte)

/* NOTE(review): FIXADDR_START aliases the kernel link address here --
 * confirm this is intentional rather than a dedicated fixmap region. */
#define FIXADDR_START KERNEL_LINK_ADDR

/*
 * Build the early page tables while the MMU is still off, and return the
 * satp value that enables Sv39 translation through early_pg_dir.
 *
 * @dtb_pa:   physical address of the device tree.  NOTE(review): unused --
 *            confirm whether the DTB should be mapped here.
 * @start_pa: physical load address of the kernel image.
 */
uintptr_t early_pgtable_setup(uintptr_t dtb_pa, uintptr_t start_pa)
{
    uintptr_t satp;

    /* Record the kernel's VA/PA placement and the offset between the two.
     * The mapped size is a fixed 12 MiB covering the image. */
    kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
    kernel_map.phys_addr = start_pa;
    kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
    kernel_map.size = 12 * 1024 * 1024;

    /* MMU is off: page-table pages are addressed physically. */
    pt_ops_set_early();

    /* Setup early PGD for fixmap */
    create_pgd_mapping(&pt_ops, early_pg_dir, FIXADDR_START,
                       fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

    /* NOTE(review): with Sv39 PGDIR_SIZE (1 GiB), 0x0c000000 and
     * 0x10000000 fall in the same PGD entry, so the second call below
     * re-installs the same entry -> io_pmd link, and the identity leaf
     * mappings created right after appear to supersede both.  Confirm
     * which of these mappings is actually meant to survive. */
    create_pgd_mapping(&pt_ops, early_pg_dir, 0x0c000000, /* plic */
                       io_pmd, PGDIR_SIZE, PAGE_TABLE);
    create_pgd_mapping(&pt_ops, early_pg_dir, 0x10000000, /*uart*/
                       io_pmd, PGDIR_SIZE, PAGE_TABLE);

    /* Identity-map 64 MiB of MMIO space starting at the PLIC, in
     * PMD-sized leaves.  NOTE(review): PAGE_KERNEL_EXEC on device memory
     * looks unintended -- non-executable PAGE_KERNEL would be the usual
     * choice for MMIO; confirm. */
    for (uintptr_t va = 0; va < (64 * 1024 * 1024); va += PMD_SIZE)
    {
        create_pgd_mapping(&pt_ops, early_pg_dir, va + 0x0c000000,
                           va + 0x0c000000,
                           PMD_SIZE,
                           PAGE_KERNEL_EXEC);
    }

    /* One extra PMD leaf for the UART: the 64 MiB window above covers
     * [0x0c000000, 0x10000000), i.e. it excludes 0x10000000 itself. */
    create_pgd_mapping(&pt_ops, early_pg_dir, 0x10000000,
                       0x10000000,
                       PMD_SIZE,
                       PAGE_KERNEL_EXEC);

    /*
     * Setup early PGD covering entire kernel which will allow
     * us to reach paging_init(). We map all memory banks later
     * in setup_vm_final() below.
     */
    create_kernel_page_table(early_pg_dir, true);

    /* satp = root-table PPN | Sv39 mode bits. */
    satp = (uintptr_t)early_pg_dir >> PAGE_SHIFT;
    satp |= SATP_MODE_39;

    return satp;
}

/*
 * Continue virtual-memory setup after early_pgtable_setup(): build the
 * trampoline mapping for the kernel VA, (re)build the kernel page table,
 * and switch pt_ops to the fixmap-based allocators for the remaining
 * page-table construction.  The pt_ops switch must stay last.
 */
void setup_vm(void)
{
    /* Setup trampoline PGD and PMD */
    create_pgd_mapping(&pt_ops, trampoline_pg_dir, kernel_map.virt_addr,
                       trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);

    /*
     * Setup early PGD covering entire kernel which will allow
     * us to reach paging_init(). We map all memory banks later
     * in setup_vm_final() below.
     * NOTE(review): this repeats the create_kernel_page_table() call
     * already made in early_pgtable_setup() -- confirm it is needed.
     */
    create_kernel_page_table(early_pg_dir, true);

    pt_ops_set_fixmap();
}

/*
 * Translate a kernel-mapping virtual address back to its physical
 * address by undoing the kernel VA/PA offset.
 */
uintptr_t __virt_to_phys(uintptr_t x)
{
    return x - kernel_map.va_kernel_pa_offset;
}

/*
 * Translate a physical address to a virtual address.
 *
 * NOTE(review): despite the name, this applies va_kernel_pa_offset (the
 * kernel-mapping offset), not va_pa_offset (the linear-mapping offset
 * declared in struct kernel_mapping, which is never initialized in this
 * file).  Confirm whether the linear mapping deliberately aliases the
 * kernel mapping here, or whether this should use va_pa_offset once the
 * linear map exists.
 */
uintptr_t linear_mapping_pa_to_va(uintptr_t x)
{
    return x + kernel_map.va_kernel_pa_offset;
}
