#include <arch/riscv64/mm/arch.h>
#include <mm/page_table_flags.h>
#include <libs/klibc.h>
#include <mm/mm.h>

/*
 * Return the virtual address of the currently active root page table.
 * The `user` flag is ignored on RISC-V: satp holds a single root for
 * both halves of the address space.
 */
uint64_t *get_current_page_dir(bool user) {
    (void)user;

    /* satp.PPN is the physical page number of the root table. */
    uint64_t root_phys = (read_satp() & SATP_PPN_MASK) << 12;

    return (uint64_t *)phys_to_virt(root_phys);
}

/*
 * Translate the generic PT_FLAG_* permission bits into the RISC-V PTE
 * flag encoding. Valid and Dirty are always set on the result.
 */
uint64_t get_arch_page_table_flags(uint64_t flags)
{
    uint64_t arch = ARCH_PT_FLAG_VALID | ARCH_PT_FLAG_DIRTY;

    if (flags & PT_FLAG_R)
        arch |= ARCH_PT_FLAG_READ;
    if (flags & PT_FLAG_W)
        arch |= ARCH_PT_FLAG_WRITE;
    if (flags & PT_FLAG_U)
        arch |= ARCH_PT_FLAG_USER;
    if (flags & PT_FLAG_X)
        arch |= ARCH_PT_FLAG_EXEC;

    return arch;
}

/*
 * Flush the TLB entry for a single virtual address on this hart.
 * rs2 = zero means the fence applies across all address-space IDs.
 */
void arch_flush_tlb(uint64_t vaddr) {
    __asm__ volatile("sfence.vma %0, zero" : : "r"(vaddr) : "memory");
}

// RISC-V page-table constants
#define PAGE_SIZE 4096
#define PAGE_SHIFT 12
#define PTE_PPN_SHIFT 10
#define PTE_FLAGS_MASK 0x3FF

// Sv48 virtual-address layout: four translation levels over a 48-bit VA
#define PGDIR_SHIFT 39 // PGD level (bits 47:39)
#define PUD_SHIFT 30   // PUD level (bits 38:30)
#define PMD_SHIFT 21   // PMD level (bits 29:21)
#define PTE_SHIFT 12   // PTE level (bits 20:12)

#define PTRS_PER_PGD 512
#define PTRS_PER_PUD 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PTE 512

// Extract the 9-bit table index for each level from a virtual address
#define PGD_INDEX(addr) (((addr) >> PGDIR_SHIFT) & 0x1FF)
#define PUD_INDEX(addr) (((addr) >> PUD_SHIFT) & 0x1FF)
#define PMD_INDEX(addr) (((addr) >> PMD_SHIFT) & 0x1FF)
#define PTE_INDEX(addr) (((addr) >> PTE_SHIFT) & 0x1FF)

// Mask covering the low 48 address bits
#define VA_MASK 0xFFFFFFFFFFFFUL

// Per-level page-table entry types (all 64-bit words on RV64)
typedef uint64_t pte_t;
typedef uint64_t pmd_t;
typedef uint64_t pud_t;
typedef uint64_t pgd_t;


// Extract the physical page-frame number stored in a PTE's PPN field.
// NOTE(review): the mask keeps 40 PPN bits while the RV64 PPN field is
// 44 bits wide — confirm physical memory never exceeds that range.
static inline uint64_t pte_to_pfn(pte_t pte) {
    uint64_t ppn = pte >> PTE_PPN_SHIFT;
    return ppn & 0xFFFFFFFFFFUL;
}

// Build a PTE from a page-frame number and flag bits.
static inline pte_t pfn_to_pte(uint64_t pfn, uint64_t flags) {
    uint64_t flag_bits = flags & PTE_FLAGS_MASK;
    return (pfn << PTE_PPN_SHIFT) | flag_bits;
}

// True when the PTE's Valid bit is set.
static inline bool pte_present(pte_t pte) {
    return (pte & ARCH_PT_FLAG_VALID) != 0;
}

// True when the entry maps a large page instead of pointing at a
// next-level table.
static inline bool pte_is_leaf(pte_t pte) {
    return ARCH_PT_IS_LARGE(pte);
}

// Sign-extend a 48-bit Sv48 virtual address to canonical 64-bit form.
static inline uint64_t canonicalize_va(uint64_t vaddr) {
    uint64_t low = vaddr & VA_MASK;
    // Bit 47 decides whether the upper 16 bits are all ones or zeros.
    return (vaddr & (1UL << 47)) ? (low | ~VA_MASK) : low;
}

/* Cached kernel root page table; set lazily by the first map_page() call. */
uint64_t *kernel_page_dir = NULL;

/* Return the kernel root page table, or NULL before the first mapping.
 * (void) gives the function a proper C prototype instead of the
 * old-style unprototyped empty parameter list. */
uint64_t *get_kernel_page_dir(void)
{
    return kernel_page_dir;
}

 void map_page(uint64_t *pgdir, uint64_t vaddr, uint64_t paddr, uint64_t flags, bool force)
 {
    if (!kernel_page_dir)
        kernel_page_dir = pgdir;

    // 规范化虚拟地址并确保地址按页对齐
    vaddr = canonicalize_va(vaddr) & ~(PAGE_SIZE - 1);
    paddr &= ~(PAGE_SIZE - 1);

    // 确保Valid位被设置
    flags |= ARCH_PT_FLAG_VALID;

    // 获取各级页表索引
    uint64_t pgd_idx = PGD_INDEX(vaddr);
    uint64_t pud_idx = PUD_INDEX(vaddr);
    uint64_t pmd_idx = PMD_INDEX(vaddr);
    uint64_t pte_idx = PTE_INDEX(vaddr);

    pgd_t *pgd_entry = phys_to_virt(&pgdir[pgd_idx]);
    pud_t *pud_table;

    if (!pte_present(*pgd_entry)) {
        // 分配新的PUD页表
        pud_table = (pud_t *)alloc_frames(1);
        if (!pud_table) {
            return 0; // 内存分配失败
        }

        // 清零新分配的页表
        for (int i = 0; i < PTRS_PER_PUD; i++) {
            phys_to_virt(pud_table)[i] = 0;
        }

        // 设置PGD项指向新的PUD表
        uint64_t pud_pfn = ((uint64_t)pud_table) >> PAGE_SHIFT;
        *pgd_entry = pfn_to_pte(pud_pfn, ARCH_PT_FLAG_VALID);
    } else {
        // 获取现有PUD表地址
        uint64_t pud_pfn = pte_to_pfn(*pgd_entry);
        pud_table = (pud_t *)(pud_pfn << PAGE_SHIFT);
    }

    pud_t *pud_entry = phys_to_virt(&pud_table[pud_idx]);
    pmd_t *pmd_table;

    if (!pte_present(*pud_entry)) {
        // 分配新的PMD页表
        pmd_table = (pmd_t *)alloc_frames(1);
        if (!pmd_table) {
            return 0; // 内存分配失败
        }

        // 清零新分配的页表
        for (int i = 0; i < PTRS_PER_PMD; i++) {
            phys_to_virt(pmd_table)[i] = 0;
        }

        // 设置PUD项指向新的PMD表
        uint64_t pmd_pfn = ((uint64_t)pmd_table) >> PAGE_SHIFT;
        *pud_entry = pfn_to_pte(pmd_pfn, ARCH_PT_FLAG_VALID);
    } else {
        // 检查是否为1GB大页映射
        if (pte_is_leaf(*pud_entry)) {
            return 0; // 已存在1GB大页映射，冲突
        }

        // 获取现有PMD表地址
        uint64_t pmd_pfn = pte_to_pfn(*pud_entry);
        pmd_table = (pmd_t *)(pmd_pfn << PAGE_SHIFT);
    }

    pmd_t *pmd_entry = phys_to_virt(&pmd_table[pmd_idx]);
    pte_t *pte_table;

    if (!pte_present(*pmd_entry)) {
        // 分配新的PTE页表
        pte_table = (pte_t *)alloc_frames(1);
        if (!pte_table) {
            return 0; // 内存分配失败
        }

        // 清零新分配的页表
        for (int i = 0; i < PTRS_PER_PTE; i++) {
            phys_to_virt(pte_table)[i] = 0;
        }

        // 设置PMD项指向新的PTE表
        uint64_t pte_pfn = ((uint64_t)pte_table) >> PAGE_SHIFT;
        *pmd_entry = pfn_to_pte(pte_pfn, ARCH_PT_FLAG_VALID);
    } else {
        // 检查是否为2MB大页映射
        if (pte_is_leaf(*pmd_entry)) {
            return 0; // 已存在2MB大页映射，冲突
        }

        // 获取现有PTE表地址
        uint64_t pte_pfn = pte_to_pfn(*pmd_entry);
        pte_table = (pte_t *)(pte_pfn << PAGE_SHIFT);
    }

    pte_t *pte_entry = phys_to_virt(&pte_table[pte_idx]);

    if (pte_present(*pte_entry) && !force) {
        return 0; // 页面已被映射
    }

    // 创建页表项映射
    uint64_t pfn = paddr >> PAGE_SHIFT;
    *pte_entry = pfn_to_pte(pfn, flags);

    arch_flush_tlb(vaddr);

    return 0;
 }

 bool stack_range(uint64_t pml4_idx, uint64_t pdpt_idx, uint64_t pd_idx, uint64_t pt_idx, uint64_t user_stack_start, uint64_t user_stack_end)
{
    uint64_t addr = pml4_idx << 39 | pdpt_idx << 30 | pd_idx << 21 | pt_idx << 12;
    return user_stack_start <= addr && addr < user_stack_end;
}

/*
 * Clone an Sv48 page-table tree (fork-style address-space duplication).
 *
 * The kernel half of the top level (indices >= 256) is shared with the
 * old root. User-half intermediate tables are duplicated level by
 * level. Leaf PTEs are shared between the two trees, except pages in
 * [user_stack_start, user_stack_end), which receive a private physical
 * copy of their contents.
 *
 * Returns the physical address of the new root, or root_pa unchanged
 * when any allocation fails.
 *
 * BUGFIX: the original left the huge-page leaf checks commented out and
 * fell through, dereferencing a leaf PTE's PPN as a next-level table
 * pointer; leaf entries are now shared verbatim instead.
 * NOTE(review): tables already allocated for a partial clone are not
 * freed on failure — confirm whether a free_frames() API exists.
 */
uint64_t clone_page_table(uint64_t root_pa, uint64_t user_stack_start, uint64_t user_stack_end)
{
    const size_t ENTRIES = 512;        // 512 entries per table at every level
    const size_t KERN_IDX_SPLIT = 256; // top-level index where kernel half begins

    // Allocate the new root page (physical address).
    uint64_t new_root_pa = alloc_frames(1);
    if (new_root_pa == 0) {
        printk("Cannot clone page table: no page can be allocated");
        return root_pa; // fall back to the old root on failure
    }

    uint64_t *old_root = (uint64_t *)phys_to_virt(root_pa);
    uint64_t *new_root = (uint64_t *)phys_to_virt(new_root_pa);

    // Zero only the user half (first 256 entries * 8 bytes); the kernel
    // half is fully overwritten by the copy loop just below.
    memset(new_root, 0, PAGE_SIZE / 2);

    // Share the kernel half of the address space with the old tree.
    for (size_t i = KERN_IDX_SPLIT; i < ENTRIES; ++i) {
        new_root[i] = old_root[i];
    }

    // Walk the user-space top-level entries (0 .. KERN_IDX_SPLIT-1).
    for (size_t l3_idx = 0; l3_idx < KERN_IDX_SPLIT; ++l3_idx) {
        uint64_t l3e_old = old_root[l3_idx];
        if (!pte_present(l3e_old)) continue;

        // A leaf at this level is a 512 GiB huge mapping: share the
        // entry rather than misreading its PPN as a table pointer.
        if (pte_is_leaf(l3e_old)) {
            new_root[l3_idx] = l3e_old;
            continue;
        }

        // Create a new second-level table.
        uint64_t l2_pa = alloc_frames(1);
        if (l2_pa == 0) { printk("clone_page_table: alloc l2 failed\n"); return root_pa; }
        new_root[l3_idx] = pfn_to_pte(l2_pa >> PAGE_SHIFT, ARCH_PT_FLAG_VALID | (l3e_old & (ARCH_PT_FLAG_WRITE|ARCH_PT_FLAG_USER)));

        uint64_t *l2_old = (uint64_t *)phys_to_virt((pte_to_pfn(l3e_old) << PAGE_SHIFT));
        uint64_t *l2_new = (uint64_t *)phys_to_virt(l2_pa);
        memset(l2_new, 0, PAGE_SIZE);

        for (size_t l2_idx = 0; l2_idx < ENTRIES; ++l2_idx) {
            uint64_t l2e_old = l2_old[l2_idx];
            if (!pte_present(l2e_old)) continue;

            // 1 GiB huge mapping: share it verbatim.
            if (pte_is_leaf(l2e_old)) {
                l2_new[l2_idx] = l2e_old;
                continue;
            }

            // Allocate a new L1 table.
            uint64_t l1_pa = alloc_frames(1);
            if (l1_pa == 0) { printk("clone_page_table: alloc l1 failed\n"); return root_pa; }
            l2_new[l2_idx] = pfn_to_pte(l1_pa >> PAGE_SHIFT, ARCH_PT_FLAG_VALID | (l2e_old & (ARCH_PT_FLAG_WRITE|ARCH_PT_FLAG_USER)));

            uint64_t *l1_old = (uint64_t *)phys_to_virt((pte_to_pfn(l2e_old) << PAGE_SHIFT));
            uint64_t *l1_new = (uint64_t *)phys_to_virt(l1_pa);
            memset(l1_new, 0, PAGE_SIZE);

            for (size_t l1_idx = 0; l1_idx < ENTRIES; ++l1_idx) {
                uint64_t l1e_old = l1_old[l1_idx];
                if (!pte_present(l1e_old)) continue;

                // 2 MiB huge mapping: share it verbatim.
                if (pte_is_leaf(l1e_old)) {
                    l1_new[l1_idx] = l1e_old;
                    continue;
                }

                // Allocate a new L0 (pte) table.
                uint64_t l0_pa = alloc_frames(1);
                if (l0_pa == 0) { printk("clone_page_table: alloc l0 failed\n"); return root_pa; }
                l1_new[l1_idx] = pfn_to_pte(l0_pa >> PAGE_SHIFT, ARCH_PT_FLAG_VALID | (l1e_old & (ARCH_PT_FLAG_WRITE|ARCH_PT_FLAG_USER)));

                uint64_t *l0_old = (uint64_t *)phys_to_virt((pte_to_pfn(l1e_old) << PAGE_SHIFT));
                uint64_t *l0_new = (uint64_t *)phys_to_virt(l0_pa);
                memset(l0_new, 0, PAGE_SIZE);

                for (size_t l0_idx = 0; l0_idx < ENTRIES; ++l0_idx) {
                    uint64_t pte_old = l0_old[l0_idx];
                    if (!pte_present(pte_old)) continue;

                    // Reconstruct the virtual address this PTE maps so
                    // the stack range can be tested:
                    // va = l3<<39 | l2<<30 | l1<<21 | l0<<12
                    uint64_t vaddr = ((uint64_t)l3_idx << PGDIR_SHIFT)
                                   | ((uint64_t)l2_idx << PUD_SHIFT)
                                   | ((uint64_t)l1_idx << PMD_SHIFT)
                                   | ((uint64_t)l0_idx << PTE_SHIFT);

                    // Sign-extend to canonical form before comparing.
                    vaddr = canonicalize_va(vaddr);

                    if (vaddr >= user_stack_start && vaddr < user_stack_end) {
                        // Stack page: give the clone a private copy.
                        uint64_t new_page_pa = alloc_frames(1);
                        if (new_page_pa == 0) { printk("clone_page_table: alloc page failed\n"); return root_pa; }

                        // Same permission bits, new physical frame.
                        uint64_t new_pte = pfn_to_pte(new_page_pa >> PAGE_SHIFT, pte_old & PTE_FLAGS_MASK);
                        l0_new[l0_idx] = new_pte;

                        // Copy the page contents.
                        void *old_page_v = (void *)phys_to_virt(pte_to_pfn(pte_old) << PAGE_SHIFT);
                        void *new_page_v = (void *)phys_to_virt(new_page_pa);
                        memcpy(new_page_v, old_page_v, PAGE_SIZE);
                    } else {
                        // Any other page is shared with the parent.
                        l0_new[l0_idx] = pte_old;
                    }
                } // end l0 loop
            } // end l1 loop
        } // end l2 loop
    } // end l3 loop

    return new_root_pa;
}
