#include <tinx/interrupt.h>
#include <tinx/printk.h>
#include <tinx/memory.h>
#include <tinx/task.h>
#include <tinx/debug.h>
#include <tinx/swap.h>
#include <tinx/global.h>
#include <tinx/bl.h>
#include <tinx/cpu.h>
#include <tinx/task.h>
#include <tinx/fs.h>
#include <tinx/bitmap.h>
#include <tinx/stdlib.h>
#include <tinx/string.h>
#include <tinx/assert.h>
#include <tinx/errno.h>
#include <tinx/syscall.h>

// #define LOGK(str, args...) DEBUGK(str, ##args)
#define LOGK(str, arg...)

#define KERNEL_BITMAP_ADDR 0x90000

// Address Range Descriptor Structure: one entry of the BIOS e820 memory
// map, copied by the bootloader and consumed in memory_init().
typedef struct ards_t
{
    u64 base;   // physical base address of the region
    u64 length; // region length in bytes
    u32 type;   // region type; 1 = usable RAM (see memory_init)
} _packed ards_t;

// Physical-memory layout discovered at boot (filled in by memory_init).
static u32 memory_base; // base of the largest usable RAM region
static u32 memory_size; // region size; becomes the END address after memory_init adds the base

static u32 used_pages;  // physical frames currently in use
static u32 free_pages;  // physical frames currently free
static u32 total_pages; // total physical frames
static u32 start_page;  // first frame index available to get_page()

static bitmap_t kernel_map;  // allocator bitmap for kernel pages
static bitmap_t kernel_vmap; // kernel virtual-page bitmap — NOTE(review): not used in this chunk

// Per-frame reference counts: one byte per physical page, indexed by IDX(paddr).
u8 *memory_map;

// Allocate one physical frame and return its physical address.
// Scans the refcount array for an unused frame; when memory is
// exhausted, tries to swap a page out and scans again. Panics only
// when swapping cannot free anything either.
u32 get_page()
{
    for (;;)
    {
        for (u32 idx = start_page; idx < total_pages; idx++)
        {
            if (memory_map[idx])
                continue;
            memory_map[idx] = 1;
            free_pages--;
            used_pages++;
            LOGK("GET page 0x%p\n", PAGE(idx));
            return PAGE(idx);
        }
        if (!swap_out())
            panic("No more pages");
    }
}

// Drop one reference on the physical frame containing `page`; the frame
// is returned to the free pool when its refcount reaches zero.
void put_page(u32 page)
{
    u32 idx = IDX(page);
    // BUGFIX: the refcount is a u8 — decrementing past zero silently
    // wraps to 255 on a double-free, corrupting the frame accounting.
    assert(memory_map[idx] > 0);
    memory_map[idx]--;
    if (memory_map[idx] == 0)
    {
        free_pages++;
        used_pages--;
    }
    LOGK("PUT page 0x%p count %d\n", page, memory_map[idx]);
}

// Load `cr3` as the page-directory base register. Reloading CR3 also
// flushes non-global TLB entries, which copy_pgdir relies on.
void set_cr3(u32 cr3)
{
    asm volatile(
        "movl %%eax, %%cr3\n" :: "a"(cr3));
}

// Read the page-directory base register (CR3).
// BUGFIX: the original had no return statement and relied on the value
// happening to be left in EAX — undefined behavior in C and fragile
// under optimization. Use an explicit output operand and return it.
u32 get_cr3()
{
    u32 cr3;
    asm volatile("movl %%cr3, %0\n" : "=r"(cr3));
    return cr3;
}

// Invalidate the TLB entry caching the translation of virtual address vaddr.
void flush_tlb(u32 vaddr)
{
    asm volatile("invlpg (%0)" ::"r"(vaddr)
                 : "memory");
}

// Initialize a page directory/table entry in place: clear it, then mark
// it user-accessible, writable, present and dirty, pointing at the
// physical frame containing `paddr`.
// NOTE: the fields are written one by one into the (possibly live)
// entry, so the zeroing-first order matters — do not reorder.
void entry_init(page_entry_t *entry, u32 paddr)
{
    *(u32 *)entry = 0;
    entry->user = 1;
    entry->write = 1;
    entry->present = 1;
    entry->dirty = 1;
    entry->index = IDX(paddr);
}

// Clear an entry entirely (present bit included) with a single store.
void entry_free(page_entry_t *entry)
{
    *(u32 *)entry = 0;
}

page_entry_t *get_pgdir()
{
    return (page_entry_t *)0xfffff000;
}

// Page directory entry for `vaddr`, reached through the self-mapped
// page directory.
page_entry_t *get_pde(u32 vaddr)
{
    page_entry_t *dir = get_pgdir();
    return &dir[DIDX(vaddr)];
}

// Return a pointer to the PTE for `vaddr`, using the recursive mapping
// window (base PDE_MASK). If the page table is absent and `create` is
// set, a frame is allocated for it and zeroed. With create == false the
// caller may receive a pointer into a non-present table — callers check
// the present bit themselves.
page_entry_t *get_pte(u32 vaddr, bool create)
{
    page_entry_t *pde = get_pde(vaddr);
    // Virtual address at which this vaddr's page table is visible.
    page_entry_t *taddr = (page_entry_t *)(PDE_MASK + (DIDX(vaddr) << 12));
    if (!pde->present && create)
    {
        u32 paddr = get_page();
        entry_init(pde, paddr);
        memset((u8 *)taddr, 0, PAGE_SIZE);
    }
    return &taddr[TIDX(vaddr)];
}

// Translate a virtual address to its physical address: frame base from
// the PTE plus the offset within the page.
u32 get_paddr(u32 vaddr)
{
    page_entry_t *pte = get_pte(vaddr, false);
    u32 offset = vaddr & 0xfff;
    return PAGE(pte->index) + offset;
}

// Recover the virtual address mapped by a PTE pointer obtained through
// the recursive mapping: the PTE's own address encodes the directory and
// table indices, so shifting left by 10 reconstructs the page-aligned
// virtual address.
u32 pte_vaddr(page_entry_t *entry)
{
    u32 vaddr = ((u32)entry << 10) & (~0xfff);
    return vaddr;
}

// Reserve `count` consecutive pages in `map` and return the address of
// the first one, or NULL when no run of that length is available.
u32 scan_pages(bitmap_t *map, u32 count)
{
    idx_t idx = bitmap_scan(map, count);
    return idx == EOF ? (u32)NULL : PAGE(idx);
}

// Clear `count` bitmap bits starting at the page index of `pages`.
void reset_pages(bitmap_t *map, u32 pages, u32 count)
{
    idx_t start = IDX(pages);
    for (u32 i = 0; i < count; i++)
        bitmap_set(map, start + i, false);
}

// Return a contiguous run of `count` kernel pages starting at `pages`
// to the kernel allocator bitmap.
void free_kpage(u32 pages, u32 count)
{
    reset_pages(&kernel_map, pages, count);
    LOGK("FREE kernel pages 0x%p count %d\n", pages, count);
}

// Allocate `count` contiguous kernel pages from the kernel bitmap and
// return the address of the first; panics when the kernel pool is empty.
u32 alloc_kpage(u32 count)
{
    u32 pages = scan_pages(&kernel_map, count);
    if (!pages)
    {
        panic("No more kernel pages");
    }
    // BUGFIX: format string said "0%p" — every other log in this file
    // prints addresses as "0x%p".
    LOGK("ALLOC kernel pages 0x%p count %d\n", pages, count);
    return pages;
}

// Map `count` pages, vaddr -> paddr onward (both must be page aligned).
// Page tables are created on demand. Remapping an already-present page
// is only allowed if it already points at the same frame.
void map_page(u32 vaddr, u32 paddr, u32 count)
{
    ASSERT_PAGE(vaddr);
    ASSERT_PAGE(paddr);

    for (int i = 0; i < count; i++)
    {
        page_entry_t *entry = get_pte(vaddr, true);
        if (entry->present)
        {
            // Already mapped: must agree with the requested frame.
            assert(PAGE(entry->index) == paddr);
        }
        entry_init(entry, paddr);
        flush_tlb(vaddr);

        vaddr += PAGE_SIZE;
        paddr += PAGE_SIZE;
    }
}

// Remove `count` existing mappings starting at vaddr. Each page must
// currently be present; the backing frames are NOT released here.
void unmap_page(u32 vaddr, u32 count)
{
    for (u32 i = 0; i < count; i++)
    {
        page_entry_t *pte = get_pte(vaddr, false);
        assert(pte->present);
        entry_free(pte);
        flush_tlb(vaddr);
        vaddr += PAGE_SIZE;
    }
}

// Identity-map `len` bytes starting at vaddr (rounded up to pages).
void map_area(u32 vaddr, u32 len)
{
    u32 count = div_round_up(len, PAGE_SIZE);
    map_page(vaddr, vaddr, count);
}

// Duplicate the page mapped at virtual address `page` into a freshly
// allocated frame and return the new frame's physical address. A fixed
// kernel scratch vaddr is temporarily mapped onto the new frame so it
// can be written before it is installed anywhere.
// NOTE(review): the static scratch address makes this non-reentrant —
// confirm it is only reached with preemption/interrupts excluded.
static u32 copy_page(u32 page)
{
    static u32 vaddr = 0xc0000000; // scratch mapping for the new frame

    page_entry_t *entry = get_pte(vaddr, true);

    u32 paddr = get_page();
    entry_init(entry, paddr);
    flush_tlb(vaddr);

    // Source is read through its existing (possibly read-only) mapping.
    memcpy((u8 *)vaddr, (u8 *)page, PAGE_SIZE);

    entry_free(entry);
    flush_tlb(vaddr);

    return paddr;
}

// Resolve copy-on-write for `vaddr`. Because PTEs are reached through
// the recursive mapping, the page holding the PTE (the page table) may
// itself be copy-on-write: the function first recurses on the PTE's own
// virtual address, with `level` bounding the recursion depth, and then
// fixes the leaf. A frame referenced exactly once is simply made
// writable again; a shared frame is duplicated, the old reference
// dropped, and the entry repointed at the private copy.
void copy_on_write(u32 vaddr, int level)
{
    if (!level)
    {
        return;
    }

    page_entry_t *entry = get_pte(vaddr, false);

    // Make the page table containing `entry` writable first.
    copy_on_write((u32)entry, level - 1);

    if (entry->write)
    {
        return;
    }

    assert(memory_map[entry->index] > 0);

    if (memory_map[entry->index] == 1)
    {
        // Sole owner: just restore the write permission.
        entry->write = true;
    }
    else
    {
        // Shared frame: copy it and drop our reference on the original.
        u32 paddr = copy_page(PAGE(IDX(vaddr)));
        put_page(PAGE(entry->index));

        entry->index = IDX(paddr);
        entry->write = true;
    }

    flush_tlb(vaddr);
}

// Ensure the page-aligned `vaddr` is backed by a present frame,
// allocating one if absent; returns vaddr unchanged. The page-table
// pages above the PTE may be copy-on-write, so that is resolved before
// the new entry is written.
u32 link_page(u32 vaddr)
{
    ASSERT_PAGE(vaddr);

    page_entry_t *entry = get_pte(vaddr, true);
    if (entry->present)
        return vaddr;

    // Resolve CoW on the paging structures holding this PTE.
    copy_on_write((u32)entry, 2);

    u32 paddr = get_page();
    entry_init(entry, paddr);

    flush_tlb(vaddr);

    LOGK("LINK page 0x%p\n", vaddr);
    return vaddr;
}

// Remove the mapping for the page-aligned `vaddr` and drop one
// reference on its backing frame. A missing directory or table entry
// means nothing is mapped, so the call is a no-op.
void unlink_page(u32 vaddr)
{
    ASSERT_PAGE(vaddr);

    page_entry_t *pde = get_pde(vaddr);
    if (!pde->present)
        return;

    page_entry_t *entry = get_pte(vaddr, false);
    if (!entry->present)
        return;

    // Resolve CoW on the way down so the page table can be modified.
    copy_on_write(vaddr, 2);

    u32 paddr = PAGE(entry->index);
    put_page(paddr);

    entry_free(entry);
    flush_tlb(vaddr);

    LOGK("UNLINK page 0x%p\n", vaddr);
}

// Duplicate the current task's page directory (fork). Every mapped user
// page gets its refcount bumped and — unless marked shared — its write
// bit cleared, so both parent and child fault on the next write
// (copy-on-write). Swapped-out pages are brought back in first so their
// frames can be refcounted. The copy's last entry is pointed back at the
// copy itself (recursive mapping), and cr3 is reloaded to flush the
// parent's now-stale writable TLB entries.
page_entry_t *copy_pgdir()
{
    task_t *task = running_task();
    page_entry_t *pgdir = (page_entry_t *)task->pgdir;

    u32 start = DIDX(USER_EXEC_ADDR);
    u32 count = DIDX(USER_MEMORY_END);

    for (int didx = start; didx < count; didx++)
    {
        page_entry_t *pde = pgdir + didx;

        if (!pde->present)
        {
            continue;
        }

        assert(memory_map[pde->index] > 0);
        // Page table visible through the recursive-mapping window.
        page_entry_t *taddr = (page_entry_t *)(PDE_MASK + (didx << 12));

        for (int tidx = 0; tidx < 1024; tidx++)
        {
            page_entry_t *pte = taddr + tidx;
            if (!*(u32 *)pte)
            {
                continue;
            }
            if (!pte->present)
            {
                // Swapped out: bring the page back so it has a frame.
                swap_in(pte);
            }
            assert(memory_map[pte->index] > 0);
            memory_map[pte->index]++;
            if (!pte->shared)
            {
                // Private page: write-protect for copy-on-write.
                pte->write = false;
            }
            continue;
        }

        // The page table frame is now shared by both directories.
        memory_map[pde->index]++;
        pde->write = false;
    }

    page_entry_t *new_pgdir = (page_entry_t *)alloc_kpage(1);
    memcpy(new_pgdir, pgdir, PAGE_SIZE);

    entry_init(&new_pgdir[1023], (u32)new_pgdir);
    // Reload cr3 to flush stale writable TLB entries of the parent.
    set_cr3((u32)pgdir);

    return new_pgdir;
}

// Tear down the current task's user address space: drop one reference
// on every mapped frame (or release the swap slot for swapped-out
// pages), release each page table's frame, and finally free the
// directory page itself.
void free_pgdir()
{
    task_t *task = running_task();
    page_entry_t *pgdir = (page_entry_t *)task->pgdir;

    u32 start = DIDX(USER_EXEC_ADDR);
    u32 count = DIDX(USER_MEMORY_END);

    for (int didx = start; didx < count; didx++)
    {
        page_entry_t *pde = pgdir + didx;

        if (!pde->present)
        {
            continue;
        }

        // Page table visible through the recursive-mapping window.
        page_entry_t *taddr = (page_entry_t *)(PDE_MASK + (didx << 12));

        for (int tidx = 0; tidx < 1024; tidx++)
        {
            page_entry_t *pte = taddr + tidx;

            if (!*(u32 *)pte)
            {
                continue;
            }
            if (pte->present)
            {
                put_page(PAGE(pte->index));
            }
            else
            {
                // Non-present but non-zero: the entry holds a swap slot.
                swap_entry_t *entry = (swap_entry_t *)pte;
                put_swap_page(entry->index);
            }
        }
        put_page(PAGE(pde->index));
    }
    free_kpage((u32)pgdir, 1);
}

// Read CR2, which holds the faulting linear address after a page fault.
// BUGFIX: the original had no return statement and relied on the value
// happening to be left in EAX — undefined behavior in C. Use an
// explicit output operand and return it.
u32 get_cr2()
{
    u32 cr2;
    asm volatile("movl %%cr2, %0\n" : "=r"(cr2));
    return cr2;
}

// Set the program break. Growth is lazy — new pages are allocated on
// demand in page_fault; shrinking unmaps the released pages right away.
// Requests outside the user range (or 0) return the current break
// unchanged. Returns the resulting break.
// NOTE(review): newbrk is used directly as a page boundary and
// unlink_page asserts page alignment — confirm callers page-align it.
int sys_brk(u32 newbrk)
{
    task_t *task = running_task();

    LOGK("brk 0x%p\n", newbrk);

    if (newbrk < USER_EXEC_ADDR || newbrk >= USER_STACK_BOTTOM || !newbrk)
        return task->brk;

    u32 old_brk = task->brk;

    // Unmap the released memory
    if (old_brk > newbrk)
    {
        for (u32 page = newbrk; page < old_brk; page += PAGE_SIZE)
        {
            unlink_page(page);
        }
    }

    task->brk = newbrk;
    return task->brk;
}

// Change page protections for [addr, addr+length). Only the x86 PTE
// write bit is modeled: PROT_READ (and PROT_NONE) clear it, PROT_WRITE
// sets it — PROT_WRITE is checked last, so it wins when combined.
// NOTE(review): PROT_NONE is conventionally defined as 0; if so,
// `prot & PROT_NONE` can never be true and both PROT_NONE branches are
// dead code — verify PROT_NONE's definition in the headers.
int sys_mprotect(void *addr, size_t length, int prot)
{
    DEBUGK("mprotect addr 0x%p length 0x%x prot 0x%x\n", addr, length, prot);

    u32 vaddr = (u32)addr;

    if (vaddr < USER_EXEC_ADDR || vaddr >= USER_MEMORY_END)
        return -EFAULT;

    if (prot & PROT_NONE && (prot & ~PROT_NONE) != 0)
        return -EINVAL;

    for (int i = 0; i < div_round_up(length, PAGE_SIZE); i++)
    {
        u32 page = vaddr + PAGE_SIZE * i;
        page_entry_t *entry = get_pte(page, false);
        assert(entry->user);
        if (!entry->present)
            return -ENOMEM;
        if (prot & PROT_NONE)
            entry->write = false;
        if (prot & PROT_READ)
            entry->write = false;
        if (prot & PROT_WRITE)
            entry->write = true;
        flush_tlb(page);
    }

    return EOK;
}

// Unmap `length` bytes starting at the page-aligned `addr`, releasing
// the vmap bitmap bits for pages inside the mmap window and dropping
// the page mappings themselves.
int sys_munmap(void *addr, size_t length)
{
    u32 vaddr = (u32)addr;
    ASSERT_PAGE(vaddr);

    task_t *task = running_task();
    assert(task->vmap);

    u32 count = div_round_up(length, PAGE_SIZE);

    for (u32 page = vaddr; count > 0; count--, page += PAGE_SIZE)
    {
        // Only pages in the mmap window are tracked by the vmap bitmap.
        if (page >= USER_MMAP_ADDR && page < USER_MMAP_END)
            bitmap_set(task->vmap, IDX(page), false);
        unlink_page(page);
    }
    return EOK;
}

// Map `length` bytes for the calling task and return the mapped address
// (or a negative errno). Addresses inside the user exec range are
// remapped in place (MAP_FIXED is ignored there); otherwise a free
// range is taken from the task's vmap bitmap. prot/flags select the
// write/shared PTE bits, which are OR-ed into every mapped entry. With
// a valid fd, the file contents are read into the mapping.
// NOTE(review): scan_pages can return 0 when the vmap window is full;
// the result is not checked before mapping — confirm intended.
// NOTE(review): the return value of inode->op->read is ignored.
int sys_mmap(void *addr, size_t length, int prot, int flags, int fd, int offset)
{
    u32 vaddr = (u32)addr;
    ASSERT_PAGE(vaddr);

    task_t *task = running_task();
    assert(task->vmap);

    u32 pages = div_round_up(length, PAGE_SIZE);

    // assert(vaddr >= USER_EXEC_ADDR && vaddr < USER_MEMORY_END);

    DEBUGK("mmap 0x%p length 0x%x pages %d\n", addr, length, pages);

    // Requests inside the exec range are always honored in place.
    if (vaddr >= USER_EXEC_ADDR && vaddr < USER_MEMORY_END)
        flags &= ~MAP_FIXED;

    if (flags & MAP_FIXED)
        return -EINVAL;

    // Build the PTE bit pattern implied by prot/flags.
    page_entry_t emask;
    *(u32 *)&emask = 0;
    emask.present = true;
    emask.user = true;

    if (prot & (PROT_READ | PROT_EXEC))
    {
        emask.write = false;
    }

    if (!(prot & PROT_WRITE))
    {
        goto map;
    }

    emask.write = true;

    if ((flags & MAP_TYPE) == MAP_PRIVATE)
    {
        emask.shared = false;
        goto map;
    }

    if ((flags & MAP_TYPE) == MAP_SHARED)
    {
        emask.shared = true;
        goto map;
    }

    return -EINVAL;

map:
    // Integer view of the entry mask
    u32 mask = *(u32 *)&emask;

    if (!(vaddr >= USER_EXEC_ADDR && vaddr < USER_MEMORY_END))
    {
        // Pick a free range from the task's mmap window.
        addr = (void *)scan_pages(task->vmap, div_round_up(length, PAGE_SIZE));
    }
    else
    {
        // In-place remap: drop any existing mappings first.
        sys_munmap(addr, length);
    }

    // Note: this `vaddr` shadows the outer one.
    for (u32 i = 0, vaddr = (u32)addr; i < pages; i++, vaddr += PAGE_SIZE)
    {
        link_page(vaddr);
        page_entry_t *entry = get_pte(vaddr, false);
        *(u32 *)entry |= mask;
        flush_tlb(vaddr);
    }

    if (fd < 0 || fd >= OPEN_NR)
        return (int)addr;

    file_t *file = task->files[fd];
    if (!file)
        return -EBADF;
    inode_t *inode = file->inode;
    inode->op->read(inode, addr, length, offset);

    return (int)addr;
}

// mmap2 variant: `offset` is given in pages, not bytes.
// This is not quite right because offsets >= 4 GiB still cannot be
// expressed after converting back to bytes, but it is kept this way for
// musl compatibility.
int sys_mmap2(void *addr, size_t length, int prot, int flags, int fd, int offset)
{
    assert(offset <= 0xfffff);
    return sys_mmap(addr, length, prot, flags, fd, offset * PAGE_SIZE);
}

// Page-fault error code pushed by the CPU (Intel SDM Vol. 3A, §4.7).
typedef struct page_error_code_t
{
    u8 present : 1;    // 0 = page not present, 1 = protection violation
    u8 write : 1;      // fault caused by a write access
    u8 user : 1;       // fault occurred in user mode
    u8 RESERVED : 1;   // reserved-bit violation
    u8 fetch : 1;      // instruction-fetch fault
    u8 protection : 1; // protection-key violation
    u8 shadow : 1;     // shadow-stack access fault
    u16 reserved1 : 8;
    u8 sgx : 1;        // SGX access-control violation
    u16 RESERVED2;     // BUGFIX: was a second member named RESERVED —
                       // duplicate member names are invalid C
} _packed page_error_code_t;

// Page-fault (#PF) handler; the parameters are the register/trap frame
// laid down by the interrupt entry stub. The faulting linear address is
// read from CR2. Dispatch order:
//   1. present-page protection fault        -> copy-on-write resolution;
//   2. table present, PTE holds a swap slot -> swap the page back in;
//   3. address inside the user range        -> demand-allocate the page;
//   otherwise report the interrupt frame and panic.
void page_fault(
    u32 vector, u32 edi, u32 esi, u32 ebp, u32 esp_dummy,
    u32 ebx, u32 edx, u32 ecx, u32 eax,
    u32 gs, u32 fs, u32 es, u32 ds,
    u32 vector0, u32 error, u32 eip, u32 cs, u32 eflags, u32 esp, u32 ss)
{
    u32 vaddr = get_cr2();
    page_error_code_t *code = (page_error_code_t *)&error;
    task_t *task = running_task();

    LOGK("page fault vaddr 0x%p eip 0x%p task %s\n", vaddr, eip, task->name);

    if (code->present)
    {
        // Write to a present but write-protected page: CoW.
        page_entry_t *entry = get_pte(vaddr, false);
        assert(entry->present);
        copy_on_write(vaddr, 3);
        return;
    }

    page_entry_t *pde = get_pde(vaddr);
    if (pde->present)
    {
        // Non-present PTE with a non-zero index records a swap slot.
        swap_entry_t *entry = (swap_entry_t *)get_pte(vaddr, false);
        if (!entry->present && entry->index)
        {
            swap_in(entry);
            return;
        }
    }

    if (vaddr < USER_MEMORY_END && vaddr >= USER_EXEC_ADDR)
    {
        // Lazy allocation: back the faulting page on first touch.
        link_page(PAGE(IDX(vaddr)));
        return;
    }

    intr_fault(element_entry(intr_frame_t, vector, &vector));
    panic("page fault!!! vaddr 0x%p\n", vaddr);
}

// Turn on paging: identity-map the first 16 MiB (4 page tables of 1024
// entries), install the recursive mapping in the last directory entry,
// load cr3, set CR0.PG, and finally unmap page 0 so that null-pointer
// dereferences fault.
void mapping_init()
{
    // Allocate a page for the page directory
    page_entry_t *pgdir = (page_entry_t *)alloc_kpage(1);

    int index = 0;

    // Clear the page directory
    memset(pgdir, 0, PAGE_SIZE);

    for (u32 didx = 0; didx < 4; didx++)
    {
        // Page directory entry
        page_entry_t *pde = &pgdir[didx];

        // Allocate one page table
        page_entry_t *taddr = (page_entry_t *)alloc_kpage(1);

        // Point the PDE at the page table
        entry_init(pde, (u32)taddr);

        for (u32 tidx = 0; tidx < 1024; tidx++, index++)
        {
            // Page table entry
            page_entry_t *pte = taddr + tidx;

            // Identity-map frame `index`
            entry_init(pte, PAGE(index));
        }
    }

    // Map the last entry onto the directory itself for easy access
    // (recursive mapping used by get_pgdir/get_pte).
    entry_init(&pgdir[1023], (u32)pgdir);

    // Enable paging with this directory
    set_cr3((u32)pgdir);
    set_cr0(get_cr0() | CR0_PG);

    unmap_page(0, 1);
}

// Discover physical memory from the bootloader's ARDS (e820) table,
// pick the largest usable region, and set up the physical allocator:
// the memory_map refcount array lives at the region base, and the
// kernel bitmap manages the kernel's own page range.
void memory_init()
{
    u32 addr = get_bl_addr();

    // Bootloader stores the entry count followed by the ARDS entries.
    int ards_count = *(u32 *)addr;
    ards_t *ards = (ards_t *)(addr + 4);

    for (int i = 0; i < ards_count; i++)
    {
        ards_t *ard = ards + i;

        // Keep the largest type-1 (usable RAM) region.
        if ((u32)ard->length > memory_size && ard->type == 1)
        {
            memory_size = (u32)ard->length;
            memory_base = (u32)ard->base;
        }

        LOGK("Memory base %p length %p type %d\n",
            (u32)ard->base, (u32)ard->length, ard->type);
    }

    assert(memory_base >= 0x100000);
    LOGK("Memory Base 0x%p Size 0x%X\n", memory_base, memory_size);

    // From here on, memory_size holds the END address of usable RAM.
    memory_size += memory_base;

    // One refcount byte per physical frame, stored at the region base.
    memory_map = (u8 *)memory_base;

    total_pages = memory_size / PAGE_SIZE;
    memset(memory_map, 0, total_pages);

    // Frames consumed by the refcount array itself plus everything
    // below memory_base.
    used_pages = div_round_up(total_pages, PAGE_SIZE) + memory_base / PAGE_SIZE;

    start_page = used_pages;

    // Kernel pages (minus the ramdisk) managed by the kernel bitmap.
    u32 pages = (((KERNEL_MEMORY_SIZE - KERNEL_RAMDISK_SIZE) / PAGE_SIZE) - used_pages);
    bitmap_init(&kernel_map, (u8 *)KERNEL_BITMAP_ADDR, pages / 8, start_page);

    u32 ramdisk_pages = KERNEL_RAMDISK_SIZE / PAGE_SIZE;

    used_pages += pages + ramdisk_pages;
    start_page += pages + ramdisk_pages;
    free_pages = total_pages - used_pages;
    // Mark everything below start_page as permanently referenced.
    memset(memory_map, 1, used_pages);
}