#include <mm/mm.h>
#include <ds/lru.h>
#include <arch/cpu.h>
#include <string.h>
#include <common.h>

#define LOG_TAG "mm"
#include <log.h>

static uptr vmap_start = VMAP_START;

page_t pm_table[PAGE_MAX_SIZE];

uptr max_pg;

extern lru_t swap_lru;

uptr lookup_pn_ptr;

/* Initialize the physical page table.
 * `end` is the highest physical address; pages above it are never handed out. */
void mm_init(u32 end) {
    max_pg = end >> 12;
    lookup_pn_ptr = PM_PN_START;

    /* Mark every frame as reserved (ref_counts = 1) with a self-linked,
     * empty LRU node; usable frames are released later via mark_*_free. */
    for (page_t* p = pm_table; p < pm_table + PAGE_MAX_SIZE; p++) {
        *p = (page_t){
            .ref_counts = 1,
            .lru.next = &p->lru,
            .lru.prev = &p->lru,
        };
    }
}

/* Put page `pn` on the swap LRU unless it is exempt from eviction.
 * Kernel pages and locked (pinned) pages never participate in swap. */
static inline void add_to_lru(u32 pn, u32 flags) {
    if (flags & (PG_KERNEL | PG_LOCKED))
        return;
    lru_use_one(&swap_lru, &pm_table[pn].lru);
}

/* Drop one reference on the physical page containing `addr`.
 * When the last reference goes away the page is scrubbed and removed
 * from the swap LRU. Returns 0 on success, -1 on a bad page number or
 * an already-free page. */
int free_page(uptr addr) {
    unsigned pn = addr >> 12;

    /* Bounds-check BEFORE touching pm_table: the old error log read
     * pm_table[pn].ref_counts even for out-of-range pn, which is an
     * out-of-bounds read when pn >= PAGE_MAX_SIZE. */
    if (pn >= max_pg) {
        LOGE("free_page err pn: %X out of range (max_pg: %X)\n", pn, max_pg);
        return -1;
    }

    page_t* p = &pm_table[pn];
    if (!p->ref_counts) {
        LOGE("free_page err pn: %X ref_counts: %d\n", pn, p->ref_counts);
        return -1;
    }

    p->ref_counts--;
    if (!p->ref_counts) {
        /* Last reference gone: reset bookkeeping and unlink from swap LRU. */
        p->flags = 0;
        p->index = 0;
        p->mapping = NULL;
        lru_remove(&swap_lru, &p->lru);
    }
    return 0;
}

/* Take an extra reference on the physical page containing `addr` and
 * refresh its position on the swap LRU.
 * Returns 0 on success, -1 for an out-of-range or unallocated page. */
int ref_page(uptr addr) {
    unsigned pn = addr >> 12;

    if (pn >= max_pg)
        return -1;

    page_t* page = &pm_table[pn];
    if (page->ref_counts == 0)
        return -1;

    page->ref_counts++;
    add_to_lru(pn, page->flags);
    return 0;
}

/* Out-of-memory fallback: try to evict enough pages from the swap LRU
 * to satisfy a request for `count` pages, then retry the allocation.
 * Returns the physical address of the allocated run, or 0 on failure. */
uptr oom_handle(size_t count, u32 flags) {
    size_t used_pages = swap_lru.size;
    if (used_pages < count) {
        LOGE("OOM: not enough pages, need %d, used %d\n", count, used_pages);
        return 0;
    }

    /* NOTE(review): the original computed `try_swapout = count << 1` but
     * never used it -- if the intent was to evict twice the needed pages,
     * lru_must_evict()'s second argument should be changed; the dead
     * variable has been removed here.
     * NOTE(review): get_pages() can call back into oom_handle(); confirm
     * lru_must_evict() guarantees forward progress so the pair cannot
     * recurse indefinitely. */
    if (lru_must_evict(&swap_lru, count)) {
        return get_pages(count, flags);
    }
    return 0;
}

/* Allocate a single free physical page and return its physical address,
 * or 0 when nothing is available (after trying oom_handle()).
 * Scans pm_table from lookup_pn_ptr up to max_pg, then wraps once back to
 * PM_PN_START and re-scans up to the original starting position. */
uptr get_page(u32 flags) {
    uptr page_found = 0;
    uptr old_ptr = lookup_pn_ptr;   /* original start; becomes the wrap bound */
    uptr upper_bound = max_pg;
    page_t* p;
    while (!page_found && lookup_pn_ptr < upper_bound) {
        p = &pm_table[lookup_pn_ptr];
        if (!p->ref_counts) {
            page_found = lookup_pn_ptr << 12;
            /* Claim the page. NOTE(review): this compound literal zeroes
             * the lru node (next/prev = NULL) instead of self-linking it
             * the way mm_init() does -- confirm lru_use_one()/lru_remove()
             * tolerate a zeroed node. */
            *p = (page_t){
                .ref_counts = 1,
                .flags = flags
            };
        } else {
            lookup_pn_ptr++;

            /* Hit the top without success: wrap once to PM_PN_START and
             * scan the region below the original starting point. */
            if (lookup_pn_ptr >= upper_bound && old_ptr != PM_PN_START) {
                lookup_pn_ptr = PM_PN_START;
                upper_bound = old_ptr;
                old_ptr = PM_PN_START;
            }
        }
    }
    if (!page_found) {
        // TODO: handle out of memory for process
        uptr res = oom_handle(1, flags);
        if (!res) {
            LOGE("Out of memory\n");
        }
        return res;
    }
    /* lookup_pn_ptr still points at the page just claimed here. */
    add_to_lru(lookup_pn_ptr, flags);
    return page_found;
}

/* Allocate `count` physically contiguous pages; returns the physical
 * address of the first page, or 0 on failure.
 * Uses the same wrap-once scan as get_page(). */
uptr get_pages(size_t count, u32 flags) {
    uptr page_found = 0;
    uptr old_ptr = lookup_pn_ptr;   /* original start; becomes the wrap bound */
    uptr upper_bound = max_pg;

    /* count == 0 would otherwise never satisfy found_count == count. */
    if (!count) {
        return 0;
    }

    while (!page_found && lookup_pn_ptr < upper_bound) {
        page_t* p = &pm_table[lookup_pn_ptr];
        if (!p->ref_counts) {
            uptr found_count = 1;
            uptr idx = lookup_pn_ptr + 1;

            /* Extend the free run up to `count` pages or the scan bound. */
            for (; idx < lookup_pn_ptr + count && idx < upper_bound; idx++) {
                if (pm_table[idx].ref_counts) {
                    break;
                }
                found_count++;
            }

            if (found_count == count) {
                page_found = lookup_pn_ptr << 12;
                for (size_t i = lookup_pn_ptr; i < lookup_pn_ptr + count && i < max_pg; i++) {
                    pm_table[i] = (page_t){
                        .ref_counts = 1,
                        .flags = flags
                    };
                    add_to_lru(i, flags);
                }
                break;
            }

            /* Run too short. Resume at the page that ended it. The original
             * left lookup_pn_ptr untouched when the run was truncated by
             * upper_bound (all trailing pages free but fewer than count),
             * re-scanning the same run forever. */
            lookup_pn_ptr = idx;
        } else {
            lookup_pn_ptr++;
        }

        /* Wrap once to PM_PN_START when the top is reached; this check is
         * shared by both branches so a bound-truncated run also wraps. */
        if (lookup_pn_ptr >= upper_bound && old_ptr != PM_PN_START) {
            lookup_pn_ptr = PM_PN_START;
            upper_bound = old_ptr;
            old_ptr = PM_PN_START;
        }
    }

    if (!page_found) {
        uptr res = oom_handle(count, flags);
        if (!res) {
            LOGE("Cannot find %d contiguous pages", count);
        }
        return res;
    }
    return page_found;
}

/* Mark a single physical page as free; out-of-range page numbers are ignored. */
void mark_page_free(uptr pn) {
    if (pn < max_pg)
        pm_table[pn].ref_counts = 0;
}

/* Mark `count` consecutive pages starting at `start_pn` as free,
 * clamped to the end of physical memory. */
void mark_chunk_free(uptr start_pn, size_t count) {
    size_t end = start_pn + count;
    if (end > max_pg)
        end = max_pg;
    for (size_t pn = start_pn; pn < end; pn++)
        pm_table[pn].ref_counts = 0;
}

/* Mark a single physical page as used with the given flags.
 * Bounds check added for consistency with mark_page_free() and
 * mark_chunk_used(), which both clamp against max_pg. */
void mark_page_used(uptr pn, u32 flags) {
    if (pn >= max_pg)
        return;
    pm_table[pn] = (page_t){
        .ref_counts = 1,
        .flags = flags
    };
}

/* Mark `count` consecutive pages starting at `start_pn` as used with the
 * given flags, clamped to the end of physical memory. */
void mark_chunk_used(uptr start_pn, size_t count, u32 flags) {
    size_t end = start_pn + count;
    if (end > max_pg)
        end = max_pg;
    for (size_t pn = start_pn; pn < end; pn++) {
        pm_table[pn] = (page_t){
            .ref_counts = 1,
            .flags = flags
        };
    }
}

/* Look up the page descriptor for a physical address.
 * Returns NULL when the page number is outside the descriptor table. */
inline page_t* page_query(uptr addr) {
    uptr pn = addr >> 12;
    return (pn < PAGE_MAX_SIZE) ? &pm_table[pn] : NULL;
}

/* Clear the locked bit on a page descriptor and make the page eligible
 * for the swap LRU again. */
inline void set_page_unlock(page_t* page) {
    uptr pn = page_query_pn(page);
    page->flags &= ~PG_LOCKED;
    add_to_lru(pn, page->flags);
}

/* Clear the locked bit on the page containing physical address `pa`
 * and make it eligible for the swap LRU again. */
inline void set_page_addr_unlock(uptr pa) {
    uptr pn = pa >> PG_SIZE_BITS;
    page_t* page = &pm_table[pn];
    page->flags &= ~PG_LOCKED;
    add_to_lru(pn, page->flags);
}

/* Convert a page descriptor pointer back into its page number.
 * NOTE(review): 0 is returned both for out-of-range pointers and for
 * frame 0 itself, so callers cannot tell the two apart -- verify frame 0
 * is permanently reserved before relying on 0 as an error value. */
inline uptr page_query_pn(page_t* page) {
    uptr pn = (uptr)(page - pm_table);
    return (pn < PAGE_MAX_SIZE) ? pn : 0;
}

/* Map one physical page at `virtual_address` in the current address space,
 * allocating the intermediate page table on demand.
 * opt: VMAP_IGNORE  -- leave an existing mapping untouched;
 *      VMAP_RESERVE -- only ensure the page table exists, write no PTE.
 * Returns 1 on success (or ignored/reserved), 0 when a needed page table
 * could not be allocated. */
int mapping_p2v(uptr virtual_address, uptr physical_address, u32 flags, u32 opt) {
    uptr dir_index = DIRECTORY_INDEX(virtual_address);
    uptr tbl_index = TABLE_INDEX(virtual_address);
    /* dir/tbl are virtual windows onto the paging structures themselves
     * (recursive mapping). */
    pg_table_t* dir = (pg_table_t*)DIR_BASE_VADDR;
    pg_table_t* tbl = (pg_table_t*)(TBL_BASE_VADDR | (dir_index << 12));

    if (!dir->entry[dir_index].data) {
        /* No page table for this directory slot yet: allocate a pinned frame. */
        pg_table_t* new_tbl_pa = (pg_table_t*)get_page(PG_LOCKED);

        // Physical memory is full!
        if (!new_tbl_pa) {
            // may swap could fix it
            LOGE("Physical memory is full!");
            return 0;
        }

        // This must be writable
        dir->entry[dir_index].frame = (uptr)new_tbl_pa >> PG_SIZE_BITS;
        dir->entry[dir_index].present = 1;
        dir->entry[dir_index].rw = 1;
        dir->entry[dir_index].pwt = 1;

        /* Clear the fresh table through the recursive window. */
        memset(tbl, 0, PG_SIZE);
    } else {
        if (tbl->entry[tbl_index].data && (opt & VMAP_IGNORE)) {
            return 1;
        }
    }
    /* NOTE(review): the TLB entry is flushed BEFORE the PTE below is
     * written -- confirm nothing can fault this VA back into the TLB
     * in between. */
    cpu_flush_page(virtual_address);

    if ((opt & VMAP_RESERVE)) {
        return 1;
    }

    tbl->entry[tbl_index].frame = physical_address >> PG_SIZE_BITS;
    tbl->entry[tbl_index].present = 1;
    tbl->entry[tbl_index].data |= flags;

    return 1;
}

/* Find `count` contiguous free virtual pages in the VMAP window and map
 * them onto the physical range starting at `pa`.
 * Returns the virtual address of the mapping, or 0 on failure.
 * opt: VMAP_NON_RAM -- do not take page references (e.g. MMIO). */
uptr mapping_pages(uptr pa, size_t count, u32 flags, u32 opt) {

    if ((pa & 0xFFF)) {
        LOGE("pa is not aligned to page boundary");
        return 0;
    }
    if (!count) {
        return 0;
    }

    uptr cur_addr = vmap_start;
    size_t pg_count = 0;        /* length of the current free run, in pages */
    size_t second_loop = 0;     /* set after wrapping back to VMAP_START   */
    pg_table_t* dir = (pg_table_t*)DIR_BASE_VADDR;

    while (!second_loop || cur_addr < vmap_start) {
        u32 dir_idx = DIRECTORY_INDEX(cur_addr);
        if (!(dir->entry[dir_idx].data)) {
            /* Unmapped directory slot: everything from cur_addr to the next
             * 4MB boundary is free. The original added a full table's worth
             * of entries even when cur_addr was mid-table, over-counting
             * the run. */
            pg_count += PG_MAX_ENTRIES - TABLE_INDEX(cur_addr);
            cur_addr = (cur_addr & 0xFFC00000) + MEM_4MB;
        } else {
            pg_table_t* tbl = (pg_table_t*)(TBL_BASE_VADDR | (dir_idx << 12));
            u32 i = TABLE_INDEX(cur_addr);
            int blocked = 0;
            for (; i < PG_MAX_ENTRIES && pg_count < count; i++) {
                if (!tbl->entry[i].data) {
                    pg_count++;
                } else {
                    /* Occupied PTE ends the run; skip past it. */
                    blocked = 1;
                    i++;
                    break;
                }
            }
            /* Advance to the slot the scan stopped at. The original advanced
             * by the accumulated pg_count, which became 0 when a run was
             * reset by an occupied entry -- cur_addr then never moved and
             * the loop spun forever. */
            cur_addr = (cur_addr & 0xFFC00000) + ((uptr)i << PG_SIZE_BITS);
            if (blocked) {
                pg_count = 0;
            }
        }

        if (pg_count >= count) {
            goto found;
        }

        if (cur_addr >= VMAP_END) {
            cur_addr = VMAP_START;
            pg_count = 0;   /* a free run cannot span the wrap */
            second_loop = 1;
        }
    }

    LOGE("Not enough continuous virtual memory");
    return 0;

found:
    /* cur_addr sits just past the run; back up to its start. */
    uptr va = cur_addr - (pg_count << PG_SIZE_BITS);
    for (size_t i = 0; i < count; i++) {
        mapping_p2v(va + (i << PG_SIZE_BITS), pa + (i << PG_SIZE_BITS), flags, VMAP_NULL);
        if (!(opt & VMAP_NON_RAM)) {
            ref_page(pa + (i << PG_SIZE_BITS));
        }
    }
    vmap_start = va + (count << PG_SIZE_BITS);
    return va;
}

/* Remove the mapping for one virtual page and return the physical address
 * that was mapped there (0 when nothing was mapped or the address falls in
 * the recursive-mapping directory slot). */
uptr unmapping_p2v(uptr virtual_address) {
    uptr dir_index = DIRECTORY_INDEX(virtual_address);

    /* The last directory slot holds the recursive mapping; never touch it. */
    if (dir_index == PG_MAX_ENTRIES - 1)
        return 0;

    pg_table_t* dir = (pg_table_t*)DIR_BASE_VADDR;
    if (!dir->entry[dir_index].data)
        return 0;

    uptr tbl_index = TABLE_INDEX(virtual_address);
    pg_table_t* tbl = (pg_table_t*)(TBL_BASE_VADDR | (dir_index << 12));
    pg_entry_t old_entry = tbl->entry[tbl_index];

    cpu_flush_page(virtual_address);
    tbl->entry[tbl_index].data = 0;
    return old_entry.frame << PG_SIZE_BITS;
}

/* Duplicate a physical page: allocate a new frame with the same flags and
 * copy the 4KB contents into it (used e.g. for copy-on-write).
 * Returns the new frame's physical address, or 0 when allocation failed. */
uptr copy_page(uptr physical_address) {
    uptr new_page = get_page(page_query(physical_address)->flags);
    if (new_page == 0) {
        return 0;
    }
    /* Map destination and source at the two scratch slots so both frames
     * are reachable through virtual addresses.
     * NOTE(review): mapping_p2v() can fail (returns 0) and the result is
     * not checked here -- confirm the scratch slots' tables always exist. */
    mapping_p2v(PG_MP_3, new_page, PG_WRITE, VMAP_NULL);
    mapping_p2v(PG_MP_4, physical_address, PG_WRITE, VMAP_NULL);

    /* rep movsl with ecx=1024 copies 1024 dwords = 4096 bytes,
     * from PG_MP_4 (source, esi) to PG_MP_3 (destination, edi). */
    asm volatile("movl %1, %%edi\n"
        "movl %2, %%esi\n"
        "rep movsl\n" ::"c"(1024),
        "r"(PG_MP_3),
        "r"(PG_MP_4)
        : "memory", "%edi", "%esi");

    unmapping_p2v(PG_MP_3);
    unmapping_p2v(PG_MP_4);

    return new_page;
}

/* Allocate one physical page and fill it with zeros.
 * Returns the physical address, or 0 when no page is available. */
uptr get_page_zero(u32 flags) {
    uptr pa = get_page(flags);
    if (!pa)
        return 0;

    /* Temporarily map the frame at the scratch slot so it can be cleared. */
    mapping_p2v(PG_MP_3, pa, PG_WRITE, VMAP_NULL);
    memset((void*)PG_MP_3, 0, PG_SIZE);
    unmapping_p2v(PG_MP_3);
    return pa;
}

/* Mount a page directory at virtual address `mnt` by pointing the matching
 * entry in the recursive table window at physical address `pde`.
 * NOTE(review): `entry->data = pde` assumes pde carries no low flag bits;
 * present/rw/pwt/pcd are then set through the bitfields -- confirm callers
 * always pass a page-aligned address. */
void mapping_page_dir(uptr mnt, uptr pde) {
    pg_entry_t* entry = &PDE_ENTRY(TBL_BASE_VADDR, mnt >> PG_SIZE_BITS);
    entry->data = (uptr)pde;
    entry->present = 1;
    entry->rw = 1;
    entry->pwt = 1;   /* write-through */
    entry->pcd = 1;   /* page-level cache disable */
    cpu_flush_page(mnt);
}

/* Unmount the page directory mounted at virtual address `mnt`. */
void unmapping_page_dir(uptr mnt) {
    pg_table_t* dir = (pg_table_t*)DIR_BASE_VADDR;
    u32 dir_idx = mnt >> 22;
    dir->entry[dir_idx].data = 0;
    cpu_flush_page(mnt);
}

/* Dump a run-length summary of physical page reference counts plus the
 * total/free page counts. Diagnostic only; no state is modified. */
void pm_usage() {
    if (!max_pg) {
        LOGW("Total Pages: %d, Free pages: %d\n", 0, 0);
        return;
    }

    u32 pn = 0;
    u32 count = 0;
    u32 free_count = 0;   /* was uninitialized: the free total was garbage */
    u32 ref_count = pm_table[0].ref_counts;

    for (u32 i = 0; i < max_pg; i++) {
        if (pm_table[i].ref_counts == 0) {
            free_count++;
        }
        if (pm_table[i].ref_counts == ref_count) {
            count++;
        } else {
            /* Ref count changed: flush the finished run and start a new one. */
            LOGW("Phy Page %X - %X: ref: %d\n", pn, pn + count - 1, ref_count);
            pn += count;
            count = 1;
            ref_count = pm_table[i].ref_counts;
        }
    }
    LOGW("Phy Page %X - %X: ref: %d\n", pn, pn + count - 1, ref_count);
    LOGW("Total Pages: %d, Free pages: %d\n", max_pg, free_count);
}

/* Translate a virtual address to its physical address via the recursive
 * page-table windows. Returns 0 (and logs) when the address is unmapped. */
uptr get_pa(uptr va) {
    pg_table_t* dir = (pg_table_t*)DIR_BASE_VADDR;
    u32 dir_index = DIRECTORY_INDEX(va);

    if (dir->entry[dir_index].data) {
        pg_table_t* tbl = (pg_table_t*)(TBL_BASE_VADDR | (dir_index << 12));
        pg_entry_t entry = tbl->entry[TABLE_INDEX(va)];
        if (entry.data) {
            /* Frame base plus the offset within the page. */
            return (entry.frame << 12) | (va & 0xFFF);
        }
    }
    LOGE("Try to get unmapping pa of va:%X\n", va);
    return 0;
}