/*
 * Copyright (c) 2024 iSOFT INFRASTRUCTURE SOFTWARE CO., LTD.
 * easyAda is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#include <core/mm.h>
#include <core/schedule.h>
#include <core/task.h>
#include <uapi/errors.h>

/*
 * Page-align a mapping hint and clamp it so it never falls below the
 * current task's mmap base.  Returns the adjusted hint.
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
    unsigned long base = current()->mm.mmap_base;

    hint &= PAGE_MASK;
    return (hint < base) ? PAGE_ALIGN_UP(base) : hint;
}

static unsigned long get_ummapped_area(unsigned long addr, unsigned long len)
{
    uint8_t mmap_size = current()->mm.mmap_index;
    for (uint8_t i = 0; i < mmap_size; i++) {
        if (current()->mm.mmap[i].vm_end > addr) {
            if (i == (mmap_size - 1UL)) {
                return current()->mm.mmap[i].vm_end;
            } else {
                unsigned long tmp_addr = current()->mm.mmap[i].vm_end;
                if ((tmp_addr + len) <= current()->mm.mmap[i + 1UL].vm_start) {
                    return tmp_addr;
                }
            }
        }
    }

    return addr;
}

/*
 * Map the physical range [pa, pa+len) at virtual address des_va in
 * address space @as, translating POSIX-style @prot bits into page-table
 * access/attribute flags.
 *
 * @prot  combination of PROT_READ / PROT_WRITE / PROT_EXEC, or PROT_NONE.
 * @flags mapping flags; MAP_GLOBAL selects a global TLB entry, otherwise
 *        the mapping is tagged non-global.
 *
 * Returns the result of addrspace_map() (0 on success).
 */
int do_map_region(addrspace_t *as, uint64_t des_va, uint64_t pa, uint64_t len, uint64_t prot, uint64_t flags)
{
    unsigned mm_access_flag = 0x0;
    unsigned mm_extra_flag  = 0x0;

    if ((prot & PROT_READ) != 0U) {
        mm_access_flag = MAP_AP_KRO_URO;
        mm_extra_flag  = MAP_FLAG_CACHEABLE;
    }

    /* write permission supersedes the read-only setting above */
    if ((prot & PROT_WRITE) != 0U) {
        mm_access_flag = MAP_AP_KRW_URW;
        mm_extra_flag  = MAP_FLAG_CACHEABLE;
    }

    if ((prot & PROT_EXEC) != 0U) {
        mm_extra_flag |= MAP_FLAG_EXECUTABLE;
    }

    /* PROT_NONE: kernel read-only, no user access, no extra attributes */
    if (prot == PROT_NONE) {
        mm_access_flag = MAP_AP_KRO_UNA;
        mm_extra_flag  = MAP_FLAG_NONE;
    }

    /* BUG FIX: MAP_GLOBAL is a mapping flag, so it must be tested against
     * `flags`; the original tested `prot`, leaving `flags` entirely unused. */
    if ((flags & MAP_GLOBAL) != 0U) {
        mm_access_flag |= AARCH64_PTE_GLOBAL;
    } else {
        mm_access_flag |= AARCH64_PTE_NGLOBAL;
    }

    return addrspace_map(as, pa, des_va, len, map_mk_attr(MAP_LEVEL_EL0, MAP_TYPE_RAM, mm_access_flag, mm_extra_flag));
}

/*
 * Map the already-mapped source range at src_va to a (possibly newly
 * chosen) destination address des_va in the current address space.
 *
 * Returns the destination VA actually used on success, MAP_FAILED on
 * failure (source not mapped, or the mapping operation failed).
 */
unsigned long do_remap_region(uint64_t des_va, uint64_t src_va, uint64_t len, uint64_t prot, uint64_t flags)
{
    int retval;

    addrspace_t  *cur_space = &current()->addrspace;
    unsigned long pa;

    /* round len up so it covers the whole page span of [des_va, des_va+len) */
    len         = PAGE_ALIGN_UP(des_va - PAGE_ALIGN_DOWN(des_va) + len);
    des_va      = PAGE_ALIGN_DOWN(des_va);
    int t_index = look_vma_region(&current()->mm, des_va, len);
    pa          = trans_va_into_pa(cur_space->pg, des_va);

    /* Pick a fresh destination when no hint was given (des_va == 0), or
     * when des_va is already backed by a translation that is not tracked
     * in the vma list (t_index == -1 but pa != 0). */
    if ((des_va == 0U) || ((t_index == -1) && (pa != 0U))) {
        if ((flags & MAP_FIXED) == 0U) {
            des_va = round_hint_to_min(des_va);
        }

        des_va = get_ummapped_area(des_va, len);
    }

    /* The source range must already be mapped: resolve its physical base.
     * Note pa is reused here; the earlier value was only a probe. */
    src_va = PAGE_ALIGN_DOWN(src_va);
    pa     = trans_va_into_pa(cur_space->pg, src_va);
    if (pa == 0) {
        return MAP_FAILED;
    }

    if (des_va != 0) {
        retval = do_map_region(cur_space, des_va, pa, len, prot, flags);
        if (retval == 0) {
            /* record the new region; the source VA is kept as heap_addr */
            add_vma_region(&current()->mm, des_va, src_va, pa, len, prot);
            return des_va;
        }
    }
    return MAP_FAILED;
}

/*
 * Unmap the page-rounded range [addr, addr+len) from the current task's
 * address space and drop the matching vma record.  Returns the result of
 * addrspace_unmap(); a vma-bookkeeping failure is only logged.
 */
int do_unmap_region(unsigned long addr, unsigned long len)
{
    addrspace_t *space = &current()->addrspace;

    addr = PAGE_ALIGN_DOWN(addr);
    len  = PAGE_ALIGN_UP(len);

    int retval = addrspace_unmap(space, addr, len);
    if ((retval == 0) && (remove_vma_region(&current()->mm, addr, len) != 0)) {
        dprintf("remove_vma_region fail addr = %x len = 0x%x. \n", addr, len);
    }

    return retval;
}

/*
 * Record a new vma [va_des, va_des+size) -> [pa, pa+size) in @mm, keeping
 * the mmap[] array sorted by address.  va_heap is stored as the region's
 * heap_addr (the source VA it was remapped from).  Duplicates (va_des
 * already inside a recorded region) and a full table are ignored with a
 * log message.
 *
 * NOTE(review): a va_des below mmap[0].vm_start is appended at the end,
 * which breaks the sorted order — confirm whether that case can occur.
 */
void add_vma_region(struct mm_struct *mm, uint64_t va_des, uint64_t va_heap, uint64_t pa, uint64_t size, uint64_t prot)
{
    if (look_vma_region(mm, va_des, size) != -1) {
        return; /* already recorded */
    }

    int index = mm->mmap_index;
    if (index >= TASK_MAX_MMAP_COUNT) {
        dprintf("can not record mmap becoase :mmap overflow index = %d . \n", index);
        return;
    }

    /* Find the gap between two existing regions that contains va_des and
     * insert there to keep mmap[] sorted. */
    for (int i = 0; i < index - 1; i++) {
        if (mm->mmap[i].vm_end < va_des && mm->mmap[i + 1].vm_start > va_des) {
            /* BUG FIX: shift entries i+1..index-1 one slot up, iterating
             * from the top down.  The original iterated upward while
             * copying j -> j+1, which duplicated mmap[i+1] into every
             * following slot; it also copied pm_end into pm_start. */
            for (int j = index; j > i + 1; j--) {
                mm->mmap[j] = mm->mmap[j - 1];
            }
            mm->mmap[i + 1].vm_start     = va_des;
            mm->mmap[i + 1].vm_end       = va_des + size;
            mm->mmap[i + 1].pm_start     = pa;
            mm->mmap[i + 1].pm_end       = pa + size;
            mm->mmap[i + 1].vm_page_prot = prot;
            mm->mmap[i + 1].size         = size;
            mm->mmap[i + 1].heap_addr    = va_heap;
            mm->mmap_index++;
            return;
        }
    }

    /* No interior gap matched: append at the end. */
    mm->mmap[index].vm_start     = va_des;
    mm->mmap[index].vm_end       = va_des + size;
    mm->mmap[index].heap_addr    = va_heap;
    mm->mmap[index].size         = size;
    mm->mmap[index].vm_page_prot = prot;
    mm->mmap[index].pm_start     = pa;
    mm->mmap[index].pm_end       = pa + size;
    mm->mmap_index++;
}

#define GET_PADDR_IN_PTE(entry) (((uint64_t)(entry).table.next_table_addr) << PAGE_SHIFT)
#define GET_NEXT_PTP(entry)     allocator_vbase(GET_PADDR_IN_PTE(entry))

/*
 * Remove the vma containing va_start from @mm, compacting the mmap[]
 * array.  Returns 0 on success, -1 if no region contains va_start.
 */
int remove_vma_region(struct mm_struct *mm, unsigned long va_start, unsigned long va_size)
{
    int vma_size     = mm->mmap_index;
    int target_index = look_vma_region(mm, va_start, va_size);
    if (target_index == -1) {
        return -1;
    }

    /* BUG FIX: stop at vma_size-1 — the original read mmap[vma_size],
     * one past the last valid entry (out of bounds when the table is
     * full).  Whole-struct assignment also fixes the original's
     * pm_start = pm_end copy-paste error. */
    for (int j = target_index; j < vma_size - 1; j++) {
        mm->mmap[j] = mm->mmap[j + 1];
    }

    mm->mmap_index--;
    return 0;
}

/*
 * Return the index of the recorded vma containing address @va, or -1 if
 * none does.
 *
 * NOTE(review): @size is accepted but never consulted — only the start
 * address is checked against each region; confirm callers expect that.
 */
int look_vma_region(struct mm_struct *mm, unsigned long va, unsigned long size)
{
    int count = mm->mmap_index;

    for (int idx = 0; idx < count; idx++) {
        int inside = (va >= mm->mmap[idx].vm_start) && (va < mm->mmap[idx].vm_end);
        if (inside) {
            return idx;
        }
    }

    return -1;
}

static void detail_pte(pte_t p, int level);

/*
 * Recursively dump every valid entry of the page table @cur_pg, which sits
 * at translation level @cur_level (0..3).  Table entries at levels 0-2 are
 * descended into; block (L1/L2) and page (L3) entries are printed via
 * detail_pte().
 */
void do_info_memory(page_table_t *cur_pg, int cur_level)
{
    for (size_t i = 0; i < PTP_ENTRIES; i++) {
        if (!IS_PTE_INVALID(cur_pg->entry[i].pte)) {
            if (cur_level < 3) {
                if (cur_pg->entry[i].table.is_table != 0U) {
                    page_table_t *next_ptp = (page_table_t *)GET_NEXT_PTP(cur_pg->entry[i]);
                    /* BUG FIX: recurse with cur_level + 1 instead of
                     * ++cur_level; the pre-increment mutated cur_level and
                     * skewed the level for all remaining entries of this
                     * table (and their printed labels). */
                    do_info_memory(next_ptp, cur_level + 1);
                } else {
                    dprintf("L%d.index = %d page table item is block ==> pte = %x  \n", cur_level, i,
                            cur_pg->entry[i].pte);
                    detail_pte(cur_pg->entry[i], cur_level);
                }

            } else if (cur_level == 3) {
                if (cur_pg->entry[i].l3_page.is_page != 0U) {
                    dprintf("L%d.index = %d page table item is page ==> pte = %x \n", cur_level, i,
                            cur_pg->entry[i].pte);
                    detail_pte(cur_pg->entry[i], cur_level);
                } else {
                    dprintf("error flag mask==> L%d.is_page = 0 \n", cur_level);
                }
            } else {
                dprintf("error Page Level = %d  \n", cur_level);
            }
        }
    }
}

/*
 * Walk the page table @pgtbl level by level (L0..L3) for virtual address
 * @va, printing each table pointer reached and, when the walk terminates
 * in a block or page entry, the decoded attributes of that entry.
 *
 * get_next_ptp() contract (as used here): returns < 0 when the entry is
 * absent, 1 when the walk ends in a block entry at this level, and
 * presumably 0 when it descends to the next table — TODO confirm.
 */
void check_va_attribute(page_table_t *pgtbl, uint64_t va)
{
    page_table_t *cur_ptp = pgtbl;
    pte_t        *pte     = NULL;
    int           ret;

    dprintf("check va = %x \n", va);
    dprintf("L0.ptp->addr = %x \n", pgtbl);
    // L0 Table
    ret = get_next_ptp(cur_ptp, 0, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va not exist in pgtbl ==> L0 Table\n");
        return;
    }

    dprintf("L1.ptp-> %x \n", cur_ptp);
    // L1 Table: ret == 1 means the walk ended in a 1G block entry
    ret = get_next_ptp(cur_ptp, 1, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va not exist in pgtbl ==> L1 Table\n");
        return;
    }
    if (ret == 1) {
        detail_pte(*pte, 1);
        return;
    }

    dprintf("L2.ptp-> %x \n", cur_ptp);
    // L2 Table: ret == 1 means the walk ended in a 2M block entry
    ret = get_next_ptp(cur_ptp, 2, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va not exist in pgtbl ==> L2 Table \n");
        return;
    }
    if (ret == 1) {
        detail_pte(*pte, 2);
        return;
    }

    dprintf("L3.ptp-> %x \n", cur_ptp);
    // L3 Table: final level, always a 4K page entry if present
    ret = get_next_ptp(cur_ptp, 3, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va not exist in pgtbl ==> L3 Table\n");
        return;
    } else {
        detail_pte(*pte, 3);
        return;
    }
}

/*
 * Print the decoded fields of page-table entry @p interpreted at
 * translation level @level (1 = 1G block, 2 = 2M block, 3 = 4K page).
 */
static void detail_pte(pte_t p, int level)
{
    /* BUG FIX: the format string contains %d but the original passed no
     * argument — undefined behavior / garbage output. */
    dprintf("-------------------L%d.pte----------------------- \n", level);
    /* NOTE(review): passing the pte_t union itself to %x relies on how
     * dprintf handles it — consider p.pte instead; left as-is. */
    dprintf("pte = %x \n", p);
    switch (level) {
        case 1:
            dprintf("block represent 1G \n");
            dprintf("vaild = %x table = %x attr_index = %x \n", p.l1_block.is_valid, p.l1_block.is_table,
                    p.l1_block.attr_index);
            dprintf(" NS = %x AP = %x SH = %x AF = %x nG = %x \n", p.l1_block.NS, p.l1_block.AP, p.l1_block.SH,
                    p.l1_block.AF, p.l1_block.nG);

            dprintf("nT = %x pfn = %x GP = %x DBM = %x \n", p.l1_block.nT, p.l1_block.pfn, p.l1_block.GP,
                    p.l1_block.DBM);
            /* BUG FIX: these fields were read from l2_block (copy-paste
             * from case 2); use the l1_block view in the L1 case. */
            dprintf("contiguous = %x PXN = %x UXN = %x PBHA = %x \n", p.l1_block.Contiguous, p.l1_block.PXN,
                    p.l1_block.UXN, p.l1_block.PBHA);

            break;
        case 2:
            dprintf("block represent 2M \n");
            dprintf("vaild = %x table = %x attr_index = %x \n", p.l2_block.is_valid, p.l2_block.is_table,
                    p.l2_block.attr_index);
            dprintf(" NS = %x AP = %x SH = %x AF = %x nG = %x \n", p.l2_block.NS, p.l2_block.AP, p.l2_block.SH,
                    p.l2_block.AF, p.l2_block.nG);

            dprintf("nT = %x pfn = %x GP = %x DBM = %x \n", p.l2_block.nT, p.l2_block.pfn, p.l2_block.GP,
                    p.l2_block.DBM);
            dprintf("contiguous = %x PXN = %x UXN = %x PBHA = %x \n", p.l2_block.Contiguous, p.l2_block.PXN,
                    p.l2_block.UXN, p.l2_block.PBHA);

            break;
        case 3:
            dprintf("block/page represent 4K \n");

            dprintf("vaild = %x page = %x attr_index = %x \n", p.l3_page.is_valid, p.l3_page.is_page,
                    p.l3_page.attr_index);
            dprintf(" NS = %x AP = %x SH = %x AF = %x nG = %x \n", p.l3_page.NS, p.l3_page.AP, p.l3_page.SH,
                    p.l3_page.AF, p.l3_page.nG);
            dprintf("pfn = %x DBM = %x \n", p.l3_page.pfn, p.l3_page.DBM);
            dprintf("contiguous = %x PXN = %x UXN = %x PBHA = %x \n", p.l3_page.Contiguous, p.l3_page.PXN,
                    p.l3_page.UXN, p.l3_page.PBHA);
            break;

        default:
            dprintf("error level...\n");
            break;
    }
    dprintf("\n ------------------------------------------ \n ");
}

/*
 * Translate a user address inside a recorded vma into the corresponding
 * heap address (heap_addr plus the offset into the region).  Returns 0
 * when [addr, addr+len) is not fully contained in any recorded region.
 */
uint64_t get_user_heap_addr(struct mm_struct *mm, uint64_t addr, uint64_t len)
{
    uint64_t end = addr + len;

    for (size_t i = 0; i < mm->mmap_index; i++) {
        if ((addr >= mm->mmap[i].vm_start) && (end <= mm->mmap[i].vm_end)) {
            uint64_t offset = addr - mm->mmap[i].vm_start;
            return mm->mmap[i].heap_addr + offset;
        }
    }

    return 0;
}

/*
 * Dump every recorded vma of @mm: virtual and physical ranges, one line
 * per region.
 */
void show_all_vma(struct mm_struct *mm)
{
    unsigned int count = mm->mmap_index;

    dprintf("show all mmap region : \n");
    for (unsigned int i = 0; i < count; i++) {
        uint64_t va = mm->mmap[i].vm_start;
        uint64_t pa = mm->mmap[i].pm_start;
        uint64_t sz = mm->mmap[i].size;
        dprintf("   vma ==> va :[%x , %x]  pa :[%x , %x]\n", va, va + sz, pa, pa + sz);
    }
}

/*
 * Unmap every recorded vma of @tcb from its address space and reset the
 * vma table.  Each released region is logged.
 */
void free_mmap_region(tcb_t *tcb)
{
    struct mm_struct *mm    = &tcb->mm;
    unsigned int      count = mm->mmap_index;

    for (unsigned int i = 0; i < count; i++) {
        uint64_t region_va   = mm->mmap[i].vm_start;
        uint64_t region_size = mm->mmap[i].size;

        dprintf("release mmap[%d] = [addr = %x size = %x] \n", i, region_va, region_size);
        addrspace_unmap(&tcb->addrspace, region_va, region_size);
    }

    mm->mmap_index = 0;
}
