/*
 * Copyright (c) 2024 iSOFT INFRASTRUCTURE SOFTWARE CO., LTD.
 * easyAda is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#include <arch/utils.h>
#include <arch/scr.h>
#include <arch/cpu.h>
#include <arch/pagetable.h>
#include <stdlib/assert.h>
#include <stdlib/string.h>
#include <tools/macros.h>
#include <tools/macros.h>
#include <core/allocate.h>
#include <core/addrspace.h>
#include <core/allocate.h>
#include <core/dprintf.h>
#include <core/mm.h>
#include <uapi/addrspace.h>
#include <uapi/errors.h>

// Physical address of the next-level table referenced by a table descriptor
// (next_table_addr stores a page frame number; cast widens before the shift).
#define GET_PADDR_IN_PTE(entry) (((uint64_t)entry->table.next_table_addr) << PAGE_SHIFT)
// Same address, converted to a kernel virtual address the walker can read.
#define GET_NEXT_PTP(entry)     allocator_vbase(GET_PADDR_IN_PTE(entry))

// get_next_ptp() success codes: the entry was a next-level table vs. a
// block/page leaf mapping.
#define NORMAL_PTP (0)
#define BLOCK_PTP  (1)

// Installs exactly one mapping of size `len` at level `set_level` (1/2/3).
static int map(page_table_t *pgtbl, uint64_t va, uint64_t pa, uint64_t len, uint64_t attr, uint8_t set_level);

/*
 * Walk @pgtbl and check that @va translates to physical address @t_pa.
 *
 * Handles L1 and L2 block mappings as well as L3 pages.
 *
 * Returns 0 when the translation matches, -1 on mismatch, or the negative
 * error from get_next_ptp() (e.g. -ENOMAPPING) when the walk fails.
 */
int verify_va_to_pa(page_table_t *pgtbl, uint64_t va, uint64_t t_pa)
{
    page_table_t *cur_ptp = pgtbl;
    pte_t        *pte     = NULL;
    int           ret     = 0;
    uint64_t      pa;
    // L0 Table
    ret = get_next_ptp(cur_ptp, 0, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va!cannot verify va to pa!\n");
        return ret;
    }

    // L1 Table: a block entry here maps a whole L1-sized region.
    ret = get_next_ptp(cur_ptp, 1, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va!cannot verify va to pa!\n");
        return ret;
    } else if (ret == BLOCK_PTP) {
        /* Cast before shifting: a narrow bit-field may be promoted to int,
         * and shifting that by L1_INDEX_SHIFT would lose the high bits. */
        pa = ((uint64_t)pte->l1_block.pfn) << L1_INDEX_SHIFT;
        pa |= GET_VA_OFFSET_L1(va);
        return t_pa == pa ? 0 : -1;
    }

    // L2 Table: block entry maps an L2-sized region.
    ret = get_next_ptp(cur_ptp, 2, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va!cannot verify va to pa!\n");
        return ret;
    } else if (ret == BLOCK_PTP) {
        pa = ((uint64_t)pte->l2_block.pfn) << L2_INDEX_SHIFT;
        pa |= GET_VA_OFFSET_L2(va);
        return t_pa == pa ? 0 : -1;
    }

    // L3 Table: final level, 4KiB page granularity.
    ret = get_next_ptp(cur_ptp, 3, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        dprintf("error va!cannot verify va to pa!\n");
        return ret;
    }

    pa = ((uint64_t)pte->l3_page.pfn) << L3_INDEX_SHIFT;
    pa |= GET_VA_OFFSET_L3(va);

    return t_pa == pa ? 0 : -1;
}

/*
 * Translate @va into a physical address by walking @pgtbl in software.
 *
 * Handles L1/L2 block mappings and L3 pages.
 *
 * Returns the physical address, or 0 when @va is not mapped.  NOTE: 0 is
 * therefore ambiguous if physical address 0 can be a valid mapping target.
 */
uint64_t trans_va_into_pa(page_table_t *pgtbl, uint64_t va)
{
    page_table_t *cur_ptp = pgtbl;
    pte_t        *pte     = NULL;
    int           ret     = 0;
    uint64_t      pa;
    // L0 Table
    ret = get_next_ptp(cur_ptp, 0, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        return 0;
    }

    // L1 Table
    ret = get_next_ptp(cur_ptp, 1, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        return 0;
    } else if (ret == BLOCK_PTP) {
        /* Widen the bit-field before shifting so integer promotion cannot
         * truncate the result (same fix at each level below). */
        pa = ((uint64_t)pte->l1_block.pfn) << L1_INDEX_SHIFT;
        pa |= GET_VA_OFFSET_L1(va);
        return pa;
    }

    // L2 Table
    ret = get_next_ptp(cur_ptp, 2, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        return 0;
    } else if (ret == BLOCK_PTP) {
        pa = ((uint64_t)pte->l2_block.pfn) << L2_INDEX_SHIFT;
        pa |= GET_VA_OFFSET_L2(va);
        return pa;
    }

    // L3 Table
    ret = get_next_ptp(cur_ptp, 3, va, &cur_ptp, &pte, false);
    if (ret < 0) {
        return 0;
    }

    pa = ((uint64_t)pte->l3_page.pfn) << L3_INDEX_SHIFT;
    pa |= GET_VA_OFFSET_L3(va);

    return pa;
}

/*
 * Stub: meant to report how many page-table pages would be needed to map
 * [vbase, vbase + size) at @level.  Not implemented — always returns 0.
 */
unsigned long page_table_missing_pt(page_table_t *pd, unsigned long vbase, unsigned long size, uint64_t level)
{
    return 0;
}

/*
 * Recursively free every page-table page reachable from @pd, then @pd itself.
 *
 * @level is the level of @pd (0-based).  Table entries at level 2 reference
 * L3 tables, which are freed directly; higher levels recurse.  Leaf (block)
 * entries own no table page and are skipped.  The mapped data frames
 * themselves are NOT freed here.
 *
 * Always returns 0.
 */
int page_table_recycle(page_table_t *pd, int level)
{
    for (int i = 0; i < PTP_ENTRIES; i++) {
        pte_t *slot = &pd->entry[i];

        if (slot->table.is_valid && slot->table.is_table) {
            /* GET_NEXT_PTP widens the pfn bit-field to uint64_t before
             * shifting, unlike the old open-coded shift which could lose
             * bits to integer promotion.  It also matches the rest of the
             * file's idiom for table traversal. */
            page_table_t *child = (page_table_t *)GET_NEXT_PTP(slot);

            if (level == 2) {
                allocator_free_block((unsigned long)child);
            } else if (level < 2) {
                page_table_recycle(child, level + 1);
            }
        }
    }
    allocator_free_block((unsigned long)pd);
    return 0;
}

/*
 * Deep-copy the page-table tree rooted at @src (which sits at @level) into
 * @des.  Table entries get a freshly allocated next-level table; block/page
 * leaf entries are copied verbatim, so both trees then map the same frames.
 * At level 2 the referenced L3 tables are duplicated with memcpy.
 */
void page_table_copy(page_table_t *des, page_table_t *src, int level)
{
    for (int i = 0; i < PTP_ENTRIES; i++) {
        if (src->entry[i].table.is_valid) {
            if (src->entry[i].table.is_table) {
                pte_t pte     = src->entry[i];
                void *new_ptp = allocator_alloc_block(PAGE_SIZE);
                /* The old code dereferenced the allocation unchecked; fail
                 * loudly instead (matches get_next_ptp's policy). */
                assert(new_ptp != NULL);
                memset(new_ptp, 0, sizeof(page_table_t));
                pte.table.next_table_addr = allocator_pbase((unsigned long)new_ptp) >> PAGE_SHIFT;
                des->entry[i]             = pte;

                page_table_t *next_des = (page_table_t *)new_ptp;
                /* Widen the bit-field before shifting so integer promotion
                 * cannot truncate the physical address. */
                uint64_t      next_src_pa = ((uint64_t)src->entry[i].table.next_table_addr) << PAGE_SHIFT;
                page_table_t *next_src    = (page_table_t *)allocator_vbase(next_src_pa);

                if (level < 2) {
                    page_table_copy(next_des, next_src, level + 1);
                } else if (level == 2) {
                    memcpy(next_des, next_src, sizeof(page_table_t));
                }
            } else {
                des->entry[i].pte = src->entry[i].pte;
            }
        }
    }
}

/*
 * Program TTBR1_EL1 (kernel) or TTBR0_EL1 (user) with @pd's physical base
 * tagged with @asid in the TTBR ASID field.
 */
void page_table_switch_to_asid(page_table_t *pd, unsigned short asid, int iskernel)
{
    unsigned long pa   = allocator_pbase((unsigned long)pd);
    unsigned long ttbr = pa | ((unsigned long)asid << SCR_TTBRx_BIT_ASID);

    if (iskernel) {
        scr_write_TTBR1_EL1(ttbr);
    } else {
        scr_write_TTBR0_EL1(ttbr);
    }
}

/*
 * Point TTBR1_EL1 (kernel) or TTBR0_EL1 (user) at @pd, with barriers around
 * the register write.
 */
void page_table_switch_to(page_table_t *pd, int iskernel)
{
    unsigned long base = allocator_pbase((unsigned long)pd);

    /* Validate BEFORE programming the register — the old code asserted only
     * after a bogus base was already live in TTBRx_EL1. */
    assert(base != 0);

    dsb();
    if (iskernel) {
        scr_write_TTBR1_EL1((uint64_t)base);
    } else {
        scr_write_TTBR0_EL1((uint64_t)base);
    }
    isb();
}

/*
 * Map [vbase, vbase+size) to [pbase, pbase+size) in @pd with attributes
 * @attr.  Greedily uses the largest granule whose alignment allows it:
 * L1 blocks (PUD_SIZE), then L2 blocks (PMD_SIZE), then L3 pages.
 *
 * @size, @vbase and @pbase must all be page aligned (asserted).
 * Each mapping is cross-checked with verify_va_to_pa() under assert.
 *
 * Returns 0 on success or the negative error from map().
 */
int page_table_map(page_table_t *pd, uint64_t pbase, uint64_t vbase, uint64_t size, uint64_t attr)
{
    int result;
    if (((size & PAGE_MASK) > 0) || ((vbase & PAGE_MASK) > 0) || ((pbase & PAGE_MASK) > 0)) {
        assert(0);
    }
    do {
        /* >= (not >) so an exactly block-sized, block-aligned remainder is
         * mapped with a single block instead of many smaller entries. */
        if (((size >= PUD_SIZE) && ((vbase & PUD_MASK) == 0) && ((pbase & PUD_MASK) == 0))) {
            result = map(pd, vbase, pbase, PUD_SIZE, attr, 1);
            if (result < 0) {
                return result;
            }
            assert(0 == verify_va_to_pa(pd, vbase, pbase));
            vbase += PUD_SIZE;
            pbase += PUD_SIZE;
            size -= PUD_SIZE;
        } else if (((size >= PMD_SIZE) && ((vbase & PMD_MASK) == 0) && ((pbase & PMD_MASK) == 0))) {
            result = map(pd, vbase, pbase, PMD_SIZE, attr, 2);
            if (result < 0) {
                return result;
            }
            assert(0 == verify_va_to_pa(pd, vbase, pbase));
            vbase += PMD_SIZE;
            pbase += PMD_SIZE;
            size -= PMD_SIZE;
        } else {
            result = map(pd, vbase, pbase, PAGE_SIZE, attr, 3);
            if (result < 0) {
                return result;
            }
            assert(0 == verify_va_to_pa(pd, vbase, pbase));
            vbase += PAGE_SIZE;
            pbase += PAGE_SIZE;
            /* Guard against unsigned underflow on the final page. */
            size = size > PAGE_SIZE ? (size - PAGE_SIZE) : 0;
        }
    } while (size > 0);

    return 0;
}

/*
 * Unmap [va, va + len) from @pg, walking down from L0 on each iteration.
 *
 * A block mapping (L1/PUD or L2/PMD) fully inside the range is cleared in
 * one step.  A partially covered block is split via split_block_into_table(),
 * which remaps the surrounding parts and leaves exactly [va, va+len)
 * unmapped.  Otherwise the L3 page entry is cleared.  Already-unmapped
 * pages are skipped.
 *
 * Returns 0 on success, or a negative error from the table walk / split.
 */
int page_table_unmap(page_table_t *pg, uint64_t va, size_t len)
{
    int retval;

    while (len > 0) {
        pte_t        *entry    = NULL;
        page_table_t *cur_ptp  = (page_table_t *)pg;
        page_table_t *next_ptp = NULL;
        uint64_t      do_unmap_size;
        uint64_t      block_level_size = 0; /* fixed: was read uninitialized on bad level */

        int level  = 0;
        int delete = 0;
        int mapped = 1;

        while (level < 3) {
            int ret = get_next_ptp(cur_ptp, level, va, &next_ptp, &entry, false);
            if (ret == -ENOMAPPING) {
                /* Nothing mapped here.  The old code fell through and
                 * dereferenced an uninitialized next_ptp; just skip. */
                mapped = 0;
                break;
            }
            if (ret < 0) {
                return ret;
            }

            if (ret == BLOCK_PTP) {
                switch (level) {
                    case 1:
                        block_level_size = PUD_SIZE;
                        break;
                    case 2:
                        block_level_size = PMD_SIZE;
                        break;

                    default:
                        /* Blocks can only exist at L1/L2.  assert(0) really
                         * fires; the old assert(1) was a no-op. */
                        dprintf("error level .. \n");
                        assert(0);
                        return -1;
                }

                if (len >= block_level_size) {
                    /* Whole block lies inside the range: drop it. */
                    entry->pte    = 0x0;
                    do_unmap_size = block_level_size;

                } else {
                    /* Partial overlap: split, leaving [va, va+len) unmapped. */
                    do_unmap_size = len;
                    retval        = split_block_into_table(pg, entry, va, do_unmap_size, level);
                    if (retval != 0) {
                        return -1;
                    }
                }

                va += do_unmap_size;
                len -= do_unmap_size;
                delete = 1;
                break;
            }
            cur_ptp = next_ptp;
            level++;
        }

        if (!delete) {
            if (mapped) {
                /* next_ptp is the L3 table after a full walk: clear the page. */
                size_t index = GET_L3_INDEX(va);
                entry        = &(next_ptp->entry[index]);
                entry->pte   = 0x0;
            }
            /* Guard against size_t wrap-around on a trailing partial page. */
            len = len > PAGE_SIZE ? len - PAGE_SIZE : 0;
            va += PAGE_SIZE;
        }
    }
    return 0;
}

/*
 * Reconstruct the MAP_* attribute flags from a leaf descriptor.
 *
 * Rough inverse of set_pte_flags().  The fields are read through the
 * l2_block view; the attribute bit layout is shared by the leaf views.
 */
uint64_t get_attr_from_pte(pte_t *pte)
{
    uint64_t attr = pte->l2_block.AP;

    if (pte->l2_block.UXN == 0) {
        attr |= MAP_FLAG_EXECUTABLE | MAP_LEVEL_EL0;
    }
    if (pte->l2_block.PXN == 0) {
        attr |= MAP_FLAG_EXECUTABLE | MAP_LEVEL_EL1;
    }

    if (pte->l2_block.SH != 0) {
        attr |= MAP_FLAG_SHAREABLE;
    }

    switch (pte->l2_block.attr_index) {
        case DEVICE_MEMORY:
            attr |= MAP_TYPE_DEVICE;
            break;
        case NORMAL_MEMORY:
            attr |= MAP_TYPE_RAM;
            break;
        default:
            break;
    }

    if (pte->l2_block.nG == 0) {
        attr |= MAP_FLAG_GLOBAL;
    }

    return attr;
}

/*
 * Replace the block mapping @cur_pte (an L1 or L2 block covering @va) with
 * finer-grained mappings, leaving exactly [va, va + len) unmapped.
 *
 * The surrounding sub-ranges [block_start, va) and [va+len, block_end) are
 * re-mapped via page_table_map() with attributes recovered from the old
 * block descriptor.  The physical bases are looked up BEFORE the block is
 * invalidated.  The TLB is flushed after invalidation.
 *
 * Returns 0 on success, -1 on bad level or re-map failure.
 */
int split_block_into_table(page_table_t *pgtbl, pte_t *cur_pte, uint64_t va, size_t len, int level)
{
    uint64_t round_size;
    uint64_t attr;

    int retval;
    if (level == 1) {
        round_size = PUD_SIZE;
    } else if (level == 2) {
        round_size = PMD_SIZE;
    } else {
        /* fixed typo: message used to say "spilt_block_into_table" */
        dprintf("split_block_into_table ==> error level.\n");
        return -1;
    }

    /* Leading sub-range: block start up to va (still translatable here). */
    uint64_t vbase1 = ROUND_DOWN(va, round_size);
    uint64_t pbase1 = trans_va_into_pa(pgtbl, vbase1);
    size_t   len1   = va - vbase1;

    /* Trailing sub-range: va+len up to the end of the block. */
    uint64_t vbase2 = va + len;
    uint64_t pbase2 = trans_va_into_pa(pgtbl, vbase2);
    size_t   len2   = ROUND_UP(vbase2, round_size) - vbase2;

    attr = get_attr_from_pte(cur_pte);

    cur_pte->table.is_valid = 0x0;

    scr_invalidate_tlb_all_el1();

    if (len1 > 0) {
        retval = page_table_map(pgtbl, pbase1, vbase1, len1, attr);
        if (retval) {
            /* %lx, not %x: the arguments are 64-bit (UB with %x). */
            dprintf("page_table_map fail ==> vbase = %lx pbase = %lx len = %lx \n", vbase1, pbase1, len1);
            return -1;
        }
    }

    if (len2 > 0) {
        dprintf("page_table_map call pbase = %lx vbase = %lx len = %lx \n", pbase2, vbase2, len2);

        retval = page_table_map(pgtbl, pbase2, vbase2, len2, attr);
        if (retval) {
            dprintf("page_table_map fail ==> vbase = %lx pbase = %lx len = %lx \n", vbase2, pbase2, len2);
            return -1;
        }
    }

    return 0;
}

/*
 * Boot-time entry point: make @pd the active translation table.
 * Thin wrapper around page_table_switch_to().
 */
void BOOTONLY page_table_active(page_table_t *pd, int iskernel)
{
    page_table_switch_to(pd, iskernel);
}

/*
 * Stub: intended to install @pgtbl as the current page table.
 * Currently a deliberate no-op kept for interface compatibility.
 */
static void set_page_table(uint64_t pgtbl)
{
    (void)pgtbl; /* silence -Wunused-parameter until implemented */
}

/*
 * Fill the permission/attribute fields of a leaf descriptor from MAP_* flags.
 * Written through the l3_page view; callers use it for block entries too, so
 * the attribute bit layout is presumably shared across leaf views — confirm.
 *
 * NOTE(review): the EL handling cross-restricts execution — an EL0 mapping
 * gets PXN (privileged-execute-never) and an EL1 mapping gets UXN — which
 * looks deliberate (prevent the other EL from executing the page), but is
 * not provable from this file alone; verify against the design intent.
 *
 * @level is currently unused.  Always returns 0.
 */
static int set_pte_flags(pte_t *entry, uint64_t attr, uint8_t level)
{
    /* Non-executable at the mapping's own EL: set the matching XN bit. */
    if (!(attr & MAP_FLAG_EXECUTABLE) && (attr & MAP_LEVEL_EL1)) {
        entry->l3_page.PXN = AARCH64_PTE_PXN;
    } else if (!(attr & MAP_FLAG_EXECUTABLE) && (attr & MAP_LEVEL_EL0)) {
        entry->l3_page.UXN = AARCH64_PTE_UXN;
    }

    /* Access permissions pass through from the low attr bits. */
    entry->l3_page.AP = attr & MAP_AP_MASK;

    if (attr & MAP_LEVEL_EL0) {
        entry->l3_page.PXN = AARCH64_PTE_PXN;
    } else if (attr & MAP_LEVEL_EL1) {
        entry->l3_page.UXN = AARCH64_PTE_UXN;
    }

    if (attr & MAP_FLAG_SHAREABLE) {
        entry->l3_page.SH = INNER_SHAREABLE;
    }

    /* Memory type selects a MAIR attribute index. */
    if (attr & MAP_TYPE_DEVICE) {
        entry->l3_page.attr_index = DEVICE_MEMORY;
    } else if (attr & MAP_TYPE_RAM) {
        entry->l3_page.attr_index = NORMAL_MEMORY;
    }

    if ((attr & MAP_FLAG_GLOBAL)) {
        entry->l3_page.nG = AARCH64_PTE_GLOBAL;
    } else {
        entry->l3_page.nG = AARCH64_PTE_NGLOBAL;
    }

    /* Pre-set the access flag so no Access-flag fault is taken on first use. */
    entry->l3_page.AF = AARCH64_PTE_AF_ACCESSED;
    return 0;
}

/*
 * Fetch — or, when @alloc is true, create — the entry for @va at @level.
 *
 * On success, *pte points at the descriptor and *next_ptp at the kernel
 * virtual address of what it references.  Returns NORMAL_PTP for a table
 * entry, BLOCK_PTP for a block/page entry, or -ENOMAPPING when @cur_ptp is
 * NULL or the entry is invalid and allocation was not requested.
 */
int get_next_ptp(page_table_t *cur_ptp, uint8_t level, uint64_t va, page_table_t **next_ptp, pte_t **pte, bool_t alloc)
{
    pte_t   *entry;
    uint32_t idx = 0;

    if (cur_ptp == NULL) {
        return -ENOMAPPING;
    }

    switch (level) {
        case 0:
            idx = GET_L0_INDEX(va);
            break;
        case 1:
            idx = GET_L1_INDEX(va);
            break;
        case 2:
            idx = GET_L2_INDEX(va);
            break;
        case 3:
            idx = GET_L3_INDEX(va);
            break;
        default:
            dprintf("error index!\n");
            assert(0);
    }

    entry = &cur_ptp->entry[idx];

    if (IS_PTE_INVALID(entry->pte)) {
        if (!alloc) {
            return -ENOMAPPING;
        }

        /* Demand-allocate a zeroed next-level table and hook it in. */
        page_table_t *fresh = allocator_alloc_block(PAGE_SIZE);
        assert(fresh != NULL);
        memset((void *)fresh, 0, PAGE_SIZE);

        pte_t tbl_pte;
        tbl_pte.pte                   = 0;
        tbl_pte.table.is_valid        = 1;
        tbl_pte.table.is_table        = 1;
        tbl_pte.table.next_table_addr = allocator_pbase((uint64_t)fresh) >> PAGE_SHIFT;

        entry->pte = tbl_pte.pte;
    }

    *next_ptp = (page_table_t *)GET_NEXT_PTP(entry);
    *pte      = entry;

    return IS_PTE_TABLE(entry->pte) ? NORMAL_PTP : BLOCK_PTP;
}

/*
 * Install a single mapping at exactly @set_level:
 *   1 = L1 (PUD-sized) block, 2 = L2 (PMD-sized) block, 3 = L3 page.
 * Intermediate table levels are allocated on demand by get_next_ptp().
 * @len is informational only (the granule is implied by @set_level).
 *
 * Returns 0 on success, a negative error from get_next_ptp(), or -1 for an
 * invalid @set_level.
 */
static inline int map(page_table_t *pgtbl, uint64_t va, uint64_t pa, uint64_t len, uint64_t attr, uint8_t set_level)
{
    page_table_t *cur_ptp;
    pte_t        *pte;
    uint8_t       level;
    int           ret;

    cur_ptp = pgtbl;
    pte     = NULL;
    /* Walk (allocating as needed) the table levels above the target level. */
    for (level = 0; level < set_level; ++level) {
        ret = get_next_ptp(cur_ptp, level, va, &cur_ptp, &pte, true);
        if (ret < 0) {
            return ret;
        }
    }

    switch (set_level) {
        case 3:
            pte                   = &(cur_ptp->entry[GET_L3_INDEX(va)]);
            pte->pte              = 0;
            pte->l3_page.is_valid = 1;
            pte->l3_page.is_page  = 1;
            pte->l3_page.pfn      = (pa) >> PAGE_SHIFT;
            set_pte_flags(pte, attr, set_level);
            break;
        case 2:
            pte                    = &(cur_ptp->entry[GET_L2_INDEX(va)]);
            pte->pte               = 0;
            pte->l2_block.is_valid = 1;
            pte->l2_block.pfn      = (pa) >> PMD_SHIFT;
            set_pte_flags(pte, attr, set_level);
            break;
        case 1:
            pte                    = &(cur_ptp->entry[GET_L1_INDEX(va)]);
            pte->pte               = 0;
            pte->l1_block.is_valid = 1;
            pte->l1_block.pfn      = (pa) >> PUD_SHIFT;
            set_pte_flags(pte, attr, set_level);
            break;
        default:
            dprintf("set_level:%d error set_level!\n", set_level);
            /* assert(0) actually fires — the old assert(1) was a no-op, and
             * the function then fell through returning 0 (success). */
            assert(0);
            return -1;
    }

    return 0;
}

// Raw descriptor bits used by the boot-time table builder, before the
// allocator and pte_t bit-field views are available.
#define IS_VALID (1UL << 0)
#define IS_TABLE (1UL << 1)
#define IS_PAGE  IS_TABLE

#define PTE_UXN            (0x1UL << 54)
#define PTE_ACCESSED       (0x1UL << 10)
#define PTE_INNER_SHARABLE (0x3UL << 8)

#define PTE_NORMAL_MEMORY (0x2UL << 2)
#define PTE_DEVICE_MEMORY (0x1UL << 2)

// Statically allocated boot page tables: one L0 root plus one L1/L2/L3 table
// per slot, populated by init_boot_pd() before the MMU is enabled.
page_table_t BOOTDATA boot_ttbr0_l0 ALIGNED(1 << 16);
page_table_t BOOTDATA               boot_ttbr0_l1[PTP_ENTRIES] ALIGNED(1 << 16);
page_table_t BOOTDATA               boot_ttbr0_l2[PTP_ENTRIES] ALIGNED(1 << 16);
page_table_t BOOTDATA               boot_ttbr0_l3[PTP_ENTRIES] ALIGNED(1 << 16);

/*
 * Build the boot page tables, running from physical addresses (BOOTPHYSIC).
 *
 * Three passes, each filling the static boot_ttbr0_l* tables:
 *   1. the boot image [CONFIG_LOAD_ADDR ..) with PMD-sized normal-memory
 *      L2 blocks,
 *   2. the kernel SRAM regions listed at @_ksram (PMD-sized blocks),
 *   3. the kernel device regions listed at @_ksdev (4KiB L3 device pages).
 *
 * Each region table is terminated by an entry whose size is 0.  @_ksram and
 * @_ksdev are link-time (exec) addresses and are rebased to load addresses
 * before dereferencing, since the MMU is not on yet.  Whenever an upper-level
 * entry already exists, it is asserted to point at the expected static table
 * before being (re)written.
 */
void BOOTPHYSIC init_boot_pd(uint64_t _ksram, uint64_t _ksdev, uint64_t _boot_end)
{
    /* Rebase the region-table pointers from exec to load addresses. */
    const region_t *pksram = (region_t *)(_ksram - CONFIG_EXEC_ADDR + CONFIG_LOAD_ADDR);
    const region_t *pksdev = (region_t *)(_ksdev - CONFIG_EXEC_ADDR + CONFIG_LOAD_ADDR);

    page_table_t *bootpd = (void *)&boot_ttbr0_l0;
    uint64_t      pbase, count, vbase;

    /* Pass 1: map the boot image itself, PMD-sized block at a time. */
    count = (_boot_end - CONFIG_EXEC_ADDR + PMD_SIZE - 1) >> PMD_SHIFT;
    pbase = CONFIG_LOAD_ADDR;
    vbase = pbase;

    while (count > 0) {
        /* L0 -> L1 link: assert consistency if already present. */
        if (0 == IS_PTE_INVALID(boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte)) {
            assert(GET_ADDR(boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte) ==
                   (uint64_t)(&boot_ttbr0_l1[GET_L0_INDEX(vbase)]));
        }

        boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte = ((uint64_t)&boot_ttbr0_l1[GET_L0_INDEX(vbase)]) | IS_TABLE |
                                                       IS_VALID;

        /* L1 -> L2 link. */
        if (0 == IS_PTE_INVALID(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte)) {
            assert(GET_ADDR(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte) ==
                   ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]));
        }
        boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte =
            ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]) | IS_TABLE | IS_VALID;

        /* L2 leaf: PMD-sized normal-memory block; must not already exist. */
        assert(IS_PTE_INVALID(boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte));

        boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte =
            pbase | PTE_UXN | PTE_ACCESSED | PTE_INNER_SHARABLE | PTE_NORMAL_MEMORY | IS_VALID;

        vbase += PMD_SIZE;
        pbase += PMD_SIZE;
        count--;
    }

    /* Pass 2: kernel SRAM regions, PMD-sized normal-memory blocks. */
    while (pksram->size != 0) {
        vbase = pksram->vbase;
        pbase = pksram->pbase;
        count = pksram->size >> PMD_SHIFT;
        while (count > 0) {
            if (0 == IS_PTE_INVALID(boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte)) {
                assert(GET_ADDR(boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte) ==
                       (uint64_t)(&boot_ttbr0_l1[GET_L0_INDEX(vbase)]));
            }
            boot_ttbr0_l0.entry[GET_L0_INDEX(vbase)].pte = ((uint64_t)&boot_ttbr0_l1[GET_L0_INDEX(vbase)]) | IS_TABLE |
                                                           IS_VALID;

            if (0 == IS_PTE_INVALID(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte)) {
                assert(GET_ADDR(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte) ==
                       ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]));
            }
            boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte =
                ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]) | IS_TABLE | IS_VALID;

            assert(IS_PTE_INVALID(boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte));
            boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte =
                pbase | PTE_UXN | PTE_ACCESSED | PTE_INNER_SHARABLE | PTE_NORMAL_MEMORY | IS_VALID;

            pbase += PMD_SIZE;
            vbase += PMD_SIZE;
            count--;
        }
        pksram++;
    }

    /* Pass 3: device regions at 4KiB page granularity, device memory type. */
    while (pksdev->size != 0) {
        count = pksdev->size >> PAGE_SHIFT;
        vbase = pksdev->vbase;
        pbase = pksdev->pbase;
        while (count > 0) {
            if (0 == IS_PTE_INVALID(bootpd->entry[GET_L0_INDEX(vbase)].pte)) {
                assert(GET_ADDR(bootpd->entry[GET_L0_INDEX(vbase)].pte) ==
                       ((uint64_t)&boot_ttbr0_l1[GET_L0_INDEX(vbase)]));
            }
            bootpd->entry[GET_L0_INDEX(vbase)].pte = ((uint64_t)&boot_ttbr0_l1[GET_L0_INDEX(vbase)]) | IS_TABLE |
                                                     IS_VALID;

            if (0 == IS_PTE_INVALID(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte)) {
                assert(GET_ADDR(boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte) ==
                       ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]));
            }
            boot_ttbr0_l1[GET_L0_INDEX(vbase)].entry[GET_L1_INDEX(vbase)].pte =
                ((uint64_t)&boot_ttbr0_l2[GET_L1_INDEX(vbase)]) | IS_TABLE | IS_VALID;

            /* L2 -> L3 link (device regions descend one level further). */
            if (0 == IS_PTE_INVALID(boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte)) {
                assert(GET_ADDR(boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte) ==
                       ((uint64_t)&boot_ttbr0_l3[GET_L2_INDEX(vbase)]));
            }
            boot_ttbr0_l2[GET_L1_INDEX(vbase)].entry[GET_L2_INDEX(vbase)].pte =
                ((uint64_t)&boot_ttbr0_l3[GET_L2_INDEX(vbase)]) | IS_TABLE | IS_VALID;

            assert(IS_PTE_INVALID(boot_ttbr0_l3[GET_L2_INDEX(vbase)].entry[GET_L3_INDEX(vbase)].pte));
            boot_ttbr0_l3[GET_L2_INDEX(vbase)].entry[GET_L3_INDEX(vbase)].pte =
                pbase | PTE_UXN | PTE_ACCESSED | PTE_INNER_SHARABLE | PTE_DEVICE_MEMORY | IS_TABLE | IS_VALID;
            vbase += PAGE_SIZE;
            pbase += PAGE_SIZE;
            count--;
        }
        pksdev++;
    }
}

/*
 * Configure the EL1 MMU registers (runs at physical addresses, BOOTPHYSIC).
 *
 * CPU 0 builds the shared boot page tables first; every CPU then programs
 * TCR_EL1 (T0SZ = T1SZ = 16, i.e. 48-bit VA spaces, plus the granule /
 * shareability / cacheability fields below), clears EPD0/EPD1 so both
 * table walks are enabled, points TTBR0/TTBR1_EL1 at the boot L0 table and
 * loads the MAIR attribute encodings.  The MMU enable itself happens
 * elsewhere (SCTLR_EL1 is not touched here).
 */
void BOOTPHYSIC set_mmu(uint64_t _ksram, uint64_t _ksdev, uint64_t _boot_end)
{
    if (0 == current_cpu()) {
        init_boot_pd(_ksram, _ksdev, _boot_end);
    }
    uint64_t tcr_el1 = 0;
    tcr_el1 |= (((unsigned long)(0x00)) << SCR_TCR_BIT_TG0 | ((unsigned long)(0x05ul)) << SCR_TCR_BIT_IPSlow |
                ((unsigned long)(0x03)) << SCR_TCR_BIT_SH0low | ((unsigned long)(0x01)) << SCR_TCR_BIT_ORGN0low |
                ((unsigned long)(0X01)) << SCR_TCR_BIT_IRGN0low | ((unsigned long)(16)) << SCR_TCR_BIT_T0SZ |
                ((unsigned long)(0x02)) << SCR_TCR_BIT_TG1low | ((unsigned long)(0x03)) << SCR_TCR_BIT_SH1low |
                ((unsigned long)(0x01)) << SCR_TCR_BIT_ORGN1low | ((unsigned long)(0X01)) << SCR_TCR_BIT_IRGN1low |
                ((unsigned long)(0x0)) << SCR_TCR_BIT_A1 | ((unsigned long)(0x1)) << SCR_TCR_BIT_AS |
                ((unsigned long)(16)) << SCR_TCR_BIT_T1SZ);
    /* 1UL (not int 1): shifting an int by a bit position >= 31 is UB, and
     * these are 64-bit register bit indices. */
    tcr_el1 &= ~(1UL << SCR_TCR_BIT_EPD1);
    tcr_el1 &= ~(1UL << SCR_TCR_BIT_EPD0);
    scr_write_TCR_EL1(tcr_el1);
    scr_write_TTBR0_EL1((uint64_t)&boot_ttbr0_l0);
    scr_write_TTBR1_EL1((uint64_t)&boot_ttbr0_l0);
    scr_write_MAIR_EL1((MMU_MAIR_ATTR1) | (MMU_MAIR_ATTR2) | (MMU_MAIR_ATTR3));
}
