/**
 * Copyright (c) 2018-2022, NXOS Development Team
 * SPDX-License-Identifier: Apache-2.0
 * 
 * Contains: Page init 
 * 
 * Change Logs:
 * Date           Author            Notes
 * 2021-11-28     JasonHu           Init
 */

#include <base/memory.h>

#include <platform.h>
#include <page_zone.h>
#include <base/page.h>
#include <base/mmu.h>
#include <base/page.h>
#include <arch/mmu.h>
#define NX_LOG_LEVEL NX_LOG_INFO
#define NX_LOG_NAME "Page"
#include <base/log.h>

#include <base/debug.h>
#include <drivers/direct_uart.h>
#include <arch/base.h>

#include <arch/pgtable_hwdef.h>
#include <arch/pgtable_prot.h>
#include <arch/pgtable_types.h>
#include <arch/pgtable.h>
#include <arch/mm.h>

NX_PRIVATE NX_U64 *kernelTable;
extern void *NX_HalMapPageWithPhy_2MB(NX_Mmu *mmu, NX_Addr virAddr, NX_Addr phyAddr, NX_Size size, NX_UArch attr);

/**
 * Build the initial identity mapping for the kernel before the MMU is turned on.
 *
 * Maps, in order: the DRAM region below the kernel image, the kernel text
 * (read-only + executable), the kernel data/bss (read-write), the remaining
 * kernel page pool with 2MB block entries, and finally the device MMIO window.
 *
 * @param mmu       kernel MMU descriptor whose table receives the entries
 * @param kernelTop highest physical/virtual address of the kernel page pool
 */
NX_PRIVATE void NX_HalEarlyMap(NX_Mmu *mmu, NX_Addr kernelTop)
{
    /* first 2MB-aligned address past the end of the kernel image (bss) */
    NX_Addr imageEnd2M = NX_ALIGN_UP(NX_PAGE_ALIGNUP(NX_KernelGetSegmentEnd("bss")), NX_MB * 2);

    /* report the layout about to be mapped */
    NX_LOG_I("Map Kernel Normal: %p~%p", DRAM_BASE, NX_KernelGetSegmentStart("text"));
    NX_LOG_I("Map Kernel Image: text: %p~%p", NX_KernelGetSegmentStart("text"), NX_KernelGetSegmentEnd("text"));
    NX_LOG_I("Map Kernel Image: data: %p~%p", NX_KernelGetSegmentStart("data"), NX_KernelGetSegmentEnd("data"));
    NX_LOG_I("Map Kernel Image: bss: %p~%p", NX_KernelGetSegmentStart("bss"), NX_KernelGetSegmentEnd("bss"));
    NX_LOG_I("Map Kernel Pages: %p~%p", NX_PAGE_ALIGNUP(NX_KernelGetSegmentEnd("bss")), kernelTop);
    NX_LOG_I("Map Kernel Pages 2MB: %p~%p", imageEnd2M, kernelTop);
    NX_LOG_I("Map Kernel Device: %p~%p", PBASE, PBASE + DEVICE_SIZE);

    /* DRAM that sits below the kernel image: normal RW mapping */
    NX_MmuMapPageWithPhy(mmu, DRAM_BASE, DRAM_BASE, NX_KernelGetSegmentStart("text") - DRAM_BASE, NX_PAGE_ATTR_KERNEL);

    /* kernel code: read-only, executable */
    NX_MmuMapPageWithPhy(mmu, NX_KernelGetSegmentStart("text"), NX_KernelGetSegmentStart("text"),
        NX_KernelGetSegmentEnd("text") - NX_KernelGetSegmentStart("text"), NX_PAGE_ATTR_KERNEL_ROX);

    /* kernel data + bss up to the 2MB boundary: read-write */
    NX_MmuMapPageWithPhy(mmu, NX_PAGE_ALIGNUP(NX_KernelGetSegmentEnd("text")), NX_PAGE_ALIGNUP(NX_KernelGetSegmentEnd("text")),
        imageEnd2M - NX_PAGE_ALIGNUP(NX_KernelGetSegmentEnd("text")), NX_PAGE_ATTR_KERNEL);

    /* the rest of the kernel page pool: 2MB block mappings for fewer table entries */
    NX_HalMapPageWithPhy_2MB(mmu, imageEnd2M, imageEnd2M,
        kernelTop - imageEnd2M, NX_PAGE_ATTR_KERNEL);

    /* device MMIO window: strongly-ordered device memory (nGnRnE) */
    NX_HalMapPageWithPhy_2MB(mmu, PBASE, PBASE, DEVICE_SIZE, PROT_DEVICE_nGnRnE);
}

NX_PRIVATE void TestKernelMap(void)
{
	int step = 0x1000; // 0x1000
	for (unsigned long vaddr = DRAM_BASE; vaddr < DRAM_BASE + MEM_KERNEL_SPACE_SZ; vaddr += step) {
		void *paddr = NX_MmuVir2Phy(&gKernelMmu, vaddr);
		if (!paddr) {
			NX_LOG_E("kernel virtual addr %p access invalid!", vaddr);
		}
	}
    NX_LOG_I("kernel memory map test done");
}

/**
 * Init physic memory and map kernel on virtual memory.
 */
/**
 * Init physic memory and map kernel on virtual memory.
 *
 * Sizes the normal page zone from the detected DRAM size, allocates the
 * kernel page table, builds the early kernel mapping, then switches the
 * MMU on and verifies the mapping.
 */
void NX_HalPageZoneInit(void)
{    
    NX_Size memSize = DRAM_SIZE_DEFAULT;
    
    NX_LOG_I("Memory NX_Size: %x Bytes %d MB", memSize, memSize / NX_MB);

    if (memSize == 0)
    {
        NX_PANIC("Get Memory NX_Size Failed!");
    }
    if (memSize < MEM_MIN_SIZE)
    {
        NX_LOG_E("Must has %d MB memory!", MEM_MIN_SIZE / NX_MB);
        NX_PANIC("Memory too small");
    }
    
    /* calc normal base & size: what remains after the kernel's reserved area */
    NX_Size availableSize = memSize - MEM_KERNEL_SZ - MEM_KERNEL_BASE_OFF;
    
    /* clamp the normal zone to the kernel address-space window */
    NX_Size normalSize = availableSize;
    if (normalSize > MEM_KERNEL_SPACE_SZ)
    {
        normalSize = MEM_KERNEL_SPACE_SZ;
    }

    if (normalSize > MEM_KERNEL_TOP - MEM_NORMAL_BASE)
    {
        normalSize = MEM_KERNEL_TOP - MEM_NORMAL_BASE;
    }
    
    NX_LOG_I("Normal memory: %p~%p NX_Size:%d MB", MEM_NORMAL_BASE, MEM_NORMAL_BASE + normalSize, normalSize / NX_MB);

    /* init page zone */
    NX_PageInitZone(NX_PAGE_ZONE_NORMAL, (void *)MEM_NORMAL_BASE, normalSize);

    /* allocate the kernel top-level page table; check BEFORE touching it
     * (previously the table was zeroed before the NULL assert) */
    kernelTable = NX_PageAlloc(1);
    NX_ASSERT(kernelTable != NX_NULL);
    NX_MemZero(kernelTable, NX_PAGE_SIZE);
    NX_MmuInit(&gKernelMmu, kernelTable, 0, NX_GB, MEM_NORMAL_BASE + normalSize);

    /* build the early identity mapping up to the top of the normal zone */
    NX_HalEarlyMap(&gKernelMmu, MEM_NORMAL_BASE + normalSize);

    NX_LOG_I("set MMU table: %p", NX_MmuGetKernelTable());

    NX_MmuSetPageTable((NX_UArch)NX_MmuGetKernelTable());
    NX_MmuEnable();

    NX_LOG_I("MMU enabled");
    
    /* verify every kernel virtual page translates */
    TestKernelMap();

    // NX_MmuDump(&gKernelMmu, 0, MEM_KERNEL_TOP);

    NX_LOG_I("Memory init done.");
}
