//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include <_hal.h>

// Code-init entry point for the HAT (hardware address translation)
// subsystem.  No per-boot state needs to be prepared here yet; returns
// TRUE so system initialization proceeds.
EXTERN DECL_CODEINIT bool_t CDECL InitHAT()
{
    return TRUE;
}

//
// class HAT
//
// Split of the page directory between user and kernel space: the low
// entries (half minus one) cover user space; the rest (half plus one)
// are the kernel mappings shared with g_initPageDir.
// NOTE(review): the "- 1" implies KERNEL_BASE sits one directory entry
// below the midpoint of the address space -- confirm against KERNEL_BASE.
const uint_t c_cUDirEntries = c_uNumberOfPageDirEntries / 2 - 1;
const uint_t c_cKDirEntries = c_uNumberOfPageDirEntries - c_cUDirEntries;

// Construct a new address space: the user half of the page directory is
// empty, the kernel half aliases the global kernel page directory
// (g_initPageDir), so every address space sees the same kernel mappings.
HAT::HAT()
{
    // Clear page directory entries of user space.
    //
    memset(&m_pageDir[0], 0, c_cUDirEntries * sizeof(PageDirEntry));

    // Copy page directory entries of kernel to new page dir.
    // These entries reference shared kernel page tables; if new kernel
    // directory entries appear later, SyncGlobalHAT() re-copies them.
    memcpy(
        &m_pageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));
}

// Re-copy the kernel half of g_initPageDir into this address space's
// page directory.  Needed after LoadKernelMap() allocates a new kernel
// page table, since that only updates g_initPageDir, not the per-HAT
// copies made in the constructor.
void HAT::SyncGlobalHAT()
{
    memcpy(
        &m_pageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));
}

// Map the physical range starting at pBase to the user virtual range
// [vBase, vLimit) in this address space.
//
// All three addresses must be page aligned and the virtual range must
// lie entirely below KERNEL_BASE.  dwProtect selects write access
// (MemoryProtection_Write) and cache-disable (IoRemapFlag_Direct);
// every mapping is made user-accessible and present.
//
// Returns TRUE on success, FALSE if a page table could not be
// allocated (entries already written are left in place).
//
// NOTE(review): bIsFlushCache is currently ignored, and no TLB flush is
// performed here.  That is safe for fresh (previously not-present)
// mappings, but replacing an already-present mapping on the active HAT
// would leave stale TLB entries -- confirm callers never do that.
bool_t HAT::LoadMap(
    uint_t vBase, uint_t vLimit, uint_t pBase,
    dword_t dwProtect, Boolean bIsFlushCache)
{
    assert(vLimit > vBase);
    assert(vLimit <= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(pBase) == 0);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Translate the abstract protection bits into page-entry flags.
    dwProt = PageEntryFlag_Present | PageEntryFlag_User;
    if (dwProtect & MemoryProtection_Write) {
        dwProt |= PageEntryFlag_RW;
    }
    if (dwProtect & IoRemapFlag_Direct) {
        dwProt |= PageEntryFlag_PCD;
    }

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PD_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry holds the page table's physical address
            // plus flag bits: convert to a virtual pointer and strip
            // the flags.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & PAGETAB_BASEADDR_MASK);
        }
        else {
            // No page table be pointed by page_dir_entry, create one.
            //
            pPageTab = AllocPageTable();
            if (!pPageTab) return FALSE;
            memset(pPageTab, 0, PAGE_SIZE);
            // The directory entry is always writable; per-page
            // protection is enforced at the page-table level.
            m_pageDir[PD_INDEX(vBase)].uValue =
                __PA(pPageTab) | dwProt | PageEntryFlag_RW;
        }

        // Fill entries up to the end of this page table or of the
        // requested range, whichever comes first; the outer loop then
        // moves on to the next directory entry.
        m = Min(c_cPageTabEntries, (vLimit - (vBase & PD_MASK)) >> PAGE_SHIFT);
        for (n = PT_INDEX(vBase); n < m; n++) {
            pPageTab[n].uValue = pBase | dwProt;
            pBase += PAGE_SIZE;
            vBase += PAGE_SIZE;
        }
    }

    return TRUE;
}

// Remove the user mappings in [vBase, vLimit) from this address space.
// Both addresses must be page aligned and below KERNEL_BASE.  Stale TLB
// entries are flushed per page, but only when this HAT is the currently
// active one.  Page tables that become empty are NOT freed here; they
// are reclaimed in Dispose().
void HAT::UnloadMap(uint_t vBase, uint_t vLimit)
{
    assert(vLimit > vBase);
    assert(vLimit <= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    bool_t bIsActive;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Only the live address space can have stale TLB translations.
    bIsActive = (this == HAT::GetCurrent());

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PD_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Convert the entry's physical table address to a virtual
            // pointer and strip the flag bits.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & PAGETAB_BASEADDR_MASK);

            // Clear entries up to the end of this page table or of the
            // requested range, whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PD_MASK)) >> PAGE_SHIFT);
            for (n = PT_INDEX(vBase); n < m; n++) {
                pPageTab[n].uValue = 0;
                if (bIsActive) FlushTlb((virtaddr_t)vBase);
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table here: nothing mapped in this directory
            // slot, so jump straight to the next directory boundary.
            vBase = (vBase + (~PD_MASK + 1)) & PD_MASK;
        }
    }
}

// Change the protection (write access, cache-disable) of the existing
// user mappings in [vBase, vLimit).  Unmapped pages are silently
// skipped; the Present flag and base address of mapped pages are
// preserved while RW/PCD are rewritten from dwProtect.  The TLB is
// flushed per changed page when this HAT is active.  Always returns
// TRUE.
bool_t HAT::SetProtect(uint_t vBase, uint_t vLimit, dword_t dwProtect)
{
    assert(vLimit > vBase);
    assert(vLimit <= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    bool_t bIsActive;
    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Only the live address space can have stale TLB translations.
    bIsActive = (this == HAT::GetCurrent());

    // Build the new RW/PCD flag bits from the abstract protection.
    dwProt = 0;
    if (dwProtect & MemoryProtection_Write) {
        dwProt |= PageEntryFlag_RW;
    }
    if (dwProtect & IoRemapFlag_Direct) {
        dwProt |= PageEntryFlag_PCD;
    }

    // Mask that keeps everything in an entry except the bits we rewrite.
    const dword_t c_dwProtMask = ~(PageEntryFlag_RW | PageEntryFlag_PCD);

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PD_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Convert the entry's physical table address to a virtual
            // pointer and strip the flag bits.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & PAGETAB_BASEADDR_MASK);

            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PD_MASK)) >> PAGE_SHIFT);
            for (n = PT_INDEX(vBase); n < m; n++) {
                // Only touch pages that are actually mapped.
                if (0 != pPageTab[n].uValue) {
                    pPageTab[n].uValue =
                        (pPageTab[n].uValue & c_dwProtMask) | dwProt;
                    if (bIsActive) FlushTlb((virtaddr_t)vBase);
                }
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table here: skip to the next directory boundary.
            vBase = (vBase + (~PD_MASK + 1)) & PD_MASK;
        }
    }

    return TRUE;
}

// Free all user-space page tables of this address space and clear the
// corresponding directory entries.  Must not be called on the currently
// active HAT.  The kernel half of the directory is shared with
// g_initPageDir and is deliberately left untouched.
void HAT::Dispose()
{
    assert(this != HAT::GetCurrent());

    register uint_t n;

    for (n = 0; n < c_cUDirEntries; n++) {
        if (m_pageDir[n].uValue) {
            // Strip flag bits, convert the physical table address to a
            // virtual pointer, and release the table.
            FreePageTable(
                (PageTabEntry *)(__VA(m_pageDir[n].uValue
                    & PAGETAB_BASEADDR_MASK)));
            m_pageDir[n].uValue = 0;
        }
    }
}

// Activate the global kernel address space by installing g_initPageDir's
// physical address as the hardware page-directory base.
void HAT::ActivateKHAT()
{
    SetPageDirAddress(PHYSADDR(g_initPageDir));
}

// Map the physical range starting at pBase to the kernel virtual range
// [vBase, vLimit) in the GLOBAL page directory (g_initPageDir).
// Address spaces created afterwards inherit the mapping via HAT::HAT();
// pre-existing ones need SyncGlobalHAT() if a new directory entry was
// created here.
//
// All addresses must be page aligned and vBase must be at or above
// KERNEL_BASE.  Returns TRUE on success, FALSE if a page table could
// not be allocated.
//
// NOTE(review): readable/writable kernel ranges are mapped with
// PageEntryFlag_User set (here and on the directory entry below), i.e.
// they are visible to user mode -- this appears deliberate, but confirm
// it is intended for every caller.
bool_t HAT::LoadKernelMap(
    uint_t vBase, uint_t vLimit, uint_t pBase, dword_t dwProtect)
{
    assert(vLimit > vBase);
    assert(vBase >= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(pBase) == 0);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Translate the abstract protection into page-entry flags; User is
    // granted only for readable or writable mappings.
    dwProt = PageEntryFlag_Present;
    if (dwProtect & MemoryProtection_Write) {
        dwProt |= (PageEntryFlag_RW | PageEntryFlag_User);
    }
    else if (dwProtect & MemoryProtection_Read) {
        dwProt |= PageEntryFlag_User;
    }

    if (dwProtect & IoRemapFlag_Direct) {
        dwProt |= PageEntryFlag_PCD;
    }

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)g_initPageDir[PD_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Convert the entry's physical table address to a virtual
            // pointer and strip the flag bits.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & PAGETAB_BASEADDR_MASK);
        }
        else {
            // No page table be pointed by page_dir_entry, create one.
            //
            pPageTab = AllocPageTable();
            if (!pPageTab) return FALSE;
            memset(pPageTab, 0, PAGE_SIZE);
            g_initPageDir[PD_INDEX(vBase)].uValue =
                __PA(pPageTab) | dwProt | PageEntryFlag_RW;
        }

        // Make the directory entry user-accessible; page-level flags
        // still control what user mode can actually reach.
        g_initPageDir[PD_INDEX(vBase)].uValue |= PageEntryFlag_User;

        // Fill entries up to the end of this page table or of the
        // requested range; kernel mappings are shared by every address
        // space, so flush each page unconditionally.
        m = Min(c_cPageTabEntries, (vLimit - (vBase & PD_MASK)) >> PAGE_SHIFT);
        for (n = PT_INDEX(vBase); n < m; n++) {
            pPageTab[n].uValue = pBase | dwProt;
            FlushTlb((virtaddr_t)vBase);
            pBase += PAGE_SIZE;
            vBase += PAGE_SIZE;
        }
    }

    return TRUE;
}

// Remove the kernel mappings in [vBase, vLimit) from the global page
// directory (g_initPageDir).  Addresses must be page aligned and at or
// above KERNEL_BASE.  Kernel mappings exist in every address space, so
// the TLB is flushed per page unconditionally.  Emptied page tables are
// not freed.
void HAT::UnloadKernelMap(uint_t vBase, uint_t vLimit)
{
    assert(vLimit > vBase);
    assert(vBase >= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    PageTabEntry *pPageTab;
    register uint_t m, n;

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)g_initPageDir[PD_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Convert the entry's physical table address to a virtual
            // pointer and strip the flag bits.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & PAGETAB_BASEADDR_MASK);

            // Clear entries up to the end of this page table or of the
            // requested range, whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PD_MASK)) >> PAGE_SHIFT);
            for (n = PT_INDEX(vBase); n < m; n++) {
                pPageTab[n].uValue = 0;
                FlushTlb((virtaddr_t)vBase);
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table here: skip to the next directory boundary.
            vBase = (vBase + (~PD_MASK + 1)) & PD_MASK;
        }
    }
}
