//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include <_hal.h>
#include <init.h>

// ASID (address-space ID) allocation table: slot i holds the owning HAT's
// address when ASID i is in use, or 0 when free.  Slot 0 is never handed
// out (the HAT constructor's scan starts at index 1).
Address g_ASIDManage[256];

// Boot-time HAT initialization hook.  On this port there is nothing to set
// up yet, so it only reports success.
EXTERN DECL_CODEINIT bool_t CDECL InitHAT()
{
    return TRUE;
}

// BUG(review): the "- 2" is unexplained — presumably it reserves the two
// page-directory entries just below KERNEL_BASE (0x80000000) for some
// special use; confirm before relying on these counts.
const uint_t c_cUDirEntries = PHYSADDR_TO_PAGETABNO(0x80000000) - 2;
// Kernel directory entries occupy the remainder of the page directory.
const uint_t c_cKDirEntries = c_uNumberOfPageDirEntries - c_cUDirEntries;

//HAT *HAT::s_pCurrent = (HAT *)g_initPageDir;

// Build a fresh address space: a private page directory whose kernel half
// mirrors the global one, plus a unique ASID for tagging TLB entries.
HAT::HAT()
{
    // TODO(mips): now, make sure of disabled irq or disabled preemption
    assert(PAGE_SIZE == (c_uNumberOfPageDirEntries * sizeof(PageDirEntry)));
    m_pPageDir = (PageDirEntry *)DzAllocKernelPage();

    if (NULL == m_pPageDir) {
        assert(0 && "must be enough memory!");
    }

    // Clear page directory entries of user space.
    //
    memset(&m_pPageDir[0], 0, c_cUDirEntries * sizeof(PageDirEntry));

    // TODO(mips): Copy is unuseful.
    // Copy page directory entries of kernel to new page dir.
    //
    memcpy(
        &m_pPageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));

    // Scan for a free ASID slot.  BUG FIX: the bounds test must come
    // FIRST — the original condition read g_ASIDManage[256] (one past
    // the end of the array) before checking the index.
    // TODO(mips): It's maybe cause synchronization problem.
    for (m_uASID = 1; m_uASID < 256 && 0 != g_ASIDManage[m_uASID]; m_uASID++);
    if (m_uASID >= 255) {
        kprintf("Too many process\n");
        DebugBreak();
    }

    // BUG FIX: guard the store as well — when the table was full the
    // original code fell through DebugBreak() and wrote g_ASIDManage[256],
    // corrupting memory past the array.
    if (m_uASID < 256) {
        g_ASIDManage[m_uASID] = (Address)(this);
    }
}

void HAT::SyncGlobalHAT()
{
    memcpy(
        &m_pageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));
}

// Tear down this address space: release the page-directory page, flush the
// TLB, and return the ASID slot to the free pool.
// NOTE(review): user-space page tables are released by Dispose(), not here
// — presumably callers invoke Dispose() first; confirm.
HAT::~HAT()
{
    // NOTE(review): the directory page is freed before the TLB flush, so
    // stale entries tagged with this ASID survive until FlushAllTlbs()
    // below — assumes nothing can fault through this HAT in between.
    DzFreeKernelPage(m_pPageDir);
    FlushAllTlbs();
    g_ASIDManage[m_uASID] = 0; // recycle the ASID slot
}

// Map the physical range starting at pBase to virtual [vBase, vLimit)
// with the requested protection, allocating page tables on demand.
// Returns FALSE when a page table cannot be allocated (mappings already
// installed up to that point are left in place).
bool_t HAT::LoadMap(
    uint_t vBase, uint_t vLimit, uint_t pBase,
    dword_t dwProtect, Boolean bIsFlushCache)
{
    assert(vLimit > vBase);
    assert(vLimit <= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(pBase) == 0);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

#if 0
    kprintf(">>>map 0x%08x to 0x%08x, limit = 0x%08x, dwProtect = 0x%08x\n",
        vBase, pBase, vLimit, dwProtect);
#endif

    bool_t bIsActive;
    dword_t dwProt = PageEntryFlag_Valid;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    bIsActive = (this == HAT::GetCurrent());

    if (dwProtect & MemoryProtection_Write) {
        dwProt |= PageEntryFlag_Writable;
    }

    if (dwProtect & MemoryProtection_IO) {
        dwProt |= PageEntryFlag_Uncached;
    }
    else {
        dwProt |= PageEntryFlag_CacheableNoncoherent;
    }

    if (dwProtect & MemoryProtection_Exec) {
        // BUG FIX: was "dwProt &= PageEntryFlag_Exe", which would have
        // masked away the Valid/Writable/cache bits set above instead of
        // adding the execute flag (the else branch OR-s symmetrically).
        dwProt |= PageEntryFlag_Exe;
    }
    else {
        dwProt |= PageEntryFlag_NotExe;
    }

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pPageDir[PAGEDIR_INDEX(vBase)].uValue;
        if (!pPageTab) {
            // No page table be pointed by PageDirEntry, create one.
            //
            pPageTab = (PageTabEntry *)DzAllocKernelPage();

            if (!pPageTab) {
                return FALSE;
            }

            memset(pPageTab, 0, c_uSizeOfPageTab);
            m_pPageDir[PAGEDIR_INDEX(vBase)].uValue = (uint32_t)pPageTab;
        }

        // Exclusive upper table index: either the end of this page table
        // or the page containing vLimit, whichever comes first.
        m = Min(c_cPageTabEntries,
            (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);

        for (n = PAGETAB_INDEX(vBase); n < m; n++) {
            // Strip the KSEG bits from the physical address and shift the
            // frame number into place (>> 6), then merge the flags.
            pPageTab[n].uValue = ((pBase & 0x1fffffff) >> 6) | dwProt;
            if (!(dwProt & PageEntryFlag_NotExe)) {
                // Executable page: write back & invalidate so the
                // instruction cache observes the new code.
                dma_cache_wback_inv((uint32_t)VIRTADDR(pBase), PAGE_SIZE);
            }
            pBase += PAGE_SIZE;
            vBase += PAGE_SIZE;
        }
    }

//  FlushCache();

    return TRUE;
}

// Remove the mappings for virtual range [vBase, vLimit): zero each page
// table entry and invalidate the TLB entry when this HAT is the one
// currently active.  The page tables themselves are not freed here
// (Dispose() does that).
void HAT::UnloadMap(uint_t vBase, uint_t vLimit)
{
    assert(vLimit > vBase);
    assert(vLimit <= (uint_t)KERNEL_BASE);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    bool_t bIsActive;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    bIsActive = (this == HAT::GetCurrent());

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pPageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // (Removed a no-op self-cast here; uValue stores the plain
            // page-table pointer — see LoadMap — so no masking is needed.)

            // Exclusive upper table index: end of this table or the page
            // containing vLimit, whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);

            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
                pPageTab[n].uValue = 0;
                if (bIsActive) InvalidTLB(vBase);
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table for this directory slot: jump straight to the
            // next directory-aligned boundary.
            vBase = (vBase + (~PAGEDIR_MASK + 1)) & PAGEDIR_MASK;
        }
    }
}

// Change the protection of an already-mapped range.
// NOT IMPLEMENTED on this port: the body below is the x86 version kept
// commented out for reference, and the function currently traps with
// "break 0".  NOTE(review): it still returns TRUE after the trap, which
// is misleading to callers — flagged rather than changed here.
bool_t HAT::SetProtect(uint_t vBase, uint_t vLimit, dword_t dwProtect)
{
//    assert(vLimit > vBase);
//    assert(vLimit <= (uint_t)KERNEL_BASE);
//    assert(PAGE_OFFSET(vBase) == 0);
//    assert(PAGE_OFFSET(vLimit) == 0);
//
//    bool_t bIsActive;
//    dword_t dwProt;
//    PageTabEntry *pPageTab;
//    register uint_t m, n;
//
//    bIsActive = (this == HAT::GetCurrent());
//
//    dwProt = 0;
//    if (dwProtect & MemoryProtection_Write) {
//        dwProt |= PageEntryFlag_RW;
//    }
//    if (dwProtect & MemoryProtection_IO) {
//        dwProt |= PageEntryFlag_PCD;
//    }
//
//    const dword_t c_dwProtMask = ~(PageEntryFlag_RW | PageEntryFlag_PCD);
//
//    while (vBase < vLimit) {
//        pPageTab = (PageTabEntry *)m_pPageDir[PAGEDIR_INDEX(vBase)].uValue;
//
//        // BUG: need assertion ?
//        if (pPageTab) {
//            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab) & PAGE_MASK);
//
//            m = Min(c_cPageTabEntries,
//                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
//            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
//                pPageTab[n].uValue =
//                    (pPageTab[n].uValue & c_dwProtMask) | dwProt;
//                if (bIsActive) FlushTlb((virtaddr_t)vBase);
//                vBase += PAGE_SIZE;
//            }
//        }
//    }
    ASM("break 0");

    return TRUE;
}

void HAT::Dispose()
{
    assert(this != HAT::GetCurrent());

    register uint_t n;
    for (n = 0; n < c_cUDirEntries; n++) {
        if (m_pPageDir[n].uValue) {
            DzFreeKernelPage((void *)(m_pPageDir[n].uValue & PAGE_MASK));
            m_pPageDir[n].uValue = 0;
        }
    }
}

// Switch the MMU to the kernel's own address space.
// NOTE(review): passes 0 as the directory address — presumably a sentinel
// meaning "use the global/kernel page directory"; confirm against
// SetPageDirAddress's implementation.
void HAT::ActivateKHAT()
{
    SetPageDirAddress(0);
}

// Map a physical range into kernel virtual space.
// NOT IMPLEMENTED: stub that always reports failure.
bool_t HAT::LoadKernelMap(
    uint_t vBase, uint_t vLimit, uint_t pBase, dword_t dwProtect)
{
    // TODO:
    return FALSE;
}

// Unmap a kernel virtual range.
// NOT IMPLEMENTED: stub with no effect.
void HAT::UnloadKernelMap(uint_t vBase, uint_t vLimit)
{
    // TODO:
}
