/*
 * Copyright (C) 2015 Niek Linnenbank
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <FreeNOS/System.h>
#include <SplitAllocator.h>
#include <MemoryBlock.h>
#include <Log.h>
#include "CoreInfo.h"
#include "ARMCore.h"
#include "ARMControl.h"
#include "ARMPaging.h"
#include "ARMFirstTable.h"

/**
 * Construct a paging context that owns its first-level page table.
 *
 * The table itself is allocated lazily in initialize() using the
 * given SplitAllocator. The kernel base address is taken from the
 * per-core CoreInfo, since each core maps a different physical offset.
 */
ARMPaging::ARMPaging(MemoryMap *map, SplitAllocator *alloc)
    : MemoryContext(map, alloc)
    , m_firstTable(0)     // allocated on demand in initialize()
    , m_firstTableAddr(0) // physical address of the table, 0 = not yet allocated
    , m_kernelBaseAddr(coreInfo.memory.phys)
{
}

/**
 * Destructor. Releases the physical pages backing the first-level
 * page table, if this context allocated them.
 *
 * Note: the kernel-boot constructor (taking an explicit first table
 * address) passes ZERO as the allocator, so we must also guard on
 * m_alloc before releasing — otherwise destroying such a context
 * would dereference a null allocator.
 */
ARMPaging::~ARMPaging()
{
    if (m_firstTableAddr != 0 && m_alloc != ZERO)
    {
        // The table spans multiple pages; release each one.
        for (Size i = 0; i < sizeof(ARMFirstTable); i += PAGESIZE)
            m_alloc->release(m_firstTableAddr + i);
    }
}

/**
 * Construct a paging context around an already-existing first-level
 * page table (used during early kernel boot, before an allocator exists).
 *
 * No allocator is provided (ZERO), so this context does not own or
 * release the table's memory.
 */
ARMPaging::ARMPaging(MemoryMap *map,
                     Address firstTableAddress,
                     Address kernelBaseAddress)
    : MemoryContext(map, ZERO)
    , m_firstTable((ARMFirstTable *) firstTableAddress) // reinterpret the given address as the first-level table; assumes it is addressable from the current mapping
    , m_firstTableAddr(firstTableAddress)
    , m_kernelBaseAddr(kernelBaseAddress)
{
}

/**
 * Initialize this paging context: allocate and zero the first-level
 * page table (if not supplied at construction) and install the fixed
 * mappings for the kernel, the temporary boot stack and the I/O zone.
 *
 * @return MemoryContext::Success on success, MemoryContext::OutOfMemory
 *         if the first-level table could not be allocated.
 */
MemoryContext::Result ARMPaging::initialize()
{
    // Allocate first page table if needed
    if (m_firstTable == 0)
    {
        Allocator::Range phys, virt;
        phys.address = 0;
        phys.size = sizeof(ARMFirstTable);
        // The ARM first-level table must be aligned to its own size.
        phys.alignment = sizeof(ARMFirstTable);

        // Allocate page directory
        if (m_alloc->allocate(phys, virt) != Allocator::Success)
        {
            return MemoryContext::OutOfMemory;
        }

        // Keep the virtual pointer for CPU access and the physical
        // address for programming the translation table base register.
        m_firstTable = (ARMFirstTable *) virt.address;
        m_firstTableAddr = phys.address;
    }

    // Initialize the page directory (zero all entries)
    MemoryBlock::set(m_firstTable, 0, sizeof(ARMFirstTable));

    // Map the kernel. The kernel has permanently mapped 1GB of
    // physical memory. This 1GiB memory region starts at its physical
    // base address offset which varies per core.
    Memory::Range kernelRange = m_map->range(MemoryMap::KernelData); // virtual range reserved for kernel data
    kernelRange.phys = m_kernelBaseAddr; // per-core physical base of the kernel region
    m_firstTable->mapLarge(kernelRange, m_alloc); // install 1MiB section mappings for the kernel

    // For the stack region below: first remove whatever the kernel
    // mapping installed for that virtual range, then re-map it with
    // stack-appropriate permissions.
#ifndef BCM2835
    // Temporary stack is used for kernel initialization code
    // and for SMP the temporary stack is shared between cores.
    // This is needed in order to perform early-MMU enable.
    m_firstTable->unmap(TMPSTACKADDR, m_alloc);

    const Memory::Range tmpStackRange = {
        TMPSTACKADDR, TMPSTACKADDR, MegaByte(1), Memory::Readable|Memory::Writable
    };
    m_firstTable->mapLarge(tmpStackRange, m_alloc);
#endif /* BCM2835 */

    // Unmap I/O zone (one 1MiB section at a time)
    for (Size i = 0; i < IO_SIZE; i += MegaByte(1))
        m_firstTable->unmap(IO_BASE + i, m_alloc);

    // Map the I/O zone as Device / Uncached memory.
    Memory::Range io;
    io.phys = IO_BASE;
    io.virt = IO_BASE; // identity-mapped
    io.size = IO_SIZE;
    io.access = Memory::Readable | Memory::Writable | Memory::Device;
    m_firstTable->mapLarge(io, m_alloc);

    return MemoryContext::Success;
}

#ifdef ARMV6
/**
 * Enable the MMU on ARMv6 using this context's first-level table.
 *
 * The sequence is order-critical: program the translation table
 * registers, set control flags, flush TLBs and caches, disable the
 * caches across the MMU switch, then re-enable caching and branch
 * prediction afterwards.
 *
 * @return Success
 */
MemoryContext::Result ARMPaging::enableMMU()
{
    ARMControl ctrl;

    // Program first level table. Enable L2 cache for page walking.
    // (bit 0 of TTBR0 marks the walk as cacheable)
    ctrl.write(ARMControl::TranslationTable0, ((u32) m_firstTableAddr | 1));
    ctrl.write(ARMControl::TranslationTable1,    0);
    ctrl.write(ARMControl::TranslationTableCtrl, 0); // TTBCR=0: always use TTBR0
|
    // Set Control flags
    ctrl.set(ARMControl::DomainClient);
    ctrl.set(ARMControl::DisablePageColoring);
    ctrl.set(ARMControl::AccessPermissions);
    ctrl.set(ARMControl::ExtendedPaging);
    ctrl.unset(ARMControl::BranchPrediction);

    // Flush TLB's and caches
    tlb_flush_all();
    m_cache.cleanInvalidate(Cache::Unified);

    // Disable caches.
    ctrl.unset(ARMControl::InstructionCache);
    ctrl.unset(ARMControl::DataCache);

    // Enable the MMU. This re-enables instruction and data cache too.
    ctrl.set(ARMControl::MMUEnabled);
    tlb_flush_all();

    // Reactivate both caches and branch prediction
    ctrl.set(ARMControl::InstructionCache);
    ctrl.set(ARMControl::DataCache);
    ctrl.set(ARMControl::BranchPrediction);

    return Success;
}

#elif defined(ARMV7)

/**
 * Enable the MMU on ARMv7 using this context's first-level table.
 *
 * The sequence is order-critical and uses dsb()/isb() barriers to
 * ensure each maintenance step completes before the next begins.
 *
 * @return Success
 */
MemoryContext::Result ARMPaging::enableMMU()
{
    ARMControl ctrl;

    // Flush TLB's
    tlb_flush_all();
    dsb();
    isb();

    // Enable branch prediction
    ctrl.set(ARMControl::BranchPrediction);

    // Program first level table
    ctrl.write(ARMControl::TranslationTable0, (((u32) m_firstTableAddr) |
        (1 << 3) | // outer write-back, write-allocate
        (1 << 6)   // inner write-back, write-allocate
    ));
    // TTBR1 is typically reserved for a separate (e.g. user) table;
    // it is not used here, so clear it and select TTBR0 only (TTBCR=0).
    ctrl.write(ARMControl::TranslationTable1,    0);
    ctrl.write(ARMControl::TranslationTableCtrl, 0);
    dsb();
    isb();

    // Set as client for all domains
    ctrl.write(ARMControl::DomainControl, 0x55555555);

    // Enable the MMU.
    u32 nControl = ctrl.read(ARMControl::SystemControl);

    // Raise all caching, MMU and branch prediction flags.
    nControl |= (1 << 11) | (1 << 2) | (1 << 12) | (1 << 0) | (1 << 5);

    // Write back to set.
    ctrl.write(ARMControl::SystemControl, nControl);
    isb();

    // Need to enable alignment faults separately of the MMU,
    // otherwise QEMU will hard reset the CPU
    ctrl.set(ARMControl::AlignmentFaults);

    // Flush all
    tlb_flush_all();
    dsb();
    isb();
    return Success;
}
#endif /* ARMV7 */

/**
 * Activate this paging context on the current core.
 *
 * Either performs a full MMU (re)initialization, or — when the MMU is
 * already running — switches the first-level table and flushes the
 * caches/TLB so that stale translations from the previous context
 * cannot be used.
 *
 * @param initializeMMU true to (re)enable the MMU from scratch
 * @return Success
 */
MemoryContext::Result ARMPaging::activate(bool initializeMMU)
{
    ARMControl ctrl;

    // Do we need to (re)enable the MMU?
    if (initializeMMU)
    {
        enableMMU();
    }
    // MMU already enabled, we only need to change first level table and flush caches.
    //
    // Invalidating the instruction/data caches forces subsequent accesses
    // to miss, so the CPU re-translates addresses through the MMU using
    // the newly installed page tables. The barriers ensure the cache
    // maintenance operations are fully complete (in program order) before
    // execution continues — this may stall the pipeline, which is the
    // intended cost of guaranteeing ordering. After the switch, any access
    // whose translation is missing raises a page fault, which the kernel
    // can service by mapping (and, for demand paging, loading) the page.
    else
    {
#ifdef ARMV6
        mcr(p15, 0, 0, c7, c5,  0);    // flush entire instruction cache
        mcr(p15, 0, 0, c7, c10, 0);    // flush entire data cache
        mcr(p15, 0, 0, c7, c7,  0);    // flush entire cache
        mcr(p15, 0, 5, c7, c10, 0);    // data memory barrier
        mcr(p15, 0, 4, c7, c10, 0);    // memory sync barrier
#else
        m_cache.cleanInvalidate(Cache::Unified);
#endif /* ARMV6 */

        // Write this context's first-level table into TTBR0: from now on
        // the MMU hardware walks these page table entries (in memory) to
        // translate virtual to physical addresses, i.e. the mappings
        // installed via map()/mapLarge().
        // Switch first page table and re-enable L1 caching
        ctrl.write(ARMControl::TranslationTable0, (((u32) m_firstTableAddr) |
            (1 << 3) | /* outer write-back, write-allocate */
            (1 << 6)   /* inner write-back, write-allocate */
        ));
        // The TLB caches page table entries for fast lookup; flush it so
        // no stale translations from the previously active context remain.
        // Flush TLB caches
        tlb_flush_all();
        // Instruction barrier: ensure all prior instructions complete
        // before execution continues with the new translations.
        // Synchronize execution stream
        isb();
    }
    // Record this context as the currently active memory context.
    // Done. Update currently active context pointer
    m_current = this;
    return Success;
}

/**
 * Map a single virtual page to a physical page with the given access flags.
 *
 * @param virt Virtual address to map
 * @param phys Physical address to map it to
 * @param acc  Memory access permissions
 * @return Result of the page table update
 */
MemoryContext::Result ARMPaging::map(Address virt, Address phys, Memory::Access acc)
{
    // Insert the mapping into the page tables first.
    const Result result = m_firstTable->map(virt, phys, acc, m_alloc);

    // If this context is currently active, drop any stale TLB entry
    // for this virtual address so the new mapping takes effect.
    if (m_current == this)
        tlb_invalidate(virt);

    // Synchronize the execution stream.
    isb();

    return result;
}

/**
 * Remove the mapping for a single virtual page.
 *
 * @param virt Virtual address to unmap
 * @return Result of the page table update
 */
MemoryContext::Result ARMPaging::unmap(Address virt)
{
    const bool isActive = (m_current == this);

    // Write back and invalidate the page's data in cache before the
    // translation disappears.
    if (isActive)
        m_cache.cleanInvalidateAddress(Cache::Data, virt);

    // Remove the entry from the page tables.
    const Result result = m_firstTable->unmap(virt, m_alloc);

    // Drop the now-stale translation from the TLB.
    if (isActive)
        tlb_invalidate(virt);

    // Synchronize the execution stream.
    isb();

    return result;
}

/**
 * Translate a virtual address to its physical address via the page tables.
 *
 * @param virt Virtual address to look up
 * @param phys Output: the corresponding physical address
 * @return Result of the page table translation
 */
MemoryContext::Result ARMPaging::lookup(Address virt, Address *phys) const
{
    return m_firstTable->translate(virt, phys, m_alloc);
}

/**
 * Retrieve the access permissions of a mapped virtual address.
 *
 * @param virt   Virtual address to query
 * @param access Output: the access flags of the mapping
 * @return Result of the page table query
 */
MemoryContext::Result ARMPaging::access(Address virt, Memory::Access *access) const
{
    return m_firstTable->access(virt, access, m_alloc);
}

/**
 * Release the memory of a section-mapped range in the page tables.
 *
 * @param range      Memory range to release
 * @param tablesOnly When true, release only the page table pages,
 *                   not the mapped memory itself
 * @return Result of the release operation
 */
MemoryContext::Result ARMPaging::releaseSection(const Memory::Range & range,
                                                const bool tablesOnly)
{
    return m_firstTable->releaseSection(range, m_alloc, tablesOnly);
}

/**
 * Release the memory mapped by the given virtual range.
 *
 * @param range Memory range to release
 * @return Result of the release operation
 */
MemoryContext::Result ARMPaging::releaseRange(Memory::Range *range)
{
    return m_firstTable->releaseRange(*range, m_alloc);
}
