/*
 * Copyright (C) 2015 Niek Linnenbank
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <FreeNOS/System.h>
#include <FreeNOS/API.h>
#include <MemoryBlock.h>
#include <MemoryChannel.h>
#include <SplitAllocator.h>
#include "Process.h"
#include "ProcessEvent.h"

Process::Process(ProcessID id, Address entry, bool privileged, const MemoryMap &map)
    : m_id(id), m_map(map), m_shares(id)
{
    m_entry = entry;
    m_privileged = privileged;
    m_state = Stopped;
    m_parent = 0;
    m_waitId = 0;
    m_waitResult = 0;
    m_wakeups = 0;
    m_memoryContext = ZERO;
    m_kernelChannel = ZERO;

    // Zero the per-process sleep timer. The scheduler reads this timer
    // to decide when a Sleeping process should be woken up again.
    MemoryBlock::set(&m_sleepTimer, 0, sizeof(m_sleepTimer));
}

Process::~Process()
{
    // Destroy the kernel event channel, if one was created by initialize().
    if (m_kernelChannel != ZERO)
    {
        delete m_kernelChannel;
    }

    // Give back all user-space memory regions owned by this process,
    // then destroy the memory context itself.
    if (m_memoryContext != ZERO)
    {
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserData));
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserHeap));
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserStack));
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserPrivate));
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserArgs));

        // The share region is released with the extra flag set (NOTE(review):
        // presumably to also unmap/free pages shared with other processes).
        m_memoryContext->releaseSection(m_map.range(MemoryMap::UserShare), true);

        delete m_memoryContext;
    }
}

/**
 * Retrieve the unique identifier of this process.
 *
 * @return ProcessID of this process.
 */
ProcessID Process::getID() const
{
    return m_id;
}

/**
 * Retrieve the identifier of the parent process (0 if never set).
 *
 * @return ProcessID of the parent process.
 */
ProcessID Process::getParent() const
{
    return m_parent;
}

/**
 * Retrieve the ProcessID this process is waiting for (set by wait()).
 *
 * @return ProcessID of the process being waited for.
 */
ProcessID Process::getWait() const
{
    return m_waitId;
}

/**
 * Retrieve the exit result delivered by join() after a wait() completed.
 *
 * @return Result value passed to the last join() call.
 */
uint Process::getWaitResult() const
{
    return m_waitResult;
}

/**
 * Retrieve the current scheduling state of this process.
 *
 * @return Current Process::State value.
 */
Process::State Process::getState() const
{
    return m_state;
}

/**
 * Retrieve the memory-share administration of this process.
 *
 * @return Reference to the ProcessShares object of this process.
 */
ProcessShares & Process::getShares()
{
    return m_shares;
}

/**
 * Retrieve the sleep timer of this process. The timer is set by sleep()
 * and cleared by wakeup(); the scheduler reads it to end a timed sleep.
 *
 * @return Reference to the sleep timer info.
 */
const Timer::Info & Process::getSleepTimer() const
{
    return m_sleepTimer;
}

/**
 * Retrieve the virtual memory context of this process.
 *
 * @return Pointer to the MemoryContext, or ZERO if not yet assigned.
 */
MemoryContext * Process::getMemoryContext()
{
    return m_memoryContext;
}

/**
 * Check whether this process runs with kernel privileges.
 *
 * @return True if privileged, false otherwise.
 */
bool Process::isPrivileged() const
{
    return m_privileged;
}

/**
 * Assign the parent process of this process.
 *
 * @param id ProcessID of the new parent.
 */
void Process::setParent(ProcessID id)
{
    m_parent = id;
}

Process::Result Process::wait(ProcessID id)
{
    // Only a Ready process may enter the Waiting state.
    if (m_state != Ready)
    {
        ERROR("Process ID " << m_id << " has invalid state: " << (uint) m_state);
        return InvalidArgument;
    }

    // Block this process until the given process joins us.
    m_waitId = id;
    m_state  = Waiting;

    return Success;
}

Process::Result Process::join(const uint result)
{
    // Deliver the exit result and unblock the waiting process.
    if (m_state == Waiting)
    {
        m_state = Ready;
        m_waitResult = result;
        return Success;
    }

    // join() is only valid on a process blocked in wait().
    ERROR("PID " << m_id << " has invalid state: " << (uint) m_state);
    return InvalidArgument;
}

Process::Result Process::stop()
{
    // Stopping is permitted from Ready, Sleeping or an already Stopped state.
    switch (m_state)
    {
        case Ready:
        case Sleeping:
        case Stopped:
            m_state = Stopped;
            return Success;

        default:
            ERROR("PID " << m_id << " has invalid state: " << (uint) m_state);
            return InvalidArgument;
    }
}

Process::Result Process::resume()
{
    // Only a Stopped process can be resumed back to Ready.
    if (m_state == Stopped)
    {
        m_state = Ready;
        return Success;
    }

    ERROR("PID " << m_id << " has invalid state: " << (uint) m_state);
    return InvalidArgument;
}

/**
 * Send a ProcessEvent to this process via its kernel event channel.
 *
 * @param event Event message to deliver.
 *
 * @return Result of wakeup(): Success if the process was sleeping and is
 *         now Ready, WakeupPending otherwise.
 */
Process::Result Process::raiseEvent(const ProcessEvent *event)
{
    // Write the message. Be sure to flush the caches because
    // the kernel has mapped the channel pages separately in low memory.
    m_kernelChannel->write(event);
    m_kernelChannel->flush();

    // Wakeup the Process, if needed
    return wakeup();
}

/**
 * Initialize the process: create and map its kernel event channel.
 *
 * Allocates two physical pages (a data page and a feedback page), maps them
 * read-only into the process' UserShare region, remaps the feedback page
 * writable, registers the mapping as a share with the kernel, and attaches
 * the MemoryChannel on the kernel-side virtual mapping.
 *
 * @return Success on completion, OutOfMemory if any allocation or
 *         mapping step fails.
 */
Process::Result Process::initialize()
{
    Memory::Range range;
    Arch::Cache cache;
    Allocator::Range allocPhys, allocVirt;

    // Create new kernel event channel object
    m_kernelChannel = new MemoryChannel(Channel::Producer, sizeof(ProcessEvent));
    if (!m_kernelChannel)
    {
        ERROR("failed to allocate kernel event channel object");
        return OutOfMemory;
    }

    // Allocate two pages for the kernel event channel:
    // one data page and one feedback page.
    allocPhys.address = 0;
    allocPhys.size = PAGESIZE * 2;
    allocPhys.alignment = PAGESIZE;
    if (Kernel::instance()->getAllocator()->allocate(allocPhys, allocVirt) != Allocator::Success)
    {
        ERROR("failed to allocate kernel event channel pages");
        return OutOfMemory;
    }

    // Initialize pages with zeroes, using the kernel's own virtual
    // mapping of the pages (the MMU translates this address for us).
    MemoryBlock::set((void *)allocVirt.address, 0, PAGESIZE*2);

    // Flush the written zeroes out of the data cache: the user process
    // will access these pages through a different virtual mapping, so the
    // data must actually reach memory.
    cache.cleanData(allocVirt.address);
    cache.cleanData(allocVirt.address + PAGESIZE);

    // Map data and feedback pages in userspace, read-only at first.
    range.phys   = allocPhys.address;
    range.access = Memory::User | Memory::Readable;
    range.size   = PAGESIZE * 2;

    // Find a free virtual address range in the UserShare region.
    // NOTE(review): these mapping results were previously ignored; a failed
    // mapping would leave the process with a broken event channel.
    if (m_memoryContext->findFree(range.size, MemoryMap::UserShare, &range.virt) != MemoryContext::Success)
    {
        ERROR("failed to find free virtual pages for kernel event channel");
        return OutOfMemory;
    }

    // Bind the physical pages to the virtual address range found above.
    if (m_memoryContext->mapRangeContiguous(&range) != MemoryContext::Success)
    {
        ERROR("failed to map kernel event channel in userspace");
        return OutOfMemory;
    }

    // Remap the feedback page with write permissions
    if (m_memoryContext->unmap(range.virt + PAGESIZE) != MemoryContext::Success)
    {
        ERROR("failed to unmap kernel event channel feedback page");
        return OutOfMemory;
    }
    if (m_memoryContext->map(range.virt + PAGESIZE,
                             range.phys + PAGESIZE,
                             Memory::User | Memory::Readable | Memory::Writable) != MemoryContext::Success)
    {
        ERROR("failed to remap kernel event channel feedback page");
        return OutOfMemory;
    }

    // Create shares entry so the kernel (KERNEL_PID) can access the channel
    m_shares.setMemoryContext(m_memoryContext);
    m_shares.createShare(KERNEL_PID, Kernel::instance()->getCoreInfo()->coreId, 0, range.virt, range.size);

    // Setup the kernel event channel: the pages act as a ring buffer
    // for passing ProcessEvent messages from kernel to process.
    m_kernelChannel->setVirtual(allocVirt.address, allocVirt.address + PAGESIZE);

    return Success;
}

Process::Result Process::wakeup()
{
    // Count the wakeup even when the process is not sleeping yet.
    // This process might be just about to call sleep(); when another
    // process asks to wake it so it can receive an IPC message, the
    // pending counter guarantees the next sleep() will be skipped.
    m_wakeups++;

    if (m_state != Sleeping)
        return WakeupPending;

    // Wake the sleeper and cancel any sleep timeout.
    MemoryBlock::set(&m_sleepTimer, 0, sizeof(m_sleepTimer));
    m_state = Ready;
    return Success;
}

Process::Result Process::sleep(const Timer::Info *timer, bool ignoreWakeups)
{
    // Only a Ready process may go to sleep.
    if (m_state != Ready)
    {
        ERROR("PID " << m_id << " has invalid state: " << (uint) m_state);
        return InvalidArgument;
    }

    // A wakeup that arrived before this call cancels the sleep,
    // unless the caller explicitly chooses to ignore pending wakeups.
    if (m_wakeups != 0 && !ignoreWakeups)
    {
        m_wakeups = 0;
        return WakeupPending;
    }

    m_state = Sleeping;

    // Record the optional timeout for the scheduler to wake us later.
    if (timer)
        MemoryBlock::copy(&m_sleepTimer, timer, sizeof(m_sleepTimer));

    return Success;
}

/**
 * Compare this process to another by process identifier.
 *
 * @param proc Pointer to the process to compare with.
 *
 * @return True if both processes have the same ProcessID.
 */
bool Process::operator==(Process *proc)
{
    return proc->getID() == m_id;
}
