
#include "./fiber-env-impl.h"

#include <assert.h>
#include <stdio.h>

#include "./cooperative-condition.h"
#include "./cooperative-event.h"
#include "./cooperative-mutex.h"
#include "./cooperative-rwlock.h"
#include "./cooperative-semaphore.h"
#include "./fiber-impl.h"
#include "./interlocked.h"
#include "./platform-fiber.h"
#include "./thread.h"

namespace fasmio { namespace fiber_env {

static const char* const g_join    = "join";
static const char* const g_quitted = "quitted";
static const char* const g_sleep   = "sleep";

// The container/lock members are fine with default construction; only the
// scalars and the condition (which must be bound to fibers_lock_) need
// explicit initializers.  Order follows the member declaration order.
FiberEnvImpl::FiberEnvImpl() :
    quit_flag_(false),
    no_live_fibers_cond_(&fibers_lock_),
    fiberized_threads_(0)
{
}

// Nothing to release here: Stop() is responsible for joining threads and
// freeing quitted fibers before destruction.
FiberEnvImpl::~FiberEnvImpl()
{
}

// Start the environment: bring up the helper subsystems, then spawn the
// scheduler threads that will run fibers.
//
// thread_count: number of scheduler threads, or THREADS_DEFAULT for 8.
// Returns false (with subsystems rolled back) if a subsystem fails to start.
bool FiberEnvImpl::Start(unsigned int thread_count)
{
    if (!waiting_fiber_manager_.Start(this))
        return false;
    if (!epoller_.Start(this))
    {
        // Roll back the already-started manager so a failed Start() leaves
        // the environment in its initial, stopped state.
        waiting_fiber_manager_.Stop();
        return false;
    }

    const unsigned int kDefaultThreadCount = 8;
    const unsigned long kThreadStackSize = 256 * 1024;  // 256 KiB per scheduler thread
    if (thread_count == THREADS_DEFAULT)
        thread_count = kDefaultThreadCount;

    // Create all thread objects before starting any of them, so threads_ is
    // fully populated by the time the first scheduler loop begins running.
    for (unsigned int i = 0; i < thread_count; ++i)
        threads_.push_back(new Thread(ThreadProc, static_cast<void*>(this), kThreadStackSize));
    for (unsigned int i = 0; i < thread_count; ++i)
        threads_[i]->Start();

    return true;
}

// Ask every scheduler thread to exit once no live fibers remain; returns
// immediately without waiting for them.
bool FiberEnvImpl::NotifyToStop()
{
    quit_flag_ = true;
    return true;
}

// Shut down the whole environment: signal quit, join all scheduler threads,
// wait for fiberized threads to drain, then reclaim quitted fibers and stop
// the helper subsystems.  Must be called from a plain (non-fiber) thread.
bool FiberEnvImpl::Stop()
{
    ThreadContext *thread_context = GetThreadContext();
    if (thread_context != nullptr)
    {
        // cannot call Stop inside a fiber, otherwise would cause a deadlock
        return false;
    }

    quit_flag_ = true;

    // Join, then delete, every scheduler thread created in Start().
    const unsigned int thread_count = threads_.size();
    for (unsigned int i = 0; i < thread_count; ++i)
        threads_[i]->Join();
    for (unsigned int i = 0; i < thread_count; ++i)
        delete threads_[i];
    threads_.clear();

    // Threads that entered via FiberizeThisThread() are not in threads_;
    // poll until they have all left their scheduler loops.
    while (fiberized_threads_ > 0)
        usleep(1000);

    // there should be no living fibers right now
    assert(live_fibers_.empty());

    // cleanup all the quitted fibers; the lock is unnecessary here because
    // every scheduler thread has already exited
    for (fibers_t::iterator iter = quitted_fibers_.begin();
        iter != quitted_fibers_.end(); ++iter)
    {
        FiberImpl* fiber = iter->second;
        delete fiber;
    }
    quitted_fibers_.clear();

    epoller_.Stop();
    waiting_fiber_manager_.Stop();
    return true;
}

// Allocate a new fiber running func(arg), register it as live, and hand it
// to the scheduler via the ready queue.
Fiber* FiberEnvImpl::CreateFiber(int (*func)(void*), void* arg, const char* name)
{
    FiberImpl* new_fiber = new FiberImpl(this, func, arg, 0, name);

    fibers_lock_.Lock();
    live_fibers_[new_fiber->id_] = new_fiber;
    fibers_lock_.Unlock();

    ReadyFiber(new_fiber);
    return static_cast<Fiber*>(new_fiber);
}

// Turn the calling OS thread into an additional scheduler thread until the
// environment stops.  Fails when called from a thread that already runs
// fibers (it would re-enter the scheduler loop).
bool FiberEnvImpl::FiberizeThisThread()
{
    if (GetThreadContext() != nullptr)
        return false;  // already inside a fiber

    // Track fiberized threads so Stop() can wait for them to drain.
    interlocked::Increment(reinterpret_cast<long*>(&fiberized_threads_));
    ThreadProc();
    interlocked::Decrement(reinterpret_cast<long*>(&fiberized_threads_));
    return true;
}

void FiberEnvImpl::DumpFibers(FILE *fp)
{
    // TODO, FIXME: lock fibers_lock_ does not help to much, as the fiber
    // itself may modify itself in the same time too.  There's a way more
    // safty and does not require a lock for the fiber -- just let the fiber
    // to dump its info when the fiber enter ThreadContext::Schedule.
    fibers_lock_.Lock();
    for (fibers_t::iterator iter = live_fibers_.begin();
        iter != live_fibers_.end(); ++iter)
    {
        FiberImpl *fiber = iter->second;
        fiber->Dump(fp);
    }
    for (fibers_t::iterator iter = quitted_fibers_.begin();
        iter != quitted_fibers_.end(); ++iter)
    {
        FiberImpl *fiber = iter->second;
        fiber->Dump(fp);
    }
    fibers_lock_.Unlock();
}

void FiberEnvImpl::Sleep(unsigned long milliseconds)
{
    ThreadContext *thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return;

    FiberImpl* fiber = thread_context->current_fiber_;
    assert(fiber != nullptr);

    if (milliseconds == 0)
    {
        Yield();
        return;
    }
 
    ABSTime timeout;
    timeout.Adjust(0, milliseconds * 1000);

    WaitingFiberManager::WaitingFiber wfiber;
    wfiber.fiber_   = fiber;
    wfiber.state_   = WaitingFiberManager::STATE_SLEEPING;
    wfiber.timeout_ = timeout;
    thread_context->Schedule(CompleteSleep, static_cast<void*>(&wfiber), g_sleep);

    assert(wfiber.reason_ == WaitingFiberManager::REASON_TIMEDOUT);
}

// Post-switch action for Sleep(): runs on the scheduler fiber after the
// sleeping fiber was switched out, so it is now safe to publish the
// stack-resident waiting record to the waiting-fiber manager.
void FiberEnvImpl::CompleteSleep(ThreadContext* thread_context, void* arg)
{
    WaitingFiberManager::WaitingFiber *record =
            static_cast<WaitingFiberManager::WaitingFiber*>(arg);
    assert(record != nullptr);
    assert(record->state_ == WaitingFiberManager::STATE_SLEEPING);

    WaitingFiberManager *manager = thread_context->GetWaitingFiberManager();
    assert(manager != nullptr);
    manager->AddWaitingFiber(record);
}

void FiberEnvImpl::Yield()
{
    ThreadContext *thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return;

    FiberEnvImpl *env = thread_context->env_;
    unsigned int ready_queue_size = env->ready_queue_.Size();
    if (ready_queue_size == 0)
        // no other ready fiber, continue current one
        return;

    thread_context->Schedule(CompleteYield, nullptr, nullptr);
}

// Post-switch action for Yield(): re-queue the yielding fiber so any
// scheduler thread may resume it later.
void FiberEnvImpl::CompleteYield(ThreadContext* thread_context, void*)
{
    FiberEnvImpl *env = thread_context->env_;
    env->ReadyFiber(thread_context->current_fiber_);
}

void FiberEnvImpl::Quit(int quit_code)
{
    ThreadContext *thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return;

    FiberImpl* fiber = thread_context->current_fiber_;
    assert(fiber != nullptr);
    fiber->SetQuitCode(quit_code);

    thread_context->Schedule(CompleteQuit, nullptr, g_quitted);
}

// Post-switch action for Quit(): runs on the scheduler fiber after the
// quitting fiber switched out, so its execution resources can be released
// and its ownership handed off (to a joiner, the quitted set, or deletion).
void FiberEnvImpl::CompleteQuit(ThreadContext* thread_context, void*)
{
    FiberEnvImpl *env = thread_context->env_;
    FiberImpl *fiber = thread_context->current_fiber_;

    fiber->FreeResource();

    env->fibers_lock_.Lock();
    {
        // remove the fiber from live-fibers
        env->live_fibers_.erase(fiber->id_);
        if (env->live_fibers_.empty())
            env->no_live_fibers_cond_.Broadcast();

        // check whether someone is joining this fiber
        fibers_t::iterator iter = env->joining_fibers_.find(fiber->id_);
        if (iter != env->joining_fibers_.end())
        {
            // Don't add the fiber to quitted-fibers if someone is joining it,
            // but instead wake up the joining fiber.
            // The fiber object will be deleted in the Join function
            env->ReadyFiber(iter->second);
            env->joining_fibers_.erase(iter);
        }
        else if (fiber->daemon_)
        {
            // It's a daemon fiber and no one is joining it, delete the
            // object directly
            delete fiber;
        }
        else
        {
            // add the fiber to quitted-fibers if no one is joining it
            env->quitted_fibers_[fiber->id_] = fiber;
        }
    }
    env->fibers_lock_.Unlock();
}

void FiberEnvImpl::NotifyToStop_s()
{
    ThreadContext* thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return;

    FiberEnvImpl* env = thread_context->env_;
    assert(env != nullptr);

    env->NotifyToStop();
}

// Static convenience wrapper: create a fiber in the environment of the
// calling fiber thread.  Returns nullptr on non-fiber threads.
Fiber* FiberEnvImpl::CreateFiber_s(int (*func)(void*), void* arg, const char* name)
{
    ThreadContext* context = GetThreadContext();
    if (context == nullptr)
        return nullptr;

    assert(context->env_ != nullptr);
    return context->env_->CreateFiber(func, arg, name);
}

// Return the fiber currently running on this thread, or nullptr when
// called from a thread that is not running fibers.
Fiber* FiberEnvImpl::SelfFiber()
{
    ThreadContext* context = GetThreadContext();
    if (context == nullptr)
        return nullptr;

    assert(context->current_fiber_ != nullptr);
    return context->current_fiber_;
}

// Wait for `fiber` to quit, reclaim its object, and return its quit code.
//
// NOTE: returns 0 when given a null fiber or when called from a non-fiber
// thread; callers cannot distinguish that from a genuine quit code of 0.
// (The original returned `false` from an int function -- same value, but
// the explicit 0 makes the sentinel intentional.)
int FiberEnvImpl::JoinFiber(FiberImpl* fiber)
{
    if (fiber == nullptr)
        return 0;

    ThreadContext *thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return 0;

    FiberEnvImpl *env = thread_context->env_;
    assert(env != nullptr);

    env->fibers_lock_.Lock();

    fibers_t::iterator iter = env->quitted_fibers_.find(fiber->id_);
    if (iter == env->quitted_fibers_.end())
    {
        // Still alive: register ourselves as its joiner and block.
        thread_context->Schedule(CompleteJoin, static_cast<void*>(fiber), g_join);
        // Upon the return of thread_context->Schedule,
        // fibers_lock_ will have been Unlock'ed (by CompleteJoin), and
        // CompleteQuit has readied us because the fiber has quit.
    }
    else
    {
        // Already quit and parked in the quitted set: claim it directly.
        assert(fiber == iter->second);
        env->quitted_fibers_.erase(iter);
        env->fibers_lock_.Unlock();
    }

    int fiber_quit_code = fiber->quit_code_;
    delete fiber;
    return fiber_quit_code;
}

// Post-switch action for JoinFiber(): runs on the scheduler fiber with
// fibers_lock_ still held by JoinFiber.  Records the joiner, then releases
// the lock taken in JoinFiber.
void FiberEnvImpl::CompleteJoin(ThreadContext* thread_context, void* arg)
{
    FiberImpl *target = static_cast<FiberImpl*>(arg);
    assert(target != nullptr);

    FiberImpl *joiner = thread_context->current_fiber_;
    assert(joiner != nullptr);

    FiberEnvImpl *env = thread_context->env_;
    assert(env != nullptr);

    env->joining_fibers_[target->id_] = joiner;
    env->fibers_lock_.Unlock();
}

void FiberEnvImpl::DumpFibers_s(FILE *fp)
{
    ThreadContext *thread_context = GetThreadContext();
    if (thread_context == nullptr)
        return;

    FiberEnvImpl *env = thread_context->env_;
    assert(env != nullptr);

    env->DumpFibers(fp);
}

void FiberEnvImpl::SetCurrentFiberState(const char* state)
{
    ThreadContext* thread_context = GetThreadContext();
    assert(thread_context != nullptr);
    thread_context->current_fiber_->SetState(state);
}

void FiberEnvImpl::SetCurrentFiberStateFromRunning(const char* state)
{
    ThreadContext* thread_context = GetThreadContext();
    assert(thread_context != nullptr);
    thread_context->current_fiber_->SetStateFromRunning(state);
}

/////////////////////////////////////////////////////////////////////////////

// Mark the fiber runnable and push it onto the shared ready queue, where
// any scheduler thread may pick it up.
void FiberEnvImpl::ReadyFiber(FiberImpl* fiber)
{
    assert(fiber != nullptr);
    fiber->SetStateToReady();
    fiber->ChangeStatus(FiberImpl::FS_READY);
    ready_queue_.Push(fiber);
}

// Delete the fiber object if it has already quit and is parked in the
// quitted set (i.e. nobody joined it); otherwise leave it alone.
void FiberEnvImpl::RecycleFiberIfQuitted(FiberImpl* fiber)
{
    fibers_lock_.Lock();

    fibers_t::iterator found = quitted_fibers_.find(fiber->id_);
    if (found != quitted_fibers_.end())
    {
        assert(found->second == fiber);
        quitted_fibers_.erase(found);
        delete fiber;
    }

    fibers_lock_.Unlock();
}

// Thread entry trampoline: forward to the per-instance scheduler loop.
void FiberEnvImpl::ThreadProc(void* arg)
{
    FiberEnvImpl* env = static_cast<FiberEnvImpl*>(arg);
    if (env != nullptr)
        env->ThreadProc();
}

// Scheduler loop: turn this OS thread into a scheduling fiber, then
// repeatedly pop ready fibers and run them until quit is requested and no
// live fibers remain.
void FiberEnvImpl::ThreadProc()
{
    PlatformFiber this_fiber;

    ThreadContext thread_context;
    thread_context.env_                = this;
    thread_context.schedule_fiber_     = &this_fiber;
    thread_context.current_fiber_      = nullptr;
    thread_context.post_switch_action_ = nullptr;
    thread_context.post_switch_arg_    = nullptr;

    // Publish the context in thread-local storage so GetThreadContext()
    // works for code running on this thread.
    Thread::SetSpecific(&thread_context);

    while (true)
    {
        if (quit_flag_)
        {
            // quit the schedule thread only if no living fibers
            fibers_lock_.Lock();
            bool no_living_fibers = live_fibers_.empty();
            fibers_lock_.Unlock();
            if (no_living_fibers)
                break;
        }

        // Block up to 10ms for a runnable fiber, then re-check quit_flag_.
        FiberImpl* fiber = ready_queue_.Pop(true, 10);
        if (fiber == nullptr)
            continue;

        fiber->ChangeStatus(FiberImpl::FS_RUNNING);

        fiber->SetStateToRunning();
        thread_context.current_fiber_ = fiber;
        fiber->fiber_->SwitchTo(&this_fiber);
        // Back on the scheduler fiber: run the action the fiber deferred in
        // ThreadContext::Schedule (re-queue, register wait, unlock, ...),
        // now that the fiber's stack is no longer active.
        if (thread_context.post_switch_action_ != nullptr)
        {
            (thread_context.post_switch_action_)(&thread_context, thread_context.post_switch_arg_);
            thread_context.post_switch_action_ = nullptr;
        }
    }

    Thread::SetSpecific(nullptr);
}


/////////////////////////////////////////////////////////////////////////////

// The fiber currently running (or last scheduled) on this thread.
FiberImpl* ThreadContext::GetCurrentFiber()
{
    return current_fiber_;
}

// Forward to the environment's shared ready queue.
void ThreadContext::ReadyFiber(FiberImpl* fiber)
{
    env_->ReadyFiber(fiber);
}

// Convenience overload: switch out with no post-switch action.
bool ThreadContext::Schedule(const char* state)
{
    return Schedule(nullptr, nullptr, state);
}

// Switch the current fiber out to this thread's scheduler fiber.
// `func(this, arg)` is executed on the scheduler fiber right after the
// switch (see FiberEnvImpl::ThreadProc), which lets callers defer work --
// such as unlocking or re-queueing -- until this fiber's stack is no
// longer active.  `state` (if non-null) is recorded for diagnostics.
// The post_switch_* fields must be set before SwitchTo.
bool ThreadContext::Schedule(void (*func)(ThreadContext*, void*), void*arg, const char* state)
{
    current_fiber_->ChangeStatus(FiberImpl::FS_WAITING);

    if (state != nullptr)
        current_fiber_->SetStateFromRunning(state);

    post_switch_action_ = func;
    post_switch_arg_    = arg;
    return schedule_fiber_->SwitchTo(current_fiber_->fiber_);
}

// The environment-wide waiting-fiber (timer) manager.
WaitingFiberManager* ThreadContext::GetWaitingFiberManager()
{
    return &env_->waiting_fiber_manager_;
}

// The environment-wide I/O epoller.
Epoller* ThreadContext::GetEpoller()
{
    return &env_->epoller_;
}

// Fetch the calling thread's ThreadContext from thread-local storage, or
// nullptr when the thread is not running a scheduler loop.
ThreadContext* FiberEnvImpl::GetThreadContext()
{
    return static_cast<ThreadContext*>(Thread::GetSpecific());
}

}}  // namespace fasmio::fiber_env

