#include "monitor.hpp"


namespace libco {

// Number of full steal rounds a starving processor attempts before falling
// through to the shutdown check in run_proc().
#define MAX_STEAL_TRIES 4
// Maximum events fetched per epoll_wait() call in Monitor::run().
#define MAX_EPOLL_EVENTS 256
// Upper bound on a single epoll_wait timeout, in milliseconds.
constexpr uint64_t DEFAULT_EPOLL_WAIT_TIME_MS{1000};

// Process-wide run queue shared by all processors; created in Monitor's
// constructor and destroyed in its destructor.
TaskQueue* s_global_q = nullptr;

// The single Monitor instance, constructed at static-initialization time.
static Monitor s_monitor;
// Posted by the last worker thread once all processors finished setup;
// the constructor waits on it before starting the monitor thread.
static Semaphore init_sem;
// Count of worker threads that completed their thread-local setup.
static std::atomic<uint8_t> done{0};

/// Returns true once shutdown has been requested (set by ~Monitor()).
bool Monitor::stopping() {
    const bool stop_requested = s_monitor.m_stop;
    return stop_requested;
}

/// Accessor for the single process-wide Monitor instance.
Monitor* Monitor::GetThis() {
    Monitor* self = &s_monitor;
    return self;
}

/// Returns the global run queue. It must already exist (the Monitor
/// constructor creates it), hence the assertion.
TaskQueue* Monitor::GetGlobalQue() {
    TaskQueue* queue = s_global_q;
    LIBCO_ASSERT(queue);
    return queue;
}

// Builds the scheduler: the global queue, then per-slot (COMAXPROCS) pipes,
// Processors and worker threads, and finally the epoll monitor thread.
Monitor::Monitor() {
    s_global_q = new TaskQueue;
    m_active_procs = 0;

    // One pipe per processor.
    // NOTE(review): the pipe fds are not used anywhere in this file and are
    // never closed — confirm their purpose (wakeup channel?) elsewhere.
    for(size_t i=0; i<COMAXPROCS; ++i) {
        int rt = pipe(m_pipe_fd[i]);
        LIBCO_ASSERT(rt == 0);
    }
    for(size_t i=0; i<COMAXPROCS; ++i) {
        m_procs[i] = new Processor;
    }
    // Each worker thread runs run_proc() on its own Processor.
    for(size_t i=0; i<COMAXPROCS; ++i) {
        m_threads[i] = new Thread(std::bind(&libco::Monitor::run_proc, this, m_procs[i]));
    }
    // Block until every worker finished its thread-local setup (the last one
    // posts init_sem in run_proc()), then start the epoll monitor thread.
    // NOTE(review): m_epfd is used by run() and ~Monitor() but is not created
    // here — presumably epoll_create happens elsewhere; verify.
    init_sem.wait();
    m_root_thread = new Thread(&libco::Monitor::run, "thread_monitor");
}

/// Shuts the scheduler down: signals stop, joins every thread, then
/// releases resources.
///
/// Fix: the original destructor deleted s_global_q and closed m_epfd BEFORE
/// joining m_root_thread, while run() still dereferences s_global_q
/// (try_push_all) and calls epoll_wait(m_epfd) — a use-after-free /
/// use-after-close race. All threads are now joined before any teardown.
Monitor::~Monitor() {
    LIBCO_LOG_DEBUG << "waiting for all processors done";

    // Request shutdown first so every loop (workers and monitor) can exit.
    m_stop = true;

    for(size_t i=0; i<COMAXPROCS; ++i) {
        m_threads[i]->join();
    }
    LIBCO_LOG_DEBUG << "all processors done";

    // Join the monitor thread before deleting anything it touches. run()
    // re-checks stopping() at the top of each iteration, so it exits within
    // one epoll_wait timeout at most.
    m_root_thread->join();
    close(m_epfd);

    for(size_t i=0; i<COMAXPROCS; ++i) {
        delete m_procs[i];
        delete m_threads[i];
    }
    delete m_root_thread;

    delete s_global_q;
    s_global_q = nullptr;  // GetGlobalQue() asserts on a live queue
    LIBCO_LOG_DEBUG << "monitor end working";
}

/// Worker-thread entry point: binds thread-local state to `proc` and then
/// runs the scheduling loop until shutdown drains both queues.
///
/// Fix: the original steal call passed `m_procs[i]->tasks() / 4 > 0` — a
/// bool silently converted to the count — so at most ONE coroutine was ever
/// stolen no matter how deep the victim's queue was. The intent (see the
/// "steal N coroutines" log) is to take a quarter of the victim's tasks.
void Monitor::run_proc(Processor* proc) {
    LIBCO_ASSERT(t_proc == nullptr);
    LIBCO_ASSERT(t_local_q == nullptr);

    t_proc = proc;
    t_local_q = proc->m_local_tasks;
    t_id = proc->m_id;
    proc->m_co = Coroutine::GetThis();
    libco::set_hook_enable();

    // Rendezvous: the last worker to finish setup wakes the constructor so
    // it can start the monitor thread.
    ++done;
    if (done == COMAXPROCS) {
        init_sem.notify();
    }
    while (true) {
        /// case 1: try the local queue first; if non-empty, run its head
        /// case 2: otherwise try the global queue
        /// case 3: both empty -> try to steal coroutines from other processors
        std::optional<Coroutine::ptr> task = t_local_q->try_pop();
        if (task == std::nullopt) {
            task = s_global_q->try_pop();
        }
        if (task != std::nullopt) {
            /// case 1 / case 2
            task.value()->resume();
            continue;
        }

        /// case 3: work stealing
        bool success = false;
        std::vector<Coroutine::ptr> steals;
        size_t tries = 0;
        while(!success && tries < MAX_STEAL_TRIES) {
            for(size_t i=0; i<COMAXPROCS; ++i) {
                if (t_proc == m_procs[i]) {
                    continue;  // never steal from ourselves
                }
                // Take a quarter of the victim's queue; skip victims with
                // too little work to share.
                size_t want = m_procs[i]->tasks() / 4;
                if (want == 0) {
                    continue;
                }
                if (size_t num = m_procs[i]->pop_task_n(steals, want)) {
                    t_proc->push_task_n(steals, num);
                    success = true;
                    LIBCO_LOG_DEBUG << "steal " << num << " coroutines from processor " << m_procs[i]->m_id;
                    break;
                }
            }
            ++tries;
        }
        /// If the monitor requested shutdown and both queues are drained, exit.
        if (s_monitor.m_stop == true && t_local_q->tasks() == 0 && s_global_q->tasks() == 0){
            // At this point t_proc->m_co has use_count 2: one reference here
            // in t_proc->m_co and one in t_thread_coroutine.
            t_proc->m_co.reset();
            return;
        }
    }

}

/// Enqueues a coroutine for execution.
///
/// Fix: the original logged "add task to local task_q" unconditionally —
/// including on the fallback path, where it had already logged
/// "add task to global task_q" — so global pushes were double-logged and
/// mislabeled. Each path now logs exactly once.
void Monitor::schedule(Coroutine::ptr task) {
    // Called from a thread with no bound processor: only the global queue
    // is available.
    if (t_proc == nullptr) {
        LIBCO_LOG_DEBUG << "add task to global task_q";
        s_global_q->push(task);
        return;
    }

    /// case 1: try the local queue first; succeeds while it has room
    /// case 2: otherwise fall back to the global queue

    bool ok = t_local_q->try_push(task);
    if (ok) {
        LIBCO_LOG_DEBUG << "add task to local task_q";
    } else {
        LIBCO_LOG_DEBUG << "add task to global task_q";
        s_global_q->push(task);
    }
}

void Monitor::schedule(std::function<void()> task) {
    schedule(Coroutine::ptr(new Coroutine(task)));
}

void Monitor::run() {
    LIBCO_LOG << "monitor start listening epoll events";
    epoll_event *events = new epoll_event[MAX_EPOLL_EVENTS];
    while (true) {
        if (stopping()) {
            delete[] events;
            break;
        }
        uint64_t waiting_time = s_monitor.getNextTimerMs();
        if (waiting_time = ~0ull) {
            waiting_time = DEFAULT_EPOLL_WAIT_TIME_MS;
        } else {
            waiting_time = std::min(waiting_time, DEFAULT_EPOLL_WAIT_TIME_MS);
        }
        int rt = epoll_wait(s_monitor.m_epfd, events, MAX_EPOLL_EVENTS, waiting_time);
        if(rt < 0) {
            if(errno == EINTR) {
                continue;
            }
            LIBCO_LOG_ERROR << "epoll_wait(" << s_monitor.m_epfd << ") (rt="
                                      << rt << ") (errno=" << errno << ") (errstr:" << strerror(errno) << ")";
            break;
        }
        for(int i=0; i<rt; ++i) {
            int fd = events[i].data.fd;
            FdContext* ctx = (FdContext*)events[i].data.ptr;
            if (events[i].events & EPOLLIN) {
                s_monitor.m_procs[ctx->proc_id]->add_task(ctx->read_cb);
            }
            if (events[i].events & EPOLLOUT) {
                s_monitor.m_procs[ctx->proc_id]->add_task(ctx->write_cb);
            }
        }
        std::vector<TimerManager::basefunc> expired_cb_vec;
        s_monitor.listAllExpired(expired_cb_vec);
        size_t sz = expired_cb_vec.size();
        if (sz > 0) {
            std::vector<Coroutine::ptr> expired_coroutines(sz);
            for(size_t i=0; i<sz; ++i) {
                expired_coroutines[i] = Coroutine::ptr(new Coroutine(expired_cb_vec[i]));
            }
            LIBCO_LOG_DEBUG << "monitor push " << sz << " timer tasks to global task_q";
            s_global_q->try_push_all(expired_coroutines);
        }
    }
    LIBCO_LOG_DEBUG << "monitor end listening epoll events";
}

} // namespace libco
