//
// Created by 抑~风 on 2023/2/3.
//
#include "iomanager.h"
#include "macro.h"
#include <sys/eventfd.h>
#include <cstring>
#include <unistd.h>
#include <fcntl.h>


#include "log.h"
#include "hook.h"

namespace CWJ_CO_NET {

    static auto g_logger = GET_LOGGER("system");
    thread_local int thread_epoll_fd = -1;


    /// Wake one idle worker blocked in epoll_wait() by writing to the eventfd.
    /// No-op when no thread is parked in idle().
    void IOManager::wake() {
        if (m_idle_thread_count <= 0) return;
        uint64_t u = 1;
        ssize_t rt;
        // Retry on EINTR; a signal-interrupted write is not a real failure.
        do {
            rt = write(m_wakeup_fd, &u, sizeof(u));
        } while (rt == -1 && errno == EINTR);
        // EAGAIN means the eventfd counter is already saturated — the sleeping
        // thread will wake anyway, so it is not an error.
        if (rt == -1 && errno != EAGAIN) {
            // Log BEFORE asserting, otherwise the diagnostic is lost when the
            // assert aborts (the original asserted first).
            ERROR_LOG(g_logger) << "wake write fail ,errno=" << errno << " strerror= " << strerror(errno);
            CWJ_ASSERT(false);
        }
    }

    /// Idle routine run by scheduler worker threads: waits on the per-thread
    /// epoll fd, schedules expired timer callbacks, and dispatches ready
    /// READ/WRITE events to their registered coroutines/callbacks.
    void IOManager::idle() {


        // NOTE: the event buffer must NOT be static — a static buffer would be
        // shared by every worker thread and concurrent epoll_wait calls would
        // corrupt each other's data (see the resolved bug note further down).
         uint64_t MAX_EVENTS = 256;
         uint64_t epoll_event_buf_size = MAX_EVENTS * sizeof (epoll_event);
         std::shared_ptr<epoll_event> events(new epoll_event[MAX_EVENTS], [](auto &a) { delete[]a; });
        memset(events.get(),0,epoll_event_buf_size);
        do {
            int len = 0;
            do {
                // Cap the wait so the loop periodically re-evaluates timers.
                static uint64_t  MAX_TIMEOUT = 30000ul;
                uint64_t ms = getNextTimer();
                ms = ms > MAX_TIMEOUT ? MAX_TIMEOUT : ms;

                // TODO: when firing global timers, avoid a thundering herd —
                // multiple threads should not all wake for the same expiry.

                len = epoll_wait(thread_epoll_fd, events.get(), MAX_EVENTS, ms);
                // NOTE(review): "len == 0 && errno == EFAULT" reads a possibly
                // stale errno after a plain timeout — confirm this is intended.
                if (len >0 || (len == 0 && errno == EFAULT)) break;
                // EINTR is deliberately ignored: gdb keeps interrupting the wait.
                else if(errno != EINTR){
                    ERROR_LOG(g_logger) <<"len= " << len << " epoll_wait error ,errno="<<errno<<" strerror="<<strerror(errno);
                }
//                if(len == 0){
////                    sleep(1);
//                    CWJ_ASSERT(false);
//                }
                ERROR_LOG(g_logger) << "len="<<len;

            } while (true);

            bool has_task = false;

            // Collect expired timer callbacks and hand them to the scheduler.
            std::vector<TimerManager::CallBack>list;
            listExpiredCb(list);
            has_task = list.size() || len > 0;
            for(auto a : list){
                schedule(a,-1);
            }


            auto evs = events.get();
            for (int i = 0; i < len; i++) {
                // The eventfd only signals a wake-up request: drain it, move on.
                if (evs[i].data.fd == m_wakeup_fd) {
                    uint64_t u;
                    read(m_wakeup_fd, &u, sizeof(u));
                    continue;
                }
                epoll_event &event = evs[i];
                // Resolved: epoll_wait returned successfully but the epoll_event
                // could be invalid (null fd_context pointer). Cause: the events
                // array used to be static and was shared between threads.
                //(gdb) print evs[i] ,len = 2
                //$4 = {events = 0, data = {ptr = 0x0, fd = 0, u32 = 0, u64 = 0}}

                auto fd_context = (FdContext *) evs[i].data.ptr;

                // NOTE(review): fd_context is dereferenced to take this lock
                // BEFORE the CWJ_ASSERT(fd_context) below — a null pointer would
                // crash here first; consider asserting before locking.
                FdContext::MutexType::Lock lock(fd_context->m_mutex);

                CWJ_ASSERT(fd_context);
                // Only act on IN/OUT bits that are still registered on this fd.
                int real_event = ((EPOLLIN | EPOLLOUT) & event.events) & fd_context->m_types;

                if (real_event == NONE) continue;



                // Strip the triggered bits, then re-arm (MOD) or remove (DEL)
                // the fd depending on whether any registered events remain.
                fd_context->m_types = (EventType) (fd_context->m_types & ~(real_event));
                event.events = fd_context->m_types | EPOLLET;
                int op = fd_context->m_types == NONE ? EPOLL_CTL_DEL : EPOLL_CTL_MOD;
                int rt = epoll_ctl(thread_epoll_fd, op, fd_context->m_fd, &event);

                // If the epoll_ctl update failed, skip running the handlers.
                if (rt) {
                    ERROR_LOG(g_logger) << "epoll_ctl(" << thread_epoll_fd << ", "
                                        << op << ", " << fd_context->m_fd << ", " << (EPOLL_EVENTS) evs[i].events
                                        << "):"
                                        << rt << " (" << errno << ") (" << strerror(errno) << ")";
                    continue;
                }



                if (real_event & EPOLLIN) {
                    fd_context->triggerEvent(READ);
                    --m_pending_event_count;
                }

                if (real_event & EPOLLOUT) {
                    fd_context->triggerEvent(WRITE);
//                    INFO_LOG(g_logger) << "IOManager::idle() sock="<<fd_context->m_fd<<" trigger write";
                    --m_pending_event_count;
                }

//                if (fd_context->m_types == NONE) fd_context->reset();

            }

            // Steer work consumption: if this pass produced tasks, advertise
            // this thread as the consumer; otherwise follow the global id.
            if(has_task){
                auto tid = Thread::GetPId();
                SetConsumeIntentionId(tid),m_global_intention_id = tid;
            }
            else    SetConsumeIntentionId(m_global_intention_id);

            memset(events.get(),0,epoll_event_buf_size);

        }while(false && !isStop());  // NOTE(review): "false &&" makes this body run
                                     // exactly once per call — presumably the
                                     // scheduler re-enters idle(); confirm, else
                                     // drop the "false &&".


    }

    /// Construct the IO manager on top of the base Scheduler.
    /// Creates the shared epoll instance (worker threads dup() it in
    /// beforeRunScheduler()) and the eventfd used by wake().
    /// @param size         number of scheduler threads
    /// @param useCurThread whether the calling thread joins the pool
    /// @param name         scheduler name (forwarded to Scheduler)
    IOManager::IOManager(size_t size, bool useCurThread, const std::string &name) : Scheduler(size, useCurThread,
                                                                                              name) {
        m_epoll_fd = epoll_create(1024);
        // Fixed: the return values of epoll_create/eventfd were never checked.
        if (m_epoll_fd == -1) {
            ERROR_LOG(g_logger) << "epoll_create error ,errno=" << errno << " strerror= " << strerror(errno);
            CWJ_ASSERT(false);
        }

        m_wakeup_fd = eventfd(0, 0);
        if (m_wakeup_fd == -1) {
            ERROR_LOG(g_logger) << "eventfd error ,errno=" << errno << " strerror= " << strerror(errno);
            CWJ_ASSERT(false);
        }

        // Fixed: F_SETFL with a bare O_NONBLOCK clobbers any existing status
        // flags; read them first and OR the non-blocking bit in.
        int flags = fcntl(m_wakeup_fd, F_GETFL, 0);
        if (flags == -1 || fcntl(m_wakeup_fd, F_SETFL, flags | O_NONBLOCK) == -1) {
            ERROR_LOG(g_logger) << "set wakeup_fd non-blocking error ,errno=" << errno
                                << " strerror= " << strerror(errno);
            CWJ_ASSERT(false);
        }

        // Register the eventfd so wake() can interrupt a blocking epoll_wait.
        epoll_event event;
        memset(&event, 0, sizeof(event));
        event.events = EPOLLIN | EPOLLET;
        event.data.fd = m_wakeup_fd;

        if (epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_wakeup_fd, &event) == -1) {
            ERROR_LOG(g_logger) << "epoll add wakeup_fd error ,errno=" << errno << " str(errno)= " << strerror(errno);
            CWJ_ASSERT(false);
        }
    }

    /// Release the epoll instance, the wake-up eventfd, and every per-fd
    /// context allocated by addEvent().
    IOManager::~IOManager() {
        close(m_epoll_fd);
        close(m_wakeup_fd);
        for (auto &entry : m_fd_contexts) {
            delete entry.second;
        }
    }

    bool IOManager::addEvent(int fd, IOManager::EventType event_type, Scheduler::CallBack cb) {

        MutexType::RLock lock(m_mutex);
        if (!m_fd_contexts.count(fd)) {
            lock.unlock();
            {
                MutexType::WLock lock1(m_mutex);
                m_fd_contexts[fd] = new FdContext(fd);
                m_fd_contexts[fd]->reset();
            }
        }
        auto &fd_ctx = m_fd_contexts[fd];


        FdContext::MutexType::Lock lock2(fd_ctx->m_mutex);
        int op = fd_ctx->m_types == NONE ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;

        epoll_event event;
        memset(&event, 0, sizeof(event));
        event.events = (fd_ctx->m_types | event_type) | EPOLLET | EPOLLONESHOT;
        event.data.ptr = m_fd_contexts[fd];

        int rt = epoll_ctl(thread_epoll_fd, op, fd, &event);
        if (rt) {
            ERROR_LOG(g_logger) << "addEvent(fd=" << fd << " event_type="
                                << event_type << ") epoll_ctl error ,errno=" << errno
                                << " strerror= " << strerror(errno);
            return false;
        }

        fd_ctx->m_types = EventType(fd_ctx->m_types | event_type);
        auto &ev_ctx = fd_ctx->getContextFromType(event_type);
        if(cb){
            ev_ctx.m_cb.swap(cb);
        }else{
            ev_ctx.m_co = Coroutine::GetThis();
        }

        ev_ctx.m_scheduler = shared_from_this();

        ++m_pending_event_count;



        return true;
    }

    /// Unregister @p event_type on @p fd WITHOUT firing its handler.
    /// @return true on success; false if the fd/event is unknown or epoll_ctl fails.
    bool IOManager::delEvent(int fd, IOManager::EventType event_type) {

        MutexType::RLock lock(m_mutex);
        if (!m_fd_contexts.count(fd)) {
            return false;
        }

        auto &fd_ctx = m_fd_contexts[fd];
        lock.unlock();

        FdContext::MutexType::Lock lock2(fd_ctx->m_mutex);

        // Nothing to remove if this event type isn't registered on the fd.
        if (!(event_type & fd_ctx->m_types)) return false;

        epoll_event event;
        memset(&event, 0, sizeof(event));
        event.events = (fd_ctx->m_types & ~event_type) | EPOLLET;
        // Other events still pending -> modify; none left -> remove the fd.
        int op = event.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
        event.data.ptr = fd_ctx;
        int rt = epoll_ctl(thread_epoll_fd, op, fd, &event);
        if (rt) {
            // Fixed: this message was mislabeled "addEvent" in the original.
            ERROR_LOG(g_logger) << "delEvent(fd=" << fd << " event_type="
                                << event_type << ") epoll_ctl error ,errno=" << errno
                                << " strerror= " << strerror(errno);
            return false;
        }

        --m_pending_event_count;
        fd_ctx->m_types = (EventType) (fd_ctx->m_types & ~event_type);
        // Reset the per-event context so the slot can be reused.
        if (event_type == READ) {
              fd_ctx->setMReadEv(FdContext::EventContext());
        } else if (event_type == WRITE) {
            fd_ctx->setMWriteEv(FdContext::EventContext());
        }
        return true;
    }

    bool IOManager::cancelEvent(int fd, IOManager::EventType event_type) {
        MutexType::WLock lock(m_mutex);
        if (!m_fd_contexts.count(fd)) {
            return false;
        }

        auto &fd_ctx = m_fd_contexts[fd];

        FdContext::MutexType::Lock lock2(fd_ctx->m_mutex);

        if (!(event_type & fd_ctx->m_types)) return false;

        epoll_event event;
        memset(&event, 0, sizeof(event));
        event.events = (fd_ctx->m_types & ~event_type) | EPOLLET;
        int op = event.events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
        event.data.ptr = fd_ctx;
        int rt = epoll_ctl(thread_epoll_fd, op, fd, &event);
        if (rt) {
            ERROR_LOG(g_logger) << "cancelEvent(fd=" << fd << " event_type="
                                << event_type << ") epoll_ctl error ,errno=" << errno
                                << " strerror= " << strerror(errno);
            return false;
        }

        --m_pending_event_count;
        fd_ctx->m_types = (EventType)(fd_ctx->m_types & ~event_type);
        if (event_type == READ) {
            fd_ctx->triggerEvent(event_type);
//            fd_ctx->m_read_ev.reset();
            fd_ctx->setMReadEv(FdContext::EventContext());
            INFO_LOG(g_logger) << "sock="<<fd<<" cancel read ";
        } else if (event_type == WRITE) {
            fd_ctx->triggerEvent(event_type);
//            fd_ctx->m_write_ev.reset();
            fd_ctx->setMWriteEv(FdContext::EventContext());
            INFO_LOG(g_logger) << "sock="<<fd<<" cancel write ";
        }
        return true;
    }

    /// Remove @p fd from epoll entirely and fire any registered READ/WRITE
    /// handlers one last time.
    /// @return true on success; false if the fd is unknown or epoll_ctl fails.
    bool IOManager::cancelAll(int fd) {
        MutexType::RLock lock(m_mutex);
        if (!m_fd_contexts.count(fd)) {
            return false;
        }

        auto &fd_ctx = m_fd_contexts[fd];
        lock.unlock();

        FdContext::MutexType::Lock lock2(fd_ctx->m_mutex);

        if (!(fd_ctx->m_types)) return false;

        epoll_event event;
        memset(&event, 0, sizeof(event));
        event.events = NONE;
        int op = EPOLL_CTL_DEL;
        event.data.ptr = fd_ctx;
        int rt = epoll_ctl(thread_epoll_fd, op, fd, &event);
        if (rt) {
            // Fixed: this message was mislabeled "cancelEvent" in the original.
            ERROR_LOG(g_logger) << "cancelAll(fd=" << fd << " event_type="
                                << NONE << ") epoll_ctl error ,errno=" << errno
                                << " strerror= " << strerror(errno);
            return false;
        }

        // NOTE(review): triggerEvent() returns false when the type is still set
        // in m_types, and m_types is NOT cleared here before calling it — so
        // these triggers appear to be no-ops for registered events. Confirm the
        // intended guard direction in FdContext::triggerEvent.
        if (fd_ctx->triggerEvent(READ)) {
            INFO_LOG(g_logger) << "sock="<<fd<<" cancel read ";
            --m_pending_event_count;
        }
        if (fd_ctx->triggerEvent(WRITE)) {
            INFO_LOG(g_logger) << "sock="<<fd<<" cancel write ";
            --m_pending_event_count;
        }

        return true;
    }

    /// Fetch the scheduler bound to the current thread, downcast to IOManager.
    /// Returns an empty pointer when the current scheduler is not an IOManager.
    IOManager::ptr IOManager::GetThis() {
        auto scheduler = Scheduler::GetThis();
        return std::dynamic_pointer_cast<IOManager>(scheduler);
    }

    /// TimerManager hook: a timer was inserted at the front of the queue, so
    /// the next expiry moved earlier — wake an idle thread to re-arm its wait.
    void IOManager::onTimerInsertedAtFront() {
        wake();
    }

    /// Number of events currently registered but not yet triggered.
    /// Returned by const reference so callers can observe it atomically.
    const std::atomic<size_t> &IOManager::getMPendingEventCount() const {
        return m_pending_event_count;
    }

    void IOManager::beforeRunScheduler() {
        SetHookEnable(true);
        thread_epoll_fd = dup(m_epoll_fd);
    }

    /// Per-worker-thread teardown: release this thread's dup'ed epoll fd.
    void IOManager::afterRunScheduler() {
        // Fixed: guard against an unset fd and reset after close so a repeated
        // call on this thread cannot double-close a recycled descriptor.
        if (thread_epoll_fd != -1) {
            close(thread_epoll_fd);
            thread_epoll_fd = -1;
        }
    }


    /// Build the per-fd state holder: remembers the fd and starts with no
    /// registered events and empty read/write contexts.
    IOManager::FdContext::FdContext(const int fd) : m_fd(fd) {
        reset();
    }

    /// Map an event type to its per-fd handler slot.
    /// READ and WRITE are the only valid inputs; anything else asserts and
    /// then throws std::invalid_argument.
    IOManager::FdContext::EventContext &IOManager::FdContext::getContextFromType(IOManager::EventType type) {
        if (type == READ) {
            return m_read_ev;
        }
        if (type == WRITE) {
            return m_write_ev;
        }
        CWJ_ASSERT(false);
        throw std::invalid_argument("getContext invalid event");
    }

    void IOManager::FdContext::reset() {
        this->m_types = NONE;
        this->m_read_ev.reset();
        this->m_write_ev.reset();
    }

    /// Fire the handler registered for @p type: reschedule the parked
    /// coroutine or enqueue the stored callback on its scheduler.
    /// @return true if a handler was dispatched, false if the guard rejected
    ///         the call (type is NONE or still set in m_types).
    bool IOManager::FdContext::triggerEvent(EventType type) {

        // NOTE(review): this guard rejects types that are STILL present in
        // m_types — callers (idle/delEvent/cancelEvent) clear the bit before
        // calling. cancelAll() does NOT clear it first, so its triggerEvent
        // calls return false; confirm whether that is intended.
        if (type == NONE || type & m_types) return false;

        CWJ_ASSERT(type == READ || type == WRITE);

        // Clear again defensively (a no-op when the caller already did).
        m_types = (EventType) (m_types & (~type));

        auto &ctx = this->getContextFromType(type);

        if (ctx.m_co) {
            if(ctx.m_co->getMState() != CoState::State::HOLD) {
                ERROR_LOG(g_logger) << "ctx.m_co.state="<<ctx.m_co->getMState()<<" "<<ctx.m_co->getMId();
                // TODO: can a coroutine legitimately reach here in a non-HOLD state?
                CWJ_ASSERT(ctx.m_co->getMState() == CoState::State::HOLD);
            }
//            INFO_LOG(g_logger) << " FdContext::triggerEvent(EventType type) trigger="<<ctx.m_co->getMId();
            // Mark the parked coroutine runnable and hand it to its scheduler.
            ctx.m_co->setMState(CoState::READY);
            ctx.m_scheduler->schedule(ctx.m_co, -1);
        } else if (ctx.m_cb) {
            ctx.m_scheduler->schedule(ctx.m_cb, -1);
        } else {
            // A registered event must carry either a coroutine or a callback.
            CWJ_ASSERT(false);
        }
        return true;
    }

    /// Read-only access to the READ handler slot.
    const IOManager::FdContext::EventContext &IOManager::FdContext::getMReadEv() const {
        return m_read_ev;
    }

    /// Replace the READ handler slot.
    /// Invariant: a coroutine stored here must be parked in HOLD state.
    void IOManager::FdContext::setMReadEv(const IOManager::FdContext::EventContext &mReadEv) {
        if (mReadEv.m_co) CWJ_ASSERT(mReadEv.m_co->getMState() == CoState::State::HOLD);
        m_read_ev = mReadEv;
    }

    /// Read-only access to the WRITE handler slot.
    const IOManager::FdContext::EventContext &IOManager::FdContext::getMWriteEv() const {
        return m_write_ev;
    }

    /// Replace the WRITE handler slot.
    /// Invariant: a coroutine stored here must be parked in HOLD state.
    void IOManager::FdContext::setMWriteEv(const IOManager::FdContext::EventContext &mWriteEv) {
        if (mWriteEv.m_co) CWJ_ASSERT(mWriteEv.m_co->getMState() == CoState::State::HOLD);
        m_write_ev = mWriteEv;
    }

    void IOManager::FdContext::EventContext::reset() {
        this->m_co.reset();
        this->m_scheduler.reset();
        this->m_cb = nullptr;
    }

    /// Default-construct an empty handler slot (members default-initialize).
    IOManager::FdContext::EventContext::EventContext() {
    }
}
