#include "io_manager.h"

#include "fiber.h"
#include "global.h"
#include "log.h"
#include "scheduler.h"
#include "util.h"

#include <fcntl.h>
#include <sys/epoll.h>
#include <unistd.h>

#include <asm-generic/errno-base.h>

#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <stdexcept>
#include <vector>

//IOManager: an epoll-based I/O scheduler, derived from Scheduler.
//When there is no work, the idle fiber runs and blocks in epoll_wait.
//When work arrives, epoll_wait is woken and tasks are taken from the queue and executed.

//Map an epoll_ctl operation code to its symbolic name.
//Unknown codes yield the sentinel string "no find op".
std::string opToString(int op)
{
    switch (op)
    {
    case EPOLL_CTL_ADD: return "EPOLL_CTL_ADD";
    case EPOLL_CTL_DEL: return "EPOLL_CTL_DEL";
    case EPOLL_CTL_MOD: return "EPOLL_CTL_MOD";
    default:            return "no find op";
    }
}


//Construct the IOManager: set up the epoll instance and the self-wake
//pipe, pre-allocate fd contexts, then start the underlying Scheduler.
//threads    - number of worker threads for the base Scheduler
//use_caller - whether the calling thread participates in scheduling
//name       - scheduler name used in logs
IOManager::IOManager(size_t threads, bool use_caller, const std::string& name)
:Scheduler(threads, use_caller, name)
{
    //Create the epoll instance (the size hint is ignored on modern kernels,
    //it only needs to be > 0).
    m_epfd = epoll_create(5000);
    ASSERT(m_epfd > 0);

    //Create an anonymous pipe (returns 0 on success); tickle() writes to
    //m_tickleFds[1] to wake the idle fiber blocked in epoll_wait.
    int rt = pipe(m_tickleFds);
    ASSERT(!rt);

    //Watch the pipe's read end for readability, edge-triggered, so a
    //tickle() pops the idle fiber out of epoll_wait.
    epoll_event event;
    memset(&event, 0, sizeof(epoll_event));
    event.events = EPOLLIN | EPOLLET; //readable, edge-triggered
    event.data.fd = m_tickleFds[0];

    //Non-blocking read end: idle() drains it with read() in a loop and
    //must not block when the pipe is empty.
    rt = fcntl(m_tickleFds[0], F_SETFL, O_NONBLOCK);
    ASSERT(!rt);

    //Register the pipe's read end with epoll.
    rt = epoll_ctl(m_epfd, EPOLL_CTL_ADD, m_tickleFds[0], &event);
    ASSERT(!rt);

    //Pre-allocate contexts for the first 32 fds.
    contextResize(32);

    //Start the fiber scheduler (must be last: workers may use m_epfd).
    start();
}

//Tear down: stop the scheduler first, then release the epoll fd, the
//wake pipe, and every per-fd context.
IOManager::~IOManager()
{
    //Stop the fiber scheduler before closing the fds it may be using.
    stop();

    close(m_epfd);
    close(m_tickleFds[0]);
    close(m_tickleFds[1]);

    //Free all fd contexts (deleting a null pointer is a no-op).
    for(auto* ctx : m_fdContexts)
    {
        delete ctx;
    }
}

//重置m_fdContexts容器大小
//Resize the per-fd context table to `size` entries, allocating a
//FdContext (with fd == index) for every empty slot.
//Fix: the original leaked FdContext objects when `size` was smaller
//than the current table — vector::resize dropped the raw pointers
//without deleting them. Callers must hold the write lock on
//m_sharedMutex (or be the single-threaded constructor).
void IOManager::contextResize(size_t size)
{
    //Delete contexts that a shrink would otherwise silently drop.
    for(size_t i = size; i < m_fdContexts.size(); ++i)
    {
        delete m_fdContexts[i];
    }

    m_fdContexts.resize(size);

    for(size_t i = 0; i < m_fdContexts.size(); ++i)
    {
        if(!m_fdContexts[i])
        {
            m_fdContexts[i] = new FdContext;
            m_fdContexts[i]->fd = i;
        }
    }
}

//给对应fd添加对应类型事件
//return 成功返回0，失败返回-1
//Register `event` (READ/WRITE) on `fd`. When the event fires, `cb` is
//scheduled if given; otherwise the calling fiber is resumed.
//A given event type may be registered at most once per fd — two
//handlers for the same event could run concurrently on two threads.
//return 0 on success, -1 on failure
int IOManager::addEvent(int fd, Event event, std::function<void()> cb)
{
    //Look up (or create) the FdContext for this fd.
    FdContext* fd_ctx = nullptr;
    std::shared_lock<std::shared_mutex> read_lock(m_sharedMutex);
    if((int)m_fdContexts.size() > fd)
    {
        fd_ctx = m_fdContexts[fd];
        read_lock.unlock();
    }
    else //table too small: grow it under the write lock
    {
        read_lock.unlock();
        std::unique_lock<std::shared_mutex> write_lock(m_sharedMutex);
        //Fix: re-check under the write lock. Another thread may have
        //grown the table past fd*1.5 in the window between the two
        //locks; resizing unconditionally could SHRINK the table,
        //leaking contexts and invalidating pointers other threads hold.
        if((int)m_fdContexts.size() <= fd)
        {
            contextResize(fd * 1.5);
        }
        fd_ctx = m_fdContexts[fd];
    }

    std::unique_lock<std::mutex> fd_lock(fd_ctx->fdMutex);
    //Reject duplicate registration of the same event type on this fd.
    if(fd_ctx->events & event)
    {
        log_error("addEvent error, fd = {}, event = {}, fd_ctx.event = {}", 
            fd, (int)event, (int)fd_ctx->events);
        ASSERT(!(fd_ctx->events & event));
    }

    //MOD if the fd is already in the epoll set, ADD otherwise.
    int op = fd_ctx->events ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
    epoll_event epevent;
    epevent.events = EPOLLET | (uint32_t)fd_ctx->events | event; //edge-triggered | existing | new
    epevent.data.ptr = fd_ctx;
    int rt = epoll_ctl(m_epfd, op, fd, &epevent);
    if(rt)
    {
        //Fix: format string had 7 placeholders for 8 arguments.
        log_error("epoll_ctl({}, {}, {}, events:{}): rt:{}, errno:{}, errno str:{}, fd_ctx->events:{}",
            m_epfd, opToString(op), fd, (int)epevent.events, rt, 
            errno, strerror(errno), (int)fd_ctx->events);
        return -1;
    }

    ++m_pendingEventCount; //number of events currently awaiting delivery
    fd_ctx->events = (Event)(fd_ctx->events | event);
    FdContext::EventContext& event_ctx = fd_ctx->getContext(event); //slot for this event type
    //The slot must be empty — it was reset when last triggered/removed.
    ASSERT(!event_ctx.scheduler
            && !event_ctx.fiberEvent
            && !event_ctx.cbEvent);
    
    event_ctx.scheduler = Scheduler::GetThis();
    if(cb)  //callback task
    {
        event_ctx.cbEvent.swap(cb);
    }
    else    //no callback: resume the current fiber when the event fires
    {
        event_ctx.fiberEvent = Fiber::GetThis();
        //The registering fiber must be the one currently executing.
        ASSERT2(event_ctx.fiberEvent->getState() == Fiber::EXEC,
            "state = " << event_ctx.fiberEvent->getState());
    }

    return 0;
}

//删除事件（不会触发事件）
//Remove `event` from `fd` WITHOUT triggering its handler.
//return true on success, false if the fd/event is not registered or
//epoll_ctl fails.
bool IOManager::delEvent(int fd, Event event)
{
    //Look up the FdContext for this fd.
    std::shared_lock<std::shared_mutex> read_lock(m_sharedMutex);
    if((int)m_fdContexts.size() <= fd)
    {
        log_error("IOManager::delEvent error, m_fdContexts.size(): {}, fd:{}",m_fdContexts.size(), fd);
        return false;
    }
    FdContext* fd_ctx = m_fdContexts[fd];
    read_lock.unlock();

    std::unique_lock<std::mutex> fd_lock(fd_ctx->fdMutex);
    if(!(fd_ctx->events & event))
    {
        log_error("no event, fd_ctx->events:{}, event:{}", (int)fd_ctx->events, (int)event);
        return false;
    }

    //Strip `event` from the registered set.
    Event new_events = (Event)(fd_ctx->events & ~event);
    //Fix: the original used EPOLL_CTL_ADD when no events remained,
    //which fails with EEXIST — the fd must be REMOVED from the epoll
    //set (compare cancelEvent, which does this correctly).
    int op = new_events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
    epoll_event epevent;
    epevent.events = EPOLLET | (uint32_t)new_events;
    epevent.data.ptr = fd_ctx;
    int ret = epoll_ctl(m_epfd, op, fd, &epevent);
    if(ret)
    {
        //Fix: format string had 7 placeholders for 8 arguments.
        log_error("epoll_ctl({}, {}, {}, events:{}): rt:{}, errno:{}, errno str:{}, fd_ctx->events:{}",
            m_epfd, opToString(op), fd, (int)epevent.events, ret, 
            errno, strerror(errno), (int)fd_ctx->events);
        return false;
    }

    --m_pendingEventCount;
    fd_ctx->events = new_events;

    //Clear the slot so the same event type can be registered again.
    FdContext::EventContext& event_ctx = fd_ctx->getContext(event);
    fd_ctx->resetContext(event_ctx);

    return true;
}

//取消事件（如果事件存在则触发事件）
bool IOManager::cancelEvent(int fd, Event event)
{
    //取出要删除的事件对应的FdContext
    std::shared_lock<std::shared_mutex> read_lock(m_sharedMutex);
    if((int)m_fdContexts.size() <= fd)
    {
        log_error("IOManager::delEvent error, m_fdContexts.size(): {}, fd:{}",m_fdContexts.size(), fd);
        return false;
    }
    FdContext* fd_ctx = m_fdContexts[fd];
    read_lock.unlock();

    std::unique_lock<std::mutex> fd_lock(fd_ctx->fdMutex);
    if(!(fd_ctx->events & event))
    {
        log_error("no event, fd_ctx->events:{}, event:{}", (int)fd_ctx->events, (int)event);
        return false;
    }

    //从fd_ctx->events中去除event事件
    Event new_enents = (Event)(fd_ctx->events & ~event);
    int op = new_enents ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
    epoll_event epevent;
    epevent.events = EPOLLET | (uint32_t)new_enents;
    epevent.data.ptr = fd_ctx;
    int ret = epoll_ctl(m_epfd, op, fd, &epevent);
    if(ret)
    {
        log_error("epoll_ctl({}, {}, {}): rt: {}, errno:{},erron str:{}, fd_ctx->events:{}",
            m_epfd, opToString(op), fd, (int)epevent.events, ret, 
            errno, strerror(errno), (int)fd_ctx->events);
        return false;
    }
    
    fd_ctx->triggerEvent(event);
    --m_pendingEventCount;

    return true;
}

//取消对应fd上所有事件
bool IOManager::cancelAll(int fd)
{
    //取出要删除的事件对应的FdContext
    std::shared_lock<std::shared_mutex> read_lock(m_sharedMutex);
    if((int)m_fdContexts.size() <= fd)
    {
        log_error("IOManager::delEvent error, m_fdContexts.size(): {}, fd:{}",m_fdContexts.size(), fd);
        return false;
    }
    FdContext* fd_ctx = m_fdContexts[fd];
    read_lock.unlock();

    std::unique_lock<std::mutex> fd_lock(fd_ctx->fdMutex);
    if(!fd_ctx->events)
    {
        log_error("no event, fd_ctx->events:{}", (int)fd_ctx->events);
        return false;
    }

    int op = EPOLL_CTL_DEL;
    epoll_event epevent;
    epevent.events = 0;
    epevent.data.ptr = fd_ctx;
    int ret = epoll_ctl(m_epfd, op, fd, &epevent);
    if(ret)
    {
        log_error("epoll_ctl({}, {}, {}): rt: {}, errno:{},erron str:{}, fd_ctx->events:{}",
            m_epfd, opToString(op), fd, (int)epevent.events, ret, 
            errno, strerror(errno), (int)fd_ctx->events);
        return false;
    }

    if(fd_ctx->events & READ)
    {
        fd_ctx->triggerEvent(READ);
        --m_pendingEventCount;
    }
    if(fd_ctx->events & WRITE)
    {
        fd_ctx->triggerEvent(WRITE);
        --m_pendingEventCount;
    }
    ASSERT(fd_ctx->events == 0);

    return true;
}

//返回 协程调度器的指针
//Fetch the IOManager driving the current thread.
//Returns nullptr if the thread's scheduler is not an IOManager
//(dynamic_cast performs the checked downcast).
IOManager* IOManager::GetThis()
{
    Scheduler* scheduler = Scheduler::GetThis();
    return dynamic_cast<IOManager*>(scheduler);
}





//通知协程调度器有任务了
//Notify the scheduler that new work has arrived.
void IOManager::tickle()
{
    //No thread is parked in epoll_wait -> nothing to wake up.
    if(!hasIdleThreads())
    {
        return;
    }

    //One byte on the pipe's write end makes the read end readable,
    //which pops an idle fiber out of epoll_wait.
    const int written = write(m_tickleFds[1], "T", 1);
    ASSERT(written == 1);
}

//协程无任务时调用idle协程
//Idle fiber body: parks the thread in epoll_wait until a timer expires,
//an fd becomes ready, or tickle() writes to the wake pipe; then
//schedules expired timers and ready fd handlers, and yields back to
//the scheduler.
void IOManager::idle()
{
    log_info("IOManager idle");

    //Heap-allocated event buffer; shared_ptr with an array deleter keeps
    //it alive across fiber switches and frees it when idle() exits.
    epoll_event* events = new epoll_event[g_max_evnets];
    std::shared_ptr<epoll_event> shared_events(events, [](epoll_event* ptr){
        delete[] ptr;
    });

    while(true)
    {
        uint64_t next_timeout = 0;
        if(stopping(next_timeout))
        {
            log_info("Scheduler name = {}, idle stopping exit", getName());
            break;
        }

        int ret = 0;
        while(true)
        {
            //Clamp the wait to g_max_timeout; UINT64_MAX from the timer
            //queue means "no timer pending".
            if(next_timeout != UINT64_MAX)
            {
                next_timeout = next_timeout > g_max_timeout ? g_max_timeout : next_timeout;
            }
            else
            {
                next_timeout = g_max_timeout;
            }

            ret = epoll_wait(m_epfd, events, g_max_evnets, next_timeout);
            //Retry only when interrupted by a signal.
            if(ret < 0 && errno == EINTR){
            }else {
                break;
            }
        }

        //Collect and schedule timer callbacks that have expired.
        std::vector<std::function<void()>> cbs;
        listExpiredCb(cbs);
        if(!cbs.empty())
        {
            schedule(cbs.begin(), cbs.end());
            cbs.clear();
        }

        //Dispatch the fd events epoll reported.
        for(int i = 0; i < ret; ++i)
        {
            epoll_event& event = events[i];
            if(event.data.fd == m_tickleFds[0]) //wake pipe: just drain it
            {
                //Edge-triggered + non-blocking: read until empty.
                uint8_t data[256];
                while(read(m_tickleFds[0], data, sizeof(data)) > 0);
                continue;
            }

            FdContext* fd_ctx = (FdContext*)event.data.ptr;
            std::unique_lock<std::mutex> fd_lock(fd_ctx->fdMutex);

            //On error/hangup (peer closed), deliver every event the fd
            //is registered for so its handlers run and can observe the
            //failure.
            //Fix: the original wrote to events->events (== events[0]),
            //mutating the wrong element for every i > 0.
            if(event.events & (EPOLLERR | EPOLLHUP))
            {
                event.events |= (EPOLLIN | EPOLLOUT) & fd_ctx->events;
            }

            //Translate epoll flags into our READ/WRITE event mask.
            //Fix: the original tested events->events (== events[0])
            //instead of the current event.
            int real_events = NONE;
            if(event.events & EPOLLIN)
            {
                real_events |= READ;
            }
            if(event.events & EPOLLOUT)
            {
                real_events |= WRITE;
            }

            //Nothing we are registered for fired on this fd.
            if((fd_ctx->events & real_events) == NONE)
            {
                continue;
            }

            //Re-arm the fd with whatever events remain after delivery.
            int left_events = (fd_ctx->events & ~real_events);
            int op = left_events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
            event.events = EPOLLET | left_events;
            int rt = epoll_ctl(m_epfd, op, fd_ctx->fd, &event);
            if(rt)
            {
                //Fix: format string had 7 placeholders for 8 arguments.
                log_error("epoll_ctl({}, {}, {}, events:{}): rt:{}, errno:{}, errno str:{}, fd_ctx->events:{}",
                    m_epfd, opToString(op), fd_ctx->fd, (int)event.events, rt, 
                    errno, strerror(errno), (int)fd_ctx->events);
                continue;
            }

            //Hand the ready handlers to the scheduler.
            if(real_events & READ)
            {
                fd_ctx->triggerEvent(READ);
                --m_pendingEventCount;
            }
            if(real_events & WRITE)
            {
                fd_ctx->triggerEvent(WRITE);
                --m_pendingEventCount;
            }
        }

        //Yield so the scheduler can run the fibers queued above.
        Fiber::YieldToHold();
    }
}

//返回是否可以停止
//Convenience overload: same check, timeout value discarded.
bool IOManager::stopping()
{
    uint64_t unused_timeout = 0;
    return stopping(unused_timeout);
}

//当有新的定任务插入到定队列的首部，执行该函数
//Hook invoked when a new timer becomes the earliest in the queue.
//Wakes epoll_wait so it can recompute its timeout against the new
//front timer instead of sleeping past it.
void IOManager::onTimerInsertedAtFront()
{
    tickle();
}


//判断是否可以停止
//timeout 最近要触发的定时器事件时间间隔
//Report whether the IOManager may shut down.
//timeout (out) - interval until the next timer fires (UINT64_MAX if none)
bool IOManager::stopping(uint64_t& timeout)
{
    timeout = getNextTimer();

    //Stoppable only when: no timers are armed, no fd events are still
    //pending delivery, and the base scheduler itself agrees.
    const bool no_timers = (timeout == UINT64_MAX);
    const bool no_pending = (m_pendingEventCount == 0);
    return no_timers && no_pending && Scheduler::stopping();
}







//Return an event slot to its empty state so the same event type can be
//registered on this fd again.
void IOManager::FdContext::resetContext(EventContext& ctx)
{
    ctx.cbEvent = nullptr;
    ctx.fiberEvent.reset();
    ctx.scheduler = nullptr;
}

//根据事件类型获取对应事件
//Map an event type to its context slot on this fd.
//Throws std::invalid_argument for anything other than READ/WRITE
//(only reachable if ASSERT2 is compiled out).
IOManager::FdContext::EventContext& IOManager::FdContext::getContext(Event event)
{
    switch (event) 
    {
    case IOManager::READ:
        return readEvent;   
    case IOManager::WRITE:
        return writeEvent;
    default:
        //Fix: assertion message said "getContex" (typo).
        ASSERT2(false, "getContext");
    }
    
    //std::invalid_argument: thrown when a function argument is invalid.
    throw std::invalid_argument("getContext invalid event");
}

//触发事件 - 将事件添加到调度器中
//Fire `event`: unregister it from this fd and hand its pending handler
//(callback or fiber) to the scheduler it was registered with.
//Caller must hold fdMutex.
void IOManager::FdContext::triggerEvent(Event event)
{
    //The event must actually be registered here.
    ASSERT(this->events & event);

    //Unregister before dispatch so the slot can be re-armed.
    this->events = (Event)(this->events & ~event);

    EventContext& ctx = getContext(event);
    //Scheduling by address makes FiberAndThread swap the payload out,
    //so the slot is emptied automatically.
    if(!ctx.cbEvent)
    {
        ctx.scheduler->schedule(&ctx.fiberEvent);
    }
    else
    {
        ctx.scheduler->schedule(&ctx.cbEvent);
    }
    ctx.scheduler = nullptr;
}