#ifndef _SERVER_H_
#define _SERVER_H_
#include <algorithm>            // std::copy (Buffer) — was relied on transitively
#include <assert.h>
#include <cerrno>               // errno checks in Socket/Poller/EventLoop
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <typeinfo>
#include <unordered_map>
#include <utility>              // std::swap / std::move (Any)
#include <vector>

#include <unistd.h>         // close(int fd)
#include <fcntl.h>          // int fcntl(int, int, ...)
#include <sys/socket.h>     // 套接字接口
#include <netinet/in.h>     // Internet domain sockets // 地址结构头文件
#include <sys/signal.h>     // struct NerWork-> SIGPIPE SIG_IGN
#include <sys/eventfd.h>
/**
 *  struct sockaddr_in
 */
#include <arpa/inet.h>  // 地址 htonl htons 头文件
#include <sys/epoll.h>
#include <sys/timerfd.h>// timerfd

#include "log.h"

// Default initial capacity (in bytes) of a Buffer's internal storage.
const static std::uint32_t g_default_buffer_size = 1024;

/**
 * Growable byte buffer with independent read/write cursors.
 *
 * Layout: [0, _reader_idx) already-consumed space, [_reader_idx, _writer_idx)
 * readable data, [_writer_idx, size) writable tail space. Growth is managed
 * manually (vector growth policy differs per platform): data is first
 * compacted to the front when that frees enough room, and the vector is
 * resized only when head + tail space together are still insufficient.
 */
class Buffer{
private:
    std::vector<char> _buffer;      // underlying storage
    std::uint64_t _reader_idx;      // read offset (start of readable data)
    std::uint64_t _writer_idx;      // write offset (one past the last readable byte)

public:
    // Initializer list written in member declaration order (_buffer first)
    // so the textual order matches the actual initialization order.
    Buffer(): _buffer(g_default_buffer_size), _reader_idx(0), _writer_idx(0){}

    char *begin(){ return &*_buffer.begin(); }
    // Current read/write positions as raw pointers (for std::copy).
    char *read_pos() { return begin() + _reader_idx; }
    char *write_pos(){ return begin() + _writer_idx; }
    // Idle space before the reader / after the writer.
    std::uint64_t head_idle_size(){ return _reader_idx; }
    std::uint64_t tail_idle_size(){ return _buffer.size() - _writer_idx; }
    // Number of readable bytes.
    std::uint64_t readable_size(){ return _writer_idx - _reader_idx; }
    // Advance the read cursor by len bytes (len must be readable).
    void move_reader(std::uint64_t len){
        assert(len <= readable_size());
        _reader_idx += len;
    }
    // Advance the write cursor by len bytes (len must fit in the tail).
    void move_writer(std::uint64_t len){
        assert(len <= tail_idle_size());
        _writer_idx += len;
    }
    // Guarantee at least len bytes of tail space: first try to compact the
    // readable data to the front (avoids reallocation), otherwise resize.
    void ensure_write_enough(std::uint64_t len){
        if(len <= tail_idle_size()) return;

        if(len <= tail_idle_size() + head_idle_size()){
            std::uint64_t copy_size = readable_size();
            std::copy(read_pos(), read_pos() + copy_size, begin()); // compact data to the front
            _reader_idx = 0;
            _writer_idx = copy_size;
        }
        else{
            _buffer.resize(_writer_idx + len);
        }
    }
    // Copy len readable bytes into buf without consuming them.
    void read(void *buf, std::uint64_t len){
        assert(len <= readable_size());
        std::copy(read_pos(), read_pos() + len, static_cast<char*>(buf));
    }
    // Copy len bytes into buf and consume them.
    void read_and_pop(void *buf, std::uint64_t len){
        read(buf, len);
        move_reader(len);
    }
    // Return len readable bytes as a string without consuming them.
    std::string read_as_string(std::uint64_t len){
        assert(len <= readable_size());
        std::string str;
        str.resize(len);
        read(&str[0], len);
        return str;
    }
    // Return len readable bytes as a string and consume them.
    std::string read_as_string_and_pop(std::uint64_t len){
        std::string str = read_as_string(len);
        move_reader(len);
        return str;
    }
    // Pointer to the first '\n' in the readable data, or nullptr.
    char *find_CRLF(){ return static_cast<char*>(std::memchr(read_pos(), '\n', readable_size())); }
    // One line including its '\n'; if no newline is present, everything
    // currently readable is treated as the (last) line.
    std::string get_line(){
        char *pos = find_CRLF();
        if(pos == nullptr){
            return read_as_string(readable_size());
        }
        return read_as_string(pos - read_pos() + 1);
    }
    std::string get_line_and_pop(){
        std::string str = get_line();
        move_reader(str.size());
        return str;
    }
    // Write len bytes at the write position without advancing the cursor.
    void write(const void *data, uint64_t len){
        if(!len) return;
        ensure_write_enough(len);
        const char *temp = static_cast<const char*>(data);
        std::copy(temp, temp + len, write_pos());
    }
    // Write len bytes and advance the write cursor.
    void write_and_push(const void *data, uint64_t len){
        write(data, len);
        move_writer(len);
    }
    void write_string(const std::string &str){
        return write(str.c_str(), str.size());
    }
    void write_string_and_push(const std::string &str){
        write_string(str);
        move_writer(str.size());
    }

    // Append another buffer's readable data (without consuming it from data).
    void write_buffer(Buffer &data){
        return write(data.read_pos(), data.readable_size());
    }
    void write_buffer_and_push(Buffer &data){
        write_buffer(data);
        move_writer(data.readable_size());
    }

};

static const int max_listen = 1024;  // backlog passed to listen()

/**
 * Socket: thin wrapper around a POSIX TCP socket descriptor.
 * Note: the descriptor is NOT closed automatically — lifetime is managed
 * by the caller through Close() (Connection/Acceptor own their fds).
 */
class Socket{
    int _sockfd;
public:
    Socket() : _sockfd(-1) {}
    Socket(int fd) : _sockfd(fd) {}

    int Fd(){
        return _sockfd;
    }

    // Create an IPv4 stream (TCP) socket.
    bool Create(){
        // socket(int domain, int type, int protocol)
        _sockfd = socket(AF_INET, SOCK_STREAM, 0);
        if(_sockfd < 0) {
            ERROR_LOG("sock create err");
            return false;
        }
        return true;
    }
    // Bind the socket to ip:port.
    bool Bind(const std::string &ip, uint16_t port){
        struct sockaddr_in addr;
        std::memset(&addr, 0, sizeof(addr));    // fixed: sin_zero padding was left uninitialized
        addr.sin_family = AF_INET; // IPv4
        addr.sin_port = htons(port);
        addr.sin_addr.s_addr = inet_addr(ip.c_str());
        socklen_t len = sizeof(struct sockaddr_in);
        int ret = ::bind(_sockfd, (struct sockaddr*)&addr, len);
        if(ret < 0){
            ERROR_LOG("Bind address failed");
            return false;
        }
        return true;
    }
    bool Listen(int backlog = max_listen){
        int ret = listen(_sockfd, backlog);
        if(ret < 0){
            ERROR_LOG("listen sock failed");
            return false;
        }
        return true;
    }
    // Connect to a remote ip:port (blocking).
    bool Connect(const std::string &ip, uint16_t port){
        struct sockaddr_in addr;
        std::memset(&addr, 0, sizeof(addr));    // fixed: zero-initialize like Bind()
        addr.sin_family = AF_INET;
        addr.sin_port = htons(port);
        addr.sin_addr.s_addr = inet_addr(ip.c_str());
        socklen_t len = sizeof(struct sockaddr_in);
        int ret = connect(_sockfd, (struct sockaddr*)&addr, len);
        if(ret < 0){
            ERROR_LOG("Connect error");
            return false;
        }
        return true;
    }
    // Accept one pending connection; returns the new fd or -1.
    int Accept(){
        int newfd = accept(_sockfd, nullptr, nullptr);
        if(newfd < 0){
            ERROR_LOG("Socket accept failed");
            return -1;
        }
        return newfd;
    }
    // Receive with EINTR retry.
    // Returns: >0 bytes read; 0 peer closed; -2 no data available yet
    // (EAGAIN, non-blocking); -1 real error.
    ssize_t Recv(void *buf, size_t len, int flag = 0){
        for(;;){
            ssize_t ret = ::recv(_sockfd, buf, len, flag);
            if(ret > 0) return ret;
            if(ret == 0) return 0;          // peer closed
            switch(errno){
                case EINTR:
                    continue;               // interrupted by a signal: retry
                case EAGAIN:                // EAGAIN == EWOULDBLOCK: no data yet
                    return -2;
                default:
                    ERROR_LOG("recv failed: %s", strerror(errno));
                    return -1;
            }
        }
    }
    ssize_t NonBlockRecv(void *buf, size_t len){
        return Recv(buf, len, MSG_DONTWAIT);
    }
    // Send with EINTR retry.
    // Returns: bytes sent; -2 kernel send buffer full (EAGAIN); -1 error.
    ssize_t Send(const void *buf, size_t len, int flag = 0){
        for(;;){
            ssize_t ret = ::send(_sockfd, buf, len, flag);
            if(ret >= 0) return ret;
            if(errno == EINTR) continue;
            else if(errno == EAGAIN || errno == EWOULDBLOCK) return -2;
            else if(errno == EPIPE) {ERROR_LOG("send failed: broken pipe"); return -1;}
            else {ERROR_LOG("send failed: %s", strerror(errno)); return -1;}
        }
    }
    // const-correct: the buffer is only read, never written.
    ssize_t NonBlockSend(const void *buf, size_t len){
        if(len == 0) return 0;
        return Send(buf, len, MSG_DONTWAIT);
    }
    // Close the descriptor (idempotent).
    void Close(){
        if(_sockfd != -1){
            ::close(_sockfd);
            _sockfd = -1;
        }
    }

    // Switch the descriptor to non-blocking mode.
    void NonBlock(){
        int flag =::fcntl(_sockfd, F_GETFL);
        fcntl(_sockfd, F_SETFL, flag | O_NONBLOCK);
    }
    // Allow immediate rebinding after restart and port sharing.
    void ReuseAddress(){
        int val = 1;
        setsockopt(_sockfd, SOL_SOCKET, SO_REUSEADDR, (void*)&val, sizeof(int));
        val = 1;
        setsockopt(_sockfd, SOL_SOCKET, SO_REUSEPORT, (void*)&val, sizeof(int));
    }
    // Convenience: create, set options, bind and listen a server socket.
    bool CreateServer(uint16_t port, const std::string &ip = "0.0.0.0", bool non_block = true){
        DEBUG_LOG("CreateServer");
        if(Create() == false) return false;
        ReuseAddress();
        if(non_block)
            NonBlock();
        if(Bind(ip, port) == false) return false;
        if(Listen() == false) return false;
        return true;
    }
    // Convenience: create and connect a client socket.
    bool CreateClient(uint16_t port, const std::string &ip){
        if(Create() == false) return false;
        if(Connect(ip, port) == false) return false;
        return true;
    }
};

class Poller;
class EventLoop;
// Channel: associates one file descriptor with the event mask to monitor
// and the callbacks to run when events fire. Registration changes are
// forwarded to the owning EventLoop's Poller (Update/Remove are defined
// out of line, after EventLoop).
class Channel{
private:
    int _fd;                    // monitored descriptor
    EventLoop *_loop;           // back-pointer: which loop (thread) owns this channel
    uint32_t _events;           // events we want monitored
    uint32_t _revents;          // events the kernel reported as triggered
    using EventCallback = std::function<void()>;
    EventCallback _read_cb;     // readable-event callback
    EventCallback _write_cb;    // writable-event callback
    EventCallback _error_cb;    // error-event callback
    EventCallback _close_cb;    // hangup/close-event callback
    EventCallback _event_cb;    // any-event callback (e.g. activity bookkeeping)

public:
    Channel(EventLoop* loop,int fd):_fd(fd), _events(0), _revents(0), _loop(loop) {}
    
    int Fd() { return _fd; }
    uint32_t Events(){ return _events; }
    void SetReturnEvents(uint32_t revents) { _revents = revents; }
    void SetEvents(uint32_t events)  { _events = events ;}
    // NOTE(review): duplicate of SetReturnEvents — kept for existing callers.
    void SetREvent(uint32_t revents) {_revents = revents;}

    // Register callbacks.
    void SetReadCallback(const EventCallback & cb)  { _read_cb = cb; }
    void SetWriteCallback(const EventCallback & cb) { _write_cb = cb;}
    void SetErrorCallback(const EventCallback & cb) { _error_cb = cb;}
    void SetCloseCallback(const EventCallback & cb) { _close_cb = cb;}
    void SetEventCallback(const EventCallback & cb) { _event_cb = cb;}

    // Whether read/write monitoring is currently enabled.
    bool Readable() { return _events & EPOLLIN; }
    bool Writable() { return _events & EPOLLOUT;}

    // Enable/disable monitoring; each change is pushed to the Poller.
    void EnableRead()  { _events |= EPOLLIN;  Update();}
    void EnableWrite() { _events |= EPOLLOUT; Update();}
    void DisableRead() { _events &= ~EPOLLIN; Update();}
    void DisableWrite(){ _events &= ~EPOLLOUT;Update();}
    void DisableAll()  { _events = 0; Update();}
    // Remove from / update in the owning loop's Poller (defined out of line).
    void Remove();
    void Update();
    // Dispatch the triggered events to the registered callbacks.
    void HandleEvent(){
        if((_revents & EPOLLIN) | (_revents & EPOLLRDHUP) | (_revents & EPOLLPRI)){
            if(_event_cb) _event_cb();
            if(_read_cb) _read_cb();
        }
        // A callback may release the connection, so only one of the branches
        // below runs per dispatch (deliberate if / else-if chain).
        if(_revents & EPOLLOUT){
            if(_event_cb) _event_cb();
            if(_write_cb) _write_cb();
        }
        else if(_revents & EPOLLERR){
            if(_error_cb) _error_cb();
        }
        else if(_revents & EPOLLHUP){
            if(_event_cb) _event_cb();
            if(_close_cb) _close_cb();
        }
    }

};

const static int max_epoll_events = 1024;   // max events fetched per epoll_wait

/**
 * Poller: wraps an epoll instance and the fd -> Channel registry.
 * UpdateEvent/RemoveEvent mutate the interest set; Poll blocks until
 * events fire and reports the active channels.
 */
class Poller{
private:
    int _epfd;                                      // epoll descriptor
    struct epoll_event _evs[max_epoll_events];      // output array for epoll_wait
    std::unordered_map<int, Channel*> _channels;    // fd -> registered channel

private:
    // op: EPOLL_CTL_ADD / EPOLL_CTL_MOD / EPOLL_CTL_DEL
    void Update(Channel *channel, int op) {
        // int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
        int fd = channel->Fd();
        struct epoll_event ev;
        ev.data.fd = fd;
        ev.events = channel->Events();
        int ret = epoll_ctl(_epfd, op, fd, &ev);
        if(ret < 0){
            if(errno == EINTR) return;
            ERROR_LOG("Epollctl failed");
        }
    }
    bool HasChannel(Channel *channel) {
        auto it = _channels.find(channel->Fd());
        return it != _channels.end();
    }

public:
    Poller() {
        _epfd = ::epoll_create(1);  // argument must be > 0 (value is otherwise ignored)
        if(_epfd < 0){
            ERROR_LOG("Epoll create failed");
            ::abort();
        }
    }
    // Close the epoll descriptor (the original leaked it). Poller is only
    // ever a by-value member of EventLoop and is never copied.
    ~Poller() {
        if(_epfd >= 0) ::close(_epfd);
    }
    // Register a new channel (ADD) or refresh an existing one (MOD).
    // Fixed: the original fell through after ADD and issued a redundant MOD.
    void UpdateEvent(Channel *channel) {
        if(!HasChannel(channel)){
            Update(channel, EPOLL_CTL_ADD);
            _channels.insert(std::make_pair(channel->Fd(), channel));
            return;
        }
        Update(channel, EPOLL_CTL_MOD);
    }
    // Unregister a channel and drop it from the registry.
    void RemoveEvent(Channel *channel) {
        auto it = _channels.find(channel->Fd());
        if(it != _channels.end()){
            _channels.erase(it);
        }
        Update(channel, EPOLL_CTL_DEL);
    }
    // Block until events fire; append the triggered channels to *active.
    void Poll(std::vector<Channel*> *active) {
        // int epoll_wait(int epfd, struct epoll_event *evs, int maxevents, int timeout)
        int nfds = epoll_wait(_epfd, _evs, max_epoll_events, -1);   // -1: block indefinitely
        if(nfds < 0){
            if(errno == EINTR) return;  // interrupted: caller simply polls again
            ERROR_LOG("epoll wait error:%s", strerror(errno));
            ::abort();
        }
        for(int i = 0; i < nfds; ++i){
            auto it = _channels.find(_evs[i].data.fd);
            assert(it != _channels.end());
            it->second->SetREvent(_evs[i].events);  // record what actually fired
            active->push_back(it->second);
        }
        return;
    }
};

using TaskFunc = std::function<void()>;     // timer payload
using ReleaseFunc = std::function<void()>;  // hook that scrubs the wheel's bookkeeping

/**
 * TimerTask: a one-shot timer whose destructor is the trigger.
 * The TimerWheel keeps shared_ptr references in wheel slots; when the last
 * owning reference is dropped, this destructor runs the task (unless it was
 * canceled) and then calls the release hook to erase TimerWheel::_timers.
 */
class TimerTask{
private:
    uint64_t _id;           // timer (task) id
    uint32_t _timeout;      // delay in wheel ticks (seconds)
    TaskFunc _task;         // callback executed on expiry
    bool _canceled;         // true => skip the task on destruction

    ReleaseFunc _release;   // erases this timer's entry from TimerWheel::_timers.
                            // Since TimerTask knows exactly when it dies, its
                            // destructor is the natural place for that cleanup.

public:
    TimerTask(uint64_t id, uint32_t timeout, const TaskFunc& task)
        : _id(id), _timeout(timeout), _task(task), _canceled(false)
    {}

    ~TimerTask() {
        // Fixed: both calls are now guarded — invoking an empty
        // std::function throws std::bad_function_call, and the original
        // called _release() unconditionally even when SetRelease() had
        // never been invoked.
        if(_canceled == false && _task) { _task(); }
        if(_release) _release();
    }
    // Mark as canceled: the task will not run on destruction.
    void Cancel(){
        _canceled = true;
    }
    void SetRelease(const ReleaseFunc& release_cb){
        _release = release_cb;
    }
    // Delay (in ticks) this timer was created with; used for refresh.
    uint32_t DelayTime() {return _timeout;}
};

/**
 * TimerWheel: timing wheel driven by a timerfd that fires once per second.
 * A slot holds shared_ptr references; clearing the slot under the tick
 * destroys any timer whose last reference lived there, which fires it
 * (see ~TimerTask). All mutation of _wheel/_timers is funneled onto the
 * owning EventLoop thread via the *InLoop helpers, so no locking is needed.
 */
class TimerWheel{
private:
    using PtrTask = std::shared_ptr<TimerTask>;
    using WeakTask = std::weak_ptr<TimerTask>;
    int _tick;                                      // current wheel position; its slot is cleared each second
    int _capacity;                                  // number of slots == maximum delay in seconds
    std::vector<std::vector<PtrTask>> _wheel;       // slot -> owning references (sized from _capacity)
    std::unordered_map<uint64_t, WeakTask> _timers; // id -> weak ref (does not keep timers alive)

    // timerfd machinery: binds the wheel to one EventLoop thread so the
    // wheel never becomes a shared (lock-needing) resource.
    int _timerfd;
    EventLoop *_loop;
    std::unique_ptr<Channel> _timerfd_channel;
private:
    // Invoked from ~TimerTask via the release hook.
    void RemoveTimer(uint64_t id){
        auto it = _timers.find(id);
        if(it != _timers.end()){
            _timers.erase(it);
        }
    }
    // Create a timerfd that first fires after 1s, then every 1s.
    // Fixed: dropped the pointless while(true) that always returned on the
    // first iteration; use CLOCK_MONOTONIC so wall-clock jumps cannot
    // disturb the interval.
    static int CreateTimerFd(){
        int timerfd = ::timerfd_create(CLOCK_MONOTONIC, 0);
        if(timerfd == -1){
            ERROR_LOG("Timerfd Create Failed!");
            ::abort();
        }
        struct itimerspec itime;
        itime.it_value.tv_sec = 1;      // first expiration after 1 second
        itime.it_value.tv_nsec = 0;
        itime.it_interval.tv_sec = 1;   // then every second
        itime.it_interval.tv_nsec = 0;
        timerfd_settime(timerfd, 0, &itime, nullptr);
        return timerfd;
    }
    // Drain the expiration counter so epoll stops reporting readability.
    void ReadTimerFd(){
        uint64_t times;
        ssize_t ret = ::read(_timerfd, &times, sizeof(times));
        if(ret < 0){
            ERROR_LOG("Read TimerFd failed");
            ::abort();
        }
    }
    // Runs once per second: advance the tick and clear its slot, which
    // destroys (and thereby fires) every timer expiring now.
    void RunTimerTask(){
        _tick = (_tick + 1) % _capacity;
        _wheel[_tick].clear();
    }
    void OnTime(){
        ReadTimerFd();
        RunTimerTask();
    }

    // *InLoop helpers: always executed on the owning loop thread.
    void TimerAddInLoop(uint64_t id, uint32_t delay, const TaskFunc& cb){
        PtrTask pt(new TimerTask(id, delay, cb));
        pt->SetRelease(std::bind(&TimerWheel::RemoveTimer, this, id));
        int pos = (_tick + delay) % _capacity;
        _wheel[pos].push_back(pt);
        _timers[id] = WeakTask(pt);
    }
    // Re-arm a timer by pushing another owning reference at its new slot.
    void TimerRefreshInLoop(uint64_t id){
        auto it = _timers.find(id);
        if(it == _timers.end()) return;

        PtrTask pt = it->second.lock(); // promote the weak ref to extend the timer's life
        if(pt == nullptr) return;       // fixed: original dereferenced a null shared_ptr
                                        // when the timer had already expired
        int delay = pt->DelayTime();
        int pos = (_tick + delay) % _capacity;
        _wheel[pos].push_back(pt);
    }
    void TimerCancelInLoop(uint64_t id){
        auto it = _timers.find(id);
        if(it == _timers.end()) return;

        PtrTask pt = it->second.lock();
        if(pt) pt->Cancel();
    }
public:
    // Initializer list follows member declaration order.
    TimerWheel(EventLoop *loop)
        : _tick(0), _capacity(60), _wheel(_capacity)
        , _timerfd(CreateTimerFd())
        , _loop(loop)
        , _timerfd_channel(new Channel(loop, _timerfd))
    {
        _timerfd_channel->SetReadCallback(std::bind(&TimerWheel::OnTime, this));
        _timerfd_channel->EnableRead();
    }
    // Close the timerfd (the original leaked it).
    ~TimerWheel(){
        if(_timerfd >= 0) ::close(_timerfd);
    }
    // Thread-safe entry points: defined after EventLoop, they forward to
    // the *InLoop versions on the owning loop thread.
    void TimerAdd(uint64_t id, uint32_t delay, const TaskFunc& cb);
    void TimerRefresh(uint64_t id);
    void TimerCancel(uint64_t id);
    // HasTimer reads _timers directly without going through the loop, so it
    // is only safe when called from inside the owning EventLoop thread —
    // callers must honor that convention.
    bool HasTimer(uint64_t id){
        const auto &it = _timers.find(id);
        if(it == _timers.end()) return false;
        return true;
    }

};


using Functor = std::function<void()>;                // deferred-task type

/**
 * EventLoop: "one loop per thread" reactor core.
 * Blocks in Poller::Poll, dispatches triggered channels, then drains the
 * cross-thread task queue. Other threads hand work over via QueueInLoop,
 * which wakes the loop through an eventfd.
 */
class EventLoop{
private:
    std::thread::id _thread_id;                 // the single thread this loop is bound to
    int _event_fd;                              // eventfd used to wake a blocked epoll_wait
    std::unique_ptr<Channel> _event_fd_channel; // the eventfd itself must be monitored too;
                                                // unique_ptr so it is destroyed automatically
    std::vector<Functor> _tasks;                // queued tasks, run after each poll round
    Poller _poller;                             // event monitoring/dispatch
    std::mutex _mutex;                          // protects _tasks (filled from other threads)

    TimerWheel _timer_wheel;                    // per-loop timer wheel
public:
    std::thread::id ThreadId(){ return _thread_id; }
    // Swap the queue out under the lock, then run the tasks lock-free so a
    // task may safely queue further tasks.
    void RunAllTask(){
        std::vector<Functor> func;
        {
            std::unique_lock<std::mutex> _lock(_mutex);
            _tasks.swap(func);
        }
        for(Functor &f : func){
            f();
        }
        return;
    }
private:
    static int CreateEventFd() {
        int fd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if(fd < 0){
            ERROR_LOG("Create efd ERROR");
            ::abort();
        }
        return fd;
    }
    // Drain the eventfd counter so epoll stops reporting it readable.
    void ReadEventFd() {
        uint64_t val = 0;
        ssize_t ret = read(_event_fd, &val, sizeof(val));   // fixed: ssize_t, not int
        if(ret < 0){
            // EINTR: interrupted by a signal; EAGAIN: nothing pending
            if(errno == EAGAIN || errno == EINTR){
                return;
            }
            ERROR_LOG("ReadEventFd Failed");
            ::abort();
        }
    }
    // Write one tick into the eventfd so a blocked epoll_wait wakes up.
    void WakeUpEventFd(){
        uint64_t val = 1;
        ssize_t ret = write(_event_fd, &val, sizeof(val));  // fixed: ssize_t, not int
        if(ret < 0){
            if(errno == EAGAIN || errno == EINTR){
                return;
            }
            ERROR_LOG("WakeUpEventFd Failed");  // fixed: message was copy-pasted "ReadEventFd Failed"
            ::abort();
        }
    }
public:
    EventLoop() : _thread_id(std::this_thread::get_id())
                , _event_fd(CreateEventFd())
                , _event_fd_channel(new Channel(this, _event_fd))
                , _timer_wheel(this)
    {
        _event_fd_channel->SetReadCallback(std::bind(&EventLoop::ReadEventFd, this));
        _event_fd_channel->EnableRead();
    }
    // Main loop: poll -> dispatch -> drain task queue. Never returns.
    void Start(){
        while(1){
            std::vector<Channel*> actives;
            _poller.Poll(&actives);
            for(auto channel : actives){
                channel->HandleEvent();
            }
            RunAllTask();
        }
    }
    // True when called from the loop's own thread.
    bool IsInLoop(){
        return std::this_thread::get_id() == _thread_id;
    }
    void AssertInLoop(){
        assert(std::this_thread::get_id() == _thread_id);
    }
    // Queue a task from any thread, then wake the loop so epoll does not
    // stay blocked with work pending.
    void QueueInLoop(const Functor &cb){
        {
            std::unique_lock<std::mutex> lock(_mutex);
            _tasks.push_back(cb);
        }
        WakeUpEventFd();
    }

    // Run cb immediately when already on the loop thread, else queue it.
    void RunInLoop(const Functor &cb){
        if(IsInLoop()) return cb();
        return QueueInLoop(cb);
    }

    void UpdateEvent(Channel *channel) { return _poller.UpdateEvent(channel); }
    void RemoveEvent(Channel *channel) { return _poller.RemoveEvent(channel); }

    void TimerAdd(uint64_t id, uint32_t delay, const TaskFunc &cb){return _timer_wheel.TimerAdd(id, delay, cb);}
    void TimerRefresh(uint64_t id) { return _timer_wheel.TimerRefresh(id);}
    void TimerCancel(uint64_t id) { return _timer_wheel.TimerCancel(id);}

    // Loop-thread only (see TimerWheel::HasTimer).
    bool HasTimer(uint64_t id){ return _timer_wheel.HasTimer(id);}
};

// Out-of-line definitions: these delegate to EventLoop and therefore need
// its complete declaration, which appears above.
void Channel::Remove() { _loop->RemoveEvent(this);}
void Channel::Update() { _loop->UpdateEvent(this);}
void TimerWheel::TimerAdd(uint64_t id, uint32_t delay, const TaskFunc& cb){
    // Timer mutations always run on the wheel's own loop thread, which is
    // what makes the wheel lock-free.
    _loop->RunInLoop(std::bind(&TimerWheel::TimerAddInLoop, this, id, delay, cb));
}
void TimerWheel::TimerRefresh(uint64_t id){
    _loop->RunInLoop(std::bind(&TimerWheel::TimerRefreshInLoop, this, id));
}
void TimerWheel::TimerCancel(uint64_t id){
    _loop->RunInLoop(std::bind(&TimerWheel::TimerCancelInLoop, this, id));
}

// LoopThread: one worker thread running its own EventLoop ("one loop per
// thread"). The EventLoop object lives on the worker thread's stack;
// Loop() blocks the caller until it has been constructed.
class LoopThread{
private:
    std::mutex _mutex;              // protects the _loop hand-off
    std::condition_variable _cond;  // signals that _loop has been initialized,
                                    // so Loop() is never called on a null pointer
    EventLoop *_loop;               // created inside the worker thread itself
    std::thread _thread;

private:
    // Thread entry: construct the loop, publish the pointer, run forever.
    // NOTE(review): EventLoop::Start() never returns, so the stack-local
    // `loop` stays alive for the whole lifetime of the thread.
    void ThreadEntry(){
        EventLoop loop;
        {
            std::unique_lock<std::mutex> lock(_mutex);
            _loop = &loop;
            _cond.notify_all();
        }
        _loop->Start();
    }

public:
    // Starts the worker thread immediately.
    // NOTE(review): there is no destructor joining _thread — destroying a
    // LoopThread while the thread is joinable would call std::terminate.
    // Instances appear to be intentionally never destroyed (LoopThreadPool
    // news them and keeps them for the process lifetime); confirm.
    LoopThread(): _loop(nullptr), _thread(std::thread(&LoopThread::ThreadEntry, this)) {}
    // Block until the worker thread has created its EventLoop, then return it.
    EventLoop *Loop() {
        EventLoop *loop = nullptr;
        {
            std::unique_lock<std::mutex> lock(_mutex);
            _cond.wait(lock, [&](){ return _loop != nullptr;});
            loop = _loop;
        }
        return loop;
    }

};

// Pool of event-loop worker threads.
// With a thread count of zero every connection is served by _base_loop
// (the accepting thread itself); otherwise connections are distributed
// round-robin across the workers' loops.
class LoopThreadPool{
private:
    int _thread_count;
    EventLoop *_base_loop;                  // fallback loop used when no workers exist
    std::vector<LoopThread*> _threads;      // owning pointers to the worker thread objects
    int _next_loop_idx;
    std::vector<EventLoop*> _loops;         // each worker's loop, indexed like _threads
public:
    LoopThreadPool(EventLoop *base_loop):_thread_count(0), _next_loop_idx(0), _base_loop(base_loop){}
    void SetThreadCount(int thread_count){ _thread_count = thread_count; }
    // Spawn every worker (a LoopThread starts its thread on construction)
    // and collect the loop each one created.
    void CreateAndStart(){
        if(_thread_count <= 0) return;
        _threads.resize(_thread_count);
        _loops.resize(_thread_count);
        for(int idx = 0; idx < _thread_count; ++idx){
            _threads[idx] = new LoopThread();
            _loops[idx] = _threads[idx]->Loop();
        }
    }
    // Hand out the next loop round-robin; base loop when there are no workers.
    EventLoop *NextLoop(){
        if(_thread_count == 0) return _base_loop;
        _next_loop_idx = (_next_loop_idx + 1) % _thread_count;
        return _loops[_next_loop_idx];
    }
};


/**
 * Any: minimal type-erased value container (boost::any style).
 * Stores a heap-allocated copy of any copy-constructible T; get<T>()
 * returns a pointer to the stored value after a typeid check.
 */
class Any
{
private:
    // Type-erased base: knows its dynamic type and how to clone itself.
    struct holder
    {
    public:
        holder() = default;
        virtual ~holder() = default;
        virtual const std::type_info &type() = 0;
        virtual holder* clone() = 0;
    };
    // Concrete holder for a value of type T (T's own destructor cleans up).
    template<class T>
    struct placeholder : public holder
    {
    public:
        placeholder(const T& val) : _val(val) {};
        virtual const std::type_info &type() override { return typeid(T); }
        virtual holder* clone() override { return new placeholder(_val); }

        T _val;
    };
private:
    // Exchange contents with other; building block for copy-and-swap assignment.
    Any &swap(Any &other) noexcept{
        std::swap(other._content, this->_content);
        return *this;
    }

private:
    holder *_content;

public:
    Any() : _content(nullptr) {}
    // Construct from any value (stores a copy on the heap).
    template<class T>
    Any(const T &val): _content(new placeholder<T>(val)) {}
    Any(const Any &other): _content(other._content ? other._content->clone() : nullptr) {}
    // Move support (added — Rule of Five): steals the holder without cloning.
    Any(Any &&other) noexcept : _content(other._content) { other._content = nullptr; }
    ~Any() { delete _content; }

    // Pointer to the stored value; the stored type must be exactly T.
    template<class T>
    T *get(){
        // Fixed: also assert non-empty — the original dereferenced _content
        // unconditionally, crashing on an empty Any.
        assert(_content != nullptr && _content->type() == typeid(T));
        return &(((placeholder<T>*)_content)->_val);
    }

    // Copy-and-swap assignment from a raw value.
    template<class T>
    Any& operator=(const T &val){
        Any(val).swap(*this);
        return *this;
    }

    Any& operator=(const Any &other){
        Any(other).swap(*this);
        return *this;
    }
    // Move assignment (added); self-move safe via the temporary + swap.
    Any& operator=(Any &&other) noexcept{
        Any(std::move(other)).swap(*this);
        return *this;
    }
};

// Connection lifecycle states.
typedef enum {
    DISCONNECTED,
    CONNECTING,
    CONNECTED,
    DISCONNECTING
}ConnStat;
class Connection;
// Connections are shared_ptr-managed: wheel timers, callbacks and the
// server's registry all hold/receive PtrConnection.
using PtrConnection = std::shared_ptr<Connection>;
/**
 * Connection: manages one established TCP connection — its socket,
 * event registration, input/output buffering and user callbacks.
 *
 * Lifetime is shared_ptr-managed (see PtrConnection). Every public mutator
 * forwards to a private *InLoop method via EventLoop::RunInLoop, so all
 * state changes execute on a single loop thread and need no locking.
 */
class Connection : public std::enable_shared_from_this<Connection>{
private:
    uint64_t _conn_id;  // unique connection id (also reused as the inactivity-timer id);
                        // fixed: was int, truncating the uint64_t id handed in by the server
    int _sockfd;        // connection descriptor
    bool _enable_inactive_release;  // true while idle-timeout release is armed
    ConnStat _stat;     // lifecycle state
    Socket _socket;     // descriptor operations
    Channel _channel;   // event registration for _sockfd
    EventLoop *_loop;   // owning loop thread
    Buffer _in_buffer;  // bytes received, pending processing
    Buffer _out_buffer; // bytes queued for sending
    Any _context;       // protocol parsing context of the current request

    using ConnectedCallback = std::function<void(const PtrConnection&)>;
    using ClosedCallback = std::function<void(const PtrConnection&)>;
    using MessageCallback = std::function<void(const PtrConnection&, Buffer*)>;
    using AnyEventCallback = std::function<void(const PtrConnection&)>;
    ConnectedCallback   _connected_cb;
    ClosedCallback      _closed_cb;
    MessageCallback     _message_cb;
    AnyEventCallback    _event_cb;
    ClosedCallback      _server_closed_cb;  // lets the server drop its PtrConnection

private:
    // All *InLoop helpers run on the owning loop thread — thread safe by construction.

    // Queue data in the output buffer and enable write monitoring.
    // Takes Buffer by value because Send() binds a copy into a deferred task.
    void SendInLoop(Buffer buf){
        if(_stat == DISCONNECTED) return;
        _out_buffer.write_buffer_and_push(buf);
        if(!_channel.Writable()) _channel.EnableWrite();
    }
    // Orderly shutdown: hand pending input to the user, let the output
    // buffer drain through HandleWrite, release immediately if it is empty.
    void ShutDownInLoop(){
        _stat = DISCONNECTING;
        if(_in_buffer.readable_size() > 0){
            if(_message_cb) _message_cb(shared_from_this(), &_in_buffer);
        }
        if(_out_buffer.readable_size() > 0){
            if(_channel.Writable() == false) _channel.EnableWrite();
        }
        if(_out_buffer.readable_size() == 0) ReleaseInLoop();
    }
    // Actually tear the connection down.
    void ReleaseInLoop(){
        _stat = DISCONNECTED;
        _channel.Remove();  // stop event monitoring
        _socket.Close();    // close the descriptor
        if(_loop->HasTimer(_conn_id)) _loop->TimerCancel(_conn_id); // drop any pending inactivity timer
        if(_closed_cb) _closed_cb(shared_from_this());
        if(_server_closed_cb) _server_closed_cb(shared_from_this());
    }
    void EnableInactiveReleaseInLoop(uint32_t sec){
        _enable_inactive_release = true;
        if(_loop->HasTimer(_conn_id)) return _loop->TimerRefresh(_conn_id);
        _loop->TimerAdd(_conn_id, sec, std::bind(&Connection::ReleaseInLoop, this));
    }
    void DisableInactiveReleaseInLoop(){
        _enable_inactive_release = false;
        if(_loop->HasTimer(_conn_id)) _loop->TimerCancel(_conn_id);
    }

    // Establishment: flip to CONNECTED and start read monitoring.
    void EstablishInLoop(){
        assert(_stat == CONNECTING);
        _stat = CONNECTED;
        // EnableRead may fire callbacks immediately, so it must run only
        // after the connection (e.g. its timer-wheel entry) is fully set up.
        _channel.EnableRead();
        if(_connected_cb) _connected_cb(shared_from_this());
    }

    // Protocol switch: replace the context and every user callback at once
    // (atomic with respect to the loop thread).
    void UpgreadeInLoop(
        const Any& context,
        const ConnectedCallback&    conn,
        const ClosedCallback&       close,
        const MessageCallback&      msg,
        const AnyEventCallback&     event
    )
    {
        _context = context;
        _connected_cb = conn;
        _closed_cb = close;
        _message_cb = msg;
        _event_cb = event;
    }

    // The five channel event handlers.
    void HandleRead(){
        char buf[65536] = {0};
        ssize_t ret = _socket.NonBlockRecv(buf, 65535);
        if(ret > 0){
            _in_buffer.write_and_push(buf, ret);    // also advances the write cursor
            // Fixed: _message_cb was invoked unguarded here (bad_function_call
            // if no message callback had been registered).
            if(_in_buffer.readable_size() > 0 && _message_cb){
                return _message_cb(shared_from_this(), &_in_buffer);
            }
        }
        else if(ret == 0){      // peer closed
            DEBUG_LOG("Peer closed");
            return ShutDownInLoop();
        }
        else if(ret == -2){     // no data available yet (EAGAIN)
            DEBUG_LOG("data is not ready");
            return ;
        }
        else if(ret == -1){
            DEBUG_LOG("Read Error");
            return ShutDownInLoop();
        }
    }
    void HandleWrite(){
        ssize_t ret = _socket.NonBlockSend(_out_buffer.read_pos(), _out_buffer.readable_size());
        if(ret < 0){
            // Send failed: flush remaining input to the user, then release.
            if(_in_buffer.readable_size() > 0 && _message_cb){  // fixed: guard added
                _message_cb(shared_from_this(), &_in_buffer);
            }
            return ReleaseInLoop();
        }
        _out_buffer.move_reader(ret);
        if(_out_buffer.readable_size() == 0){
            _channel.DisableWrite();
            if(_stat == DISCONNECTING) return ReleaseInLoop();  // drained: finish shutdown
        }
    }
    void HandleClose(){
        if(_in_buffer.readable_size() > 0 && _message_cb){      // fixed: guard added
            _message_cb(shared_from_this(), &_in_buffer);
        }
        return ReleaseInLoop();
    }
    void HandleError(){ return HandleClose(); }
    // Fires on any activity: refresh the inactivity timer, notify the user.
    void HandleEvent(){
        if(_enable_inactive_release == true) _loop->TimerRefresh(_conn_id);  // _conn_id doubles as timer id
        if(_event_cb) _event_cb(shared_from_this());
    }

public:
    // Initializer list follows member declaration order.
    Connection(EventLoop *loop, uint64_t conn_id, int sockfd)
        : _conn_id(conn_id), _sockfd(sockfd), _enable_inactive_release(false)
        , _stat(CONNECTING), _socket(sockfd), _channel(loop, sockfd), _loop(loop){
        _channel.SetCloseCallback(std::bind(&Connection::HandleClose, this));
        _channel.SetErrorCallback(std::bind(&Connection::HandleError, this));
        _channel.SetEventCallback(std::bind(&Connection::HandleEvent, this));
        _channel.SetReadCallback(std::bind(&Connection::HandleRead, this));
        _channel.SetWriteCallback(std::bind(&Connection::HandleWrite, this));
    }
    ~Connection(){
        DEBUG_LOG("Release Connection: %p", this);
    }
    int Fd() { return _sockfd; }
    uint64_t Id() { return _conn_id; }  // widened (was int): ids are 64-bit
    bool Connected() { return _stat == CONNECTED; }
    void SetContext(const Any& context) { _context = context; }
    void SetConnectedCallback(const ConnectedCallback &cb){ _connected_cb = cb; }
    void SetMessageCallback(const MessageCallback &cb){ _message_cb = cb; }
    void SetClosedCallback(const ClosedCallback &cb){ _closed_cb = cb; }
    void SetAnyEventCallback(const AnyEventCallback &cb){ _event_cb = cb; }
    void SetSvrClosedCallback(const ClosedCallback &cb){ _server_closed_cb = cb; }
    // Run the establishment step on the loop thread.
    void Established(){
        _loop->RunInLoop(std::bind(&Connection::EstablishInLoop, this));
    }
    // Copy data into a temporary Buffer and queue it on the loop thread.
    void Send(const char *data, size_t len){
        Buffer buf;
        buf.write_and_push(data, len);
        _loop->RunInLoop(std::bind(&Connection::SendInLoop, this, buf));
    }
    // Request an orderly shutdown (flushes buffers first).
    void ShutDown(){
        _loop->RunInLoop(std::bind(&Connection::ShutDownInLoop, this));
    }
    void EnableInactiveRelease(int sec){
        _loop->RunInLoop(std::bind(&Connection::EnableInactiveReleaseInLoop, this, sec));
    }
    void DisableInactiveRelease(){
        _loop->RunInLoop(std::bind(&Connection::DisableInactiveReleaseInLoop, this));
    }
    // Protocol switch. Must be issued from the loop thread so a freshly
    // triggered event is never handled with the old protocol context.
    void Upgreade(
        const Any& context,
        const ConnectedCallback&    conn,
        const ClosedCallback&       close,
        const MessageCallback&      msg,
        const AnyEventCallback&     event
    )
    {
        _loop->AssertInLoop();
        _loop->RunInLoop(std::bind(&Connection::UpgreadeInLoop, this, context, conn, close, msg, event));
    }
};



// Owns the listening socket and turns its readable events into
// "new connection" callbacks for the owner (TcpServer).
class Acceptor{
private:
    Socket _socket;     // listening socket; declared before _channel so its fd exists first
    EventLoop *_loop;
    Channel _channel;   // read-event dispatcher for the listening fd
    using AcceptCallback = std::function<void(int)>;
    AcceptCallback on_accept_cb;
private:
    // Invoked by _channel when the listening fd is readable: accept one
    // pending connection and hand the new fd to the registered callback.
    void HandleRead(){
        int newfd = _socket.Accept();
        if(newfd == -1) return ;    // accept failed — skip, wait for the next event
        if(on_accept_cb) on_accept_cb(newfd);
    }
    // Create/bind/listen on `port`, returning the listening fd.
    // NOTE(review): this runs inside _socket's own member initializer below,
    // i.e. before Socket's constructor has executed — it only works if Socket
    // is a plain fd wrapper whose CreateServer doesn't read prior state;
    // confirm against Socket's definition.
    int CreateServer(int port){
        int ret = _socket.CreateServer(port);
        assert(ret);
        (void)ret;  // assert() vanishes under NDEBUG; keep release builds warning-free
        return _socket.Fd();
    }

public:
    Acceptor(EventLoop* loop, int port): _socket(CreateServer(port)), _loop(loop)
    , _channel(loop, _socket.Fd()){
        _channel.SetReadCallback(std::bind(&Acceptor::HandleRead,this));
        // Read monitoring is NOT enabled here — see Listen().
    }
    // Separate start-up step: once EnableRead() runs, HandleRead may fire
    // immediately, so the owner must set the accept callback first and only
    // then call Listen().
    void Listen(){ _channel.EnableRead();}
    void SetOnAcceptCallback(const AcceptCallback &cb){ on_accept_cb = cb;}
};

// Top-level server: one base loop that accepts connections and a pool of
// worker loops that own the per-connection I/O.
class TcpServer{
private:
    uint64_t        _next_id;                   // auto-increment id, shared by connections and timer tasks; touched only on the base loop thread
    int             _port;
    int             _timeout;                   // idle timeout in seconds (meaningful only when _enable_inactive_release)
    bool            _enable_inactive_release;   // inactive-release flag
    EventLoop       _baseloop;                  // master thread loop, in charge of listening events
    Acceptor        _acceptor;                  // listening-socket manager (needs _baseloop — keep declared after it)
    LoopThreadPool  _pool;                      // worker EventLoop pool handling client I/O

    std::unordered_map<uint64_t, PtrConnection> _conns; // keeps every live connection alive

    // user-supplied callbacks forwarded to each new connection
    using ConnectedCallback = std::function<void(const PtrConnection&)>;
    using ClosedCallback = std::function<void(const PtrConnection&)>;
    using MessageCallback = std::function<void(const PtrConnection&, Buffer*)>;
    using AnyEventCallback = std::function<void(const PtrConnection&)>;
    ConnectedCallback   _connected_cb;
    ClosedCallback      _closed_cb;
    MessageCallback     _message_cb;
    AnyEventCallback    _event_cb;
private:
    // Accept-callback: wrap the new fd in a Connection, wire the user
    // callbacks, and register it in _conns.
    void NewConnection(int fd){
        _next_id++;
        PtrConnection conn(new Connection(_pool.NextLoop(), _next_id, fd));

        conn->SetMessageCallback(_message_cb);
        conn->SetClosedCallback(_closed_cb);
        conn->SetConnectedCallback(_connected_cb);
        conn->SetAnyEventCallback(_event_cb);
        // server-side close hook: remove our owning reference on close
        conn->SetSvrClosedCallback(std::bind(&TcpServer::RemoveConnection, this, std::placeholders::_1));
        if(_enable_inactive_release) conn->EnableInactiveRelease(_timeout);
        conn->Established();
        _conns.insert(std::make_pair(_next_id, conn));
    }
    void RemoveConnectionInLoop(const PtrConnection &conn){
        _conns.erase(conn->Id());
    }
    // Remove a connection from _conns (always via the base loop, since
    // _conns is only touched on the base loop thread).
    void RemoveConnection(const PtrConnection &conn){
        _baseloop.RunInLoop(std::bind(&TcpServer::RemoveConnectionInLoop, this, conn));
    }

    void RunAfterInLoop(const Functor& task, int delay){
        _next_id++; // timer ids come from the same counter as connection ids
        _baseloop.TimerAdd(_next_id, delay, task);
    }
public:
    // Init list follows declaration order (_next_id, _port, _timeout, flag,
    // then _acceptor/_pool which reference _baseloop). _timeout starts at 0
    // so it is never read uninitialized.
    TcpServer(int port): _next_id(0), _port(port), _timeout(0),
    _enable_inactive_release(false),
    _acceptor(&_baseloop, port), _pool(&_baseloop)
    {
        _acceptor.SetOnAcceptCallback(std::bind(&TcpServer::NewConnection, this, std::placeholders::_1));
        _acceptor.Listen();
        // Not serving yet — _baseloop has not been started (see Start()).
    }
    void SetThreadCount(int count) { _pool.SetThreadCount(count);}
    void SetConnectedCallback(const ConnectedCallback &cb){ _connected_cb = cb; }
    void SetMessageCallback(const MessageCallback &cb){ _message_cb = cb; }
    void SetClosedCallback(const ClosedCallback &cb){ _closed_cb = cb; }
    void SetAnyEventCallback(const AnyEventCallback &cb){ _event_cb = cb; }
    void EnableInactiveRelease(int timeout) { _timeout = timeout; _enable_inactive_release = true;}
    // Schedule a task to run once after `delay` seconds.
    void RunAfter(const Functor &task, int delay){
        _baseloop.RunInLoop(std::bind(&TcpServer::RunAfterInLoop, this, task, delay));
    }
    void Start(){
        // Pool creation cannot live in the constructor: the thread count is
        // configured afterwards via SetThreadCount().
        _pool.CreateAndStart();
        _baseloop.Start();  // blocks, driving the accept loop
    }
};


// Process-wide setup: ignore SIGPIPE so a write to a peer-closed socket
// returns an error (EPIPE) instead of terminating the process.
struct NetWork{
    NetWork(){
        INFO_LOG("SIGPIPE ignored");
        signal(SIGPIPE, SIG_IGN);
    }
};
// `static` gives the instance internal linkage: one (harmless, idempotent)
// copy per translation unit, instead of the multiple-definition linker error
// a plain header-scope global causes once this header is included from more
// than one .cpp file.
static NetWork network;

#endif