#ifndef SERVER_H
#define SERVER_H

#include <vector>
#include <iostream>
#include <string>
#include <memory>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <functional>
#include <algorithm>
#include <unordered_map>
#include <cstring>
#include <ctime>
#include <cassert>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <signal.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/timerfd.h>

using std::vector;
using std::string;
using std::cerr;
using std::cout;
using std::cin;
using std::endl;

#define DEFAULT_BUFFER_SIZE 10
#define DEFAULT_BACKLOG_SIZE 128
#define DEFAULT_PORT 8088
#define DEFAULT_IP  "0.0.0.0"

// Log severity levels; messages below LOG_LEVEL are discarded.
#define INF 0
#define DBG 1
#define ERR 2
#define LOG_LEVEL ERR

// The macro parameter `format` maps onto fprintf's format string so that
// __VA_ARGS__ can be forwarded after it; the do/while(0) wrapper makes the
// macro expand to a single statement that is safe inside unbraced if/else.
// Note: `tm` is a struct type name, so the local is called tm_info.
// fix: pthread_self() returns pthread_t (unsigned long on Linux); printing it
// with %ld was a format-specifier mismatch — cast and use %lu.
#define LOG(level, format, ...) do{\
    if(level < LOG_LEVEL) break;\
    time_t t = time(NULL);\
    struct tm * tm_info = localtime(&t);\
    char res [64] = {0};\
    strftime(res,64,"%F %H:%M:%S", tm_info);\
    fprintf(stdout, "[tid : %lu] [%s] [%s : %d] : " format "\n", (unsigned long)pthread_self(), res,  __FILE__, __LINE__, ##__VA_ARGS__);\
}while(0)

// Convenience wrappers, one per severity (levels below LOG_LEVEL are no-ops).
#define INF_LOG(format, ...)   LOG(INF, format, ##__VA_ARGS__)
#define DBG_LOG(format, ...)   LOG(DBG, format, ##__VA_ARGS__)
#define ERR_LOG(format, ...)   LOG(ERR, format, ##__VA_ARGS__)

#ifndef DEFAULT_BUFFER_SIZE
#define DEFAULT_BUFFER_SIZE 10      // fallback so the class also works standalone
#endif

// Byte buffer with separate read/write cursors, used as the per-connection
// input/output buffer.  Layout: [ consumed | readable | writable ].
class Buffer
{
private:
    std::vector<char> _buffer;
    uint64_t _write_index;      // one past the last byte written
    uint64_t _read_index;       // first unread byte
public:
    Buffer():_buffer(DEFAULT_BUFFER_SIZE), _write_index(0), _read_index(0){}

    // Raw pointers into the storage; used for bulk memmove / recv / send.
    char* GetWritePosition()    {return &(_buffer[_write_index]);}
    char* GetReadPosition()     {return &(_buffer[_read_index]);}

    // Space already consumed in front of the read cursor (reclaimable).
    uint64_t GetFrontSize()     {return _read_index;}
    // Free space after the write cursor.
    uint64_t GetBackSize()      {return _buffer.size() - _write_index;}
    // Bytes currently available for reading.
    uint64_t GetReadableSize()  {return _write_index - _read_index;}

    void SetBufferSize(int size)    {_buffer.resize(size);}

    void MoveWriteIndex(uint64_t len)
    {
        assert(len <= GetBackSize());
        _write_index += len;
    }

    void MoveReadIndex(uint64_t len)
    {
        assert(len <= GetReadableSize());
        _read_index += len;
    }

    // Guarantee at least wlen writable bytes after the write cursor.
    void EnsureWritable(uint64_t wlen)
    {
        // 1. already enough room at the back
        if(GetBackSize() >= wlen)
        {
            return;
        }

        uint64_t readable = GetReadableSize();

        // 2. compacting the consumed front space is enough
        if(GetFrontSize() + GetBackSize() >= wlen)
        {
            // fix: move the READABLE bytes — the old code moved GetFrontSize()
            // bytes, silently losing data whenever readable > front space
            memmove(&_buffer[0], GetReadPosition(), readable);
            _read_index = 0;
            _write_index = readable;
            return;
        }

        // 3. grow the storage (at least to a size that fits), then compact
        _buffer.resize(_write_index + wlen);
        memmove(&_buffer[0], GetReadPosition(), readable);
        _read_index = 0;
        _write_index = readable;
    }

    void Write(const void* data, uint64_t wlen)
    {
        EnsureWritable(wlen);
        memmove(GetWritePosition(), data, wlen);
        MoveWriteIndex(wlen);
    }

    void WriteString(const std::string& s)
    {
        Write(s.c_str(), s.size());
    }

    // Append the *readable* part of another buffer. Copying the whole backing
    // vector would also copy stale bytes after its write cursor.
    void WriteBuffer(Buffer& buff)
    {
        Write(buff.GetReadPosition(), buff.GetReadableSize());
    }

    void Read(void* buffer, uint64_t rlen)
    {
        assert(rlen <= GetReadableSize());
        memmove(buffer, GetReadPosition(), rlen);
        MoveReadIndex(rlen);
    }

    std::string ReadString(uint64_t rlen)
    {
        assert(rlen <= GetReadableSize());
        // fix: copy the data BEFORE advancing the read cursor — the old code
        // advanced first and then built the string from the bytes *after*
        // the requested data (out-of-bounds at the buffer end).
        std::string ret(GetReadPosition(), rlen);
        MoveReadIndex(rlen);
        return ret;
    }

    // Locate the first '\n' in the readable region (nullptr if none).
    char* FindCRLF()
    {
        return (char*)memchr(GetReadPosition(), '\n', GetReadableSize());
    }

    // Pop one line including the trailing '\n'; "" when no full line yet.
    std::string GetLine()
    {
        char* pos = FindCRLF();
        if(pos == nullptr)
        {
            return "";
        }
        std::string ret(GetReadPosition(), pos - GetReadPosition() + 1);
        MoveReadIndex(ret.size());

        return ret;
    }

    // Discard all content by rewinding both cursors (storage is reused).
    void DataClean()
    {
        _read_index = 0;
        _write_index = 0;
    }
};

// Thin wrapper over a TCP socket fd; doubles as the listening socket and
// as a connected peer descriptor. Closes the fd on destruction.
class Socket
{
private:
    int _socket;

public:
    // fix: was initialized to 0 — destroying a default-constructed Socket
    // used to close(0), i.e. close stdin.
    Socket():_socket(-1){}
    Socket(int fd):_socket(fd){}
    ~Socket(){Close();}

    int Getfd()
    {
        return _socket;
    }

    bool Create()
    {
        _socket = socket(AF_INET, SOCK_STREAM, 0);
        if(_socket < 0)
        {
            ERR_LOG("socket failed: %s", strerror(errno));
            return false;
        }
        return true;
    }

    bool Bind(uint16_t port, string ip)
    {
        sockaddr_in local;
        memset(&local, 0, sizeof(local));   // fix: no uninitialized padding bytes
        local.sin_addr.s_addr = inet_addr(ip.c_str());
        local.sin_family = AF_INET;
        local.sin_port = htons(port);

        if(bind(_socket, (sockaddr*)(&local), sizeof(local)) < 0)
        {
            ERR_LOG("bind failed: %s", strerror(errno));
            return false;
        }
        return true;
    }

    bool Listen()
    {
        if(listen(_socket, DEFAULT_BACKLOG_SIZE) < 0)
        {
            ERR_LOG("listen failed: %s", strerror(errno));
            return false;
        }
        return true;
    }

    // Returns the new connection fd, or -1 on failure.
    int Accept()
    {
        int newfd = accept(_socket, nullptr, nullptr);      // peer address not needed
        if(newfd < 0)
        {
            ERR_LOG("accept failed: %s", strerror(errno));
            // fix: transient failures (EAGAIN on a nonblocking listener,
            // EINTR, fd exhaustion, peer aborting the handshake) used to
            // abort() and kill the whole server; report them instead.
            return -1;
        }
        return newfd;
    }

    bool Connect(uint16_t port, string ip)
    {
        sockaddr_in peer;
        memset(&peer, 0, sizeof(peer));
        peer.sin_addr.s_addr = inet_addr(ip.c_str());
        peer.sin_port = htons(port);
        peer.sin_family  = AF_INET;
        if(connect(_socket, (sockaddr*)(&peer), sizeof(peer)) < 0)
        {
            ERR_LOG("connect failed: %s", strerror(errno));
            return false;
        }
        return true;
    }

    // Returns >0 bytes read, 0 for "nothing available right now",
    // -1 for peer-closed or a real error.
    ssize_t Recv(void* buffer, size_t len, int flag =  0)
    {
        ssize_t ret = recv(_socket, buffer, len, flag);
        if(ret == 0)
        {
            // fix: ret == 0 means the peer performed an orderly shutdown;
            // errno is meaningless here — the old code read the stale errno
            // and could report "no data" instead of "closed".
            return -1;
        }
        if(ret < 0)
        {
            // EAGAIN/EWOULDBLOCK: the receive buffer is empty (nonblocking mode)
            // EINTR: a blocking wait was interrupted by a signal
            if (errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK)
            {
                return 0;
            }
            ERR_LOG("recv failed: %s", strerror(errno));
            return -1;      // all real errors collapse to -1
        }
        return ret;
    }

    // Same contract as Recv: >0 sent, 0 "try again", -1 error.
    ssize_t Send(void* buffer, size_t len, int flag = 0)
    {
        ssize_t ret = send(_socket, buffer, len, flag);
        if(ret < 0)
        {
            if (errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK)
            {
                return 0;
            }
            ERR_LOG("send failed: %s", strerror(errno));
            return -1;
        }
        return ret;
    }

    ssize_t NonBlockRecv(void* buffer, size_t len)    {return Recv(buffer, len, MSG_DONTWAIT);}
    ssize_t NonBlockSend(void* buffer, size_t len)    {return Send(buffer, len, MSG_DONTWAIT);}

    // Close and poison the fd so later connections can't alias it.
    void Close()
    {
        if(_socket != -1)
        {
            close(_socket);
        }
        _socket = -1;
    }

    void SetNonBlock()
    {
        DBG_LOG("SetNonBlock");
        int flag = fcntl(_socket, F_GETFL, 0);
        fcntl(_socket, F_SETFL, O_NONBLOCK | flag);     // preserve existing status flags
    }

    // Create + configure + bind + listen in one call.
    bool ServerConnect(uint16_t port = DEFAULT_PORT, const string& ip = DEFAULT_IP, bool is_nonblock = false)
    {
        // fix: Reuse()/SetNonBlock() were called BEFORE Create(), so the
        // options were applied to an fd that did not exist yet.
        if(!Create())
        {
            return false;
        }
        Reuse();
        if(is_nonblock)
            SetNonBlock();
        return Bind(port, ip) && Listen();
    }

    // (name kept for compatibility with existing callers)
    bool ClinetConnect(uint16_t port, const string& ip)     // client side stays blocking
    {
        if(Create() && Connect(port,ip))
        {
            return true;
        }

        return false;
    }

    // Allow fast rebinding of the address/port after restart.
    void Reuse()
    {
        int val = 1;
        setsockopt(_socket, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));

        val = 1;
        setsockopt(_socket, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
    }
};


class Poller;
class EventLoop;
// Associates one fd with its event interests and event callbacks.
// Registration with epoll goes through the owning EventLoop's Poller
// (Update/Remove, defined out of line below EventLoop).
class Channel
{
private:
    int _fd;        // the descriptor being managed (the Socket wrapper could be used here too)
    uint32_t _events;       // events we ask epoll to monitor
    uint32_t _revents;      // events that actually fired (filled in by Poller after epoll_wait)
    EventLoop* _loop;       // owning loop; event firing drives the callbacks registered here

using EventCallback = std::function<void()>;
    EventCallback _read_callback;
    EventCallback _write_callback;
    EventCallback _error_callback;
    EventCallback _close_callback;
    EventCallback _any_callback;    // runs before every specific handler

public:
    Channel(int fd, EventLoop* loop):_fd(fd), _events(0), _revents(0), _loop(loop){}
    ~Channel()
    {
        DBG_LOG("~Channel --- fd is %d", _fd);
        _read_callback = _write_callback  = _error_callback = _close_callback = _any_callback = nullptr;
    }

    int Getfd() {return _fd;}
    uint32_t GetEvents() {return _events;}

    // NOTE(review): despite the name, this stores the *ready* events
    // (_revents); Poller calls it after epoll_wait so HandleEvent can
    // dispatch on them.
    void SetEvents(uint32_t setev) {_revents = setev;}
    void SetReadCallback(EventCallback rcb) {_read_callback = rcb;}
    void SetWriteCallback(EventCallback wcb) {_write_callback = wcb;}
    void SetErrorCallback(EventCallback ecb) {_error_callback = ecb;}
    void SetCloseCallback(EventCallback ccb) {_close_callback = ccb;}
    void SetAnyCallback(EventCallback acb) {_any_callback = acb;}

    // Whether the corresponding event is currently being monitored.
    bool Readable() {return _events & EPOLLIN;}
    bool Writeable() {return _events & EPOLLOUT;}

    // Whether the corresponding event actually fired.
    bool IsReadEvent() {return (_revents & EPOLLIN) || (_revents & EPOLLPRI) || (_revents & EPOLLRDHUP);}
    bool IsWriteEvent() {return (_revents & EPOLLOUT);}
    bool IsErrorEvent() {return (_revents & EPOLLERR);}
    bool IsCloseEvent() {return (_revents & EPOLLHUP);}

    // These just toggle interest bits and re-register with the poller.
    void EnableRead() {_events |= EPOLLIN; Update(); }
    void EnableWrite() {_events |= EPOLLOUT; Update();}
    void DisableRead() {_events &= ~EPOLLIN; Update();}
    void DisableWrite() {_events &= ~EPOLLOUT; Update();}
    void DisableAll() {_events = 0; Update();}
    void DisableR() {_revents = 0;}     // clears only the ready-event snapshot

    // Defined out of line: they need the complete EventLoop type.
    void Remove();
    void Update();

    // Dispatch the ready events. An fd does not necessarily register every
    // callback, so each one is null-checked before the call.
    void HandleEvent()
    {
        if(IsReadEvent())
        {
            // any-event hook runs first, in case a handler closes the connection
            if(_any_callback)   _any_callback();
            if(_read_callback)  _read_callback();
        }

        if(IsWriteEvent())
        {
            if(_any_callback)    _any_callback();
            if(_write_callback)    _write_callback();
        }

        // detail: at most ONE of the error/close handlers runs per dispatch
        // (hence the else-if), since either one may release the connection.
        if(IsErrorEvent())
        {
            if(_any_callback)    _any_callback();
            if(_error_callback)    _error_callback();
        }
        else if(IsCloseEvent())
        {
            if(_any_callback)    _any_callback();
            if(_close_callback)    _close_callback();
        }
    }

};

#define MAX_EPOLL_SIZE 1024
#define DEFALUT_EVENT_SIZE 1024
// epoll wrapper: tracks which Channel owns each registered fd and returns
// the Channels whose events fired.
class Poller
{
private:
    int _epfd;
    epoll_event _evs[DEFALUT_EVENT_SIZE];       // scratch array for epoll_wait results
    std::unordered_map<int, Channel*> _channels;    // fd -> channel (non-owning)

public:
    Poller()
    {
        _epfd = epoll_create(MAX_EPOLL_SIZE);
        if(_epfd < 0)
        {
            ERR_LOG("epoll_create failed: %s", strerror(errno));
        }
    }

    // fix: the epoll fd used to be leaked for the lifetime of the process.
    ~Poller()
    {
        if(_epfd >= 0)
        {
            close(_epfd);
        }
    }

    bool HasChannel(Channel* ch)
    {
        return _channels.count(ch->Getfd()) == 1;
    }

    // Apply op (EPOLL_CTL_ADD/MOD/DEL) for the channel's fd with its
    // desired-event mask.
    void Update(int op, Channel* ch)
    {
        epoll_event local{};    // zero-init: no stale union bytes reach the kernel
        int fd = ch->Getfd();
        local.data.fd = fd;
        local.events = ch->GetEvents();     // without this mask epoll_wait never wakes us
        if(epoll_ctl(_epfd, op, fd, &local) < 0)
        {
            ERR_LOG("epoll_ctl failed: %s", strerror(errno));
        }
    }

    // Add the channel on first sight, modify afterwards.
    void UpdateEvent(Channel* ch)
    {
        if(HasChannel(ch))
        {
            Update(EPOLL_CTL_MOD, ch);
        }
        else
        {
            _channels[ch->Getfd()] = ch;
            Update(EPOLL_CTL_ADD, ch);
        }
    }

    void RemoveEvent(Channel* ch)
    {
        if(HasChannel(ch))
        {
            Update(EPOLL_CTL_DEL, ch);
            _channels.erase(ch->Getfd());
        }
    }

    // Block until events fire; append the ready Channels to *active.
    void Poll(vector<Channel*>* active)
    {
        int n = epoll_wait(_epfd, _evs, DEFALUT_EVENT_SIZE, -1);

        if(n < 0)
        {
            if(errno == EINTR)      // interrupted by a signal: retry on the next loop pass
            {
                return;
            }
            ERR_LOG("epoll_wait failed: %s", strerror(errno));
            abort();        // any other epoll_wait failure is fatal
        }

        for(int i = 0; i < n; i++)
        {
            assert(_channels.count(_evs[i].data.fd) == 1);
            Channel* ch = _channels[_evs[i].data.fd];
            ch->SetEvents(_evs[i].events);      // hand the ready events to the channel for HandleEvent
            active->push_back(ch);
        }
    }
};


// Generic callback type shared by the timer machinery.
using TaskFunc = std::function<void()>;

// One timed task. The callback runs in the destructor, so releasing the
// last shared_ptr to the task IS executing it (unless it was cancelled).
class TimerTask
{
    using ReleaseFunc = std::function<void()>;
private:
    uint64_t _id;           // unique task id (unused internally, kept for ownership bookkeeping)
    int _timeout;           // delay, in wheel ticks (seconds)
    bool _is_cancel;        // cancelled tasks skip the task callback
    TaskFunc _func_cb;      // the task body
    ReleaseFunc _release;   // removes this task from the owner's id->task index
public:
    // fix: the initializer list now follows the member declaration order
    // (the old list named _is_cancel last even though it is constructed
    // first — a -Wreorder trap).
    TimerTask(uint64_t id, int timeout, TaskFunc func_cb, ReleaseFunc release)
    :_id(id), _timeout(timeout), _is_cancel(false), _func_cb(func_cb), _release(release)
    {
    }

    ~TimerTask()
    {
        // fix: guard both callbacks — calling an empty std::function throws.
        if(!_is_cancel && _func_cb)
            _func_cb();
        if(_release)
            _release();
    }

    int GetTimeout()
    {
        return _timeout;
    }

    void Cancel()
    {
        _is_cancel = true;
    }
};

class TimerWheel
{
    using PtrTask = std::shared_ptr<TimerTask>;
    using WeakPtr = std::weak_ptr<TimerTask>;
private:
    int _timer_fd;
    int _capacity;
    int _tick;      //_tick的值即为“当前时刻”
    vector<vector<PtrTask>> _wheels;
    std::unordered_map<uint64_t, WeakPtr>  _timers;     //主要用于索引

    EventLoop* _loop;   //事件触发，回调执行
    std::unique_ptr<Channel> _timer_channel;  //事件监控，回调注册 
private:
    void RemoveTimer(uint64_t id)      //本质从索引中删除，绑定给TimeTask，在TimeTask的析构中统一执行！
    {
        auto it = _timers.find(id);
        if(it != _timers.end())
        {
            _timers.erase(it);
        }
    }
    
public:
    static int CreateTimerFd()
    {
        int retfd = timerfd_create(CLOCK_MONOTONIC, 0);
        if(retfd < 0)
        {
            ERR_LOG("timerfd_create falied: %s", strerror(errno));
            abort();
        }

        itimerspec new_val;
        new_val.it_value.tv_sec = 1;
        new_val.it_value.tv_nsec = 0;
        new_val.it_interval.tv_sec = 1;
        new_val.it_interval.tv_nsec = 0;
        int ret = timerfd_settime(retfd, 0, &new_val, nullptr);
        if(ret < 0)
        {
            ERR_LOG("timerfd_settime failed: %s",strerror(errno));
            abort();
        }    

        //return ret;     //fix bug: return了timerfd_settime的返回值导致Channel管理文件描述符（0）是错误的！！！故epoll_wait一直等不到
        return retfd;
    }

    int ReadTimerFd()
    {
        uint64_t times = 0;
        int n = read(_timer_fd, &times, sizeof(times));
        if(n <= 0)
        {
            ERR_LOG("read failed: %s", strerror(errno));
            abort();    
        }
        
        return times;
    }

    void OnTimeRun()
    {
        int times = ReadTimerFd();
        for(int i = 0; i < times; i++)
        {
            TimerRun();
        }
    }

    TimerWheel(EventLoop* loop, int capacity = 60)
    :_timer_fd(CreateTimerFd()), _capacity(capacity), _tick(0), _wheels(_capacity),
    _loop(loop), _timer_channel(new Channel(_timer_fd, _loop))
    {
        _timer_channel->SetReadCallback(std::bind(&TimerWheel::OnTimeRun, this));
        _timer_channel->EnableRead();
    }

    ~TimerWheel(){}

    void TimerAddInLoop(uint64_t id, int timeout, const TaskFunc& func_cb)  //bind之后相当于不需要参数了
    {
        PtrTask pt (new TimerTask(id,timeout,func_cb, std::bind(&TimerWheel::RemoveTimer,this,id)));    //注意bind成员函数时的写法
        int pos = (_tick + timeout) % _capacity;
        _wheels[pos].push_back(pt);     //fix bug：要在当前tick的基础上添加任务！
        _timers[id] = WeakPtr(pt);
    }

    //刷新给定的id的任务的超时时间
    void TimerRefreshInLoop(uint64_t id)
    {
        DBG_LOG("TimerRefreshInLoop");
        //1. 取出任务对应的shared_ptr
        auto it = _timers.find(id);
        if(it == _timers.end())
        {
            return;
        }
        PtrTask pt = it->second.lock();

        //2. 添加至时间轮中     只有在时间轮中的位置变了，id和回调都不变
        _wheels[(_tick + pt->GetTimeout()) % _capacity].push_back(pt);
    }
    
    //取消给定的id的定时任务
    void TimerCancelInLoop(uint64_t id)
    {
        auto it = _timers.find(id);
        if(it == _timers.end())
        {
            return;
        }
        PtrTask pt = it->second.lock();     //不能直接RemoveTimer原因：RemoveTimer的本质作用在索引上
        DBG_LOG("lock ret, source addr: %p %p", &pt, pt.get());
        if(pt)  pt->Cancel();               
        //fix bug: pt任务已被销毁（执行析构了），lock会返回一个空的shared_ptr!!! 但注意shared_ptr本身的地址不为空，而指向的对象地址的为空
    }

    void TimerAdd(uint64_t id, int timeout, const TaskFunc& func_cb);
    void TimerRefresh(uint64_t id);
    void TimerCancel(uint64_t id);

    //每个单位时间执行一次
    void TimerRun()
    {
        _tick = (_tick + 1) % _capacity;

        //直接释放对应单位时间上的对象，相当于“自动执行回调”（调用析构）
        _wheels[_tick].clear();
    }

    bool HasTimer(uint64_t id)
    {
        return _timers.count(id) == 1;
    }
};

// One reactor loop, owned by exactly one thread: dispatches fd events,
// runs tasks queued from other threads, and drives the timing wheel.
class EventLoop
{
private:
    Poller _poller;
    std::thread::id _thread_id;     // the thread this loop belongs to

    using TaskFunc = std::function<void()>;
    vector<TaskFunc> _tasks;    // tasks pushed from other threads
    std::mutex _mutex;          // guards _tasks only

    int _event_fd;              // used to wake the loop out of epoll_wait
    std::unique_ptr<Channel> _event_channel;    // read-event registration for _event_fd

    TimerWheel _timer_wheel;

public:
    // EFD_CLOEXEC: do not leak the fd into exec'd children.
    static int CreateEventFd()
    {
        int res = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if(res < 0)
        {
            ERR_LOG("eventfd failed: %s", strerror(errno));
            abort();
        }
        return res;
    }

    // Drain the eventfd counter; a readable eventfd simply means "wake up".
    void ReadEventFd()
    {
        uint64_t val = 0;
        ssize_t n = read(_event_fd, &val, sizeof(val));
        if(n <= 0)
        {
            if(errno == EINTR || errno == EAGAIN)
            {
                return;
            }
            ERR_LOG("read eventfd failed: %s", strerror(errno));
            abort();
        }
    }

    // Make the eventfd readable so a blocked epoll_wait returns.
    void WakeEventFd()
    {
        uint64_t val = 1;
        ssize_t n = write(_event_fd, &val, sizeof(val));
        if(n <= 0)
        {
            if(errno == EINTR)
            {
                return;
            }
            ERR_LOG("write eventfd failed: %s", strerror(errno));
            abort();
        }
    }

public:
    EventLoop()
    :_thread_id(std::this_thread::get_id()),
    _event_fd(CreateEventFd()),
    _event_channel(new Channel(_event_fd, this)),
    _timer_wheel(this)
    {
        _event_channel->SetReadCallback(std::bind(&EventLoop::ReadEventFd, this));
        _event_channel->EnableRead();
    }

    // fix: the eventfd used to be leaked.
    ~EventLoop()
    {
        if(_event_fd >= 0)
        {
            close(_event_fd);
        }
    }

    // Swap the queue out under the lock, run the tasks without it.
    void RunAllTask()
    {
        vector<TaskFunc> funcs;
        {
            std::unique_lock<std::mutex> lck (_mutex);
            std::swap(_tasks, funcs);
        }

        for(auto& f : funcs)
        {
            f();
        }
    }

    bool IsInLoop() {return _thread_id == std::this_thread::get_id();}

    void AssertInLoop() {assert(_thread_id == std::this_thread::get_id());}

    // Queue a task from any thread and wake the loop to run it.
    void PushTask(const TaskFunc& taskfunc)
    {
        {
           std::unique_lock<std::mutex> lck (_mutex);
            _tasks.push_back(taskfunc);
        }
        WakeEventFd();
    }

    // Run immediately when already on the loop thread, otherwise queue.
    void RunInLoop(const TaskFunc& taskfunc)
    {
        if(IsInLoop())
        {
            taskfunc();
        }
        else
        {
            PushTask(taskfunc);
        }
    }

    // The reactor loop: wait -> dispatch ready channels -> drain task queue.
    void Start()
    {
        while(1)
        {
            vector<Channel*> actives;
            _poller.Poll(&actives);

            for(auto& a : actives)
            {
                a->HandleEvent();
            }

            RunAllTask();
        }
    }

    // Timer facade, forwarded to the timing wheel.
    void TimerAdd(uint64_t id, int timeout, const TaskFunc& func_cb) {_timer_wheel.TimerAdd(id, timeout, func_cb);}
    void TimerRefresh(uint64_t id)  {_timer_wheel.TimerRefresh(id);}
    void TimerCancel(uint64_t id)   {_timer_wheel.TimerCancel(id);}
    bool HasTimer(uint64_t id) {return _timer_wheel.HasTimer(id);}

    // Poller facade, used by Channel.
    void UpdateEvent(Channel* channel)  {_poller.UpdateEvent(channel);}
    void RemoveEvent(Channel* channel)  {_poller.RemoveEvent(channel);}
};

// Out-of-line definitions: these bodies need the complete EventLoop type,
// so they live below both class definitions. Each one simply hops onto the
// loop thread before touching the (non-thread-safe) *InLoop implementation.
void TimerWheel::TimerAdd(uint64_t id, int timeout, const TaskFunc& func_cb)
{_loop->RunInLoop(std::bind(&TimerWheel::TimerAddInLoop, this, id, timeout, func_cb));}

void TimerWheel::TimerRefresh(uint64_t id)
{_loop->RunInLoop(std::bind(&TimerWheel::TimerRefreshInLoop, this, id));}

void TimerWheel::TimerCancel(uint64_t id)
{_loop->RunInLoop(std::bind(&TimerWheel::TimerCancelInLoop, this, id));}

// Channel's Update/Remove forward to the owning loop's poller.
void Channel::Remove() {_loop->RemoveEvent(this);}
void Channel::Update() {_loop->UpdateEvent(this);}

// Minimal type-erased value container (boost::any style): stores one value
// of arbitrary copyable type behind a virtual "holder" interface and gets
// it back with a typeid-checked Get<T>().
class Any
{
public:
    Any():_content(nullptr){}

    // Wrap a concrete value: a derived placeholders<T>* initializes the
    // base holders* pointer.
    template<class T>
    Any(const T& val)
    :_content(new placeholders<T>(val))
    {}

    // Deep copy via clone(); a null source stays null.
    Any(const Any& any)
    :_content(any._content == nullptr ? nullptr : (any._content->clone()))
    {}

    // Copy-and-swap helper: the by-value parameter carries the old
    // content away and frees it when it goes out of scope.
    Any& swap(Any any)
    {
        std::swap(_content, any._content);
        return *this;
    }

    template<class T>
    Any& operator=(const T& val)
    {
        swap(Any(val));
        return *this;
    }

    // fix: was Any& operator=(Any&). Without const, assigning from a const
    // Any or a temporary fell into the template overload above and wrapped
    // an Any inside an Any, breaking every later Get<T>().
    Any& operator=(const Any& any)
    {
        swap(any);      // by-value swap parameter clones `any`; source is untouched
        return *this;
    }

    ~Any()
    {
        delete _content;    // delete on nullptr is a no-op
    }

    // Typed access; asserts the stored type matches (and that a value exists).
    template<class T>
    T* Get()
    {
        assert(_content != nullptr && typeid(T) == _content->type());   // fix: null check before dereference
        return &(static_cast<placeholders<T>*>(_content)->_val);
    }

    class holders
    {
    public:
        // fix: these were non-pure virtuals with no return statement —
        // undefined behavior if ever invoked. Pure virtuals make the
        // contract explicit and remove the UB bodies.
        virtual const std::type_info& type() = 0;   // used only for type checking
        virtual holders* clone() = 0;   // returns a placeholders<T>*, not a T*
        virtual ~holders(){}
    };

    template<class T>
    class placeholders : public holders
    {
    public:
        placeholders(T val)
        :_val(val)
        {}

        const std::type_info& type() override {return typeid(T);}

        holders* clone() override
        {
            return (new placeholders<T>(_val));
        }

    public:
        T _val;
    };

private:
    holders* _content;
};

// Lifecycle states of a Connection: half-open -> established ->
// closing (flushing pending output) -> fully closed.
enum ConnectionStatus
{
    CONNECTING,CONNECTED,DISCONNECTING,DISCONNECTED
};

class Connection;
// Connections are always handled through shared_ptr so callbacks can
// safely extend their lifetime (see enable_shared_from_this below).
using PtrConnection = std::shared_ptr<Connection>;
// Callback types user code can register on a Connection.
using ConnectedCallback = std::function<void(const PtrConnection&)>;
using MessageCallback = std::function<void(const PtrConnection&, Buffer*)>;
using ClosedCallback = std::function<void(const PtrConnection&)>;
using AnyEventCallback = std::function<void(const PtrConnection&)>;


class Connection : public std::enable_shared_from_this<Connection>
{
private:
    uint64_t _conn_id;   //由于其唯一性，可同时作为定时器id
    int _sockfd;
    Socket _socket;
    bool _enable_active_release;
    ConnectionStatus _conn_stat;

    EventLoop* _loop;
    Channel _channel;
    Buffer _in_buffer;
    Buffer _out_buffer;
    Any _context;

    ConnectedCallback _connected_callback;
    MessageCallback _message_callback;
    ClosedCallback _closed_callback;
    AnyEventCallback _any_event_callback;

    ClosedCallback _server_closed_callback;

private:
    void SendInLoop(Buffer& data)      
    {
        if(IsDisconnected())    return;     //连接已真正关闭，不能发送数据
        _out_buffer.WriteBuffer(data);
        _channel.EnableWrite();     //启动写事件监控后，HandleWrite即将被调用
    }

    void ShutdownInLoop()
    {
        _conn_stat = DISCONNECTING;
        if(_in_buffer.GetReadableSize() > 0)
        {
            //DBG_LOG("ready to _message_callback");
            if(_message_callback)   _message_callback(shared_from_this(), &_in_buffer);
        }

        if(_out_buffer.GetReadableSize() > 0)
        {
            _channel.EnableWrite();     //这里是启动写监控，而不能直接手动调用HandleWrite
        }

        if(_out_buffer.GetReadableSize() == 0)      //发送缓冲区中没数据（没数据发给对方了），即可真正关闭连接
        {
            //DBG_LOG("ready to Release");
            //Release();
            PushReleaseTask();
        }
    }

    void EnableInactiveReleaseInLoop(int sec)
    {
        _enable_active_release = true;
        if(_loop->HasTimer(_conn_id))
        {
            _loop->TimerRefresh(_conn_id);
            return;
        }
        _loop->TimerAdd(_conn_id, sec, std::bind(&Connection::HandleClose,this));
    }

    void CancelInactiveReleaseInLoop()
    {
        _enable_active_release = false;
        if(_loop->HasTimer(_conn_id))
        {
            _loop->TimerCancel(_conn_id);
            return;
        }        
    }

    void UpgradeInLoop(const Any& context, const ConnectedCallback& cntcb, const MessageCallback& mcb,
     const ClosedCallback& cldcb, const AnyEventCallback& aecb)
    {
        _context = context;
        _connected_callback = cntcb;
        _message_callback = mcb;
        _closed_callback = cldcb;
        _any_event_callback = aecb;
    }

    void EstablishedInLoop()
    {
        assert(IsConnecting());
        _conn_stat = CONNECTED;

        if(_connected_callback) _connected_callback(shared_from_this());

        _channel.EnableRead();      //连接真正建立后才启动读监控，而不是在构造函数处就启动
    }

    void ReleaseInLoop()
    {
        //assert(IsDisconnecting()); 
        DBG_LOG("ReleaseInLoop");
        _conn_stat = DISCONNECTED;

        _channel.Remove();
        _socket.Close();
        //if(_loop->HasTimer(_conn_id))  {_loop->TimerCancel(_conn_id);}     
        if(_loop->HasTimer(_conn_id))  {CancelInactiveReleaseInLoop();}     //先改变标志，再进行取消
        //fix bug: TimerCancel绑定了自己而不是TimerCancelInLoop

        if(_closed_callback)    _closed_callback(shared_from_this());
        if(_server_closed_callback) _server_closed_callback(shared_from_this());
        DBG_LOG("END OF ReleaseInLoop");
    }

public:

    int GetSocketFd()   {return _sockfd;}
    uint64_t GetConnectionId()  {return _conn_id;}
    void SetContext(Any context)   {_context = context;}
    Any* GetContext()   {return &_context;}
    bool IsConnecting() {return _conn_stat == CONNECTING;}
    bool IsDisconnecting()    {return _conn_stat == DISCONNECTING;}
    bool IsConnected()  {return _conn_stat == CONNECTED;}
    bool IsDisconnected() {return _conn_stat == DISCONNECTED;}

    Connection(uint64_t conn_id, EventLoop* loop, int sockfd)
    :_conn_id(conn_id), _sockfd(sockfd), _socket(sockfd), _enable_active_release(false),
    _conn_stat(CONNECTING), _loop(loop), _channel(_sockfd,loop)
    {
        //在构造处为对应的channel设置回调，但注意不能启动读监控！！！
        _channel.SetReadCallback(std::bind(&Connection::HandleRead, this));
        _channel.SetWriteCallback(std::bind(&Connection::HandleWrite, this));
        _channel.SetErrorCallback(std::bind(&Connection::HandleError, this));
        _channel.SetCloseCallback(std::bind(&Connection::HandleClose, this));
        _channel.SetAnyCallback(std::bind(&Connection::HandleAnyEvent, this));
    }
    ~Connection(){}

    //实现基本管理的功能接口
    void Send(const char* data, size_t len)   
    {
        //外界传入的data，可能是个临时的空间，我们现在只是把发送操作压入了任务池，有可能并没有被立即执行
        //因此有可能执行的时候，data指向的空间有可能已经被释放了，所以要立即将数据写入一个临时缓冲区中进行传参
        Buffer buf;
        buf.Write(data,len);
        _loop->RunInLoop(std::bind(&Connection::SendInLoop, this, std::move(buf)));     //右值引用提高效率
    }
    void Shutdown() {_loop->RunInLoop(std::bind(&Connection::ShutdownInLoop, this));}
    void EnableInactiveRelease(int sec)    {_loop->RunInLoop(std::bind(&Connection::EnableInactiveReleaseInLoop, this, sec));}
    void CancelInactiveRelease()    {_loop->RunInLoop(std::bind(&Connection::CancelInactiveReleaseInLoop, this));}
    void Established()  {_loop->RunInLoop(std::bind(&Connection::EstablishedInLoop, this));}
    void Release()  {_loop->RunInLoop(std::bind(&Connection::ReleaseInLoop, this));}
    void PushReleaseTask()  {_loop->PushTask(std::bind(&Connection::ReleaseInLoop, this));}
    void Upgrade(const Any& context, const ConnectedCallback& cntcb, const MessageCallback& mcb,
     const ClosedCallback& cldcb, const AnyEventCallback& aecb) 
    {
        _loop->AssertInLoop();      //确保协议的立即切换，避免用旧协议处理新任务
        _loop->RunInLoop(std::bind(&Connection::UpgradeInLoop, this, context, cntcb, mcb, cldcb, aecb));
    }
    
    //上层可能需设置的五个回调
    void SetConnectedCallback(const ConnectedCallback& cb) {_connected_callback = cb;}
    void SetMessageCallback(const MessageCallback& cb) {_message_callback = cb;}
    void SetClosedCallback(const ClosedCallback& cb) {_closed_callback = cb;}
    void SetAnyEventCallback(const AnyEventCallback& cb) {_any_event_callback = cb;}
    void SetSvrClosedCallback(const ClosedCallback& cb) {_server_closed_callback = cb;}

    //为channel设置的五个回调
    void HandleRead()
    {
#define BUFFERSIZE 65535

        char buff[BUFFERSIZE] = {0};
        ssize_t n = _socket.NonBlockRecv(buff, sizeof(buff));
        if(n < 0)
        {
            ShutdownInLoop();       //InLoop，立即执行
            return;
        }
        _in_buffer.Write(buff, n);

        if(_message_callback && _in_buffer.GetReadableSize() > 0)   //注意要有数据时才调用上层提供的函数处理！！！
        {
            _message_callback(shared_from_this(), &_in_buffer); 
        }
        //直接调用shared_from_this返回的是临时对象，故前面函数类型定义时要加上const
    }
    //Write callback: flush the output buffer. On send failure, give the upper layer
    //one last chance at buffered input, then release the connection.
    void HandleWrite()
    {
        ssize_t n = _socket.NonBlockSend(_out_buffer.GetReadPosition(), _out_buffer.GetReadableSize());
        if(n < 0)   //send failed: really close the connection — but first check whether the input buffer still holds data to process
        {
            if(_in_buffer.GetReadableSize() > 0)
            {
                if(_message_callback)
                    _message_callback(shared_from_this(), &_in_buffer); 
            }     
  
            //ReleaseInLoop();        
            //Release();
            PushReleaseTask();
            return;
        }
        _out_buffer.MoveReadIndex(n);

        if(_out_buffer.GetReadableSize() == 0)
        {
            _channel.DisableWrite();     //everything sent: stop write monitoring to save cycles
            if(IsDisconnecting())        //upper layer already asked to close, so this was the final send — release now
            {
                //ReleaseInLoop();
                //Release();
                PushReleaseTask();
            }
        }
    }
    //Error callback: treated exactly like a close.
    void HandleError()  {HandleClose();}
    //Close callback: let the upper layer consume any remaining input, then release.
    void HandleClose()
    {
        DBG_LOG("HandleClose");
        if(_in_buffer.GetReadableSize() > 0)
        {
            if(_message_callback)
                _message_callback(shared_from_this(), &_in_buffer); 
        }  
        PushReleaseTask();
        //Release();
        //ReleaseInLoop();
    }
    //Unlike the four channel callbacks above, this one may invoke the user-supplied
    //"any event" callback on every event; the other three are each tied strictly to
    //one stage of the connection's lifetime.
    void HandleAnyEvent()   
    {
        //any activity counts as "alive" — push the inactivity timer back first
        if(_enable_active_release)
        {
            DBG_LOG("HandleAnyEvent");
            _loop->TimerRefresh(_conn_id);
        }
        if(_any_event_callback == nullptr) return;
        _any_event_callback(shared_from_this());
    }
    
};

//Owns the listening socket and its channel; hands each newly accepted fd to the
//callback installed by the server.
class Acceptor
{
private:
    Socket _listen_socket;      //the listening socket
    EventLoop* _loop;           //loop that monitors the listening fd
    Channel _listen_channel;    //read-event channel for the listening fd

    using AcceptCallback = std::function<void(int)>;
    AcceptCallback _accept_callback;    //invoked with each newly accepted fd

private:
    //Create the listening socket on `port` and return its fd (feeds the channel ctor).
    int CreateServer(uint16_t port)
    {
        int ret = _listen_socket.ServerConnect(port);
        assert(ret);    //NOTE: assert compiles out under NDEBUG — failure would go unnoticed in release builds
        return _listen_socket.Getfd();
    }

    //Read callback of the listening channel: accept one connection and pass it up.
    void HandleRead()
    {
        int newfd = _listen_socket.Accept();
        if(newfd < 0)
            return;     //fix: accept can fail (EAGAIN, EMFILE, ...) — never hand an invalid fd upward
        if(_accept_callback)
            _accept_callback(newfd);        //the "reserved slot" gets filled in here
    }
public:
    Acceptor(EventLoop* loop, uint16_t port = DEFAULT_PORT)
    :_loop(loop), _listen_channel(CreateServer(port),_loop)
    {
        DBG_LOG("Acceptor");
        _listen_channel.SetReadCallback(std::bind(&Acceptor::HandleRead, this));
    }
    //Start monitoring read events. Kept separate from the ctor so the accept
    //callback can be installed before any connection arrives.
    void Listen()   {_listen_channel.EnableRead();}
    void SetAcceptCallback(const AcceptCallback& cb)    {_accept_callback = cb;}

};


//Runs one EventLoop on its own thread. The loop object lives on the thread's stack;
//GetEventLoop blocks until the thread has constructed it.
class LoopThread
{
private:
    std::mutex _mutex;              //protects _loop
    std::condition_variable _cond;  //signals "_loop is ready"
    std::thread _thread;
    EventLoop* _loop;               //points into the worker thread's stack; valid for its lifetime

    //Thread entry: build the loop, publish the pointer, then run the loop (blocks).
    void ThreadEntry()
    {
        EventLoop loop;
        {
            std::unique_lock<std::mutex> lck(_mutex);
            _loop = &loop;          //fix: publish under the mutex — the previous unlocked write
                                    //raced with GetEventLoop's locked read (undefined behavior)
            _cond.notify_all();     //notify_all is safe here: spurious wakers just re-check the predicate
        }
        loop.Start();
    }
public:
    LoopThread() 
    :_loop(nullptr), _thread(&LoopThread::ThreadEntry, this){}

    //Blocks until the worker thread has constructed its EventLoop, then returns it.
    EventLoop* GetEventLoop()
    {
        std::unique_lock<std::mutex> lck(_mutex);
        _cond.wait(lck, [&](){return _loop != nullptr;});   //waits while false; member access needs the capture
        return _loop;     
    }
};

class LoopThreadPool
{
private:
    int _slave_thread_count;
    int _next_loop_index;
    EventLoop* _baseloop;
    vector<LoopThread*> _threads;
    vector<EventLoop*>  _loops;

public:
    LoopThreadPool(EventLoop* baseloop)
    :_slave_thread_count(0), _next_loop_index(0), _baseloop(baseloop){}

    void SetThreadCount(int count) {_slave_thread_count = count;}
    void CreateSlaveThread()
    {
        for(int i = 0; i < _slave_thread_count; i++)
        {
            LoopThread* ltp = new LoopThread;
            _threads.push_back(ltp);
            _loops.push_back(ltp->GetEventLoop());
        }
    }

    EventLoop* NextLoop()   
    {
        if(_slave_thread_count == 0)    return _baseloop;
        _next_loop_index = (_next_loop_index + 1) % _slave_thread_count;
        return _loops[_next_loop_index];
    }
};

//Top-level server: owns the base loop, the acceptor, the slave-loop pool, and the
//table of live connections. All mutating operations funnel through the base loop.
class TcpServer
{
private:
    uint64_t _conn_id;          //next connection id; doubles as the unique timer id
    uint16_t _port;
    EventLoop _baseloop;        //fix: declared BEFORE _acceptor — members initialize in
                                //declaration order, and _acceptor is built with &_baseloop
    Acceptor _acceptor;         //listening is configured and started in the server's ctor
    LoopThreadPool _pool;

    bool _is_enable_inactive_release;
    int _timeout;               //inactivity timeout in seconds; meaningful once enabled
    std::unordered_map<uint64_t, PtrConnection> _conns;     //keeps connections alive via shared_ptr

    ConnectedCallback _connected_callback;
    MessageCallback _message_callback;
    ClosedCallback _closed_callback;
    AnyEventCallback _any_event_callback;

    //Accept callback: wrap the new fd in a Connection owned by one of the pool's loops,
    //install the user callbacks on it (they can only be "set into" a connection once it exists).
    void NewConnection(int newfd)
    {
        PtrConnection newconn (new Connection(_conn_id, _pool.NextLoop(), newfd));  
        //one thread <-> one EventLoop, but one EventLoop can manage many connections;
        //e.g. 3 connections on 2 threads: NextLoop gives the first thread two of them
        newconn->SetConnectedCallback(_connected_callback);
        newconn->SetMessageCallback(_message_callback);
        newconn->SetClosedCallback(_closed_callback);
        newconn->SetAnyEventCallback(_any_event_callback);
        newconn->SetSvrClosedCallback(std::bind(&TcpServer::RemoveConnection, this, std::placeholders::_1));

        newconn->Established();
        if(_is_enable_inactive_release) newconn->EnableInactiveRelease(_timeout);

        _conns[_conn_id] = newconn;
        _conn_id++;
    }

    //NOTE: these callbacks all take a PtrConnection.
    void RemoveConnectionInLoop(const PtrConnection& pc)
    {
        uint64_t id = pc->GetConnectionId();
        if(_conns.count(id) == 1)
        {
            _conns.erase(id);   //dropping the shared_ptr may destroy the Connection
        }
    }
    void RemoveConnection(const PtrConnection& pc){ _baseloop.RunInLoop(std::bind(&TcpServer::RemoveConnectionInLoop, this, pc));}

    //fix: consume an id for the timer (post-increment). Reusing the current _conn_id
    //collided with the id handed to the next accepted connection, whose inactivity
    //timer lives in the same id space.
    void RunAfterInLoop(const TaskFunc& task, int delay)  {_baseloop.TimerAdd(_conn_id++, delay, task);}
    void EnableInactiveReleaseInLoop(int sec) {_is_enable_inactive_release = true; _timeout = sec;}

public:
    TcpServer(uint16_t port)
    :_conn_id(0), _port(port), _acceptor(&_baseloop, _port), _pool(&_baseloop),
     _is_enable_inactive_release(false), _timeout(0)    //fix: _timeout was left uninitialized
    {
        _acceptor.SetAcceptCallback(std::bind(&TcpServer::NewConnection, this, std::placeholders::_1));
        _acceptor.Listen();
    }

    void SetThreadCount(int cnt){_pool.SetThreadCount(cnt);}
    void SetConnectedCallback(const ConnectedCallback& cb) {_connected_callback = cb;}
    void SetMessageCallback(const MessageCallback& cb) {_message_callback = cb;}
    void SetClosedCallback(const ClosedCallback& cb) {_closed_callback = cb;}
    void SetAnyEventCallback(const AnyEventCallback& cb) {_any_event_callback = cb;}

    void EnableInactiveRelease(int sec) {_baseloop.RunInLoop(std::bind(&TcpServer::EnableInactiveReleaseInLoop, this, sec));}
    void RunAfter(const TaskFunc& task, int delay) {_baseloop.RunInLoop(std::bind(&TcpServer::RunAfterInLoop,this,task,delay));}     

    //Essentially: start the slave pool, then run the base loop (blocks).
    void Start()    {_pool.CreateSlaveThread(); _baseloop.Start();}
};

class NetWork
{
public:
    //Process-wide network setup: ignore SIGPIPE so that writing to a peer-closed
    //socket yields an EPIPE error instead of terminating the whole process.
    NetWork() { ::signal(SIGPIPE, SIG_IGN); }
};

static NetWork nw;  //file-scope instance: installs the SIGPIPE-ignore disposition before main() runs
#endif