#include "LoopManager.h"
#include <memory>
#include "Debug.h"
#include "EventLoop.h"
#include "Manager.h"
#include "PlatLogger.h"
#include "Poller.h"
#include "StreamClient.h"
#include "StreamServer.h"
#include "ThreadChannel.h"
#include "Util.h"
#include "XnccAddress.h"
#include "XnccClient.h"
#include "XnccServer.h"
#include "XnccThread.h"
#include "XnccType.h"

namespace xncc {
namespace foundation {
    static constexpr int DEFAULT_WAIT_US = 1000 * 1000;

    /// Wire up the whole loop/thread topology from the platform config:
    /// business loops first, then IPC server loops, then the per-IPC receive
    /// queues, and finally the channels for queues subscribed via subQueue().
    /// @param cfg platform configuration describing all threads
    /// @return 0 (no failure path is reported here)
    // NOTE(review): initIpcClients() is defined in this file but never called
    // here, yet initIpcRecvQueues() reads ipcLoops_[clientId] for every entry
    // in cfg.ClientThreads — if the caller does not invoke initIpcClients()
    // before init(), those lookups default-insert null loops. Confirm the
    // intended call order with the callers.
    int LoopManager::init(const config::PlatformInfo& cfg)
    {
        initBusiThreads(cfg);
        initIpcServers(cfg);
        initIpcRecvQueues(cfg);
        bindChannel();
        return 0;
    }

    /// Create one business EventLoop plus one worker thread per configured
    /// business-thread entry, keyed by thread id in busiLoops_/threads_.
    /// @param cfg platform configuration (BusiThreads section is used)
    void LoopManager::initBusiThreads(const config::PlatformInfo& cfg)
    {
        for (const auto& busi : cfg.BusiThreads) {
            const auto busi_tid = static_cast<types::thread_id_t>(busi.Id);
            // Hoisted: the IO-multiplex mode cast was previously computed twice.
            const auto ioMode   = static_cast<types::IO_MULTI_MODE::value>(busi.IOMultiMode);
            auto       ptr      = std::make_shared<EventLoop>(busi_tid, busi.Name, ioMode);
            ptr->init(false);  // two-phase init: avoid working with a partially constructed object
            ptr->setCoreId(busi.CoreId);
            busiLoops_[busi_tid] = ptr;

            // Busy-poll (waitUs == 0) when no IO-multiplex backend is used,
            // trading CPU for latency on high-performance business threads.
            int waitUs = DEFAULT_WAIT_US;
            if (ioMode == types::IO_MULTI_MODE::value::NO_IO_MULTI) {
                waitUs = 0;
            }
            // Create the thread object and bind it to the loop entry point.
            threads_[busi_tid] = std::make_unique<PThread>(
                [this, busi_tid, waitUs]() { busiLoops_[busi_tid]->loop(waitUs); }, busi.Name, busi_tid, busi.CoreId);
        }
    }

    /// Create a server EventLoop, its XnccServer and a worker thread for each
    /// configured server, plus one loop/thread per configured sub-server.
    /// Sub-server loops are registered with their parent server.
    /// @param cfg platform configuration (ServerThreads section is used)
    void LoopManager::initIpcServers(const config::PlatformInfo& cfg)
    {
        for (const auto& serverConfig : cfg.ServerThreads) {
            const auto serverId = static_cast<types::thread_id_t>(serverConfig.Id);
            auto       ptr      = std::make_shared<EventLoop>(serverId, serverConfig.Name,
                                                              static_cast<types::IO_MULTI_MODE::value>(serverConfig.IOMultiMode));
            ptr->init(true);  // two-phase init: avoid working with a partially constructed object
            ptr->setCoreId(serverConfig.CoreId);
            auto* server = XnccServer::CreateXnccServer(ptr.get(), XnccAddress::CreateAddress(serverConfig.Addr),
                                                        serverConfig.Name, serverConfig.BlackList);
            server->init();
            servers_[serverId]  = server;
            ipcLoops_[serverId] = ptr;
            // Create the thread object and bind it to the loop entry point
            // (make_unique instead of raw new — exception-safe, matches the
            // make_shared usage above).
            threads_[serverId] = std::make_unique<PThread>(
                [this, serverId]() { ipcLoops_[serverId]->loop(DEFAULT_WAIT_US); }, serverConfig.Name, serverId,
                serverConfig.CoreId);

            /// Create the sub-server loops.
            for (const auto& subServerConfig : serverConfig.SubServer) {
                const auto subServerId = static_cast<types::thread_id_t>(subServerConfig.Id);
                auto       subptr =
                    std::make_shared<EventLoop>(subServerId, subServerConfig.Name,
                                                static_cast<types::IO_MULTI_MODE::value>(subServerConfig.IOMultiMode));
                subptr->init(true);  // two-phase init: avoid working with a partially constructed object
                subptr->setCoreId(subServerConfig.CoreId);

                ipcLoops_[subServerId] = subptr;

                // Create the thread object and bind it to the loop entry point.
                threads_[subServerId] = std::make_unique<PThread>(
                    [this, subServerId]() { ipcLoops_[subServerId]->loop(DEFAULT_WAIT_US); }, subServerConfig.Name,
                    subServerId, subServerConfig.CoreId);
                servers_[serverId]->addSubServerLoop(subptr.get());
            }
        }
    }

    /// Create a client EventLoop, its XnccClient and a worker thread for each
    /// configured client, keyed by thread id in clients_/ipcLoops_/threads_.
    /// @param cfg platform configuration (ClientThreads section is used)
    void LoopManager::initIpcClients(const config::PlatformInfo& cfg)
    {
        for (const auto& clientConfig : cfg.ClientThreads) {
            const auto client_tid = static_cast<types::thread_id_t>(clientConfig.Id);
            auto       ptr        = std::make_shared<EventLoop>(client_tid, clientConfig.Name,
                                                                static_cast<types::IO_MULTI_MODE::value>(clientConfig.IOMultiMode));
            ptr->init(true);  // two-phase init: avoid working with a partially constructed object
            ptr->setCoreId(clientConfig.CoreId);
            auto* client = XnccClient::CreateXnccClient(ptr.get(), XnccAddress::CreateAddress(clientConfig.Addr),
                                                        clientConfig.Name);
            client->init();
            // Optional automatic reconnect with the configured interval.
            if (clientConfig.ReConn) {
                client->enableRetry(clientConfig.ReConnIntervalMs);
            }
            clients_[client_tid]  = client;
            ipcLoops_[client_tid] = ptr;
            // Create the thread object and bind it to the loop entry point
            // (make_unique instead of raw new — exception-safe, matches the
            // make_shared usage above).
            threads_[client_tid] = std::make_unique<PThread>(
                [this, client_tid]() { ipcLoops_[client_tid]->loop(DEFAULT_WAIT_US); }, clientConfig.Name, client_tid,
                clientConfig.CoreId);
        }
    }

    /// Build the receive queues that carry data from business threads into the
    /// IPC (server / sub-server / client) threads.
    /// Every (IPC thread, business thread) pair gets its own queue; a
    /// ThreadChannel ties the queue to the IPC thread's loop via init() and
    /// enableReading().
    /// @param cfg platform configuration listing all thread groups
    void LoopManager::initIpcRecvQueues(const config::PlatformInfo& cfg)
    {
        // Shared hookup step for every queue created below.
        auto hookQueue = [](EventLoop* loop, const auto& queue) {
            auto channel = std::make_shared<ThreadChannel>(loop, queue);
            channel->init();
            channel->enableReading();
        };

        for (const auto& serverCfg : cfg.ServerThreads) {
            const auto serverId   = static_cast<types::thread_id_t>(serverCfg.Id);
            auto       serverLoop = ipcLoops_[serverId];
            for (const auto& busiCfg : cfg.BusiThreads) {
                const auto busiId = static_cast<types::thread_id_t>(busiCfg.Id);
                auto       queue  = QUEUE_MGR.createIPCThreadRecvQueue(serverId, busiId);
                hookQueue(serverLoop.get(), queue);
                TRACE_INFO << "business Thread:"_s << busiId << " will send message to server thread "_s << serverId
                           << " by queue id:(real id:"_s << queue->id() << ")";
            }

            for (const auto& subCfg : serverCfg.SubServer) {
                const auto subServerId = static_cast<types::thread_id_t>(subCfg.Id);
                auto       subLoop     = ipcLoops_[subServerId];
                for (const auto& busiCfg : cfg.BusiThreads) {
                    const auto busiId = static_cast<types::thread_id_t>(busiCfg.Id);
                    auto       queue  = QUEUE_MGR.createIPCThreadRecvQueue(subServerId, busiId);
                    hookQueue(subLoop.get(), queue);
                    TRACE_INFO << "business Thread:"_s << busiId << " will send message to sub server thread "_s
                               << subServerId << " by queue id:(real id:"_s << queue->id() << ")";
                }
            }
        }

        for (const auto& clientCfg : cfg.ClientThreads) {
            const auto clientId   = static_cast<types::thread_id_t>(clientCfg.Id);
            auto       clientLoop = ipcLoops_[clientId];
            for (const auto& busiCfg : cfg.BusiThreads) {
                const auto busiId = static_cast<types::thread_id_t>(busiCfg.Id);
                auto       queue  = QUEUE_MGR.createIPCThreadRecvQueue(clientId, busiId);
                hookQueue(clientLoop.get(), queue);
                TRACE_INFO << "business Thread:"_s << busiId << " will send message to client thread "_s << clientId
                           << " by queue id:(real id:"_s << queue->id() << ")";
            }
        }
    }

    /// @return the business EventLoop registered for @p tid, or nullptr when
    ///         no such business thread exists.
    std::shared_ptr<EventLoop> LoopManager::getLoop(types::thread_id_t tid)
    {
        // Single lookup: find()-then-operator[] searched the map twice.
        const auto it = busiLoops_.find(tid);
        if (it == busiLoops_.end()) {
            return nullptr;
        }
        return it->second;
    }

    /// @return the IPC (server / sub-server / client) EventLoop registered for
    ///         @p tid, or nullptr when no such IPC thread exists.
    std::shared_ptr<EventLoop> LoopManager::getIpcLoop(types::thread_id_t tid)
    {
        // Single lookup: find()-then-operator[] searched the map twice.
        const auto it = ipcLoops_.find(tid);
        if (it == ipcLoops_.end()) {
            return nullptr;
        }
        return it->second;
    }

    /// @return the XnccServer registered for @p tid, or nullptr when no such
    ///         server thread exists. Ownership stays with the manager.
    XnccServer* LoopManager::getXnccServer(types::thread_id_t tid)
    {
        // Single lookup: find()-then-operator[] searched the map twice.
        const auto it = servers_.find(tid);
        if (it == servers_.end()) {
            return nullptr;
        }
        return it->second;
    }

    /// Tear down every IPC endpoint this manager owns. The factories that
    /// created the servers/clients also supply the matching destroy calls.
    LoopManager::~LoopManager()
    {
        for (auto& [serverId, server] : servers_) {
            XnccServer::Destory(server);
        }

        for (auto& [clientId, client] : clients_) {
            XnccClient::Destory(client);
        }
    }

    /// Subscribe business thread @p tid to queue @p queueId so other threads
    /// can send it messages. Must be called before the target thread starts.
    /// @param tid        id of the business thread that will read the queue
    /// @param queueId    user-visible queue id to create
    /// @param queueSize  queue capacity
    /// @param autoExpand whether the queue may grow beyond @p queueSize
    /// @return 0 on success
    /// @throws shared::logic_exception on unknown tid, a running thread, or a
    ///         duplicate queue id
    int LoopManager::subQueue(types::thread_id_t  tid,
                              types::queue_id_t   queueId,
                              types::queue_size_t queueSize,
                              bool                autoExpand)
    {
        // Look the loop up once. The previous find()-then-operator[] pattern
        // searched twice and, when XNCC_DEBUG was off, default-inserted a null
        // loop for an unknown tid and then dereferenced it. Validate
        // unconditionally: this is a setup-time call, not a hot path.
        const auto it = busiLoops_.find(tid);
        if (it == busiLoops_.end()) {
            std::ostringstream oss;
            oss << "thread id:" << tid << " is not exist";
            throw shared::logic_exception(oss.str());
        }
        auto loop = it->second;
        if (loop->status() != 0) {
            throw shared::logic_exception("not allowed to call this func when thread is running");
        }
        auto ptr = QUEUE_MGR.subQueue(queueId, queueSize, autoExpand);
        if (ptr == nullptr) {
            std::ostringstream oss;
            oss << "queue id:" << queueId << " is already exist";
            throw shared::logic_exception(oss.str());
        }
        // Loops that block in an IO multiplexer need the queue's notification
        // so they wake up when a message arrives.
        if (loop->mode() != types::IO_MULTI_MODE::value::NO_IO_MULTI) {
            ptr->setNeedNotify();
        }
        TRACE_INFO << "user will send messages from another threads by queue id:"_s << queueId << "(real id:"_s
                   << ptr->id() << ")";
        auto thd_ch = std::make_shared<ThreadChannel>(loop.get(), ptr);
        thd_ch->init();
        thd_ch->enableReading();
        return 0;
    }

    /// After the loops are created, create the channels and bind each channel
    /// to its owning loop.
    void LoopManager::bindChannel()
    {
        ///< The bound fds (i.e. the channels created here) only need reads.

        /// For an event-loop thread there are these cases:
        ///[1] A TCP event loop writes into a queue and does not watch the
        ///    queue's fd.
        ///[2] A business event loop writes into a queue and does not watch the
        ///    queue's fd.
        ///[3] A business event loop reads from some queue and must watch the
        ///    read event of that queue's eventfd.

        for (const auto& loopEle : busiLoops_) {
            auto looptr = loopEle.second;
            // Find the queues this business thread has subscribed to.
            auto ptrs = QUEUE_MGR.getBusiThreadSubQueues(looptr->id());
            for (auto ptr : ptrs) {
                /// The channels created here carry inter-thread traffic and
                /// live for the whole program, so they are never released
                /// explicitly.
                if (looptr->poller()->ioType() != types::IO_MULTI_MODE::value::NO_IO_MULTI) {
                    ptr->setNeedNotify();
                }
                // Consistency fix: use the already-bound looptr instead of
                // re-reading loopEle.second.
                auto thd_ch = std::make_shared<ThreadChannel>(looptr.get(), ptr);
                thd_ch->init();
                thd_ch->enableReading();
            }
        }
    }

    void LoopManager::startAll()
    {
        for (auto& thd : threads_) {
            thd.second->start();
        }
        for (auto& ser : clients_) {
            ser.second->connect();
        }
        for (auto& ser : servers_) {
            ser.second->start();
        }
    }

    void LoopManager::joinAll()
    {
        for (auto& thd : threads_) {
            thd.second->join();
        }
    }

    /// Ask every loop — business and IPC alike — to exit its loop() call.
    void LoopManager::stopAll()
    {
        for (auto& [tid, loop] : busiLoops_) {
            loop->stop();
        }
        for (auto& [tid, loop] : ipcLoops_) {
            loop->stop();
        }
    }
}  // namespace foundation
}  // namespace xncc