#include "asioservicepool.h"
#include <iostream>
#include <string>
#include "../util/logger.h"
#include <memory>
namespace Yizhi {

AsioServicePool::AsioServicePool(size_t size) 
    : m_nextService(0) {
    
    // Fall back to the hardware thread count when the caller passes 0;
    // guard against hardware_concurrency() itself reporting 0.
    if (size == 0) {
        size = std::thread::hardware_concurrency();
        if (size == 0) {
            size = 2; // last-resort floor
        }
    }
    LOG_DEBUG("Initializing AsioServicepool with size:"+std::to_string(size));

    // Reserve up front so the emplace loops below never reallocate.
    m_ioservices.reserve(size);
    m_works.reserve(size);
    m_threadpool.reserve(size);

    // One io_context per slot, each kept alive by its own work guard
    // so run() does not return while the pool is idle.
    size_t slot = 0;
    while (slot < size) {
        m_ioservices.emplace_back(std::make_unique<IOService>());
        m_works.emplace_back(std::make_unique<Work>(*m_ioservices.back()));
        ++slot;
    }
    start();
}

// Releases the work guards, stops the io_contexts and joins every worker
// thread via stop(); stop() itself is a no-op when the pool is not running.
AsioServicePool::~AsioServicePool() {
    stop();
}

void AsioServicePool::start() {
    std::lock_guard<std::mutex> lock(m_mutex);
    
    if (m_running) {
        LOG_DEBUG("AsioServicePool is Already running");
        return;
    }
    LOG_DEBUG("Starting AsioServicePool...");
    
    // 为每个io_context创建线程
    for (size_t i = 0; i < m_ioservices.size(); ++i) {
        m_threadpool.emplace_back([this, i]() {
            std::cout << "IO service thread " << i << " started" << std::endl;
            
            try {
                boost::system::error_code ec;
                m_ioservices[i]->run(ec);
                
                if (ec) {
                    LOG_ERROR("IO service thread: "+std::to_string(i)+ec.message());
                }
            } catch (const std::exception& e) {
                 LOG_ERROR("IO service thread: "+std::to_string(i)+e.what());
            }
             LOG_DEBUG("IO service thread: "+std::to_string(i)+"\t stoped");
            
        });
    }
    
    m_running = true;
    LOG_DEBUG( "AsioServicePool started successfully with "+std::to_string(m_threadpool.size()));
}

// Shut the pool down and leave it ready for a future start():
// release work guards, stop the contexts, join every worker thread,
// then re-arm fresh guards. No-op if the pool is not running.
// Thread-safe: guarded by m_mutex.
void AsioServicePool::stop() {
    std::lock_guard<std::mutex> lock(m_mutex);
    
    if (!m_running) {
        return;
    }
    LOG_DEBUG( "Stopping AsioServicePool...");
    
    // 1. Release the work guards first so each io_context::run() may
    //    return naturally once its pending handlers finish.
    for (auto& work : m_works) {
        work.reset();
    }
    
    // 2. Force-stop every io_context (abandons still-pending handlers).
    for (auto& ioservice : m_ioservices) {
        ioservice->stop();
    }
    
    // 3. Wait for all worker threads to exit.
    //    NOTE(review): join happens while m_mutex is held — safe as long
    //    as no io handler calls back into start()/stop(); verify callers.
    for (auto& thread : m_threadpool) {
        if (thread.joinable()) {
            thread.join();
        }
    }
    
    // 4. Drop the (now-joined) thread objects.
    m_threadpool.clear();
    
    // 5. Recreate the work guards so a later start() keeps the contexts
    //    alive again.
    for (size_t i = 0; i < m_ioservices.size(); ++i) {
        m_works[i] = std::make_unique<Work>(*m_ioservices[i]);
        m_ioservices[i]->restart(); // clear the io_context's stopped flag
    }
    
    m_running = false;
     LOG_DEBUG( "AsioServicePool stopped successfully");
}

/**
 * Hand out the next io_context in round-robin order.
 * Lock-free: a relaxed atomic counter makes this callable from any
 * thread concurrently.
 * @throws std::runtime_error if the pool holds no io_contexts.
 */
boost::asio::io_context& AsioServicePool::GetIoservice() {
    if (m_ioservices.empty()) {
        throw std::runtime_error("No IO services available");
    }
    
    // Relaxed ordering suffices — we only need a distinct ticket number,
    // not synchronization with other memory operations.
    const size_t ticket = m_nextService.fetch_add(1, std::memory_order_relaxed);
    return *m_ioservices[ticket % m_ioservices.size()];
}

/**
 * Pick an io_context for new work.
 *
 * Despite the name, no per-context load accounting exists yet, so this
 * reuses the round-robin policy of GetIoservice() (same atomic counter)
 * instead of duplicating that code inline.
 * TODO: track pending-handler counts per context to implement true
 * least-loaded selection.
 *
 * @throws std::runtime_error if the pool holds no io_contexts
 *         (propagated from GetIoservice()).
 */
boost::asio::io_context& AsioServicePool::GetLeastLoadedIoservice() {
    // Delegates: identical empty-check and round-robin index as before.
    return GetIoservice();
}



} // namespace Yizhi