#pragma once
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <unordered_map>
#include <vector>
// NOTE(review): a using-directive in a header leaks std into every includer;
// kept for compatibility with existing code that depends on it.
using namespace std;

// Pool limits — presumably consumed as defaults by the .cpp implementation
// (setMaxThreadSize / setMaxTaskMaxSize); confirm against the definitions.
// `constexpr` (not plain `const`) makes the compile-time intent explicit.
constexpr int MAX_THREAD_SIZE = 20;
constexpr int MAX_TASK_SIZE = 1024;

// Thin wrapper around one pool worker thread. All member functions are
// defined out of line (in the .cpp), so only the contract is visible here.
class Thread
{
public:
    // Worker entry point signature; the int argument is this thread's id.
    using threadFunc = function<void(int)>;
    // Stores `func` to be run later by start(); presumably also assigns a
    // unique id from generateId_ — confirm in the .cpp.
    Thread(threadFunc func);
    ~Thread();
    // Returns the id assigned at construction.
    int getId();

    // Launches the underlying OS thread, which invokes func_(threadid).
    // NOTE(review): whether it joins or detaches is decided in the .cpp — confirm.
    void start();

private:
    threadFunc func_;       // entry point executed by start()
    int threadid;           // this thread's id, returned by getId()
    static int generateId_; // shared id counter (defined in the .cpp)
};

// Pool sizing strategy (see WorkerThreadPool::setMode).
enum PoolMode
{
    MODE_FIXED,  // fixed number of worker threads, set at init()
    MODE_CACHED  // pool may grow up to the configured max when tasks back up
};

class WorkerThreadPool
{
public:
    // 删除拷贝构造和赋值运算符
    WorkerThreadPool(const WorkerThreadPool&) = delete;
    WorkerThreadPool& operator=(const WorkerThreadPool&) = delete;
    
    // 获取单例实例
    static WorkerThreadPool* getInstance();
    
    // 初始化线程池（替代原来的start）
    void init(int initSize = thread::hardware_concurrency());
    
    // 销毁线程池
    static void destroy();
    
    void setMode(PoolMode mode);
    // 如果是动态增长模式就需要这个函数
    void setMaxThreadSize(int size);
    void setMaxTaskMaxSize(int size);

    template <typename Func, typename... Args>
    auto submitTask(Func &&func, Args &&...args) -> future<decltype(func(args...))>
    {
        // 检查线程池是否已初始化
        if (!m_initialized) {
            throw runtime_error("ThreadPool not initialized. Call init() first.");
        }
        
        using RType = decltype(func(args...));
        auto task = std::make_shared<packaged_task<RType()>>(bind(forward<Func>(func), forward<Args>(args)...));

        future<RType> result = task->get_future();
        // 提交任务以后队列不空，通知其他线程消费任务
        unique_lock<mutex> lock(taskQueMux_);
        if (!notFull_.wait_for(lock, chrono::seconds(1), [&]() -> bool
                               { return taskQue_.size() < taskMaxSize; }))
        {
            cout << "任务提交超时" << endl;
            auto dummyTask = std::make_shared<std::packaged_task<RType()>>(
                []() -> RType
                { return RType(); });
            (*dummyTask)();
            return dummyTask->get_future();
        }

        taskQue_.emplace([task]()
                         { (*task)(); });
        taskSize_++;
        notEmpty_.notify_one();

        // 动态模式
        if (mode == PoolMode::MODE_CACHED && taskSize_ > m_idleThreadSize && m_curThreadSize < m_maxThreadSize)
        {
            // 创建线程
            auto ptr = make_unique<Thread>(bind(&WorkerThreadPool::threadFunc, this, placeholders::_1));
            // 放入map容器中
            int threadid = ptr->getId();
            threads_.emplace(make_pair(threadid, move(ptr)));
            threads_[threadid]->start();
            this->m_idleThreadSize++;
            this->m_curThreadSize++;
        }
        return result;
    }
    
    // 线程函数
    void threadFunc(int threadid);

private:
    WorkerThreadPool();  // 私有构造函数
    ~WorkerThreadPool(); // 私有析构函数
    
    static WorkerThreadPool* instance;  // 单例实例指针
    static mutex instanceMutex;        // 实例互斥锁
    
    std::atomic<bool> m_initialized{false};
    
    atomic_int m_initThreadSize;
    atomic_int m_maxThreadSize;
    atomic_int m_curThreadSize;
    atomic_int m_idleThreadSize;
    atomic_bool is_stop;
    // 任务队列锁
    mutex taskQueMux_;
    // 任务阈值
    atomic_int taskMaxSize;
    // 模式
    PoolMode mode;
    // 任务队列
    queue<function<void()>> taskQue_;
    // 线程队列
    map<int, unique_ptr<Thread>> threads_;

    // 两个信号量
    condition_variable notFull_;
    condition_variable notEmpty_;
    condition_variable exitcondition_;
    std::atomic_int taskSize_;
};