#ifndef THREADPOOL_H
#define THREADPOOL_H

#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
#include <atomic>

// Elastic thread pool: starts with `initialThreads` workers and may grow up to
// `maxThreads` when tasks arrive faster than they are consumed (see enqueue's
// tryAddWorker path). Method bodies other than enqueue are defined elsewhere.
class ThreadPool {
public:
    // initialThreads: number of workers created up front.
    // maxThreads: hard cap for dynamic growth (defaults to 2x hardware threads).
    explicit ThreadPool(size_t initialThreads = std::thread::hardware_concurrency(),
                       size_t maxThreads = std::thread::hardware_concurrency() * 2);
    ~ThreadPool();

    // Schedules f(args...) for execution on a worker thread and returns a
    // std::future for the result. Throws std::runtime_error if the pool is
    // stopped. (std::result_of is deprecated in C++17; kept here to match the
    // out-of-line definition below.)
    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args) -> std::future<typename std::result_of<F(Args...)>::type>;

    void resize(size_t newSize);    // adjust worker count (defined elsewhere)
    size_t getThreadCount() const;  // current number of worker threads

private:
    std::atomic<size_t> maxThreads_;            // upper bound for dynamic growth
    std::vector<std::thread> workers_;          // worker threads; std::vector is NOT thread-safe by itself
    std::queue<std::function<void()>> tasks_;   // pending tasks, guarded by queueMutex_
    mutable std::mutex queueMutex_;             // protects tasks_; NOTE(review): presumably also guards workers_ -- confirm in .cpp
    std::condition_variable condition_;         // signals workers when tasks arrive (or on shutdown)
    std::atomic<bool> stop_;                    // shutdown flag checked by enqueue; presumably set by ~ThreadPool -- confirm
    std::atomic<size_t> activeWorkers_{0};      // workers currently executing a task

    void workerFunction();  // per-thread work loop (defined elsewhere)
    bool tryAddWorker();    // attempts to grow the pool; returns true on success (defined elsewhere)
};

// Schedules f(args...) on the pool and returns a future for its result.
// Throws std::runtime_error if the pool has been stopped.
//
// Fixes vs. previous revision:
//  * stop_ is now checked while holding queueMutex_, closing the race where
//    the pool stops between an unlocked check and tasks_.emplace (which could
//    enqueue a task that is never run).
//  * workers_.size() is now read under queueMutex_; std::vector is not
//    thread-safe, so the previous unlocked read raced with resize() /
//    tryAddWorker() mutating workers_.
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&&... args) -> std::future<typename std::result_of<F(Args...)>::type> {
    using return_type = typename std::result_of<F(Args...)>::type;

    // packaged_task is move-only; wrap in shared_ptr so the copyable
    // std::function<void()> stored in tasks_ can hold it.
    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...)
    );
    std::future<return_type> res = task->get_future();

    bool idleWorkerAvailable = false;
    {
        std::unique_lock<std::mutex> lock(queueMutex_);
        if (stop_) {
            throw std::runtime_error("enqueue on stopped ThreadPool");
        }
        tasks_.emplace([task]() { (*task)(); });
        // Sample the idle predicate while the mutex is held: workers_ must not
        // be read concurrently with mutation by resize()/tryAddWorker().
        idleWorkerAvailable = activeWorkers_ < workers_.size();
    }

    // Attempt dynamic expansion when every current worker is busy.
    // Notify outside the lock so the woken worker does not immediately block.
    if (idleWorkerAvailable || tryAddWorker()) {
        condition_.notify_one();
    } else {
        // Pool is saturated and at capacity; wake everyone so whichever worker
        // finishes first re-checks the queue.
        condition_.notify_all();
    }

    return res;
}

#endif // THREADPOOL_H