//
// Created by Scave on 2025/9/22.
//

#ifndef TIEC_THREAD_H
#define TIEC_THREAD_H

#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <vector>

#include "macro.h"

namespace NS_TIEC {
  /// A fixed-size pool of worker threads consuming tasks from a shared FIFO
  /// queue. Header-only; all member definitions below are inline.
  class ThreadPool {
  public:
    /// @brief Starts `threads` worker threads immediately.
    /// @param threads Number of workers to spawn.
    explicit ThreadPool(size_t threads);

    /// @brief Requests shutdown and joins every worker. Tasks still queued at
    /// destruction time are executed before the workers exit.
    ~ThreadPool();

    // The pool owns threads and synchronization primitives, so it must not be
    // copied. (The std::mutex members already make copying ill-formed;
    // deleting the operations documents that intent explicitly.)
    ThreadPool(const ThreadPool&) = delete;
    ThreadPool& operator=(const ThreadPool&) = delete;

    /// @brief Enqueue a callable for asynchronous execution.
    /// @return A future yielding the callable's result (or rethrowing the
    ///         exception it raised).
    /// @throws std::runtime_error if the pool has already been stopped.
    // NOTE(review): std::result_of is deprecated in C++17 and removed in
    // C++20; migrating to std::invoke_result_t requires updating this
    // declaration and the out-of-line definition in lockstep.
    template <class F, class... Args>
    auto enqueue(F&& f, Args&&... args)
      -> std::future<typename std::result_of<F(Args...)>::type>;

    /// @brief Blocks until the task queue is empty and no task is running.
    void waitAll();

    /// @brief Number of worker threads owned by the pool.
    size_t getThreadCount() const { return workers.size(); }

  private:
    // Worker threads; joined in the destructor.
    std::vector<std::thread> workers;
    // Pending tasks, executed in FIFO order.
    std::queue<std::function<void()>> tasks;

    // Guards `tasks`; also held around `active_tasks` updates so waitAll()
    // observes a consistent (queue, in-flight) pair.
    std::mutex queue_mutex;
    // Signaled when a task is enqueued or shutdown begins.
    std::condition_variable condition;
    // Signaled when the pool becomes idle (queue empty, nothing running).
    std::condition_variable completion_condition;

    // Set once in the destructor to ask workers to drain the queue and exit.
    std::atomic_bool stop;
    // Number of tasks currently being executed by workers.
    std::atomic_uint active_tasks;
  };

  // Spawns `threads` workers; each one repeatedly pulls a task off the queue
  // and runs it until shutdown is requested and the queue has drained.
  inline ThreadPool::ThreadPool(size_t threads) : stop(false), active_tasks(0) {
    auto worker_loop = [this] {
      while (true) {
        std::function<void()> job;
        {
          std::unique_lock<std::mutex> guard(queue_mutex);
          // Sleep until there is work to do or the pool is stopping.
          condition.wait(guard, [this] { return stop || !tasks.empty(); });

          // On shutdown, exit only once every queued task has been taken.
          if (stop && tasks.empty()) {
            return;
          }

          job = std::move(tasks.front());
          tasks.pop();
          // Counted under the queue lock so waitAll() sees a consistent
          // (queue, in-flight) pair.
          ++active_tasks;
        }

        // Run outside the lock so other workers can make progress.
        job();

        {
          std::unique_lock<std::mutex> guard(queue_mutex);
          --active_tasks;
          // Wake waitAll() once nothing is queued and nothing is running.
          if (active_tasks == 0 && tasks.empty()) {
            completion_condition.notify_all();
          }
        }
      }
    };

    for (size_t i = 0; i < threads; ++i) {
      workers.emplace_back(worker_loop);
    }
  }

  // Shutdown: raise the stop flag under the lock, wake every sleeping worker,
  // then wait for each thread to finish. Tasks still queued at this point are
  // executed before the workers exit.
  inline ThreadPool::~ThreadPool() {
    {
      std::lock_guard<std::mutex> guard(queue_mutex);
      stop = true;
    }
    condition.notify_all();

    for (auto& worker : workers) {
      worker.join();
    }
  }

  template <class F, class... Args>
  auto ThreadPool::enqueue(F&& f, Args&&... args)
    -> std::future<typename std::result_of<F(Args...)>::type> {
    using return_type = typename std::result_of<F(Args...)>::type;

    auto task = std::make_shared<std::packaged_task<return_type()>>(
      std::bind(std::forward<F>(f), std::forward<Args>(args)...));

    std::future<return_type> res = task->get_future();
    {
      std::unique_lock<std::mutex> lock(queue_mutex);

      if (stop) {
        throw std::runtime_error("enqueue on stopped ThreadPool");
      }

      tasks.emplace([task]() { (*task)(); });
    }

    condition.notify_one();
    return res;
  }

  // Blocks the caller until the queue is empty and no worker is executing a
  // task; equivalent to the predicate overload of condition_variable::wait.
  inline void ThreadPool::waitAll() {
    std::unique_lock<std::mutex> lock(queue_mutex);
    while (!tasks.empty() || active_tasks != 0) {
      completion_condition.wait(lock);
    }
  }
} // namespace NS_TIEC

#endif  // TIEC_THREAD_H
