#pragma once

// C system headers
#include <sys/epoll.h>
#include <sys/eventfd.h>  // eventfd(): missing in the original, used at the bottom of this file
#include <unistd.h>

// C++ standard library
#include <atomic>
#include <cerrno>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

namespace good_cptl
{

// Type-erased unit of work accepted by the executors: any nullary callable.
using Task = std::function<void()>;

// Abstract executor interface: implementations accept tasks via add() and
// release their worker threads via stop().
class Executor
{
public:
  virtual ~Executor() = default;
  // Submit a task for asynchronous execution.
  // Concrete implementations in this file make this safe to call from
  // multiple producer threads (both guard the queue with a mutex).
  virtual void add(Task task) = 0;
  // Stop accepting work and join the worker threads.
  virtual void stop() = 0;
};

// CPU 密集型任务线程池
class CPUThreadPoolExecutor : public Executor
{
public:
  static constexpr int8_t kDefaultThreadPoolSize = 10;
public:
  explicit CPUThreadPoolExecutor(size_t num_threads = kDefaultThreadPoolSize)
    : stop_(false)
  {
    for (size_t i = 0; i < num_threads; i++) {
      threads_.emplace_back([this] { worker_loop(); });
    }
  }

  virtual ~CPUThreadPoolExecutor
  ()
  {
    stop_ = true;
    condition_.notify_all();
    for (auto& thread : threads_) {
      if (thread.joinable()) {
        thread.join();
      }
    }
  }

  void add(Task task) override
  {
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      task_queue_.push(std::move(task));
    }
    condition_.notify_one();
  }

  void stop() override
  {
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      stop_ = true;
    }
    condition_.notify_all();
    for (auto& thread : threads_) {
      if (thread.joinable()) {
        thread.join();
      }
    }
  }
private:
  void worker_loop()
  {
    while (!stop_)
    {
      Task task;
      {
        std::unique_lock<std::mutex> lock(queue_mutex_);
        condition_.wait(lock, [this] { return !task_queue_.empty() || stop_; });
        if (stop_ && task_queue_.empty()) {
          return;
        }
        task = std::move(task_queue_.front());
        task_queue_.pop();
      }
      task();
    }
  }
private:
  std::vector<std::thread> threads_;
  std::queue<Task> task_queue_;
  std::mutex queue_mutex_;
  std::condition_variable condition_;
  std::atomic<bool> stop_;
};


// IO 密集型任务线程池
class IOThreadPoolExecutor : public ThreadPoolExecutor
{
public:
  explicit IOThreadPoolExecutor(size_t num_threads = kDefaultThreadPoolSize)
    : stop_(false)
  {
    for (size_t i = 0; i < num_threads; i++) {
      threads_.emplace_back([this] { event_loop(); });
    }
  }
  virtual ~IOThreadPoolExecutor()
  {
    stop_ = true;
    uint64_t count = 1;
    write(event_fd_, &count, sizeof(count));
    for (auto& thread : threads_) {
      if (thread.joinable()) {
        thread.join();
      }
    }
  }

  void add(Task task) override
  {
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      task_queue_.push(std::move(task));
    }
    uint64_t count = 1;
    write(event_fd_, &count, sizeof(count));
  }
  void stop() override
  {
    stop_ = true;
    uint64_t count = 1;
    write(event_fd_, &count, sizeof(count));
    for (auto& thread : threads_) {
      if (thread.joinable()) {
        thread.join();
      }
    }
    close(event_fd_);
  }
private:
  void event_loop()
  {
    int epoll_fd = epoll_create1(0);
    struct epoll_event event;
    event.events = EPOLLIN;
    event.data.fd = event_fd_;
    epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd_, &event);

    const int max_events = 10;
    struct epoll_event events[max_events];
    while (!stop_) {
      int nfds = epoll_wait(epoll_fd, events, max_events, -1);
      for (int i = 0; i < nfds; ++i) {
        if (events[i].data.fd == event_fd_) {
          uint64_t count;
          read(event_fd_, &count, sizeof(count));
          process_tasks();
        }
      }
    }
    close(epoll_fd);
  }
  void process_tasks()
  {
    std::queue<Task> local_queue;
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      std::swap(local_queue, task_queue_);
    }
    while (!local_queue.empty()) {
      local_queue.front()();
      local_queue.pop();
    }
  }
private:
  int event_fd_ = eventfd(0, EFD_NONBLOCK); // 用于事件通知
  std::vector<std::thread> threads_;
  std::queue<Task> task_queue_;
  std::mutex queue_mutex_;
  std::atomic<bool> stop_;
};

}