#pragma once

#include "Thread.h"
#include "poomodel.h"

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <unordered_map>

// Upper bound on the number of worker threads (CACHED mode growth cap).
// `inline constexpr` (C++17): one entity across all TUs, usable in
// constant expressions — preferable to plain `const` in a header.
inline constexpr size_t THREAD_MAXTHRESHHOLD = 128;
// Seconds an extra CACHED-mode thread may sit idle before reclamation.
inline constexpr int THREAD_MAX_IDLE_TIME = 10;
// Upper bound on the number of tasks waiting in the queue.
inline constexpr size_t TASK_MAX_THRESHHOLD = 1024;

// Thread pool.
//  FIXED mode runs a constant set of workers; CACHED mode additionally
//  grows the pool on demand (extra bookkeeping/synchronization below).
class ThreadPool {
public:
  ThreadPool();
  ~ThreadPool();
  ThreadPool(const ThreadPool &) = delete;
  ThreadPool &operator=(const ThreadPool &) = delete;
  ThreadPool(ThreadPool &&) = delete;            // non-movable
  ThreadPool &operator=(ThreadPool &&) = delete;

  void setMode(PoolMode mode);
  void setTaskQueMaxThreshHold(size_t threshold);
  void setThreadSizeThreshHold(size_t threshold);

  // Submit a callable and its arguments for asynchronous execution.
  // Returns a future for the callable's result.  If the task queue stays
  // full for 1 second the submission is rejected: an error is logged and
  // a ready future holding a default-constructed RType is returned.
  // TODO: add an overload for pre-built lambdas.
  template <typename Func, typename... Args>
  auto submitTask(Func &&func, Args &&... args)
      -> std::future<decltype(func(args...))> {
      using RType = decltype(func(args...));
      // packaged_task is move-only; wrap it in a shared_ptr so the
      // copyable std::function stored in taskQue_ can carry it.
      auto task = std::make_shared<std::packaged_task<RType()>>(
          std::bind(std::forward<Func>(func), std::forward<Args>(args)...));
      std::future<RType> result = task->get_future();

      // Lock the queue, then wait (bounded) for free space.
      std::unique_lock<std::mutex> lock(taskQueMtx_);
      bool hasRoom = notFull_.wait_for(
          lock, std::chrono::seconds(1),
          [&]() { return taskQue_.size() < taskQueThreshHold_; });
      if (!hasRoom) {
          std::cerr << "task queue is full, submit task fail.\n";
          // Fulfil a throwaway promise before returning its future;
          // otherwise the caller's future::get() would throw
          // std::future_error (broken promise) when the never-run
          // packaged_task is destroyed.  No shared_ptr needed here —
          // the task lives and completes entirely on this stack frame.
          std::packaged_task<RType()> rejected([]() { return RType(); });
          rejected();
          return rejected.get_future();
      }

      taskQue_.emplace([task]() { (*task)(); });
      taskSize_++;
      // Queue is no longer empty: wake consumers.
      notEmpty_.notify_all();

      // CACHED mode: spawn an extra worker when the backlog exceeds the
      // idle-thread count and we are still below the thread cap.
      // (cast avoids a signed/unsigned comparison; curThreadSize_ >= 0)
      if (poolMode_ == PoolMode::MODE_CACHED &&
          this->taskSize_ > this->idleThreadSize_ &&
          static_cast<size_t>(this->curThreadSize_.load()) <
              this->threadSizeThreshold_) {
          auto worker = std::make_unique<Thread>(
              [this](int threadId) { threadFunc(threadId); });
          int threadId = worker->getThreadID();
          // Reuse the iterator from emplace instead of re-hashing via
          // operator[] for the start() call.
          auto [it, inserted] = threads_.emplace(threadId, std::move(worker));
          it->second->start();

          this->curThreadSize_++;
          this->idleThreadSize_++;
          // __func__ is standard C++ (unlike __PRETTY_FUNCTION__, a
          // GCC/Clang extension), so this compiles on all compilers.
          std::cout << __func__ << ":" << __LINE__
                    << " tid:" << std::this_thread::get_id() << " CACHED模式"
                    << "\n";
      }

      return result;
  }

  void start(size_t initThreadSize = std::thread::hardware_concurrency());
private:
  // Worker loop executed by each pool thread.
  void threadFunc(int threadID);
  bool checkRunningState() const;
private:
  std::unordered_map<int, std::unique_ptr<Thread>> threads_; // worker threads, keyed by thread id
  size_t initThreadSize_{4};          // initial number of worker threads
  std::atomic_int curThreadSize_{0};  // current worker count (atomic: read off-lock)
  std::atomic_int idleThreadSize_{0}; // workers currently idle
  size_t threadSizeThreshold_{THREAD_MAXTHRESHHOLD}; // hard cap on worker count

  using Task = std::function<void()>;
  std::queue<Task> taskQue_;          // pending tasks
  std::atomic_int taskSize_{0};       // number of pending tasks
  size_t taskQueThreshHold_{TASK_MAX_THRESHHOLD}; // task queue capacity

  std::condition_variable notFull_;  // queue not full: producers may submit
  std::condition_variable notEmpty_; // queue not empty: consumers may run
  std::condition_variable exitCond_; // signalled while reclaiming workers

  std::mutex taskQueMtx_; // guards taskQue_ and related state

  PoolMode poolMode_{PoolMode::MODE_FIXED};  // current scheduling mode
  std::atomic_bool isPoolRunning_{false};    // pool lifecycle flag
};