#pragma once

#include "shared.hpp"
#include "task.hpp"
#include "worker.hpp"
#include <atomic>
#include <chrono>
#include <cstddef>
#include <functional>
#include <future>
#include <latch>
#include <limits>
#include <memory>
#include <print>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>
namespace Thread {
class threadpool {
  friend class detail::Shared;

public:
  threadpool(std::size_t thread_num = std::thread::hardware_concurrency()) {
    std::latch latch{1};
    latch.arrive_and_wait();
    work(thread_num);
  }
  ~threadpool() { close(); }

  void close() {
    __share.shutdown();
    for (auto &t : __threads) {
      if (t.joinable()) {
        t.join();
      }
    }
  }

public:
  template <typename F, typename... Args>
  std::future<std::invoke_result_t<F, Args...>>
  submit(F &&f, Args &&...args, TaskPriority p = TaskPriority::NORMAL) {
    using return_type = std::invoke_result_t<F, Args...>;
    auto task = std::make_shared<std::packaged_task<return_type()>>(
        [f = std::forward<F>(f),
         ... args = std::forward<Args>(args)]() mutable {
          return std::invoke(std::move(f), std::move(args)...);
        });
    // 基于优先级push任务
    switch (p) {
    case TaskPriority::HIGH:
      select_worker()->priority_push_back([task]() { (*task)(); });
      break;
    case TaskPriority::NORMAL:
      select_worker()->local_push_back_maybe_overflow([task]() { (*task)(); });
      break;
    case TaskPriority::LOW:
      __share.push_global_task([task]() { (*task)(); });
      break;
    default:
      throw std::runtime_error("invalid priority");
      break;
    }
    auto res = task->get_future();
    return res;
  }

private:
  // 主要工作函数,使用latch保证构造worker的一致性和线程安全
  void work(std::size_t num) {
    for (std::size_t i = 0; i < num; ++i) {
      std::latch sync{2}; // 初始值2
      __threads.emplace_back([this, i, &sync]() {
        detail::Worker worker{&__share, i};
        sync.count_down(); // 减一
        worker.run();
      });
      sync.arrive_and_wait(); // 减一并且等待减完
    }
  }

  // 基于负载均衡选择合适worker
  detail::Worker *select_worker() {
    auto workers = __share.get_workers();
    if (workers.empty())
      return nullptr;

    const std::size_t worker_count = workers.size();

    // 快速路径：轮询选择（90%的情况）
    auto now = std::chrono::steady_clock::now();
    if (now - __last_balance_check < BALANCE_CHECK_INTERVAL) {
      std::size_t index =
          __round_robin_counter.fetch_add(1, std::memory_order_relaxed) %
          worker_count;
      return workers[index];
    }

    // 慢速路径：负载感知选择（10%的情况）
    __last_balance_check = now;

    std::size_t min_load = std::numeric_limits<std::size_t>::max();
    std::size_t best_index = 0;
    std::size_t total_load = 0;

    // 一次遍历收集所有信息
    for (std::size_t i = 0; i < worker_count; ++i) {
      std::size_t current_load = workers[i]->local_size();
      total_load += current_load;

      if (current_load < min_load) {
        min_load = current_load;
        best_index = i;
      }
    }

    // 如果负载差异不大，继续使用轮询
    std::size_t avg_load = total_load / worker_count;
    if (min_load + LOAD_THRESHOLD >= avg_load) {
      std::size_t index =
          __round_robin_counter.fetch_add(1, std::memory_order_relaxed) %
          worker_count;
      return workers[index];
    }

    // 负载差异较大时，选择最轻负载的worker
    return workers[best_index];
  }

private:
  detail::Shared __share;             // 共享资源
  std::vector<std::thread> __threads; // 线程数组
  mutable std::atomic<std::size_t> __round_robin_counter{0};
  mutable std::chrono::steady_clock::time_point __last_balance_check{
      std::chrono::steady_clock::now()};
  static constexpr std::chrono::milliseconds BALANCE_CHECK_INTERVAL{
      100};                                         // 100ms检查一次负载
  static constexpr std::size_t LOAD_THRESHOLD = 50; // 负载阈值
};

} // namespace Thread
