#ifndef COASYNC_DETAIL_STATIC_THREAD_POOL_INCLUDED
#define COASYNC_DETAIL_STATIC_THREAD_POOL_INCLUDED
#include <algorithm>
#include <atomic>
#include <cassert>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory_resource>
#include <mutex>
#include <optional>
#include <random>
#include <span>
#include <thread>
#include <utility>
#include <vector>

#include "atomic_intrusive_queue.hpp"
#include "bwos_lifo_queue.hpp"
#include "intrusive_queue.hpp"
#include "xorshift.hpp"
namespace coasync::detail {
class static_thread_pool;
// Tuning parameters for each worker's BWoS (block-wise work stealing)
// LIFO queue: the queue is partitioned into `num_blocks` blocks of
// `block_size` slots each (defaults give 32 * 8 = 256 task slots).
struct bwos_params {
  std::size_t num_blocks{32};
  std::size_t block_size{8};
};
// Intrusive base for every schedulable unit of work.  Tasks are linked
// through `next` into the pool's intrusive queues and are run by calling
// `execute(this)`; the callback must not throw.
struct task_base {
  task_base* next = nullptr;  // intrusive hook used by the queues
  void (*execute)(task_base*) noexcept = nullptr;  // task entry point
};
// Per-producer-thread mailbox used to hand tasks into the pool.  It holds
// one atomic queue per worker thread so a producer can target a specific
// worker without contending with other producers' submissions.
struct remote_queue {
  explicit remote_queue(std::size_t nthreads) noexcept : M_queues(nthreads) {}
  explicit remote_queue(remote_queue* next, std::size_t nthreads) noexcept
      : M_next(next), M_queues(nthreads) {}
  remote_queue* M_next = nullptr;  // next node in the remote_queue_list
  std::vector<atomic_intrusive_queue<&task_base::next>> M_queues;  // one per worker
  std::thread::id M_id = std::this_thread::get_id();  // owning (creating) thread
  // Worker index of the owning thread, or SIZE_MAX when the owner is not a
  // pool worker (filled in by static_thread_pool::get_remote_queue).
  std::size_t M_index = std::numeric_limits<std::size_t>::max();
};
// Lock-free singly linked list of remote_queue nodes, one per producer
// thread.  New nodes are prepended with a CAS on M_head; nodes are never
// unlinked until destruction, so traversals only need the initial
// acquire load.
struct remote_queue_list {
 private:
  std::atomic<remote_queue*> M_head;  // most recently added node
  remote_queue* M_tail;               // sentinel: &M_this_remotes, never deleted
  std::size_t M_nthreads;
  remote_queue M_this_remotes;        // embedded node for the constructing thread

 public:
  explicit remote_queue_list(std::size_t nthreads) noexcept
      : M_head{&M_this_remotes},
        M_tail{&M_this_remotes},
        M_nthreads(nthreads),
        M_this_remotes(nthreads) {}
  // Deletes every heap-allocated node; M_this_remotes (== M_tail) is a
  // member and is deliberately skipped.
  ~remote_queue_list() noexcept {
    remote_queue* head = M_head.load(std::memory_order_acquire);
    while (head != M_tail) {
      remote_queue* tmp = std::exchange(head, head->M_next);
      delete tmp;
    }
  }
  // Drains the mailboxes targeted at worker `tid` from every node and
  // returns the collected tasks (in reversed pop order).
  auto pop_all_reversed(std::size_t tid) noexcept
      -> intrusive_queue<&task_base::next> {
    remote_queue* head = M_head.load(std::memory_order_acquire);
    intrusive_queue<&task_base::next> tasks{};
    while (head != nullptr) {
      tasks.append(head->M_queues[tid].pop_all_reversed());
      head = head->M_next;
    }
    return tasks;
  }
  // Returns the calling thread's node, allocating and prepending a new one
  // on first use.  NOTE(review): the scan stops at M_tail, so the sentinel
  // node is never matched by id — the constructing thread gets a fresh
  // node instead of reusing M_this_remotes; confirm that is intended.
  auto get() -> remote_queue* {
    thread_local std::thread::id this_id = std::this_thread::get_id();
    remote_queue* head = M_head.load(std::memory_order_acquire);
    remote_queue* queue = head;
    while (queue != M_tail) {
      if (queue->M_id == this_id) {
        return queue;
      }
      queue = queue->M_next;
    }
    auto* new_head = new remote_queue{head, M_nthreads};
    // On CAS failure `head` is reloaded with the current value; relink the
    // new node before retrying so it always points at the current head.
    while (!M_head.compare_exchange_weak(head, new_head,
                                         std::memory_order_acq_rel)) {
      new_head->M_next = head;
    }
    return new_head;
  }
};
// A thief-side handle onto another worker's local BWoS queue, exposing
// exactly what stealing needs: one steal attempt plus the owner's index.
class workstealing_victim {
 public:
  explicit workstealing_victim(
      bwos_lifo_queue<task_base*, std::pmr::polymorphic_allocator<task_base*>>*
          queue,
      std::uint32_t index) noexcept
      : M_queue(queue), M_index(index) {}

  // Try to take one task from the front of the victim's queue; returns
  // nullptr when nothing could be stolen.
  auto try_steal() noexcept -> task_base* {
    return M_queue->steal_front();
  }

  // Index of the worker thread that owns the underlying queue.
  [[nodiscard]] auto index() const noexcept -> std::uint32_t {
    return M_index;
  }

 private:
  bwos_lifo_queue<task_base*, std::pmr::polymorphic_allocator<task_base*>>*
      M_queue;
  std::uint32_t M_index;
};
// Per-worker state: the worker's local BWoS queue, an overflow pending
// queue, the sleep/notify machinery and the list of steal victims.
class thread_state {
 public:
  struct pop_result {
    task_base* task;            // task to execute, or nullptr on stop
    std::uint32_t queue_index;  // worker queue the task was taken from
  };
  explicit thread_state(static_thread_pool* pool, std::uint32_t index,
                        bwos_params params) noexcept
      : M_index(index),
        M_resource(),
        M_local_queue(params.num_blocks, params.block_size,
                      std::pmr::polymorphic_allocator(&M_resource)),
        M_state(state::running),
        M_pool(pool) {
    std::random_device rd;
    // Seed with a value drawn from the device.  The previous code passed
    // the random_device object itself, which does not match a std-style
    // seed(result_type)/seed(SeedSeq&) engine interface.
    M_rng.seed(rd());
  }
  // Blocking pop; returns a null task only after request_stop().
  auto pop() -> pop_result;
  // Push one task (or a batch) onto this worker's queues.
  void push_local(task_base* task);
  void push_local(intrusive_queue<&task_base::next>&& tasks);
  // Wake this worker if it is sleeping; true when a wakeup was delivered.
  auto notify() -> bool;
  void request_stop();
  // Record every other worker as a potential steal victim.
  void victims(std::vector<workstealing_victim> const& victims) {
    for (workstealing_victim v : victims) {
      if (v.index() == M_index) {
        continue;  // never steal from ourselves
      }
      M_all_victims.push_back(v);
    }
  }
  [[nodiscard]] auto index() const noexcept -> std::uint32_t { return M_index; }
  auto as_victim() noexcept -> workstealing_victim {
    return workstealing_victim{&M_local_queue, M_index};
  }

 private:
  enum state { running, sleeping, notified };
  auto try_pop() -> pop_result;
  auto try_remote() -> pop_result;
  auto try_steal(std::span<workstealing_victim> victims) -> pop_result;
  auto try_steal_any() -> pop_result;
  void notify_one_sleeping();
  void set_stealing();
  void clear_stealing();
  std::uint32_t M_index;
  std::pmr::unsynchronized_pool_resource M_resource;
  bwos_lifo_queue<task_base*, std::pmr::polymorphic_allocator<task_base*>>
      M_local_queue;
  intrusive_queue<&task_base::next> M_pending_queue{};
  std::mutex M_mut{};
  std::condition_variable M_cv{};
  bool M_stop_requested{false};
  std::vector<workstealing_victim> M_all_victims{};
  std::atomic<state> M_state;
  static_thread_pool* M_pool;
  xorshift M_rng{};
};
// Fixed-size work-stealing thread pool.  Work enters through per-producer
// remote queues (M_remotes) and is executed by M_thread_count worker
// threads, each driving one thread_state.
class static_thread_pool {
 public:
  static_thread_pool();
  static_thread_pool(std::uint32_t thread_count, bwos_params params = {});
  ~static_thread_pool();
  // Returns (creating on first use) the calling thread's remote queue and
  // caches the worker index when the caller is one of the pool's threads.
  // Non-worker callers keep M_index at SIZE_MAX, which routes enqueue()
  // to the remote mailbox path.
  auto get_remote_queue() noexcept -> remote_queue* {
    remote_queue* queue = M_remotes.get();
    std::size_t index = 0;
    for (std::thread& t : M_threads) {
      if (t.get_id() == queue->M_id) {
        queue->M_index = index;
        break;
      }
      ++index;
    }
    return queue;
  }
  void request_stop() noexcept;
  [[nodiscard]] auto available_parallelism() const -> std::uint32_t {
    return M_thread_count;
  }
  [[nodiscard]] auto params() const -> bwos_params { return M_params; }
  // Enqueue entry points; definitions below.
  void enqueue(task_base* task) noexcept;
  void enqueue(remote_queue& queue, task_base* task) noexcept;
  void enqueue(remote_queue& queue, task_base* task,
               std::size_t thread_index) noexcept;
  void run(std::uint32_t index) noexcept;
  void join() noexcept;
  // Cache-line aligned so the thief counter does not false-share with
  // other hot members.
  alignas(64) std::atomic<std::uint32_t> M_num_thiefs{};
  alignas(64) remote_queue_list M_remotes;
  std::uint32_t M_thread_count;
  std::uint32_t M_max_steals{M_thread_count + 1};  // steal attempts per pop round
  bwos_params M_params;
  std::vector<std::thread> M_threads;
  std::vector<std::optional<thread_state>> M_thread_states;
  [[nodiscard]] auto num_threads() const noexcept -> std::size_t;
};
// Moves tasks from the unbounded pending queue into the bounded local
// queue.  push_back presumably returns an iterator to the first task that
// did NOT fit — TODO confirm against bwos_lifo_queue; the accepted prefix
// is spliced into `tmp` and unlinked via clear().
inline void move_pending_to_local(
    intrusive_queue<&task_base::next>& pending_queue,
    bwos_lifo_queue<task_base*, std::pmr::polymorphic_allocator<task_base*>>&
        local_queue) {
  auto last = local_queue.push_back(pending_queue.begin(), pending_queue.end());
  intrusive_queue<&task_base::next> tmp{};
  tmp.splice(tmp.begin(), pending_queue, pending_queue.begin(), last);
  tmp.clear();
}
// Drain this worker's remote mailboxes into the pending queue, refill the
// local queue from it and hand back one task if any arrived.
inline auto thread_state::try_remote() -> thread_state::pop_result {
  M_pending_queue.append(M_pool->M_remotes.pop_all_reversed(M_index));
  pop_result result{nullptr, M_index};
  if (M_pending_queue.empty()) {
    return result;
  }
  move_pending_to_local(M_pending_queue, M_local_queue);
  result.task = M_local_queue.pop_back();
  return result;
}
// Fast path: pop from the local LIFO queue; fall back to the remote
// mailboxes only when it is empty.
inline auto thread_state::try_pop() -> thread_state::pop_result {
  if (task_base* task = M_local_queue.pop_back(); task != nullptr) [[likely]] {
    return {task, M_index};
  }
  return try_remote();
}
// Pick one victim uniformly at random and attempt a single steal from it.
inline auto thread_state::try_steal(std::span<workstealing_victim> victims)
    -> thread_state::pop_result {
  if (victims.empty()) {
    return {nullptr, M_index};
  }
  const auto upper = static_cast<std::uint32_t>(victims.size() - 1);
  std::uniform_int_distribution<std::uint32_t> dist(0, upper);
  workstealing_victim& victim = victims[dist(M_rng)];
  return {victim.try_steal(), victim.index()};
}
// One random steal attempt across the full victim set.
inline auto thread_state::try_steal_any() -> thread_state::pop_result {
  return try_steal(std::span<workstealing_victim>{M_all_victims});
}
// Push one task onto the bounded local queue, spilling into the
// unbounded pending queue when the local queue is full.
inline void thread_state::push_local(task_base* task) {
  const bool accepted = M_local_queue.push_back(task);
  if (!accepted) {
    M_pending_queue.push_back(task);
  }
}
// Batch push: tasks land in the pending queue and migrate to the local
// queue lazily via try_remote()/move_pending_to_local.
inline void thread_state::push_local(
    intrusive_queue<&task_base::next>&& tasks) {
  M_pending_queue.prepend(std::move(tasks));
}
// Mark this worker as actively stealing.  Relaxed ordering: the counter
// is a heuristic used only to decide when to wake a sleeper.
inline void thread_state::set_stealing() {
  M_pool->M_num_thiefs.fetch_add(1, std::memory_order_relaxed);
}
// Leave stealing mode; the last thief to leave (fetch_sub returns 1)
// wakes one sleeping worker so work that appeared during the steal phase
// is not stranded.
inline void thread_state::clear_stealing() {
  if (M_pool->M_num_thiefs.fetch_sub(1, std::memory_order_relaxed) == 1) {
    notify_one_sleeping();
  }
}
// Try to wake exactly one other worker, scanning from a random start
// index so wakeups are spread evenly across the pool.
inline void thread_state::notify_one_sleeping() {
  std::uniform_int_distribution<std::uint32_t> dist(0,
                                                    M_pool->M_thread_count - 1);
  const std::uint32_t start = dist(M_rng);
  for (std::uint32_t offset = 0; offset < M_pool->M_thread_count; ++offset) {
    const std::uint32_t index = (start + offset) % M_pool->M_thread_count;
    if (index == M_index) {
      continue;  // never notify ourselves
    }
    if (M_pool->M_thread_states[index]->notify()) {
      return;  // one wakeup is enough
    }
  }
}
// Blocking pop executed by a worker: local queue -> remote mailboxes ->
// stealing -> sleep.  Returns a null task only after request_stop().
inline auto thread_state::pop() -> thread_state::pop_result {
  pop_result result = try_pop();
  while (!result.task) {
    // Advertise ourselves as a thief while probing the other workers.
    set_stealing();
    for (std::size_t i = 0; i < M_pool->M_max_steals; ++i) {
      result = try_steal_any();
      if (result.task) {
        clear_stealing();
        return result;
      }
    }
    std::this_thread::yield();
    clear_stealing();
    std::unique_lock lock{M_mut};
    if (M_stop_requested) {
      return result;  // result.task is null -> worker loop exits
    }
    state expected = state::running;
    if (M_state.compare_exchange_weak(expected, state::sleeping,
                                      std::memory_order_relaxed)) {
      // Re-check the remote mailboxes after publishing `sleeping` so a
      // producer that raced with the transition cannot strand a task.
      // NOTE(review): this path returns while M_state is still
      // `sleeping`; a later notify() will then observe `sleeping` and
      // report a wakeup — confirm that is benign.
      result = try_remote();
      if (result.task) {
        return result;
      }
      M_cv.wait(lock);  // spurious wakeups are fine: the outer loop retries
    }
    lock.unlock();
    M_state.store(state::running, std::memory_order_relaxed);
    result = try_pop();
  }
  return result;
}
// Try to wake this worker.  Returns true only when the exchange observed
// a sleeping worker, i.e. a wakeup was actually needed.
inline auto thread_state::notify() -> bool {
  if (M_state.exchange(state::notified, std::memory_order_relaxed) ==
      state::sleeping) {
    {
      // Acquire and immediately release M_mut: this synchronizes with a
      // racing pop() that set `sleeping` but has not yet entered
      // M_cv.wait(lock), so the notify_one below cannot be lost.
      std::lock_guard lock{M_mut};
    }
    M_cv.notify_one();
    return true;
  }
  return false;
}
// Ask this worker to exit: set the flag under the mutex, then wake it.
inline void thread_state::request_stop() {
  {
    std::scoped_lock lock{M_mut};
    M_stop_requested = true;
  }
  M_cv.notify_one();
}
// Default: one worker per hardware thread.  hardware_concurrency() is
// allowed to return 0 when the value is not computable; fall back to a
// single worker instead of violating the thread_count > 0 precondition.
inline static_thread_pool::static_thread_pool()
    : static_thread_pool(std::max(1u, std::thread::hardware_concurrency())) {}
// Spins up `thread_count` workers.  Construction order matters: every
// thread_state must exist and know its steal victims before any worker
// thread starts running.
inline static_thread_pool::static_thread_pool(std::uint32_t thread_count,
                                              bwos_params params)
    : M_remotes(thread_count),
      M_thread_count(thread_count),
      M_params(params),
      M_thread_states(thread_count) {
  assert(thread_count > 0);
  for (std::uint32_t index = 0; index < thread_count; ++index) {
    M_thread_states[index].emplace(this, index, params);
  }
  std::vector<workstealing_victim> victims{};
  for (auto& state : M_thread_states) {
    victims.emplace_back(state->as_victim());
  }
  // Each worker stores every victim except itself.
  for (auto& state : M_thread_states) {
    state->victims(victims);
  }
  M_threads.reserve(thread_count);
  try {
    for (std::uint32_t i = 0; i < thread_count; ++i) {
      M_threads.emplace_back([this, i] { run(i); });
    }
  } catch (...) {
    // Thread creation failed part-way: stop and join the workers that
    // did start before rethrowing.
    request_stop();
    join();
    throw;
  }
}
// Stops every worker and joins the threads before members are destroyed.
inline static_thread_pool::~static_thread_pool() {
  request_stop();
  join();
}
inline void static_thread_pool::request_stop() noexcept {
  for (auto& state : M_thread_states) {
    state->request_stop();
  }
}
inline void static_thread_pool::run(std::uint32_t thread_index) noexcept {
  assert(thread_index < M_thread_count);
  while (true) {
    auto [task, queue_index] = M_thread_states[thread_index]->pop();
    if (!task) {
      return;
    }
    task->execute(task);
  }
}
// Joins and discards all worker threads.  Called from both the
// constructor's failure path and the destructor; clearing the vector
// makes the second call a no-op, and the joinable() guard keeps a join
// on a non-joinable thread (which would throw inside noexcept) from
// terminating the program.
inline void static_thread_pool::join() noexcept {
  for (std::thread& t : M_threads) {
    if (t.joinable()) {
      t.join();
    }
  }
  M_threads.clear();
}
// Enqueue from the current thread, using (and lazily creating) its
// per-thread remote queue.
inline void static_thread_pool::enqueue(task_base* task) noexcept {
  remote_queue* queue = get_remote_queue();
  enqueue(*queue, task);
}
// Number of worker threads (same value as available_parallelism(),
// widened to std::size_t).
inline auto static_thread_pool::num_threads() const noexcept -> std::size_t {
  return static_cast<std::size_t>(M_thread_count);
}
// Enqueue via a remote queue.  Only the owning thread may use a remote
// queue, so if the caller hands us one owned by another thread we swap in
// this thread's own queue first.
inline void static_thread_pool::enqueue(remote_queue& queue,
                                        task_base* task) noexcept {
  static thread_local std::thread::id this_id = std::this_thread::get_id();
  remote_queue* correct_queue =
      this_id == queue.M_id ? &queue : get_remote_queue();
  std::size_t idx = correct_queue->M_index;
  if (idx < M_thread_states.size()) {
    // We are on a pool worker thread: push straight into its local queue.
    M_thread_states[idx]->push_local(task);
    return;
  }
  // External producer: post to a randomly chosen worker's mailbox.
  const std::size_t thread_index =
      std::uint64_t(std::random_device{}()) % num_threads();
  // Fix: push through correct_queue — the original pushed into the
  // caller-supplied `queue` even when it belonged to a different thread,
  // bypassing the substitution made above.
  correct_queue->M_queues[thread_index].push_front(task);
  M_thread_states[thread_index]->notify();
}
// Enqueue `task` to a specific worker's mailbox (index wrapped modulo
// the thread count) and wake that worker.
inline void static_thread_pool::enqueue(remote_queue& queue, task_base* task,
                                        std::size_t thread_index) noexcept {
  const std::size_t target = thread_index % M_thread_count;
  queue.M_queues[target].push_front(task);
  M_thread_states[target]->notify();
}
}  // namespace coasync::detail
#endif  // COASYNC_DETAIL_STATIC_THREAD_POOL_INCLUDED