#pragma once

#include <spdlog/spdlog.h>

#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <future>
#include <limits>
#include <mutex>
#include <queue>
#include <thread>
#include <tuple>
#include <vector>

#include "task_set.hpp"

// Initial scheduling weight assigned to a slot by Worker::init_value().
// NOTE(review): the identifier misspells "DEFAULT"; kept as-is because other
// translation units may already reference it.  `inline` (C++17) gives every
// TU that includes this header one shared definition instead of a per-TU copy.
inline constexpr double DEFUALT_PRIORITIES = 1;

class Scheduler;

/**
 * @class Worker
 * @brief A scheduler-owned worker that runs one loop thread (CPU or GPU
 * flavor) and selects among up to 64 active slots using per-slot pass /
 * priority bookkeeping (stride-scheduling style, judging by
 * _global_pass/_global_stride — confirm against the .cpp).
 *
 * Thread-safety: slot state (_passes, _priorities, _active_slots) is guarded
 * by _mutex; _running is atomic so start()/stop() may be called from any
 * thread.  Non-copyable and non-movable because the loop thread captures
 * `this`.
 */
class Worker {
 public:
  /// @param id worker index; @param scheduler owning scheduler (not owned
  /// here); @param is_gpu selects the GPU loop (_run_on_gpu) over the CPU one.
  Worker(size_t id, Scheduler *scheduler, bool is_gpu = false);

  Worker(Worker &&) = delete;
  Worker(const Worker &) = delete;
  Worker &operator=(Worker &&) = delete;
  Worker &operator=(const Worker &) = delete;
  ~Worker() { stop(); }

  /// Launch the worker thread; no-op if already running.
  void start() {
    if (!_running.exchange(true)) {
      _thread = _is_gpu ? std::thread(&Worker::_run_on_gpu, this)
                        : std::thread(&Worker::_run, this);
    }
  }

  /// Request shutdown, wake the loop, and join the thread; no-op if the
  /// worker is not running.
  void stop() {
    if (_running.exchange(false)) {
      _cv.notify_one();
      // Fix: guard the join — joining a non-joinable thread (e.g. start()
      // threw before assigning _thread) calls std::terminate.
      if (_thread.joinable()) {
        _thread.join();
      }
    }
  }

  /// Reset a slot's pass/priority to their defaults.
  /// NOTE(review): assumes the constructor sized _passes/_priorities to
  /// cover slot_index — confirm; operator[] does not bounds-check.
  void init_value(uint64_t slot_index) {
    std::unique_lock<std::mutex> lock(_mutex);
    _passes[slot_index] = _global_pass;
    _priorities[slot_index] = DEFUALT_PRIORITIES;
  }

  /// Mark a slot active and wake the worker loop.
  void notify_active(uint64_t slot_index) {
    std::unique_lock<std::mutex> lock(_mutex);
    spdlog::debug("worker[{}] receive active slot on {}", _id, slot_index);
    _active_slots |= (1ULL << slot_index);
    _cv.notify_one();
  }

  /// Mark a slot inactive.  No wake-up: the loop simply stops selecting it.
  void notify_finish(uint64_t slot_index) {
    std::unique_lock<std::mutex> lock(_mutex);
    spdlog::debug("worker[{}] receive finish on {}", _id, slot_index);
    _active_slots &= ~(1ULL << slot_index);
  }

  /// Bias a slot by NUMA locality: NUMA-remote slots are zeroed and pushed
  /// far into the future; local slots gain weight.
  void adjust_priority(uint64_t slot_index, size_t numa_hint) {  // NOLINT
    // Fix: take the lock on BOTH paths — the original incremented
    // _priorities[slot_index] unlocked in the else-branch, a data race with
    // init_value() and the worker loop.
    std::unique_lock<std::mutex> lock(_mutex);
    if (numa_hint != _numa_id) {
      _priorities[slot_index] = 0;
      // Near-max pass makes the slot sort last; the 1e293 offset presumably
      // keeps later additions from overflowing to inf — TODO confirm intent.
      _passes[slot_index] = std::numeric_limits<double>::max() - 1e293;  // NOLINT
    } else {
      _priorities[slot_index] += 1;
    }
  }

 private:
  void _run();         // CPU loop body (defined in the .cpp)
  void _run_on_gpu();  // GPU loop body (defined in the .cpp)
  void _loop();
  size_t _select_slot();

  size_t _id = 0;
  size_t _numa_id = 0;
  bool _is_gpu = false;
  Scheduler *_scheduler = nullptr;  // non-owning back-pointer

  // Fix: _running was default-constructed; in C++17 a default-constructed
  // std::atomic<bool> holds an indeterminate value, so the destructor's
  // stop() could read garbage `true` and join a thread that was never
  // started.
  std::atomic<bool> _running{false};
  std::thread _thread;

  std::mutex _mutex;  // guards all slot state below
  std::condition_variable _cv;

  double _global_pass = 0.0;
  double _global_stride = 0.0;
  std::vector<double> _passes;      // per-slot pass values
  std::vector<double> _priorities;  // per-slot scheduling weights

  uint64_t _active_slots = 0;  // bitmask: bit i set => slot i has work
};

////////////////////////////////////////////////////////////////////
//                        DEPRECATED BELOW                        //
////////////////////////////////////////////////////////////////////

/**
 * @class OldWorker
 * @brief One worker bound to one cpu core
 * with one loop thread fetching local tasks
 *
 * DEPRECATED: superseded by Worker above; kept only for reference.
 */
class OldWorker {
 public:
  /// @param worker_id index identifying this worker
  /// @param gpu_worker run the GPU loop (_run_on_gpu) instead of the CPU one
  explicit OldWorker(size_t worker_id, bool gpu_worker = false);

  // Non-copyable and non-movable: the loop thread captures `this`.
  OldWorker(OldWorker &&) = delete;
  OldWorker(const OldWorker &) = delete;
  OldWorker &operator=(OldWorker &&) = delete;
  OldWorker &operator=(const OldWorker &) = delete;
  ~OldWorker();

  /// Enqueue func(args...) on this worker's queue and return a future for
  /// its result (definition follows the class).
  template <typename F, typename... Args>
  auto submit_task(F &&func, Args &&...args)
      -> std::future<std::invoke_result_t<F, Args...>>;

  /**
   * @brief Stops the loop thread; called from the destructor.
   */
  void stop();

 private:
  void _loop();
  void _run();
  void _run_on_gpu();

  bool _gpu_worker;  // NOTE(review): no in-class initializer — presumably
                     // set by the constructor; confirm.

  /**
   * current worker's thread object
   */
  std::thread _thread;
  size_t _worker_id;

  /**
   * every worker has its own task queue
   */
  std::queue<std::function<void()>> _tasks;  // guarded by _queue_mutex
  std::mutex _queue_mutex;
  std::condition_variable _condition;  // signaled when a task is enqueued

  std::atomic<bool> _stop{false};  // loop-exit flag
};

/**
 * @brief Package func(args...) as a task, enqueue it on this worker's queue,
 * wake the loop thread, and return a future for the result.
 *
 * @param func callable to run on the worker thread (decay-copied)
 * @param args arguments to bind (decay-copied)
 * @return future that becomes ready when the worker executes the task
 *
 * NOTE(review): if the worker has been stopped, the task may never run and
 * the future never becomes ready — confirm stop()/shutdown semantics.
 */
template <typename F, typename... Args>
auto OldWorker::submit_task(F &&func, Args &&...args)
    -> std::future<std::invoke_result_t<F, Args...>> {
  using return_type = std::invoke_result_t<F, Args...>;

  // Lambda + bound tuple replaces std::bind: same decay-copy / lvalue-invoke
  // semantics, without bind's placeholder and nested-bind surprises.
  auto task = std::make_shared<std::packaged_task<return_type()>>(
      [fn = std::forward<F>(func),
       bound = std::make_tuple(std::forward<Args>(args)...)]() mutable {
        return std::apply(fn, bound);
      });

  // Publish the task under the lock...
  {
    std::lock_guard<std::mutex> lock(_queue_mutex);
    _tasks.emplace([task]() { (*task)(); });
  }

  // ...but notify outside it, so the woken thread doesn't immediately block
  // on a still-held mutex.
  _condition.notify_one();
  return task->get_future();
}
