#include "worker.hpp"

#include <numa.h>
#include <sched.h>

#include <functional>
#include <iostream>
#include <limits>
#include <mutex>
#include <numeric>
#include <thread>
#include <utility>

#include "cuda_utils.hpp"
#include "scheduler.hpp"

// Construct a worker bound (logically) to CPU `id`, recording the NUMA node
// that CPU lives on.  `scheduler` is borrowed, not owned.
Worker::Worker(size_t id, Scheduler* scheduler, bool is_gpu)
    : _id(id),
      _numa_id(numa_node_of_cpu(int(id))),
      _scheduler(scheduler),
      _running(false),
      _is_gpu(is_gpu) {
  // One pass counter per resource-group slot, all starting at zero.
  _passes.assign(MAX_RESOURCE_GROUPS, 0.0);
  // Every slot starts with the same priority.  TODO: dynamic priorities
  _priorities.assign(MAX_RESOURCE_GROUPS, DEFUALT_PRIORITIES);
}

// CPU worker entry point: pin this thread to CPU `_id`, prefer NUMA-local
// memory allocation, then enter the scheduling loop.
void Worker::_run() {
  // bind thread to the CPU whose index equals this worker's id
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(_id, &cpuset);
  if (sched_setaffinity(0, sizeof(cpuset), &cpuset) == -1) {
    // Non-fatal: the worker still runs, just without the affinity guarantee.
    // Terminate the diagnostic with a newline so it doesn't fuse with later
    // output (the original message had no line ending).
    std::cerr << "Failed to set thread affinity to CPU " << _id << '\n';
  }

  // bind memory: allocate from the NUMA node this thread is running on
  numa_set_localalloc();

  _loop();
}

// GPU worker entry point: select the CUDA device matching this worker's id,
// then enter the scheduling loop.
void Worker::_run_on_gpu() {
  // static_cast instead of a C-style cast: greppable and intent-revealing.
  set_cuda_device(static_cast<int>(_id));
  _loop();
}

// Main scheduling loop (stride-scheduling style).
// While running: pick the active slot with the minimum pass value, execute one
// task from that slot's resource group, then advance the slot's pass by
// 1/priority.  Blocks on _cv when no slot is active.
//
// Locking: _mutex protects _active_slots, _passes, _priorities and the
// pass/stride accumulators.  The lock is deliberately dropped around the
// scheduler / task-set calls (see comment below) to avoid deadlock.
void Worker::_loop() {
  // protect _active_slots, _passes, priorities
  std::unique_lock<std::mutex> lock(_mutex);
  while (_running) {
    if (_active_slots != 0) {
      // _select_slot must be called under the lock (reads _active_slots
      // and _passes).
      size_t selected_slot = _select_slot();

      lock.unlock();  // no need to protect

      // get resource group, maybe dead lock with scheduler if lock _mutex
      auto rg = _scheduler->get_resource_group(selected_slot);
      if (rg != nullptr) {
        // get task set
        auto task_set = rg->_get_task_set();
        // execute task set
        if (task_set != nullptr) {
          spdlog::debug("worker[{}] execute task set on {}", _id,
                        selected_slot);
          bool success = task_set->execute_task(_id);

          lock.lock();
          // Skip accounting for zero-priority slots (would divide by zero).
          if (success && _priorities[selected_slot] != 0) {
            // update passes and global pass if execute successfully
            _passes[selected_slot] += 1.0 / _priorities[selected_slot];

            // Global stride is the sum of ALL slot priorities (active or
            // not); the global pass advances by its reciprocal.
            _global_stride = std::accumulate(_priorities.begin(),  // NOLINT
                                             _priorities.end(), 0.0);
            _global_pass += 1.0 / _global_stride;
          }
          // Loop again; the lock is held here, matching the loop invariant.
          continue;
        }
      }

      // No resource group or no task set this round: re-acquire and retry.
      lock.lock();
      continue;
    }

    // No active slots: sleep until there is work or we are told to stop.
    spdlog::debug("worker[{}] into wait", _id);
    _cv.wait(lock, [this] { return !_running || _active_slots != 0; });
  }
}

// Return the index of the active slot with the smallest pass value, or
// MAX_RESOURCE_GROUPS when no slot is active.  Caller must hold _mutex
// (reads _active_slots and _passes).
size_t Worker::_select_slot() {
  size_t best_slot = MAX_RESOURCE_GROUPS;
  double best_pass = std::numeric_limits<double>::max();

  for (size_t slot = 0; slot < MAX_RESOURCE_GROUPS; ++slot) {
    const bool active = (_active_slots & (1ULL << slot)) != 0U;
    // Strict < keeps the lowest-index slot on ties, same as the original.
    if (active && _passes[slot] < best_pass) {
      best_pass = _passes[slot];
      best_slot = slot;
    }
  }

  return best_slot;
}

////////////////////////////////////////////////////////////////////
//                        DEPRECATED BELOW                        //
////////////////////////////////////////////////////////////////////

// Spawn the worker thread immediately on construction: GPU workers enter
// _run_on_gpu, CPU workers enter _run.
OldWorker::OldWorker(size_t worker_id, bool gpu_worker)
    : _worker_id(worker_id), _gpu_worker(gpu_worker) {
  if (_gpu_worker) {
    _thread = std::thread(&OldWorker::_run_on_gpu, this);
  } else {
    _thread = std::thread(&OldWorker::_run, this);
  }
}

// Signal the loop to stop, then wait for the worker thread to finish.
OldWorker::~OldWorker() {
  stop();
  // join() on a non-joinable thread throws std::system_error; guard so the
  // destructor can never throw (e.g. after the object was moved from).
  if (_thread.joinable()) {
    _thread.join();
  }
}

// Request shutdown: set the stop flag under the queue lock, then wake all
// waiters.  Notifying AFTER releasing the mutex avoids the "hurry up and
// wait" pattern where a woken thread immediately blocks on the still-held
// lock.
void OldWorker::stop() {
  {
    std::unique_lock<std::mutex> lock(_queue_mutex);
    _stop = true;
  }
  _condition.notify_all();
}

// Task-queue loop: block until a task is queued or stop is requested, then
// run tasks one at a time, executing each OUTSIDE the lock so producers can
// keep enqueueing meanwhile.
void OldWorker::_loop() {
  // NOTE(review): this outer read of _stop is not under _queue_mutex; it is
  // only safe if _stop is atomic (declaration is in worker.hpp, not visible
  // here) — otherwise the locked check inside is the one that matters.
  // Confirm against the header.
  while (!_stop) {  // NOLINT
    std::function<void()> task;

    {
      std::unique_lock<std::mutex> lock(_queue_mutex);
      // Sleep until there is work or shutdown is requested.
      _condition.wait(lock, [this] { return _stop || !_tasks.empty(); });

      if (_stop) {
        return;
      }

      // Take ownership of the next task while still holding the lock.
      task = std::move(_tasks.front());
      _tasks.pop();
    }

    // Execute with the lock released.
    task();
  }
}

// CPU worker entry point: pin this thread to CPU `_worker_id`, prefer
// NUMA-local memory allocation, then enter the task loop.
void OldWorker::_run() {
  // bind thread to the CPU whose index equals this worker's id
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(_worker_id, &cpuset);
  if (sched_setaffinity(0, sizeof(cpuset), &cpuset) == -1) {
    // Non-fatal: the worker still runs, just without the affinity guarantee.
    // Terminate the diagnostic with a newline so it doesn't fuse with later
    // output (the original message had no line ending).
    std::cerr << "Failed to set thread affinity to CPU " << _worker_id << '\n';
  }

  // bind memory: allocate from the NUMA node this thread is running on
  numa_set_localalloc();

  _loop();
}

// GPU worker entry point: select the CUDA device matching this worker's id,
// then enter the task loop.
void OldWorker::_run_on_gpu() {
  // static_cast instead of a C-style cast: greppable and intent-revealing.
  set_cuda_device(static_cast<int>(_worker_id));

  _loop();
}
