#include "scheduler.hpp"

#include "cuda_utils.hpp"

// Construct the scheduler and spin up its worker pools.
//
// cpu_workers / gpu_workers: number of CPU-bound and GPU-bound workers to
// create. Workers are pinned round-robin across the detected CPU cores and
// GPU devices respectively.
Scheduler::Scheduler(size_t cpu_workers, size_t gpu_workers) {
  global_id = 0;

  int max_gpu = 0;
  size_t max_cpu = std::thread::hardware_concurrency();
  // hardware_concurrency() is allowed to return 0 when the value is not
  // computable; clamp to 1 so the `i % max_cpu` below can never divide by
  // zero. (Mirrors the existing guard on max_gpu.)
  max_cpu = (max_cpu == 0) ? 1 : max_cpu;
  get_device_count(&max_gpu);
  max_gpu = (max_gpu == 0) ? 1 : max_gpu;

  _cpu_workers.reserve(cpu_workers);
  for (size_t i = 0; i < cpu_workers; ++i) {
    _cpu_workers.emplace_back(std::make_unique<Worker>(i % max_cpu, this));
  }

  _gpu_workers.reserve(gpu_workers);
  for (size_t i = 0; i < gpu_workers; ++i) {
    _gpu_workers.emplace_back(
        std::make_unique<Worker>(i % max_gpu, this, true));
  }

  // One analysis slot per worker (CPU workers first, then GPU workers).
  _analyse.resize(cpu_workers + gpu_workers);
}

// Place a resource group into the first free global slot and notify workers.
//
// Takes ownership of `resource_group`. Returns the slot index on success, or
// static_cast<size_t>(-1) (SIZE_MAX) when every slot is occupied, in which
// case the group is parked on _wait_queue and will be promoted by
// reset_resource_group() when a slot frees up.
size_t Scheduler::submit_resource_group(
    std::unique_ptr<ResourceGroup> resource_group) {
  std::unique_lock<std::mutex> lock(_mutex);

  // Find the first free slot (linear scan; MAX_RESOURCE_GROUPS is small).
  size_t slot_index = 0;
  while (slot_index < MAX_RESOURCE_GROUPS &&
         _global_slots[slot_index] != nullptr) {
    ++slot_index;
  }

  if (slot_index >= MAX_RESOURCE_GROUPS) {
    _wait_queue.push(std::move(resource_group));
    // Explicit sentinel: the original `return -1;` relied on implicit
    // signed-to-unsigned wraparound. The value callers observe is unchanged.
    return static_cast<size_t>(-1);
  }

  resource_group->_set_context(this, slot_index);

  // Read scheduling hints before the unique_ptr is consumed by the move.
  bool gpu = resource_group->_get_task_set()->is_gpu();
  int numa_hint = resource_group->_get_task_set()->numa_hint();

  _global_slots[slot_index] = std::move(resource_group);

  // NOTE(review): high_resolution_clock has an unspecified epoch; this is
  // fine only if every reader of _global_start uses the same clock — confirm,
  // and consider steady_clock for elapsed-time measurement.
  _global_start =
      std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
          std::chrono::high_resolution_clock::now().time_since_epoch())
          .count();

  // broadcast to workers
  _broadcast_init(slot_index, numa_hint);
  _broadcast_active(slot_index, gpu);

  return slot_index;
}

// Free the given slot and, if any group is parked on _wait_queue, promote the
// oldest one into it. The worker broadcasts are deliberately issued BEFORE the
// slot is filled (see inline comment) — do not reorder.
void Scheduler::reset_resource_group(size_t slot_index) {
  std::unique_lock<std::mutex> lock(_mutex);
  spdlog::debug("reset slot index {}", slot_index);
  if (!_wait_queue.empty()) {
    // Pull scheduling hints from the queued group before it is moved.
    bool gpu = _wait_queue.front()->_get_task_set()->is_gpu();
    int numa_hint = _wait_queue.front()->_get_task_set()->numa_hint();
    _broadcast_init(slot_index, numa_hint);
    _broadcast_active(slot_index,
                      gpu);  // broadcast first to avoid scheduling in advance
    _wait_queue.front()->_set_context(this, slot_index);
    _global_slots[slot_index] = std::move(_wait_queue.front());
    _wait_queue.pop();
  } else {
    // No queued work: mark the slot free for future submissions.
    _global_slots[slot_index] = nullptr;
  }
}

// Thread-safe lookup of the resource group occupying `slot_index`.
// Returns nullptr for an empty slot or an out-of-range index.
std::shared_ptr<ResourceGroup> Scheduler::get_resource_group(
    size_t slot_index) {
  std::lock_guard<std::mutex> guard(_mutex);
  return (slot_index < MAX_RESOURCE_GROUPS) ? _global_slots[slot_index]
                                            : nullptr;
}

////////////////////////////////////////////////////////////////////
//                        DEPRECATED BELOW                        //
////////////////////////////////////////////////////////////////////

// Deprecated scheduler constructor: builds a single worker vector with the
// CPU workers first, then the GPU workers, pinned round-robin over the
// available cores / devices.
OldScheduler::OldScheduler(size_t cpu_workers, size_t gpu_workers)
    : _cpu_workers(cpu_workers), _gpu_workers(gpu_workers) {
  get_device_count(&_max_gpu_device);
  if (_max_gpu_device == 0) {
    _max_gpu_device = 1;  // guarantee the modulo below never divides by zero
  }

  const size_t total_workers = cpu_workers + gpu_workers;
  _workers.reserve(total_workers);
  for (size_t idx = 0; idx < total_workers; ++idx) {  // NOLINT
    if (idx < cpu_workers) {
      _workers.emplace_back(std::make_unique<OldWorker>(idx % _max_cpu_core));
    } else {
      _workers.emplace_back(std::make_unique<OldWorker>(
          (idx - cpu_workers) % _max_gpu_device, true));
    }
  }
}
