#pragma once

#include <array>
#include <cassert>
#include <cstddef>
#include <fstream>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <tuple>
#include <vector>

#include "task_set.hpp"
#include "worker.hpp"

// Capacity of the scheduler's global slot table (_global_slots).
// `inline` (C++17) guarantees a single entity across all translation units
// that include this header.
inline constexpr size_t MAX_RESOURCE_GROUPS = 64;

class Scheduler {
 public:
  explicit Scheduler(size_t cpu_workers = std::thread::hardware_concurrency(),
                     size_t gpu_workers = 0);

  Scheduler(Scheduler&&) = delete;
  Scheduler(const Scheduler&) = delete;
  Scheduler& operator=(Scheduler&&) = delete;
  Scheduler& operator=(const Scheduler&) = delete;

  ~Scheduler() { stop(); }

  // start all workers
  void start() {
    for (auto& worker : _cpu_workers) {
      worker->start();
    }
    for (auto& worker : _gpu_workers) {
      worker->start();
    }
  }

  // stop all workers
  void stop() {
    for (auto& worker : _cpu_workers) {
      worker->stop();
    }
    for (auto& worker : _gpu_workers) {
      worker->stop();
    }
  }

  // to collect analyse
  void collect_analyse(size_t worker_id,
                       std::tuple<size_t, double, double> info) {
    _analyse[worker_id].push_back(info);
  }

  void export_analyse(const std::string& filename) {
    std::ofstream outFile(filename, std::ios::binary);

    if (!outFile.is_open()) {
      std::cerr << "Failed to open file: " << filename << '\n';
      return;
    }

    size_t numThreads = _analyse.size();
    outFile.write(reinterpret_cast<const char*>(&numThreads),
                  sizeof(numThreads));

    for (size_t i = 0; i < numThreads; ++i) {
      size_t numTasks = _analyse[i].size();
      outFile.write(reinterpret_cast<const char*>(&numTasks), sizeof(numTasks));

      for (const auto& task : _analyse[i]) {
        size_t taskId = std::get<0>(task);
        double startTime = std::get<1>(task) - _global_start;
        double endTime = std::get<2>(task) - _global_start;

        outFile.write(reinterpret_cast<const char*>(&taskId), sizeof(taskId));
        outFile.write(reinterpret_cast<const char*>(&startTime),
                      sizeof(startTime));
        outFile.write(reinterpret_cast<const char*>(&endTime), sizeof(endTime));
      }
    }

    outFile.close();
    std::cout << "Data exported to " << filename << " successfully." << '\n';
  }

  size_t get_cpu_worker_num() { return _cpu_workers.size(); }

  size_t get_gpu_worker_num() { return _gpu_workers.size(); }

  size_t submit_resource_group(std::unique_ptr<ResourceGroup> resource_group);

  void reset_resource_group(size_t slot_index);

  std::shared_ptr<ResourceGroup> get_resource_group(size_t slot_index);

  void _broadcast_finish(size_t slot_index, bool gpu = false) {
    if (gpu) {
      for (auto& worker : _gpu_workers) {
        worker->notify_finish(slot_index);
      }
    } else {
      for (auto& worker : _cpu_workers) {
        worker->notify_finish(slot_index);
      }
    }
  }

  void _broadcast_active(size_t slot_index, bool gpu = false) {
    if (gpu) {
      for (auto& worker : _gpu_workers) {
        worker->notify_active(slot_index);
      }
    } else {
      for (auto& worker : _cpu_workers) {
        worker->notify_active(slot_index);
      }
    }
  }

  void _broadcast_init(size_t slot_index, int numa_hint) {  // NOLINT
    for (auto& worker : _gpu_workers) {
      worker->init_value(slot_index);
    }
    for (auto& worker : _cpu_workers) {
      worker->init_value(slot_index);
    }
    if (numa_hint >= 0) {
      for (auto& worker : _cpu_workers) {
        worker->adjust_priority(slot_index, numa_hint);
      }
    }
  }

 private:
  std::vector<std::unique_ptr<Worker>> _cpu_workers;
  std::vector<std::unique_ptr<Worker>> _gpu_workers;

  std::array<std::shared_ptr<ResourceGroup>, MAX_RESOURCE_GROUPS> _global_slots;
  std::queue<std::unique_ptr<ResourceGroup>> _wait_queue;

  std::vector<std::vector<std::tuple<size_t, double, double>>> _analyse;
  double _global_start = 0;

  std::mutex _mutex;
};

////////////////////////////////////////////////////////////////////
//                        DEPRECATED BELOW                        //
////////////////////////////////////////////////////////////////////

/**
 * @class OldScheduler
 * @brief Deprecated predecessor of Scheduler: owns a pool of OldWorker
 *        threads addressed by index (CPU ids first, then GPU ids).
 */
class OldScheduler {
 public:
  /// @brief Create the pool.
  /// @param cpu_workers number of CPU worker threads (defaults to hardware
  ///        concurrency)
  /// @param gpu_workers number of GPU worker threads
  explicit OldScheduler(
      size_t cpu_workers = std::thread::hardware_concurrency(),
      size_t gpu_workers = 0);

  /**
   * @brief submit a task to a specific worker
   *
   * @param worker_id [0, cpu_workers) is a cpu thread while [cpu_workers,
   * cpu_workers + gpu_workers) is a gpu thread
   * @return future that yields the task's result
   */
  template <typename F, typename... Args>
  auto submit_task(size_t worker_id, F&& func, Args&&... args)
      -> std::future<std::invoke_result_t<F, Args...>>;

  /// @brief Number of CPU workers in the pool.
  [[nodiscard]] auto cpu_worker_num() const -> size_t { return _cpu_workers; }

  /// @brief Number of GPU workers in the pool.
  [[nodiscard]] auto gpu_worker_num() const -> size_t { return _gpu_workers; }

 private:
  size_t _max_cpu_core = std::thread::hardware_concurrency();
  int _max_gpu_device = 0;

  size_t _cpu_workers;  // CPU worker count (also the cpu/gpu id boundary)
  size_t _gpu_workers;  // GPU worker count
  std::vector<std::unique_ptr<OldWorker>> _workers;  // all workers, cpu then gpu
};

// Perfect-forwards the callable and its arguments to the chosen worker's own
// submit_task and returns the future it produces.
// Precondition (debug-checked only): worker_id < _workers.size().
template <typename F, typename... Args>
auto OldScheduler::submit_task(size_t worker_id, F&& func, Args&&... args)
    -> std::future<std::invoke_result_t<F, Args...>> {
  assert(worker_id < _workers.size() && "Invalid worker ID");
  return _workers[worker_id]->submit_task(std::forward<F>(func),
                                          std::forward<Args>(args)...);
}
