#pragma once

#include <spdlog/spdlog.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <future>
#include <iterator>
#include <memory>
#include <mutex>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

// Monotonically increasing id handed to each task set (see
// ResourceGroup::add_task_set). `inline` (C++17) so that every translation
// unit including this header shares ONE counter; the previous header-scope
// `static` gave each TU its own copy, producing duplicate ids across TUs.
inline int global_id = 0;  // NOLINT

// Trait: is_future<T>::value is true iff T is a std::future or
// std::shared_future specialization.
template <typename T>
struct is_future : std::false_type {};

template <typename T>
struct is_future<std::future<T>> : std::true_type {};

template <typename T>
struct is_future<std::shared_future<T>> : std::true_type {};

// Trait: remove_future<T>::type unwraps std::future<T> / std::shared_future<T>
// to T; any other type is passed through unchanged.
template <typename T>
struct remove_future {
  using type = T;
};

template <typename T>
struct remove_future<std::future<T>> {
  using type = T;
};

template <typename T>
struct remove_future<std::shared_future<T>> {
  using type = T;
};

// Convenience alias for remove_future<T>::type.
template <typename T>
using remove_future_t = typename remove_future<T>::type;

// Trait: is_ptr<T>::value is true iff T is a std::shared_ptr specialization.
template <typename T>
struct is_ptr : std::false_type {};

template <typename T>
struct is_ptr<std::shared_ptr<T>> : std::true_type {};

// Trait: remove_ptr<T>::type unwraps std::shared_ptr<T> to T; any other type
// is passed through unchanged.
template <typename T>
struct remove_ptr {
  using type = T;
};

template <typename T>
struct remove_ptr<std::shared_ptr<T>> {
  using type = T;
};

// Convenience alias for remove_ptr<T>::type.
template <typename T>
using remove_ptr_t = typename remove_ptr<T>::type;

// Adaptive-chunking parameters (all times in milliseconds).
constexpr double TMIN = 0.1;   // tmin = 0.1ms
constexpr double TMAX = 2;     // tmax = 2ms
constexpr double ALPHA = 0.8;  // T = (1 - ALPHA) * T_old + ALPHA * T_new
constexpr size_t INIT_CHUNK = 16;  // initial chunk size before calibration

// Finalizer for task functions returning void: per-chunk results are `true`
// placeholders, so the vector is ignored and a single `true` is returned.
constexpr auto VOID_FINALIZE =
    [](std::vector<bool> &&results) -> bool {  // NOLINT
  return true;
};

// Finalizer that forwards the accumulated per-chunk results unchanged.
template <typename T>
constexpr auto MOVE_RESULTS =
    [](std::vector<T> &&results) { return std::move(results); };

class ResourceGroup;
class Scheduler;

/**
 * @brief Type-erased interface for a schedulable set of tasks.
 * Non-copyable and non-movable: schedulers/workers hold references to it.
 */
class TaskSetBase {
 public:
  TaskSetBase() = default;
  TaskSetBase(TaskSetBase &&) = delete;
  TaskSetBase(const TaskSetBase &) = delete;
  TaskSetBase &operator=(TaskSetBase &&) = delete;
  TaskSetBase &operator=(const TaskSetBase &) = delete;

  virtual ~TaskSetBase() = default;

  /// Run one unit of work; returns false when there is nothing left to run.
  virtual bool execute_task(size_t worker_id) = 0;
  /// True for GPU task sets (run once by a single worker), false for
  /// chunked CPU task sets.
  virtual bool is_gpu() = 0;
  /// Preferred NUMA node, or -1 for no preference.
  virtual int numa_hint() = 0;
  /// Invoked by ResourceGroup::add_task_set to attach the owning group/id.
  virtual void _set_context(ResourceGroup *rg, size_t id) = 0;
  /// Invoked once before execution with the number of participating workers.
  virtual void _init_task_set(size_t worker_num) = 0;
};

/**
 * @brief An ordered sequence of task sets executed one after another on a
 * scheduler slot; only the task set at _current_task_set_index is active.
 * Several member functions are only declared here and defined elsewhere.
 */
class ResourceGroup {
 public:
  /**
   * @brief add task set, but not in parallel
   *
   * Assigns the next global id and appends; not thread-safe by design.
   */
  void add_task_set(std::unique_ptr<TaskSetBase> task_set) {
    task_set->_set_context(this, global_id++);
    _task_sets.push_back(std::move(task_set));
  }

  // advance _current_task_set_index to the next task set (defined elsewhere)
  void _next_task_set();

  // attach the owning scheduler and the slot this group occupies
  void _set_context(Scheduler *scheduler, size_t slot_index);

  // Returns the active task set, or nullptr once all task sets completed.
  std::shared_ptr<TaskSetBase> _get_task_set() {
    // NOTE(review): _current_task_set_index is atomic<int>; load() is widened
    // to size_t here — assumes the index never goes negative. TODO confirm.
    size_t index = _current_task_set_index.load();
    if (index >= _task_sets.size()) {
      return nullptr;
    }

    return _task_sets[index];
  }

  // Future resolved when the whole group has finished.
  std::future<bool> get_future() { return _finish.get_future(); }

  // number of CPU workers serving this group (defined elsewhere)
  size_t _get_cpu_worker_num();

  // record profiling info: tuple is (task-set id, start ms, end ms)
  void _collect_analyse(size_t worker_id,
                        std::tuple<size_t, double, double> info);

  // notify workers that the current task set stopped handing out work
  void _broadcast_finish(bool is_gpu);

  ResourceGroup() = default;
  ResourceGroup(ResourceGroup &&) = delete;
  ResourceGroup(const ResourceGroup &) = delete;
  ResourceGroup &operator=(ResourceGroup &&) = delete;
  ResourceGroup &operator=(const ResourceGroup &) = delete;
  ~ResourceGroup() = default;

 private:
  // shared_ptr on purpose: workers may still hold a task set after the group
  // releases it, so group destruction must not destroy an in-use task set
  // (previously a BUG when ownership was unique).
  std::vector<std::shared_ptr<TaskSetBase>> _task_sets;
  std::atomic<int> _current_task_set_index = 0;
  Scheduler *_scheduler = nullptr;  // owning scheduler (set via _set_context)
  size_t _slot_index = 0;           // slot occupied in the scheduler
  std::promise<bool> _finish;       // fulfilled when the group completes
};

/**
 * @brief Chunked CPU task set. Worker threads repeatedly call execute_task(),
 * which carves the next chunk off [begin, end), runs the task function on it,
 * and adapts the chunk size so one chunk takes roughly TMIN..TMAX ms based on
 * the measured, ALPHA-smoothed throughput.
 *
 * @tparam FutureInputType - future/shared_future/shared_ptr delivering the
 *         input container when the range is not known at construction time
 * @tparam Iter - iterator type over the input range
 * @tparam ResultType - per-chunk result collected into a vector
 * @tparam FinalResultType - value produced by the finalize function
 * @tparam TaskFunc - callable (Iter, Iter, resolved Args...) -> ResultType|void
 * @tparam FinalizeFunc - callable (std::vector<ResultType>&&) -> FinalResultType
 * @tparam Args - extra task arguments; std::future arguments are resolved in
 *         _init_task_set, and a std::pair<Iter, Iter> argument is replaced by
 *         the full range
 */
template <typename FutureInputType, typename Iter, typename ResultType,
          typename FinalResultType, typename TaskFunc, typename FinalizeFunc,
          typename... Args>
class TaskSet : public TaskSetBase {
 public:
  using TransformedArgs = std::tuple<remove_future_t<std::decay_t<Args>>...>;
  // used to judge if the func returns void
  using TrueResultType =
      std::invoke_result_t<TaskFunc, Iter, Iter,
                           remove_future_t<std::decay_t<Args>>...>;
  enum Status {
    START,   // calibration: chunk size doubles until a chunk is long enough
    NORMAL,  // steady state: chunk size follows the smoothed throughput
    END,     // the range is exhausted
  };
  TaskSet(Iter begin, Iter end, TaskFunc &&func, FinalizeFunc &&finalize,
          Args &&...args)
      : _begin(begin),
        _end(end),
        _current(begin),
        _operator(std::move(func)),
        _finalize(std::move(finalize)),
        _args(std::make_tuple(std::forward<Args>(args)...)) {}

  bool is_gpu() override { return false; }

  /**
   * @brief execute the next task chunk
   *
   * @param worker_id - id of the calling worker (forwarded to the profiler)
   * @return false when the task set is already finished, true otherwise
   */
  bool execute_task(size_t worker_id) override {
    std::unique_lock<std::mutex> latch(_mutex);

    if (_status == Status::END) {
      return false;
    }

    // 1. get the chunk
    auto begin = _current;
    size_t left = std::distance(_current, _end);
    size_t count = 0;
    // ">=" so that a chunk consuming exactly the remaining elements also
    // finishes the set; with ">" an extra empty chunk would be dispatched
    // afterwards, running the operator on an empty range and pushing a
    // spurious result.
    if (_chunk_size >= left) {
      count = left;
      _current = _end;
      _status = Status::END;
      if (_resource_group != nullptr) {
        _resource_group->_broadcast_finish(false);
      }
    } else {
      count = _chunk_size;
      _current += _chunk_size;
    }

    auto end = _current;

    ++_active_threads;

    latch.unlock();

    // 2. run the task (outside the lock; only metadata updates are guarded)
    auto apply_args = [&](auto &&...args) {
      return _operator(begin, end, std::forward<decltype(args)>(args)...);
    };

    ResultType result;

    double start_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    // start run
    if constexpr (std::is_void_v<TrueResultType>) {
      std::apply(apply_args, _transformed_args);
      result = true;  // placeholder result for void task functions
    } else {
      result = std::apply(apply_args, _transformed_args);
    }
    // end run
    double end_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    double milliseconds = end_time - start_time;

    latch.lock();
    if (_resource_group != nullptr) {
      _resource_group->_collect_analyse(worker_id, {_id, start_time, end_time});
    }

    // update metadata
    if constexpr (!std::is_void_v<TrueResultType>) {
      _results.push_back(std::move(result));
    }
    --_active_threads;

    if (_status == Status::START) {
      if (2 * milliseconds > _tmax) {
        // chunk is large enough for a stable measurement; go steady state
        _status = Status::NORMAL;
        _throughput = static_cast<double>(count) / milliseconds;
      } else {
        _chunk_size = std::max(_chunk_size, count * 2);
        _tmax -= milliseconds;
      }
    } else if (_status == Status::NORMAL) {
      // exponentially smoothed throughput (items per ms)
      _throughput = (1 - ALPHA) * _throughput +
                    ALPHA * static_cast<double>(count) / milliseconds;

      double t_expect = 0;
      // estimated time per worker to drain the remaining range
      double t_close =
          std::distance(_current, _end) / _throughput / _worker_num;

      t_expect = t_close < TMAX ? std::max(t_close, TMIN) : TMAX;
      _chunk_size = static_cast<size_t>(std::ceil(_throughput * t_expect));
    }

    spdlog::debug(
        "TaskSet: Execution time: {:.2f}ms, Throughput: {:.2f} items/ms, "
        "New chunk size: {}",
        milliseconds, _throughput, _chunk_size);

    // 3. finalize once the last active chunk has completed
    if (_status == Status::END) {
      if (_active_threads == 0) {
        latch.unlock();
        _next(worker_id);
      }
    }

    return true;
  }

  /// Future resolved with the finalized result when the set completes.
  std::future<FinalResultType> get_future() { return _promise.get_future(); }

  /// Attach the future/pointer that will deliver the input container.
  void _set_future(FutureInputType &&future) {
    _input_future = std::move(future);
  }

  /**
   * @brief Resolve the input range and the future arguments; must run before
   * the first execute_task call.
   *
   * @param n - number of workers that will pull chunks from this set
   */
  void _init_task_set(size_t n) override {
    _worker_num = n;
    if constexpr (is_ptr<FutureInputType>::value) {  //  is a ptr type
      _begin = _input_future->begin();
      _end = _input_future->end();
    } else {  // is a future type
      if (_input_future.valid()) {
        _input = _input_future.get();

        if constexpr (is_ptr<remove_future_t<FutureInputType>>::
                          value) {  // future of a pointer
          _begin = _input->begin();
          _end = _input->end();
        } else {
          _begin = _input.begin();
          _end = _input.end();
        }
      }
    }

    _current = _begin;

    _resolve_args();
  }

  void _set_context(ResourceGroup *rg, size_t id) override {
    _resource_group = rg;
    _id = id;
  }

  int numa_hint() override { return _numa_hint; }

  void set_numa_hint(int numa_hint) { _numa_hint = numa_hint; }

  TaskSet(TaskSet &&) = delete;
  TaskSet(const TaskSet &) = delete;
  TaskSet &operator=(TaskSet &&) = delete;
  TaskSet &operator=(const TaskSet &) = delete;

  ~TaskSet() override = default;

 private:
  Status _status = Status::START;
  size_t _chunk_size = INIT_CHUNK;  // NOLINT
  // value-initialized so class-type iterators (e.g. container iterators)
  // work as well as raw pointers
  Iter _begin{}, _end{}, _current{};

  TaskFunc _operator;
  FinalizeFunc _finalize;
  std::tuple<Args...> _args;          // arguments as supplied by the caller
  TransformedArgs _transformed_args;  // arguments after futures are resolved
  size_t _id = 0;

  // Resolve one argument: futures are awaited, a pair<Iter, Iter> placeholder
  // is replaced by the full range, anything else passes through unchanged.
  template <typename T>
  auto _transform_arg(T &&arg) const {
    if constexpr (is_future<std::decay_t<T>>::value) {
      return arg.get();
    } else if constexpr (std::is_same_v<std::decay_t<T>,
                                        std::pair<Iter, Iter>>) {
      return std::pair<Iter, Iter>{_begin, _end};
    } else {
      return std::forward<T>(arg);
    }
  }

  void _resolve_args() {
    _transformed_args = std::apply(
        [this](auto &&...args) {
          return std::make_tuple(
              _transform_arg(std::forward<decltype(args)>(args))...);
        },
        _args);
  }

  std::vector<ResultType> _results;  // per-chunk results, guarded by _mutex

  double _tmax = TMAX;        // remaining calibration time budget (ms)
  double _throughput = 0;     // count / ms
  size_t _active_threads = 0; // chunks currently executing, guarded by _mutex

  std::mutex _mutex;

  ResourceGroup *_resource_group = nullptr;

  size_t _worker_num = 0;
  int _numa_hint = -1;

  // used to get final result
  std::promise<FinalResultType> _promise;
  [[maybe_unused]] FutureInputType _input_future;
  [[maybe_unused]] remove_future_t<FutureInputType> _input;

  // Finalization; must be invoked exactly once, by the last active worker
  // (the caller that observes END with _active_threads == 0).
  void _next(size_t worker_id) {
    // start run
    double start_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    FinalResultType final_result = _finalize(std::move(_results));
    // end run
    double end_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    if (_resource_group != nullptr) {
      _resource_group->_collect_analyse(worker_id, {_id, start_time, end_time});
    }

    spdlog::debug("next");
    _promise.set_value(std::move(final_result));
    // hand control to the next task set in the group
    if (_resource_group != nullptr) {
      _resource_group->_next_task_set();
    }
  }
};

template <typename ResultType, typename TaskFunc, typename... Args>
class GpuTaskSet : public TaskSetBase {
 public:
  using TransformedArgs = std::tuple<remove_future_t<std::decay_t<Args>>...>;
  // used to judge if the func return void
  using TrueResultType =
      std::invoke_result_t<TaskFunc, remove_future_t<std::decay_t<Args>>...>;

  explicit GpuTaskSet(TaskFunc &&func, Args &&...args)
      : _operator(std::move(func)),
        _args(std::make_tuple(std::forward<Args>(args)...)) {}

  bool is_gpu() override { return true; }

  /**
   * @brief execute the gpu task once
   */
  bool execute_task(size_t worker_id) override {
    if (_running.exchange(true)) {
      return false;
    }

    if (_resource_group != nullptr) {
      _resource_group->_broadcast_finish(true);
    }

    // run the task
    auto apply_args = [&](auto &&...args) {
      return _operator(std::forward<decltype(args)>(args)...);
    };

    ResultType result;

    double start_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    // start run
    if constexpr (std::is_void_v<TrueResultType>) {
      std::apply(apply_args, _transformed_args);
      result = true;
    } else {
      result = std::apply(apply_args, _transformed_args);
    }
    // end run
    double end_time =
        std::chrono::duration_cast<std::chrono::duration<double, std::milli>>(
            std::chrono::high_resolution_clock::now().time_since_epoch())
            .count();
    double milliseconds = end_time - start_time;

    if (_resource_group != nullptr) {
      _resource_group->_collect_analyse(worker_id, {_id, start_time, end_time});
    }

    _promise.set_value(std::move(result));

    // next task set
    if (_resource_group != nullptr) {
      _resource_group->_next_task_set();
    }

    return true;
  }

  std::future<ResultType> get_future() { return _promise.get_future(); }

  int numa_hint() override { return -1; };

  void _init_task_set(size_t /**/) override { _resolve_args(); }

  void _set_context(ResourceGroup *rg, size_t id) override {
    _resource_group = rg;
    _id = id;
  }

  GpuTaskSet(GpuTaskSet &&) = delete;
  GpuTaskSet(const GpuTaskSet &) = delete;
  GpuTaskSet &operator=(GpuTaskSet &&) = delete;
  GpuTaskSet &operator=(const GpuTaskSet &) = delete;

  ~GpuTaskSet() override = default;

 private:
  std::atomic<bool> _running = false;
  TaskFunc _operator;

  std::tuple<Args...> _args;
  TransformedArgs _transformed_args;
  size_t _id = 0;

  template <typename T>
  auto _transform_arg(T &&arg) const {
    if constexpr (is_future<std::decay_t<T>>::value) {
      return arg.get();
    } else {
      return std::forward<T>(arg);
    }
  }

  void _resolve_args() {
    _transformed_args = std::apply(
        [this](auto &&...args) {
          return std::make_tuple(
              _transform_arg(std::forward<decltype(args)>(args))...);
        },
        _args);
  }

  std::promise<ResultType> _promise;
  ResourceGroup *_resource_group = nullptr;
};

/**
 * @brief Helper function to create a task set with type deduction
 *
 * @tparam Iter - Iterator type
 * @tparam TaskFunc - Type of the task function
 * @tparam FinalizeFunc - Type of the finalize function
 * @tparam Args - Types of additional arguments
 * @param begin - Start of the range
 * @param end - End of the range
 * @param func - Task function
 * @param finalize - Finalize function
 * @param args - Additional arguments
 * @return A new task set
 */
template <typename Iter, typename TaskFunc, typename FinalizeFunc,
          typename... Args>
auto make_task_set(Iter begin, Iter end, TaskFunc func, FinalizeFunc finalize,
                   Args... args) {
  using InputType = typename std::iterator_traits<Iter>::value_type;
  using ResultType =
      std::invoke_result_t<TaskFunc, Iter, Iter,
                           remove_future_t<std::decay_t<Args>>...>;

  if constexpr (std::is_void_v<ResultType>) {
    using ResultType = bool;
    using FinalResultType =
        std::invoke_result_t<FinalizeFunc, std::vector<ResultType>>;

    auto task_set = std::make_unique<
        TaskSet<std::future<std::vector<InputType>>, Iter, ResultType,
                FinalResultType, TaskFunc, FinalizeFunc, Args...>>(
        begin, end, std::move(func), std::move(finalize),
        std::forward<Args>(args)...);
    return task_set;
  } else {
    using FinalResultType =
        std::invoke_result_t<FinalizeFunc, std::vector<ResultType>>;

    // the first template type is useless
    auto task_set = std::make_unique<
        TaskSet<std::future<std::vector<InputType>>, Iter, ResultType,
                FinalResultType, TaskFunc, FinalizeFunc, Args...>>(
        begin, end, std::move(func), std::move(finalize),
        std::forward<Args>(args)...);
    return task_set;
  }
}

template <typename FutureInputType, typename TaskFunc, typename FinalizeFunc,
          typename... Args>
auto make_task_set(FutureInputType input, TaskFunc func, FinalizeFunc finalize,
                   Args... args) {
  using InputType = remove_ptr_t<remove_future_t<FutureInputType>>;
  using Iter = InputType::iterator;
  using ResultType =
      std::invoke_result_t<TaskFunc, Iter, Iter,
                           remove_future_t<std::decay_t<Args>>...>;

  if constexpr (std::is_void_v<ResultType>) {
    using ResultType = bool;
    using FinalResultType =
        std::invoke_result_t<FinalizeFunc, std::vector<ResultType>>;

    auto task_set = std::make_unique<
        TaskSet<FutureInputType, Iter, ResultType, FinalResultType, TaskFunc,
                FinalizeFunc, Args...>>((Iter) nullptr, (Iter) nullptr,
                                        std::move(func), std::move(finalize),
                                        std::forward<Args>(args)...);
    task_set->_set_future(std::move(input));
    return task_set;
  } else {
    using FinalResultType =
        std::invoke_result_t<FinalizeFunc, std::vector<ResultType>>;

    // the first template type is useless
    auto task_set = std::make_unique<
        TaskSet<FutureInputType, Iter, ResultType, FinalResultType, TaskFunc,
                FinalizeFunc, Args...>>((Iter) nullptr, (Iter) nullptr,
                                        std::move(func), std::move(finalize),
                                        std::forward<Args>(args)...);
    task_set->_set_future(std::move(input));
    return task_set;
  }
}

template <typename TaskFunc, typename... Args>
auto make_gpu_task_set(TaskFunc func, Args... args) {
  using ResultType =
      std::invoke_result_t<TaskFunc, remove_future_t<std::decay_t<Args>>...>;

  if constexpr (std::is_void_v<ResultType>) {
    using ResultType = bool;

    auto task_set = std::make_unique<GpuTaskSet<ResultType, TaskFunc, Args...>>(
        std::move(func), std::forward<Args>(args)...);
    return task_set;
  } else {
    auto task_set = std::make_unique<GpuTaskSet<ResultType, TaskFunc, Args...>>(
        std::move(func), std::forward<Args>(args)...);
    return task_set;
  }
}
