// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef BACKENDS_INTEL_HPU_KERNELS_HPU_OPERATOR_H_  // NOLINT
#define BACKENDS_INTEL_HPU_KERNELS_HPU_OPERATOR_H_  // NOLINT

#include <assert.h>

#include <condition_variable>
#include <functional>
#include <future>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

#include "glog/logging.h"
#include "habanalabs/synapse_api.h"
#include "habanalabs/synapse_common_types.h"
#include "kernels/funcs.h"
#include "paddle/phi/backends/device_ext.h"
#include "paddle/phi/common/type_traits.h"
#include "paddle/phi/extension.h"

#define ENABLE_ASYNC_RUN

// Builds one synapse (Habana) graph identified by `guid_`. Subclasses add
// nodes/tensors to graphHandle_, then Compile() turns it into a recipe.
class HpuOperator {
 public:
  // Creates the underlying synapse graph for Gaudi2.
  // `guid`: kernel GUID, also used in diagnostics. Passed by const reference
  //         to avoid a needless copy (the member still stores its own copy).
  // `is_eager`: selects eager-mode graph creation (default) vs. graph mode.
  explicit HpuOperator(const std::string& guid, bool is_eager = true)
      : guid_(guid), is_eager_(is_eager) {
    if (is_eager_) {
      synStatus status = synGraphCreateEager(&graphHandle_, synDeviceGaudi2);
      PD_CHECK(status == synSuccess,
               "synGraphCreateEager() ",
               guid_,
               " failed = ",
               status);
    } else {
      synStatus status = synGraphCreate(&graphHandle_, synDeviceGaudi2);
      PD_CHECK(status == synSuccess,
               "synGraphCreate() ",
               guid_,
               " failed = ",
               status);
    }
  }

  // Compiles the assembled graph into recipeHandle_ (defined in the .cc).
  void Compile();
  virtual ~HpuOperator() {}
  // Creates a memory section handle (defined in the .cc; presumably
  // recorded in sectons_ — confirm against the implementation).
  synSectionHandle createSection();
  // Creates a tensor of `dims` dimensions and registers it under `name`.
  // `is_presist` [sic]: persistent tensors are graph inputs/outputs.
  // `section`: optional section the tensor is placed into.
  synTensor createTensor(unsigned dims,
                         synDataType data_type,
                         DIMS tensor_size,
                         bool is_presist,
                         std::string name,
                         synSectionHandle section = nullptr);

 public:
  // nullptr until Compile() has produced a recipe.
  synRecipeHandle GetRecipe() { return recipeHandle_; }

 protected:
  std::string guid_;
  synGraphHandle graphHandle_{nullptr};    // created by the constructor
  synRecipeHandle recipeHandle_{nullptr};  // set by Compile()
  std::vector<synSectionHandle> sectons_;  // [sic] spelling kept: used in .cc
  bool is_eager_;

  std::map<std::string, synTensor> tensors_;  // tensor name -> handle
};

#ifdef ENABLE_ASYNC_RUN
// Process-wide singleton that owns one worker thread per synapse stream and
// runs submitted tasks for a given stream in FIFO order.
class GlobalWorkStreamExecutor {
 public:
  static GlobalWorkStreamExecutor& instance() {
    static GlobalWorkStreamExecutor executor;
    return executor;
  }

  // Enqueues `func` on the worker bound to `stream` and returns a future
  // holding its eventual result (or exception).
  template <typename R>
  std::future<R> async(synStreamHandle stream, std::function<R()> func) {
    auto packaged = std::make_shared<std::packaged_task<R()>>(std::move(func));
    std::future<R> result = packaged->get_future();
    add_task(stream, [packaged]() { (*packaged)(); });
    return result;
  }

  // Generic-callable overload; overload resolution prefers this one for
  // lambdas, avoiding a std::function conversion.
  template <typename F>
  auto async(synStreamHandle stream, F&& func)
      -> std::future<decltype(func())> {
    using R = decltype(func());
    auto packaged =
        std::make_shared<std::packaged_task<R()>>(std::forward<F>(func));
    std::future<R> result = packaged->get_future();
    add_task(stream, [packaged]() { (*packaged)(); });
    return result;
  }

  // Enqueues `func` and blocks until it completes, returning its result.
  template <typename R>
  R sync(synStreamHandle stream, std::function<R()> func) {
    return async(stream, std::move(func)).get();
  }

  template <typename F>
  auto sync(synStreamHandle stream, F&& func) -> decltype(func()) {
    return async(stream, std::forward<F>(func)).get();
  }

 private:
  // Per-stream worker: one thread draining a FIFO queue of tasks.
  struct WorkerThread {
    std::thread thread;
    std::queue<std::function<void()>> tasks;
    std::mutex mutex;
    std::condition_variable condition;
    bool stop = false;  // set during shutdown to end the drain loop
  };

  GlobalWorkStreamExecutor() = default;
  // Shutdown: flag each worker to stop, wake it, then join its thread.
  ~GlobalWorkStreamExecutor() {
    for (auto& entry : workers_) {
      const auto& worker = entry.second;
      {
        std::lock_guard<std::mutex> guard(worker->mutex);
        worker->stop = true;
      }
      worker->condition.notify_all();
      if (worker->thread.joinable()) {
        worker->thread.join();
      }
    }
  }

  // Defined in the .cc; presumably creates the per-stream worker on first
  // use (guarded by workers_mutex_) and pushes `task` onto its queue.
  void add_task(const synStreamHandle stream, std::function<void()> task);

  GlobalWorkStreamExecutor(const GlobalWorkStreamExecutor&) = delete;
  GlobalWorkStreamExecutor& operator=(const GlobalWorkStreamExecutor&) = delete;

  std::unordered_map<synStreamHandle, std::shared_ptr<WorkerThread>> workers_;
  std::mutex workers_mutex_;
};
#endif

// Launches a compiled synapse recipe on a stream. Does not own the recipe
// handle passed to the constructor.
class RecipeRunner {
 public:
  explicit RecipeRunner(synRecipeHandle h) : recipeHandle_(h) {}
  ~RecipeRunner() {}

  // Fills `tensorInfo` (array of `totalNumOfTensors` entries) with the
  // tensor ids the recipe expects at launch. Defined in the .cc.
  void prepareTensorInfo(synRecipeHandle recipe,
                         synLaunchTensorInfo* tensorInfo,
                         uint32_t totalNumOfTensors);
#ifdef ENABLE_ASYNC_RUN
  // Queues recipe execution on `stream`'s worker thread and returns
  // immediately. `tensors` maps tensor names to device addresses; it is
  // taken by const reference (avoiding the extra by-value copy the old
  // signature forced) and copied once into the lambda, so the caller's map
  // may be destroyed as soon as this returns.
  // NOTE(review): fire-and-forget — the future is discarded, so exceptions
  // thrown by ExecuteRecipe are silently swallowed, and `this` is captured,
  // so the runner must outlive the queued task. Confirm both with callers.
  void Run(C_Stream stream, const std::map<std::string, uint64_t>& tensors) {
    // Snapshot the recipe handle now so the task does not re-read the
    // member after submission.
    synRecipeHandle recipehandle = this->recipeHandle_;
    GlobalWorkStreamExecutor::instance().async(
        reinterpret_cast<synStreamHandle>(stream),
        [this, stream, tensors, recipehandle] {
          ExecuteRecipe(stream, tensors, recipehandle);
        });
  }
#else
  // Synchronous variant; defined in the .cc.
  void Run(C_Stream stream, const std::map<std::string, uint64_t>& tensors);
#endif

 protected:
#ifdef ENABLE_ASYNC_RUN
  // Body executed on the worker thread (defined in the .cc). The handle
  // parameter is renamed from `recipeHandle_` to stop shadowing the member.
  void ExecuteRecipe(C_Stream stream,
                     const std::map<std::string, uint64_t>& tensors,
                     synRecipeHandle recipe_handle);
#endif

  synRecipeHandle recipeHandle_;

 private:
  C_Status MallocDeviceMem(uint64_t* buffer, const uint64_t size);
  C_Status FreeDeviceMem(const uint64_t buffer, const uint64_t size);
};

#endif  // BACKENDS_INTEL_HPU_KERNELS_HPU_OPERATOR_H_ // NOLINT
