#include "nvidia/trt_infer.h"

#include <NvOnnxParser.h>

#include <cstdint>
#include <cstring>
#include <filesystem>
#include <fstream>
#include <numeric>
#include <sstream>

namespace autodrive {
namespace perception {

TrtInfer::TrtInfer() : is_build_{false} {}

// BUGFIX: release TensorRT resources on destruction. The previous empty
// destructor leaked the runtime/engine/context whenever the caller forgot
// an explicit Release(). Release() is noexcept and idempotent, so it is
// safe to call here.
TrtInfer::~TrtInfer() { Release(); }

// One-time build entry point: selects the CUDA device, builds (or loads a
// cached serialized plan for) the TensorRT engine described by `settings`,
// and enumerates the engine's IO tensors into io_tensor_map_.
//
// Guarded by std::call_once: only the first call does the heavy work, but
// every call stores `settings` and sets is_build_.
//
// @param settings  model path, precision, device/DLA selection, etc.
// @throws whatever build_cuda_engine_retry / enumerate_io_tensors throw.
void TrtInfer::Build(TrtInferSettings const& settings) noexcept(false) {
  settings_ = settings;
  auto buildFunc{[this, &settings]() {
    nvinferLogInfo << "TensorRT version: " << getInferLibVersion() << std::endl;
    // set cuda device
    set_cuda_device(settings.use_cuda_device, nvinferLogInfo);

    // build&&deserialize model
    nvinferLogInfo << "Begin build network, engine......" << std::endl;
    auto nvinferPair{build_cuda_engine_retry(settings)};
    nvinfer_runtime_ptr_ = nvinferPair.first;
    nvinfer_engine_ptr_ = nvinferPair.second;

    // enum io tensors
    nvinferLogInfo << settings.model_file << ", enumerate io tensors......"
                   << std::endl;
    enumerate_io_tensors(nvinfer_engine_ptr_, io_tensor_map_);
  }};

  // call_once rethrows any exception from buildFunc; in that case the line
  // below is not reached and a later Build() call can retry.
  std::call_once(build_flag_, buildFunc);
  is_build_ = true;
}

// Parses the ONNX model referenced by `settings` and builds a serialized
// TensorRT plan (engine blob) with the requested precision / workspace /
// DLA options.
//
// @param settings  build options: model path, precision ("fp32"/"fp16"/
//                  "int8"), workspace size in GiB, optional DLA core.
// @return shared pointer owning the serialized plan memory.
// @throws std::runtime_error on a missing model file or any TensorRT API
//         failure.
NvinferHostMemorySharePtr TrtInfer::construct_plan(
    TrtInferSettings const& settings) noexcept(false) {
  auto const& modelFile{settings.model_file};
  auto const& maxWorkSpaceSize{settings.max_work_space_size};
  auto const& scalerType{settings.scaler_type};
  auto const& useDlaCore{settings.use_dla_core};

  if (!std::filesystem::exists(modelFile)) {
    throw std::runtime_error("model file " + modelFile + " does not exist");
  }

  auto nvinferBuildPtr{NvinferUniquePtr<nvinfer1::IBuilder>(
      nvinfer1::createInferBuilder(nvinferLogger.getTRTLogger()))};
  if (nvinferBuildPtr == nullptr) {
    throw std::runtime_error("createInferBuilder fail!");
  }

  // BUGFIX: createNetworkV2 takes a *bitmask* of creation flags, so the
  // explicit-batch bit is `1 << kEXPLICIT_BATCH`. The previous code shifted
  // max_batch_size instead, which set the wrong flag bits whenever
  // max_batch_size != 1.
  auto const explicitBatch{
      1U << static_cast<std::uint32_t>(
          nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)};

  auto nvinferNetworkPtr{NvinferUniquePtr<nvinfer1::INetworkDefinition>(
      nvinferBuildPtr->createNetworkV2(explicitBatch))};
  if (nvinferNetworkPtr == nullptr) {
    throw std::runtime_error("createNetworkV2 fail!");
  }

  auto nvinferParserPtr{
      NvinferUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(
          *nvinferNetworkPtr, nvinferLogger.getTRTLogger()))};
  if (nvinferParserPtr == nullptr) {
    throw std::runtime_error("createParser for onnx fail!");
  }

  auto bRet{nvinferParserPtr->parseFromFile(
      modelFile.c_str(),
      static_cast<std::int32_t>(nvinferLogger.getReportableSeverity()))};
  if (!bRet) {
    throw std::runtime_error("parseFromFile for model " + modelFile + ", fail");
  }

  // set config
  auto nvinferConfigPtr{NvinferUniquePtr<nvinfer1::IBuilderConfig>(
      nvinferBuildPtr->createBuilderConfig())};
  if (nvinferConfigPtr == nullptr) {
    throw std::runtime_error("createBuilderConfig fail!");
  }

  // Max workspace size, configured in GiB.
  // BUGFIX: widen before multiplying — `maxWorkSpaceSize * (1 << 30)`
  // overflows 32-bit arithmetic for sizes >= 2 GiB.
  nvinferConfigPtr->setMemoryPoolLimit(
      nvinfer1::MemoryPoolType::kWORKSPACE,
      static_cast<std::size_t>(maxWorkSpaceSize) * (1ULL << 30));

  // Precision selection. fp32 is TensorRT's default and needs no flag.
  // BUGFIX: "fp32" previously fell into the error branch and threw, which
  // also made the fp32 handling in the DLA section below unreachable.
  nvinferLogInfo << ">>>>>>>>> scaler type: " << scalerType << std::endl;
  if (scalerType == "fp16") {
    nvinferConfigPtr->setFlag(nvinfer1::BuilderFlag::kFP16);
  } else if (scalerType == "int8") {
    nvinferConfigPtr->setFlag(nvinfer1::BuilderFlag::kINT8);
  } else if (scalerType != "fp32") {
    throw std::runtime_error("unsupport scaler type: " + scalerType);
  }

  // dla settings
  if (useDlaCore >= 0) {
    nvinferLogInfo << "use dla core: " << useDlaCore << std::endl;
    auto dlaCores{nvinferBuildPtr->getNbDLACores()};
    if (dlaCores == 0) {
      throw std::runtime_error("no dla cores available");
    }
    // Layers the DLA cannot run fall back to the GPU.
    nvinferConfigPtr->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
    // By default run in FP16 mode. FP32 mode is not permitted on DLA.
    if (scalerType == "fp32") {
      nvinferLogInfo << "[DLA]"
                     << "fp32 is not permitted, set to fp16!!!" << std::endl;
      nvinferConfigPtr->setFlag(nvinfer1::BuilderFlag::kFP16);
    }
    nvinferConfigPtr->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);
    nvinferConfigPtr->setDLACore(useDlaCore);
    nvinferConfigPtr->setFlag(
        nvinfer1::BuilderFlag::kPREFER_PRECISION_CONSTRAINTS);
  }

  // CUDA stream used for profiling by the builder.
  auto profileStreamPtr{make_cuda_stream()};
  nvinferConfigPtr->setProfileStream(*profileStreamPtr);

  auto hostMemPtr{nvinferBuildPtr->buildSerializedNetwork(*nvinferNetworkPtr,
                                                          *nvinferConfigPtr)};
  if (nullptr == hostMemPtr) {
    throw std::runtime_error("buildSerializedNetwork fail");
  }

  return std::shared_ptr<nvinfer1::IHostMemory>(hostMemPtr, InferDeleter());
}

// Builds (or loads) the serialized plan for `settings`, then deserializes
// it into a runtime + engine pair.
//
// @param settings   build options (used only when the plan must be built).
// @param plan_file  path of the serialized plan cache on disk.
// @return owning raw pointers {runtime, engine}; caller releases them
//         (see Release()).
// @throws std::runtime_error on any build / IO / deserialization failure.
std::pair<nvinfer1::IRuntime*, nvinfer1::ICudaEngine*>
TrtInfer::build_cuda_engine(TrtInferSettings const& settings,
                            std::string const& plan_file) noexcept(false) {
  // Serialized plan file does not exist yet: build it from the ONNX model
  // and serialize it to disk.
  if (!std::filesystem::exists(plan_file)) {
    nvinferLogInfo << "*** begin load onnx and construct plan***" << std::endl;
    auto nvinferPlanPtr{construct_plan(settings)};
    nvinferLogInfo << "*** construct plan ok!!!***" << std::endl;

    nvinferLogInfo << "*** serialize plan***" << std::endl;
    serialize_plan(nvinferPlanPtr, plan_file);
    nvinferLogInfo << "*** serialize plan OK!!!***" << std::endl;
  }

  nvinferLogInfo << "*** find serialized plan file:" << plan_file << "***"
                 << std::endl;
  std::string cacheEngine;
  deserialize_plan(plan_file, cacheEngine);
  nvinferLogInfo << "*** deserialize plan file OK***" << std::endl;

  auto runtimePtr{nvinfer1::createInferRuntime(nvinferLogger.getTRTLogger())};
  if (nullptr == runtimePtr) {
    throw std::runtime_error("createInferRuntime error");
  }

  auto cudaEnginePtr{runtimePtr->deserializeCudaEngine(cacheEngine.data(),
                                                       cacheEngine.size())};
  if (nullptr == cudaEnginePtr) {
    // BUGFIX: release the runtime before throwing, otherwise it leaks on
    // every failed (and then retried) deserialization.
    delete runtimePtr;
    throw std::runtime_error("deserializeCudaEngine error");
  }

  return std::make_pair(runtimePtr, cudaEnginePtr);
}

// Attempts to build the CUDA engine up to three times. After each failure
// the (possibly corrupt) serialized plan file is removed so that the next
// attempt rebuilds it from the ONNX model.
//
// @param settings  build options; model_file determines the plan filename
//                  ("<model stem>.engine" in the working directory).
// @return {runtime, engine} pair from the first successful attempt.
// @throws std::runtime_error when all attempts fail.
std::pair<nvinfer1::IRuntime*, nvinfer1::ICudaEngine*>
TrtInfer::build_cuda_engine_retry(TrtInferSettings const& settings) noexcept(
    false) {
  constexpr std::int32_t kMaxAttempts{3};
  auto const modelPath{std::filesystem::path{settings.model_file}};
  auto const enginePath{modelPath.filename().stem().string() + ".engine"};

  for (std::int32_t attempt{0}; attempt < kMaxAttempts; ++attempt) {
    try {
      return build_cuda_engine(settings, enginePath);
    } catch (const std::exception& e) {
      nvinferLogError << "build cuda engine error: " << attempt << ", "
                      << e.what() << std::endl;
      // Drop the cached plan so the next round starts from the ONNX model.
      std::error_code removeErr;
      if (!std::filesystem::remove(enginePath, removeErr)) {
        nvinferLogError << "delete plan file " << enginePath << " error, "
                        << removeErr.message() << std::endl;
      }
    }
  }

  throw std::runtime_error("build cuda engine fail.");
}

// Returns the size in bytes of one element of the given TensorRT data
// type, or 0 for a type this wrapper does not handle.
std::int32_t TrtInfer::get_date_type_size(nvinfer1::DataType t) noexcept {
  switch (t) {
    case nvinfer1::DataType::kINT32:
    case nvinfer1::DataType::kFLOAT:
      return 4;

    case nvinfer1::DataType::kHALF:
      return 2;

    case nvinfer1::DataType::kINT8:
    case nvinfer1::DataType::kBOOL:
    // BUGFIX: kUINT8 and kFP8 (both handled by print_io_tensor) previously
    // fell through to size 0, which made get_io_tensor allocate zero-byte
    // host/device buffers for such tensors.
    case nvinfer1::DataType::kUINT8:
    case nvinfer1::DataType::kFP8:
      return 1;

    default:
      return 0;
  }
}

// Describes the IO tensor at binding `index` of `engine_ptr` and allocates
// a pinned host/device buffer pair (`mem_ptr`) sized for it.
//
// @param engine_ptr  engine to query; must not be null.
// @param index       IO tensor binding index.
// @return a fully populated IOTensor (name, mode, shape, dtype, element
//         count, byte size, memory).
// @throws std::runtime_error when engine_ptr is null.
TrtInfer::IOTensor TrtInfer::get_io_tensor(
    nvinfer1::ICudaEngine const* engine_ptr,
    std::int32_t index) noexcept(false) {
  if (nullptr == engine_ptr) {
    throw std::runtime_error("engine_ptr is nullptr");
  }
  auto const& name{engine_ptr->getIOTensorName(index)};

  IOTensor tensor;

  tensor.index = index;
  tensor.name = name;
  tensor.mode = engine_ptr->getTensorIOMode(name);
  tensor.shape = engine_ptr->getTensorShape(name);
  tensor.data_type = engine_ptr->getTensorDataType(name);

  // BUGFIX: the accumulator type is deduced from the init value, so the
  // previous `1` (int) truncated every intermediate product to 32 bits
  // despite the int64 multiplier. Use an int64 init value.
  // NOTE(review): assumes all dims are static (>0); a dynamic dim (-1)
  // would yield a negative count — confirm models are fully static.
  auto& shape{tensor.shape};
  tensor.count = std::accumulate(shape.d, shape.d + shape.nbDims,
                                 std::int64_t{1},
                                 std::multiplies<std::int64_t>());

  tensor.size = tensor.count * get_date_type_size(tensor.data_type);
  tensor.mem_ptr = make_cuda_pinned_mem(tensor.size);
  return tensor;
}

// Writes a one-line human-readable summary of `tensor` to `stream`:
// "[index] name, ( d0 d1 ... ), dtype, count, size".
void TrtInfer::print_io_tensor(IOTensor const& tensor,
                               std::iostream& stream) const noexcept {
  // Map the TensorRT data type to a printable name; unhandled types are
  // reported as "None".
  auto const dataTypeName{[&tensor]() -> std::string {
    switch (tensor.data_type) {
      case nvinfer1::DataType::kFLOAT:
        return "kFLOAT";
      case nvinfer1::DataType::kHALF:
        return "kHALF";
      case nvinfer1::DataType::kINT8:
        return "kINT8";
      case nvinfer1::DataType::kINT32:
        return "kINT32";
      case nvinfer1::DataType::kBOOL:
        return "kBOOL";
      case nvinfer1::DataType::kUINT8:
        return "kUINT8";
      case nvinfer1::DataType::kFP8:
        return "kFP8";
      default:
        return "None";
    }
  }()};

  stream << "[" << tensor.index << "] " << tensor.name << ", ( ";
  for (std::int32_t dim{0}; dim < tensor.shape.nbDims; ++dim) {
    stream << tensor.shape.d[dim] << " ";
  }
  stream << ")" << ", " << dataTypeName << ", " << tensor.count << ", "
         << tensor.size << std::endl;
}

// Fills `map` with one IOTensor entry (keyed by tensor name) per IO tensor
// of `engine_ptr`, and logs a summary grouped into input / output / other.
//
// @param engine_ptr  engine to enumerate; must not be null.
// @param map         cleared and refilled with name -> IOTensor entries.
// @throws std::runtime_error when engine_ptr is null.
void TrtInfer::enumerate_io_tensors(nvinfer1::ICudaEngine const* engine_ptr,
                                    IOTensorMap& map) noexcept(false) {
  if (nullptr == engine_ptr) {
    throw std::runtime_error("engine_ptr is nullptr");
  }

  // Collect the per-mode listings separately so they can be logged grouped.
  std::stringstream sin;
  std::stringstream sout;
  std::stringstream sother;

  sin << std::endl << "Input IO: " << std::endl;
  sout << "Output IO: " << std::endl;
  sother << "Other IO:" << std::endl;

  // BUGFIX: use the engine passed in — the previous code silently ignored
  // the `engine_ptr` parameter and read the member pointer instead.
  auto nIOTensors{engine_ptr->getNbIOTensors()};
  map.clear();
  for (std::int32_t i{0}; i < nIOTensors; ++i) {
    auto const& tensor{get_io_tensor(engine_ptr, i)};
    map.emplace(tensor.name, tensor);

    switch (tensor.mode) {
      case nvinfer1::TensorIOMode::kNONE:
        print_io_tensor(tensor, sother);
        break;

      case nvinfer1::TensorIOMode::kINPUT:
        print_io_tensor(tensor, sin);
        break;

      case nvinfer1::TensorIOMode::kOUTPUT:
        print_io_tensor(tensor, sout);
        break;

      default:
        break;
    }
  }

  nvinferLogInfo << "---IO Tensor List---" << sin.str() << sout.str()
                 << sother.str() << "------------------------------------------"
                 << std::endl;
}

// Writes the serialized plan blob to `file`.
//
// @param nvinferPlanPtr  serialized engine memory to persist.
// @param file            destination path (overwritten if it exists).
// @throws std::runtime_error when the file cannot be opened or written.
void TrtInfer::serialize_plan(NvinferHostMemorySharePtr nvinferPlanPtr,
                              std::string const& file) noexcept(false) {
  // BUGFIX: open in binary mode — the plan is a binary blob, and text mode
  // corrupts it on platforms that translate line endings. Also write
  // straight from the plan buffer (the previous copy into a std::string
  // was pure overhead) and fail loudly instead of silently dropping data.
  std::ofstream stream{file, std::ios::binary};
  if (!stream) {
    throw std::runtime_error("open plan file " + file + " for write fail");
  }
  stream.write(static_cast<char const*>(nvinferPlanPtr->data()),
               static_cast<std::streamsize>(nvinferPlanPtr->size()));
  if (!stream) {
    throw std::runtime_error("write plan file " + file + " fail");
  }
}

// Reads the serialized plan file into `cache` (appending, preserving the
// original contract for callers that pre-fill the string).
//
// @param file   path of the serialized plan on disk.
// @param cache  receives the raw plan bytes (appended).
// @throws std::runtime_error when the file cannot be opened.
void TrtInfer::deserialize_plan(std::string const& file,
                                std::string& cache) const noexcept(false) {
  // BUGFIX: open in binary mode so the engine blob is read back
  // byte-for-byte; text mode mangles it on line-ending-translating
  // platforms. Also fail loudly when the file cannot be opened instead of
  // silently producing an empty cache. The old peek()/EOF loop was
  // redundant — a single rdbuf() drain reads the whole file.
  std::ifstream fs{file, std::ios::binary};
  if (!fs) {
    throw std::runtime_error("open plan file " + file + " for read fail");
  }
  std::stringstream buffer;
  buffer << fs.rdbuf();
  cache.append(buffer.str());
}

// Runs one synchronous inference pass with the built engine.
// Lazily creates the execution context on first call, rebuilds the device
// binding table from the IO tensor map, and logs the executeV2 wall time
// measured with a CUDA event timer.
//
// @throws std::runtime_error if Build() has not completed, if the
//         execution context cannot be created, or if executeV2 fails.
void TrtInfer::DoInfer() noexcept(false) {
  if (!IsBuild()) {
    throw std::runtime_error(Settings().model_file + " is not build yet");
  }

  // Create the execution context once; it is reused across calls and
  // destroyed in Release().
  if (nullptr == nvinfer_context_ptr_) {
    auto contextPtr{nvinfer_engine_ptr_->createExecutionContext()};
    if (nullptr == contextPtr) {
      throw std::runtime_error("createExecutionContext error");
    }
    nvinfer_context_ptr_ = contextPtr;
  }

  bindings_.clear();

  // Rebuild the binding table: bindings_[i] is the device pointer of the
  // tensor whose binding index is i.
  // NOTE(review): assumes every tensor.index < IOTensors().size(); holds
  // when the map was filled by enumerate_io_tensors.
  auto ioTensorSize{IOTensors().size()};
  bindings_.resize(ioTensorSize);
  for (auto const& i : IOTensors()) {
    auto const& ioTensor{i.second};
    bindings_.at(ioTensor.index) = ioTensor.mem_ptr->dev;
  }

  CUDATimer t{0};  // CUDA-event timer; elapsed time read below

  // Synchronous execution over the bound device buffers.
  auto bRet{nvinfer_context_ptr_->executeV2(bindings_.data())};

  nvinferLogInfo << "[CUDA EVENT] " << settings_.model_file
                 << " inference time cost:" << t.Elapsed() << " ms"
                 << std::endl;
  if (!bRet) {
    throw std::runtime_error("executeV2 run error");
  }
}

// Tears down all inference state under the lock: bindings, tensor map,
// then the execution context, engine and runtime (reverse order of
// creation), and finally clears the built flag. Safe to call repeatedly.
void TrtInfer::Release() noexcept {
  std::lock_guard<std::mutex> guard(lock_);

  bindings_.clear();
  io_tensor_map_.clear();

  // delete on a null pointer is a no-op, so no per-pointer checks needed.
  auto destroy{[](auto*& ptr) noexcept {
    delete ptr;
    ptr = nullptr;
  }};
  destroy(nvinfer_context_ptr_);
  destroy(nvinfer_engine_ptr_);
  destroy(nvinfer_runtime_ptr_);

  is_build_ = false;
}
}  // namespace perception
}  // namespace autodrive