#include "nn_yolo.hpp"

#include <NvOnnxParser.h>

#include <fstream>
#include <map>

#include "nn_algo.hpp"
#include "yololayer.h"

bool TRTYoloBuilder::CreateEngineWithOnnx(IBuilder* builder,
                                          IBuilderConfig* config) {
  // Parse the ONNX model at file_path_ and build engine_ from it.
  // Returns false on any failure; engine_ is valid only when true is returned.
  const auto explicitBatch =
      1U << static_cast<uint32_t>(
          NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
  network_ =
      UniquePtr<INetworkDefinition>(builder->createNetworkV2(explicitBatch));
  if (!network_) {
    SPDLOG_ERROR("[TRTDetector] createNetworkV2 Fail.");
    return false;
  } else
    SPDLOG_DEBUG("[TRTDetector] createNetworkV2 OK.");

  // Own the parser so it is released on every return path (the original raw
  // pointer leaked). It stays alive until the end of this function, i.e.
  // through the engine build, because the parsed network references it.
  auto parser = UniquePtr<nvonnxparser::IParser>(
      nvonnxparser::createParser(*network_, logger_));
  if (!parser) {
    SPDLOG_ERROR("[TRTDetector] createParser Fail.");
    return false;
  } else
    SPDLOG_DEBUG("[TRTDetector] createParser OK.");

  auto parsed =
      parser->parseFromFile(file_path_.c_str(), logger_.GetVerbosity());
  if (!parsed) {
    SPDLOG_ERROR("[TRTDetector] parseFromFile Fail.");
    return false;
  } else
    SPDLOG_DEBUG("[TRTDetector] parseFromFile OK.");

  // CUDA stream used by the builder while timing tactics.
  std::unique_ptr<cudaStream_t, decltype(TRT::StreamDeleter)> stream(
      new cudaStream_t, TRT::StreamDeleter);
  if (cudaStreamCreate(stream.get()) != cudaSuccess) {
    SPDLOG_ERROR("[TRTDetector] cudaStreamCreate Fail.");
    stream.reset(nullptr);
    return false;
  } else
    // Fix: the success message was previously logged at ERROR level.
    SPDLOG_DEBUG("[TRTDetector] cudaStreamCreate OK.");
  config->setProfileStream(*stream);

#if NV_TENSORRT_SONAME_MAJOR == 8
  UniquePtr<IHostMemory> plan = UniquePtr<IHostMemory>(
      builder->buildSerializedNetwork(*network_.get(), *config));
  if (!plan) {
    SPDLOG_ERROR("[TRTYoloBuilder] buildSerializedNetwork Fail.");
    return false;
  } else
    SPDLOG_INFO("[TRTYoloBuilder] buildSerializedNetwork OK.");

  UniquePtr<IRuntime> runtime =
      UniquePtr<IRuntime>(createInferRuntime(logger_));
  if (!runtime) {
    SPDLOG_ERROR("[TRTYoloBuilder] createInferRuntime Fail.");
    return false;
  } else
    SPDLOG_INFO("[TRTYoloBuilder] createInferRuntime OK.");
  engine_ = UniquePtr<ICudaEngine>(
      runtime->deserializeCudaEngine(plan->data(), plan->size()));
  if (!engine_) {
    // Fix: a null engine previously fell through and returned true.
    SPDLOG_ERROR("[TRTYoloBuilder] deserializeCudaEngine Fail.");
    return false;
  }
#endif
  return true;
}

bool TRTYoloBuilder::CreateEngineWithWeights(IBuilder* builder,
                                             IBuilderConfig* config,
                                             DataType datatype) {
  // Build a yolov5s network layer-by-layer from the weight file at
  // file_path_ and compile it into engine_. The topology follows
  // yolov5s.yaml; the building blocks live in nn_algo.hpp.
  // Returns false on failure.

  // - 3. Create the network with the builder (implicit batch: 0U flags).
  network_ = UniquePtr<INetworkDefinition>(builder->createNetworkV2(0U));
  if (!network_) {
    SPDLOG_ERROR("[YoloBuilder] createNetworkV2 Fail.");
    return false;
  } else
    SPDLOG_INFO("[YoloBuilder] createNetworkV2 OK.");

  // - 4. Load the weights. Every Weights::values buffer must be released
  // on every return path below — the original leaked them on early return.
  std::map<std::string, Weights> weight_map = algo::LoadWeights(file_path_);
  auto free_weights = [&weight_map]() {
    for (auto& mem : weight_map) free(const_cast<void*>(mem.second.values));
  };

  // - 5. Build the network, starting with the input tensor.
  ITensor* data = network_->addInput(Yolo::INPUT_BLOB_NAME, datatype,
                                     Dims3{3, Yolo::INPUT_H, Yolo::INPUT_W});
  if (!data) {
    SPDLOG_ERROR("[YoloBuilder] addInput Fail.");
    free_weights();
    return false;
  } else
    SPDLOG_INFO("[YoloBuilder] addInput OK.");

  /* ------ yolov5 backbone ------ */
  auto focus0 =
      algo::focus(network_.get(), weight_map, *data, 3, 32, 3, "model.0");
  auto conv1 = algo::convBlock(network_.get(), weight_map,
                               *focus0->getOutput(0), 64, 3, 2, 1, "model.1");
  auto bottleneck_csp2 =
      algo::C3(network_.get(), weight_map, *conv1->getOutput(0), 64, 64, 1,
               true, 1, 0.5, "model.2");
  auto conv3 =
      algo::convBlock(network_.get(), weight_map,
                      *bottleneck_csp2->getOutput(0), 128, 3, 2, 1, "model.3");
  auto bottleneck_csp4 =
      algo::C3(network_.get(), weight_map, *conv3->getOutput(0), 128, 128, 3,
               true, 1, 0.5, "model.4");
  auto conv5 =
      algo::convBlock(network_.get(), weight_map,
                      *bottleneck_csp4->getOutput(0), 256, 3, 2, 1, "model.5");
  auto bottleneck_csp6 =
      algo::C3(network_.get(), weight_map, *conv5->getOutput(0), 256, 256, 3,
               true, 1, 0.5, "model.6");
  auto conv7 =
      algo::convBlock(network_.get(), weight_map,
                      *bottleneck_csp6->getOutput(0), 512, 3, 2, 1, "model.7");
  auto spp8 = algo::SPP(network_.get(), weight_map, *conv7->getOutput(0), 512,
                        512, 5, 9, 13, "model.8");

  /* ------ yolov5 head ------ */
  auto bottleneck_csp9 =
      algo::C3(network_.get(), weight_map, *spp8->getOutput(0), 512, 512, 1,
               false, 1, 0.5, "model.9");
  auto conv10 =
      algo::convBlock(network_.get(), weight_map,
                      *bottleneck_csp9->getOutput(0), 256, 1, 1, 1, "model.10");

  // Nearest-neighbour upsample to match the stride-16 feature map.
  auto upsample11 = network_->addResize(*conv10->getOutput(0));
  upsample11->setResizeMode(ResizeMode::kNEAREST);
  upsample11->setOutputDimensions(
      bottleneck_csp6->getOutput(0)->getDimensions());

  ITensor* inputTensors12[] = {upsample11->getOutput(0),
                               bottleneck_csp6->getOutput(0)};
  auto cat12 = network_->addConcatenation(inputTensors12, 2);
  auto bottleneck_csp13 =
      algo::C3(network_.get(), weight_map, *cat12->getOutput(0), 512, 256, 1,
               false, 1, 0.5, "model.13");
  auto conv14 = algo::convBlock(network_.get(), weight_map,
                                *bottleneck_csp13->getOutput(0), 128, 1, 1, 1,
                                "model.14");

  // Second upsample to match the stride-8 feature map.
  auto deconv15 = network_->addResize(*conv14->getOutput(0));
  deconv15->setResizeMode(ResizeMode::kNEAREST);
  deconv15->setOutputDimensions(bottleneck_csp4->getOutput(0)->getDimensions());

  ITensor* inputTensors16[] = {deconv15->getOutput(0),
                               bottleneck_csp4->getOutput(0)};
  auto cat16 = network_->addConcatenation(inputTensors16, 2);
  auto bottleneck_csp17 =
      algo::C3(network_.get(), weight_map, *cat16->getOutput(0), 256, 128, 1,
               false, 1, 0.5, "model.17");

  /* ------ detect ------ */
  // Three 1x1 detection convolutions, one per scale (model.24.m.{0,1,2}).
  IConvolutionLayer* det0 = network_->addConvolutionNd(
      *bottleneck_csp17->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{1, 1},
      weight_map["model.24.m.0.weight"], weight_map["model.24.m.0.bias"]);

  auto conv18 = algo::convBlock(network_.get(), weight_map,
                                *bottleneck_csp17->getOutput(0), 128, 3, 2, 1,
                                "model.18");
  ITensor* inputTensors19[] = {conv18->getOutput(0), conv14->getOutput(0)};
  auto cat19 = network_->addConcatenation(inputTensors19, 2);
  auto bottleneck_csp20 =
      algo::C3(network_.get(), weight_map, *cat19->getOutput(0), 256, 256, 1,
               false, 1, 0.5, "model.20");
  IConvolutionLayer* det1 = network_->addConvolutionNd(
      *bottleneck_csp20->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{1, 1},
      weight_map["model.24.m.1.weight"], weight_map["model.24.m.1.bias"]);
  auto conv21 = algo::convBlock(network_.get(), weight_map,
                                *bottleneck_csp20->getOutput(0), 256, 3, 2, 1,
                                "model.21");
  ITensor* inputTensors22[] = {conv21->getOutput(0), conv10->getOutput(0)};
  auto cat22 = network_->addConcatenation(inputTensors22, 2);
  auto bottleneck_csp23 =
      algo::C3(network_.get(), weight_map, *cat22->getOutput(0), 512, 512, 1,
               false, 1, 0.5, "model.23");
  auto det2 = network_->addConvolutionNd(
      *bottleneck_csp23->getOutput(0), 3 * (Yolo::CLASS_NUM + 5), DimsHW{1, 1},
      weight_map["model.24.m.2.weight"], weight_map["model.24.m.2.bias"]);

  SPDLOG_WARN("-------------------- AddYoloLayer --------------------");

  // * Attach the YOLO decode plugin over the three scale heads
  // * (strides 8 / 16 / 32) and mark its output as the network output.
  auto yolo =
      algo::AddYoloLayer(network_.get(), weight_map, "model.24",
                         std::vector<IConvolutionLayer*>{det0, det1, det2});
  yolo->getOutput(0)->setName(Yolo::OUTPUT_BLOB_NAME);
  network_->markOutput(*yolo->getOutput(0));

  SPDLOG_WARN("Building engine, please wait for a while...");

  // - 6. Build the engine. The weights are released whether or not the
  // build succeeds; the original also logged success without checking.
  engine_ = UniquePtr<ICudaEngine>(
      builder->buildEngineWithConfig(*network_, *config));
  free_weights();
  if (!engine_) {
    SPDLOG_ERROR("[YoloBuilder] buildEngineWithConfig Fail.");
    return false;
  }
  SPDLOG_WARN("Build engine successfully!");
  return true;
}

bool TRTYoloBuilder::CreateEngineWithFile() {
  // Deserialize a previously saved engine plan from engine_path_ into
  // engine_. Returns false if the file cannot be read or the plan is
  // rejected by the runtime.
  std::vector<char> engine_bin;
  std::ifstream engine_file(engine_path_, std::ios::binary);

  if (!engine_file.good()) {
    SPDLOG_ERROR("[TRTYoloBuilder] LoadEngine Fail. Could not open file.");
    return false;
  }
  engine_file.seekg(0, engine_file.end);
  engine_bin.resize(static_cast<size_t>(engine_file.tellg()));
  engine_file.seekg(0, engine_file.beg);
  engine_file.read(engine_bin.data(), engine_bin.size());
  if (!engine_file) {
    // Fix: a short/failed read previously went undetected.
    SPDLOG_ERROR("[TRTYoloBuilder] LoadEngine Fail. Could not read file.");
    return false;
  }
  engine_file.close();

  auto runtime = UniquePtr<IRuntime>(createInferRuntime(logger_));
  if (!runtime) {
    // Fix: the original dereferenced a null runtime on failure.
    SPDLOG_ERROR("[TRTYoloBuilder] createInferRuntime Fail.");
    return false;
  }
  engine_ = UniquePtr<ICudaEngine>(
      runtime->deserializeCudaEngine(engine_bin.data(), engine_bin.size()));
  if (!engine_) {
    SPDLOG_ERROR("[TRTYoloBuilder] deserializeCudaEngine Fail.");
    return false;
  }
  return true;
}

bool TRTYoloBuilder::CreateEngineWithCache() { return true; }

bool TRTYoloBuilder::CreateEngine(CreateMethod method) {
  // Create engine_ using the requested method (ONNX parse, manual weights,
  // cache, or a serialized plan file). Returns false on any failure.
  //
  // Fix: `datatype` was previously left uninitialized, which was undefined
  // behavior whenever platformHasFastFp16() returned false and the
  // kWEIGHTS path was taken. Default to FP32 explicitly.
  DataType datatype = DataType::kFLOAT;

  // - 1. Create the builder.
  auto builder = UniquePtr<IBuilder>(createInferBuilder(logger_));
  if (!builder) {
    SPDLOG_ERROR("[YoloBuilder] createInferBuilder Fail.");
    return false;
  } else
    SPDLOG_INFO("[YoloBuilder] createInferBuilder OK.");
  builder->setMaxBatchSize(Yolo::BATCH_SIZE);

  // - 2. Create the builder config.
  auto config = UniquePtr<IBuilderConfig>(builder->createBuilderConfig());
  if (!config) {
    SPDLOG_ERROR("[YoloBuilder] createBuilderConfig Fail.");
    return false;
  } else
    SPDLOG_INFO("[YoloBuilder] createBuilderConfig OK.");

  config->setMaxWorkspaceSize(1 << 6 << 20);  // 64 * 2^20 B = 64 MiB

  if (builder->platformHasFastFp16()) {
    config->setFlag(BuilderFlag::kFP16);
    // The network input stays FP32 even when FP16 kernels are enabled.
    datatype = DataType::kFLOAT;
  }

#if 0
  if (builder->platformHasFastInt8()) {
    config->setFlag(BuilderFlag::kINT8);
    datatype = DataType::kINT8;
  }
#endif

  if (builder->getNbDLACores() == 0)
    SPDLOG_WARN("[YoloBuilder] The platform doesn't have any DLA cores.");
  else {
    SPDLOG_INFO("[YoloBuilder] Using DLA core 0.");
    config->setDefaultDeviceType(DeviceType::kDLA);
    config->setDLACore(0);
    config->setFlag(BuilderFlag::kSTRICT_TYPES);
    config->setFlag(BuilderFlag::kGPU_FALLBACK);
  }

  // Dispatch on the creation method. Fix: the original discarded the
  // results of these calls and logged success unconditionally.
  bool created = false;
  if (method == CreateMethod::kONNX) {
    created = CreateEngineWithOnnx(builder.get(), config.get());
  } else if (method == CreateMethod::kWEIGHTS) {
    created = CreateEngineWithWeights(builder.get(), config.get(), datatype);
  } else if (method == CreateMethod::kCACHE) {
    created = CreateEngineWithCache();
  } else if (method == CreateMethod::kFILE) {
    created = CreateEngineWithFile();
  }
  if (!created) {
    SPDLOG_ERROR("[TRTYoloBuilder] CreateEngine Fail.");
    return false;
  }

  size_t free, total;
  cudaMemGetInfo(&free, &total);

  SPDLOG_INFO("total gpu mem: {} MB", (total >> 20));
  SPDLOG_INFO("free gpu mem: {} MB", (free >> 20));
  SPDLOG_INFO("max workspace size will use all of free gpu mem");

  // auto profile = builder->createOptimizationProfile();
  // profile->setDimensions(network_.get()->getInput(0)->getName(),
  //                        OptProfileSelector::kMIN, Dims4{1, 3, 640, 640});
  // profile->setDimensions(network_.get()->getInput(0)->getName(),
  //                        OptProfileSelector::kOPT, Dims4{1, 3, 640, 640});
  // profile->setDimensions(network_.get()->getInput(0)->getName(),
  //                        OptProfileSelector::kMAX, Dims4{1, 3, 640, 640});
  // config->addOptimizationProfile(profile);

  SPDLOG_INFO("[TRTYoloBuilder] LoadEngine.");

  if (!engine_) {
    SPDLOG_ERROR("[TRTYoloBuilder] LoadEngine Fail.");
    return false;
  }
  SPDLOG_INFO("[TRTYoloBuilder] LoadEngine OK.");
  SPDLOG_WARN("Build engine successfully!");
  return true;
}

bool TRTYoloBuilder::SaveEngine() {
  // - 7. 序列化engine，序列化到本地，生成yolov5s.engine
  IHostMemory* modelStream = engine_->serialize();
  assert(modelStream != nullptr);

  std::ofstream p(engine_path_, std::ios::binary);
  if (!p) {
    SPDLOG_ERROR("Could not open plan output file");
    return false;
  }
  p.write(reinterpret_cast<const char*>(modelStream->data()),
          modelStream->size());

#if NV_TENSORRT_SONAME_MAJOR == 7
  modelStream->destroy();
#endif

  return true;
}

TRTYoloBuilder::TRTYoloBuilder() { SPDLOG_TRACE("Constructed"); }

// Construct a builder with the model source path (weights/ONNX) and the
// engine plan path already set. Engine creation is NOT triggered here.
TRTYoloBuilder::TRTYoloBuilder(const std::string& file_path,
                               const std::string& engine_path) {
  SetFilePath(file_path);
  SetEnginePath(engine_path);
  // CreateEngine(CreateMethod::kCACHE);
  SPDLOG_TRACE("Constructed");
}

TRTYoloBuilder::~TRTYoloBuilder() { SPDLOG_TRACE("Destructed"); }

// Record the model source path (weights or ONNX file) and echo it to the log.
void TRTYoloBuilder::SetFilePath(const std::string& file_path) {
  file_path_ = file_path;
  SPDLOG_WARN("File Path : {}", file_path_);
}

// Record the serialized-engine plan path and echo it to the log.
void TRTYoloBuilder::SetEnginePath(const std::string& engine_path) {
  engine_path_ = engine_path;
  // Fix: previously logged file_path_ instead of the engine path.
  SPDLOG_WARN("Engine Path : {}", engine_path_);
}

// Transfer ownership of the built engine to the caller. After this call
// engine_ is null, so GetEngine() can only be called once per build.
UniquePtr<ICudaEngine> TRTYoloBuilder::GetEngine() {
  return {std::move(engine_)};
}
