#include <malloc.h>
#include "tensorrt/cls/cls_video.h"
#include "tensorrt/det/det_predictor.h"
// #include "tensorrt/ocr/rec_predictor.h"
#include "common/model_config.h"

#include "tensorrt/trtmodel_manager.h"

namespace ai {

// Validates a CUDA return code: logs a descriptive message to stdout and
// throws std::runtime_error whenever the call did not return cudaSuccess.
void checkCudaErrorCode(cudaError_t code) {
  if (code == 0) {
    return;
  }
  std::string errMsg = "CUDA operation failed with code: " + std::to_string(code) +
                       "(" + cudaGetErrorName(code) + "), with message: " +
                       cudaGetErrorString(code);
  std::cout << errMsg << std::endl;
  throw std::runtime_error(errMsg);
}

// Forwards the shared logging context straight to the ModelManager base;
// no TRT-specific state needs initialization here.
TRTModelManager::TRTModelManager(LogInfo *log_info)
    : ai::ModelManager(log_info) {}
// Per-model resources are released through free_model(); nothing to tear
// down at the manager level.
TRTModelManager::~TRTModelManager() {}

int TRTModelManager::init_model_imp(std::string model_path, std::string cfg_path, int gpu_idx, ai::InitModelData &imd, BasePredictor** infer, int debug_mode) {
  int ret = 0;

  ai::TRTPackPredictor* trtp = new ai::TRTPackPredictor();
  trtp->cfg = new ModelConfig(log_ifo);
  trtp->cfg->load_config(cfg_path);
  trtp->cfg->gpu_id = gpu_idx;
  trtp->model_id = imd.model_id;
  ret = load_model(model_path, trtp, imd.model_id);
  if (log_ifo->log_level_1) spdlog::get("logger")->info("TRTModelManager::init_model load_model: {} ", ret);
  if (ret != 0){
    printf("load_model fail ret:%d\n",ret);
    return ret;
  }


  if(trtp->cfg->algorithm == ai::model_cls_video) {
    *infer = new TRTClsVideo(trtp, this, log_ifo);
  }
  else if(trtp->cfg->algorithm == ai::model_det) {
    *infer = new TRTDetPredictor(trtp, this, log_ifo);
  }
  else {
    printf("init_model error. algorithm:%s\n", trtp->cfg->algorithm);
    return -1;
  }

  return 0;
}

// Loads the model for the GPU selected in trtp->cfg. Three sources exist:
//   1. Neither the ONNX file nor the cached ".trt" engine is on disk:
//      fetch an encrypted engine from memory via get_model() and set up
//      IO buffers inline (early return 0 inside the first `if`).
//   2. Only the ONNX file exists: build a ".trt" engine via buildNetwork().
//   3. The cached ".trt" engine exists: deserialize it via loadNetwork().
// Returns 0 on success, non-zero on failure.
//
// NOTE(review): the buffer-allocation section in path (1) duplicates the
// one in loadNetwork(std::vector<char>, ...); keep the two in sync.
int TRTModelManager::load_model(std::string onnx_model_path, ai::TRTPackPredictor* trtp, std::string model_id) {
  int ret = 0;

  // Specify options for GPU inference
  Options options;
  options.deviceIndex = trtp->cfg->gpu_id;
  options.optBatchSize = trtp->cfg->batch_size;
  options.maxBatchSize = trtp->cfg->batch_size;

  // Precision is hard-wired to FP16, so the INT8 calibration guard below is
  // currently dead code; it only matters if precision becomes configurable.
  options.precision = Precision::FP16;
  options.calibrationDataDirectoryPath = "";

  if (options.precision == Precision::INT8) {
      if (options.calibrationDataDirectoryPath.empty()) {
          throw std::runtime_error("Error: Must supply calibration data path for INT8 calibration");
      }
  }
  trtp->m_options = options;
  
  std::cout<<"trtp->m_options.deviceIndex :"<<trtp->m_options.deviceIndex<<std::endl;

  // Load the model from memory through the encrypted interface.
  // Engine cache path is "<onnx_path>.<gpu_id>.trt" — one engine per GPU.
  std::string trt_path = onnx_model_path + "." + std::to_string(trtp->cfg->gpu_id) + ".trt";
  if (!bf::exists(onnx_model_path) && !bf::exists(trt_path)) {
      trtp->m_runtime = nvinfer1::createInferRuntime(trtp->logger_trt);
      if (!trtp->m_runtime) { return -1; }
      // NOTE(review): this `ret` shadows the function-level `ret` above.
      auto ret = cudaSetDevice(trtp->m_options.deviceIndex);
      if (ret != 0) {
        int numGPUs;
        cudaGetDeviceCount(&numGPUs);
        auto errMsg = "Unable to set GPU device index to: " + std::to_string(trtp->m_options.deviceIndex) + ". Note, your device has " + std::to_string(numGPUs) + " CUDA-capable GPU(s).";
        throw std::runtime_error(errMsg);
      }
    
    // Load the encrypted model from memory.
    if (get_model == nullptr) {return model_load_so_error;}
    // TODO: keep this path in sync with the file-based path and test it.
    void* tmpptr = trtp->m_runtime; 
    nvinfer1::ICudaEngine* tmpEngine = trtp->m_engine;
    ret = get_model(model_id,&tmpptr,reinterpret_cast<void*>(&options),&tmpEngine,trtp->cfg->gpu_id);  // fetch the model data
    if(ret != 0){spdlog::get("logger")->info("TRTModelManager::load_model. get_model ret: {}, model_id:{}", ret, model_id);}
    if (ret != 0) {
      printf("get_model error ret=%d\n", ret);
      return ret;
    }
    // NOTE(review): std::move on a raw pointer is a no-op cast and the
    // reinterpret_cast is redundant (same type); plain assignment would do.
    trtp->m_engine = reinterpret_cast<nvinfer1::ICudaEngine*>(std::move(tmpEngine));
    
  // // Create an engine, a representation of the optimized model.
  if (trtp->m_engine == nullptr) {printf("m_engine nullptr !!\n");  return -1; }
  printf("test111 !!\n");    

  trtp->m_context = trtp->m_engine->createExecutionContext();
  if (!trtp->m_context) { return -1; }
  printf("test112 !!\n");    

  // Storage for holding the input and output buffers
  // This will be passed to TensorRT for inference
  clearGpuBuffers(trtp);
  trtp->m_buffers.resize(trtp->m_engine->getNbIOTensors());
  printf("test113 !!\n");    

  trtp->m_outputLengths.clear();
  trtp->m_inputDims.clear();
  trtp->m_outputDims.clear();
  trtp->m_IOTensorNames.clear();

  // Create a cuda stream
  cudaStream_t stream;
  checkCudaErrorCode(cudaStreamCreate(&stream));

  // Allocate GPU memory for input and output buffers
  trtp->m_outputLengths.clear();
  for (int i = 0; i < trtp->m_engine->getNbIOTensors(); ++i) {
      const auto tensorName = trtp->m_engine->getIOTensorName(i);
      trtp->m_IOTensorNames.emplace_back(tensorName);
      const auto tensorType = trtp->m_engine->getTensorIOMode(tensorName);
      const auto tensorShape = trtp->m_engine->getTensorShape(tensorName);
      const auto tensorDataType = trtp->m_engine->getTensorDataType(tensorName);

      std::cout << "tensorName:" << tensorName << std::endl;
      std::cout << "tensorShape.nbDims:" << tensorShape.nbDims << std::endl;
      std::cout << "tensorShape:";
      for (int ii = 0; ii < tensorShape.nbDims; ++ii) {
        std::cout << tensorShape.d[ii] << " ";
      }
      std::cout << std::endl;

      if (tensorType == nvinfer1::TensorIOMode::kINPUT) {
          // The implementation currently only supports inputs of type float
          if (trtp->m_engine->getTensorDataType(tensorName) != nvinfer1::DataType::kFLOAT) {
              throw std::runtime_error("Error, the implementation currently only supports float inputs");
          }

          // Store the input dims (assumed NCHW, 4-D) for later use. Unlike
          // the comment in the original suggested, an input buffer IS
          // allocated here (see clearGpuBuffers for the matching free).
          trtp->m_inputDims.emplace_back(tensorShape.d[1], tensorShape.d[2], tensorShape.d[3]);
          trtp->m_inputBatchSize = tensorShape.d[0];
          cudaMalloc(&trtp->m_buffers[i], tensorShape.d[0] * tensorShape.d[1] * tensorShape.d[2] * tensorShape.d[3] * sizeof(float));

      } else if (tensorType == nvinfer1::TensorIOMode::kOUTPUT) {
          // The binding is an output
          uint32_t outputLength = 1;
          trtp->m_outputDims.push_back(tensorShape);

          for (int j = 1; j < tensorShape.nbDims; ++j) {
              // We ignore j = 0 because that is the batch size, and we will take that
              // into account when sizing the buffer
              outputLength *= tensorShape.d[j];
          }
          std::cout << "outputLength:" << outputLength << std::endl;

          trtp->m_outputLengths.push_back(outputLength);
          // Now size the output buffer appropriately, taking into account the max
          // possible batch size (although we could actually end up using less
          // memory)
          checkCudaErrorCode(cudaMallocAsync(&trtp->m_buffers[i], outputLength * trtp->m_options.maxBatchSize * sizeof(float), stream));
      } else {
          throw std::runtime_error("Error, IO Tensor is neither an input or output!");
      }
  }

  // Synchronize and destroy the cuda stream
  checkCudaErrorCode(cudaStreamSynchronize(stream));
  checkCudaErrorCode(cudaStreamDestroy(stream));
  printf("test114 !!\n");    

    // Memory-load path complete; skip the file-based paths entirely.
    return 0;
  }
  
  // Use the original (plain) ONNX model: build the engine if not yet cached.
  if (bf::exists(onnx_model_path) && !bf::exists(trt_path)) {
    ret = buildNetwork(onnx_model_path, trt_path, trtp);
    spdlog::get("logger")->info("TRTModelManager::load_model. build_network: {} ", ret);
  }

  // Deserialize the cached (or freshly built) engine.
  ret = loadNetwork(trt_path, trtp);
  return ret;
}


// Stops and destroys ALL predictors held by this manager.
//
// NOTE(review): the md_idx parameter is currently ignored — every entry in
// `infers` is freed regardless of the index passed in. Confirm callers do
// not expect per-model freeing before relying on this.
int TRTModelManager::free_model(int md_idx) {
  for (auto& infer : infers) {
    TRTPredictor* cur_infer = (TRTPredictor*)infer;
      // std::cout << "1.1.1"  << std::endl;
    // Stop the predictor before tearing its resources down.
    cur_infer->stop();
      // std::cout << "1.1.2"  << std::endl;
    delete cur_infer->mdl_trt->cfg;
    // Release device-side IO buffers tracked by the pack.
    clearGpuBuffers(cur_infer->mdl_trt);
      // std::cout << "1.1.3"  << std::endl;
    // cur_infer->mdl_trt->sessionOptions.release();
      // std::cout << "1.1.4"  << std::endl;
    // cur_infer->mdl_trt->session->release();
      // std::cout << "1.1.5"  << std::endl;
      
    // NOTE(review): m_context / m_engine / m_runtime are not explicitly
    // destroyed here — presumably TRTPackPredictor's destructor owns them;
    // verify, otherwise they leak.
    // delete cur_infer->mdl_trt->m_trtEngine;
    delete cur_infer->mdl_trt;
    delete cur_infer;
      // std::cout << "1.1.6"  << std::endl;
  }
  infers.clear();

  return 0;
}


// Reads a serialized TensorRT engine from disk into memory and delegates to
// the buffer-based overload for deserialization.
// Returns 0 on success, -1 if the file is missing, unopenable, empty, or
// unreadable.
int TRTModelManager::loadNetwork(std::string trtModelPath, ai::TRTPackPredictor* trtp) {
  // Read the serialized model from disk
  if (!bf::exists(trtModelPath)) {
    std::cout << "Error, unable to read TensorRT model at path: " + trtModelPath << std::endl;
    return -1;
  } else {
    std::cout << "Loading TensorRT engine file at path: " << trtModelPath << std::endl;
  }

  std::ifstream file(trtModelPath, std::ios::binary | std::ios::ate);
  if (!file.is_open()) {
    // exists() can pass while open fails (permissions, races).
    std::cout << "Unable to read engine file" << std::endl;
    return -1;
  }
  std::streamsize size = file.tellg();
  if (size <= 0) {
    // tellg() returns -1 on failure; the original would then construct a
    // vector of size (size_t)-1 and throw std::length_error.
    std::cout << "Unable to read engine file" << std::endl;
    return -1;
  }
  file.seekg(0, std::ios::beg);

  std::vector<char> buffer(static_cast<size_t>(size));
  if (!file.read(buffer.data(), size)) {
    std::cout << "Unable to read engine file" << std::endl;
    return -1;
  }
  return loadNetwork(buffer, trtp);
}

// Deserializes a TensorRT engine from an in-memory buffer, creates an
// execution context, and allocates device buffers for every IO tensor.
// Populates trtp's runtime/engine/context, tensor names, input/output dims
// and output lengths. Returns 0 on success, -1 on any TRT object failure;
// throws on CUDA errors, non-float inputs, or unknown tensor IO modes.
//
// NOTE(review): this sequence is duplicated inside load_model()'s
// memory-load path; changes here should be mirrored there.
int TRTModelManager::loadNetwork(std::vector<char> buffer, ai::TRTPackPredictor* trtp) {

  // Create a runtime to deserialize the engine file.
  trtp->m_runtime = nvinfer1::createInferRuntime(trtp->logger_trt);
  if (!trtp->m_runtime) { return -1; }

  // Set the device index
  auto ret = cudaSetDevice(trtp->m_options.deviceIndex);
  if (ret != 0) {
    int numGPUs;
    cudaGetDeviceCount(&numGPUs);
    auto errMsg = "Unable to set GPU device index to: " + std::to_string(trtp->m_options.deviceIndex) + ". Note, your device has " + std::to_string(numGPUs) + " CUDA-capable GPU(s).";
    throw std::runtime_error(errMsg);
  }

  // Create an engine, a representation of the optimized model.
  trtp->m_engine = trtp->m_runtime->deserializeCudaEngine(buffer.data(), buffer.size());
  if (!trtp->m_engine) { return -1; }

  // The execution context contains all of the state associated with a
  // particular invocation
  trtp->m_context = trtp->m_engine->createExecutionContext();
  if (!trtp->m_context) { return -1; }

  // Storage for holding the input and output buffers
  // This will be passed to TensorRT for inference
  clearGpuBuffers(trtp);
  trtp->m_buffers.resize(trtp->m_engine->getNbIOTensors());

  trtp->m_outputLengths.clear();
  trtp->m_inputDims.clear();
  trtp->m_outputDims.clear();
  trtp->m_IOTensorNames.clear();

  // Create a cuda stream
  cudaStream_t stream;
  checkCudaErrorCode(cudaStreamCreate(&stream));

  // Allocate GPU memory for input and output buffers
  trtp->m_outputLengths.clear();
  for (int i = 0; i < trtp->m_engine->getNbIOTensors(); ++i) {
      const auto tensorName = trtp->m_engine->getIOTensorName(i);
      trtp->m_IOTensorNames.emplace_back(tensorName);
      const auto tensorType = trtp->m_engine->getTensorIOMode(tensorName);
      const auto tensorShape = trtp->m_engine->getTensorShape(tensorName);
      const auto tensorDataType = trtp->m_engine->getTensorDataType(tensorName);

      std::cout << "tensorName:" << tensorName << std::endl;
      std::cout << "tensorShape.nbDims:" << tensorShape.nbDims << std::endl;
      std::cout << "tensorShape:";
      for (int ii = 0; ii < tensorShape.nbDims; ++ii) {
        std::cout << tensorShape.d[ii] << " ";
      }
      std::cout << std::endl;

      if (tensorType == nvinfer1::TensorIOMode::kINPUT) {
          // The implementation currently only supports inputs of type float
          if (trtp->m_engine->getTensorDataType(tensorName) != nvinfer1::DataType::kFLOAT) {
              throw std::runtime_error("Error, the implementation currently only supports float inputs");
          }

          // Store the input dims (assumed NCHW, 4-D) for later use. Despite
          // the original comment claiming inputs need no allocation, an
          // input buffer IS cudaMalloc'd here.
          trtp->m_inputDims.emplace_back(tensorShape.d[1], tensorShape.d[2], tensorShape.d[3]);
          trtp->m_inputBatchSize = tensorShape.d[0];
          // NOTE(review): this cudaMalloc's return code is unchecked, unlike
          // the output allocation below.
          cudaMalloc(&trtp->m_buffers[i], tensorShape.d[0] * tensorShape.d[1] * tensorShape.d[2] * tensorShape.d[3] * sizeof(float));

      } else if (tensorType == nvinfer1::TensorIOMode::kOUTPUT) {
          // (A block of dead, commented-out output-dtype checks — each of
          // which compared a type against itself — was removed here.)

          // The binding is an output
          uint32_t outputLength = 1;
          trtp->m_outputDims.push_back(tensorShape);

          for (int j = 1; j < tensorShape.nbDims; ++j) {
              // We ignore j = 0 because that is the batch size, and we will take that
              // into account when sizing the buffer
              outputLength *= tensorShape.d[j];
          }
          std::cout << "outputLength:" << outputLength << std::endl;

          trtp->m_outputLengths.push_back(outputLength);
          // Now size the output buffer appropriately, taking into account the max
          // possible batch size (although we could actually end up using less
          // memory)
          checkCudaErrorCode(cudaMallocAsync(&trtp->m_buffers[i], outputLength * trtp->m_options.maxBatchSize * sizeof(float), stream));
      } else {
          throw std::runtime_error("Error, IO Tensor is neither an input or output!");
      }
  }

  // Synchronize and destroy the cuda stream
  checkCudaErrorCode(cudaStreamSynchronize(stream));
  checkCudaErrorCode(cudaStreamDestroy(stream));

  return 0;
}


int TRTModelManager::buildNetwork(std::string onnxModelPath, std::string trtModelPath, ai::TRTPackPredictor* trtp) {
  int ret = 0;
  if (!bf::exists(onnxModelPath)) { return -1; }

  // Was not able to find the engine file, generate...
  std::cout << "Engine not found, generating. This could take a while..." << std::endl;
  
  // 设置当前指定的显卡来转换
  ret = cudaSetDevice(trtp->m_options.deviceIndex);
  if (ret != 0) {
    checkCudaErrorCode((cudaError_t)ret);
    return ret;
  }
  // Create our engine builder.
  auto builder = std::unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(trtp->logger_trt));
  if (!builder) {
    return -1;
  }

  // Define an explicit batch size and then create the network (implicit batch
  // size is deprecated). More info here:
  // https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#explicit-implicit-batch
  auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
  auto network = std::unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
  if (!network) {
    return -1;
  }

  // Create a parser for reading the onnx file.
  auto parser = std::unique_ptr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, trtp->logger_trt));
  if (!parser) {
    return -1;
  }

  // We are going to first read the onnx file into memory, then pass that buffer
  // to the parser. Had our onnx model file been encrypted, this approach would
  // allow us to first decrypt the buffer.
  std::ifstream file(onnxModelPath, std::ios::binary | std::ios::ate);
  std::streamsize size = file.tellg();
  file.seekg(0, std::ios::beg);

  std::vector<char> buffer(size);
  if (!file.read(buffer.data(), size)) {
    throw std::runtime_error("Unable to read engine file");
  }

  // Parse the buffer we read into memory.
  auto parsed = parser->parse(buffer.data(), buffer.size());
  if (!parsed) {
    return -1;
  }

  // Ensure that all the inputs have the same batch size
  const auto numInputs = network->getNbInputs();
  if (numInputs < 1) {
    return -1;
    std::cout << "Error, model needs at least 1 input!" << std::endl;
  }
  const auto input0Batch = network->getInput(0)->getDimensions().d[0];
  for (int32_t i = 1; i < numInputs; ++i) {
    if (network->getInput(i)->getDimensions().d[0] != input0Batch) {
      return -1;
      std::cout <<  "Error, the model has multiple inputs, each with differing batch sizes!" << std::endl;
    }
  }

  // Check to see if the model supports dynamic batch size or not
  bool doesSupportDynamicBatch = false;
  if (input0Batch == -1) {
    doesSupportDynamicBatch = true;
    std::cout << "Model supports dynamic batch size" << std::endl;
  } else {
    std::cout << "Model only supports fixed batch size of " << input0Batch << std::endl;
    // If the model supports a fixed batch size, ensure that the maxBatchSize
    // and optBatchSize were set correctly.
    if (trtp->m_options.optBatchSize != input0Batch || trtp->m_options.maxBatchSize != input0Batch) {
      std::cout << "Error, model only supports a fixed batch size of " + std::to_string(input0Batch) + ". Must set Options.optBatchSize and Options.maxBatchSize to 1" << std::endl;
      return -1;
    }
  }

  auto config = std::unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
  if (!config) {
    return -1;
  }

  // Register a single optimization profile
  nvinfer1::IOptimizationProfile *optProfile = builder->createOptimizationProfile();
  for (int32_t i = 0; i < numInputs; ++i) {
    // Must specify dimensions for all the inputs the model expects.
    const auto input = network->getInput(i);
    const auto inputName = input->getName();
    const auto inputDims = input->getDimensions();
    int32_t inputC = inputDims.d[1];
    int32_t inputH = inputDims.d[2];
    int32_t inputW = inputDims.d[3];

    // Specify the optimization profile`
    if (doesSupportDynamicBatch) {
      optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMIN, nvinfer1::Dims4(1, inputC, inputH, inputW));
    } else {
      optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMIN,
                                  nvinfer1::Dims4(trtp->m_options.optBatchSize, inputC, inputH, inputW));
    }
    optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kOPT,
                              nvinfer1::Dims4(trtp->m_options.optBatchSize, inputC, inputH, inputW));
    optProfile->setDimensions(inputName, nvinfer1::OptProfileSelector::kMAX,
                                nvinfer1::Dims4(trtp->m_options.maxBatchSize, inputC, inputH, inputW));
  }
  config->addOptimizationProfile(optProfile);

  // Set the precision level
  if (trtp->m_options.precision == Precision::FP16) {
      // Ensure the GPU supports FP16 inference
      if (!builder->platformHasFastFp16()) {
          std::cout << "Error: GPU does not support FP16 precision" << std::endl;
          return -1;
      }
      config->setFlag(nvinfer1::BuilderFlag::kFP16);
  }

  // CUDA stream used for profiling by the builder.
  cudaStream_t profileStream;
  // Util::checkCudaErrorCode(cudaStreamCreate(&profileStream));
  ret = cudaStreamCreate(&profileStream);
  if (ret != 0) {
    checkCudaErrorCode((cudaError_t)ret);
    return ret;
  }
  config->setProfileStream(profileStream);

  // Build the engine
  // If this call fails, it is suggested to increase the logger verbosity to
  // kVERBOSE and try rebuilding the engine. Doing so will provide you with more
  // information on why exactly it is failing.
  std::unique_ptr<nvinfer1::IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
  if (!plan) {
    return -1;
  }

  // Write the engine to disk
  // const auto engineName = serializeEngineOptions(m_options, onnxModelPath);

  std::ofstream outfile(trtModelPath, std::ofstream::binary);
  outfile.write(reinterpret_cast<const char *>(plan->data()), plan->size());
  std::cout << "Success, saved engine to " << trtModelPath << std::endl;

  // Util::checkCudaErrorCode(cudaStreamDestroy(profileStream));
  ret = cudaStreamDestroy(profileStream);
  if (ret != 0) {
    checkCudaErrorCode((cudaError_t)ret);
    return ret;
  }
  return 0;

}

// Releases every device allocation tracked in mdl_trt->m_buffers.
//
// Both input and output bindings are cudaMalloc'd into m_buffers by
// loadNetwork()/load_model(), so the original loop — which started at the
// first output binding — leaked the input buffers. cudaFree(nullptr) is a
// documented no-op, so unpopulated slots are harmless to free. Iterating
// the vector directly also avoids dereferencing m_engine, which this
// function never actually needed.
void TRTModelManager::clearGpuBuffers(TRTPackPredictor* mdl_trt) {
  if (!mdl_trt->m_buffers.empty()) {
    for (auto &buf : mdl_trt->m_buffers) {
      checkCudaErrorCode(cudaFree(buf));
      buf = nullptr;
    }
    mdl_trt->m_buffers.clear();
  }
}

void TRTModelManager::getDeviceNames(std::vector<std::string> &deviceNames) {
  int numGPUs;
  cudaGetDeviceCount(&numGPUs);

  for (int device = 0; device < numGPUs; device++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);

    deviceNames.push_back(std::string(prop.name));
  }
}

}