#include <malloc.h>
#include "onnx/cls/cls_video.h"
#include "onnx/det/det_predictor.h"
#include "onnx/ocr/rec_predictor.h"
// #include "onnx/face/scrfd_predictor.h"
// #include "onnx/face/facefeature_predictor.h"
#include "common/model_config.h"

#include "onnx/onnxmodel_manager.h"

namespace ai {

/// Constructs the ONNX-backed model manager.
/// All construction work is delegated to the base ai::ModelManager.
ONNXModelManager::ONNXModelManager(LogInfo *log_info)
    : ai::ModelManager(log_info) {}
/// Destructor. No ONNX-specific members are owned directly here;
/// per-model resources are torn down explicitly via free_model().
ONNXModelManager::~ONNXModelManager() {}

int ONNXModelManager::init_model_imp(std::string model_path, std::string cfg_path, int gpu_idx, ai::InitModelData &imd, BasePredictor** infer, int debug_mode) {
  int ret = 0;

  ai::ONNXPackPredictor* oxp = new ai::ONNXPackPredictor();
  oxp->cfg = new ModelConfig(log_ifo);
  oxp->cfg->load_config(cfg_path);
  if (debug_mode) {oxp->cfg->use_gpu=false;}
  oxp->cfg->gpu_id = gpu_idx;
  oxp->model_id = imd.model_id;
  ret = load_model(model_path, oxp, imd.model_id);
  if (log_ifo->log_level_1) spdlog::get("logger")->info("ONNXModelManager::init_model load_model: {} ", ret);
  if (ret != 0){
    printf("load_model fail ret:%d\n");
    return ret;
  }


  if(oxp->cfg->algorithm == ai::model_cls_video) {
    *infer = new ONNXClsVideo(oxp, this, log_ifo);
  }
  else if(oxp->cfg->algorithm == ai::model_det) {
    *infer = new ONNXDetPredictor(oxp, this, log_ifo);
  }
  else if (oxp->cfg->algorithm == ai::model_text_rec) {
    *infer = new ONNXOcrRecPredictor(oxp, this, log_ifo);
  }
  // else if (oxp->cfg->algorithm == ai::model_face_det) {
  //   *infer = new ONNXFaceDetPredictor(oxp, this, log_ifo);
  // }
  // else if (oxp->cfg->algorithm == ai::model_face_rec) {
  //   *infer = new ONNXFaceFeaturePredictor(oxp, this, log_ifo);
  // }
  else {
    printf("init_model error. algorithm:%s\n", oxp->cfg->algorithm);
    return -1;
  }

  return 0;
}

/// Creates the ONNX Runtime session for one model and caches its input/output
/// tensor metadata (names are only printed; dims and element counts are stored
/// on the pack).
///
/// Two loading paths:
///  - model_path missing on disk: fetch the encrypted model from memory via
///    the externally provided get_model hook;
///  - otherwise: create the session directly from the file.
///
/// @param model_path model file path (existence selects the loading path)
/// @param oxp        pack receiving the session and tensor metadata
/// @param model_id   id passed to the encrypted-model hook
/// @return 0 on success, model_load_so_error if the hook is missing, or the
///         hook's non-zero error code
int ONNXModelManager::load_model(std::string model_path, ai::ONNXPackPredictor* oxp, std::string model_id) {
  int ret = 0;

  // Single-threaded ORT execution: works around a CPU publish failure seen on
  // the Qinghai platform.
  oxp->sessionOptions.SetIntraOpNumThreads(1);
  oxp->sessionOptions.SetInterOpNumThreads(1);
  oxp->sessionOptions.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
  if (log_ifo->log_level_1) spdlog::get("logger")->info("ONNXModelManager::load_model oxp->cfg->use_gpu: {} ", oxp->cfg->use_gpu);
  if (oxp->cfg->use_gpu) {
    OrtCUDAProviderOptions cuda_options{};
    cuda_options.device_id = oxp->cfg->gpu_id;
    oxp->sessionOptions.AppendExecutionProvider_CUDA(cuda_options);
  }

  if (log_ifo->log_level_1) spdlog::get("logger")->info("ONNXModelManager::load_model model_path: {} ", model_path);
  if (!bf::exists(model_path)) {
    // Load the encrypted model from memory via the get_model hook.
    if (get_model == nullptr) {return model_load_so_error;}
    void * tmpptr = oxp->session;
    void* tmpp = nullptr;
    ret = get_model(model_id,&tmpptr,&oxp->sessionOptions,&tmpp,oxp->cfg->gpu_id);  // fetch the model data
    if (ret != 0) {
      // BUGFIX: log message previously said "HBModelManager" (copy-paste from
      // another manager) — corrected so diagnostics point at the right class.
      spdlog::get("logger")->info("ONNXModelManager::load_model. get_model ret: {}, model_id:{}", ret, model_id);
      printf("get_model error ret=%d\n", ret);
      return ret;
    }
    oxp->session = (Ort::Session*)tmpptr;
    printf("get_model ok!!\n");
  }
  else{
    // Load a plain (unencrypted) model file from disk.
#ifdef _WIN32
    // NOTE(review): byte-wise widening only round-trips ASCII paths — confirm
    // non-ASCII model paths cannot occur on Windows deployments.
    std::wstring widestr = std::wstring(model_path.begin(), model_path.end());
    const wchar_t* widestrc = widestr.c_str();
    oxp->session = new Ort::Session(oxp->env, widestrc, oxp->sessionOptions);
#else
    oxp->session = new Ort::Session(oxp->env, model_path.c_str(), oxp->sessionOptions);
#endif
  }

  Ort::AllocatorWithDefaultOptions allocator;
  if (log_ifo->log_level_1) spdlog::get("logger")->info("ONNXModelManager::load_model oxp->session: {} ", oxp->session == nullptr);

  // Cache per-input element counts and dims on the pack.
  oxp->input_count = oxp->session->GetInputCount();
  if (log_ifo->log_level_1) spdlog::get("logger")->info("ONNXModelManager::load_model oxp->input_count: {} ", oxp->input_count);

  oxp->input_tensor_sizes.resize(oxp->input_count);
  oxp->input_tensor_dims.resize(oxp->input_count);

  for (size_t idx = 0; idx < oxp->input_count; idx++) {
    Ort::AllocatedStringPtr inputNamePtr = oxp->session->GetInputNameAllocated(idx, allocator);
    const char* inputName = inputNamePtr.get();
    std::cout << "inputName: " << inputName << std::endl;

    Ort::TypeInfo input_type = oxp->session->GetInputTypeInfo(idx);
    auto input_tensor_ifo = input_type.GetTensorTypeAndShapeInfo();
    ONNXTensorElementDataType inputNodeDataType = input_tensor_ifo.GetElementType();
    std::cout << "inputNodeDataType: " << inputNodeDataType << std::endl;
    std::vector<int64_t> shapes = input_tensor_ifo.GetShape();
    // BUGFIX: previously printed shapes[0..3] unconditionally — out-of-bounds
    // read (UB) for any input whose rank != 4. Print whatever rank is reported.
    std::cout << "Input shapes:";
    for (int64_t dim : shapes) std::cout << " " << dim;
    std::cout << std::endl;
    // TODO: unify the size formula.
    // Other models:
    // int64_t cur_tensor_size = accumulate(shapes.begin()+1, shapes.end(), 1, std::multiplies<int64_t>());
    // Video classification model (product includes the batch dim):
    // NOTE(review): dynamic dims are reported as -1 and would poison this
    // product — confirm deployed models have fully static input shapes.
    int64_t cur_tensor_size = accumulate(shapes.begin(), shapes.end(), 1, std::multiplies<int64_t>());
    std::cout << "cur_tensor_size: " << cur_tensor_size << std::endl;
    oxp->input_tensor_sizes[idx] = cur_tensor_size;
    oxp->input_tensor_dims[idx] = shapes;
  }

  // Cache per-output dims and pre-size the output value buffers.
  oxp->output_count = oxp->session->GetOutputCount();
  oxp->output_tensor_dims.resize(oxp->output_count);
  oxp->output_tensor_values.resize(oxp->output_count);

  for (size_t idx = 0; idx < oxp->output_count; idx++) {
    Ort::AllocatedStringPtr outputNamePtr = oxp->session->GetOutputNameAllocated(idx, allocator);
    const char* outputName = outputNamePtr.get();
    std::cout << "outputName: " << outputName << std::endl;

    Ort::TypeInfo output_type = oxp->session->GetOutputTypeInfo(idx);
    auto output_tensor_ifo = output_type.GetTensorTypeAndShapeInfo();
    ONNXTensorElementDataType outputNodeDataType = output_tensor_ifo.GetElementType();
    std::cout << "outputNodeDataType: " << outputNodeDataType << std::endl;
    std::vector<int64_t> shapes = output_tensor_ifo.GetShape();
    // BUGFIX: same out-of-bounds fix as on the input side — print actual rank.
    std::cout << "Output shapes:";
    for (int64_t dim : shapes) std::cout << " " << dim;
    std::cout << std::endl;

    // Per-sample element count: skip the batch dim (shapes[0]).
    int64_t cur_tensor_size = accumulate(shapes.begin()+1, shapes.end(), 1, std::multiplies<int64_t>());
    std::cout << "cur_tensor_size: " << cur_tensor_size << std::endl;
    oxp->output_tensor_values[idx].resize(cur_tensor_size);
    oxp->output_tensor_dims[idx] = shapes;
  }

  return 0;
}



/// Tears down every registered predictor and its ONNX pack.
/// NOTE(review): md_idx is ignored — this frees ALL models in `infers`, not
/// just the one at md_idx; confirm callers expect whole-manager teardown.
/// @return always 0
int ONNXModelManager::free_model(int md_idx) {
  for (auto& infer : infers) {
    ONNXPredictor* cur_infer = (ONNXPredictor*)infer;
      // std::cout << "1.1.1"  << std::endl;
    // Stop any inference activity before releasing the pack it uses.
    cur_infer->stop();
      // std::cout << "1.1.2"  << std::endl;
    // cfg is owned here, not by the pack's destructor (it is deleted
    // separately from mdl_ox below).
    delete cur_infer->mdl_ox->cfg;
      // std::cout << "1.1.3"  << std::endl;
    // NOTE(review): Ort release() relinquishes the underlying ORT handle
    // without freeing it, and the heap-allocated Ort::Session object itself
    // is never deleted — this looks like a leak, unless ownership of the
    // handle lives elsewhere (e.g. the get_model hook) — TODO confirm.
    cur_infer->mdl_ox->sessionOptions.release();
      // std::cout << "1.1.4"  << std::endl;
    cur_infer->mdl_ox->session->release();
      // std::cout << "1.1.5"  << std::endl;
    delete cur_infer->mdl_ox;
    delete cur_infer;
      // std::cout << "1.1.6"  << std::endl;
  }
  infers.clear();

  return 0;
}

}