
#include <json/json.h>
#include "base_util/utils.h"

// #include "ocr/ocr_det.h"
// #include "ocr/ocr_rec.h"
// #include "cls/cls_predictor.h"
#include "horizon/cls/cls_video.h"
#include "horizon/det/det_predictor.h"
// #include "horizon/det/det_yolov8_predictor.h"
// #include "seg/seg_predictor.h"


#include "common/model_config.h"
// #include "ocr/postprocess_op.h"

#include "horizon/hbmodel_manager.h"

#define CYCLE_SIZE 3

namespace ai {

// Constructs the Horizon BPU model manager.  All shared state lives in the
// ai::ModelManager base class, so the body is intentionally empty.
HBModelManager::HBModelManager(LogInfo *log_info)
    : ai::ModelManager(log_info) {}

// Destructor.  Per-model resources are handled separately (see free_model);
// nothing is released here.
HBModelManager::~HBModelManager() {}

int HBModelManager::init_model_imp(std::string model_path, std::string cfg_path, int gpu_idx, ai::InitModelData &imd, BasePredictor** infer, int debug_mode) {
  int ret = 0;

  ai::HBPackPredictor* hbp = new ai::HBPackPredictor();
  hbp->cfg = new ModelConfig(log_ifo);
  // boost::filesystem::path path(cfg_path);
  // std::string filename = path.filename().string();
  // std::cout<<"filename:"<<filename<<std::endl;
  // if(filename == "xxx"){return 0;}
  hbp->cfg->load_config(cfg_path);
  hbp->cfg->gpu_id = gpu_idx;
  hbp->model_id = imd.model_id;
  ret = load_model(model_path, hbp, imd.model_id);
  if (log_ifo->log_level_1) spdlog::get("logger")->info("HBModelManager::init_model load_model: {} ", ret);
  if (ret != 0){
    printf("load_model fail ret:%d\n");
    return ret;
  }


  if(hbp->cfg->algorithm == ai::model_cls_video) {
    *infer = new HBClsVideo(hbp, this, log_ifo);
  }
  else if(hbp->cfg->algorithm == ai::model_det) {
    *infer = new HBDetPredictor(hbp, this, log_ifo);
  }
  else {
    printf("init_model error. algorithm:%s\n", hbp->cfg->algorithm);
    return -1;
  }

  return 0;
}

int HBModelManager::load_model(std::string model_path, ai::HBPackPredictor* hbp, std::string model_id) {
  int ret = 0;

  if (bf::exists(model_path)) {
    const char *ccp = model_path.c_str();
    ret = hbDNNInitializeFromFiles(&hbp->dnn_packed, &ccp, 1);
  } else {
    // 从内存加载加密模型
    if (get_model == nullptr) {return model_load_so_error;}
    void* tmpp = nullptr; 
    ret = get_model(model_id,&hbp->dnn_packed,NULL,&tmpp,hbp->cfg->gpu_id);  //获取模型data
    if(ret != 0){spdlog::get("logger")->info("HBModelManager::load_model. get_model ret: {}, model_id:{}", ret, model_id);}
    if (ret != 0) {
      printf("get_model error ret=%d\n", ret);
      return ret;
    }
  }
  if (log_ifo->log_level_1) spdlog::get("logger")->info("HBModelManager::load_model. hbDNNInitializeFromDDR ret: {}", ret);
  if (ret != 0) {return model_model_init_error; } // "hbDNNInitializeFromFiles fail"; 

  const char **model_name_list;
  int32_t model_count = 0;
  ret = hbDNNGetModelNameList(&model_name_list, &model_count, hbp->dnn_packed);
  if (ret != 0) {return model_get_model_name_error; } // , "hbDNNGetModelNameList fail")
  if (log_ifo->log_level_1) spdlog::get("logger")->info("Model info:\nmodel_name: {}", model_name_list[0]);

  ret = hbDNNGetModelHandle(&hbp->dnn_handle, hbp->dnn_packed, model_name_list[0]);
  if (ret != 0) {return model_get_model_handle_error; } // , "hbDNNGetModelHandle fail")


  ret = hbDNNGetInputCount(&hbp->input_count, hbp->dnn_handle);
  if (ret != 0) {spdlog::get("logger")->info("hbDNNGetInputCount failed", model_name_list[0]);}
  ret = hbDNNGetOutputCount(&hbp->output_count, hbp->dnn_handle);
  if (ret != 0) {spdlog::get("logger")->info("hbDNNGetOutputCount failed");}


  // input size alloc
  if (log_ifo->log_level_1) spdlog::get("logger")->info("Input count: {}", hbp->input_count);
  hbp->input_tensors.resize(hbp->input_count);
  for (int i = 0; i < hbp->input_count; i++) {
    auto& t = hbp->input_tensors[i];
    ret = hbDNNGetInputTensorProperties(&t.properties, hbp->dnn_handle, i);
    if (ret != 0) {spdlog::get("logger")->info("hbDNNGetInputTensorProperties failed");}
    // printf("t.properties.tensorType:%d t.properties.tensorLayout:%d\n",t.properties.tensorType,t.properties.tensorLayout);
    // printf("validShape.dimensionSize [%d %d %d %d]  alignedByteSize:%d \n",t.properties.validShape.dimensionSize[0],t.properties.validShape.dimensionSize[1],t.properties.validShape.dimensionSize[2],t.properties.validShape.dimensionSize[3],t.properties.alignedByteSize);
    // std::cout << "t.properties.alignedByteSize:" << t.properties.alignedByteSize << std::endl;
    // int32_t yuv_size1 = t.properties.validShape.dimensionSize[1] * t.properties.validShape.dimensionSize[2] * t.properties.validShape.dimensionSize[3];
    // std::cout << "yuv_size1:" << yuv_size1 << std::endl;
    // hbSysAllocCachedMem(t.sysMem, yuv_size1);
    int32_t yuv_size = t.properties.alignedByteSize;
    hbSysAllocCachedMem(t.sysMem, yuv_size);
    // t.properties.alignedShape = t.properties.validShape;      

    std::string str = util::Format("input[{0}]: tensorLayout: {1} tensorType: {2} validShape:(", i, t.properties.tensorLayout, t.properties.tensorType) ;
    for (int j = 0; j < t.properties.validShape.numDimensions; j++) str += util::Format("{0}, ", t.properties.validShape.dimensionSize[j]);
    str += "), alignedShape:(";
    for (int j = 0; j < t.properties.alignedShape.numDimensions; j++) str += util::Format("{0}, ", t.properties.alignedShape.dimensionSize[j]);
    str += ")";
    if (log_ifo->log_level_1) spdlog::get("logger")->info("{}", str);
  }

  // output size alloc
  if (log_ifo->log_level_1) spdlog::get("logger")->info("Output count: {}", hbp->output_count);
  hbp->output_tensors.resize(hbp->output_count);
  for (int i = 0; i < hbp->output_count; i++) {
    auto& t = hbp->output_tensors[i];
    ret = hbDNNGetOutputTensorProperties(&t.properties, hbp->dnn_handle, i);
    if (ret != 0) {spdlog::get("logger")->info("hbDNNGetOutputTensorProperties failed");}

    // 获取模型输出尺寸
    int32_t aligned_size = 4;
    for (int32_t j = 0; j < t.properties.alignedShape.numDimensions; j++) {
      aligned_size *= t.properties.alignedShape.dimensionSize[j];
    }
    ret = hbSysAllocCachedMem(&t.sysMem[0], aligned_size);
    if (ret != 0) {
      spdlog::get("logger")->info("[BPU ERR] {}:hbSysAllocCachedMem failed!Error code:{}\n", __func__, ret);
      return ret;
    }

    std::string str = util::Format("Output[{0}]: tensorLayout: {1} tensorType: {2} validShape:(", i, t.properties.tensorLayout, t.properties.tensorType) ;
    for (int j = 0; j < t.properties.validShape.numDimensions; j++) str += util::Format("{0}, ", t.properties.validShape.dimensionSize[j]);
    str += "), alignedShape:(";
    for (int j = 0; j < t.properties.alignedShape.numDimensions; j++) str += util::Format("{0}, ", t.properties.alignedShape.dimensionSize[j]);
    str += ")";
    if (log_ifo->log_level_1) spdlog::get("logger")->info("{}", str);
  }
  return 0;
}

// Releases the predictor(s) identified by md_idx (all of them when
// md_idx < 0).  The teardown below is currently disabled, so this function
// is a no-op that always reports success — the BPU memory and the packed
// DNN handle are NOT actually released.
// NOTE(review): before re-enabling the commented code, confirm the
// `predictors` map still exists and that output_tensors is indexed by a
// CYCLE_SIZE'd ring as the disabled loop assumes.
int HBModelManager::free_model(int md_idx) {
  std::vector<int> rm_idx;
  // if (md_idx < 0) { for (auto& it : predictors) {rm_idx.push_back(it.first);}}
  // else { rm_idx.push_back(md_idx); }
  
  // for (auto k : rm_idx) {
  //   auto it = predictors.find(k);
  //   if (it != predictors.end()) {
  //     delete it->second->cfg;
  //     for (int i = 0; i < it->second->input_count; i++) { 
  //       hbSysFreeMem(&(it->second->input_tensors[i].sysMem[0])); 
  //     }
  //     for (int idx = 0; idx < CYCLE_SIZE; idx++) { 
  //       for (int i = 0; i < it->second->output_count; i++) { 
  //         hbSysFreeMem(&(it->second->output_tensors[idx][i].sysMem[0])); 
  //       }
  //     }
  //     hbDNNRelease(it->second->dnn_packed);
  //     // free(it->second);
  //     predictors.erase(it);
  //   }
  // }
  return 0;
}



} // namespace ai