#include <malloc.h>

#include <cstdio>
#include <cstring>
#include <vector>

#include <json/json.h>
#include "base_util/utils.h"

#include "rockchip/base/rk_predictor.h"
// #include "cls/cls_predictor.h"
#include "rockchip/cls/cls_video.h"
#include "rockchip/det/det_predictor.h"
#include "rockchip/ocr/ocr_det.h"
#include "rockchip/ocr/ocr_rec.h"
// #include "seg/seg_predictor.h"
#include "rockchip/face/scrfd_predictor.h"
#include "rockchip/face/facefeature_predictor.h"

#include "common/model_config.h"
// #include "ocr/postprocess_op.h"

#include "rockchip/rkmodel_manager.h"

#define CYCLE_SIZE 3

namespace ai {


// Print a human-readable summary of one RKNN tensor attribute to stdout
// (index, name, dims, element count, layout, dtype and quantization info).
void dump_tensor_attr(rknn_tensor_attr *attr) {
  const rknn_tensor_attr &a = *attr;
  printf("  index=%d, name=%s, n_dims=%d, dims=[%d, %d, %d, %d], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
          "zp=%d, scale=%f\n",
          a.index, a.name, a.n_dims,
          a.dims[0], a.dims[1], a.dims[2], a.dims[3],
          a.n_elems, a.size,
          get_format_string(a.fmt), get_type_string(a.type), get_qnt_type_string(a.qnt_type),
          a.zp, a.scale);
}

// Construct the Rockchip model manager. All shared bookkeeping (including
// the logging context) lives in the ModelManager base class.
RKModelManager::RKModelManager(LogInfo *log_info) : ai::ModelManager(log_info) {}

// No RK-specific resources are released here; per-model teardown is the
// responsibility of free_model().
RKModelManager::~RKModelManager() = default;


int RKModelManager::init_model_imp(std::string model_path, std::string cfg_path, int gpu_idx, ai::InitModelData &imd, BasePredictor** infer, int debug_mode) {
  int ret = 0;
  
  RKPackPredictor* rkp = new RKPackPredictor();
  rkp->cfg = new ModelConfig(log_ifo);
  rkp->cfg->load_config(cfg_path);
  rkp->cfg->gpu_id = gpu_idx;
  rkp->model_id = imd.model_id;
  ret = load_model(model_path, rkp, imd.model_id);
  if (log_ifo->log_level_1) spdlog::get("logger")->info("ModelManager::init_model load_model: {} ", ret);
  if (ret != 0){
    printf("load_model fail ret:%d\n", ret);
    return ret;
  }


  if(rkp->cfg->algorithm == ai::model_cls_video) {
    *infer = new RKClsVideo(rkp, this, log_ifo);
  }
  else if(rkp->cfg->algorithm == ai::model_det) {
    *infer = new RKDetPredictor(rkp, this, log_ifo);
  }
  else if(rkp->cfg->algorithm == ai::model_text_det) {
    *infer = new RKOcrDetPredictor(rkp, this, log_ifo);
  }
  else if(rkp->cfg->algorithm == ai::model_text_rec) {
    *infer = new RKOcrRecPredictor(rkp, this, log_ifo);
  }
  else if(rkp->cfg->algorithm == ai::model_face_det) {
    *infer = new RKScrfdPredictor(rkp, this, log_ifo);
  }
  else if(rkp->cfg->algorithm == ai::model_face_rec) {
    *infer = new RKFacefeaturePredictor(rkp, this, log_ifo);
  }
  else {
    printf("init_model error. algorithm:%s\n", rkp->cfg->algorithm);
    return -1;
  }

  return 0;
}


int RKModelManager::load_model(std::string model_path, RKPackPredictor* rkp,std::string model_id) {
  int ret = 0;

  // 使用加密接口从内存加载模型
  if (!bf::exists(model_path)) {
    // 从内存加载加密模型
    if (get_model == nullptr) {return model_load_so_error;}
    printf("get_model start!\n", ret);
    void* tmpptr = &rkp->ctx; 
    void* tmpp = nullptr; 
    int gpu_id = 0;
    ret = get_model(model_id,&tmpptr,NULL,&tmpp,gpu_id);  //获取模型data
    if(ret != 0){spdlog::get("logger")->info("HBModelManager::load_model. get_model ret: {}, model_id:{}", ret, model_id);}
    if (ret != 0) {
      printf("get_model error ret=%d\n", ret);
      return ret;
    }
    rkp->ctx = *((rknn_context*)tmpptr);
    printf("get_model ok \n");
  }
  else{
    //加载原始未加密模型
    FILE *fp;
    fp = fopen(model_path.c_str(), "rb");
    if (NULL == fp) {
      printf("Open file %s failed.\n", model_path.c_str());
      return NULL;
    }
    fseek(fp, 0, SEEK_END);
    int model_data_size = ftell(fp);
    if (NULL == fp) { return NULL; }
    ret = fseek(fp, 0, SEEK_SET);
    if (ret != 0) {
      printf("blob seek failure.\n");
      return NULL;
    }

    unsigned char *model_data = NULL;
    model_data = (unsigned char *)malloc(model_data_size);
    if (model_data == NULL) {
      printf("buffer malloc failure.\n");
      return NULL;
    }
    ret = fread(model_data, 1, model_data_size, fp);  
    fclose(fp);

    ret = rknn_init(&rkp->ctx, model_data, model_data_size, 0, NULL);
    if (ret < 0) {
      printf("rknn_init error ret=%d\n", ret);
      return ret;
    }
  }

  rknn_input_output_num io_num;
  ret = rknn_query(rkp->ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
  if (ret < 0) {
    printf("rknn_init error ret=%d\n", ret);
    return ret;
  }
  printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);
  rkp->input_count = io_num.n_input;
  rkp->output_count = io_num.n_output;

  rknn_tensor_attr input_attrs[rkp->input_count];
  memset(input_attrs, 0, sizeof(input_attrs));
  for (int i = 0; i < rkp->input_count; i++) {
    input_attrs[i].index = i;
    ret = rknn_query(rkp->ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret < 0) {
      printf("rknn_init error ret=%d\n", ret);
      return ret;
    }
    dump_tensor_attr(&(input_attrs[i]));
  }

  printf("input_attrs[0].fmt:%d\n", input_attrs[0].fmt);
  rkp->fmt = input_attrs[0].fmt;
  if (rkp->fmt == RKNN_TENSOR_NCHW) {
    rkp->nchw.push_back(input_attrs[0].dims[0]);
    rkp->nchw.push_back(input_attrs[0].dims[1]);
    rkp->nchw.push_back(input_attrs[0].dims[2]);
    rkp->nchw.push_back(input_attrs[0].dims[3]);
  } 
  else if (rkp->fmt == RKNN_TENSOR_NHWC) {
    rkp->nchw.push_back(input_attrs[0].dims[0]);
    rkp->nchw.push_back(input_attrs[0].dims[3]);
    rkp->nchw.push_back(input_attrs[0].dims[1]);
    rkp->nchw.push_back(input_attrs[0].dims[2]);
  }

  printf("model input batch=%d, channel=%d, height=%d, width=%d\n", rkp->nchw[0], rkp->nchw[1], rkp->nchw[2], rkp->nchw[3]);
  
  rknn_input input;
  input.index = 0;
  input.size = rkp->nchw[0] * rkp->nchw[1] * rkp->nchw[2] * rkp->nchw[3];
  input.pass_through = 0;
  input.type = RKNN_TENSOR_UINT8;
  input.fmt = RKNN_TENSOR_NHWC;
  rkp->inputs.push_back(input);

  rkp->outputs.resize(rkp->output_count);
  for (int i = 0; i < rkp->output_count; i++) { rkp->outputs[i].want_float = 0; } 

  return 0;
}




// Release the resources of one model (md_idx >= 0) or, presumably, of all
// models (md_idx < 0) — TODO confirm the md_idx < 0 convention with callers.
//
// TODO(review): teardown is NOT implemented for the Rockchip backend yet.
// The draft previously commented out here was copied from the Horizon (HB)
// backend (hbSysFreeMem / hbDNNRelease) and does not apply to RKNN; a real
// implementation should destroy each predictor's rknn context and delete its
// ModelConfig before erasing the entry.
int RKModelManager::free_model(int md_idx) {
  (void)md_idx;  // unused until teardown is implemented
  return 0;
}



} // namespace ai