#include <malloc.h>

#include <mutex>

#include "base_predictor.h"
#include "model_manager.h"
#include "base_util/utils.h"
#include "common/model_config.h"
#include "common/base_struct.h"


#define CYCLE_SIZE 3

namespace ai {


/// Constructs the manager in a "not initialized" state (init_ok == -1) with
/// both worker stop flags at CameraClosed. On non-Windows builds it also
/// tries to load the optional auth shared library and resolve its entry
/// points.
///
/// @param log_info  borrowed logging context pointer; not owned.
ModelManager::ModelManager(LogInfo *log_info):
  log_ifo(log_info), init_ok(-1),
  stop_flag1(common::CameraClosed), stop_flag2(common::CameraClosed) {

  in_data_cnt = 0;

#ifdef _WIN32
#else
  // Null-init the dl handle and resolved symbols first, so every failure
  // path below (library file absent, dlopen failure) leaves them in a
  // defined state — previously they stayed indeterminate in those cases.
  auth_handle = nullptr;
  init_param = nullptr;
  get_all_model_info = nullptr;
  get_model = nullptr;

  // Optionally load the auth library and resolve its entry points.
  std::string auth_path = "/userdata/service/lib/libauth.so";
  if (bf::exists(auth_path)) {
    auth_handle = dlopen(auth_path.c_str(), RTLD_LAZY);
    if (auth_handle) {
      // NOTE(review): dlsym may legitimately return nullptr for a missing
      // symbol; callers must check these pointers before invoking them.
      init_param = (INIT_PARAM)dlsym(auth_handle, "init_param");
      get_all_model_info = (GET_ALL_MODEL_INFO)dlsym(auth_handle, "get_all_model_info");
      get_model = (GET_MODEL)dlsym(auth_handle, "get_model");
    } else {
      printf("open failed: %s\n", dlerror());
    }
  }
#endif
}

/// Destructor — intentionally empty.
///
/// NOTE(review): resources acquired elsewhere are not released here:
///   - the dlopen() handle (auth_handle) is never dlclose()d;
///   - the stream::ImageBlob objects new'd in init_model() are never deleted;
///   - the BasePredictor instances in `infers` (whose run_thread threads were
///     detached) are never stopped or freed.
/// This is only acceptable if the manager lives for the whole process —
/// TODO confirm, and add explicit shutdown/cleanup otherwise. Freeing the
/// blobs/predictors here while detached threads may still touch them would
/// be unsafe, so any fix must first join/stop the workers.
ModelManager::~ModelManager(){

}

/// Initializes every requested model: loads its config, pre-allocates the
/// per-channel input slots, and spawns one detached inference thread per
/// configured GPU. Marks the manager ready (init_ok = 0) at the end.
///
/// @param init_model_datas  one entry per model to bring up.
/// @param debug_mode        non-zero forces CPU inference (use_gpu = false).
/// @return always 0 — models whose config fails to load are skipped, not
///         reported. NOTE(review): `ret` is set on failure but the function
///         still returns 0; confirm callers don't expect an error code.
int ModelManager::init_model(std::vector<InitModelData> &init_model_datas, int debug_mode) {
  int ret = 0;

  for (size_t idx = 0; idx < init_model_datas.size(); idx++) {
    auto& imd = init_model_datas[idx];
    std::string cfg_path;
    // "mdoel_cfg_path" is the (misspelled) field name declared on
    // InitModelData — do not "fix" it here without changing the struct.
    cfg_path = bf::path(imd.model_dir).append(imd.model_prefix).append(imd.mdoel_cfg_path).string();

    ModelConfig cfg = ModelConfig(log_ifo);
    if ((ret = cfg.load_config(cfg_path)) != 0){
      printf("[ERROR] load_config fail ret:%d\n", ret);
      continue;  // skip this model, keep initializing the rest
    }
    // The model binary lives next to its config file.
    std::string model_path = bf::path(cfg_path).parent_path().append("model.bin").string();

    // Every model has its own unique model_id: pre-allocate one input slot
    // per channel. NOTE(review): if init_model is called twice for the same
    // model_id, the previous ImageBlob pointers leak.
    in_datas[imd.model_id].resize(MAX_SUPPORT_CHANNEL, nullptr);
    for (int ii = 0; ii < MAX_SUPPORT_CHANNEL; ii++) {
      in_datas[imd.model_id][ii] = new stream::ImageBlob();
    }
    in_loop_idx[imd.model_id] = 0;
    
    if (debug_mode) {cfg.use_gpu=false;}
    // CPU path still needs one worker, so fall back to a single pseudo-id.
    if (!cfg.use_gpu) {cfg.gpu_ids = {0};}
    // Start one inference thread per device listed in the config.
    for (auto gpu_idx : cfg.gpu_ids) {
      BasePredictor* infer = nullptr;

      ret = init_model_imp(model_path, cfg_path, gpu_idx, imd, &infer, debug_mode);
      if (ret != 0) {continue;}
      
      // Detached thread: lifetime is tied to the process, not this object.
      std::thread(&BasePredictor::run_thread, infer).detach();
      infers.push_back(infer);
    }

    // Record the model's index/type for later lookup.
    total_mdl[imd.model_id] = ModuleInfo(imd.model_index, imd.model_type);
  }
  init_ok = 0;
  // Log a summary of everything that was registered.
  std::string log_str = "total model:\n";
  for (auto& tm : total_mdl) {
    log_str += "\t\t" + tm.first + "\t\t" + std::to_string(tm.second.mdl_idx)+ "\n";
  }
  spdlog::get("logger")->info("ModelManager::init_model: {} ", log_str);

  return 0;
}


/// No-op placeholder: the inference threads are already started (detached)
/// inside init_model(), so there is nothing left to do here.
/// @return always 0.
int ModelManager::start_run_thread() {

  
  return 0;
}

int ModelManager::stop_run_thread() {
  int ret = 0;
  if (stop_flag1 == common::CameraGrabing) {
    stop_flag1 = common::CameraClosed;
    while (stop_flag1 != common::CameraOpened) {
      spdlog::get("logger")->info("ModelManager::stop_run_thread stop_flag1: {} ", stop_flag1);
      std::this_thread::sleep_for(milliseconds(50));
    }
  }
  if (stop_flag2 == common::CameraGrabing) {
    stop_flag2 = common::CameraClosed;
    while (stop_flag2 != common::CameraOpened) {
      spdlog::get("logger")->info("ModelManager::stop_run_thread stop_flag2: {} ", stop_flag2);
      std::this_thread::sleep_for(milliseconds(50));
    }
  }
  // free
  out_data_mtx.lock();
  for (auto it = out_datas.begin(); it != out_datas.end(); it++) {
    for (auto it2 = it->second.begin(); it2 != it->second.end(); it2++) {
      for (auto& det_info : it2->second.infer_res) {delete det_info;}
    }
    it->second.clear();
  }
  out_data_mtx.unlock();
#ifdef _WIN32
#else
  malloc_trim(0);
#endif

  return 0;
}


int ModelManager::set_data(std::string model_id, int& set_idx, stream::ImageBlob& img) {
  set_idx = -1;

  // 在赋值的时候会标记 rec_flag 为 0 
  in_data_mtx.lock();
  for (int cur_idx = 0; cur_idx < MAX_SUPPORT_CHANNEL; cur_idx++) {
    if (in_datas[model_id][cur_idx]->rec_flag == 0) { continue; }

    img.model_id = model_id;
    *in_datas[model_id][cur_idx] = img;
    in_datas[model_id][cur_idx]->infer_id = set_data_idx;
    set_idx = set_data_idx;
    out_data_mtx.lock();
    out_datas[model_id][set_data_idx] = ai::InferResult();
    out_data_mtx.unlock();
    set_data_idx++;
    break;
  }
  if (set_data_idx > 1e6) {set_data_idx = 0;}
  in_data_mtx.unlock();

  return set_idx < 0 ? -1 : 0;
}

int ModelManager::get_data(std::string model_id, int idx, ai::InferResult& infer_res) {
  out_data_mtx.lock();
  if (out_datas[model_id].count(idx) == 0 || out_datas[model_id][idx].infer_id < 0) {out_data_mtx.unlock(); return -1;}
  infer_res = out_datas[model_id][idx];
  out_datas[model_id].erase(idx);
  out_data_mtx.unlock();
  return 0;
}

int ModelManager::set_data(std::vector<std::string> model_ids, std::vector<int>& idxs, stream::ImageBlob& img) {
  idxs.resize(model_ids.size(), -1);
  in_data_mtx.lock();

  // 先计算当前是否能分配成功
  std::vector<int> set_idxs(model_ids.size(), -1);
  for (int index = 0; index < model_ids.size(); index++) {
    auto& model_id = model_ids[index];

    for (int cur_idx = 0; cur_idx < MAX_SUPPORT_CHANNEL; cur_idx++) {
      if (in_datas[model_id][cur_idx]->rec_flag == 0) { continue; }

      set_idxs[index] = cur_idx;
      break;
    }
  }
  // 不符合条件 直接返回
  if (std::count(set_idxs.begin(), set_idxs.end(), -1) > 0) {in_data_mtx.unlock(); return -1;}

  // 在赋值的时候会标记 rec_flag 为 0 
  for (int index = 0; index < model_ids.size(); index++) {
    auto& model_id = model_ids[index];

    auto cur_idx = set_idxs[index];

    img.model_id = model_id;
    *in_datas[model_id][cur_idx] = img;
    in_datas[model_id][cur_idx]->infer_id = set_data_idx;
    idxs[index] = set_data_idx;
    out_data_mtx.lock();
    out_datas[model_id][set_data_idx] = ai::InferResult();
    out_data_mtx.unlock();
    set_data_idx++;

    if (set_data_idx > 1e6) {set_data_idx = 0;}
  }

  in_data_mtx.unlock();
  return std::count(idxs.begin(), idxs.end(), -1);
}

int ModelManager::get_data(std::vector<std::string> model_ids, std::vector<int> idxs, ai::InferResult& infer_res) {
  out_data_mtx.lock();
  int work_end = 0;
  for (int index = 0; index < model_ids.size(); index++) {
    auto& model_id = model_ids[index];
    if (out_datas[model_id].count(idxs[index]) == 0 || out_datas[model_id][idxs[index]].infer_id < 0) {
      work_end = -1;
    }
  }
  if (work_end != 0) {out_data_mtx.unlock(); return -1;}

  for (int index = 0; index < model_ids.size(); index++) {
    auto& model_id = model_ids[index];
    if (index == 0) {
      infer_res = out_datas[model_id][idxs[index]];
    } else {
      auto& ir = out_datas[model_id][idxs[index]].infer_res;
      infer_res.infer_res.insert(infer_res.infer_res.end(), ir.begin(), ir.end());
    }
    out_datas[model_id].erase(idxs[index]);
  }

  out_data_mtx.unlock();
  return 0;
}



} // namespace ai