#include "tensorrt/trtmodel_manager.h"
#include "tensorrt/det/det_predictor.h"
#include "common/model_config.h"
#include "common/visualize.h"
#include "stream/osd_text.h"
#include "infer_demo.h"

using namespace httplib;

// Guards the two model-conversion queues below.
std::mutex cvt_lock;
// Pending conversions: (model_id, onnx model path) pairs, consumed by run_cvt_model_thread.
std::vector<std::pair<std::string, std::string>> cvt_in_mdls;
// Finished conversions: (model_id, trt model url relative to SAVE_DIR); an empty url means the conversion failed.
std::vector<std::pair<std::string, std::string>> cvt_out_mdls;
// Root directory for uploaded model files; also the HTTP mount point for downloads.
std::string SAVE_DIR = "./data";



// Runs single-image inference on the model identified by `model_id` and writes
// an annotated copy of the input image to "<img_path>.rec.jpg".
// Returns 0 on success, -1 if the image cannot be read.
int infer_img_ocr_rec(ai::TRTModelManager* model, std::string model_id, std::string img_path) {
  int ret = 0;

  int frame_cnt = 0;
  int rec_idx = 0;
  stream::ImageBlob blob(stream::ImageBlobMode_BGR);
  blob.model_id = model_id;

  cv::Mat bgr_img = cv::imread(img_path);
  if (bgr_img.empty()) {
    // cv::imread returns an empty Mat on failure; bail out instead of feeding
    // an empty frame to the model.
    printf("imread failed: %s\n", img_path.c_str());
    return -1;
  }
  blob.img = bgr_img;

  frame_cnt++;
  model->set_data(blob.model_id, rec_idx, blob);

  // Poll until the asynchronous inference result for this frame is ready.
  ai::InferResult infer_res;
  while ((ret = model->get_data(blob.model_id, rec_idx, infer_res)) != 0) {
    std::this_thread::sleep_for(milliseconds(3));
  }

  // Draw every detection (box + "category:score" label) onto the frame.
  for (auto& det_info : infer_res.infer_res) {
    ai::DetInfo* di = (ai::DetInfo*)det_info;
    cv::rectangle(blob.img, di->det_box, cv::Scalar(0, 0, 255), 2);
    cv::putText(blob.img, di->category + ":" + std::to_string(di->score), cv::Point(di->det_box.x, di->det_box.y - 20), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 0, 255), 2);
    delete det_info;  // results are heap-allocated by the model; caller frees them
  }
  cv::imwrite(img_path+".rec.jpg", blob.img);
  printf("frame_cnt:%d\n", frame_cnt);
  return 0;
}

// Two-stage OCR on a single image: text detection (init_model_datas[0])
// followed by text recognition (init_model_datas[1]) on each detected region.
// Draws the recognized text via `osd` and writes "<img_path>.draw.jpg"
// (plus "<img_path>.crop.jpg" for the last processed crop).
// Returns 0 on success, -1 on read or dispatch failure.
int infer_img_ocr_det_and_rec(ai::TRTModelManager* model, stream::CVOsdText* osd, std::vector<ai::InitModelData>& init_model_datas, std::string img_path) {
  int ret = 0;

  int frame_cnt = 0;
  stream::ImageBlob blob_det(stream::ImageBlobMode_BGR);
  stream::ImageBlob blob_rec(stream::ImageBlobMode_BGR);
  stream::ImageBlob draw_data(stream::ImageBlobMode_BGR);
  // Stage 1 uses the detection model, stage 2 the recognition model.
  blob_det.model_id = init_model_datas[0].model_id;
  blob_rec.model_id = init_model_datas[1].model_id;

  cv::Mat bgr_img = cv::imread(img_path);
  if (bgr_img.empty()) {
    // cv::imread returns an empty Mat on failure.
    printf("imread failed: %s\n", img_path.c_str());
    return -1;
  }
  blob_det.img = bgr_img;

  frame_cnt++;
  int infer_id = -1;
  model->set_data(blob_det.model_id, infer_id, blob_det);
  if (infer_id < 0) { return -1; }

  // Poll for the detection result of this frame.
  ai::InferResult infer_res;
  while ((ret = model->get_data(blob_det.model_id, infer_id, infer_res)) != 0) {
    std::this_thread::sleep_for(milliseconds(3));
  }

  // Stage 2: run recognition on every detected text region. `err_ret` defers
  // early returns until the loop has freed every heap-allocated detection
  // result (the previous version leaked the remaining entries on failure).
  int err_ret = 0;
  for (auto& det_info : infer_res.infer_res) {
    ai::TextDetInfo* di = (ai::TextDetInfo*)det_info;
    if (err_ret == 0) {
      // Deskew/crop the detected quadrilateral out of the original frame.
      cv::Mat crop_image;
      ai::GetRotateCropImage(bgr_img, di->four_points, crop_image);
      cv::imwrite(img_path+".crop.jpg", crop_image);

      blob_rec.img = crop_image;
      int infer_rec_id = -1;
      model->set_data(blob_rec.model_id, infer_rec_id, blob_rec);
      if (infer_rec_id < 0) {
        err_ret = -1;
      } else {
        // Poll for the recognition result of this crop.
        ai::InferResult plate_infer_res;
        while ((ret = model->get_data(blob_rec.model_id, infer_rec_id, plate_infer_res)) != 0) {
          std::this_thread::sleep_for(milliseconds(3));
        }

        if (plate_infer_res.infer_res.size() > 0) {
          ai::TextRecInfo* plate_rec_ifo = (ai::TextRecInfo*)plate_infer_res.infer_res[0];

          std::vector<stream::OSDDrawInfo> osdd_ifos;
          osdd_ifos.push_back(stream::OSDDrawInfo(di->det_box.x, di->det_box.y, di->det_box.width, di->det_box.height, di->score));
          osdd_ifos.back().texts.push_back("N"+std::to_string(di->score));
          osdd_ifos.back().texts.push_back("N"+plate_rec_ifo->text+":"+std::to_string(plate_rec_ifo->score));
          osdd_ifos.back().texts.push_back("N"+plate_rec_ifo->color);

          // draw_osd renders into the original frame, accumulating across regions.
          draw_data.img = bgr_img;
          osd->draw_osd(draw_data, osdd_ifos);
          cv::imwrite(img_path + ".draw.jpg", bgr_img);
          // Free ALL recognition results, not just the first one used above
          // (the previous version leaked entries [1..n)).
          for (auto& rec_info : plate_infer_res.infer_res) { delete rec_info; }
        }
      }
    }
    delete det_info;
  }
  printf("frame_cnt:%d\n", frame_cnt);
  return err_ret;
}


// HTTP handler: POST /api/trt/addModel. Body: {"modelId": ..., "modelUrl": ...}.
// Validates that the referenced ONNX file exists under SAVE_DIR and queues it
// for conversion by run_cvt_model_thread. Fills `ret_str` with the JSON reply.
// Returns 0 on success, a parse/key/file error code otherwise.
int http_addModel(std::string body, std::string& ret_str) {
  int ret = 0;

  Json::Value json_data;
  ret = parse_json(body, json_data, false);
  if (ret != 0) { return ret; }

  std::string model_id = json_data.get("modelId", Json::Value("")).asString();
  std::string model_url = json_data.get("modelUrl", Json::Value("")).asString();
  if (model_id.empty() || model_url.empty()) { return HTTP_KEY_ERROR;}

  std::string model_path = bf::path(SAVE_DIR).append(model_url).string();
  if (!bf::exists(model_path)) {return file_not_exist_error;}

  {
    // RAII lock: the manual lock()/unlock() pair left the mutex locked forever
    // if push_back threw (e.g. std::bad_alloc).
    std::lock_guard<std::mutex> lk(cvt_lock);
    cvt_in_mdls.push_back(std::make_pair(model_id, model_path));
  }

  Json::Value ret_root;
  ret_root["code"] = 0;
  ret_root["msg"] = "模型添加成功，正在转换中...";
  ret_str = ret_root.toStyledString();
  return 0;
}

// HTTP handler: POST /api/trt/queryModel. Body: {"modelId": ...}.
// Pops the finished conversion for `modelId` from cvt_out_mdls (if any) and
// answers with either "still converting" (code 2350) or the converted model's
// download url. Fills `ret_str` with the JSON reply.
// Returns 0 on success, a parse/key error code otherwise.
int http_queryModel(std::string body, std::string& ret_str) {
  int ret = 0;

  Json::Value json_data;
  ret = parse_json(body, json_data, false);
  if (ret != 0) { return ret; }

  std::string model_id = json_data.get("modelId", Json::Value("")).asString();
  if (model_id.empty()) { return HTTP_KEY_ERROR;}

  std::string model_url = "";
  {
    // RAII lock: safe even if erase/assignment were to throw.
    std::lock_guard<std::mutex> lk(cvt_lock);
    for (auto it = cvt_out_mdls.begin(); it != cvt_out_mdls.end();) {
      if (it->first != model_id) { ++it; continue; }
      model_url = it->second;
      it = cvt_out_mdls.erase(it);  // query is consume-once
      break;
    }
  }

  Json::Value ret_root;
  if (model_url.empty()){
    ret_root["code"] = 2350;
    ret_root["msg"] = "模型正在转换中...";
  } else {
    ret_root["code"] = 0;
    ret_root["modelId"] = model_id;
    ret_root["modelUrl"] = model_url;
    ret_root["msg"] = "模型转换成功";
  }
  ret_str = ret_root.toStyledString();
  return 0;
}


// Background worker: drains cvt_in_mdls one entry at a time, derives the .trt
// output path next to the .onnx input, and publishes (model_id, url) into
// cvt_out_mdls. The published url is relative to SAVE_DIR; empty on failure.
// Runs forever. `model` is a pointer-to-pointer so the owner may swap the
// manager instance.
int run_cvt_model_thread(ai::TRTModelManager** model) {
  int ret = 0;

  while (true) {
    std::this_thread::sleep_for(milliseconds(20));

    std::pair<std::string, std::string> in_mdl;
    {
      // Scoped lock; `continue` unlocks via the guard's destructor.
      std::lock_guard<std::mutex> lk(cvt_lock);
      if (cvt_in_mdls.empty()) { continue; }
      in_mdl = cvt_in_mdls.front();
      cvt_in_mdls.erase(cvt_in_mdls.begin());
    }

    std::string onnx_model_path = in_mdl.second;
    std::string trt_model_path = bf::path(onnx_model_path).parent_path().append(bf::path(onnx_model_path).filename().stem().string()+".trt").string();
    // NOTE(review): the actual conversion is disabled, so ret stays 0 and the
    // service reports success without producing a .trt file — re-enable before
    // shipping.
    // ret = (*model)->build_network(0, onnx_model_path, trt_model_path);
    std::cout << "build_network ret: " << ret << std::endl;

    if (ret == 0) {
      // Strip SAVE_DIR so clients receive a server-relative download url.
      boost::algorithm::replace_all(trt_model_path, SAVE_DIR, "");
    } else {
      trt_model_path = "";
    }

    {
      std::lock_guard<std::mutex> lk(cvt_lock);
      cvt_out_mdls.push_back(std::make_pair(in_mdl.first, trt_model_path));
    }
  }

  return 0;
}

// Runs the model-conversion HTTP service (blocking): spawns the conversion
// worker thread, mounts ./data for downloads, and serves upload/add/query
// endpoints on port 9993. `model` is forwarded to the worker thread.
int run_http_listen(ai::TRTModelManager** model) {
  int ret = 0;
  std::mutex save_lock;  // serializes timestamp+random filename generation across upload requests

  // Detached worker; lives for the process lifetime (listen() below never returns).
  std::thread(run_cvt_model_thread, model).detach();


  // Start the HTTP server.
  Server svr;
  ret = svr.set_mount_point("/data", "./data");  // serve converted models for download
  svr.Get("/hi", [](const Request& req, Response& res) {
    res.set_content("c++ service for trt model convert start ok!", "text/plain");
  });


  // Multipart upload: stores the file under SAVE_DIR/upload_data/<date>/ with a
  // unique "timestamp.random.originalname" filename and returns its relative url.
  svr.Post("/api/trt/uploadModel", [&](const auto& req, auto& res) {
    std::string return_msg;
    auto size = req.files.size();
    auto ret = req.has_file("file");
    if (!ret) { return_error(ret, "字段错误: file.", return_msg); }
    else {
      const auto& file = req.get_file_value("file");
      save_lock.lock();
      int64_t cur_time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
      std::string write_name = std::to_string(cur_time) + "." + randstr(8, 0) + "." + file.filename;
      save_lock.unlock();
      std::string cur_save_dir = bf::path(SAVE_DIR).append("upload_data").append(time_int64_to_string(cur_time, "%Y-%m-%d")).string();
      if (!bf::exists(cur_save_dir)) {bf::create_directories(cur_save_dir);}
      std::string write_path = bf::path(cur_save_dir).append(write_name).string();
      
      // file.filename; file.content_type; file.content;
      // Scope the ofstream so it is flushed/closed before we build the reply.
      {
        std::ofstream ofs(write_path, std::ios::binary);
        ofs << file.content;
      }
      // Strip SAVE_DIR so the client gets a url relative to the /data mount.
      std::string url = boost::algorithm::replace_all_copy(write_path, SAVE_DIR, "");
      Json::Value root;
      root["code"] = 200;
      root["msg"] = "ok";
      Json::Value item;
      item["url"] = url;
      item["name"] = url;
      root["data"].append(item);
      return_msg = root.toStyledString();
    }
    res.set_content(return_msg, "application/json");
  });

  // Queue a previously-uploaded ONNX model for conversion.
  svr.Post("/api/trt/addModel", [&](const Request& req, Response& res) {
    std::string return_msg;
    int ret = 0;
    if (0 != (ret = http_addModel(req.body, return_msg))) {
      return_error(ret, "error", return_msg);
    }
    res.set_content(return_msg, "application/json");
  });

  // Poll conversion status / fetch the converted model's url.
  svr.Post("/api/trt/queryModel", [&](const Request& req, Response& res) {
    std::string return_msg;
    int ret = 0;
    if (0 != (ret = http_queryModel(req.body, return_msg))) {
      return_error(ret, "error", return_msg);
    }
    res.set_content(return_msg, "application/json");
  });

  svr.listen("0.0.0.0", 9993);  // blocks forever
  return 0;
}

// Entry point. Usage:
//   prog server                 -- run the HTTP model-conversion service
//   prog <model_dir> <file>     -- load models from <model_dir> and run
//                                  single-image detection on <file>
int main(int argc, char *argv[]){
  // argv[1]/argv[2] were previously read without bounds checks, crashing on
  // missing arguments.
  if (argc < 2) {
    std::cout << "usage: " << argv[0] << " <model_dir|server> [file_path]" << std::endl;
    return -1;
  }
  std::string model_dir = argv[1];

  int ret = 0;
  LogInfo *log_info = new LogInfo(1, "log", "log");

  // Replaces OpenCV putText so CJK text can be drawn.
  stream::CVStreamOsdInfo osd_ifo;
  osd_ifo.boxColorOK = cv::Scalar(255,0,0);
  osd_ifo.boxColorNG = cv::Scalar(0,0,255);
  osd_ifo.fontHeight = 48;
  stream::CVOsdText* osd = new stream::CVOsdText(&osd_ifo, log_info);
  if ((ret = osd->init_params()) != 0) {
    std::cout << "osd->init_params error: " << ret << std::endl;
    return ret;
  }

  ai::TRTModelManager* model = new ai::TRTModelManager(log_info);

  // // Needed when testing encrypted models
  // // Initialize authorization
  // ret = model->init_param(log_info, "Rockchip");
  // if(ret != 0){
  //   std::cout << "ERROR. init_param ret:" << ret << std::endl; 
  //   return ret;
  // }
  // // Fetch the supported algorithm labels
  // std::vector<au::authModelLabel> auth_mdls;
  // ret = model->get_all_model_info(auth_mdls);


  if (model_dir == "server") {
    return run_http_listen(&model);
  }

  // Inference mode needs the image/video path argument; fail before the
  // expensive model load.
  if (argc < 3) {
    std::cout << "usage: " << argv[0] << " <model_dir> <file_path>" << std::endl;
    return -1;
  }

  std::vector<ai::InitModelData> init_model_datas;
  // init_model_datas.push_back(ai::InitModelData(model_dir,"cls/firesmoke", "sdh67fgws", "firesmoke", ai::model_cls_video, 0, "cfg.yaml"));
  // init_model_datas.push_back(ai::InitModelData(model_dir,"det/common5", "sdh67fgws", "common5", ai::model_det, 0, "cfg.yaml"));
  // init_model_datas.push_back(ai::InitModelData(model_dir,"det/car", "sdh67fgws", "car", ai::model_det, 0, "cfg.yaml"));
  // init_model_datas.push_back(ai::InitModelData(model_dir,"det/cow", "gwssdh67f", cow_model_name, ai::model_det, 0, "cfg.yaml"));
  // init_model_datas.push_back(ai::InitModelData(model_dir, "det/plate", "7fgwssdh6", "plate", ai::model_det, 0, "cfg.yaml"));
  // init_model_datas.push_back(ai::InitModelData(model_dir, "ocr/plate/rec", "ssdh67fgw", "plate/rec", ai::model_text_rec, 1, "cfg.yaml"));
  init_model_datas.push_back(ai::InitModelData(model_dir,"det/common5", "gwssdh67f", "common5", ai::model_det, 1, "cfg.yaml"));

  ret = model->init_model(init_model_datas, 0);
  if (ret != 0) { 
    std::cout << "init_model ret: " << ret << std::endl;
    return ret; 
  }

  // Give the model workers time to finish loading before feeding frames.
  std::this_thread::sleep_for(std::chrono::milliseconds(3000));


  std::string file_path = argv[2];
  // // Performance test, supports multiple models
  // int max_size = 12;
  // performance_test(model, file_path, init_model_datas, max_size);

  // // When testing two models together, stagger the first inference; running
  // // them simultaneously triggers out-of-GPU-memory errors.
  // std::this_thread::sleep_for(std::chrono::milliseconds(3000));
  // std::thread(set_data, model, cow_model_name, max_size).detach();
  // get_data(model, cow_model_name, max_size);


  // Image / video inference
  // infer_det_mp4(model, osd, init_model_datas, file_path);
  // infer_det_mp4_plate(model, osd, file_path);
  // infer_det_img_plate(model, osd, file_path);
  infer_det_img(model, osd, init_model_datas, file_path);
  // infer_cls_video(model, osd, init_model_datas, file_path);
  // infer_det_img_to_via_file(model, osd, init_model_datas, file_path);
  // infer_face_img_det_rec(model, osd, file_path);
  // Multiple models inferring simultaneously
  // infer_det_imgmulti(model, osd, init_model_datas, file_path);


  // // Start the HTTP server
  // Server svr;
  // svr.Get("/hi", [](const Request& req, Response& res) {
  //   res.set_content("c++ service for zqdl smartbox start ok!", "text/plain");
  // });
  // svr.Post("/api/rec", [&](const Request& req, Response& res) {
  //   std::string return_msg;
  //   int ret = 0;
  //   if (0 != (ret = infer_det_img_api(model, init_model_datas, req.body, return_msg))) {
  //     return_error(ret, MyError::GetErrorString(ret), return_msg);
  //   }
  //   res.set_content(return_msg, "application/json");
  // });
  // svr.listen("0.0.0.0", 9918);

  return 0;
}
