#include <algorithm>
#include <cmath>
#include <functional>
#include <iomanip>
#include <limits>
#include <numeric>
#include <sstream>
#include <vector>

#include <opencv2/dnn.hpp>

#include "base_util/utils.h"
#include "scrfd_predictor.h"


namespace ai {


// Forwards the packed model, model manager and logger to the ONNXPredictor
// base class; no additional state is initialized here.
ONNXFaceDetPredictor::ONNXFaceDetPredictor(ONNXPackPredictor* model, ONNXModelManager* manager, LogInfo *lg):
  ONNXPredictor(model, manager, lg) { }


// No resources owned directly by this class; cleanup is handled by the base.
ONNXFaceDetPredictor::~ONNXFaceDetPredictor(){

}


int ONNXFaceDetPredictor::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  // Preprocess the frame, run the ONNX session, and dispatch to the
  // architecture-specific postprocessor selected by cfg->arch. Detections are
  // appended to det_infos as heap-allocated objects owned by the caller.
  //
  // NOTE(review): the transforms-based preprocessing that used to fill
  // img_blob is disabled, so img_blob keeps default scale/shape values. The
  // non-"CHW" input branch and the paddle/plate/YOLOV8 postprocess paths below
  // still read img_blob (and the member `outputs`, which is no longer
  // populated here) — confirm those paths are unused with this predictor.
  stream::ImageBlob img_blob(stream::ImageBlobMode_BGR);

  // Letterbox-resize into the network input size, then build a normalized
  // NCHW blob: (pixel - 127.5) / 128, BGR->RGB swap, no crop.
  cv::Mat padimg;
  SCRFDScaleParams scale_params;
  resize_unscale(blob->img, padimg, mdl_ox->cfg->input_shape[1], mdl_ox->cfg->input_shape[0], scale_params); // h, w
  cv::Mat blob_in = cv::dnn::blobFromImage(padimg, 1.0/128, cv::Size(mdl_ox->cfg->input_shape[1], mdl_ox->cfg->input_shape[0]), cv::Scalar(127.5, 127.5, 127.5), true, false);

  std::vector<Ort::Value> inputTensors;
  std::vector<const char *> input_names;
  // Ort::Value::CreateTensor wraps user memory without copying, so every
  // buffer handed to it must stay alive until session->Run returns.
  // Fixed: the scale_factor buffer was previously a loop-local vector that
  // went out of scope before Run (dangling pointer inside the tensor).
  std::vector<float> scale_factor_values;
  for (size_t idx = 0; idx < mdl_ox->cfg->input_names.size(); idx++) {
    auto& input_name = mdl_ox->cfg->input_names[idx];
    input_names.push_back(input_name.c_str());

    if (input_name == "image" || input_name == "images" || input_name == "input" || input_name == "input.1") {
      std::vector<int64_t> shape;
      float* pdata = nullptr;
      if (mdl_ox->cfg->data_format == "CHW") {
        pdata = (float*)blob_in.data;
        shape = { 1, mdl_ox->cfg->channels, padimg.rows, padimg.cols };
      }
      else {
        // HWC fallback: feeds the (currently disabled) transforms output —
        // see the NOTE above; img_blob is empty on this code path.
        pdata = (float*)img_blob.img.data;
        shape = { 1, img_blob.new_im_shape[0], img_blob.new_im_shape[1], mdl_ox->cfg->channels };
      }
      inputTensors.push_back(Ort::Value::CreateTensor<float>(mdl_ox->ortMemoryInfo,
                        pdata,
                        std::accumulate(shape.begin(), shape.end(), (int64_t)1, std::multiplies<int64_t>()),
                        shape.data(),
                        shape.size()));
    }
    else if (input_name == "scale_factor") {
      std::vector<int64_t> shape = {1, 2};
      scale_factor_values = {img_blob.scale, img_blob.scale};  // must outlive Run
      inputTensors.push_back(Ort::Value::CreateTensor<float>(mdl_ox->ortMemoryInfo,
                        scale_factor_values.data(),
                        std::accumulate(shape.begin(), shape.end(), (int64_t)1, std::multiplies<int64_t>()),
                        shape.data(),
                        shape.size()));
    }
  }

  std::vector<const char *> output_names;
  for (auto& str : mdl_ox->cfg->output_names) { output_names.push_back(str.c_str()); }

  std::vector<Ort::Value> output_tensor_values = mdl_ox->session->Run(
        Ort::RunOptions{nullptr}, input_names.data(),
        inputTensors.data(), input_names.size(),
        output_names.data(), output_names.size());

  // Collect raw output pointers and element counts. The pointers reference
  // memory owned by output_tensor_values, which lives until this function
  // returns. Fixed: this was a C variable-length array (non-standard C++).
  std::vector<float*> output_tensors(output_tensor_values.size(), nullptr);
  outputs_size.clear();
  for (size_t idx = 0; idx < output_tensor_values.size(); idx++) {
    int output_size = 0;
    if (output_tensor_values[idx].IsTensor()) {
      Ort::TensorTypeAndShapeInfo info = output_tensor_values[idx].GetTensorTypeAndShapeInfo();
      std::vector<int64_t> shape = info.GetShape();
      output_size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
    }
    output_tensors[idx] = output_tensor_values[idx].GetTensorMutableData<float>();
    outputs_size.push_back(output_size);
  }

  // Dispatch to the postprocessor for the configured architecture.
  if (mdl_ox->cfg->arch == "YOLOV5.paddle") {
    GetConvDetectionResult_paddle(img_blob, det_infos);
  }
  else if (mdl_ox->cfg->arch == "YOLOV5.plate") {
    GetConvDetectionResult_plate(img_blob, det_infos);
  }
  else if (mdl_ox->cfg->arch == "YOLOV8.out1") {
    GetConvDetectionResult(img_blob, det_infos);
  }
  else if (mdl_ox->cfg->arch == "SCRFD.face") {
    std::vector<BoxfWithLandmarks> detected_boxes;
    GetConvDetectionResult_face(output_tensors.data(), scale_params, mdl_ox->cfg->draw_threshold, mdl_ox->cfg->nms_threshold, blob->img.rows, blob->img.cols, det_infos, 400, detected_boxes);
  }
  else {
    spdlog::get("logger")->info("ERROR. ONNXFaceDetPredictor::RunDet. mdl_ox->cfg->arch error: {} ", mdl_ox->cfg->arch);
  }

  outputs.clear();
  outputs_size.clear();

  return 0;
}

// Decode a YOLOv8-style output: each tensor is [bs, 4+num_classes, num_boxes]
// and is transposed to one row per candidate ([cx, cy, w, h, scores...]).
// Candidates above draw_threshold go through OpenCV NMS and are appended to
// det_infos as heap-allocated DetInfo objects (ownership passes to caller).
// NOTE(review): iterates the member `outputs`, which RunDet currently never
// fills (its push_back is commented out) — verify this arch path is live.
int ONNXFaceDetPredictor::GetConvDetectionResult(stream::ImageBlob& img_blob, std::vector<BaseInfo*>& det_infos) {
  int ret = 0;

  std::vector<cv::Rect> boxs;
  std::vector<float> confidences;
  std::vector<int> labels;


  int class_num = mdl_ox->cfg->label_list.size();
  // Stride of one candidate row: 4 box coordinates + one score per class.
  int net_width = class_num+4;
  
  for (int idx = 0; idx < outputs.size(); idx++) {
	  cv::Mat output0 = cv::Mat(cv::Size((int)mdl_ox->output_tensor_dims[idx][2], (int)mdl_ox->output_tensor_dims[idx][1]), CV_32F, outputs[idx]).t();  //[bs,116,8400]=>[bs,8400,116]
    float* pdata = (float*)output0.data;
    int rows = output0.rows;

    for (int r = 0; r < rows; ++r) {    //stride
      // Best class score/index among the class_num scores after the 4 coords.
      cv::Mat scores(1, class_num, CV_32F, pdata + 4);
      cv::Point classIdPoint;
      double max_class_socre;
      cv::minMaxLoc(scores, 0, &max_class_socre, 0, &classIdPoint);
      max_class_socre = (float)max_class_socre;
      if (max_class_socre >= mdl_ox->cfg->draw_threshold) {

        //rect [x,y,w,h]: center/size in network space, mapped back to the
        // original image by dividing by the preprocessing scale.
        float x = pdata[0] / img_blob.scale;
        float y = pdata[1] / img_blob.scale;
        int w = pdata[2] / img_blob.scale;  // truncated toward zero
        int h = pdata[3] / img_blob.scale;
        // std::cout << "score:" << max_class_socre << " idx:" << classIdPoint.x << " x:" << x << " y: " << y << " w: " << w << " h:" << h << std::endl;

        // Convert center/size to a top-left corner and clamp to image bounds.
        int left = std::max(int(x - 0.5 * w + 0.5), 0);
        int top = std::max(int(y - 0.5 * h + 0.5), 0);
        if (left + w >= img_blob.ori_im_shape[1]) { w = img_blob.ori_im_shape[1] - left - 1;}
        if (top + h >= img_blob.ori_im_shape[0]) { h = img_blob.ori_im_shape[0] - top - 1;}
        labels.push_back(classIdPoint.x);
        confidences.push_back(max_class_socre);
        boxs.push_back(cv::Rect(left, top, w, h));
      }
      pdata += net_width;
    }
  }

  // Class-agnostic NMS over all candidates; keep at most 100 boxes.
  std::vector<int> indices;
  cv::dnn::NMSBoxes(boxs, confidences, mdl_ox->cfg->draw_threshold, mdl_ox->cfg->nms_threshold, indices, 100);
  for (auto& idx : indices) {
    DetInfo* di = new DetInfo(confidences[idx], labels[idx], boxs[idx], mdl_ox->cfg->label_list[labels[idx]]);
    det_infos.emplace_back(di);
  }

  return ret;
}

// Decode a YOLOv5-style license-plate head. Each candidate row has 15 values:
//   [0..3]  cx, cy, w, h (network space)
//   [4]     objectness
//   [5..12] four landmark (x, y) pairs (plate corners)
//   [13]    single-plate class score, [14] double-plate class score
// Final confidence = objectness * best plate-class score. Survivors of NMS
// are appended to det_infos as heap-allocated TextDetInfo objects.
// NOTE(review): iterates the member `outputs`, which RunDet currently never
// fills (its push_back is commented out) — verify this arch path is live.
int ONNXFaceDetPredictor::GetConvDetectionResult_plate(stream::ImageBlob& img_blob, std::vector<BaseInfo*>& det_infos) {
  int ret = 0;

  std::vector<cv::Rect> boxs;
  std::vector<std::vector<cv::Point>> land_marks;
  std::vector<float> confidences;
  std::vector<int> labels;
  std::vector<int> double_plates;   // 1 if the double-plate score won, else 0


  int class_num = mdl_ox->cfg->label_list.size();
  // Stride of one candidate row (see layout above).
  int net_width = 15;
  
  for (int idx = 0; idx < outputs.size(); idx++) {
	  cv::Mat output0 = cv::Mat(cv::Size((int)mdl_ox->output_tensor_dims[idx][2], (int)mdl_ox->output_tensor_dims[idx][1]), CV_32F, outputs[idx]);  
    float* pdata = (float*)output0.data;
    int rows = output0.rows;
    // std::cout << "shape: " << output0.rows << "," << output0.cols << std::endl;

    for (int r = 0; r < rows; ++r) {
      // Pick the stronger of the two plate-type scores, gated by objectness.
      float max_class_socre = 0;
      int double_plate = 0;
      if (pdata[14] > pdata[13]) {
        double_plate = 1;
        max_class_socre = pdata[14] * pdata[4];
      } else {
        max_class_socre = pdata[13] * pdata[4];
      }
      if (max_class_socre >= mdl_ox->cfg->draw_threshold) {

        //rect [x,y,w,h]: center/size mapped back to the original image.
        float w = pdata[2] / img_blob.scale;
        float h = pdata[3] / img_blob.scale;
        float x = pdata[0] / img_blob.scale;
        float y = pdata[1] / img_blob.scale;

        // Four plate-corner landmarks, also mapped back to image space.
        std::vector<cv::Point> land_mark;
        land_mark.push_back(cv::Point(pdata[5] / img_blob.scale, pdata[6] / img_blob.scale));
        land_mark.push_back(cv::Point(pdata[7] / img_blob.scale, pdata[8] / img_blob.scale));
        land_mark.push_back(cv::Point(pdata[9] / img_blob.scale, pdata[10] / img_blob.scale));
        land_mark.push_back(cv::Point(pdata[11] / img_blob.scale, pdata[12] / img_blob.scale));


        // Convert center/size to a top-left corner and clamp to image bounds.
        int left = std::max(int(x - 0.5 * w + 0.5), 0);
        int top = std::max(int(y - 0.5 * h + 0.5), 0);
        if (left + w >= img_blob.ori_im_shape[1]) { w = img_blob.ori_im_shape[1] - left - 1;}
        if (top + h >= img_blob.ori_im_shape[0]) { h = img_blob.ori_im_shape[0] - top - 1;}
        double_plates.push_back(double_plate);
        land_marks.push_back(land_mark);
        labels.push_back(0);
        confidences.push_back(max_class_socre);
        boxs.push_back(cv::Rect(left, top, w, h));
      }
      pdata += net_width;
    }
  }

  // NMS, then wrap survivors; ownership of TextDetInfo passes to the caller.
  std::vector<int> indices;
  cv::dnn::NMSBoxes(boxs, confidences, mdl_ox->cfg->draw_threshold, mdl_ox->cfg->nms_threshold, indices, 100);
  for (auto& idx : indices) {
    TextDetInfo* di = new TextDetInfo(land_marks[idx], confidences[idx]);
    di->det_box = boxs[idx];
    di->double_plate = double_plates[idx];
    det_infos.emplace_back(di);
  }

  return ret;
}

void ONNXFaceDetPredictor::resize_unscale(const cv::Mat &mat, cv::Mat &mat_rs,int target_height, int target_width,SCRFDScaleParams &scale_params)
{
  // Aspect-preserving ("letterbox") resize: the source is scaled by a single
  // ratio so it fits inside target_width x target_height, then centered on a
  // black canvas. The ratio and padding offsets are recorded in scale_params
  // so detections can be mapped back to the original image.
  if (mat.empty()) return;

  const int src_h = mat.rows;
  const int src_w = mat.cols;

  // One scale factor so that neither dimension exceeds the target.
  const float ratio = std::min((float) target_width / (float) src_w,
                               (float) target_height / (float) src_h);

  // Scaled content size (floored) and the centering offsets (>= 0).
  const int scaled_w = static_cast<int>((float) src_w * ratio);
  const int scaled_h = static_cast<int>((float) src_h * ratio);
  const int offset_x = (target_width - scaled_w) / 2;
  const int offset_y = (target_height - scaled_h) / 2;

  // Black canvas, then paste the resized content into the centered ROI.
  mat_rs = cv::Mat(target_height, target_width, CV_8UC3, cv::Scalar(0, 0, 0));
  cv::Mat scaled;
  cv::resize(mat, scaled, cv::Size(scaled_w, scaled_h));
  scaled.copyTo(mat_rs(cv::Rect(offset_x, offset_y, scaled_w, scaled_h)));

  // Record how to undo the mapping during postprocessing.
  scale_params.ratio = ratio;
  scale_params.dw = offset_x;
  scale_params.dh = offset_y;
  scale_params.flag = true;
}

// SCRFD face-detection / landmark postprocessing — implementation details below.
void ONNXFaceDetPredictor::generate_points(const int target_height, const int target_width)
{
  if (center_points_is_update) return;
  // 8, 16, 32
  for (auto stride : feat_stride_fpn)
  {
    unsigned int num_grid_w = target_width / stride;
    unsigned int num_grid_h = target_height / stride;
    // y
    for (unsigned int i = 0; i < num_grid_h; ++i)
    {
      // x
      for (unsigned int j = 0; j < num_grid_w; ++j)
      {
        // num_anchors, col major
        for (unsigned int k = 0; k < num_anchors; ++k)
        {
          SCRFDPoint point;
          point.cx = (float) j;
          point.cy = (float) i;
          point.stride = (float) stride;
          center_points[stride].push_back(point);
        }

      }
    }
  }

  center_points_is_update = true;
}

// Decode one FPN level (stride 8/16/32). Per anchor point i:
//   score_ptr[i]       — face confidence
//   bbox_ptr[i*4 + k]  — distances (l, t, r, b) from the anchor center
//   kps_ptr[i*10 + k]  — five landmark (x, y) offsets from the anchor center
// Coordinates are decoded in network space, then mapped back to the original
// image by removing the letterbox padding (dw, dh) and dividing by the resize
// ratio. Survivors above score_threshold are appended to bbox_kps_collection;
// if the collection exceeds this level's nms_pre_ budget it is sorted by
// score (descending) and truncated.
void ONNXFaceDetPredictor::generate_bboxes_kps_single_stride(const SCRFDScaleParams &scale_params,
                                           float* score_ptr,
                                           float* bbox_ptr,
                                           float* kps_ptr,
                                           unsigned int stride,
                                           float score_threshold,
                                           float img_height,
                                           float img_width,
                                           std::vector<BoxfWithLandmarks> &bbox_kps_collection)
{

  // Larger strides get proportionally larger pre-NMS budgets, never below nms_pre.
  unsigned int nms_pre_ = (stride / 8) * nms_pre; // 1 * 1000,2*1000,...
  nms_pre_ = nms_pre_ >= nms_pre ? nms_pre_ : nms_pre;

  // Number of anchor points at this stride (matches the grid built by
  // generate_points for the corresponding output tensor).
  unsigned int num_points;
  if(stride == 8)
  {
    num_points = output_num_points[0];
  }
  else if (stride == 16)
  {
    num_points = output_num_points[1];
  }
  else
  {
    num_points = output_num_points[2];
  }

  float ratio = scale_params.ratio;
  int dw = scale_params.dw;
  int dh = scale_params.dh;
  unsigned int count = 0;
  auto &stride_points = center_points[stride];

  for (unsigned int i = 0; i < num_points; ++i)
  {
    float cls_conf = score_ptr[i];
    if (cls_conf < score_threshold) continue; // filter
    auto &point = stride_points.at(i);
    const float cx = point.cx; // cx
    const float cy = point.cy; // cy
    const float s = point.stride; // stride

    // bbox: center minus/plus the predicted distances, scaled by the stride,
    // then un-letterboxed back to original-image coordinates.
    const float *offsets = bbox_ptr + i * 4;
    float l = offsets[0]; // left
    float t = offsets[1]; // top
    float r = offsets[2]; // right
    float b = offsets[3]; // bottom

    BoxfWithLandmarks box_kps;
    float x1 = ((cx - l) * s - (float) dw) / ratio;  // cx - l x1
    float y1 = ((cy - t) * s - (float) dh) / ratio;  // cy - t y1
    float x2 = ((cx + r) * s - (float) dw) / ratio;  // cx + r x2
    float y2 = ((cy + b) * s - (float) dh) / ratio;  // cy + b y2
    box_kps.box.x1 = std::max(0.f, x1);
    box_kps.box.y1 = std::max(0.f, y1);
    box_kps.box.x2 = std::min(img_width - 1.f, x2);
    box_kps.box.y2 = std::min(img_height - 1.f, y2);
    box_kps.box.score = cls_conf;
    box_kps.box.label = 1;
    box_kps.box.label_text = "face";
    box_kps.box.flag = true;

    // landmarks: five (x, y) offsets decoded the same way, clamped to image.
    const float *kps_offsets = kps_ptr + i * 10;
    for (unsigned int j = 0; j < 10; j += 2)
    {
      cv::Point2f kps;
      float kps_l = kps_offsets[j];
      float kps_t = kps_offsets[j + 1];
      float kps_x = ((cx + kps_l) * s - (float) dw) / ratio;  // cx + l x
      float kps_y = ((cy + kps_t) * s - (float) dh) / ratio;  // cy + t y
      kps.x = std::min(std::max(0.f, kps_x), img_width - 1.f);
      kps.y = std::min(std::max(0.f, kps_y), img_height - 1.f);
      box_kps.landmarks.points.push_back(kps);
    }
    box_kps.landmarks.flag = true;
    box_kps.flag = true;

    bbox_kps_collection.push_back(box_kps);

    count += 1; // limit boxes for nms.
    // NOTE(review): `>` lets max_nms + 1 boxes through before breaking —
    // confirm whether the off-by-one is intended.
    if (count > max_nms)
      break;
  }  
  // Keep only the top nms_pre_ candidates by score for the NMS stage.
  if (bbox_kps_collection.size() > nms_pre_)
  {
    std::sort(
        bbox_kps_collection.begin(), bbox_kps_collection.end(),
        [](const BoxfWithLandmarks &a, const BoxfWithLandmarks &b)
        { return a.box.score > b.box.score; }
    ); // sort inplace
    // trunc
    bbox_kps_collection.resize(nms_pre_);
  }

}

void ONNXFaceDetPredictor::generate_bboxes_kps(const SCRFDScaleParams &scale_params,
                             std::vector<BoxfWithLandmarks> &bbox_kps_collection,
                             float** output_tensors,
                             float score_threshold, float img_height,
                             float img_width,float input_height,float input_width)
{
  // Decode all three SCRFD output levels into bbox_kps_collection.
  // Tensor layout: [0..2] scores (strides 8/16/32, e.g. [1,12800,1] ...),
  // [3..5] bbox deltas (e.g. [1,12800,4] ...), and when use_kps is set,
  // [6..8] landmark deltas (e.g. [1,12800,10] ...).

  // Build (or reuse) the cached anchor-center grids.
  generate_points(input_height, input_width);

  bbox_kps_collection.clear();

  if (!use_kps)
  {
    // Landmark-free decoding is not implemented for this predictor.
    return;
  }

  const unsigned int strides[3] = {8, 16, 32};
  for (int level = 0; level < 3; ++level)
  {
    float* level_scores = output_tensors[level];
    float* level_bboxes = output_tensors[level + 3];
    float* level_kps = output_tensors[level + 6];
    generate_bboxes_kps_single_stride(scale_params, level_scores, level_bboxes, level_kps,
                                      strides[level], score_threshold,
                                      img_height, img_width, bbox_kps_collection);
  }
}                            

// Intersection-over-union of two boxes with inclusive pixel coordinates
// (hence the +1 on width/height). Returns a tiny positive value — kept from
// the original — when the boxes do not overlap; callers only compare the
// result against an IoU threshold, so this behaves like 0.
float ONNXFaceDetPredictor::iou_of(Boxf tbox,Boxf tb)
{
    float inner_x1 = std::max(tb.x1, tbox.x1);
    float inner_y1 = std::max(tb.y1, tbox.y1);
    float inner_x2 = std::min(tb.x2, tbox.x2);
    float inner_y2 = std::min(tb.y2, tbox.y2);
    float inner_h = inner_y2 - inner_y1 + 1.0f;
    float inner_w = inner_x2 - inner_x1 + 1.0f;
    if (inner_h <= 0.f || inner_w <= 0.f)
        return std::numeric_limits<float>::min();
    float inner_area = inner_h * inner_w;
    // Fixed: the original wrote std::abs<float>(...), which resolves to the
    // std::complex overload via implicit conversion, and applied abs to only
    // one factor of each product. Use std::fabs on each dimension instead.
    float tb_area = std::fabs(tb.y2 - tb.y1 + 1.f) * std::fabs(tb.x2 - tb.x1 + 1.f);
    float tbox_area = std::fabs(tbox.y2 - tbox.y1 + 1.f) * std::fabs(tbox.x2 - tbox.x1 + 1.f);
    return inner_area / (tb_area + tbox_area - inner_area);
}

void ONNXFaceDetPredictor::nms_bboxes_kps(std::vector<BoxfWithLandmarks> &input,
                           std::vector<BoxfWithLandmarks> &output,
                           float iou_threshold, unsigned int topk)
{
  // Greedy hard-NMS: repeatedly keep the highest-scoring remaining box and
  // suppress every box that overlaps it by more than iou_threshold. Stops
  // after topk boxes have been kept. `input` is sorted in place.
  if (input.empty()) return;

  std::sort(input.begin(), input.end(),
            [](const BoxfWithLandmarks &lhs, const BoxfWithLandmarks &rhs)
            { return lhs.box.score > rhs.box.score; });

  const unsigned int total = input.size();
  std::vector<int> suppressed(total, 0);
  unsigned int kept = 0;

  for (unsigned int i = 0; i < total; ++i)
  {
    if (suppressed[i]) continue;
    suppressed[i] = 1;

    // Suppress every remaining box that overlaps the current keeper.
    for (unsigned int j = i + 1; j < total; ++j)
    {
      if (suppressed[j]) continue;
      if (iou_of(input[i].box, input[j].box) > iou_threshold)
        suppressed[j] = 1;
    }

    output.push_back(input[i]);

    // keep top k
    kept += 1;
    if (kept >= topk)
      break;
  }
}
// End of SCRFD face-detection / landmark postprocessing implementation.

int ONNXFaceDetPredictor::GetConvDetectionResult_face(float** output_tensors, SCRFDScaleParams scale_params, float score_threshold, float iou_threshold, int img_height, int img_width, std::vector<BaseInfo*>& det_infos, unsigned int topk, std::vector<BoxfWithLandmarks>& detected_boxes) {
  // Decode the SCRFD output tensors into face boxes with five landmarks,
  // apply NMS, and wrap each surviving detection in a heap-allocated DetInfo
  // appended to det_infos (ownership passes to the caller).
  std::vector<BoxfWithLandmarks> candidates;
  generate_bboxes_kps(scale_params, candidates, output_tensors,
                      score_threshold, img_height, img_width,
                      mdl_ox->cfg->input_shape[1], mdl_ox->cfg->input_shape[0]);

  nms_bboxes_kps(candidates, detected_boxes, iou_threshold, topk);

  for (const auto& det : detected_boxes) {
    // Box corners: top-left (x1, y1), bottom-right (x2, y2).
    DetInfo* info = new DetInfo(det.box.score, det.box.label,
                                cv::Rect(cv::Point2f(det.box.x1, det.box.y1),
                                         cv::Point2f(det.box.x2, det.box.y2)));
    info->category = "face";
    info->points = det.landmarks.points;  // five facial keypoints
    det_infos.emplace_back(info);
  }

  return 0;
}

// Postprocessor for the "YOLOV5.paddle" arch. Appears to be work in progress:
// it decodes each 6-value record [class_id, score, x, y, w, h] but only prints
// the values to stdout — nothing is ever appended to det_infos (the NMS and
// DetInfo construction below are commented out).
// NOTE(review): the cv::Mat is built with a hardcoded cv::Size(100, 6) while
// box_cnt comes from outputs_size[idx] / 6 — if they disagree, the loop reads
// past the Mat it wrapped. Also iterates the member `outputs`, which RunDet
// currently never fills. Confirm before enabling this path.
int ONNXFaceDetPredictor::GetConvDetectionResult_paddle(stream::ImageBlob& img_blob, std::vector<BaseInfo*>& det_infos) {
  int ret = 0;

  std::vector<cv::Rect> boxs;
  std::vector<float> confidences;
  std::vector<int> labels;


  int class_num = mdl_ox->cfg->label_list.size();
  // Record stride: [class_id, score, x, y, w, h].
  int step = 6;


  for (int idx = 0; idx < outputs.size(); idx++) {
    cv::Mat output0 = cv::Mat(cv::Size(100, 6), CV_32F, outputs[idx]);  //[bs,116,8400]=>[bs,8400,116]

    int box_cnt = outputs_size[idx] / step;
    float* pdata = (float*)output0.data;

    // Debug dump only — no detections are produced yet.
    for (int box_idx = 0; box_idx < box_cnt; box_idx++) {
      int class_idx = pdata[box_idx * step];
      float score = pdata[box_idx * step + 1];
      float x = pdata[box_idx * step + 2];
      float y = pdata[box_idx * step + 3];
      float w = pdata[box_idx * step + 4];
      float h = pdata[box_idx * step + 5];

      std::cout << class_idx << ", " << score << ", " << x << ", " << y << ", " << w << ", " << h << std::endl;
    }

    // for (int r = 0; r < rows; ++r) {    //stride
    //   cv::Mat scores(1, class_num, CV_32F, pdata + 4);
    //   cv::Point classIdPoint;
    //   double max_class_socre;
    //   cv::minMaxLoc(scores, 0, &max_class_socre, 0, &classIdPoint);
    //   max_class_socre = (float)max_class_socre;
    //   if (max_class_socre >= mdl_ox->cfg->draw_threshold) {

    //     //rect [x,y,w,h]
    //     float x = pdata[0] / img_blob.scale;
    //     float y = pdata[1] / img_blob.scale;
    //     int w = pdata[2] / img_blob.scale;
    //     int h = pdata[3] / img_blob.scale;
    //     // std::cout << "score:" << max_class_socre << " idx:" << classIdPoint.x << " x:" << x << " y: " << y << " w: " << w << " h:" << h << std::endl;

    //     int left = std::max(int(x - 0.5 * w + 0.5), 0);
    //     int top = std::max(int(y - 0.5 * h + 0.5), 0);
    //     if (left + w >= img_blob.ori_im_shape[1]) { w = img_blob.ori_im_shape[1] - left - 1;}
    //     if (top + h >= img_blob.ori_im_shape[0]) { h = img_blob.ori_im_shape[0] - top - 1;}
    //     labels.push_back(classIdPoint.x);
    //     confidences.push_back(max_class_socre);
    //     boxs.push_back(cv::Rect(left, top, w, h));
    //   }
    //   pdata += net_width;
    // }
  }

  // std::vector<int> indices;
  // cv::dnn::NMSBoxes(boxs, confidences, mdl_ox->cfg->draw_threshold, mdl_ox->cfg->nms_threshold, indices, 100);
  // for (auto& idx : indices) {
  //   DetInfo* di = new DetInfo(confidences[idx], labels[idx], boxs[idx], mdl_ox->cfg->label_list[labels[idx]]);
  //   det_infos.emplace_back(di);
  // }

  return ret;
}
}
