#include <functional>
#include <iomanip>
#include <iostream>
#include <numeric>
#include <sstream>

#include <opencv2/dnn.hpp>

#include "base_util/utils.h"
#include "rec_predictor.h"


namespace ai {


// OCR text-recognition predictor (CRNN plate model). Construction simply
// forwards the packed model, the model manager, and the log context to the
// ONNXPredictor base; no extra state is initialized here.
ONNXOcrRecPredictor::ONNXOcrRecPredictor(ONNXPackPredictor* model, ONNXModelManager* manager, LogInfo *lg):
  ONNXPredictor(model, manager, lg) { }


/// Run recognition inference on one image blob.
///
/// Preprocesses `blob->img`, builds the ONNX Runtime input tensors declared
/// in the model config, executes the session, caches raw output pointers /
/// shapes / element counts in `outputs`, `outputs_shapes`, `outputs_size`,
/// and dispatches to the arch-specific postprocessor which appends results
/// to `det_infos` (caller owns the appended objects).
///
/// @param blob      input image wrapper (BGR); only `blob->img` is read.
/// @param det_infos receives newly allocated recognition results.
/// @return 0 on success, model_image_channels_check_error on preprocess failure.
int ONNXOcrRecPredictor::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  // Preprocess (resize / normalize / layout) according to the model config.
  stream::ImageBlob img_blob(stream::ImageBlobMode_BGR);
  if (!mdl_ox->cfg->transforms->run(blob->img, img_blob, mdl_ox->cfg)) {
    printf("transforms->run fail \n");
    return model_image_channels_check_error;
  }

  // Build one tensor per configured input name.
  std::vector<Ort::Value> inputTensors;
  std::vector<const char *> input_names;
  for (size_t idx = 0; idx < mdl_ox->cfg->input_names.size(); idx++) {
    auto& input_name = mdl_ox->cfg->input_names[idx];
    input_names.push_back(input_name.c_str());

    if (input_name == "image" || input_name == "images" || input_name == "input") {
      std::vector<int64_t> shape;
      float* pdata = nullptr;
      if (mdl_ox->cfg->data_format == "CHW") {
        // CHW: use the planar buffer produced by the transforms.
        pdata = img_blob.im_vec_data.data();
        shape = { 1, mdl_ox->cfg->channels, img_blob.new_im_shape[0], img_blob.new_im_shape[1] };
      } else {
        // HWC: the cv::Mat data is already interleaved float.
        pdata = (float*)img_blob.img.data;
        shape = { 1, img_blob.new_im_shape[0], img_blob.new_im_shape[1], mdl_ox->cfg->channels };
      }
      // Multiply dims in int64_t: an int accumulator can overflow for large tensors.
      const int64_t elem_count =
          std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
      inputTensors.push_back(Ort::Value::CreateTensor<float>(mdl_ox->ortMemoryInfo,
                        pdata,
                        (size_t)elem_count,
                        shape.data(),
                        shape.size()));
    }
    else if (input_name == "scale_factor") {
      // Some detection-style models also take the preprocess scale as input.
      std::vector<int64_t> shape = {1, 2};
      std::vector<float> os = {img_blob.scale, img_blob.scale};
      const int64_t elem_count =
          std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
      inputTensors.push_back(Ort::Value::CreateTensor<float>(mdl_ox->ortMemoryInfo,
                        os.data(),
                        (size_t)elem_count,
                        shape.data(),
                        shape.size()));
    }
  }

  std::vector<const char *> output_names;
  for (auto& str : mdl_ox->cfg->output_names) { output_names.push_back(str.c_str()); }

  std::vector<Ort::Value> output_tensor_values = mdl_ox->session->Run(
        Ort::RunOptions{nullptr}, input_names.data(),
        inputTensors.data(), input_names.size(),
        output_names.data(), output_names.size());

  // Cache raw pointers/shapes into the predictor members consumed by the
  // postprocessors. Clear ALL three caches: previously outputs_shapes was
  // never cleared and grew across calls, desynchronizing it from outputs.
  outputs.clear();
  outputs_shapes.clear();
  outputs_size.clear();
  for (size_t idx = 0; idx < output_tensor_values.size(); idx++) {
    int output_size = 0;
    std::vector<int64_t> shape;
    if (output_tensor_values[idx].IsTensor()) {
      Ort::TensorTypeAndShapeInfo info = output_tensor_values[idx].GetTensorTypeAndShapeInfo();
      shape = info.GetShape();
      output_size = (int)std::accumulate(shape.begin(), shape.end(), int64_t{1}, std::multiplies<int64_t>());
    }

    float* cur_data = output_tensor_values[idx].GetTensorMutableData<float>();
    outputs.push_back(cur_data);
    outputs_shapes.push_back(shape);
    outputs_size.push_back(output_size);
  }

  if (mdl_ox->cfg->arch == "CRNN.plate") {
    GetConvDetectionResult(img_blob, det_infos);
  } else {
    // Fixed class name in the message (was mislabeled as ONNXDetPredictor).
    spdlog::get("logger")->info("ERROR. ONNXOcrRecPredictor::RunDet. mdl_ox->cfg->arch error: {} ", mdl_ox->cfg->arch);
  }

  // The cached pointers reference output_tensor_values, which dies with this
  // scope — drop them so no stale pointers survive the call.
  outputs.clear();
  outputs_shapes.clear();
  outputs_size.clear();

  return 0;
}

/// Decode the CRNN plate outputs into one TextRecInfo.
///
/// outputs[0] holds per-timestep character scores (shape [1, T, class_num]
/// per output_tensor_dims); outputs[1] holds plate-color scores. Greedy
/// CTC-style decoding: index 0 is the blank, consecutive repeats collapse.
/// NOTE(review): shapes are taken from mdl_ox->output_tensor_dims, not from
/// the runtime outputs_shapes — assumes the model has static output dims.
///
/// @param img_blob  preprocessed image (unused here, kept for interface parity).
/// @param rec_infos receives one newly allocated TextRecInfo (caller owns it).
/// @return 0 always.
int ONNXOcrRecPredictor::GetConvDetectionResult(stream::ImageBlob& img_blob, std::vector<BaseInfo*>& rec_infos) {
  // outputs[0] = plate characters; outputs[1] = plate color.
  const int class_num = (int)mdl_ox->output_tensor_dims[0][2];
  const int rows = (int)mdl_ox->output_tensor_dims[0][1];
  float* pdata = outputs[0];  // walk the score matrix row by row

  std::string plate_text = "";
  int pre_char_idx = 0;  // previous argmax; 0 is the CTC blank
  for (int r = 0; r < rows; ++r) {
    cv::Mat scores(1, class_num, CV_32F, pdata);
    cv::Point char_id_point;
    double max_char_score;
    cv::minMaxLoc(scores, 0, &max_char_score, 0, &char_id_point);
    // Emit a character only if it is not blank and not a repeat of the
    // previous timestep (standard greedy CTC collapse).
    if (char_id_point.x != 0 && pre_char_idx != char_id_point.x) {
      plate_text += mdl_ox->cfg->label_list[char_id_point.x];
    }
    pre_char_idx = char_id_point.x;
    pdata += class_num;
  }

  // Argmax over the color head.
  const int color_num = (int)mdl_ox->output_tensor_dims[1][1];
  cv::Mat color(1, color_num, CV_32F, outputs[1]);
  cv::Point color_id_point;
  double max_color_score;
  cv::minMaxLoc(color, 0, &max_color_score, 0, &color_id_point);

  static const std::vector<std::string> color_map = {"黑色", "蓝色", "绿色", "白色", "黄色"};
  // Guard the lookup: the model may report more color classes than the map
  // knows about; previously an out-of-range index was undefined behavior.
  std::string color_name = "";
  if (color_id_point.x >= 0 && color_id_point.x < (int)color_map.size()) {
    color_name = color_map[color_id_point.x];
  }

  // NOTE(review): 0.8 is a hard-coded confidence placeholder — consider
  // propagating max_char_score / max_color_score instead.
  TextRecInfo* rec_ifo = new TextRecInfo(0.8, plate_text, color_name);
  rec_infos.push_back(rec_ifo);

  return 0;
}


}  // namespace ai
