#include <algorithm>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <numeric>
#include <sstream>

#include <opencv2/dnn.hpp>

#include "base_util/utils.h"
#include "cls_video.h"

namespace ai {



// Constructs a video-classification predictor; all state is held by the
// ONNXPredictor base (model, manager, logger) — no local members to set up.
ONNXClsVideo::ONNXClsVideo(ONNXPackPredictor* model, ONNXModelManager* manager, LogInfo *lg):
  ONNXPredictor(model, manager, lg) { }


// Post-processing worker thread body. The queue-draining loop is currently
// disabled (kept below for reference); the thread exits immediately with 0.
// Fix: removed the unused live locals (`ret`, `cur_block`) that shadowed the
// commented-out implementation.
int ONNXClsVideo::start_postprocess_thread_imp() {
  // NOTE(review): disabled worker loop, kept for when post-processing is re-enabled.
  // stop_flag2 = common::CameraGrabing;
  // while (stop_flag2 == common::CameraGrabing) {
  //   std::pair<std::pair<int,int>,int> cur_block;
  //   run_mtx2.lock();
  //   if (post_data2.empty()) {run_mtx2.unlock(); std::this_thread::sleep_for(std::chrono::milliseconds(3)); continue;}
  //   cur_block = post_data2.front();
  //   post_data2.pop();
  //   run_mtx2.unlock();
  //
  //   int loop_idx = cur_block.first.second;
  //   std::vector<BaseInfo*> det_infos;
  //   int ret = det_hb_->PostProcess(det_infos, cur_block.first.first, this, cur_block.second);  // process the image block
  //   run_mtx3.lock();
  //   out_data3[loop_idx].push(det_infos);  // hand the processed detections back via the output queue
  //   run_mtx3.unlock();
  // }
  // stop_flag2 = common::CameraOpened;
  return 0;
}

// Runs classification inference on a batch of images.
//
// @param blob       Input batch; blob->imgs.size() must equal the model's fixed
//                   batch dimension (input_tensor_dims[0][0]).
// @param det_infos  Output: one heap-allocated ClsInfo per model output tensor
//                   (top-5 classes/scores after softmax). Ownership transfers
//                   to the caller via det_infos.
// @return 0 on success, model_image_batchs_check_error /
//         model_image_channels_check_error on input validation failure.
//
// Fixes vs. previous version: removed unused `ret` and a leftover debug
// std::cout; non-tensor/empty outputs are now skipped instead of indexing an
// empty top-5 vector (undefined behavior); softmax is numerically stable
// (max-logit subtraction) so large logits no longer overflow std::exp.
int ONNXClsVideo::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  const int64_t batch = mdl_ox->input_tensor_dims[0][0];
  if (static_cast<int64_t>(blob->imgs.size()) != batch) {
    return model_image_batchs_check_error;
  }

  // Preprocess every image into one contiguous float buffer, batch-major.
  const int img_buffer_size =
      mdl_ox->cfg->input_shape[0] * mdl_ox->cfg->input_shape[1] * mdl_ox->cfg->channels;
  std::vector<float> im_vec_data(batch * img_buffer_size);
  stream::ImageBlob img_blob(stream::ImageBlobMode_BGR);
  for (int64_t idx = 0; idx < batch; idx++) {
    if (!mdl_ox->cfg->transforms->run(blob->imgs[idx], img_blob, mdl_ox->cfg)) {
      printf("transforms->run fail \n");
      return model_image_channels_check_error;
    }
    std::memcpy(im_vec_data.data() + img_buffer_size * idx,
                img_blob.im_vec_data.data(), img_buffer_size * sizeof(float));
  }

  // Build the input tensors.
  // NOTE(review): every declared input name is bound to the SAME image buffer
  // and shape — this assumes a single-input model; confirm if multi-input
  // models are ever loaded here.
  const std::vector<int64_t>& in_shape = mdl_ox->input_tensor_dims[0];
  const int64_t in_count = std::accumulate(in_shape.begin(), in_shape.end(),
                                           int64_t{1}, std::multiplies<int64_t>());
  std::vector<Ort::Value> inputTensors;
  std::vector<const char*> input_names;
  for (const auto& input_name : mdl_ox->cfg->input_names) {
    input_names.push_back(input_name.c_str());
    inputTensors.push_back(Ort::Value::CreateTensor<float>(
        mdl_ox->ortMemoryInfo, im_vec_data.data(), in_count,
        in_shape.data(), in_shape.size()));
  }

  std::vector<const char*> output_names;
  for (const auto& str : mdl_ox->cfg->output_names) { output_names.push_back(str.c_str()); }

  std::vector<Ort::Value> output_tensor_values = mdl_ox->session->Run(
        Ort::RunOptions{nullptr}, input_names.data(),
        inputTensors.data(), input_names.size(),
        output_names.data(), output_names.size());

  // For each output tensor: softmax over the raw logits, then take the top-5.
  for (auto& out : output_tensor_values) {
    if (!out.IsTensor()) { continue; }  // skip non-tensor outputs (previously UB)
    std::vector<int64_t> out_shape = out.GetTensorTypeAndShapeInfo().GetShape();
    const int64_t output_size = std::accumulate(out_shape.begin(), out_shape.end(),
                                                int64_t{1}, std::multiplies<int64_t>());
    if (output_size <= 0) { continue; }  // nothing to classify

    const float* cur_data = out.GetTensorMutableData<float>();

    // Numerically stable softmax: shift by the max logit before exponentiating.
    const float max_logit = *std::max_element(cur_data, cur_data + output_size);
    std::vector<std::pair<float, int>> scores(output_size);
    float total_sum = 0.f;
    for (int64_t ii = 0; ii < output_size; ii++) {
      scores[ii] = std::make_pair(std::exp(cur_data[ii] - max_logit),
                                  static_cast<int>(ii));
      total_sum += scores[ii].first;
    }
    for (auto& x : scores) { x.first /= total_sum; }

    // Only the leading top_k entries need to be ordered — partial_sort, not sort.
    const int top_k = static_cast<int>(std::min<int64_t>(5, output_size));
    std::partial_sort(scores.begin(), scores.begin() + top_k, scores.end(),
                      [](const std::pair<float, int>& a, const std::pair<float, int>& b) {
                        return a.first > b.first;
                      });

    auto* cls_ifo = new ClsInfo();  // ownership handed to caller via det_infos
    for (int top_idx = 0; top_idx < top_k; top_idx++) {
      cls_ifo->top5_score.push_back(scores[top_idx].first);
      cls_ifo->top5_class.push_back(scores[top_idx].second);
    }
    cls_ifo->class_idx = cls_ifo->top5_class[0];
    cls_ifo->score = cls_ifo->top5_score[0];
    cls_ifo->category = mdl_ox->cfg->label_list[cls_ifo->class_idx];
    det_infos.push_back(cls_ifo);
  }

  return 0;
}




// Post-processing hook for the threaded pipeline. Currently a no-op stub that
// always reports success — RunDet performs all post-processing inline; this
// override exists only to satisfy the predictor interface.
//
// @param det_infos  Unused; would receive the processed results.
// @param cur_block  Unused; index of the block being processed.
// @param mng        Unused; owning model manager.
// @param md_idx     Unused; model index within the manager.
// @return always 0.
int ONNXClsVideo::PostProcess(std::vector<BaseInfo*>& det_infos, 
                int cur_block,
                ONNXModelManager* mng, 
                int md_idx) {

  return 0;
}



}  // namespace ai
