#include <sstream>
#include <iomanip>
#include <opencv2/dnn.hpp>

#include "base_util/utils.h"
#include "cls_video.h"

namespace ai {


// Debug helper: print one RKNN tensor attribute (index, shape, element
// count, layout/type and quantization parameters) to stdout.
static void dump_tensor_attr(rknn_tensor_attr *attr) {
  const char *fmt_name = get_format_string(attr->fmt);
  const char *type_name = get_type_string(attr->type);
  const char *qnt_name = get_qnt_type_string(attr->qnt_type);
  printf("  index=%d, name=%s, n_dims=%d, dims=[%d, %d, %d, %d], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
          "zp=%d, scale=%f\n",
          attr->index, attr->name, attr->n_dims, attr->dims[0], attr->dims[1], attr->dims[2], attr->dims[3],
          attr->n_elems, attr->size, fmt_name, type_name, qnt_name, attr->zp, attr->scale);
}

// Dequantize an int8 value back to float32: (q - zero_point) * scale.
static float DeQnt2F32(int8_t qnt, int zp, float scale) {
  const float centered = static_cast<float>(qnt) - static_cast<float>(zp);
  return centered * scale;
}

// Forwarding constructor: all state setup is delegated to the RKPredictor
// base class; this subclass adds no members of its own here.
RKClsVideo::RKClsVideo(RKPackPredictor* model, RKModelManager* manager, LogInfo *lg):
  RKPredictor(model, manager, lg) { }


// Post-processing worker loop entry point. Currently a no-op that reports
// success: the queue-driven implementation is disabled and kept below as a
// comment for reference. Re-enable by restoring the loop body.
int RKClsVideo::start_postprocess_thread_imp() {
  // stop_flag2 = common::CameraGrabing;
  // while (stop_flag2 == common::CameraGrabing) {
  //   std::pair<std::pair<int,int>,int> cur_block;
  //   run_mtx2.lock();
  //   if (post_data2.empty()) {run_mtx2.unlock(); std::this_thread::sleep_for(std::chrono::milliseconds(3));continue;}
  //   cur_block = post_data2.front();
  //   post_data2.pop();
  //   run_mtx2.unlock();

  //   int loop_idx = cur_block.first.second;
  //   std::vector<BaseInfo*> det_infos;
  //   int ret = det_hb_->PostProcess(det_infos, cur_block.first.first, this, cur_block.second);  // process one image
  //   run_mtx3.lock();
  //   out_data3[loop_idx].push(det_infos);  // hand the processed det_infos to the output queue
  //   run_mtx3.unlock();
  // }
  // stop_flag2 = common::CameraOpened;
  return 0;
}

// Run one batched classification inference on the RKNN NPU.
//
// For each model output tensor this appends one heap-allocated ClsInfo
// (top-5 class ids + softmax scores) to det_infos; ownership of those
// objects transfers to the caller.
//
// Returns 0 on success, a model_*_check_error code on input validation
// failure, or -1 on an RKNN runtime failure.
int RKClsVideo::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  int ret;
  // Batch size must match the model's N dimension.
  if (blob->imgs.size() != mdl_rk->nchw[0]) {return model_image_batchs_check_error;}

  // Preprocess every image in the batch and pack the float results into one
  // contiguous buffer laid out [batch][H*W*C].
  std::vector<float> im_vec_data;
  int img_buffer_size = mdl_rk->cfg->input_shape[0] * mdl_rk->cfg->input_shape[1] * mdl_rk->cfg->channels;
  im_vec_data.resize(mdl_rk->nchw[0] * img_buffer_size);
  stream::ImageBlob img_blob(stream::ImageBlobMode_BGR);
  for (int idx = 0; idx < mdl_rk->nchw[0]; idx++) {
    if (!mdl_rk->cfg->transforms->run(blob->imgs[idx], img_blob, mdl_rk->cfg)) { 
      printf("transforms->run fail \n");
      return model_image_channels_check_error;
    }
    std::memcpy(im_vec_data.data() + img_buffer_size*idx, img_blob.img.data, img_buffer_size* sizeof(float));
  }

  // Query output tensor attributes (needed for dequantization below).
  // std::vector instead of the previous variable-length array: VLAs are a
  // compiler extension, not standard C++.
  std::vector<rknn_tensor_attr> output_attrs(mdl_rk->output_count);
  memset(output_attrs.data(), 0, output_attrs.size() * sizeof(rknn_tensor_attr));
  for (int i = 0; i < mdl_rk->output_count; i++) {
    output_attrs[i].index = i;
    ret = rknn_query(mdl_rk->ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
    // Previously unchecked: a failed query would leave zp/scale zeroed and
    // silently corrupt the dequantization.
    if (ret != 0) {printf("rknn_query fail %d\n", ret); return -1;}
    // dump_tensor_attr(&(output_attrs[i]));
  }

  // Bind the packed input; the model consumes float32 NHWC.
  mdl_rk->inputs[0].size = mdl_rk->nchw[0] * img_buffer_size * sizeof(float);
  mdl_rk->inputs[0].type = RKNN_TENSOR_FLOAT32;
  mdl_rk->inputs[0].fmt = RKNN_TENSOR_NHWC;
  mdl_rk->inputs[0].buf = (void *)(im_vec_data.data());
  rknn_inputs_set(mdl_rk->ctx, mdl_rk->input_count, &mdl_rk->inputs[0]);

  // Pin inference to the NPU core selected by cfg->gpu_id (default: core 0).
  rknn_core_mask core_mask = RKNN_NPU_CORE_0;
  if(mdl_rk->cfg->gpu_id==1) { core_mask = RKNN_NPU_CORE_1; }
  if(mdl_rk->cfg->gpu_id==2) { core_mask = RKNN_NPU_CORE_2; }
  ret = rknn_set_core_mask(mdl_rk->ctx, core_mask);
  ret = rknn_run(mdl_rk->ctx, NULL);
  if (ret != 0) {printf("rknn_run fail %d\n", ret); return -1;}
  ret = rknn_outputs_get(mdl_rk->ctx, mdl_rk->output_count, &mdl_rk->outputs[0], NULL);
  if (ret != 0) {printf("rknn_outputs_get fail %d\n", ret); return -1;}

  // Post-process each output tensor: dequantize -> softmax -> top-5.
  for (int j = 0; j < mdl_rk->output_count; j++) {
    // Use n_elems as the single authoritative element count. The previous
    // code sized the vector by the product of dims but looped over n_elems,
    // which would write out of bounds if the two ever disagreed.
    const int output_tensor_size = output_attrs[j].n_elems;
    if (output_tensor_size <= 0) { continue; }  // nothing to classify
    std::vector<std::pair<float, int>> output_tensors(output_tensor_size);
    int8_t *pblob = (int8_t *)mdl_rk->outputs[j].buf;
    const int zp = output_attrs[j].zp;
    const float scale = output_attrs[j].scale;
    for (int i = 0; i < output_tensor_size; i++) {
      output_tensors[i] = std::make_pair(DeQnt2F32(pblob[i], zp, scale), i);
    }

    // Numerically stable softmax: subtract the max logit before exp so
    // large dequantized values cannot overflow to inf (which would yield
    // NaN probabilities). Subtracting a constant does not change the result.
    float max_logit = output_tensors[0].first;
    for (const auto& x : output_tensors) { if (x.first > max_logit) { max_logit = x.first; } }
    float total_sum = 0;
    for (auto& x : output_tensors) { x.first = std::exp(x.first - max_logit); total_sum += x.first; }
    for (auto& x : output_tensors) { x.first = x.first / total_sum; }

    // Top-5: sort descending by probability, then record up to five entries.
    std::sort(output_tensors.begin(), output_tensors.end(),
              [](const std::pair<float, int>& a, const std::pair<float, int>& b) { return a.first > b.first; });
    ai::ClsInfo* cls_ifo = new ClsInfo();  // ownership passes to the caller via det_infos
    for (int top_idx = 0; top_idx < 5 && top_idx < output_tensor_size; top_idx++) {
      cls_ifo->top5_score.push_back(output_tensors[top_idx].first);
      cls_ifo->top5_class.push_back(output_tensors[top_idx].second);
    }
    cls_ifo->class_idx = cls_ifo->top5_class[0];
    cls_ifo->score = cls_ifo->top5_score[0];
    cls_ifo->category = mdl_rk->cfg->label_list[cls_ifo->class_idx];
    det_infos.push_back(cls_ifo);
  }

  // Release the RKNN-owned output buffers allocated by rknn_outputs_get.
  rknn_outputs_release(mdl_rk->ctx, mdl_rk->output_count, &mdl_rk->outputs[0]);

  return 0;
}




// Stub: frame-level post-processing is not implemented for this predictor
// (RunDet performs the classification post-processing inline). All
// parameters are currently ignored; always returns 0 (success).
int RKClsVideo::PostProcess(std::vector<BaseInfo*>& det_infos, 
                int cur_block,
                RKModelManager* mng, 
                int md_idx) {

  return 0;
}



}  // namespace ai
