#include <sstream>
#include <iomanip>
#include <opencv2/dnn.hpp>

#include "base_util/utils.h"
#include "cls_video.h"

namespace ai {



// Video-classification predictor backed by a TensorRT engine.
// Construction only forwards to the TRTPredictor base; no extra state is set
// up here — all engine/buffer handling lives in the base and in RunDet().
TRTClsVideo::TRTClsVideo(TRTPackPredictor* model, TRTModelManager* manager, LogInfo *lg):
  TRTPredictor(model, manager, lg) { }


// Worker-thread body for asynchronous post-processing.
// The queue-draining implementation is currently disabled (kept below for
// reference), so this is a no-op that reports success.
// FIX: removed the unused locals `ret` and `cur_block` that were left over
// from the disabled loop.
int TRTClsVideo::start_postprocess_thread_imp() {
  // stop_flag2 = common::CameraGrabing;
  // while (stop_flag2 == common::CameraGrabing) {

  //   run_mtx2.lock();
  //   if (post_data2.empty()) {run_mtx2.unlock(); std::this_thread::sleep_for(std::chrono::milliseconds(3));continue;}
  //   cur_block = post_data2.front();
  //   post_data2.pop();
  //   run_mtx2.unlock();

  //   int loop_idx = cur_block.first.second;
  //   std::vector<BaseInfo*> det_infos;
  //   ret = det_hb_->PostProcess(det_infos, cur_block.first.first, this, cur_block.second);  //处理图片
  //   run_mtx3.lock();
  //   out_data3[loop_idx].push(det_infos);  //将处理后的数据返回到det_infos,加入队列中
  //   run_mtx3.unlock();
  // }
  // stop_flag2 = common::CameraOpened;
  return 0;
}

// Runs batched (8-image) classification inference with the TensorRT engine
// and appends one ClsInfo — carrying the top-5 classes and softmax scores of
// the first output row — to det_infos. Ownership of the new ClsInfo passes
// to the caller via det_infos.
// Returns 0 on success, -1 on validation/inference failure, or
// model_image_channels_check_error when host preprocessing fails.
int TRTClsVideo::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  const int batchSize = 8;  // the engine is driven with a fixed batch of 8 frames
  if (blob->imgs.size() != batchSize) {return -1;}
  for (int idx = 0; idx < batchSize; idx++) {
    if (blob->imgs[idx].empty()) {
      spdlog::get("logger")->info("ERROR. TRTClsVideo::RunDet blob->imgs[idx].empty() error.");
      return -1;
    }
  }

  // m_inputDims[0] holds the per-sample CHW dims; prepend the batch dim.
  const auto &dims = mdl_trt->m_inputDims[0];
  nvinfer1::Dims4 inputDims = {batchSize, dims.d[0], dims.d[1], dims.d[2]};
  const int img_buffer_size = dims.d[0] * dims.d[1] * dims.d[2];  // floats per image

  // Preprocess each frame on the host and pack the batch contiguously.
  std::vector<float> im_vec_data(static_cast<size_t>(batchSize) * img_buffer_size);
  stream::ImageBlob img_blob(stream::ImageBlobMode_BGR);
  for (int idx = 0; idx < batchSize; idx++) {
    if (!mdl_trt->cfg->transforms->run(blob->imgs[idx], img_blob, mdl_trt->cfg)) {
      printf("transforms->run fail \n");
      return model_image_channels_check_error;
    }
    std::memcpy(im_vec_data.data() + static_cast<size_t>(img_buffer_size) * idx,
                img_blob.im_vec_data.data(), img_buffer_size * sizeof(float));
  }

  // Upload the packed batch into the engine's input buffer. With pageable
  // host memory this default-stream cudaMemcpyAsync is effectively
  // synchronous, so the data is in place before the inference stream runs.
  const int input_buffer_idx = 0;
  cudaError_t succ = cudaMemcpyAsync(mdl_trt->m_buffers[input_buffer_idx], im_vec_data.data(),
                                     batchSize * img_buffer_size * sizeof(float), cudaMemcpyHostToDevice);
  checkCudaErrorCode(succ);

  cudaStream_t inferenceCudaStream;
  checkCudaErrorCode(cudaStreamCreate(&inferenceCudaStream));

  const auto numInputs = mdl_trt->m_inputDims.size();

  // Define the (dynamic) batch size for the input binding.
  mdl_trt->m_context->setInputShape(mdl_trt->m_IOTensorNames[0].c_str(), inputDims);

  // Ensure all dynamic bindings have been defined.
  if (!mdl_trt->m_context->allInputDimensionsSpecified()) {
    cudaStreamDestroy(inferenceCudaStream);  // FIX: stream was leaked on this throw path
    throw std::runtime_error("Error, not all required dimensions specified.");
  }

  // Set the address of the input and output buffers.
  for (size_t i = 0; i < mdl_trt->m_buffers.size(); ++i) {
    bool status = mdl_trt->m_context->setTensorAddress(mdl_trt->m_IOTensorNames[i].c_str(), mdl_trt->m_buffers[i]);
    if (!status) {
      std::cout << "setTensorAddress:  " << status << std::endl;
      checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));  // FIX: stream was leaked on this error path
      return -1;
    }
  }

  // Run inference.
  bool status = mdl_trt->m_context->enqueueV3(inferenceCudaStream);
  if (!status) {
    std::cout << "enqueueV3:  " << status << std::endl;
    checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));  // FIX: stream was leaked on this error path
    return -1;
  }

  // Copy every output binding back to the host. Output buffers follow the
  // inputs in m_buffers, hence the offset by numInputs.
  std::vector<std::vector<float>> batchOutputs{};
  for (int32_t outputBinding = numInputs; outputBinding < mdl_trt->m_engine->getNbIOTensors(); ++outputBinding) {
    std::vector<float> output;
    auto outputLength = mdl_trt->m_outputLengths[outputBinding - numInputs];
    output.resize(outputLength);
    auto ret = cudaMemcpyAsync(output.data(), static_cast<char *>(mdl_trt->m_buffers[outputBinding]),
                               outputLength * sizeof(float), cudaMemcpyDeviceToHost, inferenceCudaStream);
    checkCudaErrorCode(ret);
    batchOutputs.emplace_back(std::move(output));
  }

  // Synchronize and release the cuda stream.
  checkCudaErrorCode(cudaStreamSynchronize(inferenceCudaStream));
  checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));

  // First output tensor. FIX: bind by const reference — the original copied
  // the whole vector by value.
  const std::vector<float> &featureVector = batchOutputs[0];

  // Number of classes = second dimension of the last output binding.
  const auto &outputDims = mdl_trt->m_outputDims;
  size_t numChannels = outputDims[outputDims.size() - 1].d[1];

  // Pair each logit with its class index so the sort keeps the mapping.
  std::vector<std::pair<float, int>> output_tensors(numChannels);
  for (size_t ii = 0; ii < numChannels; ii++) {
    output_tensors[ii] = std::make_pair(featureVector[ii], static_cast<int>(ii));
  }

  // Numerically-stable softmax: subtracting the max logit before exp()
  // prevents overflow to inf for large logits (FIX); the resulting
  // probabilities are mathematically identical to the plain softmax.
  float max_logit = 0.f;
  if (!output_tensors.empty()) {
    max_logit = std::max_element(output_tensors.begin(), output_tensors.end(),
        [](const std::pair<float, int>& a, const std::pair<float, int>& b) {
          return a.first < b.first;
        })->first;
  }
  float total_sum = 0;
  for (auto& x : output_tensors) {x.first = std::exp(x.first - max_logit);}
  for (auto& x : output_tensors) {total_sum += x.first;}
  for (auto& x : output_tensors) {x.first = x.first / total_sum;}

  // Top-5 classes by probability (fewer if the model has < 5 classes).
  ai::ClsInfo* cls_ifo = new ClsInfo();
  std::sort(output_tensors.begin(), output_tensors.end(),
            [](const std::pair<float, int>& a, const std::pair<float, int>& b) {
              return a.first > b.first;
            });
  for (size_t top_idx = 0; top_idx < 5 && top_idx < numChannels; top_idx++) {
    cls_ifo->top5_score.push_back(output_tensors[top_idx].first);
    cls_ifo->top5_class.push_back(output_tensors[top_idx].second);
  }
  cls_ifo->class_idx = cls_ifo->top5_class[0];
  cls_ifo->score = cls_ifo->top5_score[0];
  cls_ifo->category = mdl_trt->cfg->label_list[cls_ifo->class_idx];
  det_infos.push_back(cls_ifo);  // ownership transfers to the caller

  return 0;
}

// int TRTClsVideo::detectObjects(const std::vector<cv::Mat> &inputImageBGRs, std::vector<BaseInfo*>& det_infos) {
//   // Upload the image to GPU memory
//   std::vector<cv::cuda::GpuMat> gpuImgs;
//   for (auto& img : inputImageBGRs) {
//     cv::cuda::GpuMat gpuImg;
//     gpuImg.upload(img);
//     gpuImgs.push_back(gpuImg);
//   }

//   // Call detectObjects with the GPU image
//   int ret = detectObjects(gpuImgs, det_infos);
//   return ret;
// }

// int TRTClsVideo::detectObjects(const std::vector<cv::cuda::GpuMat> &inputImageBGRs, std::vector<BaseInfo*>& det_infos) {
//     // int64_t time1 = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();

//   int batchSize = 8;

//   // cv::cuda::GpuMat gpu_dst(1, batchSize * img_size, CV_32FC3);
//   // void *imgPtr = nullptr;
//   // cudaMalloc((void **)&imgPtr, batchSize * img_size * sizeof(float));

//   for (int idx = 0; idx < batchSize; idx++) {
//     auto input = preprocess(inputImageBGRs[idx]);
    
//     std::cout << "preprocess rows:" << input.rows << " cols:" << input.cols << " step:" << input.step << " sizeof(float):" << int(sizeof(float)) << std::endl;
//     // auto tmp_img = cv::cuda::GpuMat(dims.d[2], dims.d[1], CV_32FC3, &(gpu_dst.ptr()[idx * img_size]));
//     // input.copyTo(tmp_img);

//     cudaError_t ret = cudaMemcpyAsync(imgPtr + idx * img_size * sizeof(float), input.data, img_size * sizeof(float), cudaMemcpyDeviceToDevice);
//     checkCudaErrorCode(ret);
//   }


//   std::vector<float> host_data(batchSize * img_size);
//   cudaError_t ret = cudaMemcpyAsync(host_data.data(), imgPtr, batchSize * img_size * sizeof(float), cudaMemcpyDeviceToHost);
//   checkCudaErrorCode(ret);

//   for (int idx = 0; idx < batchSize; idx++) {

//     cv::Mat img = cv::Mat(dims.d[1], dims.d[2], CV_32FC3, host_data.data()+ idx * img_size);
//     img.convertTo(img, CV_8UC3);

//     cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
//     cv::imwrite(std::to_string(idx)+".jpg", img);

//   }



//   // Run inference using the TensorRT engine
//   // int64_t time2 = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();


//   cudaStream_t inferenceCudaStream;
//   checkCudaErrorCode(cudaStreamCreate(&inferenceCudaStream));

//   const auto numInputs = mdl_trt->m_inputDims.size();
//   std::cout << "numInputs:  " << numInputs << std::endl;


//   mdl_trt->m_context->setInputShape(mdl_trt->m_IOTensorNames[0].c_str(), inputDims); // Define the batch size

//   // Ensure all dynamic bindings have been defined.
//   if (!mdl_trt->m_context->allInputDimensionsSpecified()) {
//     throw std::runtime_error("Error, not all required dimensions specified.");
//   }

//   mdl_trt->m_buffers[0] = imgPtr;
//   // Set the address of the input and output buffers
//   for (size_t i = 0; i < mdl_trt->m_buffers.size(); ++i) {
//     bool status = mdl_trt->m_context->setTensorAddress(mdl_trt->m_IOTensorNames[i].c_str(), mdl_trt->m_buffers[i]);
//     if (!status) { 
//       std::cout << "setTensorAddress:  " << status << std::endl;
//       return -1; 
//     }
//   }

//   // Run inference.
//   bool status = mdl_trt->m_context->enqueueV3(inferenceCudaStream);
//   if (!status) { 
//     std::cout << "enqueueV3:  " << status << std::endl;
//     return -1; 
//   }


//   // Batch
//   std::vector<std::vector<float>> batchOutputs{};
//   for (int32_t outputBinding = numInputs; outputBinding < mdl_trt->m_engine->getNbIOTensors(); ++outputBinding) {
//     // We start at index m_inputDims.size() to account for the inputs in our
//     // m_buffers
//     std::vector<float> output;
//     auto outputLength = mdl_trt->m_outputLengths[outputBinding - numInputs];
//     output.resize(outputLength);
//     // Copy the output
//     auto ret = cudaMemcpyAsync(output.data(), static_cast<char *>(mdl_trt->m_buffers[outputBinding]), outputLength * sizeof(float), cudaMemcpyDeviceToHost, inferenceCudaStream);
//     checkCudaErrorCode(ret);
//     batchOutputs.emplace_back(std::move(output));
//   }

//   // Synchronize the cuda stream
//   checkCudaErrorCode(cudaStreamSynchronize(inferenceCudaStream));
//   checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));



//     // int64_t time3 = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
//     std::cout << "batchOutputs.size():  " << batchOutputs.size() << std::endl;
//     std::cout << "batchOutputs[0].size():  " << batchOutputs[0].size() << std::endl;

//     // Check if our model does only object detection or also supports segmentation
//     const auto &numOutputs = mdl_trt->m_outputDims.size();
    
//     // Object detection or pose estimation
//     // Since we have a batch size of 1 and only 1 output, we must convert the output from a 3D array to a 1D array.
//     std::vector<float> featureVector = batchOutputs[0];

//     const auto &outputDims = mdl_trt->m_outputDims;
//     size_t numChannels = outputDims[outputDims.size() - 1].d[1];

//     std::cout << "numChannels:  " << numChannels << std::endl;
//     std::cout << "numOutputs:  " << numOutputs << std::endl;
//     std::cout << featureVector[0] << "  " << featureVector[1] << std::endl;

//     // if (numChannels == 4 + mdl_trt->cfg->label_list.size() + NUM_KPS * 3) {
//     //     // Pose estimation
//     //     postprocessPose(featureVector, det_infos);
//     // } else if (numChannels == 4 + mdl_trt->cfg->label_list.size()){
//     //     // Object detection
//     //     postprocessDetect(featureVector, det_infos);
//     // }
//     // else {
//     //     std::string str = "";
//     //     for (int id = 0; id < outputDims[outputDims.size() - 1].nbDims; id++) {str += std::to_string(outputDims[outputDims.size() - 1].d[id]) + " ";}
//     //     spdlog::get("logger")->info("str:{}, numChannels:{}, mdl_trt->cfg->label_list.size():{}", str, numChannels, mdl_trt->cfg->label_list.size());
//     //     // throw std::runtime_error("Error: Unable to identify whether the model is for Pose estimation or Object detection.");
//     // }

//     // int64_t time4 = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();

//     // std::cout << "time1:" << time2 - time1 << " time2:" << time3 - time2 << " time3:" << time4 - time3 << std::endl;

//     return 0;
// }




// Converts a BGR (or grayscale) GPU image into the model's input layout:
// resize to the engine's (H, W), reorder to RGB, pack HWC -> CHW planes,
// scale to [0, 1] and apply ImageNet mean/std normalization.
// Returns the normalized CV_32FC3 GpuMat whose buffer holds the planar data.
cv::cuda::GpuMat TRTClsVideo::preprocess(const cv::cuda::GpuMat &gpuImg) {
  // inputDims holds per-sample CHW: d[0]=channels, d[1]=height, d[2]=width.
  const auto &inputDims = mdl_trt->m_inputDims[0];

  // Expand grayscale to 3 channels so the BGR->RGB conversion below is valid.
  cv::cuda::GpuMat rgbMat;
  if (gpuImg.channels() == 1) {
    // FIX: use the CUDA variant — the CPU cv::cvtColor cannot operate on a
    // GpuMat and would throw at runtime for grayscale inputs.
    cv::cuda::cvtColor(gpuImg, rgbMat, cv::COLOR_GRAY2BGR);
  } else {
    rgbMat = gpuImg;
  }
  cv::cuda::cvtColor(rgbMat, rgbMat, cv::COLOR_BGR2RGB);

  // cv::Size takes (width, height), hence (d[2], d[1]).
  cv::cuda::GpuMat resized;
  cv::cuda::resize(rgbMat, resized, cv::Size(inputDims.d[2], inputDims.d[1]));

  // HWC -> CHW: split the interleaved image into three plane views laid
  // back-to-back inside gpu_dst's buffer (order: B plane, G plane, R plane
  // of the already-RGB image, i.e. RGB ends up at offsets 2, 1, 0).
  // NOTE(review): this assumes gpu_dst's allocation is continuous (GpuMat
  // uses cudaMallocPitch and may pad rows); verify isContinuous() on target HW.
  cv::cuda::GpuMat gpu_dst(inputDims.d[1], inputDims.d[2], CV_8UC3);

  size_t img_size = static_cast<size_t>(inputDims.d[2]) * inputDims.d[1];
  // FIX: plane views must be (rows = height = d[1], cols = width = d[2]);
  // the original swapped them, which breaks cv::cuda::split whenever the
  // model input is not square.
  std::vector<cv::cuda::GpuMat> input_channels{
      cv::cuda::GpuMat(inputDims.d[1], inputDims.d[2], CV_8U, &(gpu_dst.ptr()[img_size * 2])),
      cv::cuda::GpuMat(inputDims.d[1], inputDims.d[2], CV_8U, &(gpu_dst.ptr()[img_size * 1])),
      cv::cuda::GpuMat(inputDims.d[1], inputDims.d[2], CV_8U, &(gpu_dst.ptr()[img_size * 0]))};
  cv::cuda::split(resized, input_channels); // HWC -> CHW

  // Scale to [0, 1].
  gpu_dst.convertTo(gpu_dst, CV_32FC3, 1.f / 255.f);

  // Apply ImageNet mean subtraction and std division.
  // NOTE(review): the buffer now holds planar (CHW) data, but subtract/divide
  // with a 3-element Scalar treats it as interleaved HWC triplets — confirm
  // this matches the normalization the model was exported with.
  std::vector<float> subVals{0.485, 0.456, 0.406};
  std::vector<float> divVals{0.229, 0.224, 0.225};
  cv::cuda::subtract(gpu_dst, cv::Scalar(subVals[0], subVals[1], subVals[2]), gpu_dst, cv::noArray(), -1);
  cv::cuda::divide(gpu_dst, cv::Scalar(divVals[0], divVals[1], divVals[2]), gpu_dst, 1, -1);

  return gpu_dst;
}

// Post-processing hook invoked by the pipeline (see the disabled loop in
// start_postprocess_thread_imp). Currently a stub: RunDet already produces
// final ClsInfo results, so nothing is done here. All parameters are
// presently unused; always returns 0.
int TRTClsVideo::PostProcess(std::vector<BaseInfo*>& det_infos, 
                int cur_block,
                TRTModelManager* mng, 
                int md_idx) {

  return 0;
}



}  // namespace ai
