#include "yolov3_gpu_detector.h"
#include <algorithm>
#include <chrono>
#include <string>
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/opencv.hpp>



  // Defaulted teardown: cv::dnn::Net and the containers release themselves
  // via RAII. (Removed the stray ';' after the function body.)
  yoloV3GPUDetector::~yoloV3GPUDetector() {}
 /**
 * @brief Construct the detector: load the Darknet model from disk and set
 *        default inference parameters.
 * @param modelpath Directory expected to contain "model.cfg" and
 *                  "model.weights".
 */
  yoloV3GPUDetector::yoloV3GPUDetector(const std::string& modelpath)
  {
    const std::string aConfigFilePath = modelpath + "/model.cfg";
    const std::string aBinaryFilePath = modelpath + "/model.weights";
    _dnnNet = cv::dnn::readNetFromDarknet(aConfigFilePath, aBinaryFilePath);

    if (_dnnNet.empty())
    {
      printf("Could not load net\n");
    }

    // GPU backend intentionally disabled; re-enable when built with CUDA.
    //cv::cuda::setDevice(0);
    //_dnnNet.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
    //_dnnNet.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
    _classNames = {"Himars_launcher","Hummer","Himars_carrier","Others","Command"};
    _inWidth = 608;
    _inHeight = 608;
    _inScaleFactor = 1 / 255.f; // Darknet expects pixels scaled to [0,1]
    _meanVal = 0;               // no mean subtraction
    _targetName.insert("car");

    // Initialize the frame counter used by inference(). The original code
    // declared a LOCAL "int cnt = 0;" here, which shadowed that counter and
    // left it uninitialized (undefined behavior on the first cnt++). The
    // dead locals time_sum/time_mean were removed for the same reason.
    cnt = 0;

    printf("-- Loaded  Net-Config with %s\n",modelpath.c_str() );
  }

/**
 * @brief The interface class of setParams
 * @param aConfigFilePath
 * @param aBinaryFilePath
 * @param aConfidenceThres
 * @param aWidth
 * @param aHeight
 * @param aScaleFactor
 * @param aMeanValue
 * @param aTargetClass
 * @return void
 *
 */
  void yoloV3GPUDetector::setParams(float aConfidenceThres,                           // Confidence threshold
                                    std::unordered_set<std::string> aTargetClassname, // Classes need to be detected
                                    std::string aModelFilePath,                       // Model file path
                                    size_t aWidth,                                    // Block width to DNN
                                    size_t aHeight)
  {
    _confidenceThreshold = aConfidenceThres;
    _inWidth = aWidth;
    _inHeight = aHeight;
  }

/**
 * @brief Run one forward pass of the network on a frame and collect the
 *        detections that pass the confidence threshold (and NMS, for
 *        YOLO/Region outputs).
 * @param pFrame Input image; may be resized in place for im_info networks.
 * @param pRoi   Output bounding boxes of accepted detections (cleared first).
 * @param pName  Output class names, parallel to pRoi.
 * @param pConf  Output confidences, parallel to pRoi.
 * @return true if at least one detection was accepted, false otherwise
 *         (including when pFrame is empty).
 */
  bool yoloV3GPUDetector::inference(cv::Mat &pFrame,
                                           std::vector<cv::Rect2d> &pRoi,
                                           std::vector<std::string> &pName,
                                           std::vector<float> &pConf)
  {
    pRoi.clear();
    pName.clear();
    pConf.clear();
    if (pFrame.empty())
    {
      // Nothing to run on. The original code blocked forever in
      // cv::waitKey(0) here and then still fed the empty Mat to the network.
      printf("The video is empty!!!!!");
      return false;
    }
    cnt++;
    bool detected = false;
    // Generate blob: convert cv::Mat to a 4D NCHW batch, scaled to [0,1].
    cv::Mat inputBlob = cv::dnn::blobFromImage(pFrame,
                                               _inScaleFactor,
                                               cv::Size(_inWidth, _inHeight),
                                               cv::Scalar(_meanVal, _meanVal, _meanVal),
                                               true,   // swapRB: BGR -> RGB
                                               false); // no center crop
    // Set input blob: set the network input
    _dnnNet.setInput(inputBlob);
    if (_dnnNet.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN
    {
      cv::resize(pFrame, pFrame, cv::Size(_inWidth, _inHeight));
      cv::Mat imInfo = (cv::Mat_<float>(1, 3) << _inHeight, _inWidth, 1.6f);
      _dnnNet.setInput(imInfo, "im_info");
    }

    std::vector<cv::Mat> outs;
    // Make forward pass: inference bounding boxes
    auto start = std::chrono::high_resolution_clock::now();
    _dnnNet.forward(outs, getOutputsNames(_dnnNet));
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_span = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
    printf("-- It took %.6f seconds...\n", time_span.count());

    // Post-process the raw outputs into pRoi/pName/pConf.
    // NOTE(review): the returned pointer's ownership contract is unclear in
    // this API; it is intentionally not freed here (and never was). See
    // runofftiny for details — TODO: replace the raw-pointer return.
    int *rect = runofftiny(pFrame, _confidenceThreshold, outs, _dnnNet, pRoi, pName, pConf, detected);
    (void)rect; // the last raw box is not used by this caller

    std::vector<double> layersTimes;
    double freq = cv::getTickFrequency() / 1000;           // ticks per millisecond
    double t = _dnnNet.getPerfProfile(layersTimes) / freq; // layer time in ms
    // The original passed a double to "%d" — undefined behavior.
    printf("  -- Inference time is %.3f s \n ", t / 1000.0);
    return detected;
  }
/**
 * @brief Post-process raw network output blobs into detections.
 *
 * Supports three output layouts: Faster-RCNN/R-FCN (networks with an
 * "im_info" input), SSD-style "DetectionOutput" layers, and YOLO "Region"
 * layers. Only the "Region" branch performs NMS and fills the output
 * vectors; the other two branches only record the last raw box
 * (historical behavior, preserved here).
 *
 * @param pFrame               Frame the detections refer to (for scaling).
 * @param _confidenceThreshold Minimum confidence to keep a detection.
 * @param pOut                 Raw output blobs from Net::forward.
 * @param pNet                 The network (queried for layer types).
 * @param pRoi                 Output boxes kept after NMS (Region only).
 * @param pName                Output class names, parallel to pRoi.
 * @param pConf                Output confidences, parallel to pRoi.
 * @param pDetected            Set true if any detection survived NMS.
 * @return Pointer to the last raw box; indices 1..4 hold
 *         left/top/right/bottom. Points to function-local static storage —
 *         the caller must NOT free it.
 */
  int * yoloV3GPUDetector::runofftiny(cv::Mat &pFrame,
                  float _confidenceThreshold,
                  const std::vector<cv::Mat> &pOut,
                  cv::dnn::Net &pNet,
                  std::vector<cv::Rect2d> &pRoi,
                  std::vector<std::string> &pName,
                  std::vector<float> &pConf,
                  bool &pDetected)
  {
    pDetected = false;
    // Cached on first call; valid only while the same network is in use.
    static std::vector<int> outLayers = pNet.getUnconnectedOutLayers();
    static std::string outLayerType = pNet.getLayer(outLayers[0])->type;
    // Last raw box, written at indices 1..4 to preserve the historical
    // layout. The original code did "new int" (a SINGLE int) and then wrote
    // rect[1]..rect[4] — a heap buffer overflow — and the allocation was
    // never freed. Static storage fixes both; like the caches above, this
    // makes the function non-reentrant.
    static int rect[5] = {0, 0, 0, 0, 0};
    if (pNet.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN
    {
      // Network produces output blob with a shape 1x1xNx7 where N is a number of
      // detections and an every detection is a vector of values
      // [batchId, classId, confidence, left, top, right, bottom]
      CV_Assert(pOut.size() == 1);
      float *data = (float *)pOut[0].data;
      for (size_t i = 0; i < pOut[0].total(); i += 7)
      {
        float confidence = data[i + 2];
        if (confidence > _confidenceThreshold)
        {
          // Coordinates are already absolute pixels for this layout.
          rect[1] = (int)data[i + 3]; // left
          rect[2] = (int)data[i + 4]; // top
          rect[3] = (int)data[i + 5]; // right
          rect[4] = (int)data[i + 6]; // bottom
          int classId = (int)(data[i + 1]) - 1; // Skip 0th background class id.
          (void)classId; // kept for the (disabled) drawPred call below
          //drawPred(classId, confidence, rect[1], rect[2], rect[3], rect[4], pFrame);
        }
      }
    }
    else if (outLayerType == "DetectionOutput")
    {
      // Network produces output blob with a shape 1x1xNx7 where N is a number of
      // detections and an every detection is a vector of values
      // [batchId, classId, confidence, left, top, right, bottom]
      CV_Assert(pOut.size() == 1);
      float *data = (float *)pOut[0].data;
      for (size_t i = 0; i < pOut[0].total(); i += 7)
      {
        float confidence = data[i + 2];
        if (confidence > _confidenceThreshold)
        {
          // Coordinates are normalized [0,1]; scale to frame pixels.
          rect[1] = (int)(data[i + 3] * pFrame.cols); // left
          rect[2] = (int)(data[i + 4] * pFrame.rows); // top
          rect[3] = (int)(data[i + 5] * pFrame.cols); // right
          rect[4] = (int)(data[i + 6] * pFrame.rows); // bottom
          int classId = (int)(data[i + 1]) - 1; // Skip 0th background class id.
          (void)classId; // kept for the (disabled) drawPred call below
          //drawPred(classId, confidence, rect[1], rect[2], rect[3], rect[4], pFrame);
        }
      }
    }
    else if (outLayerType == "Region")
    {
      std::vector<int> classIds;
      std::vector<float> confidences;
      std::vector<cv::Rect> boxes;
      for (size_t i = 0; i < pOut.size(); ++i)
      {
        // Network produces output blob with a shape NxC where N is a number of
        // detected objects and C is a number of classes + 4 where the first 4
        // numbers are [center_x, center_y, width, height]
        float *data = (float *)pOut[i].data;
        for (int j = 0; j < pOut[i].rows; ++j, data += pOut[i].cols)
        {
          cv::Mat scores = pOut[i].row(j).colRange(5, pOut[i].cols);
          cv::Point classIdPoint;
          double confidence;
          cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
          if (confidence > _confidenceThreshold)
          {
            // Box is given as normalized center/size; convert to pixels.
            int centerX = (int)(data[0] * pFrame.cols);
            int centerY = (int)(data[1] * pFrame.rows);
            int width = (int)(data[2] * pFrame.cols);
            int height = (int)(data[3] * pFrame.rows);
            int left = centerX - width / 2;
            int top = centerY - height / 2;
            rect[1] = left;
            rect[2] = top;
            rect[3] = centerX + width / 2;  // right
            rect[4] = centerY + height / 2; // bottom
            classIds.push_back(classIdPoint.x);
            confidences.push_back((float)confidence);
            boxes.push_back(cv::Rect(left, top, width, height));
          }
        }
      }
      // Non-maximum suppression collapses overlapping candidate boxes.
      std::vector<int> indices;
      cv::dnn::NMSBoxes(boxes, confidences, _confidenceThreshold, 0.4f, indices);
      if (indices.size() > 0)
      {
        pDetected = true;
        for (size_t i = 0; i < indices.size(); ++i)
        {
          int idx = indices[i];
          cv::Rect box = boxes[idx];
          pRoi.push_back(box);
          int clsId = classIds[idx];
          // Guard against models that emit more classes than _classNames holds
          // (the original indexed unconditionally).
          std::string className = (clsId >= 0 && clsId < (int)_classNames.size())
                                      ? _classNames[clsId]
                                      : "unknown";
          pName.push_back(className);
          pConf.push_back(confidences[idx]);
          //drawPred(classIds[idx], confidences[idx], box.x, box.y, box.x + box.width, box.y + box.height, pFrame);
        }
      }
    }
    else
      CV_Error(cv::Error::StsNotImplemented, "Unknown output layer type: " + outLayerType);

    return rect;
  }
/**
 * @brief Draw one predicted bounding box with its class/confidence label.
 * @param aClassId Index into _classNames (bounds-checked here).
 * @param aConf    Detection confidence, rendered with two decimals.
 * @param aLeft    Box left edge (pixels).
 * @param aTop     Box top edge (pixels).
 * @param aRight   Box right edge (pixels).
 * @param aBottom  Box bottom edge (pixels).
 * @param pFrame   Image the box and label are drawn onto.
 * @return void
 */
  void yoloV3GPUDetector::drawPred(int aClassId, float aConf, int aLeft, int aTop, int aRight, int aBottom, cv::Mat &pFrame)
  {
    cv::rectangle(pFrame, cv::Point(aLeft, aTop), cv::Point(aRight, aBottom), cv::Scalar(0, 255, 0));

    std::string label = cv::format("%.2f", aConf);
    // Bounds check added: the original indexed _classNames unconditionally.
    if (!_classNames.empty() && aClassId >= 0 && aClassId < (int)_classNames.size())
    {
      label = _classNames[aClassId] + ": " + label;
    }

    int baseLine;
    cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

    // Keep the label inside the image when the box touches the top edge.
    aTop = cv::max(aTop, labelSize.height);
    // The original computed the label geometry but never rendered the text;
    // draw a filled background plus the label, as in the OpenCV DNN sample.
    cv::rectangle(pFrame,
                  cv::Point(aLeft, aTop - labelSize.height),
                  cv::Point(aLeft + labelSize.width, aTop + baseLine),
                  cv::Scalar::all(255), cv::FILLED);
    cv::putText(pFrame, label, cv::Point(aLeft, aTop),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar());
  }

 /**
 * @brief Collect the names of the network's unconnected (output) layers,
 *        as required by Net::forward for multi-output models such as YOLO.
 * @param pNet Network to query.
 * @return Names of all output layers.
 */
  std::vector<cv::String> yoloV3GPUDetector::getOutputsNames(const cv::dnn::Net &pNet)
  {
    // The original guarded this with "if (names.empty())" on a freshly
    // constructed local — always true, a leftover from the OpenCV sample's
    // static-cache version. Computed unconditionally here.
    std::vector<int> outLayers = pNet.getUnconnectedOutLayers();
    std::vector<cv::String> layersNames = pNet.getLayerNames();
    std::vector<cv::String> names(outLayers.size());
    for (size_t i = 0; i < outLayers.size(); ++i){
      // getUnconnectedOutLayers() returns 1-based layer ids, hence the -1.
      names[i] = layersNames[outLayers[i] - 1];
    }
    return names;
  }
