/*=====================================================================================
* Copyright (c) 2020, micROS Group, NIIDT, TAIIC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
*  provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and
*      the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
*      and the following disclaimer in the documentation and/or other materials provided with the
*      distribution.
* 3. All advertising materials mentioning features or use of this software must display the following
*      acknowledgement: This product includes software developed by the micROS Group and its
*      contributors.
* 4. Neither the name of the Group nor the names of contributors may be used to endorse or promote
*     products derived from this software without specific prior written permission.
*
 * THIS SOFTWARE IS PROVIDED BY MICROS GROUP AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR  PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE MICROS GROUP OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL,  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=====================================================================================
* Author: Longfei Su, Tianhao Zhang.
*/

#include "yolov3_gpu_detector/yolov3_gpu_detector.h"
#include <algorithm>
#include <cognition/resource_collection.h>
#include <chrono>
#include <string>


namespace optical_detector
{
  /**
 * @brief Construct a yoloV3 GPU detector resource instance.
 *
 */
  yoloV3GPUDetector::yoloV3GPUDetector()
  {
    // Fixed typos in the log line ("Creat instence" -> "Create instance").
    printf("-- Create instance of Resource yoloV3GPUDetector GPU\n");
  }

  // Defaulted destructor (the old `{};` carried a stray empty declaration).
  yoloV3GPUDetector::~yoloV3GPUDetector() = default;
 /**
 * @brief Load the Darknet model for resource_id and configure the CUDA backend.
 * @param resource_id  model resource identifier; a trailing "_COG" suffix is
 *                     stripped before building the model path
 * @return true when the network was loaded, false when loading failed
 *
 */
  bool yoloV3GPUDetector::Initialize(const std::string& resource_id)
  {
    const std::string package_name = "pretrained_model";
    // 2022.11.08: since it is the unit test, we truncate package name and delete
    // the "_COG" from the end of resource id.
    // Only strip "_COG" when it really is a suffix; the old code blindly chopped
    // the last four characters whenever "_COG" appeared anywhere in the id.
    const bool has_cog_suffix =
        resource_id.size() >= 4 &&
        resource_id.compare(resource_id.size() - 4, 4, "_COG") == 0;
    const std::string true_resource_id =
        has_cog_suffix ? resource_id.substr(0, resource_id.size() - 4) : resource_id;

    const std::string package_dir = cognition::getPackagePath(package_name);
    const std::string model_dir = package_dir + "/Darknet/" + true_resource_id;
    _dnnNet = cv::dnn::readNetFromDarknet(model_dir + "/model.cfg",
                                          model_dir + "/model.weights");

    if (_dnnNet.empty())
    {
      // The old code logged the failure but still returned true, so callers
      // would run inference on an empty network.
      printf("Could not load net\n");
      return false;
    }

    cv::cuda::setDevice(0);
    _dnnNet.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
    _dnnNet.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
    _classNames = {"car"};
    // Fixed 608x608 YOLO input, pixels scaled to [0,1], zero mean.
    _inWidth = 608;
    _inHeight = 608;
    _inScaleFactor = 1 / 255.f;
    _meanVal = 0;
    _targetName.insert("car");

    printf("-- Loaded  Net-Config with %s\n", resource_id.c_str());
    return true;
  }

/**
 * @brief Set runtime detection parameters.
 * @param aConfidenceThres  confidence threshold used when decoding detections
 * @param aTargetClassname  currently UNUSED -- the target class set is fixed
 *                          to "car" in Initialize()
 * @param aModelFilePath    currently UNUSED -- the model is loaded in
 *                          Initialize() from the resource id
 * @param aWidth            network input block width
 * @param aHeight           network input block height
 * @return void
 *
 */
  void yoloV3GPUDetector::setParams(float aConfidenceThres,                           // Confidence threshold
                                    std::unordered_set<std::string> aTargetClassname, // Classes need to be detected
                                    std::string aModelFilePath,                       // Model file path
                                    size_t aWidth,                                    // Block width to DNN
                                    size_t aHeight)
  {
    _confidenceThreshold = aConfidenceThres;
    _inWidth = aWidth;
    _inHeight = aHeight;
    // NOTE(review): these two parameters are accepted but never applied;
    // silenced explicitly so the intent (or the omission) is visible.
    (void)aTargetClassname;
    (void)aModelFilePath;
  }

/**
 * @brief Run one forward pass over pFrame and collect detected targets.
 * @param pFrame input BGR image; resized in place only for Faster-RCNN/R-FCN
 *               style networks (those exposing an "im_info" input)
 * @param pRoi   output bounding boxes of detected targets
 * @param pName  output class names (always "car" with the current model)
 * @param pConf  output confidence scores
 * @return true when at least one target was detected, false otherwise
 *
 */
  bool yoloV3GPUDetector::inference(cv::Mat &pFrame,
                                           std::vector<cv::Rect2d> &pRoi,
                                           std::vector<std::string> &pName,
                                           std::vector<float> &pConf)
  {
    if (pFrame.empty())
    {
      // The old code logged and then fell through, feeding an empty Mat to
      // the network (it also called cv::waitKey(), which needs a GUI window).
      printf("The video is empty!!!!!");
      return false;
    }
    cnt++;
    bool detected = false;
    pRoi.clear();
    pName.clear();
    pConf.clear();

    // Generate blob: convert cv::Mat to a normalized NCHW batch (RGB, no crop).
    cv::Mat inputBlob = cv::dnn::blobFromImage(pFrame,
                                               _inScaleFactor,
                                               cv::Size(_inWidth, _inHeight),
                                               cv::Scalar(_meanVal, _meanVal, _meanVal),
                                               true,
                                               false);
    // Set the network input.
    _dnnNet.setInput(inputBlob);
    if (_dnnNet.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN
    {
      resize(pFrame, pFrame, cv::Size(_inWidth, _inHeight));
      cv::Mat imInfo = (cv::Mat_<float>(1, 3) << _inHeight, _inWidth, 1.6f);
      _dnnNet.setInput(imInfo, "im_info");
    }

    // Make forward pass (timed with a wall clock) to produce raw outputs.
    std::vector<cv::Mat> outs;
    auto start = std::chrono::high_resolution_clock::now();
    _dnnNet.forward(outs, getOutputsNames(_dnnNet));
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_span = end - start;
    printf("-- It took %.6f seconds...\n", time_span.count());

    // Decode raw outputs into boxes/names/confidences.
    // NOTE(review): runofftiny returns a heap-allocated int* whose ownership
    // is never taken anywhere; the return value is deliberately ignored here
    // to keep the existing contract -- TODO fix runofftiny's ownership.
    runofftiny(pFrame, _confidenceThreshold, outs, _dnnNet, pRoi, pName, pConf, detected);

    // getPerfProfile() reports ticks; freq converts ticks -> milliseconds.
    std::vector<double> layersTimes;
    double freq = cv::getTickFrequency() / 1000;
    double t = _dnnNet.getPerfProfile(layersTimes) / freq;
    // The old printf used %d with a double argument (undefined behavior).
    printf("  -- Inference time is %.3f s \n ", t / 1000);

    return detected;
  }
/**
 * @brief Decode raw DNN output blobs into bounding boxes, names and scores.
 *
 * Supports three output layouts: Faster-RCNN/R-FCN ("im_info" input),
 * "DetectionOutput" (SSD-style) and "Region" (YOLO). Only the "Region"
 * branch fills pRoi/pName/pConf and sets pDetected; the other two branches
 * only record the last box into the returned scratch array (pre-existing
 * behavior, kept as-is).
 *
 * @param pFrame               input frame (used for coordinate scaling)
 * @param _confidenceThreshold minimum confidence for a detection to be kept
 * @param pOut                 raw output blobs from cv::dnn::Net::forward
 * @param pNet                 the network (queried for output layer type)
 * @param pRoi                 output boxes (YOLO branch only)
 * @param pName                output class names (YOLO branch only)
 * @param pConf                output confidences (YOLO branch only)
 * @param pDetected            set true when NMS keeps at least one box
 * @return heap-allocated int[5] scratch (indices 1..4 = left/top/right/bottom
 *         of the last candidate); NOTE(review): no caller visible here frees
 *         it -- ownership needs fixing.
 *
 */
  int *runofftiny(cv::Mat &pFrame,
                  float _confidenceThreshold,
                  const std::vector<cv::Mat> &pOut,
                  cv::dnn::Net &pNet,
                  std::vector<cv::Rect2d> &pRoi,
                  std::vector<std::string> &pName,
                  std::vector<float> &pConf,
                  bool &pDetected)
  {
    pDetected = false;
    // NOTE(review): statics cache the first network's layout; wrong if this
    // free function is ever called with a differently-shaped net -- confirm.
    static std::vector<int> outLayers = pNet.getUnconnectedOutLayers();
    static std::string outLayerType = pNet.getLayer(outLayers[0])->type;
    // BUG FIX: the original allocated a single int (`new int`) and then wrote
    // rect[1]..rect[4] -- a heap buffer overflow. Allocate the five slots the
    // code actually indexes (slot 0 unused), zero-initialized.
    int *rect = new int[5]();
    if (pNet.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN
    {
      // Output blob shape 1x1xNx7; each detection is
      // [batchId, classId, confidence, left, top, right, bottom] in pixels.
      CV_Assert(pOut.size() == 1);
      float *data = (float *)pOut[0].data;
      for (size_t i = 0; i < pOut[0].total(); i += 7)
      {
        float confidence = data[i + 2];
        if (confidence > _confidenceThreshold)
        {
          rect[1] = (int)data[i + 3];
          rect[2] = (int)data[i + 4];
          rect[3] = (int)data[i + 5];
          rect[4] = (int)data[i + 6];
        }
      }
    }
    else if (outLayerType == "DetectionOutput")
    {
      // Output blob shape 1x1xNx7; each detection is
      // [batchId, classId, confidence, left, top, right, bottom] normalized.
      CV_Assert(pOut.size() == 1);
      float *data = (float *)pOut[0].data;
      for (size_t i = 0; i < pOut[0].total(); i += 7)
      {
        float confidence = data[i + 2];
        if (confidence > _confidenceThreshold)
        {
          rect[1] = (int)(data[i + 3] * pFrame.cols);
          rect[2] = (int)(data[i + 4] * pFrame.rows);
          rect[3] = (int)(data[i + 5] * pFrame.cols);
          rect[4] = (int)(data[i + 6] * pFrame.rows);
        }
      }
    }
    else if (outLayerType == "Region")
    {
      std::vector<int> classIds;
      std::vector<float> confidences;
      std::vector<cv::Rect> boxes;
      for (size_t i = 0; i < pOut.size(); ++i)
      {
        // Output blob shape NxC: N detected objects, C = classes + 4 where
        // the first 4 numbers are [center_x, center_y, width, height].
        float *data = (float *)pOut[i].data;
        for (int j = 0; j < pOut[i].rows; ++j, data += pOut[i].cols)
        {
          cv::Mat scores = pOut[i].row(j).colRange(5, pOut[i].cols);
          cv::Point classIdPoint;
          double confidence;
          cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
          if (confidence > _confidenceThreshold)
          {
            int centerX = (int)(data[0] * pFrame.cols);
            int centerY = (int)(data[1] * pFrame.rows);
            int width = (int)(data[2] * pFrame.cols);
            int height = (int)(data[3] * pFrame.rows);
            int left = centerX - width / 2;
            int top = centerY - height / 2;
            rect[1] = left;
            rect[2] = top;
            rect[3] = centerX + width / 2;
            rect[4] = centerY + height / 2;
            classIds.push_back(classIdPoint.x);
            confidences.push_back((float)confidence);
            boxes.push_back(cv::Rect(left, top, width, height));
          }
        }
      }
      // Non-maximum suppression with a fixed 0.4 IoU threshold.
      std::vector<int> indices;
      cv::dnn::NMSBoxes(boxes, confidences, _confidenceThreshold, 0.4f, indices);
      if (indices.size() > 0)
      {
        pDetected = true;
        for (size_t i = 0; i < indices.size(); ++i)
        {
          int idx = indices[i];
          pRoi.push_back(boxes[idx]);
          // The single-class model only detects cars, so the name is fixed.
          pName.push_back("car");
          pConf.push_back(confidences[idx]);
        }
      }
    }
    else
      CV_Error(cv::Error::StsNotImplemented, "Unknown output layer type: " + outLayerType);

    return rect;
  }
/**
 * @brief Draw a predicted bounding box and its class/confidence label on pFrame.
 * @param aClassId index into _classNames for the detected class
 * @param aConf    detection confidence, rendered with two decimals
 * @param aLeft    box left edge (pixels)
 * @param aTop     box top edge (pixels)
 * @param aRight   box right edge (pixels)
 * @param aBottom  box bottom edge (pixels)
 * @param pFrame   image to draw on (modified in place)
 * @return void
 *
 */
  void yoloV3GPUDetector::drawPred(int aClassId, float aConf, int aLeft, int aTop, int aRight, int aBottom, cv::Mat &pFrame)
  {
    cv::rectangle(pFrame, cv::Point(aLeft, aTop), cv::Point(aRight, aBottom), cv::Scalar(0, 255, 0));

    std::string label = cv::format("%.2f", aConf);
    if (!_classNames.empty())
    {
      label = _classNames[aClassId] + ": " + label;
    }

    int baseLine;
    cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

    // Keep the label inside the frame even for boxes touching the top edge.
    aTop = cv::max(aTop, labelSize.height);
    // BUG FIX: the label was measured but never rendered; draw a filled
    // background and the text, as in the OpenCV object-detection sample.
    cv::rectangle(pFrame,
                  cv::Point(aLeft, aTop - labelSize.height),
                  cv::Point(aLeft + labelSize.width, aTop + baseLine),
                  cv::Scalar(255, 255, 255),
                  cv::FILLED);
    cv::putText(pFrame, label, cv::Point(aLeft, aTop), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
  }

  void cakkbacktiny(int aPos, void *userdata)
  {
    yoloV3GPUDetector *ydt = new yoloV3GPUDetector();
    ydt->_confidenceThreshold = aPos * 0.01f;
  }
 /**
 * @brief Collect the names of the network's unconnected (output) layers.
 * @param pNet the network to inspect
 * @return vector of output layer names, in getUnconnectedOutLayers order
 *
 */
  std::vector<cv::String> yoloV3GPUDetector::getOutputsNames(const cv::dnn::Net &pNet)
  {
    // The old `if (names.empty())` guard was always true (a leftover from a
    // `static` cache in the OpenCV sample) and has been removed.
    std::vector<int> outLayers = pNet.getUnconnectedOutLayers();
    std::vector<cv::String> layersNames = pNet.getLayerNames();
    std::vector<cv::String> names(outLayers.size());
    for (size_t i = 0; i < outLayers.size(); ++i)
    {
      // getUnconnectedOutLayers() returns 1-based layer ids, hence the -1.
      names[i] = layersNames[outLayers[i] - 1];
    }
    return names;
  }

} // namespace optical_detector

#include <class_loader/class_loader.h>
CLASS_LOADER_REGISTER_CLASS(optical_detector::yoloV3GPUDetector, cognition::OpticalDetection::Resource);