/*=====================================================================================
* Copyright (c) 2020, micROS Group, NIIDT, TAIIC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
*  provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and
*      the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
*      and the following disclaimer in the documentation and/or other materials provided with the
*      distribution.
* 3. All advertising materials mentioning features or use of this software must display the following
*      acknowledgement: This product includes software developed by the micROS Group and its
*      contributors.
* 4. Neither the name of the Group nor the names of contributors may be used to endorse or promote
*     products derived from this software without specific prior written permission.
*
 * THIS SOFTWARE IS PROVIDED BY MICROS GROUP AND CONTRIBUTORS ''AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR  PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE MICROS GROUP OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL,  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=====================================================================================
*/

#ifndef __OPTICAL_CLIP_DETECTOR_H
#define __OPTICAL_CLIP_DETECTOR_H

// Use the standard unordered containers on any C++11-or-newer toolchain.
// BUG FIX: the original tested `__cplusplus == 201103L`, which is false on
// C++14/17/20 compilers (201402L, 201703L, ...) and wrongly fell back to the
// legacy <tr1/...> headers, leaving std::unordered_map/std::unordered_set
// undeclared on modern builds.
#if __cplusplus >= 201103L
#include <unordered_map>
#include <unordered_set>
#else
// Pre-C++11 toolchains only: fall back to the TR1 implementations.
#include <tr1/unordered_map>
#include <tr1/unordered_set>
#endif

// C++ standard library
#include <iostream>  // std::cout / std::cerr (Logger, cudaCheck)
#include <map>       // std::map (known-embedding tables)
#include <memory>    // std::unique_ptr (TrtUniquePtr)
#include <numeric>
#include <string>
#include <vector>

// Third-party libraries
#include "opencv2/opencv.hpp"

// Project headers
#include <cognition_resource/optical_detection.resource.h>
#include <cognition/cognition.h>
#include <cognition/resource_collection.h>
#include <cognition/cognition_common.h>
#include <micros_bt/tree_manager/blackboard.h>

#ifdef INFERENCE_ENGINE_GPU_CUDA
#include <NvInfer.h>
#include <NvInferRuntime.h>
#include <cuda.h>
#include <cuda_runtime.h>
#endif

namespace optical_clip_detector
{
#ifdef INFERENCE_ENGINE_GPU_CUDA
  /// Custom deleter for TensorRT objects, which must be released through
  /// their destroy() member rather than operator delete.
  template <typename T>
  struct TrtDestroyer
  {
    void operator()(T* object)
    {
      object->destroy();
    }
  };

  /// Owning smart pointer for TensorRT objects (engine, context, ...):
  /// a std::unique_ptr wired to the destroy()-based deleter above.
  template <typename T>
  using TrtUniquePtr = std::unique_ptr<T, TrtDestroyer<T> >;

  class Logger : public nvinfer1::ILogger {
	  void log(Severity severity, const char* msg) noexcept override {
      // suppress info-level messages
      if (severity <= Severity::kWARNING)
        std::cout << std::string(msg) << std::endl;
    }
  } logger;

  /// Optical-detection resource combining a YOLOv3 GPU detector (region
  /// proposals) with a CLIP-style embedding model executed through TensorRT.
  /// NOTE(review): the pipeline itself lives in the .cpp; member comments
  /// below are inferred from names and the helper declarations in this
  /// header -- confirm against the implementation.
  class Yolov3CLIPDetector : public cognition::OpticalDetection::Resource
  {
  public:
    Yolov3CLIPDetector();
    ~Yolov3CLIPDetector() override;
    /// Set up the resource registered under resource_id (model/config
    /// loading -- see the .cpp for details).
    virtual bool Initialize(const std::string& resource_id) override;

    /// Configure detection parameters (threshold, target classes, model
    /// path, and network input size).
    virtual void setParams(float aConfidenceThres,                           // Confidence threshold
                    std::unordered_set<std::string> aTargetClassname, // Classes need to be detected
                    std::string aModelFilePath,                       // Model file path
                    size_t aWidth,                                    // Block width to DNN
                    size_t aHeight) override;


    /// Run detection on a visible-light frame. Outputs are parallel vectors:
    /// one bounding box / class name / confidence triple per detection.
    virtual bool inference(cv::Mat &visible_pFrame,
                          std::vector<cv::Rect2d> &pRoi,
                          std::vector<std::string> &pName,
                          std::vector<float> &pConf
                          ) override;
    
    /// Build a serialized TensorRT engine (trt_file) from an ONNX model.
    bool buildEngine(std::string &onnx_file, std::string &trt_file);

    /// Parse the model configuration file at modelconfigpath.
    bool initModelConfig(const std::string modelconfigpath);

    /// Parse the model runtime configuration file at modelRunconfigpath.
    bool initModelRunCon(const std::string modelRunconfigpath);

    /// Deserialize and initialize the TensorRT engine from enginePath.
    bool initEngine(const std::string enginePath);

    // --- Simple profiling accumulators (units defined by the .cpp) ---
    double time_yolo_sum=0;   // accumulated YOLO inference time
    double time_yolo_mean=0;  // running mean of YOLO inference time
    double time_sum=0;        // accumulated total pipeline time
    double time_mean=0;       // running mean of total pipeline time
    int time_count=0;         // number of timed iterations
    int pattern=0;            // run-mode selector; semantics defined in the .cpp
    std::vector<float> means; // per-channel normalization means (see image_preprocess)
    std::vector<float> stds;  // per-channel normalization std-devs (see image_preprocess)
    std::unordered_set<std::string> target_names;          // class names to report
    std::unordered_set<std::string> imagebind_class_names; // classes known to the embedding model
    float temperature = 10;   // temperature passed to softmax() -- TODO confirm
    float cof_threshold = 0.5; // confidence threshold for accepting detections
    std::string model_path;          // path to the detection model file
    std::string text_emdeding_path;  // path to precomputed text embeddings
                                     // (identifier keeps historical typo "emdeding")
    bool first_init =false;   // presumably set once lazy init completed -- TODO confirm

  private:

    // optical_detector::YoloGpuDetector _YoloV3GPUDetector;
    // Handle into the cognition framework used to resolve resources.
    boost::shared_ptr<cognition::Handle> cognition_handle_ = boost::make_shared<cognition::Handle>();
    // Region-proposal detector resolved through the cognition resource system.
    boost::shared_ptr<cognition::OpticalDetection::Resource> _YoloV3GPUDetector;
    TrtUniquePtr<nvinfer1::ICudaEngine> _engine;        // deserialized TensorRT engine
    TrtUniquePtr<nvinfer1::IExecutionContext> _context; // execution context for _engine
    std::vector<void*> _bindings;      // buffers bound to the engine's I/O slots
    std::vector<int64_t> _buffer_size; // byte size of each binding buffer
    cudaStream_t _stream{};            // CUDA stream used for inference
    std::vector<float> _embeddings;    // embedding output of the CLIP model
    // Precomputed text embeddings: one name->vector map per embedding group.
    std::vector<std::map<std::string ,std::vector<float>>> _known_embeddings;
    std::vector<std::string> cls_names; // ordered list of class names
    size_t _inWidth, _inHeight;         // detector network input resolution
    size_t _clip_Width=224, _clip_Height=224; // CLIP network input resolution
    size_t _embedding_size;             // dimensionality of the embedding vectors
    size_t _gpu_id=0;                   // CUDA device index
    float _inScaleFactor, _meanVal, _stdVal; // scalar preprocessing parameters
    std::string _resourceId = "";       // id passed to Initialize()
    micros_bt::Blackboard _blackboard;  // behavior-tree blackboard
  };

  /// Deserialize a TensorRT engine from the given serialized engine data.
  /// NOTE(review): ownership of the returned raw pointer presumably passes to
  /// the caller (wrap in TrtUniquePtr) -- confirm in the implementation.
  nvinfer1::ICudaEngine* getEngine(const std::string& engine);
  /// Resize src_image to width x height and convert it into normalized float
  /// values (using per-channel means/stds) stored in input_vectors.
  /// NOTE(review): exact layout (CHW vs HWC) is defined in the .cpp.
  bool image_preprocess(const cv::Mat &src_image, int width, int height, std::vector<float> &input_vectors,std::vector<float> &means, std::vector<float> &stds);
  /// Flatten a cv::Mat into a float vector.
  bool cv2vec(const cv::Mat src_image, std::vector<float> &input_vectors);
  /// Number of elements described by the dimension set d (product of dims).
  int volume(const nvinfer1::Dims& d);
  /// Size in bytes of one element of the given TensorRT data type.
  int getElementSize(nvinfer1::DataType &type);
  /// Load precomputed text embeddings from the given files, keyed by name.
  std::map<std::string ,std::vector<float>> load_pre_text_file(std::vector<std::string> &files);
  /// Load all precomputed text embeddings under path for the listed classes
  /// into known_embeddings.
  bool load_pre_text_embeddings(std::string path, std::vector<std::map<std::string ,std::vector<float>>> &known_embeddings, std::vector<std::string> cls_names);
  /// Cosine similarity between two embedding vectors.
  float cosine_similarity(const std::vector<float>& emb1, const std::vector<float>& emb2);
  /// Temperature-scaled softmax over conf.
  /// NOTE(review): scaling direction (sharpen vs flatten) depends on how the
  /// .cpp applies temperature -- confirm there.
  std::vector<float> softmax(std::vector<float> &conf, float temperature=100);
  /// Abort the process if a CUDA runtime call did not succeed.
  ///
  /// @param ret  Status code returned by a CUDA runtime API call.
  /// @param err  Stream receiving the diagnostic text (defaults to std::cerr).
  inline void cudaCheck(cudaError_t ret, std::ostream& err = std::cerr)
  {
    // Guard clause: nothing to do on success.
    if (ret == cudaSuccess)
      return;
    // Report the human-readable error text, then terminate immediately.
    err << "Cuda failure: " << cudaGetErrorString(ret) << std::endl;
    abort();
  }
#endif

} // namespace optical_clip_detector

#endif
