/*=====================================================================================
* Copyright (c) 2020, micROS Group, NIIDT, TAIIC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
*  provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and
*      the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
*      and the following disclaimer in the documentation and/or other materials provided with the
*      distribution.
* 3. All advertising materials mentioning features or use of this software must display the following
*      acknowledgement: This product includes software developed by the micROS Group and its
*      contributors.
* 4. Neither the name of the Group nor the names of contributors may be used to endorse or promote
*     products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY MICROS GROUP AND CONTRIBUTORS ''AS IS''AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR  PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE MICROS, GROUP OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
*  SPECIAL,  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
*  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=======================================================================================
*/

#include "yolov3_clip_detector/yolov3_clip_detector.h"
#include <algorithm>
#include <NvOnnxParser.h>
#include <dirent.h>
#include <math.h>
#include <jsoncpp/json/json.h>
#include <cstdlib>
#include <chrono>


namespace optical_clip_detector
{

  /**
   * @brief Ensure that a directory exists, creating it (and parents) if needed.
   *        The process exits with -1 when creation fails.
   * @param dir_name absolute or relative directory path
   */
  void createDirs(const std::string &dir_name)
  {
    // Nothing to do when the directory is already present.
    if (boost::filesystem::exists(dir_name))
      return;
    try
    {
      boost::filesystem::create_directories(dir_name);
    }
    catch (...)
    {
      printf("Fail to create directory: %s, exit.\n", dir_name.c_str());
      exit(-1);
    }
  }

  /**
 * @brief Construct the Yolov3CLIPDetector resource (yolov3 detection
 *        refined by an imagebind/CLIP classifier). Only logs; all real
 *        setup happens in Initialize().
 */
  Yolov3CLIPDetector::Yolov3CLIPDetector()
  {
    // Fix: corrected the typos "Creat instence" in the log message.
    SPDLOG_INFO("-- Create instance of Resource Yolov3CLIPDetector\n");
  }

  /**
 * @brief Destructor: releases every CUDA buffer allocated in initEngine()
 *        and destroys the CUDA stream.
 */
  Yolov3CLIPDetector::~Yolov3CLIPDetector()
  {
    // Free the per-binding device buffers allocated in initEngine().
    for (auto &binding:_bindings)
    {
      cudaCheck(cudaFree(binding));
    }
    // Fix: the stream created by cudaStreamCreate() in initEngine() was
    // never destroyed, leaking one stream per detector instance.
    // NOTE(review): assumes _stream is null-initialized in the header when
    // Initialize() was never called — TODO confirm.
    if (_stream)
    {
      cudaCheck(cudaStreamDestroy(_stream));
    }
    SPDLOG_INFO("-- Destroy instance of Resource Yolov3CLIPDetector\n");
  }

  bool Yolov3CLIPDetector::initModelConfig(const std::string modelconfigpath){
      Json::Reader json_reader;
      Json::Value rootValue;
      std::ifstream infile(modelconfigpath.c_str(), std::ios::binary);
      if (!infile.is_open()){
        SPDLOG_INFO("init imagebind config file is open false");
        return false;
      }

      if (json_reader.parse(infile, rootValue)){
        _clip_Width = rootValue["width"].asInt();
        _clip_Height =  rootValue["height"].asInt();
        text_emdeding_path = rootValue["text_embeding"].asString();
        Json::Value targetVals = rootValue["class_names"];
        for(unsigned int i=0; i < targetVals.size(); i++){
          imagebind_class_names.insert(targetVals[i].asString());
        }
        
      }else{
        SPDLOG_INFO("Error: Can not parse imagebind config file");
        return false;
      }
      return true;

  }


  /**
   * @brief Load the runtime configuration for the CLIP re-classification:
   *        class-name mapping, per-channel normalization mean/std, the yolo
   *        classes to detect, extra text-embedding names, the softmax
   *        temperature and the filtering pattern.
   * @param modelRunconfigpath path of the run-config JSON file
   * @return true on success, false if either JSON file cannot be opened or parsed
   */
  bool Yolov3CLIPDetector::initModelRunCon(const std::string modelRunconfigpath){
      Json::Reader json_reader;
      Json::Value rootValue;

      // 6. load pre-save embeddings
      // First pass: the fixed target_name_map.json provides the base list of
      // class names (its values, not its keys).
      std::string file_path = "/opt/micros/share/capability_package/target_name_map.json";
      SPDLOG_INFO("json file path = {}\n", file_path);
      std::ifstream jsonfile(file_path, std::ios::binary);
      if (!jsonfile.is_open())
      {
        SPDLOG_INFO("Error open json file = {}", file_path);
        return false;
      }
      
      if((json_reader.parse(jsonfile, rootValue)))
      {
        Json::Value::Members member = rootValue.getMemberNames();
        for (Json::Value::Members::iterator iter = member.begin(); iter != member.end(); ++iter)
        {
          cls_names.push_back(rootValue[*iter].asString());
        }
      }else{
        SPDLOG_INFO("Error: Can not parse imagebind run config file");
        return false;
      }
      jsonfile.close();

      // Second pass: the run-config file supplies normalization constants,
      // detection targets and additional text-embedding names.
      std::ifstream infile(modelRunconfigpath.c_str(), std::ios::binary);
      if (!infile.is_open()){
        SPDLOG_INFO("init imagebind run config file is open false");
        return false;
      }

      // rootValue is intentionally reused; parse() replaces its contents.
      if (json_reader.parse(infile, rootValue)){
        // mean[i]/std[i] pairs are assumed to be the same length — TODO confirm.
        Json::Value meanVals = rootValue["mean"];
        Json::Value stdVals = rootValue["std"];
        for(unsigned int i=0; i < meanVals.size(); i++){
          means.push_back(meanVals[i].asFloat());
          stds.push_back(stdVals[i].asFloat());
        }

        Json::Value targetVals = rootValue["target_names"];
        for(unsigned int i=0; i < targetVals.size(); i++){
          target_names.insert(targetVals[i].asString());
        } 

        // NOTE(review): these names are appended to the cls_names already
        // filled from target_name_map.json above; neither list is cleared,
        // so duplicates are possible — confirm the merge is intended.
        Json::Value textVals = rootValue["text_embeding_name"];
        for(unsigned int i=0; i < textVals.size(); i++){
          cls_names.push_back(textVals[i].asString());
        } 
        // Softmax temperature and the filtering pattern used in inference().
        temperature = rootValue["temperature"].asFloat();
        std::cout<<"temperature0:"<<temperature<<std::endl;
        pattern = rootValue["pattern"].asInt();
      }else{
        SPDLOG_INFO("Error: Can not parse imagebind run config file");
        return false;
      }
    infile.close();

    // Debug dump of the merged class-name list.
    for (auto jname: cls_names)
    {
      std::cout<<"jsonname:"<<jname<<std::endl;
    }

    return true;
  }

  /**
   * @brief Deserialize the TensorRT engine, create an execution context and
   *        allocate one CUDA device buffer per engine binding.
   * @param enginePath path of the serialized .engine file
   * @return true on success, false if the engine could not be created
   */
  bool Yolov3CLIPDetector::initEngine(const std::string enginePath){
      // 2. create asynchronous cuda stream
    cudaCheck(cudaStreamCreate(&_stream));

    // you must reset context first, then you can reset the engine.
    // otherwise the context destructor will fail, a segment fault happens.
    _context.reset();
    // cudaCheck(cudaSetDevice(_gpu_id));
    // Fix: use the enginePath parameter instead of silently reading the
    // member model_path (the only caller passes model_path, so behavior is
    // unchanged, but the parameter is no longer ignored).
    _engine.reset(getEngine(enginePath));
    if (!_engine)
    {
      std::cout << "[ERROR]: Engine creation failed" << std::endl;
      return false;
    }

    // 4. create context
    _context.reset(_engine->createExecutionContext());

    // 5. set input/output buffer
    auto num_binds = _engine->getNbBindings();
    _bindings.resize(num_binds);
    _buffer_size.resize(num_binds);
    for (int i = 0; i < num_binds; i++)
    {
        nvinfer1::Dims dims = _engine->getBindingDimensions(i);    // (3, 224, 224)  (1024)
        printf("dims = ");
        // Fix: inner loop index renamed i -> j, it shadowed the binding index.
        for(int j = 0; j < dims.nbDims; j++)
          printf("%d ", *(dims.d+j));
        printf("\n");
        if (_engine->bindingIsInput(i))
        {
            const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim){ return dim == -1; });
            // set dynamic dimensions to 1 (i.e. run with batch size 1).
            if (isDynamicInput)
            {
                std::vector<int> staticDims;
                staticDims.resize(dims.nbDims);
                std::transform(dims.d, dims.d + dims.nbDims, staticDims.begin(),
                    [&](int dim) { return dim >= 0 ? dim : 1; });
                nvinfer1::Dims new_dims{static_cast<int>(staticDims.size()), {}, {}};
                std::copy_n(staticDims.begin(), new_dims.nbDims, std::begin(new_dims.d));
                _context->setBindingDimensions(i, new_dims);
            }
        }
        dims = _context->getBindingDimensions(i);
        printf("dims = ");
        for(int j = 0; j < dims.nbDims; j++)
          printf("%d ", *(dims.d+j));
        printf("\n");
        nvinfer1::DataType dtype = _engine->getBindingDataType(i); // both bindings are kFLOAT here
        int64_t total_size = volume(dims) * 1 * getElementSize(dtype);
        printf("total_size ======================= %ld\n", total_size);
        _buffer_size[i] = total_size;
        cudaCheck(cudaMalloc(&_bindings[i], total_size));
    }
    // NOTE(review): assumes binding 1 is the float embedding output — holds
    // for this engine; confirm if the model layout changes.
    _embeddings.resize(_buffer_size[1] / sizeof(float));
    return true;
  }

 /**
 * @brief Initialize the resource: create the wrapped yolov3 detector, load
 *        the imagebind/CLIP configuration, deserialize the TensorRT engine
 *        and load the pre-computed text embeddings.
 * @param resource_id resource name; when it contains "_CLIP" the last 5
 *        characters are stripped before initializing the yolov3 detector
 * @return true on success, false if any configuration/engine step fails
 * 
 */  
  bool Yolov3CLIPDetector::Initialize(const std::string& resource_id)
  {
    // 1. init yolov3 detector
    // NOTE(review): find() matches "_CLIP" anywhere, but only the last 5
    // characters are stripped — assumes the suffix is always trailing.
    auto true_resource_id = resource_id.find("_CLIP") != std::string::npos ? resource_id.substr(0, resource_id.size() - 5) : resource_id;
    //std::string true_resource_id = "YoloV3_Car_3000_P";
    std::cout<<"resource_id:"<<resource_id<<std::endl;
    // std::cout<<"true_resource_id:"<<true_resource_id<<std::endl;
    boost::shared_ptr<cognition::Handle> cognition_handle_=boost::make_shared<cognition::Handle>();
    _YoloV3GPUDetector = cognition_handle_->init<cognition::OpticalDetection::Resource>(true_resource_id);
    bool flag = false;
    // init imagebind config
    // TODO(review): hard-coded, user-specific path — should come from
    // configuration or an environment variable.
    std::string configPath = "/home/yk/xjl_learn/imageind_config";
    model_path =  configPath + "/cliptiny.engine";
    std::string imagebind_config = configPath + "/imagebind_config.json";
    std::string imagebindRun_config = configPath + "/imagebind_run.json";
    // 2. model config: input size, embedding path, supported class names.
    flag = initModelConfig(imagebind_config);
    if (!flag){
      SPDLOG_INFO("init Model false");
      return false;
    }
    // 3. run config: normalization constants, targets, temperature, pattern.
    flag = initModelRunCon(imagebindRun_config);
    if (!flag){
      SPDLOG_INFO("init model Run config false");
      return false;
    }
    // 4. deserialize the TensorRT engine and allocate device buffers.
    flag = initEngine(model_path);
    if (!flag){
      SPDLOG_INFO("init Engine false");
      return false;
    }
    // 5. load the pre-computed per-class text embeddings from disk.
    load_pre_text_embeddings(text_emdeding_path, _known_embeddings, cls_names);
    SPDLOG_INFO("known_embeddings size = {}", _known_embeddings.size());
    return true;
  }

/**
 * @brief Forward detection parameters to the wrapped YoloV3 GPU detector.
 * @param aConfidenceThres confidence threshold
 * @param aTargetClassname classes that need to be detected
 * @param aModelFilePath model file path
 * @param aWidth block width fed to the DNN
 * @param aHeight block height fed to the DNN
 * @return void
 */
  void Yolov3CLIPDetector::setParams(float aConfidenceThres,
                                      std::unordered_set<std::string> aTargetClassname,
                                      std::string aModelFilePath,
                                      size_t aWidth,
                                      size_t aHeight)
  {
    SPDLOG_INFO("SetParameter of YoloV3GPUDetector...");
    // Pure pass-through: this wrapper adds no parameters of its own.
    _YoloV3GPUDetector->setParams(aConfidenceThres, aTargetClassname, aModelFilePath, aWidth, aHeight);
  }

/**
 * @brief Run yolov3 detection on the frame, then re-check each detected box
 *        with the imagebind/CLIP classifier: boxes classified as
 *        Background/Others are removed, and (depending on `pattern`) box
 *        categories may be replaced by the CLIP prediction.
 * @param visible_pFrame input visible-light frame (BGR)
 * @param pRoi  in/out: detected boxes; false positives are erased
 * @param pName in/out: detected class names; may be overwritten by CLIP
 * @param pConf in/out: detection confidences; may be overwritten by CLIP
 * @return true if at least one detection survives the filtering
 * 
 */


  bool Yolov3CLIPDetector::inference(cv::Mat &visible_pFrame,
                                  std::vector<cv::Rect2d> &pRoi,
                                  std::vector<std::string> &pName,
                                  std::vector<float> &pConf
                                  )
  {
    // 1. object detection(yolo)
    // for circle 2-6
    // 2. cut the Roi region base visible_pFrame
    // 3. change Roi region size to 224x224 (preprocess imagebind)
    // 4. put Roi region into imagebind_vision to get embedding (1x1024)
    // 5. x1 compared with known vectors(inited) get max confidence class c1
    // 6. c1 compared with pName, not consistent output c1, similarity
    // Lazily configure the yolo detector once, on the first inference call.
    if (!first_init){
      _YoloV3GPUDetector->setParams(0.2, target_names,"",1024,768);
      first_init = true;
    }
    if (visible_pFrame.empty())
    {
      SPDLOG_INFO("The image is empty!!!!!");
      return false;
    }
    // NOTE(review): relearning branch is intentionally disabled (if (false));
    // when enabled it would reload text embeddings named by the blackboard
    // key "relearning_text_id" from ~/.micros/cognition/relearning_models/.
    if (false)  // relearning mode ON
    {
      std::string text_id;
      _blackboard.getSpeakerValueImpl("relearning_text_id", text_id);  // get relearning text vectors id
      SPDLOG_INFO("clip text_id is {}",text_id);
      if (!text_id.empty())
      {
        SPDLOG_INFO("CLIP Load new text vectors {} after relearning.", text_id);
        std::string home_path = getenv("HOME");
        std::string textFilePath = home_path + "/.micros/cognition/relearning_models/" + text_id;
        if (boost::filesystem::exists(textFilePath))
        {
          // update text vectors & clear blackboard
          _known_embeddings.clear();
          load_pre_text_embeddings(textFilePath, _known_embeddings, cls_names);
          SPDLOG_INFO("Load new text vectors from {} done.", textFilePath);
        }
        else
        {
          SPDLOG_ERROR("Load new text vectors from {} fail, check directory path and permissions first.", textFilePath);
        }
        _blackboard.eraseKeyImpl("relearning_text_id", micros_bt::BlackboardType::SPEAKER);
      }
    }

    // Step 1: base yolov3 detection; bail out early when nothing was found.
    bool detected = _YoloV3GPUDetector->inference(visible_pFrame, pRoi, pName, pConf);
    if (!detected)
    {
      return false;
    }
    SPDLOG_INFO("pName = {} pConf = {}\n", pName.size(), pConf.size());
    for (int i=0; i<pName.size(); i++)
    {
      SPDLOG_INFO("i = {} pName = {} pConf = {}\n", i, pName[i], pConf[i]);
    }
    // maximum processed target number
    int max_target_num = std::min(cognition::IMAGEBIND_MAX_TARGET_NUM, int(pRoi.size()));
    SPDLOG_INFO("max_target_num = {}\n", max_target_num);
    int modify_count = 0;
    SPDLOG_INFO("size = {}\n", _known_embeddings.size());
    // Indices of boxes judged to be false positives (removed at the end).
    std::vector<int> rm_index;
    // CLIP re-check needs at least two embedding classes to compare against.
    if (_known_embeddings.size() > 1)
    {
      for (int i = 0; i < max_target_num; i++)
      {
        int index;
        // IMAGEBIND_DETECT_ORDER selects whether boxes are visited from the
        // back (ascend) or the front (descend) of the detection list.
        if(cognition::IMAGEBIND_DETECT_ORDER == 1)
        {
          index = pRoi.size() - i - 1;
          SPDLOG_INFO("ascend order: index = {}-------------------------------------------------\n", index);
        } else 
        {
          index = i;
          SPDLOG_INFO("descend order: index = {}-------------------------------------------------\n", index);
        }
        cv::Rect2d Roi = pRoi[index];
        std::string Name = pName[index];
        float Conf = pConf[index];
        SPDLOG_INFO("Yolo detect category: {}, scores: {}", Name.c_str(), Conf);

        // Skip classes the imagebind model was not configured to judge.
        std::unordered_set<std::string>::iterator it = imagebind_class_names.find(Name);
        if(it ==imagebind_class_names.end()){
          SPDLOG_INFO("imagebind does not support predicting: {}", Name.c_str());
          continue;
        }
        
        // Step 2: crop the ROI with a 10-pixel margin, clamped to the frame.
        int x1 = std::max(0, int(Roi.x - 10));
        int y1 = std::max(0, int(Roi.y - 10));
        int pwidth = std::min(int(x1 + Roi.width + 20), int(visible_pFrame.cols -1)) - x1;
        int pheight = std::min(int(y1 + Roi.height + 20), int(visible_pFrame.rows -1)) - y1;

        cv::Rect largeRect(x1, y1, pwidth, pheight);
        cv::Mat image_roi = visible_pFrame(largeRect);
        std::vector<float> input_vectors;
        for (auto val: means){
          std::cout<<"mean:"<<val<<std::endl;
        }

        // Step 3: resize/normalize the crop into a CHW float vector.
        image_preprocess(image_roi, _clip_Width, _clip_Height, input_vectors, means, stds);
       
        // Step 4: run the CLIP engine — binding 0 is input, binding 1 is the
        // image embedding output.
        cudaCheck(cudaMemcpyAsync(_bindings[0], input_vectors.data(), _buffer_size[0], cudaMemcpyHostToDevice, _stream));
        _context->enqueueV2(&_bindings[0], _stream, nullptr);
        cudaCheck(cudaMemcpyAsync(_embeddings.data(), _bindings[1], _buffer_size[1], cudaMemcpyDeviceToHost, _stream));
        cudaCheck(cudaStreamSynchronize(_stream));

        printf("IMAGEBIND: output = %f %f %f %f %f %f\n", _embeddings[0], _embeddings[1], _embeddings[2], _embeddings[3], _embeddings[4], _embeddings[5]);
        // Step 5: dot-product the image embedding against each class's text
        // embeddings; keep the best score per class.
        std::vector<float> conf;
        std::vector<std::string> name;
        int count = 0;
        int background_index = 0;
        int others_index = 0;
        for (auto &known_embedding:_known_embeddings)
        {
          float cls_in_conf=-1000;
          std::string cls_in_name;
          for (auto &it : known_embedding)
          {
            float val = std::inner_product(_embeddings.begin(), _embeddings.end(), it.second.begin(), 0.0);
            if (val > cls_in_conf)
            {
              cls_in_conf = val;
            }
            // NOTE(review): cls_in_name is taken from the LAST entry, not
            // the best-scoring one — works because each map holds embeddings
            // of a single class ("<class>_<index>" keys); confirm.
            cls_in_name = it.first.substr(0, it.first.find_last_of("_"));
          }
          conf.push_back(cls_in_conf);
          name.push_back(cls_in_name);
          // Remember where the two "reject" classes sit in the score list.
          if (cls_in_name == "Background"){
              background_index=count;
          }
          if (cls_in_name == "Others"){
              others_index=count;
          }
          count++;
        }

        for(auto val: name){
          std::cout<<"name:"<<val<<std::endl;
        }

        std::cout<<"background_index:"<<background_index<<",          others_index:"<<others_index<<" temperature:"<<temperature<<std::endl;
        // Temperature-scaled softmax over the per-class similarity scores.
        std::vector<float> softmax_conf = softmax(conf, temperature);
        // cal max value and index
        std::vector<float>::iterator biggest = std::max_element(softmax_conf.begin(), softmax_conf.end());
        float max_value = *biggest;
        int max_value_index = std::distance(softmax_conf.begin(), biggest);

        // std — standard deviation of the softmax scores (currently unused
        // in the decision below; the commented-out branches referenced it).
        auto size = std::distance(softmax_conf.begin(), softmax_conf.end());
        double avg = std::accumulate(softmax_conf.begin(), softmax_conf.end(),0.0) / size;
        double variance(0);
        std::for_each(softmax_conf.begin(), softmax_conf.end(), [avg, &variance](const float &num){variance += (num - avg) * (num -avg);});
        variance /= size;
        auto standard = std::sqrt(variance);

        SPDLOG_INFO("IMAGEBIND: Current detection box category: {}, ImageBind category: {}", Name.c_str(), name[max_value_index].c_str());
        for (int j = 0; j < softmax_conf.size(); j++)
        {
          SPDLOG_INFO("IMAGEBIND: index = {} name: {} softmax_conf: {}", j, name[j], softmax_conf[j]);
        }

        // Step 6: apply the filtering/overwriting policy selected by pattern.
        if (pattern == 1)
        {
          // pattern 1: trust CLIP unconditionally — remove Background/Others
          // boxes, overwrite everything else with the CLIP prediction.
          std::cout<<"pattern:"<<pattern<<std::endl;
          // if (max_value < 0.7 || max_value_index == 0)// || standard < 0.2){ 
          if (max_value_index == background_index || max_value_index == others_index)// || standard < 0.2)
          { 
            modify_count++;
            rm_index.push_back(index);
            SPDLOG_INFO("IMAGEBIND: TARGET REMOVED!!! Current detection box category: {}, ImageBind Max category confidence is: {}(<threshold {}), this box should be False Positive (background)",
                          Name.c_str(), max_value, cognition::IMAGEBIND_REMOVE_THRESHOLD);

          }
          // else if (max_value > 0.7)
          else
          {
              modify_count++;
              pName[index] = name[max_value_index];
              pConf[index] = max_value;
              SPDLOG_INFO("IMAGEBIND: TARGET MODIFIED!!! Current detection box category: {}, ImageBind category: {}, ImageBind confidence is {}(>threshold {}), modifying detection category to {}",
                            Name.c_str(), name[max_value_index].c_str(), max_value, cognition::IMAGEBIND_UPDATE_THRESHOLD, name[max_value_index].c_str());
          }
          // else {
          //     SPDLOG_INFO("TARGET KEPT!!!Current detection box category: {}, ImageBind category: {}", Name.c_str(), name[max_value_index].c_str());
          //   }

        } else
        {
            // default pattern: remove low-confidence/Background/Others boxes;
            // only overwrite specific classes, and only when CLIP disagrees
            // AND is more confident than yolo.
            std::string imgbind_name = name[max_value_index];
            if ((max_value < 0.2) || max_value_index == background_index || max_value_index == others_index){
              modify_count++;
              rm_index.push_back(index);
              SPDLOG_INFO("TARGET REMOVED!!! Current detection box category: {}, ImageBind Max category confidence is: {}(<threshold {}), this box should be False Positive (background)",
                            Name.c_str(), max_value, cognition::IMAGEBIND_REMOVE_THRESHOLD);

            }else if(((Name=="Hummer") || (Name=="Himars_launcher") || (Name=="Command") || (Name=="Himars_carrier") || (Name=="Cross") || (Name=="Others")) && (imgbind_name != Name) && (max_value > Conf)){
                modify_count++;
                pName[index] = name[max_value_index];
                pConf[index] = max_value;
                SPDLOG_INFO("TARGET MODIFIED!!! Current detection box category: {}, ImageBind category: {}, ImageBind confidence is {}(>threshold {}), modifying detection category to {}",
                              Name.c_str(), name[max_value_index].c_str(), max_value, cognition::IMAGEBIND_UPDATE_THRESHOLD, name[max_value_index].c_str());
            }else {
              SPDLOG_INFO("TARGET KEPT!!!Current detection box category: {}, ImageBind category: {}", Name.c_str(), name[max_value_index].c_str());
            }
           
        }
      }
    } else 
    {
      SPDLOG_INFO("DO NOT have enough pretrained text embeddings, ImageBind no inference\n");
    }

    // Never remove every box: keep the one processed last (which end that is
    // depends on the traversal order).
    if (rm_index.size() == pRoi.size())
    {
      if (cognition::IMAGEBIND_DETECT_ORDER == 0)
      {
        rm_index.erase(rm_index.begin());
      } else
      {
        rm_index.erase(rm_index.begin()+rm_index.size()-1);
      }
    }
    
    // remove error Roi
    // In descend order rm_index is ascending, so already-erased elements
    // shift the remaining indices left by i; in ascend order indices were
    // collected back-to-front and need no compensation.
    if (rm_index.size() > 0)
    {
      for (int i = 0; i < rm_index.size(); i++)
      {
        if (cognition::IMAGEBIND_DETECT_ORDER == 0)
        {
          pRoi.erase(pRoi.begin()+rm_index[i]-i);
          pName.erase(pName.begin()+rm_index[i]-i);
          pConf.erase(pConf.begin()+rm_index[i]-i);
        } else
        {
          pRoi.erase(pRoi.begin()+rm_index[i]);
          pName.erase(pName.begin()+rm_index[i]);
          pConf.erase(pConf.begin()+rm_index[i]);
        }
      }
    }

    SPDLOG_INFO("IMAGEBIND: ImageBind modified boxes numbers: {}\n", modify_count);
    // Report failure if filtering emptied the detection list.
    if (pRoi.size() == 0 && detected == true)
    {
      detected = false;
    }
    
    return detected;
  }


  /**
 * @brief Deserialize a TensorRT engine from a serialized .engine file.
 * @param engine path of the serialized engine file
 * @return raw pointer to the deserialized engine (caller takes ownership),
 *         or nullptr on any failure
 * 
 */
  nvinfer1::ICudaEngine* getEngine(const std::string& engine) {

    std::ifstream engineFile(engine, std::ios::binary);
    if (!engineFile)
    {
        std::cout << "Error opening engine file: " << engine << std::endl;
        return nullptr;
    }

    // Determine the file size by seeking to the end.
    engineFile.seekg(0, engineFile.end);
    long int fsize = engineFile.tellg();
    engineFile.seekg(0, engineFile.beg);

    // Fix: guard against an empty file or a failed tellg() (-1), which
    // previously produced a bogus buffer size for the read below.
    if (fsize <= 0)
    {
        std::cout << "Error loading engine file: " << engine << std::endl;
        return nullptr;
    }

    std::vector<char> engineData(fsize);
    engineFile.read(engineData.data(), fsize);
    if (!engineFile)
    {
        std::cout << "Error loading engine file: " << engine << std::endl;
        return nullptr;
    }

    TrtUniquePtr<nvinfer1::IRuntime> runtime{nvinfer1::createInferRuntime(logger)};

    return runtime->deserializeCudaEngine(engineData.data(), fsize, nullptr);
  }
  

  /**
 * @brief Resize an image ROI to the CLIP input size, convert BGR->RGB,
 *        normalize per channel ((x/255 - mean) / std) and flatten to a CHW
 *        float vector.
 * @param src_image input BGR image ROI
 * @param width target width
 * @param height target height
 * @param input_vectors output CHW float vector (appended to)
 * @param means per-channel means (RGB order, at least 3 entries)
 * @param stds per-channel stds (RGB order, at least 3 entries)
 * @return true on success, false for an empty image or short mean/std lists
 */
  bool image_preprocess(const cv::Mat &src_image, int width, int height, std::vector<float> &input_vectors, std::vector<float>&means, std::vector<float>&stds) {
    // Robustness: an empty ROI would make cv::resize throw, and short
    // mean/std vectors would index out of bounds below.
    if (src_image.empty() || means.size() < 3 || stds.size() < 3) {
        return false;
    }
    cv::Mat src_temp, dst_image;
    // 1. resize by CUBIC mode
    cv::resize(src_image, src_temp, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
    // Fix: convert straight into dst_image — the intermediate copyTo() was a
    // redundant full-frame copy.
    src_temp.convertTo(dst_image, CV_32F);

    // 2. convert BGR to RGB
    cv::cvtColor(dst_image, dst_image, cv::COLOR_BGR2RGB);
    // 3. per-pixel normalization: x = (x/255 - mean) / std
    for (int i = 0; i < dst_image.rows; i++) {
        for (int j = 0; j < dst_image.cols; j++) {
            cv::Vec3f buf = dst_image.at<cv::Vec3f>(i,j);
            buf[0] = ((buf[0] / 255.0f) - means[0]) / stds[0];
            buf[1] = ((buf[1] / 255.0f) - means[1]) / stds[1];
            buf[2] = ((buf[2] / 255.0f) - means[2]) / stds[2];
            dst_image.at<cv::Vec3f>(i,j) = buf;
        }
    }

    // 4. convert mat to vector (HWC -> CHW)
    cv2vec(dst_image, input_vectors);

    return true;
  }

  /**
 * @brief Flatten a CV_32FC3 image from HWC to CHW layout, appending the
 *        channel planes to input_vectors.
 * @param src_image float image (not modified)
 * @param input_vectors output vector the channel planes are appended to
 * @return always true
 */
  bool cv2vec(const cv::Mat src_image, std::vector<float> &input_vectors) {
      cv::Mat channels[3]; // split interleaved channels to do HWC->CHW
      cv::split(src_image, channels);
      // Fix: reserve once and append the plane data directly instead of
      // materializing an intermediate std::vector per channel.
      input_vectors.reserve(input_vectors.size() + static_cast<size_t>(src_image.total()) * src_image.channels());
      for (int i = 0; i < src_image.channels(); i++)  // HWC->CHW
      {
          // reshape to a single contiguous column so iteration is linear
          const cv::Mat plane = channels[i].reshape(1, src_image.cols * src_image.rows);
          input_vectors.insert(input_vectors.end(), plane.begin<float>(), plane.end<float>());
      }
      
      return true;
  }

  /**
   * @brief Product of all dimensions of a TensorRT Dims (element count).
   * @param d binding dimensions
   * @return number of elements described by d (1 for zero dimensions)
   */
  int volume(const nvinfer1::Dims& d) {
      int total = 1;
      for (int i = 0; i < d.nbDims; ++i) {
          total *= d.d[i];
      }
      return total;
  }
   
  /**
 * @brief Byte size of a single element of a TensorRT data type; used when
 *        sizing the binding buffers in initEngine().
 * @param type TensorRT element type
 * @return element size in bytes
 */
  int getElementSize(nvinfer1::DataType &type) {
      switch(type)
      {
       case nvinfer1::DataType::kFLOAT :   return 4;
       case nvinfer1::DataType::kHALF :   return 2;
       case nvinfer1::DataType::kINT8 :   return 1;
       case nvinfer1::DataType::kINT32 :   return 4;
       // NOTE(review): TensorRT documents kBOOL as 1 byte; 4 is kept to
       // preserve the existing (over-allocating, harmless) buffer sizes.
       case nvinfer1::DataType::kBOOL :   return 4;
      }
      // Fix: falling off the end of a non-void function is undefined
      // behavior; over-allocate conservatively for any unhandled enum value.
      return 4;
  }


  /**
 * @brief Parse text-embedding files into a map. Each path looks like
 *        ".../<class>/<index>.<ext>" and contains comma-separated floats;
 *        the result maps "<class>_<index>" to the parsed vector.
 * @param files list of embedding file paths
 * @return map from "<class>_<index>" to the embedding values
 */
  std::map<std::string ,std::vector<float>> load_pre_text_file(std::vector<std::string> &files) {
      std::map<std::string ,std::vector<float>> known_embedding;
      for (auto &file : files)
      {
          size_t pos1 = file.find_last_of('/');
          std::string folder = file.substr(0, pos1);
          // class name = last path component of the containing folder
          std::string cls = folder.substr(folder.find_last_of('/')+1, folder.length());
          // Fix: search for the extension dot only within the file name.
          // file.find('.') matched a dot anywhere in the path (e.g.
          // "/home/user/.micros/..." used by the relearning flow), making
          // pos2 < pos1 and corrupting the index via size_t underflow.
          size_t pos2 = file.find('.', pos1);
          std::string index = file.substr(pos1+1, pos2-pos1-1);
          // Read the whole file; each line is a comma-separated float list.
          std::ifstream in_file(file);
          std::vector <float> record;
          while (in_file)
          {
              std::string s;
              if (!getline(in_file, s)) break;
              std::istringstream ss(s);
              while (ss)
              {
                  std::string token;
                  if (!getline(ss, token, ',')) break;
                  record.push_back(atof(token.c_str()));
              }
          }

          known_embedding[cls+"_"+index].assign(record.begin(), record.end());
      }
      return known_embedding;
  }

  /**
 * @brief Recursively scan `path` for text-embedding files. Subdirectories
 *        are descended into only when their name appears in cls_names; all
 *        plain files found in one directory are parsed into a single map
 *        that is appended to known_embeddings.
 * @param path directory to scan
 * @param known_embeddings output: one class map appended per directory with files
 * @param cls_names class directory names allowed for recursion
 * @return always true (a missing/unreadable directory is silently skipped)
 */
  bool load_pre_text_embeddings(std::string path, std::vector<std::map<std::string ,std::vector<float>>> &known_embeddings, std::vector<std::string> cls_names) {
    //file handle
    DIR *d = opendir(path.c_str());
    //file path
    std::vector<std::string> files;
    string p;
    if (d != NULL)
    {
      //if folder, iter,
      //else added to vector
      struct dirent *dt = NULL;
      // Fix: parenthesized assignment-in-condition with an explicit NULL
      // comparison (silences -Wparentheses and makes intent clear).
      while ((dt = readdir(d)) != NULL)
      {
        // skip the "." and ".." entries
        if (strcmp(dt->d_name, ".") != 0 && strcmp(dt->d_name, "..") != 0)
        {
          // Fix: use the DT_DIR constant instead of the magic number 4.
          if (dt->d_type == DT_DIR)
          {
            // recurse only into directories of known classes
            if (std::find(cls_names.begin(), cls_names.end(), dt->d_name) != cls_names.end())
            {
              SPDLOG_INFO("find pretrain vector class = {}\n", dt->d_name);
              load_pre_text_embeddings(p.assign(path).append("/").append(dt->d_name), known_embeddings, cls_names);
            }
          } else
          {
            files.push_back(p.assign(path).append("/").append(dt->d_name));
          }
        }
      }

      for (auto lfilename: files){
        std::cout<<"lfilename:"<<lfilename<<std::endl;
      }

      closedir(d);
      if(!files.empty())
      {
        known_embeddings.push_back(load_pre_text_file(files));
      }
    }
    return true;
  }

  /**
   * @brief Cosine similarity between two embedding vectors.
   * @param emb1 first vector
   * @param emb2 second vector; must have the same length as emb1
   * @return cosine similarity, or 0 when the vector sizes differ
   */
  float cosine_similarity(const std::vector<float>& emb1,
                  const std::vector<float>& emb2) {
    if (emb1.size() != emb2.size())
    {
      SPDLOG_INFO("SIZE INCONSISTENT with two vectors, one is {}, the other is {}\n", emb1.size(), emb2.size());
      return 0;
    }

    // dot(e1, e2) / max(|e1| * |e2|, eps) — the epsilon floor guards
    // against division by zero for all-zero vectors.
    float numerator = std::inner_product(emb1.begin(), emb1.end(), emb2.begin(), 0.0);
    const float norm1_sq = std::inner_product(emb1.begin(), emb1.end(), emb1.begin(), 0.0);
    const float norm2_sq = std::inner_product(emb2.begin(), emb2.end(), emb2.begin(), 0.0);
    numerator /= std::max(std::sqrt(norm1_sq) * std::sqrt(norm2_sq),
                          std::numeric_limits<float>::epsilon());
    return numerator;
  }

  /**
   * @brief Temperature-scaled, numerically stable softmax.
   *        Note: scales the input scores in place by `temperature`.
   * @param conf raw similarity scores (modified in place)
   * @param temperature scaling factor applied before the softmax
   * @return normalized probabilities, same order as conf
   */
  std::vector<float> softmax(std::vector<float> &conf, float temperature)
  {
    // Apply the temperature scaling in place.
    for (auto &score : conf)
    {
      score *= temperature;
    }

    // Subtract the maximum before exponentiating for numerical stability.
    const float max_value = *std::max_element(conf.begin(), conf.end());
    std::cout<<"maxval:"<<max_value<<std::endl;

    // Normalization constant.
    float total = 0;
    for (auto x : conf)
    {
      total += exp(x-max_value);
    }
    std::cout<<"total:"<<total<<std::endl;

    std::vector<float> result;
    result.reserve(conf.size());
    for (auto x : conf)
    {
       std::cout<<"newval:"<<exp(x-max_value)<<std::endl;
      result.push_back(exp(x-max_value)/total);
    }
    return result;
  }

} // namespace optical_clip_detector

#include <class_loader/class_loader.h>
CLASS_LOADER_REGISTER_CLASS(optical_clip_detector::Yolov3CLIPDetector, cognition::OpticalDetection::Resource);
