#ifndef __SV2_ARUCO_DET__
#define __SV2_ARUCO_DET__

#include "sms_core.h"

#include <opencv2/opencv.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/tracking.hpp>

#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#define SV2_RAD2DEG 57.2957795

/*
Dependency installation:
  sudo apt install libopencv-dev
  sudo apt install libopencv-contrib-dev
*/


namespace sv2 {


// Landing-pad layout selected via the "padType" parameter.
enum class ArucoPadType
{
  NONE   = 0,  // no pad layout configured
  FEIPad = 1   // FEI landing-pad marker layout
};


class ArucoDetCppNode : public sms::BaseNode
{
public:
  ArucoDetCppNode(
    std::string job_name,
    std::string param_file,
    std::string ip="127.0.0.1",
    int port=9094
  ) : sms::BaseNode("ArucoDetCppNode", job_name, param_file, ip, port), 
    _image_sub("/" + job_name + "/sensor/image_raw", "sensor_msgs::CompressedImage", std::bind(&ArucoDetCppNode::image_callback, this, std::placeholders::_1)),
    _calib_sub("/" + job_name + "/sensor/calibration_info", "sensor_msgs::CameraCalibration", std::bind(&ArucoDetCppNode::calib_callback, this, std::placeholders::_1)),
    _res_pub("/" + job_name + "/detector/results", "spirecv_msgs::2DTargets"),
    _vis_pub("/" + job_name + "/detector/image_results", "sensor_msgs::CompressedImage")
  {
    // 读取节点参数
    this->_dictionary_id = this->get_param("dictionaryId", 0);
    nlohmann::json ids_need = this->get_param("markerIds", {1, 2, 3});
    nlohmann::json lengths_need = this->get_param("markerLengths", {0.1, 0.1, 0.1});
    this->_pad_type = this->get_param("padType", 0);
    this->_fei_x_bias = this->get_param("fei_x_bias", 0.0);
    this->_fei_y_bias = this->get_param("fei_y_bias", 0.0);
    this->_fei_id_begin = this->get_param("fei_id_begin", 0);
    this->_use_own_camera = this->get_param("use_own_camera", 0);

    logger.info("dictionaryId: " + std::to_string(this->_dictionary_id));
    logger.info("markerIds: " + ids_need.dump());
    logger.info("markerLengths: " + lengths_need.dump());
    logger.info("padType: " + std::to_string(static_cast<int>(this->_pad_type)));

    // 将读取到的参数转换到成员变量：_ids_need、_lengths_need
    for (const auto& item : ids_need) { this->_ids_need.push_back(item); }
    for (const auto& item : lengths_need) { this->_lengths_need.push_back(item); }

    this->camera_matrix = cv::Mat::zeros(3, 3, CV_64FC1);
    this->distortion = cv::Mat::zeros(1, 5, CV_64FC1);
    
    if (this->_use_own_camera == 1)
    {
      image_width = this->get_param("width", 0);
      image_height = this->get_param("height", 0);
      
      nlohmann::json t_camera_matrix = this->get_param("camera_matrix", {775.08569093, 0.0, 578.64894543, 0.0, 764.40240624, 393.02655515, 0.0, 0.0, 1.0});
      nlohmann::json t_distortion_coefficients = this->get_param("distortion_coefficients", {0.07637484, -0.09074819, 0.00268185, 0.0007026, 0.03910658});
        
      int i(0);
      for (const auto& item : t_camera_matrix) { this->camera_matrix.at<double>(i++) = item; }

      i = 0;
      for (const auto& item : t_distortion_coefficients) { this->distortion.at<double>(i++) = item; }
      
      int t_camera_id = this->get_param("camera_id", 0);
      this->_cap.open(t_camera_id, cv::CAP_V4L2);
      this->_cap.set(cv::CAP_PROP_FRAME_WIDTH, image_width);
      this->_cap.set(cv::CAP_PROP_FRAME_HEIGHT, image_height);
      this->_cap.set(cv::CAP_PROP_FPS, 30);
      this->_cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'));
      
      _camera_thread = std::thread(&ArucoDetCppNode::camera_t, this);
    }

    this->_first_show_calib = false;
    this->_dictionary = nullptr;
    this->_detector_params = new cv::aruco::DetectorParameters;
    this->_t0 = 0;
  }
  ~ArucoDetCppNode()
  {
    if (nullptr != this->_dictionary)
      delete this->_dictionary;
  }
  
  void camera_t()
  {
    cv::Mat img;
    while (this->_cap.isOpened())
    {
      this->_cap >> img;
      // 放入到阻塞队列中
      {
        std::unique_lock<std::mutex> lock(this->_camera_queue_mtx);
        this->_camera_queue.push(img);
      }
      // 通知主线程取数据
      this->_camera_cv.notify_one();
    }
  }
  
  void run();
  nlohmann::json detect(cv::Mat img_);
  void getIdsWithLengths(std::vector<int>& ids_, std::vector<double>& lengths_);
  // 图像话题回调函数
  void image_callback(nlohmann::json msg)
  {
    // 放入到阻塞队列中
    {
      std::unique_lock<std::mutex> lock(this->_image_queue_mtx);
      this->_image_queue.push(msg);
    }
    // 通知主线程取数据
    this->_image_cv.notify_one();
  }
  // 相机参数话题回调函数
  void calib_callback(nlohmann::json msg)
  {
    int i(0);
    for (const auto& item : msg["K"]) { this->camera_matrix.at<double>(i++) = item; }

    i = 0;
    for (const auto& item : msg["D"]) { this->distortion.at<double>(i++) = item; }

    image_width = msg["width"];
    image_height = msg["height"];

    fov_x = 2 * atan(image_width / 2. / camera_matrix.at<double>(0, 0)) * SV2_RAD2DEG;
    fov_y = 2 * atan(image_height / 2. / camera_matrix.at<double>(1, 1)) * SV2_RAD2DEG;

    if (!this->_first_show_calib)
    {
      this->_first_show_calib = true;
      logger.info("K: " + msg["K"].dump());
      logger.info("D: " + msg["D"].dump());
      logger.info("ImageSize: (" + std::to_string(image_width) + ", " + std::to_string(image_height) + ")");
      logger.info("FOV: (" + std::to_string(fov_x) + ", " + std::to_string(fov_y) + ")");
    }
  }
  

  cv::Mat camera_matrix;
  cv::Mat distortion;
  int image_width;
  int image_height;
  double fov_x;
  double fov_y;

private:
  cv::Ptr<cv::aruco::DetectorParameters> _detector_params;
  cv::Ptr<cv::aruco::Dictionary> _dictionary;
  int _dictionary_id;
  int _last_dictionary_id;
  std::vector<int> _ids_need;
  std::vector<double> _lengths_need;
  int _use_own_camera;
  cv::VideoCapture _cap;
  std::thread _camera_thread;

  // 订阅话题
  sms::Subscriber _image_sub;
  sms::Subscriber _calib_sub;
  // 发布话题
  sms::Publisher _res_pub;
  sms::Publisher _vis_pub;

  std::mutex _image_queue_mtx;
  std::queue<nlohmann::json> _image_queue;
  std::condition_variable _image_cv;
  std::mutex _camera_queue_mtx;
  std::queue<cv::Mat> _camera_queue;
  std::condition_variable _camera_cv;
  bool _first_show_calib;
  double _t0;

  // 发送spirecv_msgs::2DTargets话题
  nlohmann::json _publish_to_sms(
    std::vector<int>& ids_,
    std::vector<std::vector<cv::Point2f> >& corners_,
    std::vector<cv::Vec3d>& rvecs_,
    std::vector<cv::Vec3d>& tvecs_,
    int im_width,
    int im_height
  );
  double _calc_yaw(double vec_x_, double vec_y_);

  // 以下代码为FEI降落靶标专用
  ArucoPadType _pad_type;
  bool _fill_value_from_id(float id2c_t[3], int id, float unit_len, double x_b, double y_b, int id_begin=0);
  void _fei_pad_recognition(nlohmann::json& msg);
  double _fei_x_bias;
  double _fei_y_bias;
  int _fei_id_begin;

};


}
#endif
