#ifndef ICP_TARGET_NODE_ROS_H
#define ICP_TARGET_NODE_ROS_H

// C++ standard library
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iostream>
#include <memory>
#include <queue>
#include <sstream>
#include <string>
#include <vector>

// Third-party: Eigen / OpenCV / yaml-cpp
#include <Eigen/Eigen>
#include <Eigen/Dense>
#include <opencv2/core.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <yaml-cpp/yaml.h>

// ROS
#include <ros/ros.h>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/QuaternionStamped.h>
#include <geometry_msgs/Vector3.h>
#include <geometry_msgs/Quaternion.h>
#include <geometry_msgs/PointStamped.h>
#include <std_msgs/Bool.h>
#include <std_msgs/Float64MultiArray.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CompressedImage.h>
#include <sensor_msgs/Imu.h>
#include <nav_msgs/Odometry.h>
#include <image_transport/image_transport.h>
#include <visualization_msgs/Marker.h>
#include <visualization_msgs/MarkerArray.h>
#include <tf2/LinearMath/Transform.h>
#include <tf2/LinearMath/Quaternion.h>
#include <tf2_ros/transform_broadcaster.h>
#include <tf2_ros/transform_listener.h>
#include <tf2_ros/buffer.h>
#include <tf2_eigen/tf2_eigen.h>
#include <cv_bridge/cv_bridge.h>

// Project
#include <multi_camera_cooperation/colors.h>
#include <multi_camera_cooperation/landmark.h>
#include "multi_camera_cooperation/math_tools.h"
#include "multi_camera_cooperation/Assignment/linear_sum_assignment.h"

// NOTE(review): `using namespace std;` at header scope leaks into every
// translation unit that includes this file — TODO: remove once all dependents
// spell out std:: themselves (the class below no longer relies on it).
using namespace std;

class ICPTargetNodeROS
{
public:
    string config_file_path = "";
    string landmark_config_file_path = "";
///------------ cam attribute ----------///
    vector<string> camera_config_path = {};
    vector<string> cams;
    vector<string> servogroups;
    vector<cv::Mat> cameraMatrixs = {};
    vector<vector<double>> distCoeffs = {};
    std::string pub_topic;

///------------ image attribute ----------///
    vector<cv::Mat> Images = {};
    cv_bridge::CvImagePtr cv_ptr_compressed;

///------------ drone state ----------///
    string landmark_config_path;
    vector<cv::Point3f> targetPoints3D = {};

///------------------ ros ---------------///
    ros::WallTimer timer;
    vector<ros::Subscriber> sub_marker_pixel;
    vector<ros::Subscriber> sub_T_servogroup_to_cam;
    vector<ros::Subscriber> sub_T_base_to_servogroup;
    vector<ros::Subscriber> sub_T_base_to_cam;
    vector<ros::Subscriber> sub_showimage_cb;
    ros::Publisher pub_target_pose;
    ros::Publisher pub_rough_pose;
    ros::Publisher pub_opticalReadyFlag;
    vector<ros::Publisher> pub_epipolar_geometry;
    bool LaunchFlag = false;

///------------ triangulate parameters -------------///
    bool triangulationGoodFlag = false;
    vector<cv::Point2f> marker_pixels_left_buffer = {};
    vector<cv::Point2f> marker_pixels_right_buffer = {};
    vector<bool> isMarkerReady = {};
    vector<bool> isTServogroup2CamReady = {};
    vector<bool> isTBase2ServogroupReady = {};
    vector<bool> isTBase2CamReady = {};
    vector<vector<cv::Point2f>> marker_pixels;
    vector<Eigen::Matrix4d> T_servogroup_to_cam;
    vector<Eigen::Matrix4d> T_base_to_servogroup;
    vector<Eigen::Matrix4d> T_base_to_cam;
    Eigen::Matrix4d T_cam_to_image = Eigen::Matrix4d::Identity();
    double mergePointsThreshold = 0.05;
    tf2::Vector3 merged_RoughPoints = tf2::Vector3(0, 0, 0);
    vector<cv::Point3f> points3D_merged = {};
    Eigen::Vector3d RoughPoints = Eigen::Vector3d(0, 0, 0);
    int win_size = 5;
    vector<Eigen::Vector3d> RoughPointsBuffer = {};

///------------ ransac parameters -------------///
    vector<cv::Point3f> points3D;
    vector<int> ransac_inliers;
    vector<vector<cv::Point3f>> Good_Matched_Points;
    vector<vector<int>> Good_Matched_Inliers;
    double distance_threshold = 0.01;
    vector<cv::Point3f> Final_Matched_Points;
    vector<int> Final_Matched_Inliers;
    cv::Mat AffineTransform = cv::Mat::eye(3, 4, CV_64F);
    double ransacThreshold = 3;
    double confidence = 0.95;
    cv::Mat inliers;
    bool publish_flag = false;

///------------ icp parameters -------------///
    

///------------------target pose----------------///
    Eigen::Matrix4d T_base_to_estimation = Eigen::Matrix4d::Identity();
    Eigen::Matrix4d T_base_to_landmark = Eigen::Matrix4d::Identity();
    Eigen::Matrix4d T_landmark_to_uav = Eigen::Matrix4d::Identity();
    tf2::Vector3 t_base_to_estimation = tf2::Vector3();
    tf2::Quaternion q_base_to_estimation = tf2::Quaternion();
    tf2::Matrix3x3 R_base_to_estimation = tf2::Matrix3x3();
    geometry_msgs::TransformStamped msg_T_base_to_estimation;

///--------------------- visualization -----------------///
    std::shared_ptr<tf2_ros::TransformBroadcaster> tf_base2estimate;

///------------- function declarations -------------///
    ICPTargetNodeROS();

    /**
     * @brief Load config files, initialize the ros wrapper and related
     * @param nh
     */
    void init(ros::NodeHandle &nh);

    void solution_start_callback(const ros::WallTimerEvent&);

    void ShowImage_cb(const sensor_msgs::Image::ConstPtr &msg, int i);
    void marker_pixel_cb(const std_msgs::Float64MultiArray::ConstPtr &msg, int i);
    void T_servogroup_to_cam_cb(const geometry_msgs::TransformStamped::ConstPtr &msg, int i);
    void T_base_to_servogroup_cb(const geometry_msgs::TransformStamped::ConstPtr &msg, int i);
    void T_base_to_cam_cb(const geometry_msgs::TransformStamped::ConstPtr &msg, int i);

    cv::Mat Transform_Calculate(Eigen::Matrix4d &T_base2cam, cv::Mat &cameraMatrix);
    vector<cv::Point2f> pixel2cam(const vector<cv::Point2f> &p, const cv::Mat &K);
    vector<cv::Point3f> mergePoints(vector<cv::Point3f>& points3D, float threshold);

    /**
     * @brief Triangulate the 3D points from the pixel points
     * @param Transform1
     * @param Transform2
     * @param poinsvectors1
     * @param poinsvectors2
     * @param points3D
     */
    bool triangulation(cv::Mat &Transform1, cv::Mat &Transform2, vector<cv::Point2f> &poinsvectors1, vector<cv::Point2f> &poinsvectors2, vector<cv::Point3f> &points3D);

    void landmark_pose_solve();

    void LoadLandmarkConfig(const std::string& config_path);
    void LoadCameraConfig(const std::string& config_path, int i);

    bool feature_match(int left_cam_idx, int right_cam_idx, vector<cv::Point2f> &matched_marker_pixels_left, vector<cv::Point2f> &matched_marker_pixels_right);
    bool RANSAC_Registration(vector<cv::Point3f> &source_point_vector, vector<cv::Point3f> &target_point_vector, vector<cv::Point3f> &target_point_vector_temp, vector<int> &inliers, double distance_threshold);
    bool triangulation_process();
    void ransac_process();

    cv::Mat epipolar_line_solve(int left_cam_idx, int right_cam_idx, int marker_idx);
    cv::Mat Get_3DTransform_Matrix(const std::vector<cv::Point3f>& srcPoints, const std::vector<cv::Point3f>&  dstPoints);

    void window_filter(Eigen::Vector3d &points, vector<Eigen::Vector3d> &window, int window_size);
};


#endif //ICP_TARGET_NODE_ROS_H