#include <ros/ros.h>
#include <iostream>
#include <boost/bind.hpp>
#include <vector>
#include <string>
#include <string>

#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>

#include <ros/spinner.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/CameraInfo.h>

#include <opencv4/opencv2/opencv.hpp>
#include <opencv4/opencv2/core.hpp>
#include <opencv4/opencv2/calib3d.hpp>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/imgcodecs.hpp>
#include <opencv2/alphamat.hpp>
#include <opencv4/opencv2/features2d.hpp>

#define ZED_DEPTH_TOPIC "/zed2/zed_node/depth/depth_registered"
#define ZED_LEFT_IMAGE_TOPIC "/zed2/zed_node/left/image_rect_color"
#define ZED_RIGHT_IMAGE_TOPIC "/zed2/zed_node/right/image_rect_color"


/* Publishers for the annotated left/right result images. */
ros::Publisher result_image_pub_left;
ros::Publisher result_image_pub_right;

/* Calibration parameters — still to be validated. */

/* NOTE: intrinsic matrices exported from MATLAB must be transposed to get
 * the row-major form OpenCV expects (already done in the values below). */
cv::Mat g_left_camera_Intrinsic_matrix = (cv::Mat_<float>(3,3) << 538.7635 , 0.2824, 650.2173,
                                          0, 539.0124, 358.6647, 0, 0, 1);

cv::Mat g_right_camera_intrinsic_matrix = (cv::Mat_<float>(3,3) << 539.7048, 0.1812, 644.2358,
                                           0, 539.6816, 365.9476, 0, 0, 1);

/* MATLAB orders distortion coefficients K1 K2 K3 P1 P2; OpenCV expects
 * K1 K2 P1 P2 K3 — reordered here, otherwise the stereo matching error
 * becomes noticeably larger. */
cv::Mat g_left_camera_distCoeff = (cv::Mat_<float>(5,1) << -0.0981, 0.1230, 0.000012344, 0.00019051, -0.0649);

cv::Mat g_right_camera_distCoeff = (cv::Mat_<float>(5,1) << -0.1107, 0.1564, 0.00030057, 0.0004703, -0.0897);

/* Rotation / translation of the right camera relative to the left camera
 * (translation in millimetres — baseline ≈ 117 mm, TODO confirm units). */
cv::Mat g_rotation_matrix = (cv::Mat_<float>(3,3) << 1.0000, -0.00030063, -0.0039, 0.00030764, 1.0000, 0.0018, 0.0039, -0.0018, 1.0000);

cv::Mat g_translation_matrix = (cv::Mat_<float>(3,1) << -117.2335, -0.0190, 0.0068);

/* 4x4 homogeneous right-to-left transform.
 * FIX: the original initializer supplied only 12 of the 16 elements,
 * leaving the bottom row uninitialized; the homogeneous row 0 0 0 1
 * is now included. */
cv::Mat g_transformation_matrix = (cv::Mat_<float>(4,4) << 1.0000, -0.00030063, -0.0039,  -117.2335,
                                   0.00030764, 1.0000, 0.0018, -0.0190,
                                   0.0039, -0.0018, 1.0000, 0.0068,
                                   0, 0, 0, 1);

/* Left camera is the reference frame: identity rotation, zero translation. */
cv::Mat g_left_rotation_matrix = (cv::Mat_<float>(3,3) << 1.0000, 0, 0, 0, 1.0000, 0, 0, 0, 1.0000);

cv::Mat g_left_translation_matrix = (cv::Mat_<float>(3,1) << 0,0,0);

/**
 * @brief Triangulate a 3-D point from a matched pair of pixel coordinates.
 *
 * Builds the 3x4 projection matrices M = K * [R|t] for both cameras (the
 * left camera is the world frame), then solves the over-determined system
 *
 *       [u1]      |X|        [u2]      |X|
 *   Z1*|v1| = Ml*|Y|    Z2*|v2| = Mr*|Y|
 *       [ 1]      |Z|        [ 1]      |Z|
 *                 |1|                  |1|
 *
 * by eliminating the scale factors and solving the resulting 4x3 linear
 * least-squares problem with SVD.
 *
 * @param uvLeft  pixel coordinate of the point in the left image
 * @param uvRight pixel coordinate of the same point in the right image
 * @return the point in the left-camera (world) frame; units follow
 *         g_translation_matrix (presumably millimetres — TODO confirm)
 */
cv::Point3f uv2xyz(cv::Point2f uvLeft,cv::Point2f uvRight)
{
    /* Left camera projection matrix Ml = K_left * [I|0]. */
    cv::Mat mLeftRT;
    hconcat(g_left_rotation_matrix, g_left_translation_matrix, mLeftRT);
    cv::Mat mLeftM = g_left_camera_Intrinsic_matrix * mLeftRT;

    /* Right camera projection matrix Mr = K_right * [R|t]. */
    cv::Mat mRightRT;
    hconcat(g_rotation_matrix, g_translation_matrix, mRightRT);
    cv::Mat mRightM = g_right_camera_intrinsic_matrix * mRightRT;

    /* Eliminate the per-camera scale factor s (s = row 2 of M applied to the
     * homogeneous point) to obtain four linear equations A*[X Y Z]^T = B. */
    cv::Mat A = cv::Mat(4,3,CV_32F);
    A.at<float>(0,0) = uvLeft.x * mLeftM.at<float>(2,0) - mLeftM.at<float>(0,0);
    A.at<float>(0,1) = uvLeft.x * mLeftM.at<float>(2,1) - mLeftM.at<float>(0,1);
    A.at<float>(0,2) = uvLeft.x * mLeftM.at<float>(2,2) - mLeftM.at<float>(0,2);

    A.at<float>(1,0) = uvLeft.y * mLeftM.at<float>(2,0) - mLeftM.at<float>(1,0);
    A.at<float>(1,1) = uvLeft.y * mLeftM.at<float>(2,1) - mLeftM.at<float>(1,1);
    A.at<float>(1,2) = uvLeft.y * mLeftM.at<float>(2,2) - mLeftM.at<float>(1,2);

    A.at<float>(2,0) = uvRight.x * mRightM.at<float>(2,0) - mRightM.at<float>(0,0);
    A.at<float>(2,1) = uvRight.x * mRightM.at<float>(2,1) - mRightM.at<float>(0,1);
    A.at<float>(2,2) = uvRight.x * mRightM.at<float>(2,2) - mRightM.at<float>(0,2);

    A.at<float>(3,0) = uvRight.y * mRightM.at<float>(2,0) - mRightM.at<float>(1,0);
    A.at<float>(3,1) = uvRight.y * mRightM.at<float>(2,1) - mRightM.at<float>(1,1);
    A.at<float>(3,2) = uvRight.y * mRightM.at<float>(2,2) - mRightM.at<float>(1,2);

    /* Right-hand side of the least-squares system. */
    cv::Mat B = cv::Mat(4,1,CV_32F);
    B.at<float>(0,0) = mLeftM.at<float>(0,3) - uvLeft.x * mLeftM.at<float>(2,3);
    B.at<float>(1,0) = mLeftM.at<float>(1,3) - uvLeft.y * mLeftM.at<float>(2,3);
    B.at<float>(2,0) = mRightM.at<float>(0,3) - uvRight.x * mRightM.at<float>(2,3);
    B.at<float>(3,0) = mRightM.at<float>(1,3) - uvRight.y * mRightM.at<float>(2,3);

    /* Solve A*XYZ = B in the least-squares sense via SVD.
     * FIX: XYZ was declared CV_64F but read with at<float>(); cv::solve
     * re-creates the output with the type of A (CV_32F), which is why the
     * reads worked — declare it consistently as CV_32F. */
    cv::Mat XYZ = cv::Mat(3,1,CV_32F);
    solve(A,B,XYZ,cv::DECOMP_SVD);

    /* Coordinates in the world (left-camera) frame. */
    cv::Point3f world;
    world.x = XYZ.at<float>(0,0);
    world.y = XYZ.at<float>(1,0);
    world.z = XYZ.at<float>(2,0);

    return world;
}

/**
 * @brief Detect and annotate chessboard corners (6x9 inner grid) in an image.
 *
 * On success the corners are refined to sub-pixel accuracy, drawn onto
 * @p input_image, and each corner is labelled with its index. On any
 * detection failure the function returns early and @p image_corners_vec
 * may be empty or only partially refined.
 *
 * @param input_image       image to search (modified in place with drawings)
 * @param image_corners_vec output: detected corner pixel coordinates
 */
void extract_chessborad_corners(cv::Mat& input_image, std::vector<cv::Point2f>& image_corners_vec)
{
  const int thickness = 2;
  const int font_face = cv::FONT_HERSHEY_COMPLEX;
  const double font_scale = 0.5;
  /* FIX: index was previously uninitialized before use (undefined behavior);
   * corner labels now reliably start at 0. */
  int index = 0;

  /* Extract corner pixel coordinates from the chessboard image. */
  if(!cv::findChessboardCorners(input_image, cv::Size(6,9), image_corners_vec)
     || image_corners_vec.empty())
  {
    return;
  }

  /* Refine corner coordinates to sub-pixel accuracy. */
  if(!cv::find4QuadCornerSubpix(input_image, image_corners_vec, cv::Size(5,5)))
  {
    return;
  }

  /* Draw the detected corner pattern. */
  cv::drawChessboardCorners(input_image, cv::Size(6,9), image_corners_vec, true);

  /* Label every corner with its index. */
  for(const auto& corner : image_corners_vec)
  {
    cv::putText(input_image, std::to_string(index), corner, font_face, font_scale,
                cv::Scalar(0, 255, 255), thickness, 8, 0);
    index++;
  }
}

void callBck(const sensor_msgs::Image::ConstPtr& depth_image, const sensor_msgs::Image::ConstPtr& left_image_msg,
             const sensor_msgs::Image::ConstPtr& right_image_msg)
{
  cv_bridge::CvImagePtr left_cv_ptr, right_cv_ptr;
  cv::Mat left_image, right_image, result_image_left, result_image_right;
  cv::Mat left_undist_image, right_undist_image;
  float* depths;
  std::vector<cv::Point2f> left_image_corners_vec, right_image_corners_vec;
  std::vector<cv::Point3f> zed_image_corners_coordinate_point_vec, svd_image_corners_coordinate_point_vec;
  sensor_msgs::ImagePtr result_image_msg_left, result_image_msg_right;
  std::string text, point_text;
  int index = 0;
  int sizes;
  int u,v,depth_image_index;
  cv::Point3f zed_corners_coordinate_point, svd_corners_coordinate_point;
  cv::Point2f left_corners_image_point, right_corners_image_point;
  int thickness = 2;
  int font_face = cv::FONT_HERSHEY_COMPLEX;
  double font_scale = 0.4;

  depths = (float*)(&depth_image->data[0]);
  sizes = depth_image->data.size();
  left_cv_ptr = cv_bridge::toCvCopy(left_image_msg, sensor_msgs::image_encodings::BGR8);
  left_image = left_cv_ptr->image;
  cv::undistort(left_image, left_undist_image, g_left_camera_Intrinsic_matrix, g_left_camera_distCoeff);
  right_cv_ptr = cv_bridge::toCvCopy(right_image_msg, sensor_msgs::image_encodings::BGR8);
  right_image = right_cv_ptr->image;
  cv::undistort(right_image, right_undist_image, g_right_camera_intrinsic_matrix, g_right_camera_distCoeff);
  left_undist_image.copyTo(result_image_left);
  right_undist_image.copyTo(result_image_right);
  cv::cvtColor(left_undist_image,left_undist_image,cv::COLOR_RGB2GRAY);
  cv::cvtColor(right_undist_image,right_undist_image,cv::COLOR_RGB2GRAY);
  extract_chessborad_corners(left_undist_image, left_image_corners_vec);
  extract_chessborad_corners(right_undist_image, right_image_corners_vec);

  result_image_msg_left = cv_bridge::CvImage(std_msgs::Header(), "bgr8", result_image_left).toImageMsg();
  result_image_pub_left.publish(result_image_msg_left);
  result_image_msg_right = cv_bridge::CvImage(std_msgs::Header(), "bgr8", result_image_right).toImageMsg();
  result_image_pub_right.publish(result_image_msg_left);
  if(left_image_corners_vec.size() != right_image_corners_vec.size())
  {
    std::cout << "Left corners != Right corners" << std::endl;
    return;
  }
  if(left_image_corners_vec.size() == 0)
  {
    std::cout << "extract 0 corners" << std::endl;
    return;
  }

  for(int i=0;i<left_image_corners_vec.size();i++)
  {
    left_corners_image_point = left_image_corners_vec.at(i);
    right_corners_image_point = right_image_corners_vec.at(i);

    svd_corners_coordinate_point = uv2xyz(left_corners_image_point, right_corners_image_point);
    svd_image_corners_coordinate_point_vec.push_back(svd_corners_coordinate_point);
    point_text ="Point " + std::to_string(i) + " x:" + std::to_string(svd_corners_coordinate_point.x) + " y:" + std::to_string(svd_corners_coordinate_point.y)
        + " z:" + std::to_string(svd_corners_coordinate_point.z);
    std::cout << point_text << std::endl;
  }

  /*
  for(auto i:left_image_corners_vec)
  {
    u = i.x;
    v = i.y;
    depth_image_index = u + v * depth_image->width;
    if(depth_image_index < 0)
    {
      depth_image_index = 0;
    }
    else if (depth_image_index > sizes / 4)
    {
        depth_image_index = sizes / 4;
    }
    zed_corners_coordinate_point.z = depths[depth_image_index];
    zed_corners_coordinate_point.y = (zed_corners_coordinate_point.z * (u - 632.2350)) / 2014.6162;
    zed_corners_coordinate_point.y = (zed_corners_coordinate_point.z * (v - 323.1177)) / 2025.0555;
    zed_image_corners_coordinate_point_vec.push_back(zed_corners_coordinate_point);

    point_text = "x:" + std::to_string(u) + " y:" + std::to_string(v) + " z:" + std::to_string(zed_corners_coordinate_point.z);
    std::cout << point_text << std::endl;
  }
*/


  for(auto j:svd_image_corners_coordinate_point_vec)
  {
    left_corners_image_point = left_image_corners_vec.at(index);
    right_corners_image_point = right_image_corners_vec.at(index);
    //point_text = std::to_string(index) + "x:" + std::to_string(i.x) + " y:" + std::to_string(i.y) + " z:" + std::to_string(i.z);
    //std::cout << point_text << std::endl;
    text = std::to_string(index);
    cv::putText(result_image_left, text, left_corners_image_point, font_face, font_scale, cv::Scalar(0, 255, 0), thickness, 8, 0);
    cv::putText(result_image_right, text, right_corners_image_point, font_face, font_scale, cv::Scalar(0, 255, 0), thickness, 8, 0);
    index++;
  }

}

/**
 * @brief Node entry point: subscribes to the ZED depth/left/right image
 *        topics, approximately time-synchronizes them, and spins.
 */
int main(int argc, char **argv)
{
  ros::init(argc, argv, "zed_actuator_checker_calibration_node");
  if(!ros::ok())
  {
    return 0;
  }
  ros::NodeHandle nh;

  result_image_pub_left = nh.advertise<sensor_msgs::Image>("/zed_actuator_calibration_node/result_image_left", 1);
  result_image_pub_right = nh.advertise<sensor_msgs::Image>("/zed_actuator_calibration_node/result_image_right", 1);

  /* The three ZED streams are not exactly stamped together, so use the
   * ApproximateTime policy rather than ExactTime. */
  message_filters::Subscriber<sensor_msgs::Image> zed_depth(nh, ZED_DEPTH_TOPIC, 1);
  message_filters::Subscriber<sensor_msgs::Image> zed_left_img(nh, ZED_LEFT_IMAGE_TOPIC, 1);
  message_filters::Subscriber<sensor_msgs::Image> zed_right_img(nh, ZED_RIGHT_IMAGE_TOPIC, 1);
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image> synPolicy;
  message_filters::Synchronizer<synPolicy> sync(synPolicy(10), zed_depth, zed_left_img, zed_right_img);
  sync.registerCallback(boost::bind(&callBck, _1, _2, _3));

  /* FIX: an ros::AsyncSpinner was constructed here but start() was never
   * called, so it did nothing; a plain single-threaded spin is sufficient
   * for the one synchronized callback. */
  ros::spin();
  return 0;
}
