#include <ros/ros.h>
#include <iostream>
#include <boost/bind.hpp>
#include <vector>
#include <string>
#include <string>

#include <message_filters/subscriber.h>
#include <message_filters/time_synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>

#include <ros/spinner.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/CameraInfo.h>

#include <opencv4/opencv2/opencv.hpp>
#include <opencv4/opencv2/core.hpp>

#include "zed_actuator_calibration.h"
#include "stereo_camera_ranging.h"
#include "zed_get_coordinate.h"

#define ZED_DEPTH_TOPIC "/zed2/zed_node/depth/depth_registered"
#define ZED_LEFT_IMAGE_TOPIC "/zed2/zed_node/left/image_rect_color"
#define ZED_RIGHT_IMAGE_TOPIC "/zed2/zed_node/right/image_rect_color"

// Publishes the annotated left image (detected square outline + XYZ text overlay).
ros::Publisher result_image_pub;

/*
 * TODO(review): This code is honestly a bit clumsy; the overall structure
 * should be reworked later.
 */

/*
 * This approach does not feel very workable: the idea assumed only one
 * quadrilateral would be detected, but in practice a single object produces
 * several boxes, and the error is fairly large.
 */

/*
 * Outline every detected quadrilateral on `image` with a thick,
 * anti-aliased green polyline.  A polygon whose first vertex lies within
 * 3 px of the top-left corner is skipped so the image border itself is
 * never traced as a "square".
 */
void drawSquares( cv::Mat& image, const POINT_VECTOR& squares )
{
    for (const auto& square : squares)
    {
        const cv::Point* vertices = &square[0];
        int vertex_count = static_cast<int>(square.size());

        // Don't detect the border.
        if (vertices->x > 3 && vertices->y > 3)
            polylines(image, &vertices, &vertex_count, 1, true,
                      cv::Scalar(0, 255, 0), 3, cv::LINE_AA);
    }
}

/*
 * Cosine of the angle at vertex pt0 formed by the edges pt0->pt1 and
 * pt0->pt2.  Despite the (misspelled) name, the return value is a cosine
 * in [-1, 1], not an angle in radians/degrees; the 1e-10 term guards the
 * square root against a zero denominator for degenerate edges.
 */
double caculate_angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
  const double ux = pt1.x - pt0.x;
  const double uy = pt1.y - pt0.y;
  const double vx = pt2.x - pt0.x;
  const double vy = pt2.y - pt0.y;

  const double dot = ux*vx + uy*vy;
  const double norm_product_sq = (ux*ux + uy*uy) * (vx*vx + vy*vy);
  return dot / sqrt(norm_product_sq + 1e-10);
}

// Returns the sequence of squares detected on the image; results are
// appended to `squares` (cleared first).
//
// The image is scanned once per color plane and once per threshold level:
// level 0 uses Canny + dilate (catches squares with gradient shading), the
// remaining levels use a plain binary threshold.  Contours that simplify to
// a convex quadrilateral with area > 1000 and near-90-degree corners
// (|cos| < 0.3 on every corner) are kept.
void findSquares( const cv::Mat& image, POINT_VECTOR& squares )
{
  int g_thresh = 50, g_N = 5;
  squares.clear();

  // Blur to suppress noise before edge detection.
  //
  // BUG FIX: the previous code used `cv::Mat timg(image)`, which is a
  // *shallow* copy sharing the pixel buffer with the const input; since
  // `timg` already had the right size/type, medianBlur wrote its output
  // straight into the caller's image.  An empty destination lets
  // medianBlur allocate its own buffer instead.
  cv::Mat timg;
  medianBlur(image, timg, 9);
  cv::Mat gray0(timg.size(), CV_8U), gray;

  POINT_VECTOR contours;

  // Find squares in every color plane of the image (clamped to the actual
  // channel count so a grayscale input does not crash mixChannels).
  const int planes = std::min(image.channels(), 3);
  for( int c = 0; c < planes; c++ )
  {
    int ch[] = {c, 0};
    mixChannels(&timg, 1, &gray0, 1, ch, 1);

    // Try several threshold levels.
    for( int l = 0; l < g_N; l++ )
    {
      // Hack: use Canny instead of zero threshold level.
      // Canny helps to catch squares with gradient shading.
      if( l == 0 )
      {
        // Apply Canny with the lower threshold at 0 (forces edge merging),
        // then dilate to close small gaps between edge segments.
        Canny(gray0, gray, 5, g_thresh, 5);
        dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
      }
      else
      {
        // Plain threshold for l != 0:
        //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
        gray = gray0 >= (l+1)*255/g_N;
      }

      // Find contours and store them all as a list.
      findContours(gray, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

      std::vector<cv::Point> approx;

      // Test each contour.
      for( size_t i = 0; i < contours.size(); i++ )
      {
        // Approximate the contour with accuracy proportional to its
        // perimeter (2% of arc length).
        approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);

        // Square contours should have 4 vertices after approximation, a
        // relatively large area (filters out noisy contours) and be convex.
        // Note: fabs() because contourArea's sign follows the contour
        // orientation.
        if( approx.size() == 4 &&
            fabs(contourArea(cv::Mat(approx))) > 1000 &&
            isContourConvex(cv::Mat(approx)) )
        {
          double maxCosine = 0;

          for( int j = 2; j < 5; j++ )
          {
            // Maximum cosine of the angle between joint edges.
            double cosine = fabs(caculate_angle(approx[j%4], approx[j-2], approx[j-1]));
            maxCosine = MAX(maxCosine, cosine);
          }

          // If all corner cosines are small (all angles ~90 degrees),
          // record the quadrilateral.
          if( maxCosine < 0.3 )
            squares.push_back(approx);
        }
      }
    }
  }
}

/*
 * This function still has a problem: after running for a while it
 * segfaults.
 */
/*
 * Synchronized callback: locate the square target in the left image, look
 * up the depth at its centre in the registered depth map, back-project to
 * camera XYZ with hard-coded left-camera intrinsics, and publish an
 * annotated result image.
 *
 * FIXES vs. the original (likely cause of the reported long-run segfault):
 *  - assert(size()==1) disappears under NDEBUG, after which [0] on an
 *    empty detection vector is undefined behaviour -> now an early return;
 *  - the depth-index clamp allowed index sizes/4, one float past the end
 *    of the buffer -> now clamped to sizes/4 - 1;
 *  - empty depth buffers and cv_bridge conversion failures are handled.
 */
void callBck(const sensor_msgs::Image::ConstPtr& depth_image, const sensor_msgs::Image::ConstPtr& left_image_msg,
             const sensor_msgs::Image::ConstPtr& right_image_msg)
{
  cv_bridge::CvImagePtr left_cv_ptr, right_cv_ptr;
  try
  {
    left_cv_ptr = cv_bridge::toCvCopy(left_image_msg, sensor_msgs::image_encodings::BGR8);
    right_cv_ptr = cv_bridge::toCvCopy(right_image_msg, sensor_msgs::image_encodings::BGR8);
  }
  catch (const cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge conversion failed: %s", e.what());
    return;
  }

  cv::Mat left_image = left_cv_ptr->image;
  cv::Mat right_image = right_cv_ptr->image;  // currently unused; converted for parity
  (void)right_image;

  POINT_VECTOR left_point_vec;
  findSquares(left_image, left_point_vec);

  // Previously: assert(left_point_vec.size() == 1).  With NDEBUG the
  // assert vanished and the [0] accesses below dereferenced an empty
  // vector.  Skip the frame instead of crashing.
  if (left_point_vec.empty())
  {
    ROS_WARN_THROTTLE(1.0, "no square detected in left image, skipping frame");
    return;
  }
  if (left_point_vec.size() > 1)
  {
    ROS_WARN_THROTTLE(1.0, "%lu squares detected, using the first one",
                      static_cast<unsigned long>(left_point_vec.size()));
  }

  // NOTE(review): vertex order from approxPolyDP is not guaranteed to be
  // tl/tr/bl/br, but only the centroid is used below, so ordering is moot.
  const cv::Point left_tl = left_point_vec[0][0];
  const cv::Point left_tr = left_point_vec[0][1];
  const cv::Point left_bl = left_point_vec[0][2];
  const cv::Point left_br = left_point_vec[0][3];
  const int left_u = (left_bl.x + left_br.x + left_tl.x + left_tr.x) / 4;
  const int left_v = (left_bl.y + left_br.y + left_tl.y + left_tr.y) / 4;

  // The depth buffer is reinterpreted as 32-bit floats (assumes the ZED
  // node publishes 32FC1 depth -- TODO confirm against depth_image->encoding).
  const int depth_count = static_cast<int>(depth_image->data.size() / 4);
  if (depth_count == 0)
  {
    ROS_WARN_THROTTLE(1.0, "empty depth image, skipping frame");
    return;
  }
  const float* depths = reinterpret_cast<const float*>(&depth_image->data[0]);

  int left_center_idx = left_u + static_cast<int>(depth_image->width) * left_v;
  // Clamp into [0, depth_count - 1]; the old upper bound of sizes/4 was
  // one element past the end of the float array.
  if (left_center_idx < 0)
    left_center_idx = 0;
  else if (left_center_idx >= depth_count)
    left_center_idx = depth_count - 1;

  // Back-project with hard-coded left-camera intrinsics (cx, cy, fx, fy).
  // NOTE(review): ZED depth may be NaN/inf for invalid pixels -- x/y/z then
  // propagate that value; verify downstream consumers tolerate it.
  const double z = depths[left_center_idx];
  const double x = (z * (left_u - 632.2350)) / 2014.6162;
  const double y = (z * (left_v - 323.1177)) / 2025.0555;
  std::cout << "x = " << x << "y = " << y << "z = " << z << std::endl;

  const std::string text = "x = " + std::to_string(x) + " y = " + std::to_string(y) + " z = " + std::to_string(z);
  const int thickness = 2;
  const int font_face = cv::FONT_HERSHEY_COMPLEX;
  const double font_scale = 2;

  cv::Mat result_image;
  left_image.copyTo(result_image);
  drawSquares(result_image, left_point_vec);
  // putText's origin is the text *baseline*; the old (5,5) origin drew the
  // scale-2 string almost entirely above the top edge of the image.
  cv::putText(result_image, text, cv::Point(5, 40), font_face, font_scale,
              cv::Scalar(0, 255, 255), thickness, 8, 0);

  sensor_msgs::ImagePtr result_image_msg =
      cv_bridge::CvImage(std_msgs::Header(), "bgr8", result_image).toImageMsg();
  result_image_pub.publish(result_image_msg);
}

/*
 * Node entry point: subscribe to the ZED depth / left / right image topics,
 * approximately time-synchronize them, and run callBck on each triple.
 */
int main(int argc, char **argv)
{
  ros::init(argc, argv, "zed_actuator_calibration_node");
  if(!ros::ok())
  {
    return 0;
  }
  ros::NodeHandle nh;

  result_image_pub = nh.advertise<sensor_msgs::Image>("/zed_actuator_calibration_node/result_image", 1);
  message_filters::Subscriber<sensor_msgs::Image> zed_depth(nh, ZED_DEPTH_TOPIC, 1);
  message_filters::Subscriber<sensor_msgs::Image> zed_left_img(nh, ZED_LEFT_IMAGE_TOPIC, 1);
  message_filters::Subscriber<sensor_msgs::Image> zed_right_img(nh, ZED_RIGHT_IMAGE_TOPIC, 1);

  // ApproximateTime: the three ZED topics do not share exact stamps.
  typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::Image> synPolicy;
  message_filters::Synchronizer<synPolicy> sync(synPolicy(10), zed_depth, zed_left_img, zed_right_img);
  sync.registerCallback(boost::bind(&callBck, _1, _2, _3));

  // The original declared a ros::AsyncSpinner(3) that was never start()ed,
  // so it had no effect; a single blocking spin() serves the one callback.
  ros::spin();
  return 0;
}
