#include <stdio.h>
#include <stack>
#include <opencv2/opencv.hpp>

//using Eigen3
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <complex>
#include <math.h>
#include <cfloat>
#include<limits.h>  //int max

#include <fstream>

using namespace cv;
using namespace std;
//using namespace Eigen;

//
#define FRAME_SIZE  (1280*720*3/2)
static unsigned char buffer[FRAME_SIZE+8] = {0};

//apriltag
//*************

#include <cstring>
#include <vector>
#include <list>
#include <sys/time.h>
const string usage = "\n"
  "Usage:\n"
  "  apriltags_demo [OPTION...] [IMG1 [IMG2...]]\n"
  "\n"
  "Options:\n"
  "  -h  -?          Show help options\n"
  "  -a              Arduino (send tag ids over serial port)\n"
  "  -d              Disable graphics\n"
  "  -t              Timing of tag extraction\n"
  "  -C <bbxhh>      Tag family (default 36h11)\n"
  "  -D <id>         Video device ID (if multiple cameras present)\n"
  "  -F <fx>         Focal length in pixels\n"
  "  -W <width>      Image width (default 640, availability depends on camera)\n"
  "  -H <height>     Image height (default 480, availability depends on camera)\n"
  "  -S <size>       Tag size (square black frame) in meters\n"
  "  -E <exposure>   Manually set camera exposure (default auto; range 0-10000)\n"
  "  -G <gain>       Manually set camera gain (default auto; range 0-255)\n"
  "  -B <brightness> Manually set the camera brightness (default 128; range 0-255)\n"
  "\n";

const string intro = "\n"
    "April tags test code\n"
    "(C) 2012-2014 Massachusetts Institute of Technology\n"
    "Michael Kaess\n"
    "\n";

#ifndef __APPLE__
#define EXPOSURE_CONTROL // only works in Linux
#endif

#ifdef EXPOSURE_CONTROL
#include <libv4l2.h>
#include <linux/videodev2.h>
#include <fcntl.h>
#include <errno.h>
#endif

// April tags detector and various families that can be selected by command line option
#include "AprilTags/TagDetector.h"
#include "AprilTags/Tag16h5.h"
#include "AprilTags/Tag25h7.h"
#include "AprilTags/Tag25h9.h"
#include "AprilTags/Tag36h9.h"
#include "AprilTags/Tag36h11.h"

// Needed for getopt / command line options processing
#include <unistd.h>
extern int optind;
extern char *optarg;

// For Arduino: locally defined serial port access class
const char* windowName = "apriltags_demo";

// utility function to provide current system time (used below in
// determining frame rate at which images are being processed)
// Returns the current wall-clock time in seconds (with microsecond
// resolution).  Used below to measure the per-frame processing rate.
double tic() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec) * 1e-6;
}

#include <cmath>
// NOTE(review): `#ifndef PI` tests for a preprocessor *macro* named PI,
// but the line below declares a C++ const variable, not a macro.  The
// guard therefore only has effect if some included header defines a PI
// macro — confirm that is the intent.
#ifndef PI
const double PI = 3.14159265358979323846;
#endif
const double TWOPI = 2.0*PI;  // full turn in radians

/**
 * Normalize an angle (radians) into the interval [-pi, pi].
 *
 * @param t  angle in radians, any magnitude
 * @return   equivalent angle wrapped into [-pi, pi]
 */
inline double standardRad(double t) {
  // Same numeric values as the file-level PI / TWOPI constants.
  const double kPi = 3.14159265358979323846;
  const double kTwoPi = 2.0 * kPi;
  // Shift by +/-pi before fmod so the result lands in [-pi, pi] for
  // either sign of the input.
  return (t >= 0.) ? fmod(t + kPi, kTwoPi) - kPi
                   : fmod(t - kPi, -kTwoPi) + kPi;
}

/**
 * Convert a rotation matrix to Euler angles (yaw about Z, then pitch,
 * then roll), each normalized into [-pi, pi] via standardRad.
 *
 * @param wRo    3x3 rotation matrix (world-from-object)
 * @param yaw    out: rotation about Z, atan2(r10, r00)
 * @param pitch  out: computed from -r20 against the yaw-aligned x axis
 * @param roll   out: computed from the yaw-aligned second/third columns
 */
void wRo_to_euler(const Eigen::Matrix3d& wRo, double& yaw, double& pitch, double& roll) {
    yaw = standardRad(atan2(wRo(1,0), wRo(0,0)));
    double c = cos(yaw);
    double s = sin(yaw);
    pitch = standardRad(atan2(-wRo(2,0), wRo(0,0)*c + wRo(1,0)*s));
    roll  = standardRad(atan2(wRo(0,2)*s - wRo(1,2)*c, -wRo(0,1)*s + wRo(1,1)*c));
    // Fix: removed the "//debug" block that recomputed yaw/pitch/roll into
    // *_my locals that were never read (dead code).
}
//*************


// Implicit (conic) parameters of a fitted ellipse.
// NOTE(review): from the usage in far_cal_feature_points the coefficients
// appear to describe the general conic A*x^2 + B*x*y + C*y^2 + D*x + E*y
// + F = 0 — confirm the exact term mapping against the fitting code
// (far_target_fitellipse), which is not fully visible here.
struct EllipsePara
{
    Point2f c;  // ellipse center in image coordinates (pixels)
    float A;
    float B;
    float C;
    float D;
    float E;
    float F;
};

// Locate the target blob in `frame`; copies the masked target area into
// `dst` and returns its bounding rectangle.
Rect far_target_detecion(Mat frame, Mat dst );
// Fit `Ep_num` ellipses inside ROI `rec` of `src`; fills `Ep_t` with the
// resulting conic parameters.
void far_target_fitellipse(Mat src,Rect rec, struct EllipsePara *Ep_t, int Ep_num);
// From the two ellipses and the tangent-line parameters, derive the 21
// feature points used for pose estimation.
void far_cal_feature_points(Mat src, struct EllipsePara *Ep_t,
                            Eigen::Matrix<std::complex< double >, 4, 1> &para_x,
                            Eigen::Matrix<std::complex< double >, 4, 1> &para_y,
                            Point2f *feature_points);
// Compute the parameters (x, y) of the four common tangent lines of the
// two fitted ellipses (roots of a quartic).
void far_cal_tangent_para(Mat src, struct EllipsePara *Ep_t,
                          Eigen::Matrix<std::complex< double >, 4, 1> &x,
                          Eigen::Matrix<std::complex< double >, 4, 1> &y);
// Solve the camera pose (solvePnP) from the 21 feature points; outputs
// rotation/translation vectors and the camera position in world frame.
void far_cal_camera_position(Mat src, Point2f *feature_points, Mat rvecsMat, Mat tvecsMat, Mat Cam_position);
// Near-range variant of the detection + pose pipeline (defined elsewhere).
void near_target_detection_solvePosition(Mat src, Mat rvecsMat, Mat tvecsMat, Mat Cam_position);

// Intersection of line (pt1,pt2) with line (pt3,pt4); 3D variant works in
// the z = 0 plane.
Point3f get_cross_point_3f(Point3f pt1, Point3f pt2, Point3f pt3, Point3f pt4);
Point2f get_cross_point_2f(Point2f pt1, Point2f pt2, Point2f pt3, Point2f pt4);
// Axis rotations of a point by the given angle (degrees or radians per the
// implementations, which are not visible here — confirm units at call sites).
void CodeRotateByZ(double x, double y, double thetaz, double& outx, double& outy);
void CodeRotateByY(double x, double z, double thetay, double& outx, double& outz);
void CodeRotateByX(double y, double z, double thetax, double& outy, double& outz);
// Combine two conics into the quartic whose roots give their common points.
void quadratic2quartic(Eigen::Matrix<double, 6, 1> & _par1,
                       Eigen::Matrix<double, 6, 1> & _par2,
                       Eigen::Matrix<double, 5, 1> & _y2x_par,
                       Eigen::Matrix<std::complex<double>, 5, 1> &_quartic_par);

// n-th root of a complex number.
void sqrtn(const std::complex<double> &_input,
           double n,
           std::complex<double> & _out);

// Ferrari's closed-form solution of a quartic polynomial.
void Ferrari(Eigen::Matrix<std::complex<double>, 5, 1> & _quartic_par,
             Eigen::Matrix<std::complex<double>, 4, 1> & _x);

// Recover the y coordinates corresponding to the quartic roots _x.
void compute_y(Eigen::Matrix<std::complex<double>, 4, 1> &_x,
           Eigen::Matrix<double, 5, 1> &_y2x_par,
           Eigen::Matrix<double, 6, 1> & _ic1,
           Eigen::Matrix<double, 6, 1> & _ic2,
           Eigen::Matrix<std::complex<double>, 4, 1> &_y);


// Entry point: load a test image, run the far-target detection pipeline
// (detect -> fit ellipses -> tangents -> feature points -> solvePnP), and
// print the elapsed processing time.
int main(int argc, char** argv )
{
    // Fix: the original declared char image_source[50] and sprintf'd a
    // 54-character path into it — a buffer overflow (undefined behavior).
    // Use a sufficiently large buffer and a bounded write.
    char image_source[256]={0};
    //snprintf(image_source,sizeof(image_source),"../../test_image/2.1.2.3_origin.PNG");
    snprintf(image_source,sizeof(image_source),"/home/zhi/opencv_qt/test_image/FalseNegative/P0898.png");
    Mat image = imread(image_source,1);
    if ( !image.data )
    {
        printf("No image data \n");
        return -1;
    }

    // Black canvas that far_target_detecion fills with the masked target.
    Mat target_image = Mat(image.size(), CV_8UC3, Scalar(0,0,0));
    Rect rec;

    double time;
    time= getTickCount();

    // Output pose containers shared (by Mat header) with the pipeline.
    Mat Cam_position = Mat(4,1,CV_64FC1, Scalar::all(0));
    Mat rvecsMat = Mat(3,1,CV_64FC1, Scalar::all(0));
    Mat tvecsMat = Mat(3,1,CV_64FC1, Scalar::all(0));

    //STEP1: obtain the target area
    //near_target_detection_solvePosition(image, rvecsMat, tvecsMat, Cam_position);
    rec = far_target_detecion(image,target_image);

    //STEP2: fit the two target ellipses
    struct EllipsePara Ep_t[2];
    far_target_fitellipse(target_image, rec, Ep_t, 2);

    //STEP3: compute the 4 common tangents (roots of a quartic)
    Eigen::Matrix<std::complex< double >, 4, 1> para_x, para_y;
    far_cal_tangent_para(target_image, Ep_t, para_x, para_y);

    //STEP4: derive the 21 feature points
    Point2f feature_points[21];
    far_cal_feature_points(target_image, Ep_t, para_x, para_y, feature_points);

    //STEP5: solve the camera pose
    far_cal_camera_position(target_image, feature_points, rvecsMat, tvecsMat, Cam_position);

    // Elapsed wall time for the whole pipeline, in milliseconds.
    time = 1000*((double)getTickCount() - time)/getTickFrequency();
    cout<<time<<"ms"<<endl;
    //imshow("step5", target_image);
    //imwrite("step5.jpg", target_image);
    waitKey(0);
    return 0;
}

/*
 * Solve the camera pose from the 21 matched feature points.
 *
 * input:
 *   src             image (used only for the disabled overlay drawing)
 *   feature_points  21 image-space points produced by far_cal_feature_points
 * output (shared Mat headers, written in place):
 *   rvecsMat, tvecsMat  solvePnP rotation / translation vectors (3x1 CV_64F)
 *   Cam_position        camera origin in target coordinates (4x1 CV_64F,
 *                       homogeneous)
 */
void far_cal_camera_position(Mat src, Point2f *feature_points, Mat rvecsMat, Mat tvecsMat, Mat Cam_position)
{
    vector<Point2f> imagePoint_z3;
    vector<Point3f> objectPoint_z3;

    // Target-frame coordinates of the 21 feature points (same order as
    // feature_points; units match the tag size, z = 0 plane).
    objectPoint_z3.push_back(Point3f(0.0000000000,20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(60.0000000000,20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(13.3333330154,14.9071197510,0.0000000000));
    objectPoint_z3.push_back(Point3f(46.6666679382,14.9071197510,0.0000000000));
    objectPoint_z3.push_back(Point3f(13.3333330154,-14.9071197510,0.0000000000));
    objectPoint_z3.push_back(Point3f(46.6666679382,-14.9071197510,0.0000000000));
    objectPoint_z3.push_back(Point3f(0.0000000000,-20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(60.0000000000,-20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(15.2786397934,-20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(44.7213592529,-20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(30.0000000000,-8.5410194397,0.0000000000));
    objectPoint_z3.push_back(Point3f(23.3084735870,-5.9850826263,0.0000000000));
    objectPoint_z3.push_back(Point3f(36.6915245056,-5.9850845337,0.0000000000));
    objectPoint_z3.push_back(Point3f(23.3084735870,5.9850826263,0.0000000000));
    objectPoint_z3.push_back(Point3f(36.6915245056,5.9850845337,0.0000000000));
    objectPoint_z3.push_back(Point3f(30.0000000000,8.5410194397,0.0000000000));
    objectPoint_z3.push_back(Point3f(15.2786397934,20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(44.7213592529,20.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(7.6393198967,0.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(52.3606796265,0.0000000000,0.0000000000));
    objectPoint_z3.push_back(Point3f(30.0000000000,0.0000000000,0.0000000000));
    // Note: removed unused locals delta_x / delta_y (computed, never read).

    for(int i=0; i<21; i++)
    {
        imagePoint_z3.push_back(feature_points[i]);
    }

    // Calibrated camera intrinsics and distortion coefficients.
    float Inertial_z3[9] = {868.4883077877223, 0, 647.2946255493767,
                            0, 868.8838663318918, 353.2829270201534,
                            0, 0, 1};
    float distCoeffs_z3[5] = {0.2238432339461053, -1.274314048336785, -0.001154344398215844,
                              0.0004417496136377387, 2.79531777800908};

    Mat camera_Matrix_z3 = Mat(3,3,CV_32FC1,Inertial_z3);
    Mat camera_distCoeffs_z3 = Mat(1,5,CV_32FC1,distCoeffs_z3);

    // Iterative PnP; rvecsMat/tvecsMat already have matching size/type so
    // solvePnP writes into the caller-visible buffers.
    solvePnP(objectPoint_z3, imagePoint_z3, camera_Matrix_z3, camera_distCoeffs_z3, rvecsMat, tvecsMat, 0, ITERATIVE);

    // Build the 4x4 world-from-camera transform [R^T | -R^T t; 0 0 0 1].
    double extend_matrix_0001[4] = {0.0, 0.0, 0.0, 1.0};
    Mat M_matrix = Mat(3,4,CV_64FC1, Scalar::all(0));
    Mat extend_matrix = Mat(1,4,CV_64FC1, extend_matrix_0001);
    Mat invert_extend_M_matrix = Mat(4,4,CV_64FC1, Scalar::all(0));
    Mat rotation_matrix;  // rotation matrix; Rodrigues allocates it CV_64F
    Rodrigues(rvecsMat,rotation_matrix);

    // Convert camera-from-world to world-from-camera.
    Mat rotation_matrix_T;
    transpose(rotation_matrix,rotation_matrix_T);
    Mat tvecsMat_W2C = -rotation_matrix_T * tvecsMat;

    hconcat(rotation_matrix_T, tvecsMat_W2C, M_matrix);
    vconcat(M_matrix, extend_matrix, invert_extend_M_matrix);

    double Cam_O0001[4] = {0.0, 0.0, 0.0, 1.0};
    Mat Cam_O = Mat(4,1,CV_64FC1, Cam_O0001);

    // Fix: the original did `Cam_position = invert_extend_M_matrix * Cam_O;`
    // which rebinds the *local* Mat header to newly-allocated data, so the
    // caller's Cam_position was never updated.  Copy into the shared buffer
    // instead (same 4x1 CV_64F size/type, so no reallocation occurs).
    Mat cam_pos = invert_extend_M_matrix * Cam_O;
    cam_pos.copyTo(Cam_position);
    double Cam_distans = sqrt(Cam_position.at<double>(0,0)*Cam_position.at<double>(0,0) +
                              Cam_position.at<double>(1,0)*Cam_position.at<double>(1,0) +
                              Cam_position.at<double>(2,0)*Cam_position.at<double>(2,0));

    // Convert the world-from-camera rotation to Euler angles for display.
    Eigen::Matrix3d rotation;
    rotation <<
      rotation_matrix_T.at<double>(0,0), rotation_matrix_T.at<double>(0,1),  rotation_matrix_T.at<double>(0,2),
      rotation_matrix_T.at<double>(1,0), rotation_matrix_T.at<double>(1,1),  rotation_matrix_T.at<double>(1,2),
      rotation_matrix_T.at<double>(2,0), rotation_matrix_T.at<double>(2,1),  rotation_matrix_T.at<double>(2,2);

    double yaw, pitch, roll;
    wRo_to_euler(rotation, yaw, pitch, roll);

    // Format the pose readout (the on-image overlay below is disabled).
    char position_tag[7][30];
    sprintf(position_tag[0],"X:  %.2lf cm",Cam_position.at<double>(0,0));
    sprintf(position_tag[1],"Y:  %.2lf cm",Cam_position.at<double>(1,0));
    sprintf(position_tag[2],"Z:  %.2lf cm",Cam_position.at<double>(2,0));
    sprintf(position_tag[3],"DISTANCE:  %.2lf cm",Cam_distans);
    sprintf(position_tag[4],"ROLL:  %.2lf rad=%.2lf",roll,roll*180.0/CV_PI);
    sprintf(position_tag[5],"YAW:  %.2lf rad=%.2lf",yaw,yaw*180.0/CV_PI);
    sprintf(position_tag[6],"PITCH:%.2lf rad=%.2lf",pitch,pitch*180.0/CV_PI);
    // Note: removed the dead overlay loop (rectangle/putText were commented
    // out, so it only computed unused coordinates).
}


/*
 * Derive 21 feature points from the two fitted ellipses and the four
 * common tangent lines.
 *
 * input:
 *   src              image for (mostly disabled) debug annotation
 *   Ep_t             the two ellipse conic parameter sets
 *   para_x, para_y   tangent-line parameters (4 lines); only the real
 *                    parts are used, line assumed of form
 *                    para_x*x + para_y*y + 1 = 0 (inferred from the y
 *                    back-substitution below — confirm against
 *                    far_cal_tangent_para)
 * output:
 *   feature_points   0..7 = tangency points (even indices on Ep_t[0],
 *                    odd on Ep_t[1]); 8..20 = derived line intersections
 *
 * NOTE(review): on the "error" early-return below, the remaining entries
 * of feature_points are left uninitialized — callers then read garbage.
 */
void far_cal_feature_points(Mat src, struct EllipsePara *Ep_t,
                            Eigen::Matrix<std::complex< double >, 4, 1> &para_x,
                            Eigen::Matrix<std::complex< double >, 4, 1> &para_y,
                             Point2f *feature_points)
{
    int para_flag = 0;
    //feature_points 0 2 4 6 on Ep_t[0]
    //feature_points 1 3 5 7 on Ep_t[1]
    for(int i=0; i<4; i++)
    {
        for(int j=0; j<2; j++)
        {
            //Ep_t[j] and para_x[i],para_y[i]
            //convert to form:Ax^2+Bx+c=0
            // Substituting the tangent line into the conic collapses it to a
            // quadratic in x; a tangent touches at a double root, so the
            // discriminant should be ~0 and the root is -b/2a.
            double  quartic_par[3];

            //para of x^2
            quartic_par[2] = Ep_t[j].A * (para_y[i].real())*(para_y[i].real()) -
                             Ep_t[j].B * (para_x[i].real())*(para_y[i].real()) +
                             Ep_t[j].C * (para_x[i].real())*(para_x[i].real());
            //para of x
            quartic_par[1] = -Ep_t[j].B * (para_y[i].real()) +
                             2*Ep_t[j].C * (para_x[i].real()) +
                             Ep_t[j].D * (para_y[i].real())*(para_y[i].real()) -
                             Ep_t[j].E * (para_x[i].real())*(para_y[i].real());
            //para of 1
            quartic_par[0] = Ep_t[j].C - Ep_t[j].E * (para_y[i].real()) + Ep_t[j].F * (para_y[i].real())*(para_y[i].real());

            //Normalization
            quartic_par[0] = quartic_par[0]/quartic_par[2];
            quartic_par[1] = quartic_par[1]/quartic_par[2];
            quartic_par[2] = 1.0;

            //opencv::solvePoly(iteration)
//            Mat cv_quartic_par(1, 3, CV_64FC1, quartic_par);
//            Mat cv_X;
//            solvePoly(cv_quartic_par, cv_X, 300);
//            Complexd cv_x[2]={cv_X.at<cv::Complexd>(0,0), cv_X.at<cv::Complexd>(1,0)};
//            feature_points[para_flag].x = cv_x[0].re;
//            if(para_y[i].real() != 0)
//                feature_points[para_flag].y = (-1.0 - para_x[i].real()*feature_points[para_flag].x )/para_y[i].real();

            // Discriminant of the (normalized, a=1) quadratic: b^2 - 4c.
            double judge_para = quartic_par[1]*quartic_par[1] - 4*quartic_par[0];
            if(fabs(judge_para) < 1e-3)     //1e-5 13.jpg not satified
            {
                feature_points[para_flag].x = -quartic_par[1]/2.0;
                // Back-substitute into the tangent line to recover y.
                // NOTE(review): if para_y is 0 (vertical tangent), y is left
                // uninitialized here.
                if(para_y[i].real() != 0)
                    feature_points[para_flag].y = (-1.0 - para_x[i].real()*feature_points[para_flag].x )/para_y[i].real();
            }
            else
            {
                printf("far_cal_feature_points error");
                return;
            }


            //plot + at feature_points position
            char feature_points_tag[10];
            sprintf(feature_points_tag,"+%d",para_flag);
            //bottom-left
            //change to center
            Point2f feature_points_center;
            feature_points_center.x = feature_points[para_flag].x - 5;
            feature_points_center.y = feature_points[para_flag].y + 5;
            //putText(src, feature_points_tag, feature_points_center, 1, FONT_HERSHEY_PLAIN, Scalar(0,100,255));  // B G R
            para_flag++;
        }
    }

    //label the first eight feature_points
    // Rotate the points about Ep_t[0].c so the center-to-center axis is
    // horizontal; sorting by the rotated y then gives a stable ordering.
    float theta_radian = atan2((Ep_t[1].c.y-Ep_t[0].c.y) , (Ep_t[1].c.x-Ep_t[0].c.x));
    float theta_degree = theta_radian * 180.0/CV_PI;
    //line(src,Ep_t[0].c,Ep_t[1].c,Scalar(0,255,255),1,8);
    Point2f dst_feature_points[8];
    for(int i=0; i<8; i++)
    {

        double x = feature_points[i].x - Ep_t[0].c.x;
        double y = feature_points[i].y - Ep_t[0].c.y;
        dst_feature_points[i].x = x*cos(theta_radian) + y*sin(theta_radian) + Ep_t[0].c.x;
        dst_feature_points[i].y = -x*sin(theta_radian) + y*cos(theta_radian) + Ep_t[0].c.y;


        char feature_points_tag[10];
        sprintf(feature_points_tag,"+%d",i);
        Point2f feature_points_center;
        feature_points_center.x = dst_feature_points[i].x - 5;
        feature_points_center.y = dst_feature_points[i].y + 5;
        //putText(src, feature_points_tag, feature_points_center, 1, FONT_HERSHEY_PLAIN, Scalar(255,100,0));  // B G R
    }

    //sort 0 2 4 6.y,from top to down is 1 2 3 4
    //sort 1 3 5 7.y,from top to down is 5 6 7 8
    // Bubble sort (descending rotated y) over each parity class; the outer
    // i loop repeats each single sweep enough times to fully sort the four
    // elements.  feature_points is permuted in lock-step with
    // dst_feature_points so the original coordinates keep the new order.
    Point2f temp_feature_points;
    for(int i=0; i<8; i++)
    {
        if(i%2 == 0)   //point on Ep_t[0]
        {
            for(int j = 0; j<3; j++)
            {
                if(dst_feature_points[2*j].y<dst_feature_points[2*(j+1)].y)
                {
                    //sort dst_feature_points
                    temp_feature_points.x = dst_feature_points[2*j].x;
                    temp_feature_points.y = dst_feature_points[2*j].y;
                    dst_feature_points[2*j].x = dst_feature_points[2*(j+1)].x;
                    dst_feature_points[2*j].y = dst_feature_points[2*(j+1)].y;
                    dst_feature_points[2*(j+1)].x = temp_feature_points.x;
                    dst_feature_points[2*(j+1)].y = temp_feature_points.y;
                    //sort correspond feature_points
                    temp_feature_points.x = feature_points[2*j].x;
                    temp_feature_points.y = feature_points[2*j].y;
                    feature_points[2*j].x = feature_points[2*(j+1)].x;
                    feature_points[2*j].y = feature_points[2*(j+1)].y;
                    feature_points[2*(j+1)].x = temp_feature_points.x;
                    feature_points[2*(j+1)].y = temp_feature_points.y;
                }
            }
        }
        else    //point on Ep_t[1]
        {
            for(int j = 0; j<3; j++)
            {
                if(dst_feature_points[2*j+1].y<dst_feature_points[2*(j+1)+1].y)
                {
                    //sort dst_feature_points
                    temp_feature_points.x = dst_feature_points[2*j+1].x;
                    temp_feature_points.y = dst_feature_points[2*j+1].y;
                    dst_feature_points[2*j+1].x = dst_feature_points[2*(j+1)+1].x;
                    dst_feature_points[2*j+1].y = dst_feature_points[2*(j+1)+1].y;
                    dst_feature_points[2*(j+1)+1].x = temp_feature_points.x;
                    dst_feature_points[2*(j+1)+1].y = temp_feature_points.y;
                    //sort correspond feature_points
                    temp_feature_points.x = feature_points[2*j+1].x;
                    temp_feature_points.y = feature_points[2*j+1].y;
                    feature_points[2*j+1].x = feature_points[2*(j+1)+1].x;
                    feature_points[2*j+1].y = feature_points[2*(j+1)+1].y;
                    feature_points[2*(j+1)+1].x = temp_feature_points.x;
                    feature_points[2*(j+1)+1].y = temp_feature_points.y;
                }
            }
        }
    }

    //calculate expand feature_points
    // Points 8..20 are intersections of lines through the sorted tangency
    // points (matching the object-space layout in far_cal_camera_position).

    feature_points[8]  = get_cross_point_2f(feature_points[0],feature_points[4],feature_points[6],feature_points[7]);
    feature_points[9]  = get_cross_point_2f(feature_points[1],feature_points[5],feature_points[6],feature_points[7]);
    feature_points[10] = get_cross_point_2f(feature_points[4],feature_points[6],feature_points[5],feature_points[7]);
    feature_points[11] = get_cross_point_2f(feature_points[3],feature_points[4],feature_points[5],feature_points[7]);
    feature_points[12] = get_cross_point_2f(feature_points[4],feature_points[6],feature_points[2],feature_points[5]);
    feature_points[13] = get_cross_point_2f(feature_points[1],feature_points[3],feature_points[2],feature_points[5]);
    feature_points[14] = get_cross_point_2f(feature_points[0],feature_points[2],feature_points[3],feature_points[4]);
    feature_points[15] = get_cross_point_2f(feature_points[0],feature_points[2],feature_points[1],feature_points[3]);
    feature_points[16] = get_cross_point_2f(feature_points[0],feature_points[1],feature_points[2],feature_points[6]);
    feature_points[17] = get_cross_point_2f(feature_points[0],feature_points[1],feature_points[3],feature_points[7]);
    feature_points[18] = get_cross_point_2f(feature_points[0],feature_points[4],feature_points[2],feature_points[6]);
    feature_points[19] = get_cross_point_2f(feature_points[1],feature_points[5],feature_points[3],feature_points[7]);
    feature_points[20] = get_cross_point_2f(feature_points[3],feature_points[4],feature_points[2],feature_points[5]);

    // Annotate each point index on the debug image.
    for(int i=0; i<21; i++)
    {
        char feature_points_tag[10];
        sprintf(feature_points_tag,"+%d",i);
        Point2f feature_points_center;
        feature_points_center.x = feature_points[i].x - 2.5;
        feature_points_center.y = feature_points[i].y + 2.5;
        putText(src, feature_points_tag, feature_points_center, FONT_HERSHEY_PLAIN, 0.5, Scalar(255,100,0));  // B G R
        //printf("feature_point[%d]=(%.2f,%.2f)\n",i,feature_points[i].x,feature_points[i].y);
    }
}

// Intersect line (pt1,pt2) with line (pt3,pt4) in the z = 0 plane using
// slope/intercept form.  A vertical line gets a sentinel slope of 1e20;
// parallel (equal-slope) lines yield the sentinel point (1e20, 1e20, 0).
Point3f get_cross_point_3f(Point3f pt1,Point3f pt2,Point3f pt3,Point3f pt4)
{
    const float dx1 = pt2.x - pt1.x;
    const float dy1 = pt2.y - pt1.y;
    const float dx2 = pt4.x - pt3.x;
    const float dy2 = pt4.y - pt3.y;

    // Slopes, with a huge sentinel standing in for "vertical".
    const float k1 = (dx1 != 0) ? dy1 / dx1 : 1.0e20f;
    const float k2 = (dx2 != 0) ? dy2 / dx2 : 1.0e20f;

    // y-intercepts from one point of each line.
    const float b1 = pt1.y - pt1.x * k1;
    const float b2 = pt3.y - pt3.x * k2;

    Point3f cross_point;
    cross_point.z = 0.0;
    if (k1 == k2)
    {
        // Parallel: report the sentinel point.
        cross_point.x = 1e20;
        cross_point.y = 1e20;
    }
    else
    {
        cross_point.x = (b2 - b1) / (k1 - k2);
        cross_point.y = k1 * cross_point.x + b1;
    }
    return cross_point;
}

// 2D variant of get_cross_point_3f: intersect line (pt1,pt2) with line
// (pt3,pt4).  Vertical lines use a sentinel slope of 1e20; parallel
// (equal-slope) lines yield the sentinel point (1e20, 1e20).
Point2f get_cross_point_2f(Point2f pt1,Point2f pt2,Point2f pt3,Point2f pt4)
{
    const float dx1 = pt2.x - pt1.x;
    const float dy1 = pt2.y - pt1.y;
    const float dx2 = pt4.x - pt3.x;
    const float dy2 = pt4.y - pt3.y;

    // Slopes, with a huge sentinel standing in for "vertical".
    const float k1 = (dx1 != 0) ? dy1 / dx1 : 1.0e20f;
    const float k2 = (dx2 != 0) ? dy2 / dx2 : 1.0e20f;

    // y-intercepts from one point of each line.
    const float b1 = pt1.y - pt1.x * k1;
    const float b2 = pt3.y - pt3.x * k2;

    Point2f cross_point;
    if (k1 == k2)
    {
        // Parallel: report the sentinel point.
        cross_point.x = 1e20;
        cross_point.y = 1e20;
    }
    else
    {
        cross_point.x = (b2 - b1) / (k1 - k2);
        cross_point.y = k1 * cross_point.x + b1;
    }
    return cross_point;
}

/*
    input:
        src: CV_8UC3 source image
    output:
        dst: CV_8UC3; the detected target area is copied in (masked by the
             winning contour), and src is annotated with the bounding rect
    return:
        bounding Rect of the detected target contour; the whole image if no
        contour qualifies; an empty Rect if src has no data
    function:
        drawRec on src target area, copy target area to dst
*/
Rect far_target_detecion(Mat src, Mat dst )
{
    if ( !src.data )
    {
        printf("No image data \n");
        // Fix: the original `return;` was commented out here, so execution
        // fell through and cvtColor crashed on the empty image.
        return Rect();
    }
    Mat image;
    cvtColor( src, image, CV_BGR2GRAY );
    Mat binImg;

    Mat element = getStructuringElement(MORPH_RECT, Size(3,3));

    // Otsu binarization, then morphological opening to remove speckle.
    threshold(image, binImg, 0, 255, CV_THRESH_OTSU);
    //adaptiveThreshold(image, binImg, 200, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 3, 5);
    morphologyEx(binImg, binImg, MORPH_OPEN, element);

    // All contours with full hierarchy (parent/child links are needed for
    // the nested-circle test below).
    vector< vector< Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(binImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);

    int idx_num = contours.size();
    char contours_tag[20]={0};
    Point origin;

    for(int i=0; i<idx_num; i++)
    {
        // Bounding rectangle of this contour.
        Rect rect = boundingRect(contours[i]);
        int x = rect.x;
        int y = rect.y;
        int w = rect.width;
        int h = rect.height;
        float contour_area = contourArea(contours[i], false );
        // Circularity: 4*pi*area / perimeter^2 (approaches 1 for a circle;
        // contour point count stands in for perimeter).
        float shape_ratio = contour_area*4*CV_PI/contours[i].size()/contours[i].size();
        origin.x = x;
        origin.y = y+15;

        // Reject small or extremely elongated contours.
        if(contours[i].size() < 100 ||w<0.2*h||w>5*h)
        {
            continue;
        }
        else if(shape_ratio < 0.45) //change 0.6 to 0.4
        {
            continue;
        }

        // The target is a roundish blob containing circular child contours.
        if(hierarchy[i][2] != -1)
        {
            int ii=hierarchy[i][2]; //ii is the first child of i
            int cicle_child=0;
            int child_num=0;
            int biggest_child_area=contours[ii].size(); //a bug: ii but not i
            int second_child_area=0;
            child_num++;

            // Walk the sibling chain of children, counting circular ones
            // and tracking the two largest child sizes.
            while(hierarchy[ii][0] != -1) //find all child
            {
                float child_shape_ratio = contourArea(contours[ii], false )*4*CV_PI/contours[ii].size()/contours[ii].size();
                if(child_shape_ratio>0.65 && contours[ii].size()>0.15*contours[i].size() && contours[ii].size()<0.5*contours[i].size())    //0405 shape ratio changed from 0.8 to 0.7
                {
                    cicle_child++;
                }
                child_num++;

                ii = hierarchy[ii][0];
                if(contours[ii].size()<=biggest_child_area) // <= but not <
                {
                    if(contours[ii].size() >second_child_area)
                    second_child_area = contours[ii].size();
                }
                else
                {
                    second_child_area = biggest_child_area; //a fixed bug:save the biggest value to second
                    biggest_child_area = contours[ii].size();
                }
            }
            // now ii point to the last child(if only one child then ii point to the first child)
            float child_shape_ratio = contourArea(contours[ii], false )*4*CV_PI/contours[ii].size()/contours[ii].size();
            if(child_shape_ratio>0.65 && contours[ii].size()>0.15*contours[i].size() && contours[ii].size()<0.5*contours[i].size())    //0405 shape ratio changed from 0.8 to 0.6
            {
                cicle_child++;
            }

            //if(child_num ==0 || child_num >10)
            if(child_num ==0)
            {
                continue;
            }
            else if(cicle_child>=1 && biggest_child_area <1.5*second_child_area)
            {
                // Winner: copy the masked target area into dst and annotate
                // the source image.
                Mat mask = Mat::zeros(image.size(), CV_8UC1);
                drawContours( mask, contours, i, Scalar(255,255,255), CV_FILLED, 8);
                src.copyTo(dst, mask);

                rectangle(src, rect, Scalar(0,0,255),2);
                sprintf(contours_tag,"tag_%d",i);
                //putText(src, contours_tag, origin, 1, FONT_HERSHEY_PLAIN, Scalar(0,0,255));
                Scalar color( rand()&255, rand()&255, rand()&255 );
                drawContours( src, contours, i, color, 1, 8);

                return rect;
            }
            else
            {
                continue;
            }
        }
        else
        {
            continue;
        }
    }
    // No contour qualified: fall back to the whole image.
    // Note: removed unused locals idx / idx_left / break_flag / small_child
    // (written but never read).
    Rect rec_default(0, 0, src.cols, src.rows);
    return rec_default;
}


/*
    input:
        image:CV_8UC3
    output:
        Ep_t[2]:Ellipse Para,the first contain a AprilTag
    function:
        fitEllipse,get paras
*/
// Far-field target detector: inside ROI_rec, search for a contour that holds
// exactly two similar-sized, circle-like child contours, fit an ellipse to
// each child and fill Ep_t[0..1] with the implicit conic coefficients
//   A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0
// (centre in Ep_t[k].c, all in full-image coordinates). The two entries are
// then ordered so that Ep_t[0] is the brighter patch, assumed to contain the
// AprilTag. Draws the fitted ellipses and the sampling squares into src.
// NOTE(review): the post-processing below indexes Ep_t[0] and Ep_t[1], so the
// caller is expected to pass Ep_num == 2 -- TODO confirm with callers.
void far_target_fitellipse(Mat src,Rect ROI_rec, struct EllipsePara *Ep_t, int Ep_num)
{
    if ( !src.data )
    {
        printf("far_target_detection: No image data \n");
        return;
    }
    Mat image;
    cvtColor( src, image, CV_BGR2GRAY );
    Mat image_roi = image(ROI_rec);   // work only inside the region of interest
    Mat binImg;
    Mat element = getStructuringElement(MORPH_RECT, Size(3,3));
    blur(image_roi, image_roi, Size(3, 3));                // denoise before Otsu
    threshold(image_roi, binImg, 0, 255, CV_THRESH_OTSU);  // automatic threshold
    morphologyEx(binImg, binImg, MORPH_OPEN, element);     // drop small speckles
    //adaptiveThreshold(image, binImg, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 3, 5);
    //imshow("binImg",binImg);

    // all contour information; CV_RETR_TREE keeps the parent/child hierarchy,
    // which is what the nested-circle test below relies on
    vector< vector< Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(binImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);

    int idx_num = contours.size();;
    int idx_left = idx_num;

    int short_half_axis = 0;   // shorter semi-axis of the last fitted ellipse
    //cout<<"the total contours:"<<idx_num<<endl;
    char contours_tag[20]={0};
    Point origin;

    //printf("contours num:%d\n",idx_num);

    for(int i=0; i<idx_num; i++)  //i=0 is the image board
    {
        // bounding rectangle of the contour, shifted back to full-image coords
        Rect rect = boundingRect(contours[i]);
        int x = rect.x + ROI_rec.x;
        int y = rect.y + ROI_rec.y;
        int w = rect.width;
        int h = rect.height;
        rect.x += ROI_rec.x;
        rect.y += ROI_rec.y;

        int child_tag[2] = {0,0};   // contour indices of the two circle-like children
        Rect child_rect[2];

        int contour_area = contourArea(contours[i], false );
        // circularity: 4*pi*area / perimeter^2 (1.0 for a perfect circle);
        // the contour point count is used as the perimeter estimate
        float shape_ratio = contour_area*4*CV_PI/contours[i].size()/contours[i].size();
        origin.x = x;
        origin.y = y+15;


        // reject tiny or strongly elongated contours
        if(contours[i].size() < 30 ||w<0.2*h||w>5*h)   //change 100 to 30   (20m,10m):target cicle contour about 30 pixel
        {
            continue;
        }
        else if(shape_ratio<0.45) //change 0.6 to 0.5
        {
            continue;
        }

//        rectangle(src, rect, Scalar(0,0,255));
//        sprintf(contours_tag,"tag_%d",i);
//        putText(src, contours_tag, origin, 1, FONT_HERSHEY_PLAIN, Scalar(0,0,255));
//        Scalar color( rand()&255, rand()&255, rand()&255 );
        //drawContours( src, contours, i, color, 1, 8);

        if(hierarchy[i][2] != -1)   // contour i has at least one child
        {
            int ii=hierarchy[i][2]; //ii is the first child of i
            int cicle_child = 0;    // number of circle-like children found
            int child_num = 1;
            int biggest_child_area = contours[ii].size(); //a bug: ii but not i
            int second_child_area = 0;

            // Walk the sibling list of i's children. Note the circle test is
            // applied to the current child while the size bookkeeping below
            // already looks at the NEXT sibling, so the last child gets its
            // circle test separately after the loop.
            while(hierarchy[ii][0] != -1) //find all child
            {
                float child_shape_ratio = contourArea(contours[ii], false )*4*CV_PI/contours[ii].size()/contours[ii].size();
                // a circle-like child of plausible size relative to its parent
                if(child_shape_ratio>0.60 && contours[ii].size()>0.15*contours[i].size() && contours[ii].size()<0.5*contours[i].size())
                {
                    cicle_child++;
                    if(cicle_child ==1)
                        child_tag[0] = ii;
                    else
                        child_tag[1] = ii;  //a fixed bug
                }

                child_num++;
                ii = hierarchy[ii][0];   // advance to the next sibling

                // track the two largest child sizes (point counts)
                if(contours[ii].size()<=biggest_child_area) // <= but not <
                {
                    if(contours[ii].size() >second_child_area)
                    second_child_area = contours[ii].size();
                }
                else
                {
                    second_child_area = biggest_child_area; //a fixed bug:save the biggest value to second
                    biggest_child_area = contours[ii].size();
                }
            }
            // now ii point to the last child(if only one child then ii point to the first child)
            float child_shape_ratio = contourArea(contours[ii], false )*4*CV_PI/contours[ii].size()/contours[ii].size();
            if(child_shape_ratio>0.60 && contours[ii].size()>0.15*contours[i].size() && contours[ii].size()<0.5*contours[i].size())
            {
                cicle_child++;  //if the first child is cicle_child,it will not be caculated,bug
                child_tag[1] = ii;
            }

            // accept only a parent with exactly two similar-sized circular children
            if(cicle_child == 2 && biggest_child_area <1.5*second_child_area && Ep_num>0)
            {
                // fit both children; Ep_num counts down to 0 on success
                for( ; Ep_num>0; Ep_num--)
                {
                    RotatedRect Rec_t = fitEllipse(contours[child_tag[Ep_num-1]]);
                    float theta = Rec_t.angle * CV_PI / 180.0 ;
                    float a = Rec_t.size.width / 2.0;
                    float b = Rec_t.size.height / 2.0;
                    if(a < b)
                        short_half_axis = a;
                    else
                        short_half_axis = b;

                    // shift the centre from ROI to full-image coordinates
                    Ep_t[Ep_num-1].c.x = Rec_t.center.x + ROI_rec.x;
                    Ep_t[Ep_num-1].c.y = Rec_t.center.y + ROI_rec.y;
                    Rec_t.center.x += ROI_rec.x;
                    Rec_t.center.y += ROI_rec.y;
                    ellipse(src,Rec_t,Scalar(0,0,255),1,8);
                    // implicit conic coefficients of the axis-aligned ellipse
                    // rotated by theta (centre still at the origin here)
                    Ep_t[Ep_num-1].A = a * a * sin(theta) * sin(theta) + b * b * cos(theta) * cos(theta);
                    Ep_t[Ep_num-1].B = (-2.0) * (a * a - b * b) * sin(theta) * cos(theta); //Negative or positive?
                    Ep_t[Ep_num-1].C = a * a * cos(theta) * cos(theta) + b * b * sin(theta) * sin(theta);
                    Ep_t[Ep_num-1].F = (-1.0) * a * a * b * b;    //a bug
                    // translate the conic so it is centred at c
                    Ep_t[Ep_num-1].F += Ep_t[Ep_num-1].A * Ep_t[Ep_num-1].c.x * Ep_t[Ep_num-1].c.x
                                        + Ep_t[Ep_num-1].B * Ep_t[Ep_num-1].c.x * Ep_t[Ep_num-1].c.y
                                        + Ep_t[Ep_num-1].C * Ep_t[Ep_num-1].c.y * Ep_t[Ep_num-1].c.y;

                    Ep_t[Ep_num-1].D = -2 * Ep_t[Ep_num-1].A * Ep_t[Ep_num-1].c.x - Ep_t[Ep_num-1].B * Ep_t[Ep_num-1].c.y;
                    Ep_t[Ep_num-1].E = -2 * Ep_t[Ep_num-1].C * Ep_t[Ep_num-1].c.y - Ep_t[Ep_num-1].B * Ep_t[Ep_num-1].c.x;

                    child_rect[Ep_num-1] = boundingRect(contours[child_tag[Ep_num-1]]);
                    child_rect[Ep_num-1].x += ROI_rec.x;
                    child_rect[Ep_num-1].y += ROI_rec.y;

//                    cout << Ep_t[Ep_num-1].A << " "
//                         << Ep_t[Ep_num-1].B << " "
//                         << Ep_t[Ep_num-1].C << " "
//                         << Ep_t[Ep_num-1].D << " "
//                         << Ep_t[Ep_num-1].E << " "
//                         << Ep_t[Ep_num-1].F << " "
//                         << Ep_t[Ep_num-1].c.x << " "
//                         << Ep_t[Ep_num-1].c.y << endl;
                    //rectangle(src, child_rect[Ep_num-1], Scalar(0,0,255));
                    //sprintf(contours_tag,"tag_%d",i);
                    //putText(src, contours_tag, origin, 1, FONT_HERSHEY_PLAIN, Scalar(0,0,255));
                    //Scalar color( rand()&255, rand()&255, rand()&255 );
                    //drawContours( src, contours, i, color, 1, 8);
                }
            }
        }
    }

    // Ep_num was decremented to 0 only if the fitting loop ran to completion
    if(Ep_num!= 0)
    {
        printf("fit failed\n");
        return;
    }


    //confirm which ellipse contain the AprilTag:
    // sample a small square (side 0.5 * short_half_axis) around each centre
    // and compare the mean intensities
    int x0 = Ep_t[0].c.x-0.25*short_half_axis;
    int y0 = Ep_t[0].c.y-0.25*short_half_axis;
    int x1 = Ep_t[1].c.x-0.25*short_half_axis;
    int y1 = Ep_t[1].c.y-0.25*short_half_axis;
    int area_height = 0.5*short_half_axis;


    // clamp both sample squares to the image borders
    if(x0<0)
        x0=0;
    if(y0<0)
        y0=0;
    if(x1<0)
        x1=0;
    if(y1<0)
        y1=0;
    if(x0+area_height > src.cols)
    {
        area_height = src.cols-x0;
    }
    if(y0+area_height > src.rows)
    {
        area_height = src.rows-y0;
    }
    if(x1+area_height > src.cols)
    {
        area_height = src.cols-x1;
    }
    if(y1+area_height > src.rows)
    {
        area_height = src.rows-y1;
    }


    Rect ROI0(x0, y0, area_height, area_height);
    Rect ROI1(x1, y1, area_height, area_height);

    // masked means; NOTE(review): src is BGR, so val[0] is the mean of the
    // blue channel only -- confirm this is the intended brightness measure
    Mat mask0 = Mat::zeros(src.size(),CV_8UC1);
    mask0(ROI0).setTo(255);
    Scalar tempVal0 = cv::mean(src, mask0);
    float matMean0 = tempVal0.val[0];

    Mat mask1 = Mat::zeros(src.size(),CV_8UC1);
    mask1(ROI1).setTo(255);
    Scalar tempVal1 = cv::mean(src, mask1);
    float matMean1 = tempVal1.val[0];

    if(matMean0 < matMean1) //let Ep_t[0] has bigger mean,which stand Apriltag
    {
        // swap Ep_t[0] and Ep_t[1] field by field
        float temp_x = Ep_t[0].c.x;
        float temp_y = Ep_t[0].c.y;
        float temp_A = Ep_t[0].A;
        float temp_B = Ep_t[0].B;
        float temp_C = Ep_t[0].C;
        float temp_D = Ep_t[0].D;
        float temp_E = Ep_t[0].E;
        float temp_F = Ep_t[0].F;

        Ep_t[0].c.x = Ep_t[1].c.x;
        Ep_t[0].c.y = Ep_t[1].c.y;
        Ep_t[0].A = Ep_t[1].A;
        Ep_t[0].B = Ep_t[1].B;
        Ep_t[0].C = Ep_t[1].C;
        Ep_t[0].D = Ep_t[1].D;
        Ep_t[0].E = Ep_t[1].E;
        Ep_t[0].F = Ep_t[1].F;

        Ep_t[1].c.x = temp_x;
        Ep_t[1].c.y = temp_y;
        Ep_t[1].A   = temp_A;
        Ep_t[1].B   = temp_B;
        Ep_t[1].C   = temp_C;
        Ep_t[1].D   = temp_D;
        Ep_t[1].E   = temp_E;
        Ep_t[1].F   = temp_F;

        float temp_Mean = matMean0;
        matMean0 = matMean1;
        matMean1 = temp_Mean;

        rectangle(src, ROI1, Scalar(0, 0, 255), -1, 8, 0); //RED   main
        rectangle(src, ROI0, Scalar(0, 255, 0), -1, 8, 0); //GREEN
        //cout<<"matMean0:"<<matMean0<<"\t"<<"(x0,y0):"<<Ep_t[0].c.x<<"  "<<Ep_t[0].c.y<<endl;
        //cout<<"matMean1:"<<matMean1<<"\t"<<"(x1,y1):"<<Ep_t[1].c.x<<"  "<<Ep_t[1].c.y<<endl;
    }
    else
    {
        rectangle(src, ROI0, Scalar(0, 0, 255), -1, 8, 0); //RED
        rectangle(src, ROI1, Scalar(0, 255, 0), -1, 8, 0); //GREEN
        //cout<<"matMean0:"<<matMean0<<"\t"<<"(x0,y0):"<<Ep_t[0].c.x<<"  "<<Ep_t[0].c.y<<endl;
        //cout<<"matMean1:"<<matMean1<<"\t"<<"(x1,y1):"<<Ep_t[1].c.x<<"  "<<Ep_t[1].c.y<<endl;
    }

    //cout<<"the total contours:"<<idx_num<<"\t the left contours:"<<idx_left<<endl;
    //waitKey(1);
    return;
}


// Compute the four common tangent lines of the two fitted ellipses.
// Each ellipse is expressed as a 3x3 homogeneous conic matrix; intersecting
// their duals (inverses) reduces to a quartic equation, solved with
// Ferrari's method. The line coefficients are returned through x and y
// (line equation: x*X + y*Y + 1 = 0). The endpoint computation at the end
// is retained only for (currently disabled) debug drawing.
void far_cal_tangent_para(Mat src, struct EllipsePara *Ep_t,
                          Eigen::Matrix<std::complex< double >, 4, 1> &x,
                          Eigen::Matrix<std::complex< double >, 4, 1> &y)
{
    // Homogeneous conic matrices of the two ellipses.
    Eigen::Matrix<double, 3, 3> conic0, conic1;
    conic0 << Ep_t[0].A,     Ep_t[0].B/2.0, Ep_t[0].D/2.0,
              Ep_t[0].B/2.0, Ep_t[0].C,     Ep_t[0].E/2.0,
              Ep_t[0].D/2.0, Ep_t[0].E/2.0, Ep_t[0].F;
    conic1 << Ep_t[1].A,     Ep_t[1].B/2.0, Ep_t[1].D/2.0,
              Ep_t[1].B/2.0, Ep_t[1].C,     Ep_t[1].E/2.0,
              Ep_t[1].D/2.0, Ep_t[1].E/2.0, Ep_t[1].F;

    // Dual conics. The adjoint differs from the inverse only by the
    // determinant scale, which cancels in the homogeneous equations below.
    Eigen::Matrix<double, 3, 3> dual0 = conic0.inverse();
    Eigen::Matrix<double, 3, 3> dual1 = conic1.inverse();

    // Flatten each dual into the 6 coefficients of its quadratic form.
    Eigen::Matrix<double, 6, 1> ic1, ic2;
    ic1 << dual0(0,0), 2*dual0(0,1), dual0(1,1), 2*dual0(0,2), 2*dual0(1,2), dual0(2,2);
    ic2 << dual1(0,0), 2*dual1(0,1), dual1(1,1), 2*dual1(0,2), 2*dual1(1,2), dual1(2,2);

    // Reduce the pair of quadratics to a single quartic in x.
    Eigen::Matrix<std::complex< double >, 5, 1> quartic_par;
    Eigen::Matrix<double, 5, 1> y2x_par;
    quadratic2quartic(ic1, ic2, y2x_par, quartic_par);

    Ferrari(quartic_par, x);               // four (possibly complex) roots
    compute_y(x, y2x_par, ic1, ic2, y);    // matching y coordinates

    // Convert each line to drawable endpoints (drawing itself disabled).
    for(int i = 0; i < 4; i++)
    {
        if(y[i].real() == 0)
            continue;

        float slope = -x[i].real()/y[i].real();
        float icpt  = -1/y[i].real();
        Point pa, pb;
        pa.x = 0;
        pa.y = icpt;
        pb.x = src.cols;
        pb.y = slope*pb.x + icpt;   // careful: Point stores ints, may overflow

        // OpenCV line drawing clips at 16-bit coordinates; when the vertical
        // intercepts fall outside SHRT range, re-derive the endpoints from
        // the horizontal image borders instead.
        if(slope*pb.x+icpt > SHRT_MAX || slope*pb.x+icpt < SHRT_MIN || pa.y > SHRT_MAX ||  pa.y < SHRT_MIN)   //SHRT_MAX=32767(16bit)
        {
            if(slope != 0)
            {
                pa.x = -icpt/slope;
                pa.y = 0;
                pb.x = (src.rows-icpt)/slope;
                pb.y = src.rows;    // 720 pixel
            }
        }
        //line(src,pa,pb,Scalar(255,0,0),1,8);
    }
}


//solve quartic equation
// Combine two quadratic forms (dual conics)
//   a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0
// into a single quartic in x. Eliminating the y^2 term between the two
// equations yields the linear relation
//   y = -(A*x^2 + C*x + E) / (B*x + D)
// whose coefficients are returned in _y2x_par; substituting it back into the
// first quadratic gives the monic quartic returned in _quartic_par
// (highest-degree coefficient first). Assumes the leading coefficient M4 is
// non-zero.
void quadratic2quartic(Eigen::Matrix<double, 6, 1> & _par1,
                     Eigen::Matrix<double, 6, 1> & _par2,
                     Eigen::Matrix<double, 5, 1> & _y2x_par,
                     Eigen::Matrix<std::complex<double>, 5, 1> &_quartic_par)
{
    // Unpack both coefficient vectors.
    const double a1 = _par1[0], b1 = _par1[1], c1 = _par1[2];
    const double d1 = _par1[3], e1 = _par1[4], f1 = _par1[5];
    const double a2 = _par2[0], b2 = _par2[1], c2 = _par2[2];
    const double d2 = _par2[3], e2 = _par2[4], f2 = _par2[5];

    // Coefficients of the y-elimination relation (y^2 term cancelled).
    const double A = a1*c2 - a2*c1;
    const double B = b1*c2 - b2*c1;
    const double C = d1*c2 - d2*c1;
    const double D = e1*c2 - e2*c1;
    const double E = f1*c2 - f2*c1;
    _y2x_par << A, B, C, D, E;

    // Quartic coefficients after back-substitution into the first quadratic.
    const double M4 = a1*B*B - b1*A*B + c1*A*A;
    const double M3 = 2*a1*B*D - b1*A*D - b1*B*C + 2*c1*A*C + d1*B*B - e1*A*B;
    const double M2 = a1*D*D - b1*C*D - b1*B*E + c1*C*C + 2*c1*A*E + 2*d1*B*D - e1*A*D - e1*B*C + f1*B*B;
    const double M1 = -b1*D*E + 2*c1*C*E + d1*D*D - e1*C*D - e1*B*E + 2*f1*B*D;
    const double M0 = c1*E*E - e1*D*E + f1*D*D;

    // Normalise to a monic quartic: x^4 + b*x^3 + c*x^2 + d*x + e.
    _quartic_par << 1.0, M3 / M4, M2 / M4, M1 / M4, M0 / M4;
}

//extract a root
// Extract the principal n-th root of a complex number.
//  _input : value whose root is taken
//  n      : root order (2.0 = square root, 3.0 = cube root)
//  _out   : receives the principal root; set to (0,0) when |_input| == 0.
// Fix: the original assigned through _out.real()/_out.imag(); since C++11
// these accessors return by value, so those assignments are ill-formed on a
// conforming compiler (they only built as an old GCC extension). std::polar
// constructs the identical value r*(cos(a) + i*sin(a)).
void sqrtn(const std::complex<double> &_input, double n,
          std::complex<double> & _out)
{
  // modulus, computed robustly against intermediate overflow/underflow
  double r = hypot(_input.real(), _input.imag());

  if(r > 0.0)
  {
    double a = atan2(_input.imag(), _input.real()); // principal argument
    n = 1 / n;
    r = pow(r, n);   // |z|^(1/n)
    a *= n;          // arg(z)/n
    _out = std::polar(r, a);
  }
  else
  {
    _out = std::complex<double>(0.0, 0.0);
  }
}

// Solve the quartic a*x^4 + b*x^3 + c*x^2 + d*x + e = 0 with Ferrari's
// method (the input from quadratic2quartic is monic, a == 1). A root y of
// the resolvent cubic is found via Cardano's formula, after which the
// quartic factors into two quadratics whose four (possibly complex) roots
// are written to _x.
void Ferrari(Eigen::Matrix<std::complex<double>, 5, 1> & _quartic_par,
                 Eigen::Matrix<std::complex<double>, 4, 1> & _x)
{
  std::complex<double> a = _quartic_par[0];
  std::complex<double> b = _quartic_par[1];
  std::complex<double> c = _quartic_par[2];
  std::complex<double> d = _quartic_par[3];
  std::complex<double> e = _quartic_par[4];

  // Invariants of the resolvent cubic (Cardano form).
  std::complex<double> P = (c*c + 12.0*e - 3.0*b*d) / 9.0;
  std::complex<double> Q = (27.0*d*d + 2.0*c*c*c + 27.0*b*b*e - 72.0*c*e - 9.0*b*c*d) / 54.0;
//   std::cout << "P: " << P << "   \nQ: " << Q << std::endl;
  std::complex<double> D, u, v;
//   D = cabs(sqrt(Q*Q - P*P*P));

  // D = sqrt of the cubic discriminant expression.
  sqrtn(Q*Q - P*P*P, 2.0, D);
//   std::cout << "D: " << D << std::endl;
  u = Q + D;
  v = Q - D;
//   std::cout << "u: " << u << "  \nv: " << v << std::endl;
  // Take the cube root of whichever of u, v has the larger magnitude, for
  // numerical stability (avoids cancellation in the smaller one).
  if(v.real()*v.real() + v.imag()*v.imag() > u.real()*u.real() + u.imag()*u.imag())
  {
    sqrtn(v, 3.0, u);
  }
  else
  {
    sqrtn(u, 3.0, u);
  }
//   std::cout <<"u: " << u << std::endl;
  std::complex<double> y;
  if(u.real()*u.real() + u.imag()*u.imag() > 0.0)
  {
    v = P / u;   // companion cube root so that u*v == P
    // o1, o2 are the complex cube roots of unity.
    std::complex<double> o1(-0.5,+0.86602540378443864676372317075294);
    std::complex<double> o2(-0.5,-0.86602540378443864676372317075294);
    // yMax aliases _x[0] as scratch; _x[0] is only overwritten after y = yMax.
    std::complex<double> &yMax = _x[0];
    double m2 = 0.0;
    double m2Max = 0.0;
    int iMax = -1;

    // Evaluate all three cubic roots and keep the one that maximises
    // |b^2 + 4(y - c)|, so the later division by m is well conditioned.
    for(int i = 0; i < 3; ++i)
    {
//        y = u + v + c / 3.0;
//        u *= o1;
//        v *= o2;  //bug
      if(i == 0)
          y = u + v + c/3.0;
      else if(i == 1)
          y = u*o1 +v*o2 +c/3.0;
      else
          y = u*o2 +v*o1 +c/3.0;
      a = b*b + 4.0*(y-c);   // 'a' is reused as scratch from here on
      m2 = a.real()*a.real() + a.imag()*a.imag();

      if(0==i || m2Max < m2)
      {
        m2Max = m2;
        yMax = y;
        iMax = i;
      }
    }
    //printf("\n iMax: %d\n",iMax);
    y = yMax;
  }
  else
  {//cubic equation degenerates (u == 0)
    y = c / 3.0;
  }
  // Split the quartic into two quadratics using y; m, n are the cross terms.
  std::complex<double> m;
  sqrtn(b*b + 4.0*(y-c), 2.0, m);
  if(m.real()*m.real() + m.imag()*m.imag() >= DBL_MIN)
  {
    std::complex<double> n = (b*y - 2.0*d) / m;
    // Roots of the first quadratic.
    sqrtn((b+m)*(b+m) - 8.0*(y+n), 2.0, a);
    _x[0] = (-(b+m) + a) / 4.0;
    _x[2] = (-(b+m) - a) / 4.0;
    // Roots of the second quadratic.
    sqrtn((b-m)*(b-m) - 8.0*(y-n), 2.0, a);
    _x[1] = (-(b-m) + a) / 4.0;
    _x[3] = (-(b-m) -a) / 4.0;
  }
  else
  {
     // m ~ 0: the quartic is a perfect square of one quadratic.
     sqrtn(b*b - 8.0*y, 2.0, a);
     _x[0] = _x[1] = (-b + a) / 4.0;
     _x[2] = _x[3] = (-b - a) / 4.0;
  }

//   for(int i = 0; i <4; i++)
//   {
//     std::cout << _x[i] << std::endl;
//   }
}

// Recover the y line-coordinate for each quartic root in _x.
// Normally y follows from the linear elimination relation produced by
// quadratic2quartic (stored in _y2x_par). When a root has x == 0 that
// relation can degenerate, so y is instead taken from the roots of the
// quadratic obtained by restricting the first dual conic _ic1 to x = 0;
// at most two such roots are handed out, tracked by 'flag'.
// _ic2 is accepted for interface symmetry but is not used here.
// Fix: the original wrote through _y[i].real(), which returns by value
// since C++11 (only built as an old GCC extension). The
// std::complex::real(double) setter is used instead; like the original,
// the imaginary part is left untouched.
void compute_y(Eigen::Matrix<std::complex<double>, 4, 1> &_x,
           Eigen::Matrix<double, 5, 1> &_y2x_par,
           Eigen::Matrix<double, 6, 1> & _ic1,
           Eigen::Matrix<double, 6, 1> & _ic2,
           Eigen::Matrix<std::complex<double>, 4, 1> &_y)
{
    int flag = 0;               // how many x==0 roots have been served so far
    double x1 = 0.0, x2 = 0.0;  // the two candidate y values for x == 0
    for(int i = 0; i < 4; i++)
    {
        if(_x[i].real() == 0)
        {
            // Solve the restricted quadratic once, guarding the leading
            // coefficient and the discriminant. NOTE(review): the comment in
            // the original read "E^2-4c>0"; the constant term of the
            // restricted quadratic is presumably 1 -- confirm the derivation.
            if(flag == 0 && _ic1[2] != 0 && _ic1[4]*_ic1[4]-4*_ic1[2] >= 0) //C != 0 && E^2-4c>0
            {
                x1 = (- _ic1[4] + sqrt(_ic1[4]*_ic1[4]-4*_ic1[2]) )/ (2 * _ic1[2]);
                x2 = (- _ic1[4] - sqrt(_ic1[4]*_ic1[4]-4*_ic1[2]) )/ (2 * _ic1[2]);
            }
            // Order matters: checking flag==1 before flag==0 ensures one
            // assignment per iteration (first zero-root gets x1, second x2).
            if(flag == 1)
            {
                _y[i].real(x2);
                flag ++;
            }
            if(flag == 0)
            {
                _y[i].real(x1);
                flag ++;
            }
        }
        else
        {
            _y[i] = (-_y2x_par[0]*_x[i]*_x[i] - _y2x_par[2]*_x[i] - _y2x_par[4]) / (_y2x_par[1]*_x[i] + _y2x_par[3]);
        }
    }
}

// Wrapper around the AprilTags pipeline: stores camera intrinsics and runtime
// options, parses command-line flags, and detects/reports tags per frame.
// NOTE(review): m_tagDetector is an owning raw pointer and the class has no
// destructor, so every setup() call leaks one TagDetector -- consider a
// unique_ptr (or delete in a destructor) plus deleted copy operations.
class Demo {
    AprilTags::TagDetector* m_tagDetector; // owning; allocated in setup(), never freed
    AprilTags::TagCodes m_tagCodes;        // active tag family codes

    bool m_draw; // draw image and April tag detections?
    bool m_arduino; // send tag detections to serial port?
    bool m_timing; // print timing information for each tag extraction call

    int m_width; // image size in pixels
    int m_height;
    double m_tagSize; // April tag side length in meters of square black frame
    double m_fx; // camera focal length in pixels
    double m_fy;
    double m_px; // camera principal point
    double m_py;

    int m_deviceId; // camera id (in case of multiple cameras)

    list<string> m_imgNames; // image files passed on the command line

    cv::VideoCapture m_cap;

    int m_exposure;   // -1 = leave camera exposure on auto
    int m_gain;       // -1 = leave camera gain on auto
    int m_brightness; // -1 = leave camera brightness at default

    //Serial m_serial;

public:

    // default constructor
    Demo() :
        // default settings, most can be modified through command line options (see below)
        m_tagDetector(NULL),
        m_tagCodes(AprilTags::tagCodes36h11),

        m_draw(true),
        m_arduino(false),
        m_timing(true),

        m_width(1280),
        m_height(720),
        //m_tagSize(0.166),
        m_tagSize(0.08),
        // hard-coded intrinsics for the 1280x720 camera used by this project
        m_fx(868.4883077877223),
        m_fy(868.8838663318918),
        m_px(647.2946255493767),
        m_py(353.2829270201534),

        m_exposure(-1),
        m_gain(-1),
        m_brightness(-1),

    m_deviceId(0)
  {}

    // changing the tag family; exits the process on an unknown family name
    void setTagCodes(string s) {
    if (s=="16h5") {
      m_tagCodes = AprilTags::tagCodes16h5;
    } else if (s=="25h7") {
      m_tagCodes = AprilTags::tagCodes25h7;
    } else if (s=="25h9") {
      m_tagCodes = AprilTags::tagCodes25h9;
    } else if (s=="36h9") {
      m_tagCodes = AprilTags::tagCodes36h9;
    } else if (s=="36h11") {
      m_tagCodes = AprilTags::tagCodes36h11;
    } else {
      cout << "Invalid tag family specified" << endl;
      exit(1);
    }
  }

    // parse command line options to change default behavior
    void parseOptions(int argc, char* argv[]) {
    int c;
    while ((c = getopt(argc, argv, ":h?adtC:F:H:S:W:E:G:B:D:")) != -1) {
      // Each option character has to be in the string in getopt();
      // the first colon changes the error character from '?' to ':';
      // a colon after an option means that there is an extra
      // parameter to this option; 'W' is a reserved character
      switch (c) {
      case 'h':
      case '?':
        cout << intro;
        cout << usage;
        exit(0);
        break;
      case 'a':
        m_arduino = true;
        break;
      case 'd':
        m_draw = false;
        break;
      case 't':
        m_timing = true;
        break;
      case 'C':
        setTagCodes(optarg);
        break;
      case 'F':
        // single focal length: assume square pixels (fy = fx)
        m_fx = atof(optarg);
        m_fy = m_fx;
        break;
      case 'H':
        // principal point tracks the image centre when the size changes
        m_height = atoi(optarg);
        m_py = m_height/2;
         break;
      case 'S':
        m_tagSize = atof(optarg);
        break;
      case 'W':
        m_width = atoi(optarg);
        m_px = m_width/2;
        break;
      case 'E':
#ifndef EXPOSURE_CONTROL
        cout << "Error: Exposure option (-E) not available" << endl;
        exit(1);
#endif
        m_exposure = atoi(optarg);
        break;
      case 'G':
#ifndef EXPOSURE_CONTROL
        cout << "Error: Gain option (-G) not available" << endl;
        exit(1);
#endif
        m_gain = atoi(optarg);
        break;
      case 'B':
#ifndef EXPOSURE_CONTROL
        cout << "Error: Brightness option (-B) not available" << endl;
        exit(1);
#endif
        m_brightness = atoi(optarg);
        break;
      case 'D':
        m_deviceId = atoi(optarg);
        break;
      case ':': // unknown option, from getopt
        cout << intro;
        cout << usage;
        exit(1);
        break;
      }
    }

    // remaining non-option arguments are treated as image file names
    if (argc > optind) {
      for (int i=0; i<argc-optind; i++) {
        m_imgNames.push_back(argv[optind+i]);
      }
    }
  }

    // allocate the tag detector (leaked -- see class note) and prepare
    // optional drawing / serial output
    void setup() {
    m_tagDetector = new AprilTags::TagDetector(m_tagCodes);

    // prepare window for drawing the camera images
    if (m_draw) {
      //cv::namedWindow(windowName, 1);
    }

    // optional: prepare serial port for communication with Arduino
    if (m_arduino) {
      //m_serial.open("/dev/ttyACM0");
    }
  }

    // print the id and relative pose of one detection and, if drawing is
    // enabled, overlay the pose numbers on the image
    void print_detection(AprilTags::TagDetection& detection,cv::Mat& image) const {
    cout << "  Id: " << detection.id
         << " (Hamming: " << detection.hammingDistance << ")"<<endl<<endl;

    // recovering the relative pose of a tag:

    // NOTE: for this to be accurate, it is necessary to use the
    // actual camera parameters here as well as the actual tag size
    // (m_fx, m_fy, m_px, m_py, m_tagSize)

    Eigen::Vector3d translation;
    Eigen::Matrix3d rotation;
    detection.getRelativeTranslationRotation(m_tagSize, m_fx, m_fy, m_px, m_py,
                                             translation, rotation);

    // axis-remapping matrix; currently the identity (no remap applied)
    Eigen::Matrix3d F;
    F <<
      1, 0,  0,
      0,  1,  0,
      0,  0,  1;
//    F <<
//      1, 0,  0,
//      0,  1,  0,
//      0,  0,  1;
    Eigen::Matrix3d fixed_rot = F*rotation;
    double yaw, pitch, roll;
    wRo_to_euler(fixed_rot, yaw, pitch, roll);

//    cout<<"position_apriltag:"<<"("
//        <<translation(2)*100<<", "
//        <<-translation(1)*100<<", "
//        <<-translation(0)*100<<", "
//        <<translation.norm()*100<<")"<<endl<<endl;
//    cout<<"alttitude_apriltag:"<<"("
//        <<-yaw<<", "
//        <<-pitch<<", "
//        <<roll<<")"<<endl<<endl;
    cout<<"position_apriltag:"<<"("
        <<translation(0)<<", "
        <<translation(1)<<", "
        <<translation(2)<<", "
        <<translation.norm()<<")"<<endl<<endl;
    cout<<"alttitude_apriltag:"<<"("
        <<standardRad(roll)<<", "
        <<standardRad(pitch)<<", "
        <<standardRad(yaw)<<")"<<endl<<endl;
    // show the current image including any detections
    if (m_draw) {

      // NOTE(review): 20 chars per row is tight -- e.g.
      // "roll:  -3.14 = -179.91" is 22 chars + NUL and would overflow;
      // consider snprintf and a wider buffer.
      char position_tag[7][20];
      sprintf(position_tag[0],"X:  %.2lf cm",translation(0)*100);
      sprintf(position_tag[1],"Y:  %.2lf cm",translation(1)*100);
      sprintf(position_tag[2],"Z:  %.2lf cm",translation(2)*100);
      sprintf(position_tag[3],"DISTANCE:  %.2lf ",translation.norm()*100);
      sprintf(position_tag[4],"roll:  %.2lf = %.2lf",standardRad(roll),standardRad(roll)*180/CV_PI);
      sprintf(position_tag[5],"yaw:  %.2lf = %.2lf",standardRad(yaw),standardRad(yaw)*180/CV_PI);
      sprintf(position_tag[6],"pitch:  %.2lf = %.2lf",standardRad(pitch),standardRad(pitch)*180/CV_PI);
      // light-grey backing box behind the text overlay
      Point pt1,pt2;
      pt1.x = 0;
      pt1.y = 00;
      pt2.x = 250;
      pt2.y = 130;
      rectangle(image,pt1,pt2,Scalar(237,237,237),CV_FILLED);
      Point2f Text_pos;
      for(int i=0; i<7; i++)
      {
          Text_pos.x = 20;
          Text_pos.y = 40+20*i;
          //putText(src, position_tag[i], Text_pos, FONT_HERSHEY_DUPLEX, 0.5, Scalar(73,130,255));  // B G R
          putText(image, position_tag[i], Text_pos, FONT_HERSHEY_DUPLEX, 0.5, Scalar(42,42,165));  // B G R
      }
      //imshow(windowName, image); // OpenCV call
    }


    // Also note that for SLAM/multi-view application it is better to
    // use reprojection error of corner points, because the noise in
    // this relative pose is very non-Gaussian; see iSAM source code
    // for suitable factors.
  }

    // run tag extraction on one frame: converts to gray, extracts tags,
    // prints each detection, and (optionally) draws them into the image.
    // windowName is presumably a global defined elsewhere in this file.
    void processImage(cv::Mat& image, cv::Mat& image_gray) {
    // alternative way is to grab, then retrieve; allows for
    // multiple grab when processing below frame rate - v4l keeps a
    // number of frames buffered, which can lead to significant lag
    //      m_cap.grab();
    //      m_cap.retrieve(image);

    // detect April tags (requires a gray scale image)
    cv::cvtColor(image, image_gray, CV_BGR2GRAY);
    double t0;
    if (m_timing) {
      t0 = tic();
    }
    vector<AprilTags::TagDetection> detections = m_tagDetector->extractTags(image_gray);
    if (m_timing) {
      double dt = tic()-t0;
      cout << "Extracting tags took " << dt << " seconds." << endl;
    }

    // print out each detection
    cout << detections.size() << " tags detected:" << endl;
    for (int i=0; i<detections.size(); i++) {
      print_detection(detections[i],image);
    }

    if (m_draw) {
      for (int i=0; i<detections.size(); i++) {
        // also highlight in the image
        detections[i].draw(image);
      }

      imshow(windowName, image); // OpenCV call
    }

  }


}; // Demo


// Run the AprilTag pipeline (class Demo) on one frame.
//  src : BGR input image; annotated in place by Demo::processImage.
//  rvecsMat, tvecsMat, Cam_position : currently unused -- kept so the
//      caller-facing interface stays unchanged.
// Fix: the original constructed a Demo and called setup() on EVERY call,
// leaking a freshly new-ed AprilTags::TagDetector each frame (Demo has no
// destructor). The detector is now created once and reused across calls.
void near_target_detection_solvePosition(Mat src,Mat rvecsMat, Mat tvecsMat, Mat Cam_position)
{
    static Demo demo;              // one detector instance for the whole run
    static bool demo_ready = false;
    if (!demo_ready)
    {
        // process command line options
        //demo.parseOptions(argc, argv);
        demo.setup();
        demo_ready = true;
    }

    Mat image_gray;
    demo.processImage(src, image_gray);
    //while (cv::waitKey(100) == -1) {}
}

