// #pragma once
#include <stdio.h>
#include <stdlib.h>
// #include <conio.h>
#include <iostream>
#include <Eigen/Dense>
#include <opencv2/opencv.hpp>
#include <vector>
#include "rosconnector.h"
#include <fstream>



#ifndef M_PI
#define M_PI       3.14159265358979323846   // pi
#endif // !M_PI



#define ESTIMATED_DEPTH  //ESTIMATED_DEPTH  REAL_DEPTH
#ifndef ZInCam
#define ZInCam 0.03//3 //0.2515  //0.46
#endif



// Dynamic-size float matrix with row-major storage.
using MatXf = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

// Compute the Moore-Penrose pseudo-inverse of x (definition lives in the .cpp).
MatXf pinv(MatXf x);

// 4点视觉伺服类的构造v
class VS_4Point {
public:
	Eigen::Matrix<float, 6, 1> cameraVel;
	std::vector <Eigen::Matrix<float, 6, 1>> allCameraVel;
	std::vector<Eigen::Matrix<float, 8, 1>> allFeatureError;
private:
// 	cv::waitKey(1);
	// 相机内参
	float focalLengthPIX, u0, v0;
	cv::Mat desImg;
	cv::Mat desImgGray;

	std::vector<cv::Point2f> AprilTagCenters;
	std::vector<cv::Point2f> AprilTagPconners;
	// std::vector<vpImagePoint> Pconners;
	std::vector<cv::Point2f> des_Four_Centers;
    std::vector<double> des_apriltag_positions;

	//从servoinglaw（）中拿出来的
	Eigen::Matrix<float, 8, 1> desiredImageFeature_pix;// 图像坐标系下，单位：in pixels   
	Eigen::Matrix<float, 8, 1> projectPoint_pix;
	Eigen::Matrix<float, 8, 1> desiredImageFeature; //图像坐标系下的
	Eigen::Matrix<float, 8, 1> projectPoint;
	Eigen::Matrix<float, 8, 1> featureError;

	Eigen::Matrix<float, 3, 1> current_center_positions;;

	// 设置增益λ
	float lamda = 0; //intial value : 2
	float Z1, Z2, Z3, Z4;
	Eigen::Matrix<float, 4, 1> depth; //存储深度用（懒得改了）


	//apriltag
	//vpDetectorAprilTag detector;  //默认使用TAG_16h5  自己改的函数  //默认poseEstimationMethod = HOMOGRAPHY_VIRTUAL_VS 

	double tagSize = 0.0026;

	// vpCameraParameters cam;  //相机参数
	//std::vector<vpHomogeneousMatrix> cMo_vec;  //List of tag poses.  //可以啊



	//特征点期望位置，图像坐标系下，单位 in pixels
	Eigen::Matrix<float, 4, 1> u_d, v_d;
	//特征点当前位置,图像坐标系下 in pixels
	Eigen::Matrix<float, 4, 1> u_current, v_current;
	//特征点误差 in pixels 
	Eigen::Matrix<float, 4, 1> e_u, e_v;
	Eigen::Matrix<float, 8, 1> e_u_v;  //用于计算 



	//Eigen::Matrix<float, 4, 1> temp_u, temp_v; //中间临时量
	// 图像雅克比8x6
	Eigen::Matrix<float, 8, 6> interactionMatrix_inPixel;
	Eigen::Matrix<double, 3, 1> delta_pos_in_end;
	Eigen::Matrix<double, 3, 1> delta_pos;




public:
	VS_4Point(std::vector<cv::Point2f> des_Four_Centers, float focalLengthPIX_, float u0_, float v0_, std::vector<double> des_apriltag_positions_);
	void servoing();
	// void showerror();
	void VScontrol_simulation();
	// void VScontrol();
	bool VS_success = false;

private:
	void servoingLaw();
	void AprilTagDetection(cv::Mat& Img);

};










//
//
//// 像素类
//class Luminance
//{
//public:
//	float x, y;   // 点坐标 (米)
//	float Ix, Iy; // 梯度
//};
//// 光度视觉伺服类
//class VS_photometric {
//public:
//	 Eigen::Matrix<float, 6, 1> cameraVel;
//	Eigen::Matrix<double, 6, 1> norm_cameraVel;
//	std::vector<Eigen::Matrix<float, 6, 1>> allCameraVel;
//	std::vector<double> allErrorMean;
//	///////////////////////////////////////////////////////////////////////////////ROS*  connector;
//	//cv::Mat desImgGray;
//private:
//	// 相机内参
//	float focalLengthPIX, u0, v0;
//	cv::Mat desImg;
//	cv::Mat desImgGray;
//	cv::Mat descutimg;
//	unsigned int bord1,bord2; //10
//	// 使用像素点的数量
//	unsigned int pixelNum;
//	// 像素类的数组
//	Luminance *pixInfo;
//	Eigen::MatrixXd Lsd;
//	Eigen::MatrixXd Hsd;
//	Eigen::MatrixXd diagHsd;
//public:
//	VS_photometric(cv::Mat desImg_, float focalLengthPIX_, float u0_, float v0_);
//	~VS_photometric();
//	void servoing(cv::Mat camGetImage);
//	void showerror(cv::Mat camGetImage);
//	void plotcameraVel();
//	void VScontrol();
//	void VScontrol_simulation();
//private:
//	// 像素坐标系转化为相机坐标系下归一化坐标值
//	void convertPoint(const double &u, const double &v, double &x, double &y) {
//		x = (u - u0) / focalLengthPIX;
//		y = (v - v0) / focalLengthPIX;
//	}
//
//
//	float derivativeFilterX(cv::Mat &I, const unsigned int r, const unsigned int c) {
//		return (2047.0 * (I.at<uchar>(r, c + 1) - I.at<uchar>(r, c - 1)) +
//			913.0 * (I.at<uchar>(r, c + 2) - I.at<uchar>(r, c - 2)) +
//			112.0 * (I.at<uchar>(r, c + 3) - I.at<uchar>(r, c - 3))
//			+ 1241 * (I.at<uchar>(r + 1, c + 1) - I.at<uchar>(r - 1, c - 1) + I.at<uchar>(r - 1, c + 1) - I.at<uchar>(r + 1, c - 1))
//			+ 554 * (I.at<uchar>(r + 1, c + 2) - I.at<uchar>(r - 1, c - 2) + I.at<uchar>(r - 1, c + 2) - I.at<uchar>(r + 1, c - 2))
//			+ 277 * (I.at<uchar>(r + 2, c + 1) - I.at<uchar>(r - 2, c - 1) + I.at<uchar>(r - 2, c + 1) - I.at<uchar>(r + 2, c - 1))
//			+ 124 * (I.at<uchar>(r + 2, c + 2) - I.at<uchar>(r - 2, c - 2) + I.at<uchar>(r - 2, c + 2) - I.at<uchar>(r + 2, c - 2))) /
//			19914.0;
//	}
//
//	float derivativeFilterY(cv::Mat &I, const unsigned int r, const unsigned int c) {
//		return (2047.0 * (I.at<uchar>(r + 1, c) - I.at<uchar>(r - 1, c)) +
//			913.0 * (I.at<uchar>(r + 2, c) - I.at<uchar>(r - 2, c)) +
//			112.0 * (I.at<uchar>(r + 3, c) - I.at<uchar>(r - 3, c))
//			+ 1241 * (I.at<uchar>(r + 1, c + 1) - I.at<uchar>(r - 1, c - 1) + I.at<uchar>(r + 1, c - 1) - I.at<uchar>(r - 1, c + 1))
//			+ 554 * (I.at<uchar>(r + 2, c + 1) - I.at<uchar>(r - 2, c - 1) + I.at<uchar>(r + 2, c - 1) - I.at<uchar>(r - 2, c + 1))
//			+ 277 * (I.at<uchar>(r + 1, c + 2) - I.at<uchar>(r - 1, c - 2) + I.at<uchar>(r + 1, c - 2) - I.at<uchar>(r - 1, c + 2))
//			+ 124 * (I.at<uchar>(r + 2, c + 2) - I.at<uchar>(r - 2, c - 2) + I.at<uchar>(r + 2, c - 2) - I.at<uchar>(r - 2, c + 2))) /
//			19914.0;
//	}
//	// // 计算interaction矩阵
//	void getInteraction();
//};