// Copyright (c) 2022 ChenJun
// Licensed under the Apache-2.0 License.

// C system / POSIX headers
#include <execinfo.h> // For backtrace()
#include <signal.h>
#include <stdint-gcc.h>
#include <stdlib.h>
#include <unistd.h> // For sleep()/access()

// C++ standard library
#include <chrono>  // sleep intervals in serial_check_thread
#include <cmath>   // powf, sqrt
#include <cstdint>
#include <cstring> // strcpy, strlen
#include <fstream>
#include <iostream>
#include <iostream> // (duplicate kept from original)
#include <sstream>
#include <thread>

// OpenCV
// #include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv4/opencv2/opencv.hpp>

// Project
#include "Serial.hpp"
#include "crc16.cpp" // NOTE(review): including a .cpp is fragile — prefer a header + separate TU

using namespace cv;
using namespace dnn;
using namespace std;
// SIGSEGV handler: dump a stack trace to stderr, then terminate.
// NOTE: program state may already be corrupted when this runs, so only
// async-signal-tolerant work is attempted before exiting.
void sigsegv_handler(int signal_num)
{
	void *frames[10];

	// Collect up to 10 return addresses from the current call stack.
	const size_t depth = backtrace(frames, 10);

	// Write the trace straight to stderr (fd-based, avoids malloc in the
	// symbolization path as much as possible).
	fprintf(stderr, "Error: Signal %d:\n", signal_num);
	backtrace_symbols_fd(frames, depth, STDERR_FILENO);

	exit(EXIT_FAILURE);
}
// YOLO-FastestV2 detector that reports target positions over a serial link.
class yolo_fast
{
public:
	// Packed wire message: 1-byte header (0xA5) + x/y/z floats + CRC16.
	// __attribute__((packed)) guarantees the on-wire layout has no padding.
	typedef struct message
	{
		uint8_t head;
		float x;
		float y;
		float z;
		uint16_t check_sum; // CRC16 over all preceding bytes
	} __attribute__((packed)) message_t;

	// Lets the packed message be viewed as a raw byte buffer for CRC/sending.
	typedef union message_union
	{
		message_t mes;
		unsigned char data[sizeof(message_t)];
	} message_union_t;
	yolo_fast(string modelpath, float objThreshold, float confThreshold, float nmsThreshold);
	~yolo_fast();
	void detect(Mat &srcimg);
	void send_data(message_t mes);
	double color_detect(Mat &frame, Rect roi);
	void serial_check_thread();

private:
	// Anchor (w,h) pairs per detection stage:
	// 35.08,48.59, 49.96,67.36, 65.38,87.75, 85.22,114.63, 143.08,182.97, 230.58,285.34
	const float anchors[2][6] = {{35.08, 48.59, 49.96, 67.36, 65.38, 87.75}, {85.22, 114.63, 143.08, 182.97, 230.58, 285.34}};
	// BUG FIX: was declared float[3] with only two initializers, leaving a
	// silent trailing 0; there are exactly num_stage (2) strides.
	const float stride[2] = {16.0, 32.0};
	const int inpWidth = 352;  // network input width
	const int inpHeight = 352; // network input height
	const int num_stage = 2;   // number of detection heads
	const int anchor_num = 3;  // anchors per grid cell
	float objThreshold;	 // objectness score threshold
	float confThreshold; // class confidence threshold
	float nmsThreshold;	 // NMS IoU threshold
	vector<string> classes;
	std::shared_ptr<Serial> s1; // created asynchronously by serial_check_thread
	// Serial device path template; the trailing '-' is replaced with the
	// probed port digit (e.g. /dev/ttyACM0) in serial_check_thread.
	char serial_str[13] = {'/','d','e','v','/','t','t','y','A','C','M','-','\0'};
	const string classesFile = "../resource/apple.names";
	int num_class;
	Net net;
	vector<float> drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat &frame);
};
// Background thread: probe /dev/ttyACM0..8 for an existing device, open it,
// then monitor the node forever, closing on unplug and reopening on replug.
void yolo_fast::serial_check_thread(){
	// BUG FIX: the original used `char tmp[strlen(serial_str)]` — a VLA one
	// byte too small for the terminating '\0' written by strcpy (and VLAs
	// with initializers are non-standard C++). Use the full buffer size.
	char tmp[sizeof(serial_str)] = {0};
	strcpy(tmp, serial_str);
	for (size_t i = 0; i < 9; i++)
	{
		// Replace the trailing '-' placeholder with the digit '0'+i.
		tmp[strlen(serial_str)-1] = '0' + i;
		if(access(tmp, F_OK)==0){
			cout<<"find "<<tmp<<endl;
			strcpy(serial_str,tmp);
			break;
		}
	}
	s1 = std::make_shared<Serial>();
	s1->OpenSerial(serial_str, E_BaudRate::_115200, E_DataSize::_8, E_Parity::None, E_StopBit::_1);
	while (true)
	{
		if(access(serial_str, F_OK)==-1){
			// Device node vanished: close the port once.
			if(s1->b_OpenSign)
			{
				s1->Close();
				s1->b_OpenSign=false;
				cout<<"disconnect"<<endl;
			}
		}else{
			// Device node is back: reopen with a fresh Serial object.
			if(!s1->b_OpenSign){
				s1 = std::make_shared<Serial>();
				s1->OpenSerial(serial_str, E_BaudRate::_115200, E_DataSize::_8, E_Parity::None, E_StopBit::_1);
				cout<<"reconnect"<<endl;
			}
		}
		// BUG FIX: the original loop had no delay and pinned a CPU core at
		// 100%; a 200 ms poll interval is plenty for hotplug detection.
		std::this_thread::sleep_for(std::chrono::milliseconds(200));
	}
}
// Destructor: close the serial port if it was ever opened.
// BUG FIX: s1 is created asynchronously by the detached serial_check_thread;
// the original dereferenced it unconditionally and crashed if destruction
// happened before the thread had constructed it.
yolo_fast::~yolo_fast(){
	if (s1)
		s1->Close();
}
// Construct the detector: store thresholds, load class names and the ONNX
// model, and start the serial hotplug-monitor thread.
// NOTE(review): the thread is detached and captures `this`, so destroying a
// yolo_fast while the thread runs is a use-after-free — acceptable here only
// because the object lives for the whole program (constructed in main).
yolo_fast::yolo_fast(string modelpath, float obj_Threshold, float conf_Threshold, float nms_Threshold)
{
	this->objThreshold = obj_Threshold;
	this->confThreshold = conf_Threshold;
	this->nmsThreshold = nms_Threshold;

	ifstream ifs(this->classesFile.c_str());
	if (!ifs.is_open())
	{
		// Without class names the detector cannot label anything; bail out
		// early (net stays unloaded and the serial thread is not started).
		cerr << "Failed to open classes file." << endl;
		return;
	}
	// One class name per line.
	string line;
	while (getline(ifs, line))
		this->classes.push_back(line);
	this->num_class = this->classes.size();
	this->net = readNet(modelpath);

	// CLEANUP: std::bind is unnecessary — pass the member function pointer
	// and object directly to std::thread.
	std::thread check_thread(&yolo_fast::serial_check_thread, this);
	check_thread.detach();
}

// Draw the predicted bounding box and label on `frame`, then estimate the
// object's 3D position in the camera frame via PnP on the box corners.
// Returns {x, y, z} in mm (same units as the model points below).
vector<float> yolo_fast::drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat &frame)
{
	// Draw a rectangle displaying the bounding box
	rectangle(frame, Point(left, top), Point(right, bottom), Scalar(0, 0, 255), 5);

	// 2D corners in order TL, TR, BR, BL — must match model_points 1:1.
	vector<Point2d> image_points;
	image_points.push_back(Point2d(left, top));
	image_points.push_back(Point2d(right, top));
	image_points.push_back(Point2d(right, bottom));
	image_points.push_back(Point2d(left, bottom));

	// 3D model points (mm): the target is modelled as a 50x50 mm square
	// centred at the origin, lying in the z=0 plane.
	std::vector<Point3d> model_points;
	model_points.push_back(Point3d(-25.0f, -25.0f, 0));
	model_points.push_back(Point3d(+25.0f, -25.0f, 0));
	model_points.push_back(Point3d(+25.0f, +25.0f, 0));
	model_points.push_back(Point3d(-25.0f, +25.0f, 0));

	// Camera intrinsics and distortion coefficients from calibration.
	Mat camera_matrix = (Mat_<double>(3, 3) << 1302.673393, 0.000000, 236.038849, 0.000000, 1305.954133, 171.824157, 0.000000, 0.000000, 1.000000);
	Mat dist_coeffs = (Mat_<double>(5, 1) << -0.046741, 0.926625, -0.012113, -0.034848, 0.000000);

	Mat rotation_vector;	// rotation (not used downstream)
	Mat translation_vector; // object position in the camera frame

	// useExtrinsicGuess = 0 (false), flags = SOLVEPNP_ITERATIVE.
	solvePnP(model_points, image_points, camera_matrix, dist_coeffs,
			 rotation_vector, translation_vector, 0, SOLVEPNP_ITERATIVE);

	// CLEANUP: the original also computed Rodrigues(rotation_vector) and
	// P_oc = -R^-1 * T plus an unused local message_t — all dead code,
	// removed. Only the translation vector is consumed.
	Mat_<float> Tvec;
	translation_vector.convertTo(Tvec, CV_32F);

	float world_x = Tvec.at<float>(0, 0);
	float world_y = Tvec.at<float>(1, 0);
	float world_z = Tvec.at<float>(2, 0);

	// Label "<class>:<confidence>".
	// ROBUSTNESS: guard the class-name lookup against an out-of-range id
	// (the original indexed classes[classId] unconditionally).
	string label = format("%.2f", conf);
	if (classId >= 0 && classId < (int)this->classes.size())
		label = this->classes[classId] + ":" + label;

	// Display the label at the top of the bounding box, clamped into view.
	int baseLine;
	Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
	top = max(top, labelSize.height);
	putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 255, 0), 5);

	return {world_x, world_y, world_z};
}

// double yolo_fast::color_detect(Mat &frame)
// {
// 	// 获取视频的宽度和高度
// 	int width = 640;
// 	int height = 480;

// 	if (frame.empty())
// 	{
// 		cout << "画面为空" << endl;
// 	}
// 	// 转换到HSV颜色空间
// 	cv::Mat hsv;
// 	cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);

// 	// 定义红色范围
// 	cv::Scalar lower_red = {126, 126, 0};
// 	cv::Scalar upper_red = {179, 255, 255};

// 	// 创建掩模
// 	cv::Mat mask;
// 	cv::inRange(hsv, lower_red, upper_red, mask);

// 	imshow("ImageMask", mask);
// 	cv::waitKey(1);
// 	// 计算红色像素数量
// 	int red_pixel_count = cv::countNonZero(mask);

// 	// 计算红色像素占比
// 	double ratio = static_cast<double>(red_pixel_count) / (width * height);

// 	std::cout << "Frame Red Pixel Ratio: " << ratio << std::endl;

// 	return ratio;
// }
// Return the fraction of pixels inside `roi` whose HSV value falls in the
// "red" band H:[126,179] S:[126,255] V:[0,255] (ripe-apple test).
double yolo_fast::color_detect(Mat &frame, Rect roi)
{
	// BUG FIX: detection boxes can have negative left/top or extend past the
	// frame borders; the original looped over the raw roi and read out of
	// bounds via hsv.at<>. Clamp the roi to the image first.
	roi &= Rect(0, 0, frame.cols, frame.rows);
	if (roi.width <= 0 || roi.height <= 0)
		return 0.0; // nothing visible inside the frame

	// Convert only the region of interest — cheaper than the whole frame.
	cv::Mat hsv;
	cv::cvtColor(frame(roi), hsv, cv::COLOR_BGR2HSV);

	// Red range (Hue wraps at 180 in OpenCV; only the upper band is used here).
	cv::Scalar lower_red = {126, 126, 0};
	cv::Scalar upper_red = {179, 255, 255};

	// IDIOM: inRange + countNonZero replaces the original hand-rolled
	// per-pixel loop with identical band semantics.
	cv::Mat mask;
	cv::inRange(hsv, lower_red, upper_red, mask);
	int redPixelCount = cv::countNonZero(mask);

	// Ratio over the (clamped) region actually examined.
	return static_cast<double>(redPixelCount) / (roi.width * roi.height);
}
// Run the two-stage YOLO-FastestV2 network on `frame`, filter the detections
// by redness, pick the nearest target (smallest z) and send its position
// over serial. Coordinates printed/sent appear to be in dm (see /100 below).
void yolo_fast::detect(Mat &frame)
{
	Mat blob;
	float x, y, z;
	blobFromImage(frame, blob, 1 / 255.0, Size(this->inpWidth, this->inpHeight));
	this->net.setInput(blob);
	vector<Mat> t_outs;
	vector<Mat> outs;
	this->net.forward(t_outs, this->net.getUnconnectedOutLayersNames());
	// Expect exactly two heads: a 22x22 grid (352/16) and an 11x11 grid
	// (352/32). 7744 = 22*22*16 and 1936 = 11*11*16, i.e. this guard assumes
	// nout == 16 (anchor_num*5 + 1 class) — TODO confirm if classes change.
	if(t_outs.size()!=2)return;
	if(t_outs[0].total()!=7744||t_outs[1].total()!=1936)return;
	cout<<"total:"<<t_outs[0].total()<<"|"<<t_outs[1].total()<<endl;
	int nout = this->anchor_num * 5 + this->classes.size();
	// Flatten both heads into one (22*22 + 11*11 = 605) x nout matrix so the
	// proposal loop below can walk a single contiguous buffer.
	Mat mOut = Mat::zeros(605, nout, CV_32F);
	for (int i = 0; i < 22; i++)
	{
		for (int j = 0; j < 22; j++)
		{
			for (int k = 0; k < nout; k++)
			{
				// float d = *((float *)t_outs[0].data + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k);
				// *((float *)mOut.data + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k) = d;
				*((float *)mOut.data + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k) = *((float *)t_outs[0].data + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k);
			}
		}
	}
	// Second head is appended after the first 22*22 rows of mOut.
	for (int i = 0; i < 11; i++)
	{
		for (int j = 0; j < 11; j++)
		{
			for (int k = 0; k < nout; k++)
			{
				// float d = *((float *)t_outs[1].data + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k);
				// *((float *)mOut.data + 22 * 22 * nout + 0 * 22 * 22 * nout + i * 22 * nout + j * nout + k) = d;
				*((float *)mOut.data + 22 * 22 * nout + 0 * 11 * 11 * nout + i * 11 * nout + j * nout + k) = *((float *)t_outs[1].data + 0 * 11 * 11 * nout + i * 11 * nout + j * nout + k);
			}
		}
	}
	outs.push_back(mOut);
	/////generate proposals
	vector<int> classIds;
	vector<float> confidences;
	vector<Rect> boxes;
	// Scale factors from network input size back to the original frame size.
	float ratioh = (float)frame.rows / this->inpHeight, ratiow = (float)frame.cols / this->inpWidth;
	int n = 0, q = 0, i = 0, j = 0, row_ind = 0;
	float *pdata = (float *)outs[0].data;
	for (n = 0; n < this->num_stage; n++) /// stage
	{
		int num_grid_x = (int)(this->inpWidth / this->stride[n]);
		int num_grid_y = (int)(this->inpHeight / this->stride[n]);
		for (i = 0; i < num_grid_y; i++)
		{
			for (j = 0; j < num_grid_x; j++)
			{
				// Row layout per cell: [4*anchor_num box coords][anchor_num
				// objectness scores][class scores]. Class scores are shared
				// across the cell's anchors.
				Mat scores = outs[0].row(row_ind).colRange(this->anchor_num * 5, outs[0].cols);
				Point classIdPoint;
				double max_class_socre;
				// Get the value and location of the maximum score
				minMaxLoc(scores, 0, &max_class_socre, 0, &classIdPoint);
				for (q = 0; q < this->anchor_num; q++) /// anchor
				{
					const float anchor_w = this->anchors[n][q * 2];
					const float anchor_h = this->anchors[n][q * 2 + 1];
					float box_score = pdata[4 * this->anchor_num + q];
					if (box_score > this->objThreshold && max_class_socre > this->confThreshold)
					{
						// Decode: centre = (2*pred - 0.5 + cell) * stride,
						// size = (2*pred)^2 * anchor.
						float cx = (pdata[4 * q] * 2.f - 0.5f + j) * this->stride[n];	  /// cx
						float cy = (pdata[4 * q + 1] * 2.f - 0.5f + i) * this->stride[n]; /// cy
						float w = powf(pdata[4 * q + 2] * 2.f, 2.f) * anchor_w;			  /// w
						float h = powf(pdata[4 * q + 3] * 2.f, 2.f) * anchor_h;			  /// h

						int left = (cx - 0.5 * w) * ratiow;
						int top = (cy - 0.5 * h) * ratioh; ///

						classIds.push_back(classIdPoint.x);
						confidences.push_back(box_score * max_class_socre);
						boxes.push_back(Rect(left, top, (int)(w * ratiow), (int)(h * ratioh)));
					}
				}
				row_ind++;
				pdata += nout;
			}
		}
	}

	// Perform non maximum suppression to eliminate redundant overlapping boxes with
	// lower confidences
	vector<int> indices;
	vector<vector<float>> apple;
	message_t msg = {0};
	NMSBoxes(boxes, confidences, this->confThreshold, this->nmsThreshold, indices);
	for (size_t i = 0; i < indices.size(); ++i)
	{
		int idx = indices[i];
		Rect box = boxes[idx];

		// cv::Rect roi(box.x, box.y, box.width, box.height);

		// Keep only boxes whose red-pixel ratio exceeds 20%; drawPred returns
		// the PnP position {x, y, z} in mm.
		if (color_detect(frame, box) > 0.2)
		{
			// drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat &frame)

			// rectangle(frame, Point(box.x, box.y), Point(box.x + box.width, box.y + box.height), Scalar(0, 0, 255), 5);
			apple.push_back(this->drawPred(classIds[idx], confidences[idx], box.x, box.y,
										   box.x + box.width, box.y + box.height, frame));
		}
	}
	// Nothing passed the detection + color filter: skip sending.
	if (apple.empty())
	{
		std::cout << "apple vector is empty, no data to process." << std::endl;
		return;
	}
	else
	{
		// Select the closest apple (smallest z / depth).
		float min = apple[0][2];
		int j = 0;
		for (size_t i = 0; i < apple.size(); i++)
		{
			if (apple[i][2] < min)
			{
				min = apple[i][2];
				j = i;
			}
		}
		// Camera is mounted rotated, so axes are swapped/negated; /100 looks
		// like a mm -> dm conversion (output is logged as "dm"). The -2 on z
		// is presumably a fixed mounting offset — TODO confirm.
		x = -apple[j][1] / 100;
		y = -apple[j][0] / 100;
		z = apple[j][2] / 100 - 2;
		// Manual alignment fine-tuning offsets (dm).
		x += 0.2;

		y += 0.1;
		// Fill the outgoing serial message.
		msg.x = x;
		msg.y = y;
		msg.z = z;
		// msg.x = apple[j][0] / 100;
		// msg.y = -apple[j][1] / 100;
		// msg.z = apple[j][2] / 100-2;
	}
	// if (color_detect(frame) > 0.5)
	// {
	// 	msg.x = -1;
	// 	msg.y = -1;
	// 	msg.z = -1;
	// }

	std::cout << "X坐标: " << msg.x << " dm" << std::endl;
	std::cout << "y坐标: " << msg.y << " dm" << std::endl;
	std::cout << "z坐标: " << msg.z << " dm" << std::endl;
	std::cout << "绝对距离: " << sqrt(msg.z * msg.z + msg.y * msg.y + msg.x * msg.x) << " dm" << std::endl;
	send_data(msg);
}

// Frame and transmit one position message: set the 0xA5 header, compute the
// CRC16 over everything except the trailing 2-byte checksum, and send.
void yolo_fast::send_data(message_t mes)
{
	// BUG FIX: s1 is created asynchronously by serial_check_thread; guard
	// against calling through a null shared_ptr before it exists.
	if (!s1 || !s1->b_OpenSign)
	{
		cout << "serial fail!" << endl;
		return;
	}
	message_union_t u;
	u.mes = mes;
	u.mes.head = 0xA5;
	// CRC covers head + x + y + z (sizeof(message_t) - 2 checksum bytes).
	u.mes.check_sum = crc16::Get_CRC16_Check_Sum(u.data, sizeof(message_t) - 2, 0xFFFF);
	s1->Send(u.data, sizeof(message_t));
}

// Entry point: open the camera, run detection on every frame, display the
// annotated stream, and exit on ESC.
int main()
{
	// signal(SIGSEGV, sigsegv_handler); // optionally register the crash handler

	// Model path plus objectness / confidence / NMS thresholds.
	yolo_fast yolo_model("../resource/3yolo-fastestv2.onnx", 0.3, 0.3, 0.4);

	// V4L2 device node — OpenCV opens this path as a live camera stream.
	string videoPath = "/dev/video0";
	cv::VideoCapture cap(videoPath);
	if (!cap.isOpened())
	{
		cerr << "Error opening video file" << endl;
		return -1;
	}

	static const string kWinName = "Deep learning object detection in OpenCV";
	namedWindow(kWinName, WINDOW_NORMAL);

	while (true)
	{
		Mat frame;

		// Grab the next frame; stop on end-of-stream or camera loss.
		if (!cap.read(frame))
		{
			cerr << "End of video stream" << endl;
			break;
		}

		yolo_model.detect(frame); // run detection (draws boxes into frame)

		// Rotate for display only — detection ran on the unrotated frame.
		cv::rotate(frame, frame, cv::ROTATE_90_CLOCKWISE);
		imshow(kWinName, frame);

		// BUG FIX: the original tested `waitKey(1) >= 27`, which exits on
		// almost any key (letters, digits, ...). Exit on ESC (27) only,
		// matching the stated intent.
		if (waitKey(1) == 27)
		{
			break;
		}
	}

	cap.release(); // release the capture device
	destroyAllWindows();
	// BUG FIX: the original returned -1 even after a clean loop exit.
	return 0;
}
