#include "imageUtils.h"
#include "utils.h"

#include <windows.h>

#include <algorithm>
#include <cmath>
#include <cstdlib>

using namespace std;

// Persist an image as a JPEG under the fixed dump directory D:/images.
// Empty images and blank (whitespace-only) names are silently skipped.
void ImageUtils::saveImage(QString name, cv::Mat& image) {
	QString trimmedName = name.trimmed();
	if (image.empty() || trimmedName.isEmpty()) {
		return;
	}
	QString path = QString("D:/images/%1.jpg").arg(trimmedName);
	cv::imwrite(path.toStdString(), image);
}

// Show a debug preview window for the image. Compiled out entirely in
// packaged builds (PROJECT_PACKAGE defined), where no HighGUI is wanted.
void ImageUtils::showImg(QString name, cv::Mat& img) {
#ifndef PROJECT_PACKAGE
	const std::string title = name.toStdString();
	cv::namedWindow(title, cv::WINDOW_NORMAL);	// resizable window
	cv::imshow(title, img);
#endif
}

// Moving-average (box) filter over a 1-D signal.
//
// Window length is 2*step+1 (minimum 3). The first/last half-window samples
// are copied through unchanged; every interior sample becomes the mean of
// the window centered on it. If the input is not longer than the window,
// dstData is left untouched.
//
// NOTE(review): this template is defined in a .cpp file, so it is only
// usable by instantiations visible in this translation unit — confirm the
// needed explicit instantiations exist.
template <typename T>
void ImageUtils::aveFilter(vector<T>& origin, vector<float>& dstData, int step) {
	const size_t len = static_cast<size_t>(step >= 1 ? step * 2 + 1 : 3);
	const size_t midVal = (len - 1) / 2;	// half-window radius
	const size_t n = origin.size();
	if (n <= len) {
		return;	// too few samples to filter
	}
	dstData.resize(n);
	// Pass the edge samples through unchanged.
	for (size_t i = 0; i < midVal; i++) {
		dstData[i] = origin[i];
		dstData[n - 1 - i] = origin[n - 1 - i];
	}
	// One window per output sample. FIX: the previous loop iterated n times
	// and recomputed/overwrote the final window for every out-of-range index;
	// iterating over valid window starts visits each window exactly once.
	// Also uses size_t throughout (the old int loop mixed signed/unsigned).
	for (size_t begin = 0; begin + len <= n; begin++) {
		float sumVal = 0;
		for (size_t j = 0; j < len; j++) {
			sumVal += origin[begin + j];
		}
		dstData[begin + midVal] = sumVal / len;
	}
}

// Average brightness of a BGR image: convert to HSV and return the mean of
// the V channel. Input is assumed to be 3-channel BGR (cvtColor requires it).
int ImageUtils::calculateImageAveGrayVal(cv::Mat& inputImg) {
	cv::Mat hsv;
	cv::cvtColor(inputImg, hsv, cv::COLOR_BGR2HSV);
	const cv::Scalar channelMeans = cv::mean(hsv);
	// channelMeans: [0]=H, [1]=S, [2]=V — only brightness is reported.
	return channelMeans[2];
}

// Draw text onto the image, vertically centered on position.y and starting
// at position.x.
void ImageUtils::drawText(cv::Mat& inputImg, cv::Point& position, QString& text, cv::Scalar& color) {
	// FIX: the original bound a non-const lvalue reference to the temporary
	// returned by toStdString() — ill-formed standard C++ that only compiles
	// as an MSVC extension. Keep the string by value instead.
	string stdText = text.toStdString();
	int fontFace = cv::FONT_HERSHEY_SIMPLEX;
	double fontScale = 1.2;
	int thickness = 2;
	int baseline = 0;	// initialized: getTextSize writes it, but be explicit
	// Measure the rendered text box.
	cv::Size textSize = cv::getTextSize(stdText, fontFace, fontScale, thickness, &baseline);
	// putText anchors at the bottom-left corner; shift down half the text
	// height so the text is centered on position.y.
	cv::Point origin;
	origin.x = position.x;
	origin.y = cvRound(position.y + textSize.height / 2.0);
	cv::putText(inputImg, stdText, origin, fontFace, fontScale, color, thickness);
}

// Display a cv::Mat on a QLabel, scaled to the label's current size.
// `format` must match the Mat's pixel layout (e.g. QImage::Format_BGR888
// for 3-channel BGR) — the caller is responsible for that pairing.
void ImageUtils::liveImage(QLabel* label, cv::Mat& inputImg, QImage::Format format) {
	if (label != nullptr && !inputImg.empty()) {
		// Wrap the Mat's pixel buffer in a QImage without copying.
		// FIX: use the Mat's actual row stride (step) rather than
		// cols*channels — a non-continuous Mat (e.g. an ROI view) has padded
		// rows and the old computation sheared the displayed image.
		QImage dstImg((const uchar*) (inputImg.data), inputImg.cols, inputImg.rows,
		              static_cast<int>(inputImg.step), format);
		label->setPixmap(QPixmap::fromImage(dstImg).scaled(label->size()));
	}
}

// Resize the label so it matches the image's aspect ratio while fitting
// inside the label's parent frame. The parent frame provides the available
// area; the label is then pinned (min == max size) to the computed size.
void ImageUtils::resizeLabelSize(QLabel* label, cv::Mat& image) {
	try {
		// FIX: guard against a null label / empty image — the old code would
		// size against a 0x0 image.
		if (label == nullptr || image.empty()) {
			return;
		}
		int wi = image.cols;
		int hi = image.rows;
		// FIX: qobject_cast returns nullptr when the parent is not a QFrame;
		// the unchecked dereference was a crash (null deref is UB, not an
		// exception, so the catch blocks below never saw it).
		QFrame* frame = qobject_cast<QFrame*>(label->parent());
		if (frame == nullptr) {
			return;
		}
		int width = frame->width();
		int height = frame->height();
		// Aspect ratio (h/w) of the available area.
		float lRatio = height * 1.0f / width;
		if (wi > hi) {	// landscape image
			float iRatio = hi * 1.0f / wi;
			if (lRatio > iRatio) {
				// keep width, shrink height
				height = cvRound(width * iRatio);
			} else if (lRatio < iRatio) {
				// keep height, shrink width
				width = cvRound(height * 1.0f * wi / hi);
			}
		} else if (wi == hi) {	// square image
			width = height;
		} else {	// portrait image: keep height, shrink width
			width = cvRound(height * 1.0f * wi / hi);
		}
		// Pin the label to the computed size.
		label->setMinimumSize(QSize(width, height));
		label->setMaximumSize(QSize(width, height));

	} catch (const std::exception& e) {
		qCritical() << e.what();
	} catch (...) {
		qCritical() << "UNKNOWN ERROR...";
	}

}

// Convenience wrapper: first fit the label to the image's aspect ratio,
// then render the image onto it. See resizeLabelSize / liveImage for the
// individual contracts (format must match the Mat's pixel layout).
void ImageUtils::resizeLabelSizeAndLiveImage(QLabel* label, cv::Mat& image, QImage::Format format) {
	resizeLabelSize(label, image);
	liveImage(label, image, format);
}

// Extract contours from a binary image using two-level retrieval
// (RETR_CCOMP) and compressed, endpoint-only segments. The hierarchy is
// computed but discarded — callers only receive the point lists.
void ImageUtils::findContours(cv::Mat& inputImg, vector<vector<cv::Point>>& contours) {
	vector<cv::Vec4i> topology;
	cv::findContours(inputImg, contours, topology, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);
}

// Append the contour whose axis-aligned bounding rect is widest.
// Requires a non-empty, single-channel (binary) input; otherwise a no-op.
// Note: appends to outputContours without clearing — original contract.
void ImageUtils::findMaxContourByWidth(cv::Mat& inputImg, vector<vector<cv::Point>>& outputContours) {
	if (inputImg.empty() || inputImg.channels() != 1) {
		return;
	}
	vector<vector<cv::Point>> contours;
	ImageUtils::findContours(inputImg, contours);
	if (contours.empty()) {
		return;
	}
	// FIX: size_t index (the old int loop compared signed vs unsigned), and
	// the trailing `maxWidth > 0` test was dropped — the bounding rect of a
	// non-empty contour is always at least one pixel wide, so it was dead.
	size_t maxIdx = 0;
	int maxWidth = cv::boundingRect(contours[0]).width;
	for (size_t i = 1; i < contours.size(); i++) {
		const int w = cv::boundingRect(contours[i]).width;
		if (w > maxWidth) {
			maxWidth = w;
			maxIdx = i;
		}
	}
	outputContours.push_back(contours[maxIdx]);
}

// Replace outputContours with the single contour of largest area.
// Unlike findMaxContourByWidth, this overload clears the output first
// (original contract preserved).
void ImageUtils::findMaxContourByArea(cv::Mat& inputImg, vector<vector<cv::Point>>& outputContours) {
	if (inputImg.empty()) {
		return;
	}
	outputContours.clear();
	// Consistency fix: reuse the shared helper instead of duplicating the
	// cv::findContours call — the RETR_CCOMP / CHAIN_APPROX_SIMPLE parameters
	// are identical to what was inlined here before.
	vector<vector<cv::Point>> contours;
	ImageUtils::findContours(inputImg, contours);
	if (contours.empty()) {
		return;
	}
	// Scan for the contour with the largest area (size_t index fixes the
	// old signed/unsigned comparison).
	double maxArea = 0;
	size_t maxIndex = 0;
	for (size_t i = 0; i < contours.size(); i++) {
		const double area = cv::contourArea(contours[i]);
		if (area > maxArea) {
			maxArea = area;
			maxIndex = i;
		}
	}
	outputContours.push_back(contours[maxIndex]);
}

// Extract the laser-stripe centerline from a BGR frame.
//
// Pipeline: grayscale -> fixed-threshold binarization (stripe mask) ->
// 2x dilation (close small mask holes) -> per-row gray-weighted centroid
// of the masked pixels. Each detected row appends (row, column) to
// centerPoints, where the column is a sub-pixel centroid.
//
// NOTE(review): the `num` parameter is unused here — confirm with callers
// whether it can be dropped from the interface.
void ImageUtils::findCenterPoint(int num, cv::Mat& srcImage, float threshold, std::vector<cv::Vec2f>& centerPoints){
	// Grayscale (input assumed 3-channel BGR; cvtColor throws otherwise).
	cv::Mat grayImg;
	cv::cvtColor(srcImage, grayImg, cv::COLOR_BGR2GRAY);

	// Binarize: pixels brighter than `threshold` become stripe candidates.
	cv::Mat binary;
	cv::threshold(grayImg, binary, threshold, 255, cv::THRESH_BINARY);

	// Dilate twice with a 3x3 rectangle to close small holes in the mask.
	cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
	cv::dilate(binary, binary, kernel, cv::Point(-1, -1), 2);

	// Per-row gray-weighted centroid.
	for (int j = 0; j < binary.rows; ++j) {
		cv::Mat binary_row = binary.row(j);
		cv::Mat img_row = grayImg.row(j);

		// Skip rows without stripe pixels (mask values are 0 or 255, so any
		// hit pushes the row sum to >= 255).
		if (cv::sum(binary_row)[0] < 3) {
			continue;
		}

		// Collect masked columns and their original gray intensities.
		std::vector<int> indices;
		std::vector<float> intensities;
		for (int i = 0; i < binary_row.cols; ++i) {
			if (binary_row.at<uchar>(0, i) > 0) {
				indices.push_back(i);
				intensities.push_back(static_cast<float>(img_row.at<uchar>(0, i)));
			}
		}

		// Centroid = sum(col * gray) / sum(gray).
		float weighted_sum_x = 0;
		float intensity_sum_x = 0;
		for (size_t k = 0; k < indices.size(); ++k) {
			weighted_sum_x += indices[k] * intensities[k];
			intensity_sum_x += intensities[k];
		}

		if (intensity_sum_x > 0) {
			// FIX: the centroid was truncated to int before being stored in a
			// Vec2f, discarding the sub-pixel precision that is the entire
			// point of the gray-centroid method. Keep it as float.
			float u = weighted_sum_x / intensity_sum_x;
			centerPoints.push_back(cv::Vec2f(j, u));
		}
	}
}

// Suppress dark pixels: returns a grayscale copy of `img` with every pixel
// below `threshold` forced to 0 (brighter pixels are kept as-is).
// Also dumps the result to ./data/processImgs/ as a numbered debug image.
cv::Mat ImageUtils::downwardThreFileter(const cv::Mat& img, int threshold)
{
	cv::Mat image;
	// NOTE(review): not thread-safe — assumes single-threaded use; the
	// counter numbers the debug dumps across calls.
	static int imgNum = 0;
	if (img.channels() == 3)	// color input: grayscale first
	{
		cvtColor(img, image, cv::COLOR_BGR2GRAY);
	}
	else
	{
		image = img.clone();
	}
	// FIX: the `threshold` parameter was ignored in favor of a hard-coded 15,
	// making the parameter dead. Use the caller-supplied cutoff.
	for (int i = 0; i < image.rows; i++) {
		uchar* row = image.ptr<uchar>(i);	// row pointer avoids per-pixel at<>
		for (int j = 0; j < image.cols; j++) {
			if (row[j] < threshold)
			{
				row[j] = 0;
			}
		}
	}
	// Debug dump of the filtered result.
	QString saveImg = QString("./data/processImgs/0_") + QString::number(imgNum++) + "downfileter.bmp";
	cv::imwrite(saveImg.toStdString(), image);
	return image;
}

// Drop a detached "tail" from a point sequence: sort by Y, inspect the last
// ~20 points, and if one is farther than distanceThreshold from BOTH of its
// neighbours, keep only the points before it. If no such outlier exists the
// full (sorted) set is returned.
std::vector<cv::Point2f> ImageUtils::filterTailPoints(const std::vector<cv::Point2f>& points, float distanceThreshold)
{
	std::vector<cv::Point2f> filteredPoints;
	if (points.empty()) return filteredPoints;

	// Work on a copy sorted by ascending Y.
	std::vector<cv::Point2f> sortedPoints = points;
	sortPointsByY(sortedPoints);

	// FIX: `size() - 20` underflowed (size_t wrap) whenever fewer than 20
	// points were present, and with exactly 20 points the loop started at
	// i == 0 and read sortedPoints[i - 1] — out-of-bounds UB. Clamp the start
	// index to 1 so i-1 is always valid.
	const size_t n = sortedPoints.size();
	const size_t start = n > 20 ? n - 20 : 1;
	for (size_t i = start; i + 1 < n; ++i) {
		float prevDistance = euclideanDistance(sortedPoints[i], sortedPoints[i - 1]);
		float nextDistance = euclideanDistance(sortedPoints[i], sortedPoints[i + 1]);

		// Far from both neighbours => start of a detached tail: truncate here.
		if (prevDistance > distanceThreshold && nextDistance > distanceThreshold) {
			filteredPoints.assign(sortedPoints.begin(), sortedPoints.begin() + i);
			break;
		}
	}

	// No outlier found: return every point.
	if (filteredPoints.empty()) {
		filteredPoints = sortedPoints;
	}

	return filteredPoints;
}

// Sort the points in place by ascending Y coordinate.
void ImageUtils::sortPointsByY(std::vector<cv::Point2f>& points)
{
	auto byAscendingY = [](const cv::Point2f& lhs, const cv::Point2f& rhs) {
		return lhs.y < rhs.y;
	};
	std::sort(points.begin(), points.end(), byAscendingY);
}

// Euclidean distance between two points.
// std::hypot states the intent directly and avoids the intermediate
// overflow/underflow that sqrt(dx*dx + dy*dy) can suffer; pow(x, 2) was
// also a needlessly expensive way to square.
double ImageUtils::euclideanDistance(const cv::Point2f& p1, const cv::Point2f& p2)
{
	return std::hypot(p1.x - p2.x, p1.y - p2.y);
}

// Per-row laser line extraction at pixel precision: for every row of the
// (single-channel) input, take the column with the highest gray value and
// record it as (column, row). All-black rows are skipped. An annotated
// overlay is dumped to ./data/processImgs/ for debugging.
//
// NOTE(review): despite the name "GrayScaleCenter", this is a max-value
// method, not a centroid — the first maximum wins on ties (the original
// comment promised averaging duplicates, but the code never did that;
// behavior is preserved here). ~60 lines of dead commented-out code from
// earlier experiments were removed.
std::vector<cv::Point2f> ImageUtils::GrayScaleCenter(const cv::Mat& img)
{
	static int imgNum = 0;	// sequence number for the debug dumps
	std::vector<cv::Point2f> pixels;

	// BGR copy used only to visualize the extracted points.
	cv::Mat showPt = img.clone();
	cvtColor(showPt, showPt, cv::COLOR_GRAY2BGR);

	for (int row = 0; row < img.rows; row++)
	{
		// The original locals were confusingly swapped (maxRow held the
		// column, maxCol held the row); renamed to what they actually hold.
		int bestCol = 0;
		float bestVal = 0.0f;
		for (int col = 0; col < img.cols; col++)
		{
			if (img.at<uchar>(row, col) > bestVal)
			{
				bestVal = img.at<uchar>(row, col);
				bestCol = col;
			}
		}
		// Skip rows with no signal at all.
		if (bestVal > 0)
		{
			pixels.push_back(cv::Point2f(bestCol, row));
			// radius 0.3 truncates to 0 — draws a single-pixel mark (kept as-is)
			circle(showPt, cv::Point(bestCol, row), 0.3, cv::Scalar(0, 0, 255), 1, 8);
		}
	}

	// Debug dump of the annotated overlay.
	QString saveImgName = QString("./data/processImgs/1 %1_showPt.bmp").arg(imgNum++);
	imwrite(saveImgName.toStdString(), showPt);

	return pixels;
}

// Laser-stripe center extraction: per image row, locate the brightest
// pixel and refine it to sub-pixel precision with a 3-point parabolic fit
// over the peak and its horizontal neighbours. Rows whose peak does not
// clear a dynamic brightness gate are skipped. Returns (x, y) centers with
// x possibly fractional.
std::vector<cv::Point2f> ImageUtils::simpleMaxValueMethod(const cv::Mat& inputImage)
{
	std::vector<cv::Point2f> centerPoints;

	// Work on a single-channel copy of the input.
	cv::Mat gray;
	if (inputImage.channels() == 3) {
		cv::cvtColor(inputImage, gray, cv::COLOR_BGR2GRAY);
	}
	else {
		gray = inputImage.clone();
	}

	// Gaussian smoothing suppresses single-pixel noise spikes.
	cv::Mat smoothed;
	cv::GaussianBlur(gray, smoothed, cv::Size(5, 5), 0);

	// Dynamic brightness gate: 20% of the mean intensity, at least 5.
	const cv::Scalar meanVal = cv::mean(smoothed);
	const int minBrightness = max(5, static_cast<int>(meanVal[0] * 0.2));

	for (int r = 0; r < smoothed.rows; r++) {
		// Row-wise brightest pixel.
		int peakVal = 0;
		int peakCol = -1;
		for (int c = 0; c < smoothed.cols; c++) {
			const int v = smoothed.at<uchar>(r, c);
			if (v > peakVal) {
				peakVal = v;
				peakCol = c;
			}
		}

		// Guard clause: no stripe in this row.
		if (peakVal <= minBrightness || peakCol == -1) {
			continue;
		}

		// Sub-pixel refinement needs both horizontal neighbours.
		if (peakCol > 0 && peakCol < smoothed.cols - 1) {
			const float left = smoothed.at<uchar>(r, peakCol - 1);
			const float mid = smoothed.at<uchar>(r, peakCol);
			const float right = smoothed.at<uchar>(r, peakCol + 1);

			// Parabola through the three samples; vertex offset from the peak.
			const float denom = left - 2 * mid + right;
			if (std::abs(denom) > 1e-6) {	// non-degenerate fit (avoid /0)
				const float delta = 0.5f * (left - right) / denom;
				if (std::abs(delta) < 1.0) {	// accept only plausible shifts
					centerPoints.push_back(cv::Point2f(peakCol + delta, r));
					continue;
				}
			}
		}

		// Fallback: pixel-level peak position.
		centerPoints.push_back(cv::Point2f(peakCol, r));
	}

	return centerPoints;
}

// Convert an image-space height (pixels) to a real-world height (mm) using
// the calibration values in IPC (pixel size, focal length, pole length).
float ImageUtils::calculateHeight(float imageHeight, const ImageProcessConfig& IPC){
	// Height of the feature on the CCD sensor, in millimetres.
	const float ccdHeight = imageHeight * IPC.pixelSize;
	// Similar-triangles projection back to the measured plane.
	const float realHeight = IPC.poleLength * 2 * ccdHeight / (IPC.focalLength + ccdHeight);
	return realHeight;
}

// Evenly sample the segment p1 -> p2, both endpoints included.
std::vector<cv::Point> ImageUtils::linearInterpolation(const cv::Point& p1, const cv::Point& p2)
{
	vector<cv::Point> interpolatePoints;
	const int dx = p2.x - p1.x;
	const int dy = p2.y - p1.y;
	// FIX: the step count used the signed dy directly, so a descending
	// segment (dy < 0) or a horizontal one (dy == 0, large dx) collapsed the
	// output to just the two endpoints. Use the larger absolute delta so
	// every intermediate pixel position along the segment gets a sample.
	const int steps = max(max(abs(dx), abs(dy)), 1);

	for (int t = 0; t <= steps; ++t)
	{
		const float ratio = static_cast<float>(t) / steps;
		interpolatePoints.emplace_back(cv::Point(p1.x + ratio * dx, p1.y + ratio * dy));
	}
	return interpolatePoints;
}

// Bridge vertical gaps in a point sequence: wherever consecutive points are
// more than `threshold` apart in Y, insert linearly interpolated points
// between them. Exact consecutive duplicates are removed and the result is
// sorted by ascending Y.
std::vector<cv::Point2f> ImageUtils::interpolatePoints(std::vector<cv::Point2f>& points, int threshold)
{
	std::vector<cv::Point2f> interpolatedPoints;

	// Insert interpolated points before each gap, then the point itself.
	// (linearInterpolation works on integer cv::Point — the float points are
	// converted through cv::Point_'s conversion operator, as before.)
	for (size_t i = 0; i < points.size(); ++i) {
		if (i > 0 && abs(points[i].y - points[i - 1].y) > threshold) {
			auto filled = linearInterpolation(points[i - 1], points[i]);
			interpolatedPoints.insert(interpolatedPoints.end(), filled.begin(), filled.end());
		}
		interpolatedPoints.push_back(points[i]);
	}

	// Remove exact consecutive duplicates.
	auto last = std::unique(interpolatedPoints.begin(), interpolatedPoints.end(), [](const cv::Point2f& a, const cv::Point2f& b) {
		return a.x == b.x && a.y == b.y;
		});
	interpolatedPoints.erase(last, interpolatedPoints.end());

	// Sort by Y. FIX: the comparator took const cv::Point& (integer points)
	// while the elements are cv::Point2f, so every comparison truncated the
	// float coordinates through an implicit conversion — compare the Point2f
	// values directly.
	std::sort(interpolatedPoints.begin(), interpolatedPoints.end(), [](const cv::Point2f& a, const cv::Point2f& b) {
		return a.y < b.y;
		});

	return interpolatedPoints;
}

