#pragma execution_character_set("utf-8")
#include "jointimage.h"
#include <QMessageBox>
#include <QDebug>
using namespace std;
using namespace cv;
// Construct the image-processing helper; forwards `parent` to QObject so
// Qt's parent/child ownership manages this object's lifetime.
jointImage::jointImage(QObject *parent) : QObject(parent)
{
}
//szDirPath: destination directory for the generated images, e.g. "E:/jointImg/dstImage"
//szPic: full path of the source image file
// Threshold the source image, keep the large low-lying contours (the tower
// silhouette), and write the intermediate and Canny-edge images into
// m_resultImageDir. Also records szDirPath/szPic in the members used by
// getPicTowerPart. Returns early (with a console message) on any failure.
void jointImage::getTowerLines(QString szDirPath, QString szPic)
{
	m_resultImageDir = szDirPath;
	m_srcImagePath = szPic;
	std::string imagePath = szPic.toLocal8Bit().toStdString();
//1. Load the source image
	cv::Mat src = cv::imread(imagePath);
//Bail out if the image could not be loaded
	if (src.empty())
	{
		cout << "Images loaded failed..." << endl;
		return ;
	}
	else
		cout << "Images loaded success..." << endl;
	int nCols = src.cols;
	int nRows = src.rows;
	cv::Mat srcGray;
//cv::imread returns pixels in BGR order, so BGR2GRAY is the correct
//conversion (the original RGB2GRAY swapped the R and B channel weights)
	cv::cvtColor(src, srcGray, cv::COLOR_BGR2GRAY);
//Create destination images of the same size as the source (all zeros)
	Mat dst = Mat::zeros(nRows, nCols, CV_8UC3);
	Mat dst2 = Mat::zeros(nRows, nCols, CV_8UC3);
//Binarize: pixels brighter than 120 become 255, all others become 0
	threshold(srcGray, dst, 120, 255, THRESH_BINARY);
	string szPath = m_resultImageDir.toLocal8Bit().toStdString();
	std::string szThresholdPicPath = szPath + "\\threshold11.jpg";
	imwrite(szThresholdPicPath, dst);
//Morphological opening (erode then dilate) with a 6x6 rectangular kernel
//removes small bright specks outside the main shapes
	Mat element = getStructuringElement(MORPH_RECT, Size(6, 6));
	morphologyEx(dst, dst2, MORPH_OPEN, element, Point(-1, -1), 1);
	std::string szmorphologyPicPath = szPath + "\\morphology11.jpg";
	imwrite(szmorphologyPicPath, dst2);
	std::vector<std::vector<cv::Point>> contours;
	vector<Vec4i>hierarchy;
//Find contours (project wrapper around cv::findContours)
	this->FindContours(dst2, contours, hierarchy, CV_RETR_LIST, CHAIN_APPROX_SIMPLE, Point());
//Guard against an empty result: the average-area division below would
//otherwise divide by zero
	if (contours.empty())
	{
		cout << "No contours found..." << endl;
		return ;
	}
//Accumulate the total contour area to derive the average
	double fTotalArea = 0;
	for (const std::vector<cv::Point> &aContour : contours)
	{
		fTotalArea += contourArea(aContour);
	}
	double fAverageArea = fTotalArea / double(contours.size());
//Discard contours smaller than the average area, or whose first point
//lies above y=300 (assumed background/sky region -- TODO confirm the
//threshold against the expected image layout)
	std::vector<std::vector<cv::Point>>::iterator itr = contours.begin();
	while (itr != contours.end())
	{
		if (itr->empty() || (contourArea(*itr) < fAverageArea) || ((*itr)[0].y < 300))
		{
			itr = contours.erase(itr);
		}
		else
		{
			++itr;
		}
	}
//Draw the surviving contours filled in white on a black canvas.
//NOTE(review): hierarchy no longer matches contours after the erase
//above, but with maxLevel==0 drawContours only draws the indexed
//contour and does not walk the hierarchy.
	Mat data = Mat::zeros(nRows, nCols, CV_8UC3);
	for (int nIndex = 0; nIndex < int(contours.size()); ++nIndex)
	{
		drawContours(data, contours, nIndex, Scalar(255, 255, 255), -1, 8, hierarchy, 0, Point());
	}
	string szContourPicPath = szPath + "\\contour1.jpg";
	imwrite(szContourPicPath, data);
//2. Extract the contour edges with Canny
	cv::Mat cannyImg;
	cv::Canny(data, cannyImg, 120, 180);
	std::string szCannyPicPath = szPath + "\\canny1.jpg";
	imwrite(szCannyPicPath, cannyImg);
}
//static int nPixelCutLen = -20;//previously used hypotenuse margin (kept for reference)
static int nPixelCutLen = 5;//extra pixels kept above the slanted edges when cropping
static int nXCutExtend = 1;//extra pixels kept left/right of the bottom base points
QString SECONDIMG="02.jpg";//file name that selects the alternative HoughLinesP parameters
// Detect the two dominant slanted edge lines in szPic (a line/edge image
// such as the Canny output of getTowerLines), crop the triangular region
// they bound out of the original image (m_srcImagePath), and affine-warp
// the crop so the apex sits at the top-center and the base spans the
// bottom row. Intermediate images are written to m_resultImageDir.
// Returns 0 on success, 1 on any failure.
int jointImage::getPicTowerPart(std::string szPic)
{
	string szPath = m_resultImageDir.toLocal8Bit().toStdString();
// 1. Load the line image
	m_src = cv::imread(szPic);
//Guard: the original code crashed in cvtColor on a failed load
	if (m_src.empty())
	{
		cout << "load pic failed" << endl;
		return 1;
	}
	int nCols = m_src.cols;
	int nRows = m_src.rows;
	cv::Mat srcGray;
//cv::imread returns BGR data, so BGR2GRAY is the correct conversion
	cv::cvtColor(m_src, srcGray, cv::COLOR_BGR2GRAY);
	int nLineCount = 0;
	std::vector<cv::Vec4i> savedLines;
	std::vector<double> linesAngle;
	cv::Mat drawImg(nRows, nCols, CV_8U, cv::Scalar(255));
	std::vector<cv::Vec4i> lines;//detected line segments
//Image "02.jpg" needs a larger max-gap parameter for HoughLinesP
	QFileInfo fileinfo(m_srcImagePath);
	QString fileName2 = fileinfo.fileName();
	if (fileName2 == SECONDIMG)
	{
		cv::HoughLinesP(srcGray, lines, 1, CV_PI / 180, 60, 100, 60);
	}
	else
	{
		cv::HoughLinesP(srcGray, lines, 1, CV_PI / 180, 60, 100, 5);
	}
//Keep only segments whose leg ratio (short side / long side) exceeds
//0.25, i.e. segments that are sufficiently slanted
	std::vector<cv::Vec4i>::const_iterator it = lines.begin();
	while (it != lines.end())
	{
		cv::Point point1((*it)[0], (*it)[1]);
		cv::Point point2((*it)[2], (*it)[3]);
		int xLen = abs(point2.x - point1.x);
		int yLen = abs(point2.y - point1.y);
		int nBigger = (xLen > yLen) ? xLen : yLen;
		int nSmall = (xLen > yLen) ? yLen : xLen;
		if (nSmall > 0)
		{
			double fPercent = double(nSmall) / double(nBigger);
			if (fPercent > 0.25 )
			{
				line(drawImg, point1, point2, cv::Scalar(0, 0, 255), 1);
				nLineCount++;
				savedLines.push_back(cv::Vec4i(point1.x, point1.y, point2.x, point2.y));
				linesAngle.push_back(fPercent);
			}
		}
		++it;
	}
	cout << "Line Count==" << nLineCount << endl;
	std::string szLinesPicPath = szPath + "\\lines1.jpg";
	imwrite(szLinesPicPath, drawImg);
//Merge segments of the same angle and keep the two longest ones
	std::vector<cv::Vec4i> validLines = getMergeLines(savedLines, linesAngle);
//Guard: the geometry below dereferences validLines[0] and [1]
	if (validLines.size() < 2)
	{
		cout << "not enough valid lines" << endl;
		return 1;
	}
	cv::Mat linesImg(nRows, nCols, CV_8U, cv::Scalar(255));
	it = validLines.begin();
	while (it != validLines.end())
	{
		cv::Point point1((*it)[0], (*it)[1]);
		cv::Point point2((*it)[2], (*it)[3]);
		line(linesImg, point1, point2, cv::Scalar(0, 0, 255), 1);
		++it;
	}
	std::string szLinesLastPath = szPath + "\\linesLast.jpg";
	imwrite(szLinesLastPath, linesImg);
// 3. Line equations y = a*x + b and the three triangle corner points
	cv::Vec4i line1 = validLines[0];
	cv::Vec4i line2 = validLines[1];
//Guard: a vertical segment makes the slope computation divide by zero
	if ((line1[2] == line1[0]) || (line2[2] == line2[0]))
	{
		cout << "vertical line, cannot compute slope" << endl;
		return 1;
	}
	double a1 = double(line1[3] - line1[1]) / double(line1[2] - line1[0]);//slope
	double b1 = double(line1[2] * line1[1] - line1[3] * line1[0]) / double(line1[2] - line1[0]);//intercept
	double a2 = double(line2[3] - line2[1]) / double(line2[2] - line2[0]);
	double b2 = double(line2[2] * line2[1] - line2[3] * line2[0]) / double(line2[2] - line2[0]);
//Guard: parallel lines have no intersection; a horizontal line never
//reaches the bottom row, so its x-location would divide by zero
	if ((a1 == a2) || (a1 == 0.0) || (a2 == 0.0))
	{
		cout << "degenerate line pair" << endl;
		return 1;
	}
//X coordinate where the two lines intersect (the tower apex)
	int nJointX = int((b1 - b2) / (a2 - a1));
//X positions where each line crosses the bottom image row (y = nRows-1)
	int x1Loc = int( (nRows-1-b1) / a1);
	int x2Loc = int((nRows-1-b2) / a2);
//Clamp both crossings to the image bounds
	if (x1Loc < 0)
		x1Loc = 0;
	if (x1Loc >= nCols)
		x1Loc = nCols - 1;
	if (x2Loc < 0)
		x2Loc = 0;
	if (x2Loc >= nCols)
		x2Loc = nCols - 1;
//NOTE(review): mp_srcImg is never freed on the success path and any
//previous allocation leaks here -- consider QScopedPointer/unique_ptr.
	mp_srcImg = new QImage();
	if(! ( mp_srcImg->load(m_srcImagePath) ) ) //load the original image
	{
		cout << "load pic failed" << endl;
		delete mp_srcImg;
		mp_srcImg = nullptr;//avoid leaving a dangling member pointer
		return 1;
	}
	int nXSize = mp_srcImg->width();
	int nYSize = mp_srcImg->height();
	QImage splitImg(nXSize,nYSize,QImage::Format_RGB888);
	splitImg.fill(Qt::black); // all-black canvas for the crop
//Order the two lines left/right once so a single copy loop replaces the
//two mirrored branches of the original (identical behavior)
	double aLeft = a1, bLeft = b1, aRight = a2, bRight = b2;
	int xLeft = x1Loc, xRight = x2Loc;
	if (x1Loc >= x2Loc)
	{
		aLeft = a2; bLeft = b2; xLeft = x2Loc;
		aRight = a1; bRight = b1; xRight = x1Loc;
	}
//Copy every source pixel inside the triangle, enlarged by nXCutExtend
//horizontally and nPixelCutLen above the slanted edges
	for (int i=0; i<nXSize; i++)
	{
		for (int j=0; j<nYSize; j++)
		{
			if ((i>=(xLeft-nXCutExtend)) && (i<nJointX))//left of the apex
			{
				int nYloc = int(aLeft*i+bLeft);
				if (j>=(nYloc-nPixelCutLen))//at or below the slanted edge (minus margin)
				{
					splitImg.setPixelColor(i,j, mp_srcImg->pixelColor(i,j));
				}
			}
			else if ((i>=nJointX) && (i<=(xRight+nXCutExtend)))//right of the apex
			{
				int nYloc = int(aRight*i+bRight);
				if (j>=(nYloc-nPixelCutLen))
				{
					splitImg.setPixelColor(i,j, mp_srcImg->pixelColor(i,j));
				}
			}
		}
	}
	QString filename = m_resultImageDir + "/split.jpg";
	bool flag=splitImg.save(filename);
	if(flag) std::cout<<"Img save success ...";
//Affine-warp the crop: apex -> top-center, base corners -> bottom image
//corners (mapping chosen so left stays left and right stays right)
	Point2f src_points[3];
	Point2f dst_points[3];
	src_points[0] = Point2f(nJointX, (nJointX*a1+b1)); //apex of the triangle
	src_points[1] = Point2f(x1Loc, (float)(nRows - 1));
	src_points[2] = Point2f(x2Loc,(float)(nRows - 1));
	QByteArray path=filename.toLocal8Bit();
	std::string pathimg(path);
	Mat tmpImg=imread(pathimg);
	Mat rotation1, img_warp1;
	dst_points[0] = Point2f((nCols-1)/2.0 , 0);
	if(x1Loc<x2Loc)//line 1 on the left, line 2 on the right
	{
		dst_points[2] = Point2f(nCols-1,nRows-1);
		dst_points[1] = Point2f(0,nRows-1);
	}
	else
	{
		dst_points[1] = Point2f(nCols-1,nRows-1);
		dst_points[2] = Point2f(0,nRows-1);
	}
	rotation1 = getAffineTransform(src_points, dst_points); //affine matrix from the point pairs
	warpAffine(tmpImg, img_warp1, rotation1, tmpImg.size()); //apply the warp
	std::string fangshePath = szPath + "\\fangshe.jpg";
	imwrite(fangshePath, img_warp1);
	return 0;
}