#include "BasePixelOption.h"

/**
 * Construct the option object. Ownership follows the normal Qt parent/child
 * model: when `parent` is non-null, Qt deletes this object with its parent.
 * @param parent optional QObject parent
 */
BasePixelOption::BasePixelOption(QObject* parent)
	: QObject(parent)
{
	//qRegisterMetaType<QList<cv::Mat*>>("QList<cv::Mat*>");
}

/**
 * Load an image from disk and convert it to a QPixmap for display.
 * @param filePath path to the image file
 * @return the image as a QPixmap, or a null QPixmap when the file cannot
 *         be read
 */
QPixmap BasePixelOption::showSrcImage(const char* filePath) {
	Mat src = imread(filePath);
	if (src.empty()) {
		// BUG FIX: the original `return NULL;` invoked the
		// QPixmap(const char* const xpm[]) constructor with a null pointer,
		// which is undefined behavior. Return a null pixmap instead.
		return QPixmap();
	}
	QImage qImage = ImageUtils::matToQImage(src);
	return QPixmap::fromImage(qImage);
}

/**
 * Pixel inversion (negative image): every channel value v becomes 255 - v.
 * Equivalent to cv::bitwise_not, written out by hand for demonstration.
 * @param filePath path to the image file
 * @return list with two pixmaps: [0] the original image, [1] the inverted
 *         image; empty list when the file cannot be read
 */
list<QPixmap> BasePixelOption::pixleReverse(const char* filePath) {
	list<QPixmap> pixmaps;
	Mat src = imread(filePath);
	if (src.empty()) {
		return pixmaps; // guard: the original crashed on a failed imread
	}
	Mat clone = src.clone(); // untouched copy returned as the "before" image
	const int channels = src.channels();
	const int width = src.cols;
	const int height = src.rows;
	// Manual inversion: subtract each channel value from 255.
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			if (channels == 1) { // single channel (grayscale)
				src.at<uchar>(row, col) = 255 - src.at<uchar>(row, col);
			}
			else if (channels == 3) { // three channels (BGR)
				// Take a reference once instead of six at<Vec3b> lookups.
				Vec3b& px = src.at<Vec3b>(row, col);
				px[0] = 255 - px[0];
				px[1] = 255 - px[1];
				px[2] = 255 - px[2];
			}
		}
	}
	pixmaps.push_back(QPixmap::fromImage(ImageUtils::matToQImage(clone)));
	pixmaps.push_back(QPixmap::fromImage(ImageUtils::matToQImage(src)));
	return pixmaps;
}

/**
 * Blend two images with addWeighted: dst = first*mAlpha + second*(1-mAlpha) + 0.1.
 * The second image is resized to the first image's dimensions so the two can
 * be combined pixel-wise.
 * @param filePath_first  path of the first (background) image
 * @param filePath_second path of the second image
 * @param mAlpha          weight of the first image in the blend
 * @param mats            out: mats[0] first image, mats[1] second image
 *                        (resized), mats[2] blended result
 */
void BasePixelOption::imageFuse(const char* filePath_first, const char* filePath_second, float mAlpha, Mat* mats) {
	Mat mat_first = imread(filePath_first);
	Mat mat_second = imread(filePath_second);
	if (mat_first.empty() || mat_second.empty()) {
		return; // guard: resize/addWeighted throw on an empty Mat
	}
	// addWeighted requires both inputs to have identical size and type.
	cv::resize(mat_second, mat_second, cv::Size(mat_first.cols, mat_first.rows));
	qDebug() << "融合准备图像加载完成";
	Mat dst;
	addWeighted(mat_first, mAlpha, mat_second, 1 - mAlpha, 0.1, dst);
	qDebug() << "融合成功";
	mats[0] = mat_first;
	mats[1] = mat_second;
	mats[2] = dst;
	// The original's explicit release() calls were dropped: the mats[] slots
	// hold their own references and the locals release automatically.
}


/**
 * Adjust brightness and contrast: dst = saturate_cast<uchar>(alpha*src + beta)
 * per channel. alpha > 1 raises contrast, beta > 0 raises brightness.
 * @param beta  additive brightness offset
 * @param alpha multiplicative contrast gain
 * @param mats  out: mats[0] source, mats[1] adjusted image
 */
void BasePixelOption::increaseBrightnessContrastRatio(const char* filePath, double beta, double alpha, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return; // guard: the original crashed on a failed load
	}
	Mat dst;
	// convertTo performs exactly saturate_cast<uchar>(alpha * pixel + beta)
	// for every channel. Unlike the original hand-written Vec3b loop it also
	// works for single-channel images (the loop was UB on grayscale input).
	src.convertTo(dst, -1, alpha, beta);
	mats[0] = src;
	mats[1] = dst;
}


/**
 * Draw a set of primitives (line, rectangle, ellipse, circle, filled
 * polygon, text) onto a black 500x500 3-channel canvas.
 * @param mats out: mats[0] the untouched black canvas, mats[1] the drawing
 */
void BasePixelOption::drawShape(Mat* mats) {
	Mat canvas = Mat::zeros(500, 500, CV_8UC3); // black BGR canvas
	Mat blank = canvas.clone();                 // kept as the "before" image
	const Scalar gray(125, 125, 125);

	// Diagonal anti-aliased line across the whole canvas.
	line(canvas, Point(0, 0), Point(500, 500), gray, 1, LINE_AA);

	// Axis-aligned rectangle.
	rectangle(canvas, Rect(125, 125, 225, 225), gray, 2, LINE_8);

	// Ellipse centered on the canvas, rotated 90 degrees, full sweep.
	ellipse(canvas, Point(250, 250), Size(canvas.cols / 4, canvas.rows / 8), 90, 0, 360, gray, 2, LINE_8);

	// Circle with an anti-aliased edge.
	circle(canvas, Point(250, 250), 100, gray, 1, LINE_AA);

	// Filled square polygon (closing point repeats the first vertex).
	Point quad[1][5] = { { Point(100, 100), Point(100, 200), Point(200, 200), Point(200, 100), Point(100, 100) } };
	const Point* polyPts[] = { quad[0] };
	int polyCount[] = { 5 };
	fillPoly(canvas, polyPts, polyCount, 1, gray, 8);

	// Text label.
	putText(canvas, "hello OpenCV", Point(100, 100), FONT_HERSHEY_COMPLEX, 1.0, Scalar(12, 23, 200), 3, 8);

	mats[0] = blank;
	mats[1] = canvas;
}


/**
 * Mean (box) filter.
 * @param ksize kernel side length
 * @param mats  out: mats[0] source, mats[1] blurred image
 */
void BasePixelOption::showBlur(const char* filePath, int ksize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return; // guard: blur() throws on an empty input
	}
	Mat dst;
	blur(src, dst, Size(ksize, ksize)); // mean filter
	mats[0] = src;
	mats[1] = dst;
}

/**
 * Gaussian blur.
 * @param kSize kernel side length; GaussianBlur requires an odd value
 * @param mats  out: mats[0] source, mats[1] blurred image
 */
void BasePixelOption::showGaussianBlue(const char* filePath, int kSize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return; // guard: GaussianBlur throws on an empty input
	}
	Mat dst;
	GaussianBlur(src, dst, Size(kSize, kSize), 0, 0); // sigma derived from kernel size
	mats[0] = src;
	mats[1] = dst;
}

/**
 * Median filter -- effective against salt-and-pepper noise.
 * @param kSize aperture size; medianBlur requires an odd value greater than 1
 * @param mats  out: mats[0] source, mats[1] filtered image
 */
void BasePixelOption::showMediaBlur(const char* filePath, int kSize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return; // guard: medianBlur throws on an empty input
	}
	Mat dst;
	medianBlur(src, dst, kSize); // median filter
	mats[0] = src;
	mats[1] = dst;
}

//Mat src;
//Mat dst;
//int g_d;
//int g_sigmaColor = 20;
//int g_sigmaSpace = 20;
//void onTrackBar(int, void*) {
//	bilateralFilter(src, dst, g_d, g_sigmaColor, g_sigmaSpace);
//	imshow("bilateralFilter", dst);
//}
void BasePixelOption::showBilateralFilter(const char* filePath, int g_d, int g_sigmaColor, int g_sigmaSpace, Mat* mats) {
	Mat src = imread(filePath);
	Mat dst = Mat::zeros(src.size(), src.type());
	//imshow("src", src);
	//namedWindow("bilateralFilter", WINDOW_AUTOSIZE);
	bilateralFilter(src, dst, g_d, g_sigmaColor, g_sigmaSpace);
	//createTrackbar("g_d", "bilateralFilter", &g_d, 50, onTrackBar);
	//createTrackbar("sigma_color", "bilateralFilter", &g_sigmaColor, 100, onTrackBar);
	//createTrackbar("sigm_space", "bilateralFilter", &g_sigmaSpace, 100, onTrackBar);
	//
	//imshow("bilateralFilter", dst);
	mats[0] = src;
	mats[1] = dst;
}

void BasePixelOption::showCleanImage(const char* filePath,
	int kSize, int thresholdMin, int thresholdMax, int dilateKSize, int erodeKsize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src.clone();
	Mat dst;
	//使用高斯滤波降噪
	GaussianBlur(src, dst, Size(kSize, kSize), 0);
	mats[1] = dst.clone();
	//将图片转换为灰度图像
	cvtColor(src, dst, COLOR_BGR2GRAY);
	mats[2] = dst.clone();
	//imshow("gray", dst);
	threshold(dst, dst, thresholdMin, thresholdMax, THRESH_BINARY);
	mats[3] = dst.clone();
	//imshow("threshold", dst);
	//使用结构元素进行膨胀
	Mat structureElement = getStructuringElement(MORPH_RECT, Size(dilateKSize, dilateKSize), Point(-1, -1));
	dilate(dst, dst, structureElement, Point(-1, -1));
	mats[4] = dst.clone();
	//imshow("dilate", dst);
	//腐蚀，用局部最小值替换锚点的值    
	Mat structureElement2 = getStructuringElement(MORPH_RECT, Size(erodeKsize, erodeKsize), Point(-1, -1));
	erode(dst, dst, structureElement2, Point(-1, -1));
	mats[5] = dst.clone();
	//imshow("erode", dst);
	src.release();
	dst.release();

}

void BasePixelOption::showImageOpen(const char* filePath, int erodeKSize, int dilateKSize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst;
	cvtColor(src, dst, COLOR_BGR2GRAY);
	threshold(dst, dst, 160, 255, THRESH_BINARY);
	//imshow("src", src);
	//定义结构元素
	Mat structureElement = getStructuringElement(MORPH_RECT, Size(erodeKSize, erodeKSize), Point(-1, -1));
	erode(dst, dst, structureElement, Point(-1, -1));
	mats[1] = dst;
	//imshow("erode", dst);
	//膨胀
	Mat structureElementDilate = getStructuringElement(MORPH_RECT, Size(dilateKSize, dilateKSize), Point(-1, -1));
	dilate(dst, dst, structureElementDilate, Point(-1, -1));
	mats[2] = dst;

	//imshow("dilate", dst);
}

void BasePixelOption::showImageClose(const char* filePath, int erodeKSize, int dilateKSize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0];
	Mat dst;
	//imshow("src", src);
	// 
	cvtColor(src, dst, COLOR_BGR2GRAY);
	threshold(dst, dst, 160, 255, THRESH_BINARY);
	//定义结构元素
	Mat stuctureElement = getStructuringElement(MORPH_RECT, Size(dilateKSize, dilateKSize), Point(-1, -1));
	//膨胀
	dilate(dst, dst, stuctureElement, Point(-1, -1));
	mats[1] = dst;
	//imshow("dilate", dst);
	//腐蚀
	Mat stuctureElementErode = getStructuringElement(MORPH_RECT, Size(erodeKSize, erodeKSize), Point(-1, -1));
	erode(dst, dst, stuctureElementErode, Point(-1, -1));
	mats[2] = dst;
	//imshow("erode", dst);

}

void BasePixelOption::showMorphologicalGradient(const char* filePath, int kSize, int mType, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst;
	cvtColor(src, dst, COLOR_BGR2GRAY);
	//imshow("gray", dst);
	//定义结构元素
	Mat structur = getStructuringElement(MORPH_RECT, Size(kSize, kSize), Point(-1, -1));
	//执行形态学梯度
	int morphType = MORPH_GRADIENT;
	switch (mType) {
	case 1://形态学梯度
		morphType = MORPH_GRADIENT;
		break;
	case 2://顶帽
		morphType = MORPH_TOPHAT;
		break;
	case 3://黑帽
		morphType = MORPH_BLACKHAT;
		break;
	default:
		morphType = MORPH_GRADIENT;
		break;
	}
	morphologyEx(dst, dst, morphType, structur);
	mats[1] = dst;
	//imshow("MORPH_GRADIENT", dst);
}

void BasePixelOption::showTopHat(const char* filePath, int kSize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst;
	//imshow("src", src);
	//定义结构元素
	Mat structur = getStructuringElement(MORPH_RECT, Size(kSize, kSize), Point(-1, -1));
	//执行顶帽操作：相当于原图像与开操作之间的差值图像
	morphologyEx(src, dst, MORPH_TOPHAT, structur);
	mats[1] = dst;
	//imshow("MORPH_TOPHAT", dst);
}

void BasePixelOption::showBlackHat(const char* filePath, int kSize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst;
	//imshow("src", src);
	Mat structur = getStructuringElement(MORPH_RECT, Size(kSize, kSize), Point(-1, -1));
	//执行黑帽操作
	morphologyEx(src, dst, MORPH_BLACKHAT, structur);
	//imshow("structur", dst);
	mats[1] = dst;
}

/**
 * Horizontal/vertical line extraction via morphology -- currently disabled:
 * the entire pipeline below is commented out, so the function only loads the
 * image into mats[0] and logs progress.
 * @param mats out: mats[0] copy of the loaded image
 */
void BasePixelOption::showLines(const char* filePath, Mat* mats) {
	qDebug() << "提取直线";
	Mat src = imread(filePath);
	mats[0] = src.clone();
	//Mat dst;
	//imshow("src", src);
	////convert to grayscale
	//cvtColor(src, dst, COLOR_BGR2GRAY);
	//imshow("gray", dst);
	////binarize (inverted input, adaptive mean threshold)
	//adaptiveThreshold(~dst, dst, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 7, -2);
	//imshow("adaptiveThreshold", dst);
	////structuring element for extracting horizontal lines
	//Mat hStructur = getStructuringElement(MORPH_RECT, Size(dst.cols / 30, 1), Point(-1, -1));
	////structuring element for extracting vertical lines
	//Mat vStructur = getStructuringElement(MORPH_RECT, Size(1, dst.rows / 30), Point(-1, -1));
	//Mat hMat;
	//erode(dst, hMat, hStructur, Point(-1, -1));
	//imshow("erode", hMat);
	//dilate(hMat, hMat, hStructur, Point(-1, -1));
	//imshow("dilate", hMat);

	/*Mat vMat;
	erode(dst, vMat, vStructur, Point(-1, -1));
	imshow("vMat_erode", vMat);
	dilate(vMat, vMat, vStructur, Point(-1, -1));
	imshow("vMat_dilate", vMat);*/

	qDebug() << "直线提取完成";
}


/**
 * Pyramid scaling demo: pyrUp doubles the size when type == 1, otherwise
 * pyrDown halves it.
 * @param type 1 selects pyrUp, any other value selects pyrDown
 * @param w    unused -- pyrUp/pyrDown only support a factor of 2, so the
 *             scale cannot be parameterized here; kept for interface
 *             compatibility
 * @param mats out: mats[0] source, mats[1] scaled image
 */
void BasePixelOption::showPyrUp(const char* filePath, int type, int w, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst;
	if (type == 1) {
		// Upsample: target size must be exactly twice the source.
		pyrUp(src, dst, Size(src.cols * 2, src.rows * 2));
	}
	else {
		// Downsample: target size must be exactly half the source.
		pyrDown(src, dst, Size(src.cols / 2, src.rows / 2));
	}
	mats[1] = dst;
	//imshow("src", src);
	//imshow("dst", dst);
}

/**
 * Downsample with pyrDown.
 * @param w target-size divisor; pyrDown only accepts a target of roughly
 *          half the source, so callers are expected to pass 2
 * @param mats out: mats[0] source, mats[1] downsampled image
 */
void BasePixelOption::showPyrDown(const char* filePath, int w, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty() || w <= 0) {
		return; // guard: failed load, or w == 0 would divide by zero
	}
	mats[0] = src;
	Mat dst;
	pyrDown(src, dst, Size(src.cols / w, src.rows / w));
	mats[1] = dst;
}


void BasePixelOption::showGaussianDiff(const char* filePath, int firstKSize, int secondKSize, Mat* mats) {
	Mat src = imread(filePath);
	mats[0] = src;
	Mat dst1, dst2;
	GaussianBlur(src, dst1, Size(firstKSize, firstKSize), 0);
	mats[1] = dst1;
	GaussianBlur(src, dst2, Size(secondKSize, secondKSize), 0);
	mats[2] = dst2;
	Mat result = dst1 - dst2;
	mats[3] = result;
	cvtColor(result, result, COLOR_BGRA2GRAY);
	mats[4] = result;
	threshold(result, result, 0, 255, THRESH_BINARY | THRESH_OTSU);
	mats[5] = result;
}


void BasePixelOption::showCustomKernelFilter2D(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);//原图
	mats[0] = src;
	Mat dst;
	Mat  kernel_robert_x = (Mat_<int>(2, 2) << 1, 0, 0, -1);//robert x方向算子
	filter2D(src, dst, -1, kernel_robert_x, Point(-1, -1), 0);//自定义线性滤波
	mats[1] = dst;

	Mat kernel_robert_y = (Mat_<int>(2, 2) << 0, 1, -1, 0);//robert y方向上算子
	filter2D(src, dst, -1, kernel_robert_y, Point(-1, -1), 0);//自定义线性滤波
	mats[2] = dst;

	Mat kernel_sobel_x = (Mat_<int>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);//sobel x方向上算子
	filter2D(src, dst, -1, kernel_sobel_x, Point(-1, -1), 0);//自定义线性滤波
	mats[3] = dst;

	Mat kernel_sobel_y = (Mat_<int>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);//sobel y方向上算子
	filter2D(src, dst, -1, kernel_sobel_y, Point(-1, -1), 0);//自定义线性滤波
	mats[4] = dst;

	Mat kernel = (Mat_<int>(3, 3) << 0, -1, 0, -1, 4, -1, 0, -1, 0);//拉普拉斯算子
	filter2D(src, dst, -1, kernel, Point(-1, -1), 0);//自定义线性滤波
	mats[5] = dst;
}

void BasePixelOption::showCustomKernelFilter2DMergeXY(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);
	Mat dst_robert_x;
	Mat dst_robert_y;
	mats[0] = src;

	Mat  kernel_robert_x = (Mat_<int>(2, 2) << 1, 0, 0, -1);//robert x方向算子
	filter2D(src, dst_robert_x, -1, kernel_robert_x, Point(-1, -1), 0);//自定义线性滤波
	mats[1] = dst_robert_x;

	Mat kernel_robert_y = (Mat_<int>(2, 2) << 0, 1, -1, 0);//robert y方向上算子
	filter2D(src, dst_robert_y, -1, kernel_robert_y, Point(-1, -1), 0);//自定义线性滤波
	mats[2] = dst_robert_y;

	//合并
	Mat dst_robert_result;
	addWeighted(dst_robert_x, 0.5, dst_robert_y, 0.5, 1, dst_robert_result);//对图像进行平均权重相加得到一个完整的用robert计算的梯度图像
	mats[3] = dst_robert_result;

	Mat dst_sobel_x;
	Mat dst_sobel_y;
	Mat kernel_sobel_x = (Mat_<int>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);//sobel x方向上算子
	filter2D(src, dst_sobel_x, -1, kernel_sobel_x, Point(-1, -1), 0);//自定义线性滤波
	mats[4] = dst_sobel_x;


	Mat kernel_sobel_y = (Mat_<int>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);//sobel y方向上算子
	filter2D(src, dst_sobel_y, -1, kernel_sobel_y, Point(-1, -1), 0);//自定义线性滤波
	mats[5] = dst_sobel_y;

	//合并
	Mat dst_sobel_result;
	addWeighted(dst_sobel_x, 0.5, dst_sobel_y, 0.5, 1, dst_sobel_result);//对图像进行平均权重相加得到一个用sobel算子计算的梯度图像
	mats[6] = dst_sobel_result;
}


void BasePixelOption::showCopyMakeBorder(const char* filePath, int borderWidth, Mat* mats) {
	Mat src = imread(filePath);
	Mat constant = src.clone();
	Mat replicate = src.clone();
	Mat wrap = src.clone();
	if (src.empty()) {
		return;
	}
	mats[0] = src;
	Mat border_default;

	copyMakeBorder(src, border_default,
		borderWidth, borderWidth, borderWidth, borderWidth, BORDER_DEFAULT);//系统默认填充方式
	mats[1] = border_default;

	Mat border_constant;
	copyMakeBorder(constant, border_constant,
		borderWidth, borderWidth, borderWidth, borderWidth, BORDER_CONSTANT);//填充边缘，用指定的像素值
	mats[2] = border_constant;

	Mat border_replicate;
	copyMakeBorder(replicate, border_replicate,
		borderWidth, borderWidth, borderWidth, borderWidth, BORDER_REPLICATE);//填充边缘像素用已知边缘像素值
	mats[3] = border_replicate;

	Mat border_wrap;
	copyMakeBorder(src.clone(), border_wrap,
		borderWidth, borderWidth, borderWidth, borderWidth, BORDER_WRAP);//填充边缘像素用已知边缘像素值
	mats[4] = border_wrap;

}

void BasePixelOption::showSobelAndScharr(const char* filePath, int gaussKSize, int sobelKSize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	mats[0] = src;

	Mat sobel_X, sobel_y, scharr_x, scharr_y, sobel_result, scharr_result, dst;
	//高斯模糊去噪声
	GaussianBlur(src, dst, Size(gaussKSize, gaussKSize), 0);
	mats[1] = dst;
	//转灰度值
	cvtColor(dst, dst, COLOR_BGR2GRAY);
	mats[2] = dst;
	//x方向及y方向做地图计算
	Sobel(dst, sobel_X, CV_16S, 1, 0, sobelKSize);//此处卷积核大小为1的效果也比较好，如果5以上效果会非常差（仅限我用的那张图片，实际情况需要根据原始图像的线条来）
	Sobel(dst, sobel_y, CV_16S, 0, 1, sobelKSize);
	//计算图像像素绝对值并输出
	convertScaleAbs(sobel_X, sobel_X);
	convertScaleAbs(sobel_y, sobel_y);
	//对x及y方向上的梯度图像做权重相加
	addWeighted(sobel_X, 0.5, sobel_y, 0.5, 1, sobel_result);
	mats[3] = sobel_result;

	//使用Scharr函数计算x方向及y方向的梯度图像
	Scharr(dst, scharr_x, CV_16S, 1, 0);
	Scharr(dst, scharr_y, CV_16S, 0, 1);
	//计算绝对值并输出
	convertScaleAbs(scharr_x, scharr_x);
	convertScaleAbs(scharr_y, scharr_y);
	//x方向和y方向进行权重相加
	addWeighted(scharr_x, 0.5, scharr_y, 0.5, 1, scharr_result);
	mats[4] = scharr_result;


}


void BasePixelOption::showLaplacian(const char* filePath, int gaussKSize, int laplacianKSize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	mats[0] = src;
	Mat dst, result;
	//执行高斯模糊去除噪声
	GaussianBlur(src, dst, Size(gaussKSize, gaussKSize), 0);
	mats[1] = dst;
	//将彩色图像转灰度图像
	cvtColor(dst, dst, COLOR_BGR2GRAY);
	mats[2] = dst;
	//执行拉普拉斯算子
	Laplacian(dst, result, CV_16S, laplacianKSize);
	mats[3] = result;
	//像素取绝对值
	convertScaleAbs(result, result);
	//显示图像
	mats[4] = result;
}

void BasePixelOption::showCanny(const char* filePath, int threshold_min, int threshold_max, int blurKSize, int sobelKSize, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size((src.cols / 4) / 2, (src.rows / 4) / 2));
	//imshow("src", src);
	mats[0] = src;
	Mat dst, result, mask;
	//均值滤波过滤
	blur(src, dst, Size(blurKSize, blurKSize));
	mats[1] = dst;
	//转灰度图像
	cvtColor(dst, dst, COLOR_BGRA2GRAY);
	mats[2] = dst;
	//边缘检测
	Canny(dst, result, threshold_min, threshold_max, sobelKSize, true);
	mask.create(src.size(), src.type());
	src.copyTo(mask, result);
	//imshow("mask", mask);
	//imshow("result", result);
	//cv::waitKey(0);
	mats[3] = mask;
	mats[4] = result;

}


void BasePixelOption::showHoughLine(const char* filePath, int threshold_min, int threshold_max, int cannySobelKSize,
	int rho, int threshold_p, double minLineLength, double maxLineGap, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size((src.cols / 4) / 2, (src.rows / 4) / 2));
	mats[0] = src;
	//imshow("src", src);//显示原图
	Mat dst;
	cvtColor(src, dst, COLOR_BGR2GRAY);//转换灰度图
	//imshow("gray", dst);
	mats[1] = dst;
	//边缘检测
	Canny(dst, dst, threshold_min, threshold_max, cannySobelKSize, true);
	//imshow("canny", dst);
	mats[2] = dst;
	//霍夫直线检测
	vector<Vec4f> plines;
	/**
	*第一个参数，InputArray类型的image，输入图像，即源图像，需为8位的单通道二进制图像，可以将任意的源图载入进来后由函数修改成此格式后，再填在这里。
	*第二个参数，InputArray类型的lines，经过调用HoughLinesP函数后后存储了检测到的线条的输出矢量，每一条线由具有四个元素的矢量(x_1,y_1, x_2, y_2） 表示，其中，(x_1, y_1)和(x_2, y_2) 是是每个检测到的线段的结束点。
	*第三个参数，double类型的rho， 以像素为单位的距离精度。 另一种形容方式是直线搜索时的进步尺寸的单位半径。
	*第四个参数，double类型的theta，以弧度为单位的角度精度。另一种形容方式是直线搜索时的进步尺寸的单位角度。
	*第五个参数，int类型的threshold，累加平面的阈值参数，即识别某部分为图中的一条直线时它在累加平面中必须达到的值。 大于阈值 threshold 的线段才可以被检测通过并返回到结果中。
	*第六个参数，double类型的minLineLength，有默认值0，表示最低线段的长度，比这个设定参数短的线段就不能被显现出来。
	*第七个参数，double类型的maxLineGap，有默认值0，允许将同一行点与点之间连接起来的最大的距离。
	*/
	/*HoughLinesP(dst, plines, 1, CV_PI / 180, 10, 100, 10);*/
	HoughLinesP(dst, plines, rho, CV_PI / 180, threshold_p, minLineLength, maxLineGap);
	Mat matLine = Mat::zeros(src.size(), src.type());
	for (int i = 0;i < plines.size();i++) {
		Vec4f h = plines[i];
		line(matLine, Point(h[0], h[1]), Point(h[2], h[3]), Scalar(0, 0, 255), 3, LINE_AA);
	}
	//imshow("lines", matLine);
	mats[3] = matLine;
	//cv::waitKey(0);//由于是在子线程中测试，现成结束程序结束，加上这段代码表示程序等待，任意键输入退出


}

//,int mediaKSize,Mat *mats
void BasePixelOption::showHoughCircles(const char* filePath, int param1, int param2, int minRadius, int maxRadius, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	mats[0] = src.clone();
	//imshow("src", src);
	//转灰度图像
	Mat gray;
	cvtColor(src, gray, COLOR_BGR2GRAY);
	mats[1] = gray.clone();
	//imshow("gray", gray);
	//中值滤波进行模糊
	medianBlur(gray, gray, 3);
	mats[2] = gray.clone();
	//imshow("mediaBlur", gray);
	//进行边缘检测
	Canny(gray, gray, 50, 100);
	mats[3] = gray.clone();
	//imshow("canny", gray);
	//进行腐蚀操作
	//    Mat structure = getStructuringElement(MORPH_RECT,Size(5,5),Point(-1,-1));
	//    dilate(gray,gray,structure);
	//    erode(gray,gray,structure);
	//    imshow("erode",gray);
	//霍夫圆检测
	vector<Vec3f> circles;
	//第四个参数--值越小检测时间越精确
	//第五个参数，检测到两个圆心之间的最小距离，如果两个圆心之间的距离小于这个参数就不检测(认为是同心圆)
	//第六个参数，边缘检测的的最高阈值，低阈值为高阈值的一半
	//第七个参数，只有当累加器的值大于这个参数时才认为检测到了一个圆，和HOUGH_GRADIENT配合，值越小检测到的圆就越多
	//第八个参数，检测到圆的最小半径
	//第九个参数，检测到圆的最大半径
	//HoughCircles(gray, circles, HOUGH_GRADIENT, 1, 100, 100, 30, 10, 120);
	HoughCircles(gray, circles, HOUGH_GRADIENT, 1, param1, 100, param2, minRadius, maxRadius);
	//重新回到BGR色彩空间+
	cvtColor(gray, gray, COLOR_GRAY2BGR);
	for (int i = 0;i < circles.size();i++) {
		Vec3f c3 = circles[i];
		circle(src, Point(c3[0], c3[1]), c3[2], Scalar(0, 0, 255), 3, LINE_AA);//绘制圆
		circle(src, Point(c3[0], c3[1]), 2, Scalar(0, 0, 255), 3, LINE_AA);//绘制圆心
	}
	mats[4] = src;
	//imshow("circle", src);
	//cv::waitKey(0);
}

void BasePixelOption::showRemap(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	mats[0] = src;
	//imshow("src", src);
	Mat dst, map_x, map_y;
	map_x.create(src.size(), CV_32FC1);
	map_y.create(src.size(), CV_32FC1);
	for (int i = 0;i < src.rows;i++) {
		float* ptrX = map_x.ptr<float>(i);//获取一列像素
		float* ptrY = map_y.ptr<float>(i);
		for (int j = 0;j < src.cols;j++) {
			//            //左右像素对调
			ptrX[j] = (float)(src.cols - j);
			ptrY[j] = (float)i;
			//上下对调
			//            ptrX[j] = (float)j;
			//            ptrY[j] = (float)(src.rows-i);
		}
	}
	remap(src, dst, map_x, map_y, BORDER_CONSTANT);
	//imshow("remap", dst);
	mats[1] = dst;
	//cv::waitKey(0);
}

/**
 * Histogram equalization to raise image contrast. equalizeHist only accepts
 * a single-channel image, so the source is converted to grayscale first.
 * @param mats out: mats[0] resized source, mats[1] gray, mats[2] equalized
 */
void BasePixelOption::showEqualizeHist(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	// NOTE(review): width is divided by 4 but height by 2, which changes the
	// aspect ratio -- presumably sized for the display widget; confirm.
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 2));
	mats[0] = src;
	Mat dst;
	cvtColor(src, dst, COLOR_BGR2GRAY);
	// BUG FIX: clone before the in-place equalizeHist below. It reuses dst's
	// buffer (same size/type), so the original's plain `mats[1] = dst` made
	// the "gray" slot alias the equalized result in mats[2].
	mats[1] = dst.clone();
	equalizeHist(dst, dst);
	mats[2] = dst;
}

void BasePixelOption::showEqualizeHistColorImage(const char* filePath, Mat* images) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	//imshow("src", src);
cv:resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	images[0] = src;
	Mat dst, result;
	//分离通道
	vector<Mat> mats;
	split(src, mats);
	//对bgr通道进行均衡
	Mat blueChannelMat = mats[0];
	Mat greeChannelMat = mats[1];
	Mat redChannelMat = mats[2];
	images[1] = blueChannelMat;
	images[2] = greeChannelMat;
	images[3] = redChannelMat;
	/*imshow("blue", blueChannelMat);
	imshow("gree", greeChannelMat);
	imshow("red", redChannelMat);*/
	equalizeHist(blueChannelMat, blueChannelMat);
	equalizeHist(greeChannelMat, greeChannelMat);
	equalizeHist(redChannelMat, redChannelMat);
	images[4] = blueChannelMat;
	images[5] = greeChannelMat;
	images[6] = redChannelMat;
	/*imshow("blueChannelMat", blueChannelMat);
	imshow("greeChannelMat", greeChannelMat);
	imshow("redChannelMat", redChannelMat);*/
	//合并均衡后的图像;
	merge(mats, result);
	images[7] = result;
	//imshow("result", result);
	//cv::waitKey(0);

}

/*
• images：输入的图像的指针；
• nimages：输入图像个数；
• channels：需要统计直方图的第几通道；
• mask：掩模，mask必须是一个8位（CV_8U）的数组并且和images的数组大小相同；
• hist：直方图计算的输出值；
• dims：输出直方图的维度（由channels指定）；
• histSize：直方图中每个dims维度需要分成多少个区间（如果把直方图看作一个一个竖条的话，就是竖条的个数）；
• ranges：统计像素值的区间；
• uniform = true：是否对得到的直方图数组进行归一化处理；
• accumulate = false：在多个图像时，是否累积计算像素值的个数
*/
void BasePixelOption::drawCalcHist(const char* filePath, Mat* images) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	//imshow("src", src);
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	images[0] = src;
	Mat dst;
	//通道分离
	vector<Mat> mats;
	split(src, mats);

	int histsize = 256;//直方图级数，例如：0~255中间有256级
	float range[] = { 0,256 };//值域范围，也就是直方图值的范围
	const float* histRange = { range };
	Mat r_hist, g_hist, b_hist;
	calcHist(&mats[0], 1, 0, Mat(), r_hist, 1, &histsize, &histRange, true, false);
	calcHist(&mats[1], 1, 0, Mat(), g_hist, 1, &histsize, &histRange, true, false);
	calcHist(&mats[2], 1, 0, Mat(), b_hist, 1, &histsize, &histRange, true, false);
	images[1] = r_hist;
	images[2] = g_hist;
	images[3] = b_hist;
	//创建直方图画布
	int hist_w = 512;
	int hist_h = 400;
	Mat histImage(hist_w, hist_h, CV_8UC3, Scalar(0, 0, 0));

	//将直方图数据归一化到指定范围
	normalize(r_hist, r_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
	normalize(g_hist, g_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
	normalize(b_hist, b_hist, 0, hist_h, NORM_MINMAX, -1, Mat());
	images[4] = r_hist;
	images[5] = g_hist;
	images[6] = b_hist;
	int bin_w = hist_w / histsize;//直方图数据宽

	//在histImage中绘制出直方图
	for (int i = 1;i < histsize;i++) {
		line(histImage,
			Point(bin_w * (i - 1), hist_h - cvRound(b_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_w - cvRound(b_hist.at<float>(i))), Scalar(255, 0, 0), 2, LINE_AA);
		line(histImage,
			Point(bin_w * (i - 1), hist_h - cvRound(r_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_w - cvRound(r_hist.at<float>(i))), Scalar(0, 0, 255), 2, LINE_AA);
		line(histImage,
			Point(bin_w * (i - 1), hist_h - cvRound(g_hist.at<float>(i - 1))),
			Point(bin_w * (i), hist_w - cvRound(g_hist.at<float>(i))), Scalar(0, 255, 0), 2, LINE_AA);
	}
	//imshow("histImage", histImage);
	images[7] = histImage;
	//cv::waitKey(0);

}


/**
 * Histogram back-projection of an image against its own 2-D hue/saturation
 * histogram.
 * @param mats out: mats[0] resized source, mats[1] HSV image, mats[2] raw
 *             H-S histogram, mats[3] normalized histogram, mats[4] projection
 */
void BasePixelOption::showCalcHistBackProject(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);
	// BUG FIX: the guard was moved before resize -- resize throws on an
	// empty Mat, so the original's late check never ran on a failed load.
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	mats[0] = src;
	// Back-projection is computed in HSV space.
	Mat src_hsv;
	cvtColor(src, src_hsv, COLOR_BGR2HSV);
	mats[1] = src_hsv;
	// 2-D histogram over hue (0-180) and saturation (0-256).
	MatND roiHist;
	int dims = 2;                // histogram dimensionality
	float hranges[] = { 0,180 }; // hue value range
	float Sranges[] = { 0,256 }; // saturation value range
	const float* ranges[] = { hranges,Sranges };
	int size[] = { 20,32 };      // bins per dimension
	int channels[] = { 0,1 };    // use channels H and S
	calcHist(&src_hsv, 1, channels, Mat(), roiHist, dims, size, ranges);
	// BUG FIX: clone the raw histogram. normalize() below works in place on
	// the same buffer, so the original's plain `mats[2] = roiHist` made the
	// raw slot alias the normalized mats[3].
	mats[2] = roiHist.clone();
	normalize(roiHist, roiHist, 0, 255, NORM_MINMAX);
	mats[3] = roiHist;
	// Back-project: each pixel becomes the histogram bin value of its (H, S).
	Mat proImage;
	calcBackProject(&src_hsv, 1, channels, roiHist, proImage, ranges);
	mats[4] = proImage;
}

/**
 * Template matching with TM_SQDIFF_NORMED; the best match is outlined with a
 * rectangle on the scene image.
 * @param mats out: mats[0] scene image, mats[1] template image,
 *             mats[2] scene with the best match outlined
 */
void BasePixelOption::showMatchTemplate(const char* filePath, const char* templateFilePath, Mat* mats) {
	Mat src = imread(filePath);
	Mat src_template = imread(templateFilePath);
	if (src.empty() || src_template.empty()) {
		return; // guard against failed loads
	}
	Mat src_clone = src.clone(); // BGR copy kept for drawing the result
	mats[0] = src_clone;
	mats[1] = src_template;
	// Matching runs on grayscale versions of both images.
	cvtColor(src, src, COLOR_BGR2GRAY);
	cvtColor(src_template, src_template, COLOR_BGR2GRAY);
	// Result canvas: one score per candidate template position.
	int width = src.cols - src_template.cols + 1;
	int height = src.rows - src_template.rows + 1;
	if (width <= 0 || height <= 0) {
		return; // guard: a template larger than the scene would throw
	}
	Mat result;
	result.create(Size(width, height), CV_32FC1);
	matchTemplate(src, src_template, result, TM_SQDIFF_NORMED);
	normalize(result, result, 0, 1, NORM_MINMAX);
	// For TM_SQDIFF_NORMED the best match is the *minimum* score.
	double minValue, maxValue;
	Point minLoc, maxLoc, matchLoc;
	minMaxLoc(result, &minValue, &maxValue, &minLoc, &maxLoc, Mat());
	matchLoc = minLoc;
	rectangle(src_clone, matchLoc, Point(matchLoc.x + src_template.cols, matchLoc.y + src_template.rows), Scalar(0, 51, 0), 2, LINE_AA);
	mats[2] = src_clone;
}

void BasePixelOption::showFindContours(const char* filePath, Mat* mats) {
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	//imshow("src", src);
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	mats[0] = src;
	Mat gray;
	cvtColor(src, gray, COLOR_BGR2GRAY);
	mats[1] = gray;
	//imshow("gray", gray);
	//执行边缘 检测
	Canny(gray, gray, 50, 100, 3, false);
	mats[2] = gray;
	//imshow("canny", gray);
	//发现轮廓
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(gray, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
	//绘制轮廓
	RNG rng(12345);
	//从轮廓数据中检测凸包
	vector<vector<Point>> hull(contours);
	for (int i = 0;i < contours.size();i++) {
		convexHull(Mat(contours[i]), hull[i], false);
	}
	for (int i = 0;i < contours.size();i++) {
		double area = contourArea(contours[i], false);//轮廓面积
		if (area > 100) {
			Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
			//绘制凸包
			drawContours(src, hull, i, color, 1, LINE_8, hierarchy, 0, Point(0, 0));
			//绘制轮廓
			drawContours(src, contours, i, color, 2, LINE_8, hierarchy, 0, Point(0, 0));
		}

	}
	mats[3] = src;
	//imshow("drawImg", src);
}

//最大外接矩形和最大外接圆
void BasePixelOption::showContoursRectAndCircle(const char* filePath, Mat* mats) {
	//1.载入图像
	Mat src = imread(filePath);
	if (src.empty()) {
		return;
	}
	cv::resize(src, src, cv::Size(src.cols / 4, src.rows / 4));
	mats[0] = src;
	Mat src_clone = src.clone();
	//imshow("src", src);
	//转灰度图图像
	Mat gray;
	cvtColor(src, gray, COLOR_BGR2GRAY);
	mats[1] = gray;
	//均值滤波轻微去噪声
	blur(gray, gray, Size(3, 3), Point(-1, -1));
	mats[2] = gray;
	//    imshow("gray",gray);
	//图像二值化
	threshold(gray, gray, 100, 200, THRESH_BINARY);
	mats[3] = gray;
	//    imshow("threshold",gray);
	//发现轮廓
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(gray, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
	RNG rng(123456);
	//准备数据
	vector<vector<Point>> contours_ploy(contours.size());
	vector<Rect> ploy_rects(contours.size());
	vector<Point2f> ccs(contours.size());
	vector<float> radius(contours.size());
	vector<RotatedRect> minRects(contours.size());
	vector<RotatedRect> myellipse(contours.size());
	for (size_t i = 0;i < contours.size();i++) {
		approxPolyDP(Mat(contours[i]), contours_ploy[i], 3, true);
		ploy_rects[i] = boundingRect(contours_ploy[i]);
		minEnclosingCircle(contours_ploy[i], ccs[i], radius[i]);
		if (contours_ploy[i].size() > 5) {
			myellipse[i] = fitEllipse(contours_ploy[i]);
			minRects[i] = minAreaRect(contours_ploy[i]);
		}
	}
	//绘制轮廓外界图形
	Point2f pts[4];
	for (size_t t = 0;t < contours.size();t++) {
		Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
		rectangle(src_clone, ploy_rects[t], color, 2, LINE_4);
		circle(src_clone, ccs[t], radius[t], color, 2, LINE_4);
		/*if (contours_ploy[t].size() > 5) {
			ellipse(src_clone, myellipse[t], color, 1, 8);
			minRects[t].points(pts);
			for (int r = 0; r < 4; r++) {
				line(src_clone, pts[r], pts[(r + 1) % 4], color, 1, 8);
			}
		}*/
	}
	mats[4] = src_clone;
	//imshow("src_clone", src_clone);
	//cv::waitKey(0);
}

/**
 * Watershed segmentation entry point -- currently a stub: the original
 * implementation (a dedicated watershedwindow) is commented out, so calling
 * this function has no effect.
 * @param filePath path of the image that would be segmented
 */
void BasePixelOption::showWaterShedImage(const char* filePath) {
	//    watershedwindow *waterWindow=new watershedwindow();
	//    waterWindow->showWaterShedImage(filePath);
	//    waterWindow->show();
}

// Destructor: nothing to release here -- local Mats clean themselves up and
// Qt's parent/child ownership handles the QObject side.
BasePixelOption::~BasePixelOption()
{
}
