#pragma once
#include <opencv2/opencv.hpp>
#include <iostream>
#include <time.h>
using namespace std;
using namespace cv;
// Return a pseudo-random integer in [0, i).
// Bug fix: rand() % i is undefined behavior (integer division by zero)
// when i == 0, and yields implementation-defined negatives when i < 0;
// guard by returning 0 for any non-positive bound.
int getARandNum(const int& i) {
	if (i <= 0) {
		return 0;
	}
	return rand() % i;
}
// Build a Scalar whose four channels each hold an independent random
// value in [0, i); the default bound 256 covers the full 8-bit range.
Scalar getARandScalar(const int& i = 256) {
	const int c0 = getARandNum(i);
	const int c1 = getARandNum(i);
	const int c2 = getARandNum(i);
	const int c3 = getARandNum(i);
	return Scalar(c0, c1, c2, c3);
}
// Load an image from disk and show it until a key is pressed.
// NOTE(review): the path below contains mis-encoded characters from the
// original source; it is a runtime string and is kept byte-for-byte.
void test06() {
	const char* path = "C:\\Users\\blackpoor\\Videos\\ͼƬ\\2.jpeg";
	Mat src = imread(path, IMREAD_ANYCOLOR);
	namedWindow("a");
	imshow("a", src);
	waitKey(0);
}
void test07() { 
	srand((unsigned int)time(NULL));
	Mat m2 = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\2.jpg", IMREAD_ANYCOLOR);
	
	Mat m3 = Mat::zeros(Size(255, 255), CV_8UC3);
	imshow("1", m3);
	int w = m3.cols;//宽度 指一行有多少个像素 
	int h = m3.rows;//
	int dims = m3.channels();//3
	for (int row = 0; row < h; row++) {
		for (int col = 0; col < w; col++) {
			if (dims == 1) {
				int pv = m3.at<uchar>(row,col);
				m3.at<uchar>(row, col) = 255 - pv;  //通过at取值和赋值
			}
			if (dims == 3) {
				Vec3b bgr = m3.at<Vec3b>(row, col);
				m3.at<Vec3b>(row, col)[0] = rand() % 256;//随机
				m3.at<Vec3b>(row, col)[1] = rand() % 256;
				m3.at<Vec3b>(row, col)[2] = rand() % 256;
			}
		}
	}
	imshow("42", m3);
	waitKey(0);
}
void test08() {
	Mat m3 = Mat::zeros(Size(10,10), CV_8UC3);
	int w = m3.cols;
	int h = m3.rows;
	uchar* a = m3.ptr<uchar>(0);
	for (int i = 0; i<100; i++) {
		*a++ = 2*i;
		*a++ = 2*i;
		*a++ = 2*i;
	}
	imshow("1", m3);

	waitKey(0);
}
//视频对象的成员数据
// Query capture properties and play a video file frame by frame.
void test09() {
	VideoCapture VC;
	VC.open("C:\\Users\\blackpoor\\Desktop\\opencv\\1.mp4"); // init via open()
	VideoCapture VC1("C:\\Users\\blackpoor\\Desktop\\opencv\\A%02d.png"); // init via constructor (image-sequence pattern)
	if (!VC.isOpened()) { // nothing to play
		return;
	}

	cout << "图像宽度 " << VC.get(CAP_PROP_FRAME_WIDTH) << endl;
	cout << "图像高度 " << VC.get(CAP_PROP_FRAME_HEIGHT) << endl;
	cout << "视频帧率 " << VC.get(CAP_PROP_FPS) << endl;
	cout << "视频总帧数 "  << VC.get(CAP_PROP_FRAME_COUNT) << endl;
	namedWindow("VC", WINDOW_FREERATIO);
	// Bug fix: some containers report an FPS of 0, which made the original
	// "1000 / fps" a division by zero; fall back to ~30 fps in that case.
	double fps = VC.get(CAP_PROP_FPS);
	int delay = (fps > 0) ? (int)(1000 / fps) : 33;
	while (1) {
		Mat frame;
		VC >> frame;
		if (frame.empty()) { // end of stream
			break;
		}
		imshow("VC", frame);
		waitKey(delay);
	}
	waitKey(0);

}
//调用摄像头
// Stream live frames from the default camera.
void test10() {
	VideoCapture VC(0); // device id 0 is the first camera
	// Robustness fix: bail out when no camera is available instead of
	// looping on empty frames.
	if (!VC.isOpened()) {
		return;
	}
	namedWindow("VC", WINDOW_FREERATIO);
	// Bug fix: guard against a reported FPS of 0 before dividing.
	double fps = VC.get(CAP_PROP_FPS);
	int delay = (fps > 0) ? (int)(1000 / fps) : 33;
	while (1) {
		Mat frame;
		VC >> frame; // grabs one frame on demand; nothing is recorded
		if (frame.empty()) {
			break;
		}
		cout << VC.get(CAP_PROP_FPS) << " " << endl; // typically a fixed 30
		imshow("VC", frame);
		waitKey(delay);
	}

}
//摄像头//保存图片
void test11() {//保存图片
	Mat mat(1000, 1000, CV_8UC3);
	vector<int> compression_params;
	compression_params.push_back(IMWRITE_PNG_COMPRESSION);//设置标志
	compression_params.push_back(9);//设置最高压缩
	for (int i = 0; i < mat.rows; i++) {
		for (int j = 0; j < mat.cols; j++) {
			Vec3b& a = mat.at<Vec3b>(i, j);
			a[0] = saturate_cast<uchar>((float(mat.cols - j)) / (float(mat.cols)) * UCHAR_MAX);
			a[1] = saturate_cast<uchar>((float(mat.rows - i)) / (float(mat.rows)) * UCHAR_MAX);
			a[2] = 255;
		}
	}
	bool result = imwrite("C:\\Users\\blackpoor\\Desktop\\opencv\\保存图片.png",mat,compression_params);
	if (!result) {
		cout << "保存失败";
	}
	waitKey(0);
}
// Record frames from the camera into an MJPG-encoded AVI; ESC stops.
void test12() {
	Mat m3;
	VideoCapture video(0); // open the default camera
	//VideoCapture video; // alternatively read from a file:
	//video.open("C:\\Users\\blackpoor\\Desktop\\opencv\\保存视频.png");
	if (!video.isOpened()) {
		cout << "摄像头打开失败" << endl;
		return;
	}
	// Grab one frame up front so the writer can be sized from it.
	video >> m3;
	if (m3.empty()) {
		cout << "s" << endl;
		return;
	}
	VideoWriter writer;
	int codec = VideoWriter::fourcc('M', 'J', 'P', 'G'); // output codec
	double fps = 1.0;
	bool isColor = m3.type() == CV_8UC3;
	string filename = "C:\\Users\\blackpoor\\Desktop\\opencv\\保存视频.avi";
	writer.open(filename, codec, fps, m3.size(), isColor);
	if (!writer.isOpened()) {
		cout << "打开视频文件失败" << endl;
		// Bug fix: the original only printed the message and then kept
		// grabbing and writing frames into a writer that never opened;
		// bail out instead.
		video.release();
		return;
	}
	while (1) {
		if (!video.read(m3)) { // stop when the camera stops delivering frames
			cout << "摄像头断开";
			break;
		}
		cout << video.get(CAP_PROP_FPS);
		writer.write(m3); // append the frame to the output video
		imshow("Live", m3);
		char c = waitKey(50); // waitKey returns the pressed key's ASCII code
		if (c == 27) { // ESC quits
			break;
		}
	}
	video.release();
	writer.release();
	destroyWindow("Live");
	return; // streams are also closed automatically at program exit
}
//保存和读取 XML和YMAL文件
// Save and load XML/YAML data via cv::FileStorage (write demo kept
// commented out; this run only reads).
void test13() {
	FileStorage FS; // handles both writing and reading of XML/YAML
	//FileStorage FS2("C:\\Users\\blackpoor\\Desktop\\opencv\\a.XML", FileStorage::APPEND);//2nd arg: open mode, 3rd: encoding
	FS.open("C:\\Users\\blackpoor\\Desktop\\opencv\\a.XML", FileStorage::READ);
	// Robustness fix: the original read nodes without checking the open
	// succeeded; a missing/unreadable file would yield empty nodes.
	if (!FS.isOpened()) {
		return;
	}
	//FS2.write("A", 23);//write via the write() member
	//FS2<<"age"<<22; //or via operator<<
	//FS2 << "per" << "[" << 1 << 2 << 3 << 4 << "]";//write a sequence
	//FS2 << "age" << "{" << "NO01" << 12 << "NO02" << 42 << "}"; //braces create a nested mapping
	float ageRead;
	FS["age"] >> ageRead;
	cout << ageRead << endl;

	FileNode FSper = FS["per"]; // iterate a stored sequence
	for (FileNodeIterator it = FSper.begin(); it != FSper.end(); it++) {
		float a;
		*it >> a;
		cout << a << " ";
	}
	cout << endl;
	FileNode FSage = FS["age"]; // read a mapping node (keys must be unique)
	int no01 = (int)FSage["NO01"];
	int no02 = (int)FSage["NO02"];
	cout << no01 << "  " << no02 << endl;
	FS.release();
}
void test14() {
	Mat m(400, 400, CV_8UC4);//RGB
	for (int i = 0; i < 256; i+=10) {
		for (int j = 0; j < 256; j+=10) {
			for (int k = 0; k < 256; k+=10) {
				m = Scalar(i, j, k,255); //BGR  顺序与名称相反
				imshow("123", m);
				if (waitKey(10) == 27) {
					system("pause");
					return;
				};
				cout << "i=" << i << " " << "j=" << j << " " << "k=" << k << endl;
			}
		}
	}
	
}
//各种颜色模型
// Notes on common color models.
void test15() {
	//1. RGB: three channels — red, green, blue.
	//2. YUV: luma Y plus chroma differences U (red minus luma) and V (blue minus luma).
	//3. HSV: Hue (the base color), Saturation (vividness, 0~100%), Value (brightness).
	//4. Lab: L is lightness; a and b are color axes in -128~127,
	//   a running green->red and b running blue->yellow.
	//5. GRAY: single channel; in 8-bit images levels run 0 (black) to 255 (white).
	//   GRAY = 0.3R + 0.59G + 0.11B  (RGB -> GRAY)
	Mat m1, m2;
	// Bug fix: cvtColor throws on an empty source Mat, and m1 is never
	// loaded here; only convert when it actually holds pixels.
	if (!m1.empty()) {
		cvtColor(m1, m2, 0); // 3rd parameter is the conversion code
	}

}
//颜色模型转换
// Convert one image into several color models and display them.
void test16() {
	Mat img32;
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\A01.png");
	// Robustness fix: imread returns an empty Mat on failure, and both
	// convertTo and cvtColor would throw on it.
	if (img.empty()) {
		return;
	}
	img.convertTo(img32, CV_32F, 1.0 / 255); // rescale CV_8U into CV_32F [0,1]
	Mat gray,HSV,YUV,Lab;
	cvtColor(img32, HSV, COLOR_BGR2HSV);
	cvtColor(img32, YUV, COLOR_BGR2YUV);
	cvtColor(img32, Lab, COLOR_BGR2Lab);
	cvtColor(img32, gray, COLOR_BGR2GRAY);
	imshow("原图" ,img32);
	imshow("HSV", HSV);
	imshow("YUV", YUV);
	imshow("Lav", Lab);
	imshow("gray", gray);

}
void test17() {
	Mat m1(500, 500, CV_8UC3);
	m1 = Scalar(10, 100, 1);
	Mat m2;
	for (float i = 0; i < 1; i+= 0.01) {
		m1.convertTo(m2, CV_32FC3, 1.0 / 255,i); //第三为缩放因子，第四为偏置因子
		imshow("a", m2);
		waitKey(2);//变化后再加上偏置因子的值
	}
}
//多通道分离和合并
// Split a multi-channel image into its planes and merge them back.
void test18(Mat &img){
	Mat planes[3];
	split(img, planes); // one single-channel Mat per channel
	Mat img01 = planes[0];
	Mat img02 = planes[1];
	Mat img03 = planes[2];
	Mat mer;
	merge(planes, 3, mer); // inverse of split
}
//图像的像素统计 
//计算像素最大值最小值
void test19(){
	float a[12] = { 1,2,3,4,5,10,6,7,8,9,10,0};
	Mat img = Mat(3, 4, CV_32FC1, a);
	Mat imgs = Mat(2, 3, CV_32FC2, a);
	double minval, maxval;
	Point minLoc, maxLoc;
	// 单通道
	minMaxLoc(img, &minval, &maxval, &minLoc, &maxLoc);//必须是单通道图像
	cout << minval << " " << maxval << endl;
	cout << minLoc << " " << maxLoc << endl;


	//多通道的话，要先转为单通道的.结果类似“矩阵展开”
	Mat imgs_re = imgs.reshape(1, 4);
	minMaxLoc(imgs_re, &minval, &maxval, &minLoc, &maxLoc);
	cout << minval << " " << maxval << endl;
	cout << minLoc << " " << maxLoc << endl;
}
//计算像素平均值和标准差 分别表示整体亮度和明暗变化
void test20() {
	float a[12] = { 1,2,3,4,5,10,6,7,8,9,10,0 };
	Mat img = Mat(3, 4, CV_32FC1, a);
	Mat imgs = Mat(2, 3, CV_32FC2, a);
	Scalar myMean;

	//用mean函数求平均值
	myMean = mean(imgs); //分别求出每个通道的平均值
	cout << "imgs平均值 = " << myMean << endl;
	cout << "imgs第一通道平均值 = " << myMean[0] << endl;
	cout << "imgs第二通道平均值 = " << myMean[1] << endl;

	//用meanStdDev函数同时求平均值和标准差
	Mat myMeanMat, myStddevMat;
	meanStdDev(img, myMeanMat, myStddevMat);
	cout << myMeanMat << " " << myStddevMat << endl;
	meanStdDev(imgs, myMeanMat, myStddevMat);
	cout << myMeanMat << " " << myStddevMat << endl;
}
//俩图像间的像素操作
//两幅图像的比较运算
// Element-wise comparison (max/min) between two images, and min() used
// as a masking operation.
void test21() {
	// The same 12 floats back both views: a 3x4 single-channel Mat and a
	// 2x3 two-channel Mat (the Mats alias these stack arrays).
	float a[12] = {1, 2, 3.3, 1, 5, 9, 5, 7, 8.2, 9, 10, 2};
	float b[12]{ 1, 2.2, 3, 1, 3, 10, 6, 7, 8, 9.3, 10, 1.1 };
	Mat imga = Mat(3, 4, CV_32FC1, a);
	Mat imgb = Mat(3, 4, CV_32FC1, b);
	Mat imgas = Mat(2, 3, CV_32FC2, a);
	Mat imgbs = Mat(2, 3, CV_32FC2, b);
	// Element-wise max/min of two single-channel matrices.
	Mat myMax, myMin;
	max(imga, imgb, myMax);
	min(imga, imgb, myMin);
	
	// The same operations also work channel-wise on multi-channel matrices.
	Mat myMaxs, myMins;
	max(imgas, imgbs, myMaxs);
	min(imgas, imgbs, myMins);

	// min() against a mask image: where the mask is white the source
	// pixel survives, where it is black the result is black.
	Mat srcl = Mat::zeros(Size(512, 512), CV_8UC3);
	Mat img0(512,512, CV_8UC3, Scalar(114, 113, 123));
	Rect rect(100, 100, 300, 300);
	srcl(rect) = Scalar(255, 255, 255); // white 300x300 rectangle inside a black mask
	Mat comsrcl, comsrc2;
	min(img0, srcl, comsrcl);
	imshow(" comsrc1", comsrcl);
	Mat src2 = Mat(512, 512, CV_8UC3, Scalar(0, 0, 255)); // mask that passes only the red channel
	min(img0, src2, comsrc2);
	imshow(" comsrc2 ", comsrc2);
}
//图像间的逻辑运算、
void test22() {
	Mat img0 = Mat::zeros(200, 200, CV_8UC1);
	Mat img1 = Mat::zeros(200, 200, CV_8UC1);
	Rect rect0(20, 20, 100, 100);
	img0(rect0) = Scalar(255);
	Rect rect1(100, 100, 100, 100);
	img1(rect1) = Scalar(255);
	
	Mat myAnd, myOr, myXor, myNot;
	bitwise_not(img0,myNot);
	bitwise_and(img0,img1, myAnd);
	bitwise_or(img0, img1,myOr);
	bitwise_xor(img0, img1,myXor);
}
//图像二值化
// Image thresholding (binarization) — placeholder.
void test24() {
	// See threshold() for a global threshold and adaptiveThreshold() for
	// a locally computed one.
}
//LUT，灰度值映射表
void test25() {
	//LUT 函数用于实现图像像素灰度值的 LUT 查找表功能、
	
	//LUT第一层
		//创建一个1*255的查找表，
		//第一个元素的值代表 灰度值为0时，转为的值、
		//第二个元素的值代表灰度值为1时，转为的值.以此类推
	uchar lutFirst[256];
	for (int i = 0; i < 256; ++i) {
		if (i <= 100) {
			lutFirst[i] = 0;
		}
		else if (i <= 200) {
			lutFirst[i] = 100;
		}
		else {
			lutFirst[i] = 255;
		}
	}
	Mat lutOne(1, 256, CV_8UC1, lutFirst); 
	//LUT第二层
	uchar lutSecond[256];
	for (int i = 0; i < 256; ++i) {
		if (i <= 100) {
			lutSecond[i] = 0;
		}
		else if (i <= 150) {
			lutSecond[i] = 100;
		}
		else if (i <= 200) {
			lutSecond[i] = 150;
		}
		else {
			lutSecond[i] = 255;
		}
	}
	Mat lutTwo(1, 256, CV_8UC1, lutSecond);
	//LUT第三层
	uchar lutThird[256];
	for (int i = 0; i < 256; ++i) {
		if (i <= 100) {
			lutThird[i] = 0;
		}
		else if (i <= 200) {
			lutThird[i] = 100;
		}
		else {
			lutThird[i] = 255;
		}
	}
	Mat lutThree(1, 256, CV_8UC1, lutThird);

	//拥有三通道的LUT矩阵
	vector<Mat> mergeMats{lutOne,lutTwo,lutThree};
	Mat LutTree;
	merge(mergeMats, LutTree);

	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\lanbojini.jpg");
	//Mat img(300,300, CV_8UC3, Scalar(14, 13, 123));
	Mat gray, out0, out1, out2;
	cvtColor(img, gray, COLOR_BGR2GRAY);
	LUT(gray, lutOne, out0);
	LUT(img, lutOne, out1);
	LUT(img, LutTree, out2);
}
//图像变化
//图像连接
// Image concatenation — placeholder.
void test26() {
	// hconcat joins images horizontally; vconcat joins them vertically.
}
//图像尺寸变化
// Resizing with different interpolation methods.
// Bug fix: cv::resize is resize(src, dst, dsize, fx = 0, fy = 0,
// interpolation = INTER_LINEAR). The original passed the interpolation
// flag as the fifth argument (fy), so every call silently used the
// default INTER_LINEAR; the flag belongs in the sixth position.
void test27() {
	string path = "C:\\Users\\blackpoor\\Desktop\\opencv\\lanbojini.jpg";
	Mat gray = imread(path,IMREAD_GRAYSCALE);
	if (gray.empty()) { // robustness: resize throws on an empty source
		return;
	}
	Mat small0,big0, big1, big2;
	// When dsize is non-zero, fx/fy are ignored — pass 0 for both.
	resize(gray, small0, Size(82, 60), 0, 0, INTER_AREA);      // shrink (area averaging)
	resize(small0, big0, Size(164, 120), 0, 0, INTER_NEAREST); // nearest-neighbor
	resize(small0, big1, Size(164, 120), 0, 0, INTER_LINEAR);  // bilinear
	resize(small0, big2, Size(164, 120), 0, 0, INTER_CUBIC);   // bicubic
}
//图像翻转变化
// Image flipping around each axis.
void test28() {
	string path = "C:\\Users\\blackpoor\\Desktop\\opencv\\lanbojini.jpg";
	Mat img = imread(path);
	// Robustness fix: flip() throws on an empty Mat when the file is missing.
	if (img.empty()) {
		return;
	}
	Mat img_x, img_y, img_xy;
	flip(img, img_x, 0);   // flag == 0: mirror around the x axis
	flip(img, img_y, 1);   // flag  > 0: mirror around the y axis
	flip(img, img_xy, -3); // flag  < 0 (any negative): mirror around both axes
}
//图像仿射变化（图像旋转
// Affine transforms: rotation about the center, and a warp defined by
// three point correspondences.
// Bug fix: cv::Size is (width, height) = (cols, rows) and cv::Point2f
// is (x, y) with x horizontal; the original swapped rows/cols in the
// output size, the rotation center, and every correspondence point,
// which is wrong for any non-square image.
void test29() {
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\lanbojini.jpg");
	if (img.empty()) { // robustness: warpAffine throws on empty input
		return;
	}
	Mat rotation0, rotation1, img_warp0, img_warp1;
	double angle = 30; // rotation angle in degrees
	Size dst_size(img.cols, img.rows);               // output size: (width, height)
	Point2f center(img.cols / 2.0f, img.rows / 2.0f); // rotation center: (x, y)
	rotation0 = getRotationMatrix2D(center, angle, 1); // 2x3 affine matrix; 3rd arg is uniform scale
	warpAffine(img, img_warp0, rotation0, dst_size);
	Point2f before[3]{ // three source corners: top-left, bottom-left, bottom-right
		Point2f(0, 0),
		Point2f(0, (float)(img.rows - 1)),
		Point2f((float)(img.cols - 1), (float)(img.rows - 1)) };
	Point2f after[3]{ // where those corners should land
		Point2f((float)(img.cols) * 0.11f, (float)(img.rows) * 0.20f),
		Point2f((float)(img.cols) * 0.15f, (float)(img.rows) * 0.70f),
		Point2f((float)(img.cols) * 0.81f, (float)(img.rows) * 0.85f) };
	rotation1 = getAffineTransform(before, after); // affine matrix from the 3-point correspondence
	warpAffine(img, img_warp1, rotation1, dst_size);


}
//图像透射变换
// Perspective (projective) transform: rectify a photographed QR code.
void test30() {
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\erweima.jpg");
	Mat img2;
	// Four measured corner points in the source photo...
	Point2f before[4]{ Point2f(65, 256), Point2f(355, 259), Point2f(3, 423), Point2f(439, 432) };
	// ...and where those corners should land in the rectified output.
	Point2f after[4]{ Point2f(100, 101), Point2f(346, 100), Point2f(101, 337), Point2f(346, 337) };
	Mat rotation = getPerspectiveTransform(before, after); // 3x3 perspective matrix
	warpPerspective(img, img2, rotation, img.size());      // output keeps the input size
}
//极坐标变换
// Polar-coordinate remapping (linear, inverse, and log-polar).
// Bug fix: cv::Point2f is (x, y) and cv::Size is (width, height); x and
// width come from cols, y and height from rows — the original had every
// pair swapped, which is wrong for non-square images.
void test31() {
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\b57ca32c06a96e9d2653549a997ad3f0.png");
	if (img.empty()) { // robustness: warpPolar throws on empty input
		return;
	}
	Mat dst0,dst1,dst2;
	Point2f center(img.cols / 2.0f, img.rows / 2.0f);
	warpPolar(img, dst0, Size(300, 600), center, center.x, WARP_POLAR_LINEAR);             // linear polar transform
	warpPolar(dst0, dst1, Size(img.cols, img.rows), center, center.x, WARP_INVERSE_MAP);   // inverse transform
	warpPolar(img, dst2, Size(img.cols, img.rows), center, center.x, WARP_POLAR_LOG);      // log-polar transform
}
//图像上绘制几何图形
// Drawing primitives on a white canvas.
void test32() {

	Mat img(512, 512, CV_8UC3, Scalar(255,255,255));

	// circle: center, radius, color, thickness (negative = filled),
	// line type, and number of fractional bits in center/radius.
	circle(img, Point(256, 256), 155, getARandScalar(),2,FILLED);

	// rectangle: either a Rect, or the top-left and bottom-right corners.
	rectangle(img, Point(130, 226), Point(382, 286), getARandScalar(), 3); // last arg is thickness

	// line: two endpoints, color, thickness.
	line(img, Point(130, 456), Point(382, 296), getARandScalar(), 3);

	// ellipse: image, center, half-axes (the ellipse fits that box),
	// rotation angle, start angle, end angle, color, thickness.
	ellipse(img, Point(252, 98), Size(100, 14), 0, 0, 135, getARandScalar(), 2);

	// putText: image, text, bottom-left origin, font, scale, color, thickness.
	// Bug fix: thickness is an int — the original passed 0.5, which
	// truncates to 0 and violates the drawing code's positive-thickness
	// requirement; use 1.
	putText(img, "it is so hard for me", Point(137, 262), 3, 0.5, getARandScalar(), 1);

	// fillPoly: draw several filled polygons in one call.
	Point shapes1[4]{ Point(10, 10), Point(40, 40), Point(40,10), Point(10,40) }; // quadrilateral
	Point shapes2[3]{ Point(50,40),Point(60,60),Point(40,70) }; // triangle
	Point shapes3[2]{ Point(0,0),Point(250,250) }; // degenerate polygon (a line)
	// Bug fix: the pointer and count arrays were declared with size 5 but
	// only 3 contours exist, leaving null pointers / zero counts trailing
	// the data; size them exactly to the contour count.
	const Point* p[3]{ shapes1,shapes2,shapes3 };
	int nums[3]{ 4,3,2 };
	fillPoly(img, p, nums, 3, getARandScalar(),LINE_8);
}
//ROI
// Region-of-interest extraction and deep copies.
void test33() {
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\opencv_s_p\\image_about_card.jpg");
	// Cropping with a Rect yields a view that shares pixels with the source.
	Rect roi(351, 84, 26, 28);
	Mat img2 = img(roi);
	Mat img_copy1, img_copy2;
	img.copyTo(img_copy1);       // deep copy (member form)
	copyTo(img, img_copy2, img); // deep copy (free function); 3rd arg is a mask —
	                             // only positions where the mask is nonzero are copied
}
//图像金字塔
// Gaussian and Laplacian image pyramids via pyrDown/pyrUp.
void test34() {
	Mat img = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\opencv_s_p\\image_about_card.jpg");
	Mat img_pyr1, img_pyr2,img_resizeDown,img_resizeUp;

	// Downsampling: each pyrDown halves the size and blurs.
	pyrDown(img, img_pyr1);     // one level down
	pyrDown(img_pyr1, img_pyr2);// two levels down; similar outcome to resize, different internals
	resize(img, img_resizeDown, img_pyr1.size()); // resize to the same size, for comparison

	// Upsampling: each pyrUp doubles the size.
	Mat img_x, img_pyr1_x;
	pyrUp(img_pyr1, img_x); // img_pyr1 upsampled back toward the original size
	pyrUp(img_pyr2, img_pyr1_x);
	resize(img_pyr1, img_resizeUp, img_x.size());

	Mat img_lap_0, img_lap_1;
	resize(img_x, img_x, img.size()); // make sizes match exactly before subtracting
	resize(img_pyr1_x, img_pyr1_x, img_pyr1.size());
	img_lap_0 = img - img_x;  
	img_lap_1 = img_pyr1 - img_pyr1_x;

	vector<Mat> Gauss{ img,img_pyr1,img_pyr2 };// Gaussian pyramid: level 0 is the original, each level shrinks
	vector<Mat> Lap{ img_lap_0,img_lap_1};// Laplacian pyramid: level i = Gaussian level i minus the upsampled level i+1
	
}
//窗口交互操作
//图像窗口滑动条
namespace test35 {
	// Globals so the trackbar callback can reach the shared state.
	int value;            // current slider position (100 == unchanged brightness)
	void callBack(int, void*); // trackbar callback, defined below
	Mat img1, img2;       // source image and brightness-adjusted result
	// Demonstrate a trackbar attached to an image window.
	void test35() {
		// First approach (disabled): poll the bound variables in a busy
		// loop — the program has to block here and cannot continue.
		{/*int val0 = 0, val1 = 0, val2 = 0;
		namedWindow("TrackBars");
		createTrackbar("1n", "TrackBars", &val0, 256);
		createTrackbar("2n", "TrackBars", &val1, 256);
		createTrackbar("3n", "TrackBars", &val2, 256);
		Scalar a(val0, val1, val2);
		Mat img(400, 400, CV_8UC3);
		while (true) {
			imshow("img", img);
			waitKey(1);
		}*/}
		// Second (usual) approach: register a callback; it needs the
		// callback function plus globally accessible state.
		img1 = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\opencv_s_p\\image_about_card.jpg");
		namedWindow("img");
		imshow("img", img1);
		value = 100;
		createTrackbar("light", "img", &value, 600, callBack,0);// the callback fires on every slider move
		waitKey();
	}
	// Rescale brightness by the slider value and refresh the window.
	static void callBack(int, void*) {
		float a = value / 100.0;
		img2 = img1 * a;
		imshow("img", img2);
	}	
}
//鼠标响应
namespace test36 {
	Point perPoint; // last mouse position; start point of the next line segment
	Mat img1;       // canvas being drawn on
	int times;      // NOTE(review): unused in the visible code
	void mouse(int event, int x, int y, int flags, void*); // mouse callback, defined below
	// Demonstrate freehand drawing driven by mouse events.
	void test36() {
		img1 = imread("C:\\Users\\blackpoor\\Desktop\\opencv\\opencv_s_p\\image_about_card.jpg");
		
		imshow("imgDraw", img1);
		setMouseCallback("imgDraw", mouse);
		// Every mouse event over window "imgDraw" invokes mouse() with
		// arguments describing the event type, position, and button flags.
		waitKey(0);
	}
	void mouse(int event, int x, int y, int flags, void*) {
		if (event == EVENT_MOUSEMOVE && flags == EVENT_FLAG_LBUTTON) {// moving with the left button held down
			// Connect the previous position to the current one.
			Point pt(x, y);
			line(img1, perPoint, pt, getARandScalar(), 3);
			perPoint = pt; 
		}
		// Track the cursor even when not drawing, so the next stroke
		// starts from where the cursor currently is.
		perPoint = Point(x, y);
		imshow("imgDraw", img1);
	}
}

// Entry helper: seed the RNG once, then run the selected demo.
void test001() {
	srand((unsigned)time(NULL));
	test12();
	

}