#include <iostream>
#include "opencv2/opencv.hpp"
#include <ctime>
#include <cstdio>
#include <cstdlib>
#include <string>

// Window title and on-disk asset/model paths used throughout the demo.
#define MAIN_WINDOW "实时人脸识别系统"
#define WATER_MARK_PATH "./watermark.png"
#define YUNET_ONNX_PATH "./models/yunet.onnx"
#define IMAGE_PATH "./images/" // remember to keep the trailing '/' on the folder path
#define VIDEO_PATH "./videos/" // remember to keep the trailing '/' on the folder path
#define PICTURE_PATH "./labixiaoxin.jpg"

using namespace std;
using namespace cv;

// Mouse callbacks: onMouse is a (currently disabled) freehand-draw handler;
// dragPicture moves the overlay picture to the mouse position.
void onMouse(int event, int x, int y, int flags, void *userdata);
void dragPicture(int event, int x, int y, int flags, void *userdata);

// Scratch canvas for the disabled "mouse" drawing window (see onMouse).
Mat img2(480, 640, CV_8UC3, Scalar(122, 122, 0));
// Top-left corner of the draggable overlay picture (written by dragPicture,
// read and clamped each frame in main).
int picture_x = 100, picture_y = 100;

int main()
{
	// Real-time face detection demo: grabs webcam frames, runs the YuNet DNN
	// face detector, draws overlays (watermark, detections, draggable picture,
	// animated shapes), and supports snapshots (space) and toggled video
	// recording ('v').  Press 'q' to quit.
	VideoCapture camera(0);
	if (!camera.isOpened())
	{
		cerr << "摄像头打开失败" << endl;
		return 1;
	}

	Mat img, watermark, faces;
	int r = 255, flag = 0, cnt = 0, red = 0, green = 0, blue = 0, x, y;
	int photo_cnt = 1, video_cnt = 1;
	int face_x, face_y, face_w, face_h;
	char key;
	x = 280, y = 260; // initial position of the color-cycling rectangle
	string image_path, video_path;
	char image_name[100], video_name[100];
	int i = 0;
	int record_flag = 0; // was 1: caused writes to an unopened VideoWriter and
						 // a false "Recording..." indicator before 'v' was pressed
	int cc, fps, cw, ch;
	time_t t1 = 0, t2 = 0;

	cc = VideoWriter::fourcc('m', 'p', '4', 'v');
	fps = (int)camera.get(CAP_PROP_FPS);
	if (fps <= 0)
		fps = 30; // some capture backends report 0; fall back to a sane default
	cw = (int)camera.get(CAP_PROP_FRAME_WIDTH);
	ch = (int)camera.get(CAP_PROP_FRAME_HEIGHT);

	namedWindow(MAIN_WINDOW);
	setMouseCallback(MAIN_WINDOW, dragPicture);

	VideoWriter v;
	// YuNet face detector; each output row is: face x, y, w, h, then the
	// right-eye, left-eye, nose, right/left mouth-corner coordinates.
	Ptr<FaceDetectorYN> fd = FaceDetectorYN::create(YUNET_ONNX_PATH, "", Size(320, 320));

	watermark = imread(WATER_MARK_PATH);
	Mat picture = imread(PICTURE_PATH);

	srand(time(NULL));

	while (1)
	{
		if (!camera.read(img))
		{
			cerr << "摄像头打开失败" << endl;
			return 1;
		}

		fd->setInputSize(img.size()); // detector must know the frame size
		fd->detect(img, faces);		  // one row per detected face (see above)

		// Blend the watermark only when it matches the frame exactly;
		// addWeighted aborts on a size/type mismatch (e.g. missing file).
		if (!watermark.empty() && watermark.size() == img.size() && watermark.type() == img.type())
			addWeighted(watermark, 0.8, img, 1.0, 0, img);

		putText(img, "FaceTime", Point(230, 40), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 255, 255));

		for (i = 0; i < faces.rows; i++) // draw every detected face
		{
			face_x = faces.at<float>(i, 0);
			face_y = faces.at<float>(i, 1);
			face_w = faces.at<float>(i, 2);
			face_h = faces.at<float>(i, 3);
			rectangle(img, Rect(face_x, face_y, face_w, face_h), Scalar(100, 100, 255), 2);
			// columns 8,9 hold the nose-tip coordinates
			circle(img, Point(faces.at<float>(i, 8), faces.at<float>(i, 9)), 5, Scalar(255, 0, 0), -1);
		}

		// Overlay the draggable picture, clamping BOTH bounds so the ROI stays
		// inside the frame (the old code only clamped the upper bound and
		// crashed when the picture did not fit or failed to load).
		if (!picture.empty() && picture.cols <= img.cols && picture.rows <= img.rows)
		{
			picture_x = std::max(0, std::min(picture_x, img.cols - picture.cols));
			picture_y = std::max(0, std::min(picture_y, img.rows - picture.rows));
			Mat roi = img(Rect(picture_x, picture_y, picture.cols, picture.rows));
			picture.copyTo(roi);
		}

		// Decorative triangle and bounding square.
		line(img, Point(320, 100), Point(160, 380), Scalar(255, 0, 0), 2);
		line(img, Point(320, 100), Point(480, 380), Scalar(0, 255, 0), 2);
		line(img, Point(160, 380), Point(480, 380), Scalar(0, 0, 255), 2);
		rectangle(img, Rect(140, 60, 360, 360), Scalar(255, 255, 0), 2);

		// Re-roll the rectangle color once every 10 frames.
		if (cnt % 10 == 0)
		{
			red = rand() % 256; // % 255 could never produce full intensity
			green = rand() % 256;
			blue = rand() % 256;
		}
		rectangle(img, Rect(x, y, 80, 40), Scalar(red, green, blue), -2);
		cnt++;

		// Breathing circle: radius oscillates between 20 and 255.
		circle(img, Point(320, 240), r, Scalar(0, 255, 255), 2);
		if (r < 20)
			flag = 1;
		if (r > 255)
			flag = 0;
		if (flag)
			r++;
		else
			r--;

		if (record_flag)
		{
			// Write the frame first, THEN draw the indicator, so the
			// "Recording..." text is shown on screen but not baked into the file.
			v.write(img);
			putText(img, "Recording...", Point(230, 450), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(255, 255, 255));
		}

		imshow(MAIN_WINDOW, img);

		key = waitKey(1); // poll keyboard for ~1 ms
		if (key == 'q')
			break;
		// WASD moves the color rectangle, clamped inside the big square.
		if (key == 'w')
		{
			y -= 10;
			if (y < 64)
				y = 64;
		}
		if (key == 's')
		{
			y += 10;
			if (y > 356)
				y = 356;
		}
		if (key == 'a')
		{
			x -= 10;
			if (x < 80)
				x = 80;
		}
		if (key == 'd')
		{
			x += 10;
			if (x > 380)
				x = 380;
		}
		if (key == ' ') // snapshot
		{
			sprintf(image_name, "实时照片%d.jpg", photo_cnt);
			string name_i(image_name);
			image_path = IMAGE_PATH + name_i;

			imwrite(image_path, img);
			photo_cnt++;
		}
		if (key == 'v') // toggle recording: odd press starts, even press stops
		{
			if (video_cnt % 2 != 0)
			{
				sprintf(video_name, "实时录像%d.mp4", video_cnt / 2 + 1);
				string name_v(video_name);
				video_path = VIDEO_PATH + name_v;
				v.open(video_path, cc, fps, Size(cw, ch)); // start recording
				t1 = time(NULL);
				record_flag = 1;
			}
			else
			{
				v.release(); // stop recording, finalize the file
				record_flag = 0;
				t2 = time(NULL);
				cout << "视频时长：" << t2 - t1 << endl;
			}

			video_cnt++;
		}
	}

	return 0;
}

// Freehand-drawing mouse callback for the (disabled) "mouse" window.
// All drawing calls were commented out in the original; the handler now only
// tracks the last cursor position and whether the left button is held down.
void onMouse(int event, int x, int y, int flags, void *userdata)
{
	static int last_x = 0, last_y = 0;
	static int dragging = 0;

	switch (event)
	{
	case EVENT_LBUTTONDOWN:
		// Start a stroke at the press position.
		last_x = x;
		last_y = y;
		dragging = 1;
		break;

	case EVENT_MOUSEMOVE:
		// While the button is held, keep the anchor at the cursor.
		if (dragging)
		{
			last_x = x;
			last_y = y;
		}
		break;

	case EVENT_LBUTTONUP:
		// End of stroke.
		dragging = 0;
		break;
	}
}

// Mouse callback for MAIN_WINDOW: while the left button is held, the overlay
// picture's top-left corner (globals picture_x / picture_y) follows the
// cursor; main() clamps the position into the frame before drawing.
void dragPicture(int event, int x, int y, int flags, void *userdata)
{
	static int dragging = 0;

	switch (event)
	{
	case EVENT_LBUTTONDOWN:
		dragging = 1;
		picture_x = x;
		picture_y = y;
		break;

	case EVENT_MOUSEMOVE:
		if (dragging)
		{
			picture_x = x;
			picture_y = y;
		}
		break;

	case EVENT_LBUTTONUP:
		dragging = 0;
		break;
	}
}
