
#include <algorithm>
#include <functional>
#include <mutex>
#include <thread>

#include "readsense_sample.h"
#include "AI_httpAPI.h"
#include "timer.h"



#define CONFIG_IMAGE_FORMAT     IMAGE_FORMAT_YV12

#define WRITER_VIDEO       0
#define SAVE_VIDEO_RESULT  0
#define SAVE_ACTION_TXT    0
#define PRINT_LOG 1
#define PRINT_TIME 0
#define DIVIDE_PEOPLE 1
#define NO_DIVIDE_PEOPLE 1
#define ORANGE Scalar(0,97,255)//detected face
#define RED Scalar(0,0,255)//recognized face
#define BLUE Scalar(255,0,0)//detected emotion
#define GREEN Scalar(0,255,0)//detected head
#define YELLOW Scalar(0,255,255)
#define PINK Scalar(255,0,255)//recognized action
#define TEXT Scalar(0,0,255)//put text
#define ARINTERVAL_SEC 60//间隔60s统计一次AR数据 default:60
#define FRINTERVAL_SEC 10//间隔300s发送一次fr数据
#define FR_SEC 300 // FR识别5分钟  default:300
#define NOEXIST_SEC 10 //无人的识别时间
#define EXIST_SEC 2 //有人时间
#define DRAW 1
#define DRAW_AR 0
#define DEBUGPATH 1
#define MAX(x,y) ((x)>(y)?(x):(y))



// Bundles the face-recognition and action-recognition engine handles so both
// can be passed around as a single unit.
typedef struct FRARER
{
	std::shared_ptr<rs::fr::FaceRecognizer> fr;
	std::shared_ptr<rs::ar::ActionRecognizer> ar;

}FrArEr;

// Global feature switches. NOTE(review): Recognize::Init also declares a local
// map_model_device that shadows the global one below — confirm which copy is
// authoritative before changing device assignments.
static bool landmark_enable = true; // FR -- true, AR -- false
static bool ar_enable = false; //AR -- true
// allocate the models to the specified MyriadX, VPU_1 = MyriadX 1, VPU_2 = MyriadX 2, ..., VPU_8 = MyriadX 8
std::map<string, string> map_model_device = {    // <network name, device name> device name = CPU / GPU / VPU_1,VPU_2,...,VPU_8
	{ "network_1",  "CPU" },  // network_1  ----  net1_fr   ---------- FR
	{ "network_2",  "CPU" },  // network_2  ----  net2_fr   ---------- FR
	{ "network_3",  "CPU" },  // network_3  ----  net3_fr   ---------- FR
	{ "network_4",  "CPU" },  // network_4  ----  net4_fr   ---------- FR
	{ "network_5",  "CPU" },  // network_5  ----  net5_fr   ---------- FR
	{ "network_6",  "CPU" },  // network_6  ----  net6_fr   ---------- FR
	{ "network_7",  "CPU" },  // network_7  ----  net7_ar   ---------- AR
	{ "network_8",  "CPU" },  // network_8  ----  net8_far  ---------- FR/AR
	{ "network_9",  "CPU" },  // network_9  ----  net9_far  ---------- FR/AR
	{ "network_10", "CPU" },  // network_10 ----  net10_far ---------- FR/AR
	{ "network_11", "CPU" },  // network_11 ----  net11_ar  ---------- AR
};


// Hard-coded development paths
#if DEBUGPATH
static string dll_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin\\cpu_extension.dll";
static string data_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\data";
static string model_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\data\\openvino_lib_data.bin";
static string plugin_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin";
static string path_exe = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin";
static string license_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\license\\35e58492_windows_offline_license_content.lic";
static string authFile = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\license\\35e58492_windows_offline_license_content.lic_auth.txt";
// Relative deployment paths
#else
static string dll_path = ".\\cpu_extension.dll";
static string model_path = "..\\data\\openvino_lib_data.bin";
static string data_path = "..\\data";
static string plugin_path = ".\\";
static string path_exe = ".\\";
static string license_path = "..\\license\\35e58492_windows_offline_license_content.lic";
static string authFile = "..\\license\\35e58492_windows_offline_license_content.lic_auth.txt";
#endif

// Validate and activate the offline ReadSense license.
// license_path: path of the license content file (read fully into memory).
// config:       output — config->hLicense receives the initialized handle.
// Returns 1 on success, -2 for a missing/empty license file, -3 when offline
// authorization or activation fails.
int vaild_license(string license_path, XConfig *config)
{
	FILE* pFile = fopen(license_path.c_str(), "r");
	if (pFile == NULL)
	{
		cout << "license file is not existed" << endl;
		return -2;
	}
	fseek(pFile, 0, SEEK_END);
	long len_q = ftell(pFile);
	fseek(pFile, 0, SEEK_SET);
	if (len_q <= 0)
	{
		cout << "license file is empty" << endl;
		fclose(pFile);  // old code leaked the FILE handle on this path
		return -2;
	}
	// Read the whole file into a dynamically sized buffer; the previous fixed
	// 50000-byte stack buffer overflowed for larger license files.
	std::string strLicenseConent((size_t)len_q, '\0');
	size_t nRead = fread(&strLicenseConent[0], 1, (size_t)len_q, pFile);
	fclose(pFile);
	strLicenseConent.resize(nRead);

	/*---------------- Offline authorization ---------------------------------------------*/
	// Step 1: initialize license handle
	rsInitLicenseManagerV2(&config->hLicense, strLicenseConent.c_str());

	// Step 2: read authorized file content to the authContent value
	char *authContent = NULL;
	int iRet = rsRunLicenseManagerV2AuthOffline(config->hLicense, authFile, &authContent);
	if (0 != iRet)
	{
		return -3;
	}

	// Step 3: activate license
	iRet = rsActivateLicenseManagerV2(config->hLicense, authContent);
	if (1 != iRet)
	{
		cout << "Please make sure it run on the authorized machine who provides the deviceKey." << endl;
		return -3;
	}
	// The old `delete authContent;` sat after `return 1;` and never ran.
	// NOTE(review): authContent is allocated by the SDK — confirm `delete` is
	// the deallocator the SDK expects before shipping.
	delete authContent;
	return 1;
}

// Entry point for an RTSP recognition request: unpack the JSON request body
// into recognition parameters, then build, initialize and run a session.
int _Recognize(json& reqBody)
{
	// Input data: json ==> params
	PARAMS_INPUT params;
	InputData::RTSP_RECOGNIZE(reqBody, params);

	Recognize session(params);
	session.Init();
	session.Start();
	return 0;
}



// Turn the per-track recognition history into an attendance list.
// map_recognition_res: track_id -> all face ids recognized for that track.
// For each track the most frequent face id wins (majority vote); duplicates
// across tracks are removed. Returns the corresponding user names (via
// face_name_map) and dumps the ids to ./attendance_id.txt for debugging.
vector<std::string> Recognize::FR_Analysis(map<int, vector<int>> map_recognition_res)
{
	vector<int> attendance_list;
	// Vote once per track_id; the face id with the most votes is that
	// track's final result.
	for (auto& track_item : map_recognition_res) {
		std::map<int, int> vote_count;
		int max_count = 0;
		int max_face_id = 0;
		for (size_t i = 0; i < track_item.second.size(); i++) {
			int face_id = track_item.second[i];
			int cur_count = ++vote_count[face_id];  // value-initializes to 0 on first sight
			if (cur_count > max_count) {
				max_count = cur_count;
				max_face_id = face_id;
			}
		}
		// De-duplicate: the same person can be assigned several track_ids
		// over the course of the video.
		if (std::find(attendance_list.begin(), attendance_list.end(), max_face_id) == attendance_list.end()) {
			attendance_list.push_back(max_face_id);
		}
	}
	// id ==> name
	vector<std::string> str_attendance_list;
	for (auto id : attendance_list) {
		str_attendance_list.push_back(face_name_map[id]);
	}

	// Debug dump of the attendance ids. The old code dereferenced fp_out
	// without checking fopen's result; skip the dump (but still return the
	// names) when the file cannot be created.
	FILE *fp_out = fopen("./attendance_id.txt", "w");
	if (fp_out != NULL) {
		fprintf(fp_out, "attendance student num: %d \n", (int)attendance_list.size());
		cout << "attendance student num: " << attendance_list.size() << endl;
		sort(attendance_list.begin(), attendance_list.end());
		for (size_t i = 0; i < attendance_list.size(); i++) {
			cout << "face_id: " << attendance_list[i];
			fprintf(fp_out, "face_id: %d \n", attendance_list[i]);
			if (i % 10 == 0 && i != 0) {
				cout << endl;
			}
		}
		fclose(fp_out);
	}

	return str_attendance_list;
}



// Construct a recognition session from request parameters.
// _p is consumed: its contents are swapped into the member `p`.
// The cb_* members are constructed with capacity 32 (circular buffers).
Recognize::Recognize(PARAMS_INPUT& _p)
	:logger(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt"),
	EndFlag(false), EndFlag_ar(false), EndFlag_fr(false),
	cb_frFrame(32), cb_arFrame(32), cb_frInput(32), cb_arInput(32)
{

	// Initialize members: move the caller's parameters into `p` via swap
	swap(p, _p);
	face_name_map.clear();
	// Futures hand the capture thread's fps and the AR "results ready"
	// signal over to the consumer threads.
	future_fps = promise_fps.get_future();
	future_ifSend_AR = promise_ifSend_AR.get_future();


}

int Recognize::Init(bool ifAR, bool debug)
{
	ar_enable = ifAR;

	std::map<string, string> map_model_device = {    // <network name, device name> device name = CPU / GPU / VPU_1,VPU_2,...,VPU_8
		{ "network_1",  "CPU" },  // network_1  ----  net1_fr   ---------- FR
		{ "network_2",  "CPU" },  // network_2  ----  net2_fr   ---------- FR
		{ "network_3",  "CPU" },  // network_3  ----  net3_fr   ---------- FR
		{ "network_4",  "CPU" },  // network_4  ----  net4_fr   ---------- FR
		{ "network_5",  "CPU" },  // network_5  ----  net5_fr   ---------- FR
		{ "network_6",  "CPU" },  // network_6  ----  net6_fr   ---------- FR
		{ "network_7",  "CPU" },  // network_7  ----  net7_ar   ---------- AR
		{ "network_8",  "CPU" },  // network_8  ----  net8_far  ---------- FR/AR
		{ "network_9",  "CPU" },  // network_9  ----  net9_far  ---------- FR/AR
		{ "network_10", "CPU" },  // network_10 ----  net10_far ---------- FR/AR
		{ "network_11", "CPU" },  // network_11 ----  net11_ar  ---------- AR
	};
	string dll_path;
	string data_path;
	string model_path;
	string plugin_path;
	string path_exe;
	string license_path;
	string authFile;
	//初始化注册路径
	if (debug) {
		dll_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin\\cpu_extension.dll";
		data_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\data";
		model_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\data\\openvino_lib_data.bin";
		plugin_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin";
		path_exe = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\bin";
		license_path = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\license\\35e58492_windows_offline_license_content.lic";
		authFile = "D:\\AI\\readsense_FR_AR_R3_V1.0\\readsense_demo\\license\\35e58492_windows_offline_license_content.lic_auth.txt";
	}
	else {
		dll_path = ".\\cpu_extension.dll";
		model_path = "..\\data\\openvino_lib_data.bin";
		data_path = "..\\data";
		plugin_path = ".\\";
		path_exe = ".\\";
		license_path = "..\\license\\35e58492_windows_offline_license_content.lic";
		authFile = "..\\license\\35e58492_windows_offline_license_content.lic_auth.txt";
	}


	//初始化对象
	XConfig config;
	config.map_device.insert(map_model_device.begin(), map_model_device.end());
	config.plugin_path = (char *)plugin_path.c_str();
	config.extend_path = (char *)dll_path.c_str();

	vaild_license(license_path, &config);

	fd::FaceDetector::Builder fd_builder;
	fd_builder.max_num_faces = 75;
	fd_builder.threshold = 0.70;
	fd_builder.threshold_class = 0.15;
	fd_builder.threshold_emotion = 0.5;
	config.data_path = (char *)data_path.c_str();

	fd_fr = fd_builder.Build(config);
	if (ar_enable) {
		fd_ar = fd_builder.Build(config);

		ar::ActionRecognizer::Builder ar_builder;//创建实例
		ar_builder.minDetTimes = 2;
		ar_builder.windowSize = 3;
		ar_builder.threLevel = 50;
		ar_builder.isRaceToRaiseHandMode = false;
		config.data_path = "";
		ar = ar_builder.Build(config);
	}
	//人脸识别实例
	fr::FaceRecognizer::Builder fr_builder;
	fr_builder.threshold_recognition_quality = 0.19;
	fr_builder.threshold_registration_quality = 0.25;
	fr_builder.max_num_registrable_faces = 75;
	fr_builder.threshold_similarity = 0.25;   // 识别结果， 匹配度评分
	fr_builder.recognition_interval = 1;      // 识别间隔    // 设置多少帧
	fr_builder.recognition_quality_filter = false;
	fr_builder.registration_quality_filter = true;
	config.data_path = (char *)model_path.c_str();

	fr = fr_builder.Build(config);
	if (ar_enable) {
		if (fd_fr == NULL || ar == NULL || fr == NULL) {
			tmpSS.str("");
			tmpSS << __LOCATION__;
			logger.ERRORS("Build fd/fr/ar failded", tmpSS.str());
			return -1;
		}
	}
	else {
		if (fd_fr == NULL, fr == NULL) {
			tmpSS.str("");
			tmpSS << __LOCATION__;
			logger.ERRORS("Build fd/fr failded", tmpSS.str());
			return -1;
		}
	}


	//注册特征值
	rs::fr::DetectedInfo face_info;
	int person_id = 1;
	for (auto each_user : p.featureList) {
		uint8_t* featdata = new uint8_t[2048];
		float* ptr = each_user.feature.data();
		if (ptr == NULL) {
			tmpSS.str("");
			tmpSS << __LOCATION__;
			logger.ERRORS("Regester" + each_user.usrId + " feature failed", tmpSS.str());
			continue;
		}
		memcpy(featdata, ptr, 2048);
		std::pair<rs::fr::RegistrationStatus, uint32_t> reg_result =
			fr->Register(featdata, person_id);// 注册图片的特征到fr  注册时传入person_id，传出face_id   每次第一个参数内存会被释放掉
		string name = each_user.usrId;
		face_name_map[reg_result.second] = name;
		person_id++;
	}

	//定义kafka broker
	bkr = "192.168.13.221:9092";
	read_json("..\\resource\\config.json");
	//初始化数据

	return 0;
}

void Recognize::read_json(const string &file_path)
{
	ifstream file(file_path);
	json j = json::parse(file);
	j.at("RECOGNIZE_TIME_1").get_to(this->status1_interval);
	j.at("RECOGNIZE_TIME_0").get_to(this->status0_interval);


}


// Capture thread: decode the RTSP stream into the FR (and optionally AR)
// circular buffers until p.timeInterval seconds of video have been read or
// the stream ends. On every exit path the fps promise is satisfied and the
// consumer end-flags are raised, otherwise _fr/_ar would block forever.
void Recognize::_cap()
{
	VideoCapture cap;

	if (p.rtspUrl.length() == 0) {
		tmpSS.str("");
		tmpSS << __LOCATION__;
		logger.ERRORS("Error: Null input video rtsp!", tmpSS.str());
		// Old code fell through and logged "SUCCESS open VIDEO" even here,
		// and never set EndFlag_fr/EndFlag_ar, leaving the consumers spinning.
		promise_fps.set_value(0);
		EndFlag = true;
		EndFlag_ar = true;
		EndFlag_fr = true;
		return;
	}
	cap.open(p.rtspUrl);
	cv::Mat frame;
	if (!cap.isOpened()) {
		tmpSS.str("");
		string s = "Error: Video " + p.rtspUrl + " can't open!";
		tmpSS << __LOCATION__;
		logger.ERRORS(s, tmpSS.str());
		promise_fps.set_value(0);
		EndFlag = true;
		EndFlag_ar = true;
		EndFlag_fr = true;
		return;
	}
	string s = "SUCCESS open VIDEO " + p.rtspUrl;
	tmpSS.str("");
	tmpSS << __LOCATION__;
	logger.INFO(s, tmpSS.str());
	int frame_width = cap.get(CAP_PROP_FRAME_WIDTH);
	int frame_height = cap.get(CAP_PROP_FRAME_HEIGHT);
	int frame_rate = cap.get(CAP_PROP_FPS);
	int nFrameNum = -1;
	long totalFrames = p.timeInterval * frame_rate;
	promise_fps.set_value(frame_rate);

	while (!EndFlag) {
		// Stop once timeInterval seconds worth of frames have been read.
		if (nFrameNum >= totalFrames) {
			EndFlag = true;
			EndFlag_ar = true;
			EndFlag_fr = true;
			break;
		}
		std::unique_lock<std::mutex> lck(frameMtx);
		TickMeter t1;
		t1.start();
		if (cb_frFrame.full()) {
			// Buffer full: wait up to 40ms for a consumer wakeup, then drop
			// one frame so we keep up with the live stream.
			cond_var_cap.wait_for(lck, std::chrono::duration<double, milli>(40));
			cap >> frame;
		}


		cap >> frame;    // Cap video frame
		nFrameNum++;
		if (frame.empty()) {
			tmpSS.str("");
			tmpSS << __LOCATION__;
			logger.DEBUG("Unkonwn Promble??? cap frame is NULL. End of video", tmpSS.str());
			EndFlag = true;
			break;
		}
		if (frame.cols != frame_width || frame.rows != frame_height) {
			resize(frame, frame, Size(frame_width, frame_height));
		}
		cb_frFrame.push_back(frame);
		if (ar_enable) cb_arFrame.push_back(frame);
		lck.unlock();
		std::this_thread::sleep_for(std::chrono::milliseconds(10));
	}
	// Let the consumer threads exit whichever way the loop ended (the old
	// empty-frame path only set EndFlag, so _fr/_ar spun forever).
	EndFlag_ar = true;
	EndFlag_fr = true;
	tmpSS.str("");
	tmpSS << __LOCATION__;
	logger.DEBUG("Exist Recognize cap return", tmpSS.str());
}

// FR detection thread: pull frames from cb_frFrame, run face/head detection
// (toggling landmark extraction every 3rd frame to keep the display smooth),
// and push the results into cb_frInput for the recognition thread.
void Recognize::_fd_fr()
{
	cv::Mat curFrame;
	std::pair<std::vector<rs::fd::Face>, std::vector<rs::fd::Head>> detect_data;
	DataInput datainput;
	// detection flags
	bool run_model = true;
	bool landmark_enable = true; // FR -- true, AR -- false

	int n = 0;// current frame counter
	while (true) {
		/*------------------- read guard ---------------------------*/
		std::unique_lock<std::mutex> framelck(frameMtx);
		if (cb_frFrame.empty()) {
			if (EndFlag == true) {
				tmpSS.str("");
				tmpSS << __LOCATION__;
				logger.DEBUG("Exist Recognize fd return", tmpSS.str());
				break;
			}
			else
				continue;
		}

		cb_frFrame.front().copyTo(curFrame);
		cb_frFrame.front().copyTo(datainput.frame);
		cb_frFrame.pop_front();

		if (cb_frFrame.empty())
			cond_var_cap.notify_one();// wake the capture thread
		framelck.unlock();

		// Frame skipping: flip landmark_enable every 3rd frame so face
		// recognition is skipped on those frames and playback stays smooth.
		if (n++ % 3 == 0) {
			landmark_enable = !landmark_enable;
		}
		detect_data = fd_fr->Detect(curFrame, IMAGE_FORMAT_RGB24_B8G8R8, run_model, landmark_enable);// most expensive call
		vector<rs::fd::Face> face_detect = detect_data.first;
		vector<rs::fd::Head> head_detect = detect_data.second;
		datainput.fdata = std::make_pair(face_detect, head_detect);


		/*------------------- write guard ---------------------------*/
		// Store the processed frame; drop it when the lock cannot be taken
		// within 40ms.
		unique_lock<timed_mutex> datalck(frInputMtx, chrono::duration<double, milli>(40));
		if (datalck.owns_lock()) {
			cb_frInput.push_back(datainput); // circular buffer overwrites when full
		}
		else {
			continue;
		}
	}
	// Bug fix: the old code called tmpSS.str() with no argument here, which
	// only reads the buffer instead of clearing it.
	tmpSS.str("");
	tmpSS << __LOCATION__;
	logger.INFO("fd_fr thread return", tmpSS.str());
}

// AR detection thread: pull frames from cb_arFrame, run face/head detection
// plus the action-input extraction, and push the results into cb_arInput for
// the action-recognition thread.
void Recognize::_fd_ar()
{
	cv::Mat curFrame;
	std::pair<std::vector<rs::fd::Face>, std::vector<rs::fd::Head>> detect_data;
	DataInput datainput;
	// detection flags
	bool run_model = true;
	bool landmark_enable = true; // FR -- true, AR -- false

	int n = 0;// current frame counter
	while (true) {
		/*------------------- read guard ---------------------------*/
		std::unique_lock<std::mutex> framelck(frameMtx);
		if (cb_arFrame.empty()) {
			if (EndFlag == true) {
				tmpSS.str("");
				tmpSS << __LOCATION__;
				logger.DEBUG("Exist Recognize fd return", tmpSS.str());
				break;
			}
			else
				continue;
		}

		cb_arFrame.front().copyTo(curFrame);
		cb_arFrame.front().copyTo(datainput.frame);
		datainput.timestamp = getTickCount();
		cb_arFrame.pop_front();

		if (cb_arFrame.empty())
			cond_var_cap.notify_one();// wake the capture thread
		framelck.unlock();

		detect_data = fd_ar->Detect(curFrame, IMAGE_FORMAT_RGB24_B8G8R8, run_model, landmark_enable);// most expensive call
		datainput.input = fd_ar->GetActionInput();
		vector<rs::fd::Face> face_detect = detect_data.first;
		vector<rs::fd::Head> head_detect = detect_data.second;
		datainput.fdata = std::make_pair(face_detect, head_detect);


		/*------------------- write guard ---------------------------*/
		// Store the processed frame; drop it when the lock cannot be taken
		// within 40ms.
		unique_lock<timed_mutex> datalck(arInputMtx, chrono::duration<double, milli>(40));
		if (datalck.owns_lock()) {
			cb_arInput.push_back(datainput);
		}
		else {
			continue;
		}
	}
	// Bug fix: the old code called tmpSS.str() with no argument here, which
	// only reads the buffer instead of clearing it.
	tmpSS.str("");
	tmpSS << __LOCATION__;
	logger.INFO("fd_ar thread return", tmpSS.str());
}

// FR consumer thread: pull detection results from cb_frInput, run face
// recognition, accumulate per-track results, and periodically publish the
// attendance list through `res` (waking _kafka_fr via cond_var_sendFR).
void Recognize::_fr(json& res)
{

	int nFrameNumber = 0;
	ostringstream tmp_str; // Showing
	bool run_model = true;
	int fps = future_fps.get();
	int nFrame_status1 = fps * status1_interval;  // frames until the first (on-time) send
	int nFrame_status0 = fps * status0_interval;  // frames between subsequent sends
	// Guard the modulos below against fps == 0 (failed capture) or an unset
	// interval, which would divide by zero.
	if (nFrame_status1 <= 0) nFrame_status1 = 1;
	if (nFrame_status0 <= 0) nFrame_status0 = 1;
	// working data
	Mat showFrame;
	Mat rawFrame;
	vector<rs::fd::Face> face_detect;
	vector<rs::fd::Head> head_detect;
	map<int, vector<int>>map_recognition_res;

	while (true) {
		/*------------------- read guard ---------------------------*/
		if (cb_frInput.empty()) {
			this_thread::sleep_for(chrono::milliseconds(10));
			if (EndFlag_fr == true)
				break;
			else
				continue;
		}

		unique_lock<timed_mutex> datalck(frInputMtx);
		/*------------------- read data ---------------------------*/// take detect_data from the buffer

		cb_frInput.front().frame.copyTo(showFrame);
		swap(face_detect, cb_frInput.front().fdata.first);
		swap(head_detect, cb_frInput.front().fdata.second);
		cb_frInput.pop_front();

		if (cb_frInput.size() <= 0) {// buffer drained, wake the producer
			cond_var_fd.notify_one();
		}
		datalck.unlock();
		showFrame.copyTo(rawFrame);
		nFrameNumber++;

		/*****************************Start Recognize*****************************************/
		// face recognition: detection output becomes recognition input
		std::pair<std::vector<rs::fr::DetectedInfo>, std::vector<rs::fr::DetectedInfo>> recognition_input;
		recognition_input.first.resize(face_detect.size());
		recognition_input.second.resize(head_detect.size());
		for (int m = 0; m < face_detect.size(); m++) {
			recognition_input.first[m].yaw = face_detect[m].yaw_angle;
			recognition_input.first[m].pitch = face_detect[m].pitch_angle;
			recognition_input.first[m].roll = face_detect[m].roll_angle;

			recognition_input.first[m].keypoints = face_detect[m].keypoints;
			recognition_input.first[m].rect = face_detect[m].rect;
			recognition_input.first[m].tracking_id = face_detect[m].tracking_id;
		}
		for (int m = 0; m < head_detect.size(); m++) {
			recognition_input.second[m].keypoints = head_detect[m].keypoints;
			recognition_input.second[m].rect = head_detect[m].rect;
			recognition_input.second[m].tracking_id = head_detect[m].tracking_id;
		}

		std::vector<fr::RecognizedFace> recognition_res = fr->Recognize(showFrame, recognition_input, run_model);// recognition results
		string name = "";

		// NOTE(review): the indexing below assumes recognition_res lists
		// faces first, then heads, aligned with recognition_input — confirm
		// against the SDK documentation.
		for (int i = 0; i < recognition_res.size(); i++) {
			bool face_flag = true;
			cv::Rect det_rect;
			int cur_track_id = -1;
			if (i < recognition_input.first.size()) {
				det_rect = recognition_input.first[i].rect;
				cur_track_id = recognition_input.first[i].tracking_id;
			}
			else {
				face_flag = false;
				cur_track_id = recognition_input.second[i - recognition_input.first.size()].tracking_id;
				det_rect = recognition_input.second[i - recognition_input.first.size()].rect;
			}

			if (recognition_res[i].status == rs::fr::RecognitionStatus::SUCCESS) {
				FACE_INFO cur_face;
				cur_face.face_id = recognition_res[i].person_id;
				cur_face.rect = det_rect;
				map_recognition_res[cur_track_id].push_back(cur_face.face_id);
				// Map face_id back to the registered user name.
				// NOTE(review): this lookup uses recognition_res[i].face_id
				// while the vote list stores person_id — verify they agree
				// with the registration in Init().
				map<int, string>::iterator item;
				item = face_name_map.find(recognition_res[i].face_id);
				if (item != face_name_map.end()) {
					name = item->second;
				}
#if DRAW
				if (name != "") {
					rectangle(showFrame, det_rect, RED, 2);//recognize_face
					cv::putText(showFrame, name, Point(det_rect.x, det_rect.y - 25), 2, 2, TEXT, 2);
				}
#endif
			}
#if DRAW
			else {
				if (face_flag)
					rectangle(showFrame, det_rect, ORANGE, 2);//detect_face
				else
					rectangle(showFrame, det_rect, GREEN, 2);//detect_head
			}
#endif
		}
		// show the current frame number
#if DRAW
		cv::Mat show_frame2 = showFrame.clone();
		tmp_str.str("");  tmp_str << nFrameNumber;
		cv::putText(show_frame2, tmp_str.str(), Point(5, show_frame2.rows - 5), 2, 3, CV_RGB(0, 0, 255), 4);

		if (show_frame2.cols > 1280) {
			resize(show_frame2, show_frame2, Size(1280, 720));
		}
		imshow("fr_Recognition ", show_frame2);
		cv::waitKey(1);
#endif

		// Publish results once the interval is reached. Bug fix: this block
		// used to sit inside the #if DRAW section, so building with DRAW=0
		// silently disabled all kafka sends.
		if (ifInRegTime && (nFrameNumber%nFrame_status1 == 0)) {
			vector<string> attendance = FR_Analysis(map_recognition_res);
			res = attendance;
			map_recognition_res.clear();
			cond_var_sendFR.notify_one();
		}
		else if (nFrameNumber%nFrame_status0 == 0) {
			vector<string> attendance = FR_Analysis(map_recognition_res);
			res = attendance;
			map_recognition_res.clear();
			cond_var_sendFR.notify_one();
		}
	}
	EndKafka_fr = true;
	cond_var_sendFR.notify_one();  // wake kafka_fr so it can exit
}

// AR/ER consumer thread: pull detection results from cb_arInput, count smiles
// (each track at most once per interval) and stand-up / raise-hand actions,
// snapshot the counters every ARINTERVAL_SEC seconds, and emit the
// accumulated results as JSON through `res` before signalling _kafka_ar.
void Recognize::_ar(json& res)
{

	Mat showFrame, rawFrame;
	vector<Box> ar_input;
	int nFrameNumber = 0;
	int fps = future_fps.get();
	if (fps <= 0) fps = 1;  // guard the divisions/modulo below when capture failed

	int ARanalyInter = fps * ARINTERVAL_SEC;  // snapshot counters every ARINTERVAL_SEC seconds

	float thr = 0.5; // emotion (smile) threshold
	ostringstream tmp_str; // Showing

	vector<rs::fd::Face> face_detect;
	vector<rs::fd::Head> head_detect;

	// emotion (ER) accumulators
	vector<int> er_time;
	vector<int> er_count;
	vector<string> er_type;
	set<int>er_trackIDset;
	static int emotion_count = 0;

	// action (AR) accumulators
	vector<int> ar_time;
	vector<int> ar_count;
	vector<string> ar_type;
	static int hands_count = 0;
	static int stand_count = 0;

	while (true) {
		/*------------------- read guard ---------------------------*/
		std::this_thread::sleep_for(std::chrono::milliseconds(10));
		if (cb_arInput.empty()) {
			if (EndFlag_ar == true)
				break;
			else
				continue;
		}
		unique_lock<timed_mutex> dataLck(arInputMtx);
		// take detect_data from the buffer
		auto datainput = cb_arInput.front();
		// Bug fix: the old code popped cb_frInput here, corrupting the FR
		// queue and never draining the AR queue.
		cb_arInput.pop_front();
		dataLck.unlock();

		// prepare data
		datainput.frame.copyTo(rawFrame);
		datainput.frame.copyTo(showFrame);
		swap(face_detect, datainput.fdata.first);
		swap(head_detect, datainput.fdata.second);
		ar_input.assign(datainput.input.begin(), datainput.input.end());

		nFrameNumber++;

		/***************************** ER -- class *****************************************/
		int smile_count = 0;
		for (int i = 0; i < face_detect.size(); ++i) {
#if DRAW
			if (face_detect[i].emotion > thr) {
				rectangle(showFrame, face_detect[i].rect, BLUE, 2);
			}
#endif
			// count each smiling track only once per interval
			if (face_detect[i].emotion > thr  && er_trackIDset.insert(face_detect[i].tracking_id).second == 1) {
				smile_count++;
			}

		}
		emotion_count += smile_count;
		/***************************** Action Recognize *****************************************/
		std::pair<std::vector<rs::ar::XActionEventResult>, std::vector<rs::ar::XActionFrameResult>> action_out_result =
			ar->Recognier(rawFrame, ar_input, nFrameNumber);
		string action_type = "";
		std::vector<rs::ar::XActionEventResult> action_result = action_out_result.first;

		for (int i = 0; i < action_result.size(); i++) {
#if DRAW
			rectangle(showFrame, cv::Point(action_result[i].rtActionRects.left, action_result[i].rtActionRects.top),
				cv::Point(action_result[i].rtActionRects.right, action_result[i].rtActionRects.bottom),
				PINK, 2);// outline the detected action
						 // 0: stand up, 1: raise hand
#endif
			if (action_result[i].actionTypes == 0) {
				action_type = "stand";
				stand_count++;
			}
			else {
				action_type = "hands";
				hands_count++;
			}
#if DRAW
			if (action_result[i].track_id > 0) {
				int cur_track_id = action_result[i].track_id;

				cv::Rect data;
				data.x = action_result[i].rtHeadRects.left;
				data.y = action_result[i].rtHeadRects.top;
				data.width = action_result[i].rtHeadRects.right - action_result[i].rtHeadRects.left;
				data.height = action_result[i].rtHeadRects.bottom - action_result[i].rtHeadRects.top;

				// Bug fix: `std:string` (single colon) parsed as a label.
				std::string tmpStr = std::to_string(action_result[i].track_id) + action_type;
				cv::putText(showFrame, tmpStr,
					Point(action_result[i].rtHeadRects.left, action_result[i].rtHeadRects.top - 15)
					, 2, 2, PINK, 2);
			}
#endif

		}
		if (nFrameNumber%ARanalyInter == 0) {
			// emotion checkpoint
			er_time.push_back(nFrameNumber / fps);
			er_count.push_back(emotion_count);
			er_type.push_back("smile");
			er_trackIDset.clear();
			emotion_count = 0;

			// action checkpoint
			ar_time.push_back(nFrameNumber / fps);//hands
			ar_time.push_back(nFrameNumber / fps);//stands
			ar_type.push_back("hands");
			ar_type.push_back("stand");
			ar_count.push_back(hands_count);
			ar_count.push_back(stand_count);
			hands_count = 0;
			stand_count = 0;
		}

#if DRAW
		cv::Mat show_frame2 = showFrame.clone();
		tmp_str.str("");  tmp_str << nFrameNumber;
		cv::putText(show_frame2, tmp_str.str(), Point(5, show_frame2.rows - 5), 2, 3, CV_RGB(0, 0, 255), 4);

		if (show_frame2.cols > 1280) {
			resize(show_frame2, show_frame2, Size(1280, 720));
		}
		imshow("arer_Recognize", show_frame2);
		cv::waitKey(1);
#endif

	}

	//ER result
	json er_res;
	er_res["time"] = er_time;
	er_res["count"] = er_count;
	er_res["type"] = er_type;
	res["emotion"] = er_res;
	//AR result
	json ar_res;
	ar_res["time"] = ar_time;
	ar_res["count"] = ar_count;
	ar_res["type"] = ar_type;
	res["action"] = ar_res;

	promise_ifSend_AR.set_value(true);
}



void Recognize::Start()
{
	//启动人脸识别
	json res_fr, res_ar;
	thread Cap(std::bind(&Recognize::_cap, this));
	thread Fd_fr(std::bind(&Recognize::_fd_fr, this));
	thread Fr(std::bind(&Recognize::_fr, this, ref(res_fr)));
	thread Kafka(std::bind(&Recognize::_kafka_fr, this, ref(res_fr)));

	Cap.join();
	Fd_fr.join();
	Fr.join();
	Kafka.join();
}

// Kafka sender for FR results: wait on cond_var_sendFR, publish the current
// attendance list (res_fr) to the "ai-web-attendance" topic, and exit when
// _fr raises EndKafka_fr. The first send carries status=1 (on time), all
// subsequent sends status=0.
void Recognize::_kafka_fr(json &res_fr)
{

	cout << "wait to send kafka ......" << endl;
	mutex mtx0;  // local mutex, only used for the condition-variable wait
	string top_FR = "ai-web-attendance";
	KafkaProducer *Kafkapr_FR = new KafkaProducer(bkr, top_FR);
	Kafkapr_FR->Init();

	while (true) {
		unique_lock<mutex> lck(mtx0);
		// NOTE(review): no predicate — a notify fired before this wait is
		// lost; consider a flag-guarded wait to close the race.
		cond_var_sendFR.wait(lck);
		if (!res_fr.empty() && !EndKafka_fr) {  // nothing recognized => res_fr empty => skip the send
			json resJson_fr;
			resJson_fr["recognizeResult"] = res_fr;
			resJson_fr["num"] = resJson_fr["recognizeResult"].size();
			resJson_fr["eventId"] = p.eventId;
			resJson_fr["classId"] = p.classId;
			if (ifInRegTime) {
				resJson_fr["status"] = 1;
				ifInRegTime = false;  // first send is status=1, later ones status=0
			}
			else
				resJson_fr["status"] = 0;
			string str_FR = resJson_fr.dump();
			Kafkapr_FR->Send(str_FR);
			tmpSS.str("");
			tmpSS << __LOCATION__;
			logger.INFO("Send FR kafka Success", tmpSS.str());
		}
		else {
			tmpSS.str("");
			tmpSS << __LOCATION__;
			if (EndKafka_fr) {
				logger.ERRORS("kafka_fr return, Recognize finished", tmpSS.str());
				// Bug fix: the producer was leaked here — the old delete sat
				// after the infinite loop and was unreachable.
				delete Kafkapr_FR;
				return;
			}
			continue;
		}

	}

}

// Kafka sender for AR/ER results: wait for _ar to finish (future_ifSend_AR),
// then publish the emotion results to "ai-web-emotion" and the behavior
// results to "ai-web-behavior". Sends nothing when res_ar is empty.
void Recognize::_kafka_ar(json &res_ar)
{
	string emotionTopic = "ai-web-emotion";
	KafkaProducer *emotionProducer = new KafkaProducer(bkr, emotionTopic);
	emotionProducer->Init();
	string behaviorTopic = "ai-web-behavior";
	KafkaProducer *behaviorProducer = new KafkaProducer(bkr, behaviorTopic);
	behaviorProducer->Init();


	cout << "wait to send kafka ......" << endl;
	// Block until _ar has filled res_ar and set the promise.
	future_ifSend_AR.wait();
	bool readyToSend = future_ifSend_AR.get();
	if (readyToSend && !res_ar.empty()) {
		// Split the combined result into one payload per topic.
		json emotionJson;
		json behaviorJson;
		behaviorJson["action"] = res_ar["action"];
		behaviorJson["eventId"] = p.eventId;
		emotionJson["emotion"] = res_ar["emotion"];
		emotionJson["eventId"] = p.eventId;

		emotionProducer->Send(emotionJson.dump());

		behaviorProducer->Send(behaviorJson.dump());

		tmpSS.str("");
		tmpSS << __LOCATION__;
		logger.INFO("Send AR kafka Success", tmpSS.str());
	}

	delete behaviorProducer;
	delete emotionProducer;
}

// State shared by the standalone (non-Recognize) pipeline functions below.
static bool EndFlag;
static mutex mtx0;
int m_totalNoneFrames;
int m_totalOneFrames;
stringstream ll1;
tm* t_tm;


/*------------------ Worker threads of the standalone pipeline -----------------
 * Thread 0 -- Cap: decode the video into the frame buffer
 * Thread 1 -- FD:  pop frames, run head/face detection, push results into the
 *                  frame_arer_input buffer
 * Thread 2 -- AR:  pop detection results and recognize actions
 *                  (stand up / raise hand)
 */
 /*----------------------------------------------------------------------------*/
#define DATA_BUF_LEN    16  // must be a multiple of 2


static std::condition_variable cond_var_cap;
static std::condition_variable cond_var_fd;


//rtsp
boost::circular_buffer<cv::Mat> cb_frame(32);// boost circular buffer as the frame cache (manages itself)
boost::circular_buffer<DataInput> cb_frame_input(64);

static timed_mutex DataTmMtx;
map<string, bool> classClose; // controls switching off the lights of a given classroom
static condition_variable cond_var_sendKafka;


// Standalone capture thread: decode `fileName` into the cb_frame circular
// buffer until the stream ends. The stream's fps is published through the
// `fps` promise on every exit path, so a consumer blocked on the matching
// future can never hang (on error the value is 0).
// Returns 0 on success, -1 for an empty url, -2 when the stream can't open.
static int _cap_rtsp_single(string fileName, promise<int>&fps)
{
#if PRINT_TIME
	std::chrono::system_clock::time_point start_time;
	std::chrono::system_clock::time_point end_time;
	std::chrono::duration<double, milli> duration_time;
	double total_time_cap = 0.0f;
	int count_cap = 0;
#endif
	EndFlag = false;
	VideoCapture cap;
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	if (fileName.length() == 0) {
		ll1.str("");
		ll1 << __LOCATION__;
		logger1.ERRORS("Error: Null input video rtsp!", ll1.str());
		EndFlag = true;
		fps.set_value(0);  // old code returned without satisfying the promise
		return -1;
	}
	else {
		cap.open(fileName);
	}
	cv::Mat frame;
	if (!cap.isOpened()) {
		ll1.str("");
		string s = "Error: Video " + fileName + " can't open!";
		ll1 << __LOCATION__;
		logger1.ERRORS(s, ll1.str());
		EndFlag = true;
		fps.set_value(0);  // old code returned without satisfying the promise
		return -2;
	}
	string s = "SUCCESS open VIDEO " + fileName;
	ll1.str("");
	ll1 << __LOCATION__;
	logger1.INFO(s, ll1.str());
	int frame_width = cap.get(CAP_PROP_FRAME_WIDTH);
	int frame_height = cap.get(CAP_PROP_FRAME_HEIGHT);
	int frame_rate = cap.get(CAP_PROP_FPS);
	int nFrameNum = -1;
	fps.set_value(frame_rate);


#if PRINT_TIME
	start_time = std::chrono::system_clock::now();
#endif
	while (!EndFlag) {
		// write guard
		std::unique_lock<std::mutex> lck(mtx0);
		TickMeter t1;
		t1.start();
		if (cb_frame.full()) {
			// Buffer full: wait up to 40ms for a consumer wakeup, then drop
			// one frame so we keep up with the live stream.
			cond_var_cap.wait_for(lck, std::chrono::duration<double, milli>(40));
			cap >> frame;
		}

		cap >> frame;    // Cap video frame
		nFrameNum++;
		if (frame.empty()) {
			ll1.str("");
			ll1 << __LOCATION__;
			logger1.DEBUG("Unkonwn Promble??? cap frame is NULL. End of video", ll1.str());
			EndFlag = true;
			break;
		}
		if (frame.cols != frame_width || frame.rows != frame_height) {
			resize(frame, frame, Size(frame_width, frame_height));
		}
		cb_frame.push_back(frame);
		lck.unlock();
		std::this_thread::sleep_for(std::chrono::milliseconds(10));
	}
#if PRINT_TIME
	end_time = std::chrono::system_clock::now();
	duration_time = end_time - start_time;
	cout << "_cap_video total time : " << duration_time.count() * 1000 << "s" << endl;
#endif
	ll1.str("");
	ll1 << __LOCATION__;
	logger1.DEBUG("Exist Recognize cap return", ll1.str());
	return 0;
}

static int _face_detect_rtsp_single1(std::shared_ptr<rs::fd::FaceDetector> fd)
{
	// Face-detection thread: consumes raw frames from cb_frame, runs the
	// detector on every 3rd frame (reusing the previous detections in between
	// to keep the display smooth), and pushes the resulting DataInput into
	// cb_frame_input for downstream FR/AR processing.
	// Exits when the capture thread sets EndFlag and the buffer is drained.
#if PRINT_TIME
	std::chrono::system_clock::time_point start_time;
	std::chrono::system_clock::time_point end_time;
	std::chrono::duration<double, milli> duration_time;
#endif
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	cv::Mat FrameShow;
	std::pair<std::vector<rs::fd::Face>, std::vector<rs::fd::Head>> detect_data;
	DataInput datainput;
	bool run_model = true;
	int n = 0;
#if PRINT_TIME
	start_time = std::chrono::system_clock::now(); // was never initialized before; the printed duration was garbage
#endif
	while (true) {
		/*------------------- read-side protection ---------------------------*/
		std::unique_lock<std::mutex> framelck(mtx0);
		if (cb_frame.empty()) {
			if (EndFlag == true) {
				ll1.str("");
				ll1 << __LOCATION__;
				logger1.DEBUG("Exist Recognize fd return", ll1.str());
				break;
			}
			// Nothing to consume yet: release the lock and yield briefly
			// instead of spinning and hammering mtx0 against the capture thread.
			framelck.unlock();
			std::this_thread::sleep_for(std::chrono::milliseconds(1));
			continue;
		}

		cb_frame.front().copyTo(FrameShow);
		cb_frame.front().copyTo(datainput.frame);
		datainput.timestamp = getTickCount();
		cb_frame.pop_front();

		if (cb_frame.empty())
			cond_var_cap.notify_one(); // wake the capture thread if it is waiting on a full buffer
		framelck.unlock();

		// Frame skipping: run the (expensive) detector only every 3rd frame,
		// so rendering stays fluent; intermediate frames reuse detect_data.
		if (n++ % 3 == 0)
			detect_data = fd->Detect(FrameShow, IMAGE_FORMAT_RGB24_B8G8R8, run_model, landmark_enable);

		if (ar_enable)
			datainput.input = fd->GetActionInput();
		vector<rs::fd::Face> face_detect = detect_data.first;
		vector<rs::fd::Head> head_detect = detect_data.second;
		datainput.fdata = std::make_pair(face_detect, head_detect);
		// Hand the processed frame downstream; if the consumer holds the data
		// mutex for more than 80ms, drop this frame rather than stall.
		unique_lock<timed_mutex> datalck(DataTmMtx, chrono::duration<double, milli>(80));
		if (datalck.owns_lock()) {
			cb_frame_input.push_back(datainput);
		}
	}
#if PRINT_TIME
	end_time = std::chrono::system_clock::now();
	duration_time = end_time - start_time;
	// duration_time is in milliseconds; divide to report seconds (label and
	// unit were wrong before — copy-pasted from _cap_rtsp_single).
	cout << "_face_detect total time : " << duration_time.count() / 1000.0 << "s" << endl;
#endif
	return 0;
}

static int _exist_rtsp(std::shared_ptr<rs::fd::FaceDetector> fd, bool& ifExist, future<int>& fps, condition_variable& cond_var_call)//ifExist: true = someone present, false = room empty
{
	// Presence-detection thread: pulls frames from cb_frame, runs face/head
	// detection on each, and debounces the result — ifExist flips to false only
	// after NOEXIST_SEC seconds of consecutive empty frames, and back to true
	// after EXIST_SEC seconds of consecutive non-empty frames.  Every flip
	// notifies cond_var_call so the main thread can publish the change.
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	cv::Mat Frame;
	std::pair<std::vector<rs::fd::Face>, std::vector<rs::fd::Head>> detect_data;
	bool run_model = true;
	int noneFrame = 0;
	int oneFrame = 0;
	bool preIfExist = true; // start from the "occupied" assumption
	int FPS = fps.get();    // blocks until the capture thread publishes the frame rate
	m_totalNoneFrames = FPS * NOEXIST_SEC; // NOEXIST_SEC seconds without a person => "empty"
	m_totalOneFrames = FPS * EXIST_SEC;

	while (true) {
		// Take the lock BEFORE inspecting the shared buffer: the old code
		// checked cb_frame.empty() unlocked and only then locked, racing with
		// the capture thread and risking front() on an empty buffer.
		unique_lock<mutex> framelck(mtx0);
		if (cb_frame.empty()) {
			if (EndFlag == true) {
				ll1.str("");
				ll1 << __LOCATION__;
				logger1.DEBUG("Exist Recognize exsit return", ll1.str());
				break;
			}
			framelck.unlock();
			std::this_thread::sleep_for(std::chrono::milliseconds(1)); // avoid a 100% CPU spin while waiting for frames
			continue;
		}
		swap(Frame, cb_frame.front()); // swap: this slot is never read again, so stealing the buffer is safe
		cb_frame.pop_front();
		if (cb_frame.empty())
			cond_var_cap.notify_one(); // wake the capture thread if it is waiting on a full buffer
		framelck.unlock();

		// Detect faces/heads in this frame (no landmarks needed for presence).
		detect_data = fd->Detect(Frame, IMAGE_FORMAT_RGB24_B8G8R8, run_model, false);

		// Debounce the per-frame detection result.
		if (detect_data.first.empty() && detect_data.second.empty()) {
			++noneFrame;
			oneFrame = 0;
		}
		else {
			++oneFrame;
			noneFrame = 0;
		}
		if (noneFrame > m_totalNoneFrames) { // m_totalNoneFrames is adjustable at runtime via Change_time()
			ifExist = false;
		}
		if (oneFrame > m_totalOneFrames) {
			ifExist = true;
		}
		if (ifExist != preIfExist) {
			cond_var_call.notify_one(); // tell the main thread the occupancy state changed
			preIfExist = ifExist;
		}
	}

	return 0;
}


void Close_light(const string& classId, bool& msg)
{
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	stringstream ss;
	if (classClose.find(classId) != classClose.end()) {
		if (classClose[classId] == false) {
			classClose[classId] = true;
			ss << "SUCCESS: " << classId << " have been closed";
			ll1.str("");
			ll1 << __LOCATION__;
			logger1.INFO(ss.str(), ll1.str());
			//swap(msg, ss.str());
			msg = true;
		}
	}
	else {
		ss << "FAILED:" << classId << " not exist, close FAIL";
		ll1.str("");
		ll1 << __LOCATION__;
		logger1.INFO(ss.str(), ll1.str());
		//swap(msg, ss.str());
		msg = false;
	}
}

void Change_time(const int& newTime, string& msg, int flag)
{
	// Adjusts the presence-debounce thresholds at runtime.
	//   flag == 0: seconds of "nobody detected" before the room counts as empty.
	//   flag == 1: seconds of "somebody detected" before the room counts as occupied.
	// Any other flag leaves the thresholds unchanged.
	// msg receives a human-readable description of what happened (the old code
	// never assigned it, leaving the out-parameter untouched).
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	stringstream ss;
	switch (flag) {
	case 0:
		// NOTE(review): assumes 25 fps — _exist_rtsp scales by the real stream
		// FPS, so this should ideally use the same rate; confirm.
		m_totalNoneFrames = newTime * 25;
		ss << "change left time to " << newTime;
		break;
	case 1:
		m_totalOneFrames = newTime * 25;
		ss << "change start time to " << newTime;
		break;
	default:
		// Previously an unknown flag logged an empty message; make it explicit.
		ss << "invalid flag " << flag << ", time unchanged";
		break;
	}

	msg = ss.str(); // report the outcome to the caller
	ll1.str("");
	ll1 << __LOCATION__;
	logger1.INFO(ss.str(), ll1.str());
}



//rtsp 主线程
	//空教室识别
int Exist_rtsp(const string& rtspUrl, int secs, string classId)
{
	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
	if (rtspUrl.empty()) {
		ll1.str("");
		ll1 << __LOCATION__;
		logger1.ERRORS("rtsp Url is null", ll1.str());
		return -1;
	}

	XConfig config;
	config.hLicense = NULL;
	config.map_device.insert(map_model_device.begin(), map_model_device.end());
	config.plugin_path = (char *)plugin_path.c_str();
	config.extend_path = (char *)dll_path.c_str();
	config.data_path = (char *)model_path.c_str();

	vaild_license(license_path, &config);

	//人脸检测实例
	fd::FaceDetector::Builder fd_builder;
	fd_builder.max_num_faces = 75;
	fd_builder.threshold = 0.7;
	fd_builder.threshold_class = 0.15;
	fd_builder.threshold_emotion = 0.5;


	config.data_path = (char *)data_path.c_str();

	auto fd = fd_builder.Build(config);
	if (fd == NULL) {
		ll1.str("");
		ll1 << __LOCATION__;
		logger1.ERRORS("Build fd/fr/ar failded", ll1.str());
		return -1;
	}


	bool ifExist = true;//默认为有人
	bool preIfExist = ifExist;
	promise<int> promise_fps;
	future<int> future_fps = promise_fps.get_future();
	condition_variable cond_var_call;
	mutex mtx;

	thread Cap(_cap_rtsp_single, rtspUrl, ref(promise_fps));
	thread Exist(_exist_rtsp, fd, ref(ifExist), ref(future_fps), ref(cond_var_call)); //在线程中改变ifExist

	classClose[classId] = false; //当前教室的开关为false

								 //创建kafka

	string bkr = "192.168.13.223:9092";
	string top = "ai-iot-close-light";
	KafkaProducer *Kafkapr = new KafkaProducer(bkr, top);
	Kafkapr->Init();
	while (true) {
		//结束标志
		if (classClose[classId] == true) {
			EndFlag = true;
			break;
		}
		//发送kafka
		unique_lock<mutex> lck(mtx);
		cond_var_call.wait_for(lck, chrono::duration<long, deci>(5));//监听60s.防止陷入等待中,无法退出
																	 //cond_var_call.wait(lck);
		if (preIfExist != ifExist) {
			json res;
			res["IfExist"] = ifExist;
			res["classId"] = classId;
			string str_res = res.dump();
			Kafkapr->Send(str_res);
			string str;
			if (ifExist == true) str = "true";
			else str = "false";
			string info = "send kafka true or false: " + str;
			ll1.str("");
			ll1 << __LOCATION__;
			logger1.INFO(info, ll1.str());
			//cout << "Exist Recognition Success\n";
			//显示结果
			preIfExist = ifExist;
		}


	}
	delete Kafkapr;
	cout << "Exist_rtsp main thread over " << endl;
	Cap.join();
	Exist.join();
	return 0;
}
//特征提取
//特征提取
int FR_ExportFeature(vector<std::string> vecFilename, const vector<USR_INFO>& usrList, vector<FEAT_INFO>& featList)
{
	// Extracts a 512-float face feature from each picture in vecFilename and
	// appends it, tagged with the corresponding usrList entry's usrId, to
	// featList.  Entries whose image cannot be read or whose extraction fails
	// are skipped with a warning instead of crashing.
	// Returns 0 on success, -1 on bad input or model-build failure.
	if (vecFilename.empty()) {
		cerr << "FR_ExportFeature Error: Picture Files names is empty. Please check faceList" << endl;
		return -1;
	}
	if (usrList.size() > vecFilename.size()) {
		// The loop below is bounded by usrList but indexes vecFilename; the old
		// code read out of range when the two lists disagreed.
		cerr << "FR_ExportFeature Error: usrList has more entries than vecFilename" << endl;
		return -1;
	}

	XConfig config;
	config.hLicense = NULL;
	config.map_device.insert(map_model_device.begin(), map_model_device.end());
	config.plugin_path = (char *)plugin_path.c_str();
	config.extend_path = (char *)dll_path.c_str();
	config.data_path = (char *)model_path.c_str();

	vaild_license(license_path, &config);

	// Face-recognizer instance.
	fr::FaceRecognizer::Builder fr_builder;
	fr_builder.threshold_recognition_quality = 0.19;
	fr_builder.threshold_registration_quality = 0.25;
	fr_builder.max_num_registrable_faces = 75;
	fr_builder.threshold_similarity = 0.25;   // match-score threshold for recognition results
	fr_builder.recognition_interval = 1;      // recognize every N frames
	fr_builder.recognition_quality_filter = false;
	fr_builder.registration_quality_filter = true;
	config.data_path = (char *)model_path.c_str();

	auto fr = fr_builder.Build(config);
	if (fr == NULL) {
		// The old code dereferenced fr unconditionally.
		cerr << "FR_ExportFeature Error: Build fr failed" << endl;
		return -1;
	}

	/*
	* Extract the feature vectors.
	*/
	rs::fr::DetectedInfo face_info;

	for (size_t i = 0; i < usrList.size(); ++i) {
		cv::Mat img_org = cv::imread(vecFilename[i]);
		if (img_org.empty()) {
			cerr << "FR_ExportFeature Warning: can't read " << vecFilename[i] << ", skipped" << endl;
			continue;
		}
		// SDK returns the feature as an owned byte blob; 2048 bytes = 512 floats.
		std::unique_ptr<uint8_t[]> feature_blob = fr->ExtractFeature(img_org, face_info);
		const uint8_t* p_feature = feature_blob.get();
		if (p_feature == NULL) {
			cerr << "FR_ExportFeature Warning: no feature extracted from " << vecFilename[i] << ", skipped" << endl;
			continue;
		}
		FEAT_INFO feat;
		// Stack buffer replaces the old heap-allocated new float[512]/delete[].
		float feature_vals[512];
		memcpy(feature_vals, p_feature, sizeof(feature_vals));
		feat.feature.assign(feature_vals, feature_vals + 512);
		feat.usrId = usrList[i].usrId;
		featList.push_back(feat);
	}

	return 0;
}



//考勤识别
//int Recognize_rtsp(const string& rtspUrl, input_params p)
//{
//	//模型初始化
//
//	Logger logger1(Logger::file_and_terminal, Logger::debug, "..\\logs\\sampleLog.txt");
//	XConfig config;
//	config.map_device.insert(map_model_device.begin(), map_model_device.end());
//	config.plugin_path = (char *)plugin_path.c_str();
//	config.extend_path = (char *)dll_path.c_str();
//
//	vaild_license(license_path, &config);
//
//	fd::FaceDetector::Builder fd_builder;
//	fd_builder.max_num_faces = 75;
//	fd_builder.threshold = 0.70;
//	fd_builder.threshold_class = 0.15;
//	fd_builder.threshold_emotion = 0.5;
//
//	config.data_path = (char *)data_path.c_str();
//
//	auto fd = fd_builder.Build(config);
//
//	ar::ActionRecognizer::Builder ar_builder;//创建实例
//	ar_builder.minDetTimes = 2;
//	ar_builder.windowSize = 3;
//	ar_builder.threLevel = 50;
//	ar_builder.isRaceToRaiseHandMode = false;
//	config.data_path = "";
//	auto ar = ar_builder.Build(config);
//
//	//人脸识别实例
//	fr::FaceRecognizer::Builder fr_builder;
//	fr_builder.threshold_recognition_quality = 0.19;
//	fr_builder.threshold_registration_quality = 0.25;
//	fr_builder.max_num_registrable_faces = 75;
//	fr_builder.threshold_similarity = 0.25;   // 识别结果， 匹配度评分
//	fr_builder.recognition_interval = 1;      // 识别间隔    // 设置多少帧
//	fr_builder.recognition_quality_filter = false;
//	fr_builder.registration_quality_filter = true;
//	config.data_path = (char *)model_path.c_str();
//
//	auto fr = fr_builder.Build(config);
//
//	if (fd == NULL || ar == NULL || fr == NULL) {
//		ll1.str("");
//		ll1 << __LOCATION__;
//		logger1.ERRORS("Build fd/fr/ar failded", ll1.str());
//		return -1;
//	}
//
//	//注册特征值
//	map<int, string> face_name_map;
//	rs::fr::DetectedInfo face_info;
//	int person_id = 1;
//	for (auto each_user : p.featureList) {
//		uint8_t* featdata = new uint8_t[2048];
//		float* ptr = each_user.feature.data();
//		if (ptr == NULL) {
//			ll1.str("");
//			ll1 << __LOCATION__;
//			logger1.ERRORS("Regester" + each_user.usrId + " feature failed", ll1.str());
//			continue;
//		}
//		memcpy(featdata, ptr, 2048);
//		std::pair<rs::fr::RegistrationStatus, uint32_t> reg_result =
//			fr->Register(featdata, person_id);// 注册图片的特征到fr  注册时传入person_id，传出face_id   每次第一个参数内存会被释放掉
//		string name = each_user.usrId;
//		face_name_map[reg_result.second] = name;
//		person_id++;
//	}
//
//	//共享数据区
//	bool endrtspFLAG = false;
//	promise<int> promise_fps;
//	future<int> future_fps = promise_fps.get_future();
//
//	boost::circular_buffer<cv::Mat> cb_FRframe(32); //cap输入图片
//	boost::circular_buffer<DataInput> cb_FRframeInput(32); //fd检测图片
//	boost::circular_buffer<DataInput> cb_ARERframeInput(32); //fr识别图片
//
//	bool Endflag = false;
//	bool EndFR = false;
//	mutex frameMtx;
//	mutex frameInputMtx;
//	mutex msgMtx;
//
//	condition_variable condvar_cap;
//	condition_variable condvar_fd;
//	condition_variable condvar_FRkafka;
//	condition_variable condvar_ARERkafka;
//
//
//	//线程区
//	thread Cap(_cap_rtsp_single, p.rtspUrl, ref(promise_fps));
//	thread Fd(_face_detect_rtsp_single1, ref(Endflag));
//	thread Fr();
//	thread AR();
//
//
//	//发送kafka
//	string bkr = "192.168.13.223:9092";
//
//	unique_lock<mutex> lck(msgMtx);
//	condvar_FRkafka.wait(lck);//等待唤醒发送fr数据
//	json resJson_fr;
//	resJson_fr["recognizeResult"] = res_fr;
//	resJson_fr["num"] = resJson_fr["recognizeResult"].size();
//	resJson_fr["eventId"] = p.eventId;
//	resJson_fr["classId"] = p.classId;
//
//	string str_FR = resJson_fr.dump();
//	string top_FR = "ai-web-attendance";
//	KafkaProducer *Kafkapr_FR = new KafkaProducer(bkr, top_FR);
//	Kafkapr_FR->Init();
//	Kafkapr_FR->Send(str_FR);
//	delete Kafkapr_FR;
//	cout << "Face Recognition Success\n";
//	lck.unlock();
//	//unique_lock<mutex> lck(msgMtx);
//	lck.lock();
//	cv_send_arer.wait(lck);//等待唤醒发送ar,er数据
//	json resJson_er, resJson_ar;
//	resJson_ar["action"] = res_arer["action"];
//	resJson_ar["eventId"] = p.eventId;
//	resJson_er["emotion"] = res_arer["emotion"];
//	resJson_er["eventId"] = p.eventId;
//
//	string str_ER = resJson_er.dump();
//	string top_ER = "ai-web-emotion";
//	KafkaProducer *Kafkapr_ER = new KafkaProducer(bkr, top_ER);
//	Kafkapr_ER->Init();
//	Kafkapr_ER->Send(str_ER);
//	delete Kafkapr_ER;
//	string str_AR = resJson_ar.dump();
//	string top_AR = "ai-web-behavior";
//	KafkaProducer *Kafkapr_AR = new KafkaProducer(bkr, top_AR);
//	Kafkapr_AR->Init();
//	Kafkapr_AR->Send(str_AR);
//	delete Kafkapr_AR;
//	cout << "Emotion and Action Recognition Success\n";
//
//
//	//保存结果
//	fstream file_res("d:\\AI\\demo\\FR_ARER_res.json", ios::out);
//	if (!file_res.is_open()) cerr << "FR_ARER_res.json create fail" << endl;
//	file_res << resJson_ar.dump(4) << endl;
//	file_res << resJson_er.dump(4) << endl;
//	file_res << resJson_fr.dump(4) << endl;
//	file_res.close();
//	cout << "FR_ARER_detach_Recognize finish" << endl;
//
//
//	Cap.join();
//	Fd.join();
//	Fr.join();
//	return 0;
//}
