﻿#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"

#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>

using namespace cv;
using namespace std;

/** Function Headers */
// Detection-only demos (draw ellipses/circles, no recognition)
void detect_faces_from_image();
void detect_faces_from_CAM();
// Recognition demos: detect faces, then predict the subject with a trained model
void recognize_faces_from_image(std::string test_image_url, bool use_available_model);
void recognize_faces_from_CAM_or_VideoFile(bool use_CAM, bool use_available_model, std::string input_video_url);

// Face extraction helpers: detect faces and return them grayscale + resized
// to standard_size_of_face_image, together with their bounding rectangles.
bool get_faces_from_one_image(std::string img_url, std::vector<cv::Mat>& face_images, std::vector<cv::Rect>& test_faces_rects);
bool get_faces_from_one_image(cv::Mat& general_image, std::vector<cv::Mat>& face_images, std::vector<cv::Rect>& test_faces_rects);
// Capture faces from CAM/video and save them as JPGs for later training
void get_faces_for_training_from_CAM_or_VideoFile(bool use_CAM, std::string dest_folder, std::string input_video_url);

void load_cascade_classifiers();
// Reads the face database listing (one image path per line; label parsed from the path)
static void load_photo_database(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';');
// Fills the global id_info_mapping from a "id;name" text file
static void load_subject_id_info_mapping(const std::string& mapping_full_path_and_file_name);

void load_faces_database_and_train_recognition_model(std::string db_csv_file, Ptr<FaceRecognizer>& model);
int predict_face_label_using_a_recognition_model(Ptr<FaceRecognizer> model, cv::Mat& input_face);

void detect_and_display(cv::Mat frame);

/** Global variables */
std::string face_cascade_name = "haarcascade_frontalface_alt.xml";      // Haar cascade for frontal faces
std::string eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";  // Haar cascade for eyes (glasses-tolerant)
cv::CascadeClassifier face_cascade;
cv::CascadeClassifier eyes_cascade;
std::string window_name = "Capture - Face detection";
cv::RNG rng(12345); // NOTE(review): not referenced anywhere in this file -- candidate for removal
std::map<int,std::string> id_info_mapping; // subject id -> display name, filled by load_subject_id_info_mapping()
cv::Size standard_size_of_face_image = cv::Size(92, 112); // AT&T face database image size
std::string faces_database_url = "./att_faces/at.txt";
std::string id_info_mapping_url = "./att_faces/id_info_mapping.txt";
int selected_model_id = 2; // 0 = Eigenfaces, 1 = LBPH, otherwise Fisherfaces

int main(int argc, const char *argv[]) 
{
	// Entry point: load the Haar cascades once, then run exactly one of the
	// demo pipelines below (uncomment the one you want to try).
	load_cascade_classifiers();

	//detect_faces_from_image();
	//detect_faces_from_CAM();
	
	//get_faces_for_training_from_CAM_or_VideoFile(true, "./att_faces/s46", ""); // From CAM
	//get_faces_for_training_from_CAM_or_VideoFile(false, "./att_faces/s43", "./test_video_0.mp4"); // From file (run with debug mode)

	recognize_faces_from_image("test9.jpg", true);
	//recognize_faces_from_CAM_or_VideoFile(true, true, ""); // From CAM
	//recognize_faces_from_CAM_or_VideoFile(false, true, "./test_video_0.mp4"); // From file (run with debug mode)

	return 0;
}

void load_cascade_classifiers()
{
	// Load the cascades
	if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading cascade file\n"); };
	if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading cascade file\n"); };
}

void get_faces_for_training_from_CAM_or_VideoFile(bool use_CAM, std::string dest_folder, std::string input_video_url)
{
	// Capture frames from the camera or a video file, detect faces in each
	// frame, and write every detected face (grayscale, standard size) to
	// dest_folder as CAM<n>.jpg -- raw material for training a recognizer.
	// Press 'c' in the preview window to stop.
	cv::Mat frame;
	int frame_count = 0; /* REMEMBER TO CHECK THE MINIMUM NUMBER */

	/* Read the video stream */
	VideoCapture capture(0);
	if(use_CAM == false) { capture = cv::VideoCapture(input_video_url); } // Note: with input video file, only works in "debug" mode (F5, not Ctrl-F5)

	if(capture.isOpened())
	{
		while(true)
		{
			// NOTE(review): the frame is grabbed TWICE, so every other frame is
			// skipped -- presumably intentional to thin out near-duplicate
			// training images, but confirm before "fixing".
			capture >> frame;
			capture >> frame;
			
			if(!frame.empty())
			{ 
				std::vector<cv::Mat> test_faces;
				std::vector<cv::Rect> test_faces_rects;	
				// Hard-exit if face extraction fails (e.g. frame could not be processed)
				if(get_faces_from_one_image(frame, test_faces, test_faces_rects) == false) { exit(1); }
	
				for(size_t i = 0; i < test_faces.size(); i++)
				{
					cv::imshow("Capture for training", test_faces[i]);

					// One output file per detected face, numbered globally across frames
					std::string image_url = dest_folder + cv::format("/CAM%d.jpg", frame_count);
					cv::imwrite(image_url, test_faces[i]);
					frame_count++;
				}
			}
			else
			{ 
				printf(" --(!) No captured frame -- Break!"); break; 
			}

			// Poll the keyboard; 'c' stops the capture loop
			int c = cv::waitKey(10);
			if((char)c == 'c') { break; }
		}
	}
	else
	{ 
		printf(" --(!) No captured frame !\n");
		exit(1);
	}

	std::cout << "frame count = " << frame_count;
}

void recognize_faces_from_CAM_or_VideoFile(bool use_CAM, bool use_available_model, std::string input_video_url)
{
	/* Recognize faces in a live CAM stream or a video file: every frame is
	   annotated with rectangles + predicted names, shown in a window, and
	   appended to "capture.avi". Press 'c' to stop. */

	/* Prepare the recognition model: load a previously saved one, or train a
	   new one from the faces database and save it for later reuse. */
	Ptr<FaceRecognizer> model;

	if(use_available_model == true) 
	{
		switch(selected_model_id)
		{
			case 0:
				model = createEigenFaceRecognizer();
				break;

			case 1:
				model = createLBPHFaceRecognizer();
				break;

			default:
				model = createFisherFaceRecognizer();
				break;
		}
		// NOTE: the same yml file is used for every model type, so the stored
		// model must have been trained with the current selected_model_id.
		model->load("./att_faces/eigenfaces_at.yml"); 
	}
	else
	{
		load_faces_database_and_train_recognition_model(faces_database_url, model);
		model->save("./att_faces/eigenfaces_at.yml");
	}
	
	load_subject_id_info_mapping(id_info_mapping_url);

	/* Open the capture source: default camera or a video file */
	VideoCapture capture(0);
	if(use_CAM == false) { capture = cv::VideoCapture(input_video_url); } // Note: with input video file, only works in "debug" mode (F5, not Ctrl-F5)

	// Frame size for the output video writer
	double width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	double height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
	
	/* fourcc: four-Character Codes, e.g.
	   CV_FOURCC('P','I','M','1')    = MPEG-1 codec
	   CV_FOURCC('M','J','P','G')    = motion-jpeg codec (does not work well)
	   CV_FOURCC('M', 'P', '4', '2') = MPEG-4.2 codec
	   CV_FOURCC('D', 'I', 'V', '3') = MPEG-4.3 codec
	   CV_FOURCC('D', 'I', 'V', 'X') = MPEG-4 codec
	   CV_FOURCC('U', '2', '6', '3') = H263 codec
	   CV_FOURCC('I', '2', '6', '3') = H263I codec
	   CV_FOURCC('F', 'L', 'V', '1') = FLV1 codec
	   CV_FOURCC_DEFAULT
	   A codec code of -1 will open a codec selection window (in windows).
	*/
	// Create the video writer that records the annotated frames
	cv::VideoWriter video("capture.avi", CV_FOURCC_DEFAULT, 5, cvSize((int)width,(int)height), 1);

	// Check if the video was opened
	if(!video.isOpened())
	{
		std::cout << "Could not create video.";
		return;
	}

	cv::Mat frame;
	
	/* Read the video stream frame by frame */
	if(capture.isOpened())
	{
		while(true)
		{
			capture >> frame;
			
			if(!frame.empty())
			{ 
				std::vector<cv::Mat> test_faces;
				std::vector<cv::Rect> test_faces_rects;	
				if(get_faces_from_one_image(frame, test_faces, test_faces_rects) == false) { exit(1); }
	
				/* Do face recognition on every detected face */
				std::vector<int> predicted_labels(test_faces.size(), -1);
				for(size_t i = 0; i < test_faces.size(); i++)
				{
					predicted_labels[i] = predict_face_label_using_a_recognition_model(model, test_faces[i]);
				}
	
				/* Show recognition results */
				// BUGFIX: size() is size_t; passing it to "%d" is undefined on
				// 64-bit builds -- cast to int explicitly.
				std::string result_message = cv::format("We predict that, in the input image, there is/are %d face(s), which belong to: ", (int)test_faces.size());
				for(size_t i = 0; i < test_faces.size(); i++)
				{
					if(predicted_labels[i] >= 0) { result_message += id_info_mapping[predicted_labels[i]] + ", "; }
					else { result_message += "[Unknown person] "; }
				}
				cout << result_message << endl;
				int fontFace = FONT_HERSHEY_COMPLEX;

				for( size_t i = 0; i < test_faces_rects.size(); i++ )
				{
					cv::rectangle(frame, test_faces_rects[i], cv::Scalar(255, 0, 255), 2);

					// Put the predicted name just below the face rectangle
					Point textOrg(test_faces_rects[i].x, test_faces_rects[i].y + test_faces_rects[i].height + 30);
					cv::putText(frame, id_info_mapping[predicted_labels[i]], textOrg, fontFace, 1, cv::Scalar::all(255), 2, CV_AA);
				}

				// Show what we got
				cv::imshow("Output with recognition", frame);

				// Save frame to video
				video << frame;
			}
			else
			{ 
				printf(" --(!) No captured frame -- Break!"); break; 
			}

			int c = cv::waitKey(10);
			if((char)c == 'c') 
			{ 
				// BUGFIX: the original called video.~VideoWriter() explicitly;
				// the destructor then ran AGAIN when 'video' left scope ->
				// double destruction (undefined behavior). release() alone is
				// the correct way to close the writer early.
				video.release();
				capture.release();
				break; 
			}
		}
	}
	else
	{ 
		printf(" --(!) No captured frame !\n");
		exit(1);
	}
}

void recognize_faces_from_image(std::string test_image_url, bool use_available_model)
{
	/* Recognize all faces in a single image: detect faces, predict each one's
	   subject, print the names, and draw annotated output to a window and to
	   "output_image.jpg". */

	/* Prepare the recognition model: load a previously saved one, or train a
	   new one from the faces database and save it for later reuse. */
	Ptr<FaceRecognizer> model;

	if(use_available_model == true) 
	{
		switch(selected_model_id)
		{
			case 0:
				model = createEigenFaceRecognizer();
				break;

			case 1:
				model = createLBPHFaceRecognizer();
				break;

			default:
				model = createFisherFaceRecognizer();
				break;
		}
		// NOTE: the same yml file is used for every model type, so the stored
		// model must have been trained with the current selected_model_id.
		model->load("./att_faces/eigenfaces_at.yml"); 
	}
	else
	{
		load_faces_database_and_train_recognition_model(faces_database_url, model);
		model->save("./att_faces/eigenfaces_at.yml");
	}
	
	load_subject_id_info_mapping(id_info_mapping_url);

	/* Get the faces for testing */
	std::vector<cv::Mat> test_faces;
	std::vector<cv::Rect> test_faces_rects;	
	if(get_faces_from_one_image(test_image_url, test_faces, test_faces_rects) == false) { exit(1); }
	
	/* Do face recognition on every detected face */
	std::vector<int> predicted_labels(test_faces.size(), -1);
	for(size_t i = 0; i < test_faces.size(); i++)
	{
		predicted_labels[i] = predict_face_label_using_a_recognition_model(model, test_faces[i]);
	}
	
	/* Show recognition results */
	// BUGFIX: size() is size_t; passing it to "%d" is undefined on 64-bit
	// builds -- cast to int explicitly.
	std::string result_message = cv::format("We predict that, in the input image, there is/are %d face(s), which belong to: ", (int)test_faces.size());
	for(size_t i = 0; i < test_faces.size(); i++)
	{
		if(predicted_labels[i] >= 0) { result_message += id_info_mapping[predicted_labels[i]] + ", "; }
		else { result_message += "[Unknown person] "; }
	}
	cout << result_message << endl;

	// Re-load the original (color) image for annotation
	cv::Mat output_test_image = cv::imread(test_image_url);
	int fontFace = FONT_HERSHEY_COMPLEX;

	for( size_t i = 0; i < test_faces_rects.size(); i++ )
	{
		cv::rectangle(output_test_image, test_faces_rects[i], cv::Scalar(255, 0, 255), 2);

		// Put the predicted name just below the face rectangle
		Point textOrg(test_faces_rects[i].x, test_faces_rects[i].y + test_faces_rects[i].height + 30);
		cv::putText(output_test_image, id_info_mapping[predicted_labels[i]], textOrg, fontFace, 1, cv::Scalar::all(255), 2, CV_AA);
	}

	// Show and persist what we got
	cv::imshow("Output with recognition", output_test_image);
	cv::imwrite("output_image.jpg", output_test_image);
	cv::waitKey();
}

void detect_faces_from_CAM()
{
	CvCapture* capture;
	cv::Mat frame;

	// Read the video stream
	capture = cvCaptureFromCAM(-1);
	if(capture)
	{
		while(true)
		{
			frame = cvQueryFrame(capture);

			//-- 3. Apply the classifier to the frame
			if(!frame.empty())
			{ 
				detect_and_display(frame); 
			}
			else
			{ 
				printf(" --(!) No captured frame -- Break!"); break; 
			}

			int c = cv::waitKey(10);
			if((char)c == 'c') { break; }
		}
	}
	else
	{ 
		printf(" --(!) No captured frame !\n");
	}
}

void detect_faces_from_image()
{
	// Read the image
	cv::Mat frame = cv::imread("lenna.jpg");
	
	if(!frame.data)
	{
		std::cout << "Cannot load the input image\n";
	}
	else
	{
		detect_and_display(frame);
	}

	cv::waitKey();
}

bool get_faces_from_one_image(std::string img_url, std::vector<cv::Mat>& face_images, std::vector<cv::Rect>& test_faces_rects)
{
	// Convenience overload: load the image from disk, then delegate to the
	// cv::Mat overload. Returns false if the file cannot be read.
	cv::Mat loaded_image = cv::imread(img_url);

	if(loaded_image.data == NULL)
	{
		std::cout << "Cannot load the input image\n";
		return false;
	}

	return get_faces_from_one_image(loaded_image, face_images, test_faces_rects);
}

bool get_faces_from_one_image(cv::Mat& general_image, std::vector<cv::Mat>& face_images, std::vector<cv::Rect>& test_faces_rects)
{
	// Detect every face in the input image and append each one -- converted to
	// grayscale and resized to standard_size_of_face_image -- to face_images.
	// The matching bounding boxes are appended to test_faces_rects.
	cv::Mat gray_image;
	cv::cvtColor(general_image, gray_image, CV_BGR2GRAY);
	// IMPORTANT: do NOT equalize the histogram here -- doing so makes the
	// recognition results completely wrong for this pipeline.

	//-- Detect faces (minimum face size 50x50)
	face_cascade.detectMultiScale(gray_image, test_faces_rects, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(50, 50));

	for(size_t face_idx = 0; face_idx < test_faces_rects.size(); face_idx++)
	{
		// Crop the face region and bring it to the standard training size
		cv::Mat cropped_face = gray_image(test_faces_rects[face_idx]);
		cv::Mat standardized_face;
		cv::resize(cropped_face, standardized_face, standard_size_of_face_image);

		face_images.push_back(standardized_face);
	}

	return true;
}

/** @function detect_and_display
 *  Detects faces in the frame, draws an ellipse around each face and a circle
 *  around each eye found inside it, then shows the frame in the global window.
 */
void detect_and_display( cv::Mat frame )
{
	std::vector<cv::Rect> faces;
	cv::Mat frame_gray;

	cv::cvtColor( frame, frame_gray, CV_BGR2GRAY );
	cv::equalizeHist( frame_gray, frame_gray );

	//-- Detect faces (minimum size 30x30)
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );

	// BUGFIX: loop indices were int, compared against size() (size_t) --
	// use size_t to avoid signed/unsigned comparison warnings.
	for( size_t i = 0; i < faces.size(); i++ )
	{
		// Ellipse centered on the face rectangle
		cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
		cv::ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 4, 8, 0 );
		
		cv::Mat faceROI = frame_gray( faces[i] );
		std::vector<cv::Rect> eyes;

		//-- In each face, detect eyes
		eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );

		for( size_t j = 0; j < eyes.size(); j++ )
		{
			// Eye coordinates are relative to the face ROI -- offset by the face origin
			cv::Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
			int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
			cv::circle( frame, center, radius, cv::Scalar( 255, 0, 0 ), 4, 8, 0 );
		}
	}
	//-- Show what you got
	cv::imshow( window_name, frame );
}

static void load_photo_database(const string& filename, vector<Mat>& images, vector<int>& labels, char separator) 
{
	// Reads the face database listing: one image path per line. The integer
	// label is parsed out of the path itself (the subject directory), NOT from
	// a separator-delimited field -- the 'separator' parameter is unused here.
	std::cout << "Loading faces database...\n";
	std::ifstream file(filename.c_str(), ifstream::in);
	if (!file) 
	{
		string error_message = "No valid input file was given, please check the given filename.";
		CV_Error(CV_StsBadArg, error_message);
	}

	string path, classlabel;
	while (getline(file, path)) 
	{
		// Extract the numeric part of the subject directory from a path like
		// ".../s12/photo.pgm": find the '/' before the directory, then the
		// "+ 2" skips both that '/' and the leading 's' of "s12".
		// NOTE(review): assumes every path follows the ".../sNN/file" layout
		// of the AT&T database -- confirm before reusing with other layouts.
		size_t start_pos = path.find_last_of("/", path.find_last_of("/") - 1) + 2;
		size_t char_count = path.find_last_of("/") - start_pos;
		classlabel = path.substr(start_pos, char_count);
		
		int nClassLabel = atoi(classlabel.c_str());

		if(!path.empty() && !classlabel.empty()) 
		{
			// Load as grayscale (flag 0)
			cv::Mat frame = imread(path, 0);
			
			if(nClassLabel >= 41)
			{
				// For photos we added ourselves (subjects 41+): they may have
				// arbitrary dimensions, so resize to the standard face size.
				cv::Mat standardized_frame;
				cv::resize(frame, standardized_frame, standard_size_of_face_image);
				images.push_back(standardized_frame);			
				//imshow("aaa", frame);
				//imshow("bbb", standardized_frame);
				//cv::waitKey();
			}
			else
			{
				// For the standard photos of the test data set (subjects 1-40):
				// already at the expected 92x112 size, use as-is.
				images.push_back(frame);
			}

			labels.push_back(nClassLabel);
		}
	}
}

static void load_subject_id_info_mapping(const std::string& mapping_full_path_and_file_name)
{
	// Rebuild the global <subject_id, name> table from a text file whose
	// first line is a header and whose remaining lines are "id;name" pairs.
	std::cout << "Loading <subject_id, information> mapping...\n";
	std::string current_line;	
	std::ifstream mapping_file(mapping_full_path_and_file_name.c_str(), ifstream::in);
	id_info_mapping.clear();

	if(!mapping_file.is_open())
	{
		std::cout << "Error: cannot open the mapping file";
		return;
	}

	// Discard the single explanation/header line
	std::getline(mapping_file, current_line);

	// Parse each remaining "id;name" line into the global map
	while (std::getline(mapping_file, current_line))
	{
		std::stringstream line_stream(current_line);
		std::string id_token, name_token;
		std::getline(line_stream, id_token, ';');
		std::getline(line_stream, name_token);

		// Skip malformed lines (missing id or name)
		if(id_token.empty() || name_token.empty()) { continue; }

		id_info_mapping[std::atoi(id_token.c_str())] = name_token;
	}

	mapping_file.close();
}

void load_faces_database_and_train_recognition_model(std::string db_csv_file, Ptr<FaceRecognizer>& model)
{
	// These vectors hold the images and corresponding labels
	vector<Mat> images;
	vector<int> labels;
	
	// Read in the data
	try 
	{
		load_photo_database(db_csv_file, images, labels);		
	} 
	catch (cv::Exception& e) 
	{
		cerr << "Error opening file \"" << db_csv_file << "\". Reason: " << e.msg << endl;
		exit(1); // Nothing more we can do
	}

	// Quit if there are not enough images
	if(images.size() <= 1) 
	{
		string error_message = "We need at least 2 images. Please add more images to your data set!";
		CV_Error(CV_StsError, error_message);
	}

	switch(selected_model_id)
	{
		case 0:
			/* Create Eigenfaces model */
			// The following lines create an Eigenfaces model for
			// face recognition and train it with the images and
			// labels read from the given CSV file.
			// This here is a full PCA, if you just want to keep
			// 10 principal components (read Eigenfaces), then call
			// the factory method like this:
			//
			//      cv::createEigenFaceRecognizer(10);
			//
			// If you want to create a FaceRecognizer with a
			// confidence threshold (e.g. 123.0), call it with:
			//
			//      cv::createEigenFaceRecognizer(10, 123.0);
			//
			// If you want to use _all_ Eigenfaces and have a threshold,
			// then call the method like this:
			//
			//      cv::createEigenFaceRecognizer(0, 123.0);
			//
			model = createEigenFaceRecognizer();
			break;

		case 1:
			/* Create Local Binary Patterns Histograms model */
			// The following lines create an LBPH model for
			// face recognition and train it with the images and
			// labels read from the given CSV file.
			//
			// The LBPHFaceRecognizer uses Extended Local Binary Patterns
			// (it's probably configurable with other operators at a later
			// point), and has the following default values
			//
			//      radius = 1
			//      neighbors = 8
			//      grid_x = 8
			//      grid_y = 8
			//
			// So if you want a LBPH FaceRecognizer using a radius of
			// 2 and 16 neighbors, call the factory method with:
			//
			//      cv::createLBPHFaceRecognizer(2, 16);
			//
			// And if you want a threshold (e.g. 123.0) call it with its default values:
			//
			//      cv::createLBPHFaceRecognizer(1,8,8,8,123.0)
			//
			model = createLBPHFaceRecognizer();
			break;

		default:
			/* Create Fisherfaces model */
			// The following lines create an Fisherfaces model for
			// face recognition and train it with the images and
			// labels read from the given CSV file.
			// If you just want to keep 10 Fisherfaces, then call
			// the factory method like this:
			//
			//      cv::createFisherFaceRecognizer(10);
			//
			// However it is not useful to discard Fisherfaces! Please
			// always try to use _all_ available Fisherfaces for
			// classification.
			//
			// If you want to create a FaceRecognizer with a
			// confidence threshold (e.g. 123.0) and use _all_
			// Fisherfaces, then call it with:
			//
			//      cv::createFisherFaceRecognizer(0, 123.0);
			//
			model = createFisherFaceRecognizer();
			break;
	}

	std::cout << "Training model...\n";
	model->train(images, labels);
}

int predict_face_label_using_a_recognition_model(Ptr<FaceRecognizer> model, cv::Mat& input_face)
{	
	// Ask the trained model for the subject label of one face image
	// (expected grayscale, standard size).
	std::cout << "Predicting...\n";
	const int predicted_label = model->predict(input_face);
	
	// To also obtain a confidence value, use the three-argument overload:
	//      int lbl = -1; double confidence = 0.0;
	//      model->predict(input_face, lbl, confidence);
	//
	// Internal model data not exposed by cv::FaceRecognizer can be reached
	// through the cv::Algorithm interface it derives from, e.g.:
	//      model->set("threshold", 0.0);      // predictions then return -1
	//      model->getInt("radius");           // LBPH parameters
	//      model->getDouble("threshold");
	//      model->getMatVector("histograms"); // LBPH histograms
	
	return predicted_label;
}
