//ITMD536 Software Testing and Maintenance.
//Final project <group assignment>
//Author: Wanglei ID:A20479012
#include <algorithm>
#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "Face.hpp"

//for face recognition.
extern ncnn::Net mNet;

// Clamp `rect` in place so it lies within an image of size `imgsz`.
// The top-left corner is clamped to [0, dim-1], the bottom-right edge to
// [0, dim]; width/height are recomputed from the clamped corners.
static void ClipBox(cv::Rect &rect, cv::Size imgsz) {
	const int x0 = std::min(std::max(0, rect.x), imgsz.width - 1);
	const int y0 = std::min(std::max(0, rect.y), imgsz.height - 1);
	const int x1 = std::min(std::max(0, rect.x + rect.width), imgsz.width);
	const int y1 = std::min(std::max(0, rect.y + rect.height), imgsz.height);
	rect = cv::Rect(x0, y0, x1 - x0, y1 - y0);
}

// Align a detected face to the canonical 112x112 template.
// image : full input frame (not modified).
// pts   : 5 detected landmark points (eyes, nose tip, mouth corners).
// rect  : detected face rectangle, used for the fallback crop.
// out   : receives the 112x112 aligned face patch.
// Returns 0 on success, -1 if the landmark count is wrong or the fallback
// crop rectangle is degenerate.
static int AlignedFace(const cv::Mat& image, std::vector<cv::Point> pts, cv::Rect rect, cv::Mat& out){
	const int margin = 44;
	// Canonical 5-point landmark template for a 112x112 aligned face.
	// Kept as cv::Point2f: storing these in integer cv::Point (as before)
	// silently rounded away the sub-pixel template coordinates and
	// degraded the estimated similarity transform.
	std::vector<cv::Point2f> anchors;
	anchors.push_back(cv::Point2f(38.2946f, 51.6963f));
	anchors.push_back(cv::Point2f(73.5318f, 51.5014f));
	anchors.push_back(cv::Point2f(56.0252f, 71.7366f));
	anchors.push_back(cv::Point2f(41.5493f, 92.3655f));
	anchors.push_back(cv::Point2f(70.7299f, 92.2041f));

	if (pts.size() != anchors.size())
		return -1;

	// estimateRigidTransform requires both point sets to have the same
	// element type, so promote the detected integer landmarks to float.
	std::vector<cv::Point2f> srcPts(pts.begin(), pts.end());
	cv::Mat warp_mat = cv::estimateRigidTransform(srcPts, anchors, false);
	if (!warp_mat.empty()) {
		// Similarity transform found: warp straight onto the template.
		// warpAffine does not modify its source, so no clone is needed
		// (the old code made two full-frame clones per call).
		cv::warpAffine(image, out, warp_mat, cv::Size(112, 112));
	} else {
		// Fallback: crop the margin-expanded detection box and resize.
		int x0 = std::min(image.cols, std::max(rect.x - margin / 2, 0));
		int y0 = std::min(image.rows, std::max(rect.y - margin / 2, 0));
		int x1 = std::min(image.cols, std::max(rect.x + rect.width, 0));
		int y1 = std::min(image.rows, std::max(rect.y + rect.height, 0));
		cv::Rect cropedRect(x0, y0, x1 - x0, y1 - y0);
		// Guard a degenerate box: cv::resize would throw on an empty ROI.
		if (cropedRect.width <= 0 || cropedRect.height <= 0)
			return -1;

		cv::Mat cropImg;
		cv::resize(image(cropedRect), cropImg, cv::Size(112, 112), 0, 0, cv::INTER_CUBIC);
		out = cropImg;
	}

	return 0;
}

// Crop-and-align the face described by `faceBox` out of `image`.
// Returns the 112x112 aligned face patch; the result is empty when
// AlignedFace fails (e.g. wrong landmark count).
cv::Mat rAlignFace(cv::Mat &image, FaceBox &faceBox){
	cv::Mat alignedFace;

	// Detection rectangle (floats truncate to int), clipped to the frame.
	cv::Rect rect(faceBox.x0, faceBox.y0,
	              faceBox.x1 - faceBox.x0, faceBox.y1 - faceBox.y0);
	ClipBox(rect, image.size());

	// Collect the five detected landmarks. The Point2f -> Point
	// conversion rounds to the nearest integer pixel, matching what
	// AlignedFace expects.
	std::vector<cv::Point> pts_one;
	pts_one.reserve(5);
	for (int i = 0; i < 5; ++i)
		pts_one.push_back(cv::Point2f(faceBox.landmark.x[i], faceBox.landmark.y[i]));

	AlignedFace(image, pts_one, rect, alignedFace);
	return alignedFace;
}

// Run the ncnn face-recognition network on the first aligned face and copy
// the resulting feature vector into fbox[0].features.
// alignedFaces : aligned BGR face crops; only element 0 is used.
// fbox         : receives up to FACED_FEATURE_LEN floats in .features.
// Returns faceNormal on success, faceRecognitionError on bad input.
static int recognition(std::vector<cv::Mat> &alignedFaces, FaceRecType *fbox) {
	// The old code dereferenced alignedFaces[0] unconditionally: an empty
	// vector (or an empty Mat from a failed alignment) crashed here.
	if (fbox == NULL || alignedFaces.empty() || alignedFaces[0].empty())
		return faceRecognitionError;

	// Normalize pixels from [0,255] to [-1,1], as the model expects.
	const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
	const float norm_vals[3] = {1.0f / 127.5f, 1.0f / 127.5f, 1.0f / 127.5f};

	ncnn::Mat in = ncnn::Mat::from_pixels_resize(alignedFaces[0].data, ncnn::Mat::PIXEL_BGR2RGB,
		alignedFaces[0].cols, alignedFaces[0].rows, FACE_ALIGNED_W, FACE_ALIGNED_H);
	in.substract_mean_normalize(mean_vals, norm_vals);

	// Run the network: input blob "data", output blob "fc1_fc1_scale".
	ncnn::Extractor ex = mNet.create_extractor();
	ex.input("data", in);
	ncnn::Mat out;
	ex.extract("fc1_fc1_scale", out);

	// Copy the feature vector out. Clamp to the destination capacity:
	// the old memcpy trusted out.w and could overrun fbox[0].features.
	const int len = std::min(out.w, (int)FACED_FEATURE_LEN);
	memcpy(fbox[0].features, out.data, len * sizeof(float));
	return faceNormal;
}

//origin: db or memory record.
//chose: new gen feature.
//score: score of output.
int Face::faceCompare(float *origin, float *chose, float *score){
    int len=FACED_FEATURE_LEN;
	cv::Mat m1(len, 1, CV_32FC1, origin), m2(len, 1, CV_32FC1,chose);
	*score = m1.dot(m2) / cv::norm(m1, CV_L2) / cv::norm(m2, CV_L2);
	return faceNormal;
}

// Recognize the face described by fdBox inside `frame`: align the face,
// run the recognition network, and fill fdBox->features.
// frame : input image containing the detected face.
// fdBox : in: detection box + landmarks; out: feature vector.
// Returns faceNormal on success, faceRecognitionError otherwise.
int Face::faceRecognize(cv::Mat& frame, FaceRecType *fdBox){
	int ret, cycI;
	std::vector<cv::Mat> mats;
	FaceBox faceBox;

	// Copy the detection rectangle and the five landmarks into the
	// FaceBox layout expected by the alignment stage.
	faceBox.x0 = fdBox->box.x0;
	faceBox.y0 = fdBox->box.y0;
	faceBox.x1 = fdBox->box.x1;
	faceBox.y1 = fdBox->box.y1;
	for (cycI = 0; cycI < 5; cycI++)
	{
		faceBox.landmark.x[cycI] = fdBox->box.landmark.x[cycI];
		faceBox.landmark.y[cycI] = fdBox->box.landmark.y[cycI];
	}

	// Face alignment. An empty result means the landmarks were unusable;
	// the old code passed the empty Mat on to imwrite (which throws) and
	// to the recognizer (which would read null pixel data).
	cv::Mat process_mat = rAlignFace(frame, faceBox);
	if (process_mat.empty())
		return faceRecognitionError;

	//cv::cvtColor(process_mat, process_mat, cv::COLOR_RGB2BGR);
	cv::imwrite("./images/align.png", process_mat);  // debug dump

	mats.push_back(process_mat);
	ret = recognition(mats, fdBox); //DML_Recognition(mats,features);
	mats.clear();

	if (ret != faceNormal)
		return faceRecognitionError;
	return faceNormal;
}
