﻿#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/highgui.h>
#include <opencv/ml.h> // machine learning
#include <algorithm>
#include <iostream>
#include <vector>

using namespace cv;
int main()
{
	// TODO: switch to a Random Tree structure
	//cv::RandomizedTree randomTree;

	// Reference (model) image, loaded as grayscale.
	Mat learnImage = imread("./Images/imageindexing.0000.bmp" , CV_LOAD_IMAGE_GRAYSCALE);
	if (learnImage.empty())	// BUG FIX: detect()/GaussianBlur() assert on an empty Mat
	{
		std::cout << "Failed to load training image" << std::endl;
		return -1;
	}

	// Good-Features-To-Track keypoint detector.
	cv::Ptr<cv::FeatureDetector> pFD = cv::FeatureDetector::create("GFTT");

	// GFTT parameters
	pFD->set("qualityLevel", 0.1);
	pFD->set("minDistance", 10);
	pFD->set("useHarrisDetector", 1);

	vector<Mat> objpyr, imgpyr;
	vector<KeyPoint> objKeypoints, imgKeypoints;
	PlanarObjectDetector detector;

	// NOTE(review): keypoints are detected on the un-blurred image while the
	// training pyramid below is built from the blurred one — confirm intended.
	pFD->detect(learnImage, objKeypoints);

	int blurKSize = 3;
	double sigma = 0;
	GaussianBlur(learnImage, learnImage, Size(blurKSize, blurKSize), sigma, sigma);	
	buildPyramid(learnImage, objpyr, 50);	
	
	string dataPath = format("trainingData.xml");
	FileStorage fs(dataPath, FileStorage::READ);

	if (fs.isOpened())	// training data already exists — load it
	{
		std::cout << "학습 데이터 로드" << std::endl;
		detector.read(fs.getFirstTopLevelNode());
		fs.release();	// BUG FIX: storage was never released on this path
	}
	else	// no training data — train the ferns model and persist it
	{
		std::cout << "학습 시작" << std::endl;		
		detector.setVerbose(true);
		detector.train(objpyr, objKeypoints);
		
		if (fs.open(dataPath, FileStorage::WRITE))
		{
			detector.write(fs, "ferns_model");
			fs.release();
		}

		std::cout << "학습 데이터 저장 완료" << std::endl;
	}	

	cv::VideoCapture cap(0);
	if (!cap.isOpened())	// BUG FIX: bail out instead of looping on a dead camera
	{
		std::cout << "Failed to open camera" << std::endl;
		return -1;
	}
	cv::Mat frame;

	while (1)
	{
		cap >> frame;
		if (frame.empty())	// BUG FIX: cvtColor on an empty frame crashes
		{
			break;
		}
		cv::cvtColor(frame, frame, CV_BGR2GRAY);

		GaussianBlur(frame, frame, Size(blurKSize, blurKSize), sigma, sigma);
		buildPyramid(frame, imgpyr, 50);

		// Canvas: model image stacked on top of the camera frame.
		vector<Point2f> corner_H;
		Mat correspond(learnImage.rows + frame.rows, std::max(learnImage.cols, frame.cols), CV_8UC3);
		correspond = Scalar(0.);

		Mat part(correspond, Rect(0, 0, learnImage.cols, learnImage.rows));
		cvtColor(learnImage, part, CV_GRAY2BGR);

		part = Mat(correspond, Rect(0, learnImage.rows, frame.cols, frame.rows));
		cvtColor(frame, part, CV_GRAY2BGR);

		vector<int> pairs;	// (modelIdx, imageIdx) index pairs of matched keypoints
		Mat H;				// homography model -> frame

		double t = (double)getTickCount();
		objKeypoints = detector.getModelPoints();	

		pFD->detect(frame, imgKeypoints);

		// Runs the ferns classifier; on success H and the projected model
		// corners (corner_H) are filled in.
		bool found = detector(imgpyr, imgKeypoints, H, corner_H, &pairs);
		
		//std::cout << "Pair Size " << pairs.size() << " and Keypoint size " << imgKeypoints.size() <<  std::endl;
		//float result = (float)pairs.size() / (float)imgKeypoints.size() * 100;
		//std::cout << "Result Size " << result << std::endl;

		t = (double)getTickCount() - t;
		printf("%gms\n", t * 1000 / getTickFrequency());

		if (found/* && result >= 80*/)
		{
			// Draw the detected object's quadrilateral in the frame half
			// (offset by learnImage.rows because the frame sits below the model).
			for (int i = 0; i < 4; i++)
			{
				Point r1 = corner_H[i % 4];
				Point r2 = corner_H[(i + 1) % 4];
				line(correspond, Point(r1.x, r1.y + learnImage.rows), Point(r2.x, r2.y + learnImage.rows), Scalar(0, 0, 255));
			}
		}

		// Green match lines: model keypoint (top) -> frame keypoint (bottom).
		for (int i = 0; i < (int)pairs.size(); i += 2)
		{
			line(correspond, objKeypoints[pairs[i]].pt, imgKeypoints[pairs[i + 1]].pt + Point2f(0, learnImage.rows),
				Scalar(0, 255, 0));
		}

		imshow("Object Correspondence", correspond);

		// Visualize model keypoints: red dot + green circle scaled by octave.
		Mat objectColor;
		cvtColor(learnImage, objectColor, CV_GRAY2BGR);
		for (int i = 0; i < (int)objKeypoints.size(); i++)
		{
			circle(objectColor, objKeypoints[i].pt, 2, Scalar(0, 0, 255), -1);
			circle(objectColor, objKeypoints[i].pt, (1 << objKeypoints[i].octave) * 15, Scalar(0, 255, 0), 1);
		}

		// Same visualization for the live frame keypoints.
		Mat imageColor;
		cvtColor(frame, imageColor, CV_GRAY2BGR);
		for (int i = 0; i < (int)imgKeypoints.size(); i++)
		{
			circle(imageColor, imgKeypoints[i].pt, 2, Scalar(0, 0, 255), -1);
			circle(imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave) * 15, Scalar(0, 255, 0), 1);
		}
				
		imshow("Object", objectColor);
		imshow("Image", imageColor);

		// CONSISTENCY FIX: use the C++ waitKey (as below) instead of the
		// legacy C cvWaitKey. ESC exits the loop.
		if (waitKey(1) == 27)
		{
			break;
		}
	}

	waitKey(0);
	cap.release();

	return 0;
}	

// 일반 FernClassifier 학습 예
//// ----------------------------------------------학습부----------------------------------------------
//// <FernClassifier>
//cv::FernClassifier classifier;
//std::vector<vector<Point2f>> tempClass(keypointSet.size());
//for (int i = 0; i < keypointSet.size(); i++)
//{
//	for (int j = 0; j < keypointSet[i].size(); j++)
//	{
//		tempClass[i].push_back(keypointSet[i][j].pt);
//	}
//}
//// </FernClassifier>

//classifier.train(tempClass, imageSet);

//cv::FernDescriptorMatcher::Params matchParam;
//FileStorage fs;	
//string modelFilename = format("MatcherTrainingData.xml");
//if (fs.open(modelFilename, FileStorage::WRITE))
//{

//	fs << "nclasses" << matchParam.nclasses;
//	fs << "patchSize" << matchParam.patchSize;
//	fs << "signatureSize" << matchParam.signatureSize;
//	fs << "nstructs" << matchParam.nstructs;
//	fs << "structSize" << matchParam.structSize;
//	fs << "nviews" << matchParam.nviews;
//	fs << "compressionMethod" << matchParam.compressionMethod;

//	classifier.write(fs, "fernClassifier");
//	fs.release();		
//}
//// ----------------------------------------------학습부----------------------------------------------	 
