#include "stdafx.h"

#include <opencv2/legacy/legacy.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"

#include "SurfFeatures.h"
#include "RGBColour.h"

using namespace cv;
using namespace std;

// Default constructor. The class holds no state, so nothing to initialise.
SurfFeatures::SurfFeatures(void)
{
}

// Destructor. No owned resources; cv::Mat members (none) would clean up via RAII.
SurfFeatures::~SurfFeatures(void)
{
}

/*
*	SURF feature detection algorithms. Split into two functions: one to detect features and one to match them.
*/
/*
*	Detects SURF keypoints in both images and displays them.
*	@param image1          first input image (BGR, as loaded by OpenCV)
*	@param image2          second input image (BGR)
*	@param match_features  if 1, continue on to descriptor matching via findMatches()
*	@return 0 on success, -1 if either image is empty
*/
int SurfFeatures::findFeatures(Mat image1, Mat image2, int match_features)
{
	// Validate the inputs BEFORE any processing — the original checked only
	// after cvtColor/imshow, which would already have thrown on an empty Mat.
	if( !image1.data || !image2.data )
	{
		std::cout<< " --(!) Error reading images " << std::endl;
		return -1;
	}

	// Convert to grayscale. OpenCV loads images in BGR channel order, so
	// CV_BGR2GRAY is the correct conversion code (CV_RGB2GRAY swaps the
	// red/blue luminance weights).
	Mat grayscale1;
	Mat grayscale2;
	cvtColor( image1, grayscale1, CV_BGR2GRAY );
	cvtColor( image2, grayscale2, CV_BGR2GRAY );

	imshow("first image",image1);
	imshow("second image",image2);

	//-- Step 1: Detect the keypoints using the SURF detector.
	// minHessian is the Hessian response threshold: larger values keep
	// only stronger, more distinctive keypoints.
	const int minHessian = 500;
	SurfFeatureDetector detector( minHessian );
	std::vector< KeyPoint > keypoints_object, keypoints_scene;
	// Detect on the grayscale images (SURF operates on intensity).
	detector.detect( grayscale1, keypoints_object );
	detector.detect( grayscale2, keypoints_scene );

	//-- Draw keypoints over the original colour images.
	Mat img_keypoints_1; Mat img_keypoints_2;
	drawKeypoints( image1, keypoints_object, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
	drawKeypoints( image2, keypoints_scene, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

	//-- Show detected (drawn) keypoints.
	imshow("Keypoints 1", img_keypoints_1 );
	imshow("Keypoints 2", img_keypoints_2 );

	// Optionally continue to descriptor matching. (Previously this call was
	// commented out with the wrong signature, leaving match_features unused.)
	if (match_features == 1)
	{
		findMatches( image1, image2, grayscale1, grayscale2, keypoints_object, keypoints_scene );
	}

	return 0;
}

 /*
 *	Computes SURF descriptors for the given keypoints, matches them with
 *	FLANN, keeps only the "good" matches, estimates the object-to-scene
 *	homography and visualises/saves the result.
 *	@param image1/image2          original colour images (image1 = object, image2 = scene)
 *	@param grayscale1/grayscale2  grayscale versions used for descriptor computation
 *	@param keypoints_object/keypoints_scene  keypoints previously detected on each image
 *	@return 0 on success, -1 when there is too little data to match/estimate
 */
 int SurfFeatures::findMatches(Mat image1, Mat image2, Mat grayscale1, Mat grayscale2, std::vector< KeyPoint > keypoints_object, std::vector< KeyPoint > keypoints_scene)
 {
	//-- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor extractor;

	Mat descriptors_object, descriptors_scene;
	extractor.compute( grayscale1, keypoints_object, descriptors_object );
	extractor.compute( grayscale2, keypoints_scene, descriptors_scene );

	// FLANN asserts on empty descriptor matrices — bail out gracefully.
	if( descriptors_object.empty() || descriptors_scene.empty() )
	{
		std::cout << " --(!) No descriptors to match " << std::endl;
		return -1;
	}

	//-- Step 3: Match descriptor vectors using the FLANN matcher.
	// (An unused BruteForceMatcher alternative was removed here.)
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match( descriptors_object, descriptors_scene, matches );

	//-- Quick calculation of max and min distances between keypoints,
	//   used to derive the "good match" threshold below.
	double max_dist = 0; double min_dist = 100;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{
		double dist = matches[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
	}

	printf("-- Max dist : %f \n", max_dist );
	printf("-- Min dist : %f \n", min_dist );

	//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist).
	std::vector< DMatch > good_matches;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{
		if( matches[i].distance < 3*min_dist )
		{
			good_matches.push_back( matches[i] );
		}
	}

	// findHomography requires at least 4 point correspondences; with fewer
	// it throws/asserts, so return early instead of crashing.
	if( good_matches.size() < 4 )
	{
		std::cout << " --(!) Not enough good matches to estimate a homography " << std::endl;
		return -1;
	}

	//-- Draw only the "good" matches side by side (image1 left, image2 right).
	Mat img_matches;
	drawMatches( image1, keypoints_object, image2, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	//-- Collect the matched point coordinates in both images.
	std::vector< Point2f > obj;
	std::vector< Point2f > scene;
	for( size_t i = 0; i < good_matches.size(); i++ )
	{
		// queryIdx indexes the object keypoints, trainIdx the scene keypoints.
		obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
		scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
	}

	// Find the homography matrix with RANSAC (reprojection threshold 4 px)
	// so outlier matches do not corrupt the estimate.
	Mat H = findHomography( obj, scene, CV_RANSAC, 4 );

	//-- Get the corners from image_1 (the object to be "detected") and
	//   project them into the scene via the homography.
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = Point2f( 0, 0 );
	obj_corners[1] = Point2f( (float)image1.cols, 0 );
	obj_corners[2] = Point2f( (float)image1.cols, (float)image1.rows );
	obj_corners[3] = Point2f( 0, (float)image1.rows );
	std::vector<Point2f> scene_corners(4);

	perspectiveTransform( obj_corners, scene_corners, H );

	//-- Draw the projected object outline on the match visualisation.
	//   The scene image sits to the right of image_1 in img_matches,
	//   hence the horizontal offset of image1.cols.
	Point2f offset( (float)image1.cols, 0 );
	line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar( 0, 0, 255 ), 4 );
	line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 0, 255 ), 4 );
	line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 0, 255 ), 4 );
	line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 0, 255 ), 4 );

	//-- Show the detected matches and save them. imwrite replaces the
	//   deprecated C-API CvMat/cvSaveImage round-trip.
	imshow( "Good Matches & Object detection", img_matches );
	imwrite( "Matches.png", img_matches );

	waitKey(0);
	return 0;
 }

 