/**
 *  The Visual Topology Project.
 *
 *  Author:  David Qiu
 *  Email:   david@davidqiu.com
 *  Website: www.davidqiu.com
 *
 *  Copyright (C) 2016, David Qiu. All rights reserved.
 */

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace std;
using namespace cv;
using namespace xfeatures2d;


// Longest side (in pixels) that captured frames are downscaled to before
// feature processing; smaller = faster, larger = more keypoints.
const int CONFIG_PROCESSING_QUALITY = 360;
const int CONFIG_FEATURE_DETECTOR = 2; // 0 = FAST, 1 = ORB, 2 = SIFT, 3 = SURF
const int CONFIG_DESCRIPTOR_MATCHER = 0; // 0 = FlannBased, 1 = BruteForce L1, 2 = BruteForce L2, 3 = BruteForce Hamming, 4 = BruteForce Hamming (2)
// Non-zero enables distance-based filtering of raw matches before homography.
const int CONFIG_ENABLE_GOOD_MATCH = 1;


// Task requested by the last keypress, consumed (reset to 0) by process().
int g_currentTask = 0; // 0 = none, 1 = memorize, 2 = compare
// Reference frame stored by performTaskMemorize(); compared against by
// performTaskCompare(). Valid only while g_memorizedFrameExists is true.
Mat g_memorizedFrame;
bool g_memorizedFrameExists = false;


// Forward declarations (definitions below main).
Ptr<FeatureDetector> createFeatureDetector( void );
Ptr<DescriptorMatcher> createDescriptorMatcher( void );
int drawFullImageHomographyTranslation( const Mat& img_ori, const Mat& img_dst, const Mat& img_matches, const Mat& H, Mat& img_trans );
int process( const cv::Mat& frameIn, cv::Mat& frameOut );
int performTaskMemorize( const cv::Mat& frame );
int performTaskCompare( const cv::Mat& frame );


/**
 * Entry point: captures live video from the default camera, runs the
 * feature-processing pipeline on every frame, and dispatches tasks
 * (memorize / compare) selected by keypress.
 *
 * Keys: '1' memorize current frame, '2' compare with memorized, ESC quit.
 *
 * @return 0 on normal exit, -1 if the camera could not be opened.
 */
int main( int argc, char** argv )
{
  VideoCapture cap;
  
  // Open the main camera to capture live video
  cap.open(0);
  if (!cap.isOpened())
  {
    cerr << "Could not initialize video capture. (cameraId: 0)" << endl;
    return -1; // fail fast: without a camera the capture loop below cannot run
  }
  
  // Capture live video frame
  Mat frame;
  Mat frameProcessed;
  while (1)
  {
    // Load next frame from video capture
    cap >> frame;
    if (frame.empty())
    {
      cerr << "The next video frame from video capture is empty." << endl;
      break;
    }
    
    // Process frame
    process(frame, frameProcessed);
    
    // Show the frame
    imshow("original", frame);
    imshow("processed", frameProcessed);
    
    // Response to keypress (waitKey also services the HighGUI event loop)
    bool doExit = false;
    int keycode = waitKey(33);
    switch (keycode) {
      case 27: doExit = true; break; // ESC
      case 49: g_currentTask = 1; break; // '1' key
      case 50: g_currentTask = 2; break; // '2' key
    }
    if (doExit)
    {
      break;
    }
  }
  
  // Return success
  return 0;
}


/**
 * Build the keypoint detector selected by CONFIG_FEATURE_DETECTOR
 * (0 = FAST, 1 = ORB, 2 = SIFT, 3 = SURF).
 *
 * @return the created detector; an empty Ptr (and an error message on
 *         stderr) if the configuration value is not recognized.
 */
Ptr<FeatureDetector> createFeatureDetector( void )
{
  Ptr<FeatureDetector> detector;
  
  if (CONFIG_FEATURE_DETECTOR == 0)
  {
    detector = FastFeatureDetector::create();
  }
  else if (CONFIG_FEATURE_DETECTOR == 1)
  {
    detector = ORB::create();
  }
  else if (CONFIG_FEATURE_DETECTOR == 2)
  {
    detector = SIFT::create();
  }
  else if (CONFIG_FEATURE_DETECTOR == 3)
  {
    detector = SURF::create();
  }
  else
  {
    cerr << "Invalid feature detector." << endl;
  }
  
  return detector;
}


/**
 * Build the descriptor matcher selected by CONFIG_DESCRIPTOR_MATCHER.
 * The index maps onto the factory names accepted by
 * DescriptorMatcher::create().
 *
 * @return the created matcher; an empty Ptr (and an error message on
 *         stderr) if the configuration value is out of range.
 */
Ptr<DescriptorMatcher> createDescriptorMatcher( void )
{
  // Factory names, indexed by CONFIG_DESCRIPTOR_MATCHER.
  static const char* const kMatcherNames[] = {
    "FlannBased",             // 0
    "BruteForce-L1",          // 1
    "BruteForce",             // 2 (L2)
    "BruteForce-Hamming",     // 3
    "BruteForce-Hamming(2)"   // 4
  };
  
  Ptr<DescriptorMatcher> matcher;
  if (CONFIG_DESCRIPTOR_MATCHER >= 0 && CONFIG_DESCRIPTOR_MATCHER <= 4)
  {
    matcher = DescriptorMatcher::create(kMatcherNames[CONFIG_DESCRIPTOR_MATCHER]);
  }
  else
  {
    cerr << "Invalid descriptor matcher." << endl;
  }
  
  return matcher;
}


int drawFullImageHomographyTranslation( const Mat& img_ori, const Mat& img_dst, const Mat& img_matches, const Mat& H, Mat& img_trans )
{
  // Initialize the homography translation image
  img_trans = img_matches;
  
  // Find the corners of the original image
  vector<Point2f> corners_ori(4);
  corners_ori[0] = cvPoint(0, 0);
  corners_ori[1] = cvPoint(img_ori.cols, 0);
  corners_ori[2] = cvPoint(img_ori.cols, img_ori.rows);
  corners_ori[3] = cvPoint(0, img_ori.rows);
  
  // Translate the corners to the destination image by homography matrix
  vector<Point2f> corners_dst(4);
  perspectiveTransform(corners_ori, corners_dst, H);
  
  // Draw box to highlight the homography translation result
  line( img_trans, corners_dst[0] + Point2f( img_ori.cols, 0), corners_dst[1] + Point2f( img_ori.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_trans, corners_dst[1] + Point2f( img_ori.cols, 0), corners_dst[2] + Point2f( img_ori.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_trans, corners_dst[2] + Point2f( img_ori.cols, 0), corners_dst[3] + Point2f( img_ori.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_trans, corners_dst[3] + Point2f( img_ori.cols, 0), corners_dst[0] + Point2f( img_ori.cols, 0), Scalar( 0, 255, 0), 4 );
  
  // Return success
  return 0;
}


/**
 * Per-frame pipeline: downscale the frame to the configured processing
 * quality, detect keypoints, dispatch any pending task (memorize/compare),
 * and output a visualization of the detected keypoints.
 *
 * @param frameIn  captured camera frame (read-only).
 * @param frameOut output: resized frame with keypoints drawn on it.
 * @return 0 on success.
 */
int process( const cv::Mat& frameIn, cv::Mat& frameOut )
{
  // Resize the frame so its longest side equals CONFIG_PROCESSING_QUALITY,
  // preserving the aspect ratio.
  Mat frameResized;
  int width = frameIn.cols;
  int height = frameIn.rows;
  if (width > height)
  {
    height = height * CONFIG_PROCESSING_QUALITY / width;
    width = CONFIG_PROCESSING_QUALITY;
  }
  else
  {
    width = width * CONFIG_PROCESSING_QUALITY / height;
    height = CONFIG_PROCESSING_QUALITY;
  }
  // Guard against integer division rounding a very thin frame down to zero,
  // which would make resize() throw.
  if (width < 1) width = 1;
  if (height < 1) height = 1;
  resize(frameIn, frameResized, Size(width, height));
  
  // Detect featured keypoints and compute descriptors
  Mat frameKeypoints;
  Ptr<FeatureDetector> detector = createFeatureDetector();
  vector<KeyPoint> keypoints;
  Mat descriptors;
  detector->detectAndCompute(frameResized, Mat(), keypoints, descriptors);
  drawKeypoints(frameResized, keypoints, frameKeypoints);
  
  // Check if there is any task to perform
  if (g_currentTask)
  {
    // Select task to perform
    switch (g_currentTask) {
      case 1: performTaskMemorize(frameResized); break;
      case 2: performTaskCompare(frameResized); break;
    }
    cout << "perform task: " << g_currentTask << endl;
    
    // Clear the task status so the task runs only once per keypress
    g_currentTask = 0;
  }
  
  // Output the processed frame
  frameOut = frameKeypoints;
  
  // Return success
  return 0;
}


/**
 * Memorize the given frame as the reference for later comparisons.
 *
 * @param frame the (already resized) frame to store.
 * @return 0 on success.
 */
int performTaskMemorize( const cv::Mat& frame )
{
  // Deep-copy the frame: a plain Mat assignment would only share the pixel
  // buffer with the caller, so the "memorized" image could silently change
  // if that buffer is later reused or modified.
  g_memorizedFrame = frame.clone();
  g_memorizedFrameExists = true;
  
  // Return success
  return 0;
}


/**
 * Compare the given frame against the memorized reference frame: match
 * keypoint descriptors, optionally filter to "good" matches, estimate a
 * homography, and display the match/homography visualization.
 *
 * @param frame the (already resized) frame to compare.
 * @return 0 on success, -1 when comparison is not possible (no descriptors
 *         found, or too few good matches to estimate a homography).
 */
int performTaskCompare( const cv::Mat& frame )
{
  // Check if memorized frame exists; fall back to memorizing this frame
  if (!g_memorizedFrameExists)
  {
    // Deep copy: see performTaskMemorize for why a plain assignment is unsafe.
    g_memorizedFrame = frame.clone();
    g_memorizedFrameExists = true;
  }
  
  // Detect featured keypoints and compute descriptors
  Ptr<FeatureDetector> detector = createFeatureDetector();
  vector<KeyPoint> keypoints_ori, keypoints_cmp;
  Mat descriptors_ori, descriptors_cmp;
  detector->detectAndCompute(g_memorizedFrame, Mat(), keypoints_ori, descriptors_ori);
  detector->detectAndCompute(frame, Mat(), keypoints_cmp, descriptors_cmp);
  
  // Matching with an empty descriptor set would abort inside the matcher.
  if (descriptors_ori.empty() || descriptors_cmp.empty())
  {
    cerr << "No descriptors found; cannot compare frames." << endl;
    return -1;
  }
  
  // Match keypoints by descriptors
  Mat frameMatch;
  Ptr<DescriptorMatcher> matcher = createDescriptorMatcher();
  vector<DMatch> matches;
  matcher->match(descriptors_ori, descriptors_cmp, matches);
  drawMatches(g_memorizedFrame, keypoints_ori, frame, keypoints_cmp, matches, frameMatch);
  
  // Find good matches: keep only matches whose descriptor distance is
  // within 3x the minimum observed distance.
  vector<DMatch> matches_good = matches;
  if (CONFIG_ENABLE_GOOD_MATCH && matches.size() > 0)
  {
    float max_dist = matches[0].distance;
    float min_dist = matches[0].distance;
    for (size_t i = 0; i < matches.size(); ++i)
    {
      if (matches[i].distance > max_dist) max_dist = matches[i].distance;
      if (matches[i].distance < min_dist) min_dist = matches[i].distance;
    }
    
    matches_good.clear();
    for (size_t i = 0; i < matches.size(); ++i)
    {
      if (matches[i].distance < 3 * min_dist) matches_good.push_back(matches[i]);
    }
  }
  
  // findHomography requires at least 4 point correspondences; with fewer it
  // throws (or returns an empty matrix). Show the raw matches and bail out.
  if (matches_good.size() < 4)
  {
    cerr << "Not enough good matches to estimate a homography." << endl;
    imshow("homography", frameMatch);
    return -1;
  }
  
  // Find homography between the two frames
  Mat frameHomography;
  vector<Point2f> keypointPositions_ori;
  vector<Point2f> keypointPositions_cmp;
  for (size_t i = 0; i < matches_good.size(); ++i)
  {
    keypointPositions_ori.push_back(keypoints_ori[matches_good[i].queryIdx].pt);
    keypointPositions_cmp.push_back(keypoints_cmp[matches_good[i].trainIdx].pt);
  }
  Mat H = findHomography(keypointPositions_ori, keypointPositions_cmp, RANSAC);
  drawFullImageHomographyTranslation(g_memorizedFrame, frame, frameMatch, H, frameHomography);
  imshow("homography", frameHomography);
  
  // Return success
  return 0;
}


