#include <cassert>
#include <cmath>
#include <cstdlib>  // For srand() and rand()
#include <ctime>    // For time()
#include <iostream>
#include <limits>
#include <stdio.h>

#include <opencv/cv.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>

#include "Features.h"


// This is the entry point for all panorama generation.  The output image will
// be allocated by your code and in particular should be allocated from a call
// to compositeImages.  This function will also depend on ransacHomography
// in order to compute a best homography for the pair of images.  You should
// use computeFeatures and matchFeatures when necessary.
/*IplImage * constructPanorama ( IplImage *     img1,
                               IplImage *     img2,
                               int            featureType,
                               int matchType
#ifdef                                        Q_WS_MAEMO_5
                               ,
                               Progressable * thread
#endif
                               )
{
  assert ( img1->depth == IPL_DEPTH_32F );
  assert ( img1->nChannels == 3 );
  assert ( img2->depth == IPL_DEPTH_32F );
  assert ( img2->nChannels == 3 );

  // @@@ TODO
  assert ( 0 ); // Remove this when ready
}*/
// Entry point for panorama generation: detect features in both images,
// match them, estimate a homography with RANSAC, and composite the pair.
// Both inputs must be 3-channel 32F images.  The returned IplImage is
// freshly allocated (cvCloneImage); the caller owns and must release it.
IplImage * constructPanorama ( IplImage *     img1,
                               IplImage *     img2,
                               int            featureType,
                               int matchType
#ifdef                                        Q_WS_MAEMO_5
                               ,
                               Progressable * thread
#endif
                               )
{

  assert ( img1->depth == IPL_DEPTH_32F );
  assert ( img1->nChannels == 3 );
  assert ( img2->depth == IPL_DEPTH_32F );
  assert ( img2->nChannels == 3 );

  // Detect features in each input image.
  FeatureSet f1;
  computeFeatures(img1, f1, featureType);
  FeatureSet f2;
  computeFeatures(img2, f2, featureType);

  // Match the two feature sets.  The hard-coded 2 selects the ratio test
  // (see matchFeatures); the matchType parameter is intentionally ignored.
  float totalScore = 0.0f;
  vector<FeatureMatch> matches;
  bool ismatched = matchFeatures ( f1, f2, matches,totalScore, 2);
  cout<<"Matches are made? "<<ismatched<<endl;

  // RANSAC parameters.  The inlier threshold passed in is recomputed inside
  // ransacHomography, so this value is effectively unused.
  int numRounds = 10000;
  int inlierThreshold = 50;

  // Indices (into matches) of the 4 correspondences RANSAC settled on.
  vector<int> fourpoints;
  cv::Mat h = ransacHomography(f1,f2,matches, fourpoints, numRounds,inlierThreshold);

  cout<<"ransac completed, back in constructPanorama"<<endl;

  // Debug copies of the inputs with the chosen correspondences circled.
  cv::Mat matIm1, matIm2;
  cv::Mat(img1,false).convertTo(matIm1,CV_32F);
  cv::Mat(img2,false).convertTo(matIm2,CV_32F);

  cout<<"fourpoints size: "<<fourpoints.size()<<endl;
  cout.flush();  // was `fflush;` — a statement with no effect
  // Guard: RANSAC can return without filling fourpoints; indexing [0..3]
  // unconditionally was undefined behavior on an empty vector.
  if ( fourpoints.size() >= 4 ){
      cout<<"h based on 4 points in matches:"<<fourpoints[0]<<","<<fourpoints[1]<<","<<fourpoints[2]<<","<<fourpoints[3]<<endl;
  }
  for(size_t i=0;i<fourpoints.size();i++){
      FeatureMatch m = matches[fourpoints[i]];

      cout<<"im1 point:"<<f1[m.id1-1].x<<","<<f1[m.id1-1].y<<endl;

      cv::circle(matIm1, cv::Point( f1[m.id1-1].x, f1[m.id1-1].y ), 10, cv::Scalar(0,255,0));
      cv::circle(matIm2, cv::Point( f2[m.id2-1].x, f2[m.id2-1].y ), 10, cv::Scalar(0,255,0));
  }

  // Warp img1 by h and blend img2 on top.
  cv::Mat result = compositeImages( img1, img2, h, f1, f2);

  printf("Displaying image\n");

  // Copy the composite into a caller-owned IplImage.  A stack header is
  // used here: the old `new IplImage(result)` heap-allocated a header that
  // was never freed after cvCloneImage copied it.
  IplImage header = result;
  IplImage* ni = cvCloneImage(&header);

  return ni;
}

// img1 and img2 are color images that you want to make into a panorama by
// applying the homography, h, to img2.  This function needs to determine the
// size of the output image and allocate the memory for it.
// img1 and img2 are color images that you want to make into a panorama by
// applying the homography, h, to img1 and overlaying img2.  This function
// determines the size of the output canvas, allocates it, warps img1 into
// it, and blends img2 on top (averaging 50/50 where both have content).
// NOTE: cv::Mat is passed by SHALLOW copy, so the translation that used to
// be added to h in place also modified the caller's matrix; we clone first.
cv::Mat compositeImages ( IplImage *     img1,
                             IplImage *     img2,
                             cv::Mat        h,
                             const vector<Feature> & f1,
                             const vector<Feature> & f2
#ifdef Q_WS_MAEMO_5
                             ,
                             Progressable * thread
#endif
                             )
{
  assert ( img1->depth == IPL_DEPTH_32F );
  assert ( img1->nChannels == 3 );
  assert ( img2->depth == IPL_DEPTH_32F );
  assert ( img2->nChannels == 3 );

  // Wrap the IplImages as 32F cv::Mats.
  cv::Mat matIm1, matIm2;
  cv::Mat(img1,false).convertTo(matIm1,CV_32F);
  cv::Mat(img2,false).convertTo(matIm2,CV_32F);

  int w1 = img1->width;
  int h1 = img1->height;
  int w2 = img2->width;
  int h2 = img2->height;

  cout<<"size 1: "<<w1<<","<<h1<<endl;
  cout<<"size 2: "<<w2<<","<<h2<<endl;

  // Flatten h into the row-major array applyHomography expects.
  float h_arr[9];
  h_arr[0]=h.at<float>(0,0);
  h_arr[1]=h.at<float>(0,1);
  h_arr[2]=h.at<float>(0,2);
  h_arr[3]=h.at<float>(1,0);
  h_arr[4]=h.at<float>(1,1);
  h_arr[5]=h.at<float>(1,2);
  h_arr[6]=h.at<float>(2,0);
  h_arr[7]=h.at<float>(2,1);
  h_arr[8]=h.at<float>(2,2);

  // Map img1's four corners through h to find the warped extent.
  float x, y;
  applyHomography(0.0, 0.0, x, y, h_arr);
  cv::Point2f p1 = cv::Point2f(x,y);   // top-left
  applyHomography(w1, 0.0, x, y, h_arr);
  cv::Point2f p2 = cv::Point2f(x,y);   // top-right
  applyHomography(0.0, h1, x, y, h_arr);
  cv::Point2f p3 = cv::Point2f(x,y);   // bottom-left
  applyHomography(w1, h1, x, y, h_arr);
  cv::Point2f p4 = cv::Point2f(x,y);   // bottom-right

  // Canvas size, plus the shift (dx,dy) that keeps warped img1 on-canvas
  // when its left/top corners map to negative coordinates.
  double dx, dy;
  int w = 0;
  int H = 0;
  if (fmin(p1.x,p3.x) < 0)
  {
      w = fmax(fmax(p2.x,p4.x), w2) + ( 0 - fmin(p1.x,p3.x));
      dx = -fmin(p1.x,p3.x);
  }
  else
  {
      w = fmax(fmax(p2.x,p4.x), w2);
      dx = 0;
  }
  cout << "P.x: " << p1.x << "//" << p2.x << "//" << p3.x << "//" << p4.x << endl;
  cout << "P.y: " << p1.y << "//" << p2.y << "//" << p3.y << "//" << p4.y << endl;

  if (fmin(p1.y,p2.y) < 0)
  {
      H = fmax(fmax(p3.y,p4.y), h2) + (0 - fmin(p1.y,p2.y));
      dy = -fmin(p1.y,p2.y);
  }
  else
  {
      H = fmax(fmax(p3.y,p4.y),h2);
      dy = 0;
  }

  cv::Size s = cv::Size(w,H);
  cout << "result size:" << s.width << "-" << s.height << endl;

  // Bake the shift into a COPY of h so the caller's matrix is untouched.
  cv::Mat hShifted = h.clone();
  hShifted.at<float>(0,2) += dx;
  hShifted.at<float>(1,2) += dy;

  cv::Mat result;
  cv::warpPerspective(matIm1, result, hShifted, cv::Size(w,H));

  vector<cv::Mat> planes;
  cv::split(result, planes);
  vector<cv::Mat> im2planes;
  cv::split(matIm2, im2planes);

  // Overlay img2: average with warped img1 wherever img1 has content
  // (any channel above a small threshold), otherwise copy img2's pixel.
  for (int col = 0; col < w2; col++)
  {
      for (int row = 0; row < h2; row++)
      {
          int destX = (int)(col + dx);
          int destY = (int)(row + dy);
          // Bounds guard: truncating w/H to int can leave the last shifted
          // row/column of img2 just outside the canvas.
          if (destX < 0 || destX >= result.cols || destY < 0 || destY >= result.rows)
              continue;
          if (( planes[0].at<float>(destY,destX) > 10.0 || planes[1].at<float>(destY,destX) > 10.0 ) || planes[2].at<float>(destY,destX) > 10.0)
          {
              planes[0].at<float>(destY,destX) = (0.5*planes[0].at<float>(destY,destX) + 0.5*im2planes[0].at<float>(row,col));
              planes[1].at<float>(destY,destX) = (0.5*planes[1].at<float>(destY,destX) + 0.5*im2planes[1].at<float>(row,col));
              planes[2].at<float>(destY,destX) = (0.5*planes[2].at<float>(destY,destX) + 0.5*im2planes[2].at<float>(row,col));
          }
          else
          {
              planes[0].at<float>(destY,destX) = im2planes[0].at<float>(row,col);
              planes[1].at<float>(destY,destX) = im2planes[1].at<float>(row,col);
              planes[2].at<float>(destY,destX) = im2planes[2].at<float>(row,col);
          }
      }
  }

  merge(planes,result);

  cout<<"result size "<<result.size().width<<" "<<result.size().height<<endl;

  // Debug: circle every detected feature on the (unreturned) input copies.
  for ( FeatureSet::const_iterator i1 = f1.begin(); i1 != f1.end (); ++i1 ){
      cv::circle(matIm1, cv::Point(i1->x, i1->y), 2, cv::Scalar(255,0,255));
  }
  for ( FeatureSet::const_iterator i1 = f2.begin(); i1 != f2.end (); ++i1 ){
      cv::circle(matIm2, cv::Point(i1->x, i1->y), 2, cv::Scalar(255,0,255));
  }

  return result;
}

// Compute features of an image.
// Compute features of an image, dispatching on featureType:
//   1 = dummy features, 2 = MOPS, 3 = SURF.
// Returns false for an unrecognized featureType.  On success the feature
// IDs are renumbered 1..N, since IDs index back into the feature array.
bool computeFeatures ( IplImage *     image,
                       FeatureSet &   features,
                       int featureType
#ifdef \
                       Q_WS_MAEMO_5
                       ,
                       Progressable * thread
#endif
                       )
{
  assert ( image->depth == IPL_DEPTH_32F );
  assert ( image->nChannels == 3 );

  if ( featureType == 1 )
  {
    dummyComputeFeatures ( image, features
#ifdef Q_WS_MAEMO_5
                           , thread
#endif
                           );
  }
  else if ( featureType == 2 )
  {
    ComputeMOPSFeatures ( image, features
#ifdef Q_WS_MAEMO_5
                          , thread
#endif
                          );
  }
  else if ( featureType == 3 )
  {
    ComputeSURFFeatures ( image, features
#ifdef Q_WS_MAEMO_5
                          , thread
#endif
                          );
  }
  else
  {
    return false;
  }

  // Renumber IDs sequentially so features[id - 1] is always valid.
  for ( size_t idx = 0; idx < features.size (); ++idx )
  {
    features [idx].id = idx + 1;
  }

  return true;
}

// Detect SURF keypoints and descriptors and convert them into Features.
// The 32F RGB input is converted to an 8-bit grayscale image for SURF.
void ComputeSURFFeatures ( IplImage * image, FeatureSet & features
#ifdef Q_WS_MAEMO_5
                           , Progressable * thread
#endif
                           )
{
  cv::SURF surf;

  std::vector<cv::KeyPoint> keypoints;
  std::vector<float> descriptors;

  // SURF wants 8-bit grayscale: scale 32F [0,1] up to [0,255], then convert.
  IplImage * temp = cvCreateImage ( cvGetSize ( image ), IPL_DEPTH_8U, 3 );
  cvConvertScale ( image, temp, 255, 0 );
  IplImage * temp2 = cvCreateImage ( cvGetSize ( image ), IPL_DEPTH_8U, 1 );
  cvCvtColor ( temp, temp2, CV_RGB2GRAY );
  cvReleaseImage ( &temp );
  surf ( temp2, cv::Mat (), keypoints, descriptors );
  // Bug fix: the second release used to target temp again (a no-op after
  // the first release), leaking temp2 on every call.
  cvReleaseImage ( &temp2 );
  features.clear ();
  features.reserve ( keypoints.size () );
  for ( size_t i = 0; i < keypoints.size (); ++i )
  {
#ifdef Q_WS_MAEMO_5
    // Guard the modulus: keypoints.size()/100 is zero for < 100 keypoints,
    // which would divide by zero.
    if ( thread && keypoints.size () >= 100
         && i % ( keypoints.size () / 100 ) == 0 )
    {
      thread->emitProgressUpdate ( 100 * i / keypoints.size () );
    }
#endif
    Feature f;
    f.x = keypoints[i].pt.x;
    f.y = keypoints[i].pt.y;
    f.type = 3;
    f.id = 0;  // real IDs are assigned afterwards by computeFeatures
    // NOTE(review): cv::KeyPoint::angle is in degrees, but this field is
    // named angleRadians — confirm what downstream consumers expect.
    f.angleRadians = keypoints[i].angle;
    f.data.reserve ( surf.descriptorSize () );
    for ( int j = 0; j < surf.descriptorSize (); ++j )
    {
      f.data.push_back ( descriptors [i * surf.descriptorSize () + j] );
    }
    features.push_back ( f );
  }
}

// Match one feature set with another.
// Match one feature set with another, dispatching on matchType:
//   1 = plain SSD matching, 2 = ratio test.
// Returns false for an unrecognized matchType.
bool matchFeatures ( const FeatureSet &     f1,
                     const FeatureSet &     f2,
                     vector<FeatureMatch> & matches,
                     float &                totalScore,
                     int matchType
#ifdef                                      Q_WS_MAEMO_5
                     ,
                     Progressable *         thread
#endif
                     )
{
  std::cout << std::endl << "Matching features......." << std::endl;

  if ( matchType == 1 )
  {
    ssdMatchFeatures ( f1, f2, matches, totalScore
#ifdef Q_WS_MAEMO_5
                       , thread
#endif
                       );
    return true;
  }

  if ( matchType == 2 )
  {
    ratioMatchFeatures ( f1, f2, matches, totalScore
#ifdef Q_WS_MAEMO_5
                         , thread
#endif
                         );
    return true;
  }

  return false;
}

// Evaluate a match using a ground truth homography.  This computes the
// average SSD distance between the matched feature points and
// the actual transformed positions.
// Evaluate a match using a ground truth homography.  This computes the
// average Euclidean distance between each matched feature point and the
// actual transformed position of its partner.  Returns 0 for an empty
// match set (previously divided by zero).
float evaluateMatch ( const vector<Feature> &           f1,
                      const vector<Feature> &           f2,
                      const vector<FeatureMatch> & matches,
                      float                        h [9] )
{
  float d = 0;
  int n = 0;

  float xNew;
  float yNew;

  size_t num_matches = matches.size ();

  for ( size_t i = 0; i < num_matches; ++i )
  {
    int id1 = matches [i].id1;
    int id2 = matches [i].id2;
    assert ( id1 );
    assert ( id2 );
    applyHomography ( f1 [id1 - 1].x, f1 [id1 - 1].y, xNew, yNew, h );
    // Accumulate Euclidean distance; dx*dx is cheaper and clearer than
    // the previous pow(dx, 2) calls.
    float dxErr = xNew - f2 [id2 - 1].x;
    float dyErr = yNew - f2 [id2 - 1].y;
    d += sqrt ( dxErr * dxErr + dyErr * dyErr );
    ++n;
  }

  // Guard: an empty match set would otherwise return 0/0 (NaN).
  if ( n == 0 )
  {
    return 0.0f;
  }
  return d / n;
}

// Compute silly example features.  This doesn't do anything
// meaningful.
// Compute silly example features.  This doesn't do anything meaningful:
// a pixel becomes a feature when its quantized brightness happens to hash
// to 1 mod 100.  Kept as a baseline and progress-reporting demo.
void dummyComputeFeatures ( IplImage * image, FeatureSet & features
#ifdef Q_WS_MAEMO_5
                            , Progressable * thread
#endif
                            )
{
  assert ( image->depth == IPL_DEPTH_32F );
  assert ( image->nChannels == 3 );

  // NOTE(review): f.id is incremented below without an explicit start
  // value; this relies on Feature's constructor zero-initializing id —
  // confirm against Features.h.
  Feature f;
#ifdef Q_WS_MAEMO_5
  int total_size = image->height * image->width;
#endif
  int count = 0;
  for ( int y = 0; y < image->height; ++y )
  {
    for ( int x = 0; x < image->width; ++x )
    {
// @@@ Nothing to do here, but take note of how you can notify the UI on the
// phone to display the status on a progress bar.  Values range from 0 - 100%
#ifdef Q_WS_MAEMO_5
      // Guard the modulus: total_size / 100 is zero for images smaller
      // than 100 pixels, which would divide by zero.
      if ( thread && total_size >= 100 && count % ( total_size / 100 ) == 0 )
      {
        thread->emitProgressUpdate ( 100 * count / total_size );
      }
#endif
      ++count;
      float r = CV_IMAGE_ELEM ( image, float, y, 3 * x );
      float g = CV_IMAGE_ELEM ( image, float, y, 3 * x + 1 );
      float b = CV_IMAGE_ELEM ( image, float, y, 3 * x + 2 );
      if ( ( int )( 255 * ( r + g + b ) + 0.5 ) % 100 == 1 )
      {
        // If the pixel satisfies this meaningless criterion,
        // make it a feature.

        f.type = 1;
        f.id += 1;
        assert ( f.id );
        f.x = x;
        f.y = y;

        f.data.resize ( 1 );
        f.data [0] = r + g + b;

        features.push_back ( f );
      }
    }
  }
}

//Implementing this type of feature is OPTIONAL
//Implementing this type of feature is OPTIONAL
// MOPS detection pipeline: build a grayscale copy, compute Harris
// responses and orientations, mark 3x3 local maxima, then (unfinished)
// build a descriptor per point above threshold.  Intermediate response
// images are dumped to PNG for debugging.
void ComputeMOPSFeatures ( IplImage * image, FeatureSet & features
#ifdef Q_WS_MAEMO_5
                           , Progressable * thread
#endif
                           )
{
  assert ( image->depth == IPL_DEPTH_32F );
  assert ( image->nChannels == 3 );

  CvSize sz = cvGetSize ( image );

  // Grayscale working copy used for MOPS detection.
  IplImage * gray = cvCreateImage ( sz, IPL_DEPTH_32F, 1 );
  cvCvtColor ( image, gray, CV_RGB2GRAY );

  // Per-pixel MOPS/Harris response values.
  IplImage * harris = cvCreateImage ( sz, IPL_DEPTH_32F, 1 );

  // Mask of local maxima among the Harris values.
  IplImage * harrisMax = cvCreateImage ( sz, IPL_DEPTH_32F, 1 );

  // Per-pixel orientations.
  IplImage * orientation = cvCreateImage ( sz, IPL_DEPTH_32F, 1 );

  //@@@ Fill in computeMOPSValues if you want to use MOPS
  computeMOPSValues ( gray, harris, orientation );
  cvSaveImage ( "harris.png", harris );

  //@@@ Fill in computeLocalMaxima if you want to use MOPS
  computeLocalMaxima ( harris, harrisMax );
  cvSaveImage ( "harrisMax.png", harrisMax );

  //@@@ Copy your project 2a code here if you want to use MOPS:
  // loop through feature points in harrisMax and create a descriptor
  // for each point above a threshold.

  cvReleaseImage ( &orientation );
  cvReleaseImage ( &harris );
  cvReleaseImage ( &gray );
  cvReleaseImage ( &harrisMax );
}

//Implementing this type of feature is OPTIONAL
//OPTIONAL TO DO---------------------------------------------------------------------
//Loop through the image to compute the harris corner values as described in
//class
// srcImage:  grayscale of original image
// harrisImage:  populate the harris values per pixel in this image
// orientationImage: populate the orientation in this image
// OPTIONAL: compute per-pixel MOPS/Harris corner values and orientations.
// srcImage:         grayscale of the original image
// harrisImage:      receives the Harris value for each pixel
// orientationImage: receives the orientation for each pixel
// Currently a stub — only validates the image formats.
void computeMOPSValues ( IplImage * srcImage,
                         IplImage * harrisImage,
                         IplImage * orientationImage )
{
  // Every image involved must be single-channel 32-bit float.
  assert ( srcImage->depth == IPL_DEPTH_32F );
  assert ( srcImage->nChannels == 1 );
  assert ( harrisImage->depth == IPL_DEPTH_32F );
  assert ( harrisImage->nChannels == 1 );
  assert ( orientationImage->depth == IPL_DEPTH_32F );
  assert ( orientationImage->nChannels == 1 );

  //@@@ Not implemented.
}

//Implementing this type of feature is OPTIONAL
//Loop through the image to compute the harris corner values as described in
//class
// srcImage:  image with MOPS values
// destImage: Assign 1 to local maximum in 3x3 window, 0 otherwise
// OPTIONAL: mark local maxima of the MOPS response image.
// srcImage:  image with MOPS values
// destImage: receives 1 at each 3x3-window local maximum, 0 elsewhere
// Currently a stub — only validates the image formats.
void computeLocalMaxima ( IplImage * srcImage, IplImage * destImage )
{
  // Both images must be single-channel 32-bit float.
  assert ( srcImage->nChannels == 1 );
  assert ( srcImage->depth == IPL_DEPTH_32F );
  assert ( destImage->nChannels == 1 );
  assert ( destImage->depth == IPL_DEPTH_32F );

  //@@@ Not implemented.
}


// Perform simple feature matching.  This just uses the SSD
// distance between two feature vectors, and matches a feature in the
// first image with the closest feature in the second image.  It can
// match multiple features in the first image to the same feature in
// the second image.
void ssdMatchFeatures ( const FeatureSet &     f1,
                        const FeatureSet &     f2,
                        vector<FeatureMatch> & matches,
                        float &                totalScore
#ifdef                                         Q_WS_MAEMO_5
                        ,
                        Progressable *         thread
#endif
                        )
{
  // @@@ You can leave is as-is since you will probably want to use the ratio
  // test anyway.  It is okay if you want to replace this with FLANN code such
  // as what we provide in the ratio test function.
  totalScore = 0;
#ifdef Q_WS_MAEMO_5
  int count = 0;
  int totalCount = f1.size ();
#endif
  for ( FeatureSet::const_iterator i1 = f1.begin (); i1 != f1.end (); ++i1 )
  {
#ifdef Q_WS_MAEMO_5
    if ( thread && count % ( totalCount / 100 ) == 0 )
    {
      thread->emitProgressUpdate ( 100 * count / totalCount );
    }
    ++count;
#endif
    size_t bestMatch = 1;
    float bestScore = 1e100;
    for ( FeatureSet::const_iterator i2 = f2.begin (); i2 != f2.end (); ++i2 )
    {
      float score = 0;
      assert ( i1->data.size () == i2->data.size () );
      for ( size_t i = 0; i < i1->data.size (); ++i )
      {
        float diff = i1->data[i] - i2->data[i];
        score += diff * diff;
      }
      if ( score < bestScore )
      {
        bestMatch = i2->id;
        bestScore = score;
      }
    }
    FeatureMatch m;
    m.id1 = i1->id;
    m.id2 = f2[bestMatch - 1].id;
    m.score = bestScore;
    totalScore += m.score;
    if (m.score < 0.05){
        matches.push_back ( m );
    }
  }
}

// This just uses the ratio of the SSD distance of the two best matches
// and matches a feature in the first image with the closest feature in the
// second image.
// It can match multiple features in the first image to the same feature in
// the second image.  (See class notes for more information)
// Ratio-test matching: for each feature in f1, find its two nearest
// neighbors in f2 via a FLANN KD-tree, and keep the pair only when the
// nearest distance is below 0.7x the second-nearest (an unambiguous match).
// Multiple features in the first image may match the same feature in the
// second image.  (See class notes for more information.)
void ratioMatchFeatures ( const FeatureSet &     f1,
                          const FeatureSet &     f2,
                          vector<FeatureMatch> & matches,
                          float &                totalScore
#ifdef                                           Q_WS_MAEMO_5
                          ,
                          Progressable *         thread
#endif
                          )
{
  // @@@ We are improving the speed of your matching code using the
  // Fast Library for Approximate Nearest Neighbors (FLANN)
  // We've done the work to populate the index.  Now you need to
  // look up how to query it and how to use the results.  Feel free
  // to use it for SSD match as well.
  // Copy every f2 descriptor into one row-per-feature matrix for FLANN.
  // NOTE(review): assumes f2 is non-empty and all descriptors share one
  // length — confirm callers guarantee this.
  cv::Mat features ( f2.size (), f2[0].data.size (), CV_32F );

  for ( size_t i = 0; i < f2.size (); ++i )
  {
    for ( size_t j = 0; j < f2[i].data.size (); ++j )
    {
      features.at<float>( ( int )i, ( int )j ) = f2[i].data[j];
    }
  }
  cv::flann::Index::Index flannIndex ( features, cv::flann::KDTreeIndexParams () ); //default KDTrees = 4
  // End FLANN index population
  // Is this needed?
  //flannIndex.buildIndex();
  totalScore = 0;
  int count = 0;
  int totalCount = f1.size ();

  for ( FeatureSet::const_iterator i1 = f1.begin (); i1 != f1.end (); ++i1 )
  {
#ifdef Q_WS_MAEMO_5
    // Progress reporting for the phone UI (0 - 100%).
    if ( thread && count % ( totalCount / 100 ) == 0 )
    {
      thread->emitProgressUpdate ( 100 * count / totalCount );
    }

#endif

    ++count;
    //printf("Iteration %d of %d\n", count, totalCount);
    // @@@ TODO Find out how to query a cv::flann::Index::Index
    // @@@ TODO Calculate the ratio of the distances of the neighbors (edge cases?)
    // @@@ TODO If the ratio falls within a certain threshold (we determined the 0.7
    //          through trial and error) then we keep those features. Otherwise they
    //          are too ambiguous and are rejected.
    // query the flannIndex to get the nn closest to il. if ratio<0.7 add point pair and their ssd to matches.

    // Earlier Mat-based query attempt, kept for reference:
    /*cv::Mat indices(f2.size(), 2, CV_32S);
    cv::Mat dists(f2.size(), 2, CV_32F);

    cv::Mat MatData = cv::Mat(i1->data.size(), 1, CV_32F);
    for(int i=0; i<i1->data.size(); i++)
    {
        MatData.at<float>(i,1) = i1->data[i];
    }

    flannIndex.knnSearch(MatData, indices, dists, 2, cv::flann::SearchParams());
    cout<<"IN RATIO!!!!!!!!!!"<<endl;
    cout.flush();
    FeatureMatch m;
    m.id1 = i1->id;
    m.id2 = f2[indices.at<int>(0,0)].id;
    m.score = dists.at<float>(0,0);

    //we only want to consider it a match if the corresponding matches are near
    //one another, corresponding to a "cluster"
    if ( dists.at<float>(i1->id,0) < dists.at<float>(i1->id,1)*0.7 ){
      matches.push_back ( m );
      totalScore += m.score;
    }*/

    // Query the two nearest f2 descriptors for this f1 descriptor.
    // NOTE(review): k=2 assumes f2 has at least two features — confirm.
    vector<int> indices(f2.size());
    vector<float> dists(f2.size());
    flannIndex.knnSearch(i1->data, indices, dists, 2, cv::flann::SearchParams());



    FeatureMatch m;
    m.id1 = i1->id;

    //cout << indices[0] << endl;

    // indices[0] is the row of the nearest f2 descriptor; its distance is
    // dists[0], the second-nearest distance is dists[1].
    m.id2 = f2[ indices[0] ].id;

    m.score = dists[0];

    // Ratio test: accept only if the best match clearly beats the runner-up.
    if (dists[0] < dists[1]*.7){//0.7){
        matches.push_back(m);
        totalScore += m.score;
    }


     //cout<<"end of for"<<endl;
  }
  cout<<"end of ratio"<<endl;
  cout.flush();
}


// Return the smaller of two ints.
int imin(int a, int b){
    return (a < b) ? a : b;
}




// RANSAC as described in lecture.  The result is a 3x3 homography matrix that
// computeHomography produced.  computeHomography expects a preallocated 3x3
// matrix of type CV_32F.  Just a tip, if you want to calculate an exact
// homography for 4 point correspondences, it's easy to pass computeHomography
// the full f1 and f2 vectors and just construct a temporary FeatureMatch
// vector with four elements from the full matches vector.

// RANSAC as described in lecture.  Repeatedly samples 4 distinct random
// matches, computes the exact homography for them (computeHomography
// writes into a preallocated 3x3 CV_32F matrix in place), and counts how
// many of all the matches agree with that homography.  Returns the best
// model found; chosen4 receives the indices (into matches) of the 4
// correspondences that produced it.
cv::Mat ransacHomography ( const std::vector<Feature> &      f1,
                           const std::vector<Feature> &      f2,
                           const std::vector<FeatureMatch> & matches,
                           std::vector<int> & chosen4,
                           int                               numRounds,
                           int inlierThreshold
#ifdef                                                       Q_WS_MAEMO_5
                           ,Progressable *                    thread
#endif
                           )
{
   // A point is an inlier if its squared reprojection error is below this.
   float inlier_thresh=100.0f;

   int matches_size=matches.size();
   int f1size=f1.size();
   int f2size=f2.size();

   cout<<"ransac: f1size="<<f1size<<", f2size="<<f2size<<endl;
   cout<<"matches size="<<matches_size<<endl;

   // Override the caller-supplied threshold: require 70% consensus.
   inlierThreshold=(int)(matches_size*.7);
   cout<<"inlierThreshold="<<inlierThreshold<<endl;

   srand(time(NULL));
   int curr_iter=0;

   bool threshold_reached=false;

   float h_arr[9];

   float curr_score=0.0f;
   cv::Mat best_h(3,3,CV_32F);       // best model past the consensus threshold
   cv::Mat best_count_h(3,3,CV_32F); // fallback: model with the most inliers
   int curr_count=0;
   int best_count=0;
   vector<int> inlier_set;
   vector<int> best_inlier_set;
   // The previous literal 1e100 overflows float to +inf; use the largest
   // finite float instead.
   float min_ssd=std::numeric_limits<float>::max();

   int rand_count;
   vector<int> four_ids;

   cv::Mat h( 3,3, CV_32F ); // homography for the current sample

   vector<FeatureMatch> four_matches;
   while (curr_iter++<numRounds){
      // Select 4 distinct random samples from matches.
      rand_count=0;
      four_ids.clear();
      while(rand_count<4){
        int curr_ind=rand()%matches_size;
        // Reject duplicates so the 4 correspondences are distinct.
        bool dup=false;
        for (int i=0;i<rand_count;i++){
          if (four_ids[i]==curr_ind){
            dup=true;
            }
        }

        if (!dup){
            rand_count++;
            four_ids.push_back(curr_ind);
            }
      }

      // Build the 4-element match vector for the exact homography.
      four_matches.clear();
      four_matches.push_back(matches[four_ids[0]]);
      four_matches.push_back(matches[four_ids[1]]);
      four_matches.push_back(matches[four_ids[2]]);
      four_matches.push_back(matches[four_ids[3]]);

      computeHomography(f1, f2, four_matches, h); //h is updated in place

      // Flatten h for count_inliers / applyHomography.
      h_arr[0]=h.at<float>(0,0);
      h_arr[1]=h.at<float>(0,1);
      h_arr[2]=h.at<float>(0,2);
      h_arr[3]=h.at<float>(1,0);
      h_arr[4]=h.at<float>(1,1);
      h_arr[5]=h.at<float>(1,2);
      h_arr[6]=h.at<float>(2,0);
      h_arr[7]=h.at<float>(2,1);
      h_arr[8]=h.at<float>(2,2);

      // Reject degenerate solutions (heuristic on the bottom row).
      if (h_arr[6]+h_arr[7]+h_arr[8]<1.0f)
          continue;

      curr_count=count_inliers(f1,f2,matches,h_arr,inlier_thresh,curr_score,inlier_set);
      cout<<"num inliers :"<<curr_count<<endl;

      // Track the model with the most inliers as a fallback.  h must be
      // cloned here: `best_count_h = h` would be a SHALLOW copy aliasing
      // h's buffer, which the next in-place computeHomography call
      // overwrites — corrupting the saved best model.
      if (curr_count>best_count&&(!threshold_reached)){
          best_count=curr_count;
          best_count_h=h.clone();
          best_inlier_set=inlier_set;

          chosen4.clear();
          chosen4.push_back(four_ids[0]);
          chosen4.push_back(four_ids[1]);
          chosen4.push_back(four_ids[2]);
          chosen4.push_back(four_ids[3]);
      }

      // Among models past the consensus threshold, keep the lowest-SSD one.
      if (curr_count>inlierThreshold&&min_ssd>curr_score){
          threshold_reached=true;
          min_ssd=curr_score;
          best_h=h.clone();   // clone for the same aliasing reason as above
          best_inlier_set=inlier_set;

          chosen4.clear();
          chosen4.push_back(four_ids[0]);
          chosen4.push_back(four_ids[1]);
          chosen4.push_back(four_ids[2]);
          chosen4.push_back(four_ids[3]);
      }
    }

   cout<<"threshold_reached:"<<threshold_reached<<endl;
   cout<<"min ssd:"<<min_ssd<<endl;
   cout<<"best count:"<<best_count<<endl;
   cout<<"The matrix best_h from ransac:"<<endl;
   cout<<best_h.at<float>(0,0)<<", "<<best_h.at<float>(0,1)<<", "<<best_h.at<float>(0,2)<<endl;
   cout<<best_h.at<float>(1,0)<<", "<<best_h.at<float>(1,1)<<", "<<best_h.at<float>(1,2)<<endl;
   cout<<best_h.at<float>(2,0)<<", "<<best_h.at<float>(2,1)<<", "<<best_h.at<float>(2,2)<<endl;

   cout<<"chosen4.size() "<<chosen4.size()<<endl;

   // NOTE(review): if every sample failed the degeneracy check, the
   // returned matrix is uninitialized and chosen4 is empty — callers
   // should check chosen4 before trusting the result.
   if (threshold_reached)
      return best_h;
   else
      return best_count_h;
}


//added by geoff, for ransac
int count_inliers(const std::vector<Feature> &f1, //features im1
                  const std::vector<Feature> &f2, //features im2
                  const std::vector<FeatureMatch> & matches, //matches from f1 and f2
                  float h[9],           //homography
                  float inliers_thresh,
                  float &curr_score,
                  std::vector<int> & inlier_set){  //threshold for inliers (how close should they be)




    int count=0;
    inlier_set.clear();
    //for each point in matches, apply h
    //compare new_y/x with y/x in f2
    //count++ iff within threshold

    //how is h computed? warp f1 to f2 or the other way?


    //declare vars
    float f1y,f1x, newf1y,newf1x, f2y, f2x, diffy,diffx,temp;
    curr_score=0.0;

    //assume h*f1 goes to f2.
    for (int i=0;i<matches.size();i++){
      f1y=(float)(f1[matches[i].id1-1].y);
      f1x=(float)(f1[matches[i].id1-1].x);

      //map it to f2y/x
      applyHomography(f1x,f1y,newf1x,newf1y,h);

      f2y=(float)(f2[matches[i].id2-1].y);
      f2x=(float)(f2[matches[i].id2-1].x);

      /*cout<<"x,y in im1 = "<<f1x<<","<<f1y<<endl;
      cout<<"h maps them to "<<newf1x<<","<<newf1y<<endl;
      cout<<"x,y in im2 = "<<f2x<<","<<f2y<<endl;
       */

      //calc loss
      diffy=f2y-newf1y;
      diffx=f2x-newf1x;
      temp=diffy*diffy+diffx*diffx;
      if (temp<inliers_thresh){
          inlier_set.push_back(i);
        count++;
        curr_score+=temp;
        //printf("%f\t",temp);
        //cout<<"curr score"<<curr_score<<",temp "<<temp<<" thresh "<<inliers_thresh<<endl;
    }

    }
    return count;

}





/*
//This version assumes more than 4 matches in &matches
// The resulting matrix is a 3x3 homography matrix.  You may find cvSolve
// (CV_SVD option) useful for solving the least squares problem.
void computeHomography ( const std:: vector<Feature> &f1,
                        const std:: vector<Feature> &f2,
                        const std:: vector<FeatureMatch> &matches,
                        cv::Mat &   h )
{

   int isSolved1 = 0;
   int isSolved2 = 0;
   int numTries = 0;
   //Declare X
   cv::Mat X1(8, 1, CV_32FC1);
   cv::Mat X2(8, 1, CV_32FC1);
   float cur_score = 0.0f;
   float best_score = 1e100;
   cv::Mat best_h(3, 3, CV_32F);
   while(numTries*4<matches.size())
   {
       while (!(isSolved1 == 1 && isSolved2 ==1))
       {
           numTries++;
           //making a sublist of matching points
           std:: vector<FeatureMatch> matches4;
           for( int i = (numTries-1)*4; i<numTries*4; i++)
           {
               matches4.push_back(matches[i]);
           }
           float x0 = 0.0f;
           float x1 = 0.0f;
           float x2 = 0.0f;
           float x3 = 0.0f;
           float y0 = 0.0f;
           float y1 = 0.0f;
           float y2 = 0.0f;
           float y3 = 0.0f;
           //Points from f2
                x0 =  (float) f2[matches4[0].id2-1].x;//
                y0 =  (float) f2[matches4[0].id2-1].y;//

                x1 =  (float) f2[matches4[1].id2-1].x;//
                y1 =  (float) f2[matches4[1].id2-1].y;//

                x2 =  (float) f2[matches4[2].id2-1].x;//
                y2 =  (float) f2[matches4[2].id2-1].y;//

                x3 =  (float) f2[matches4[3].id2-1].x;//
                y3 =  (float) f2[matches4[3].id2-1].y;//

           //Make A1 (matrix 8x8)
           cv::Mat A1 = (cv::Mat_<float>(8, 8) <<
                        0, 0, 1, 0, 0, 0, 0, 0,
                        1, 0, 1, 0, 0, 0, -x1, 0,
                        1, 1, 1, 0, 0, 0, -x2, -x2,
                        0, 1, 1, 0, 0, 0, 0, -x3,
                        0, 0, 0, 0, 0, 1, 0, 0,
                        0, 0, 0, 1, 0, 1, -y1, 0,
                        0, 0, 0, 1, 1, 1, -y2, -y2,
                        0, 0, 0, 0, 1, 1, 0, -y3);

           //Make B (column 8x1)
           cv::Mat B1 = (cv::Mat_<float>(8, 1) <<
                        x0, x1, x2, x3, y0, y1, y2, y3);

           //A=source matrix, B= right hand part of linear system, solves A*X-B=0,
           //X (column 8x1)=>H(sq. matrix, with lower right corner = 1
           //use the example of Case 1 in ProjectiveMappings
           CvMat cvA1 = A1;
           CvMat cvB1 = B1;
           CvMat cvX1 = X1;
           const CvMat* ptrcvA1 = &cvA1;
           const CvMat* ptrcvB1 = &cvB1;
           CvMat* ptrcvX1 = &cvX1;
           isSolved1 = cvSolve(ptrcvA1, ptrcvB1, ptrcvX1, CV_SVD);
           X1 = cv::cvarrToMat(ptrcvX1).clone();


           //Points from f1
                x0 =  (float) f1[matches4[0].id1-1].x;//
                y0 =  (float) f1[matches4[0].id1-1].y;//39;//

                x1 =  (float) f1[matches4[1].id1-1].x;//123;//
                y1 =  (float) f1[matches4[1].id1-1].y;//104;//

                x2 =  (float) f1[matches4[2].id1-1].x;//123;//
                y2 =  (float) f1[matches4[2].id1-1].y;//377;//

                x3 =  (float) f1[matches4[3].id1-1].x;//39;//
                y3 =  (float) f1[matches4[3].id1-1].y;//416;//

           //Make A2 (matrix 8x8)
           cv::Mat A2 = (cv::Mat_<float>(8, 8) <<
                        0, 0, 1, 0, 0, 0, 0, 0,
                        1, 0, 1, 0, 0, 0, -x1, 0,
                        1, 1, 1, 0, 0, 0, -x2, -x2,
                        0, 1, 1, 0, 0, 0, 0, -x3,
                        0, 0, 0, 0, 0, 1, 0, 0,
                        0, 0, 0, 1, 0, 1, -y1, 0,
                        0, 0, 0, 1, 1, 1, -y2, -y2,
                        0, 0, 0, 0, 1, 1, 0, -y3);

           //Make B (column 8x1)
           cv::Mat B2 = (cv::Mat_<float>(8, 1) <<
                        x0, x1, x2, x3, y0, y1, y2, y3);

           //A=source matrix, B= right hand part of linear system, solves A*X-B=0,
           //X (column 8x1)=>H(sq. matrix, with lower right corner = 1
           //use the example of Case 2 in ProjectiveMappings
           CvMat cvA2 = A2;
           CvMat cvB2 = B2;
           CvMat cvX2 = X2;
           const CvMat* ptrcvA2 = &cvA2;
           const CvMat* ptrcvB2 = &cvB2;
           CvMat* ptrcvX2 = &cvX2;
           isSolved2 = cvSolve(ptrcvA2, ptrcvB2, ptrcvX2, CV_SVD);
           X2 = cv::cvarrToMat(ptrcvX2).clone();
            //cout<<"isSolved1: "<<isSolved1<<" isSolved2: "<<isSolved2<<endl;

       }
       //Convert X to H
       cv::Mat H1 = (cv::Mat_<float>(3, 3) <<
                       X1.at<float>(0,0), X1.at<float>(1,0), X1.at<float>(2,0),
                       X1.at<float>(3,0), X1.at<float>(4,0), X1.at<float>(5,0),
                       X1.at<float>(6,0), X1.at<float>(7,0), 1);
       cv::Mat H2 = (cv::Mat_<float>(3, 3) <<
                       X2.at<float>(0,0), X2.at<float>(1,0), X2.at<float>(2,0),
                       X2.at<float>(3,0), X2.at<float>(4,0), X2.at<float>(5,0),
                       X2.at<float>(6,0), X2.at<float>(7,0), 1);
       //H = H1*H2^(-1)
       //use the example of Case 3 in ProjectiveMappings
       CvMat cvH2 = H2;

       CvMat* ptrcvH2 = &cvH2;

       cvInvert(ptrcvH2,ptrcvH2,CV_SVD);
       //H2=H2^(-1)
       H2 = cv::cvarrToMat(ptrcvH2).clone();
       h = H1*H2;
       /*for(int i = 0; i<h.size().width; i++)
       {
           for(int j = 0; j<h.size().height; j++)
           {
               if(h.at<float>(i,j) < 1e-5 ){
                   cout<<"here "<<endl;
                   h.at<float>(i,j) = 0.0f;
               }
           }
       }*/
/*
       float h_arr[9];
       h_arr[0]=h.at<float>(0,0);
       h_arr[1]=h.at<float>(0,1);
       h_arr[2]=h.at<float>(0,2);
       h_arr[3]=h.at<float>(1,0);
       h_arr[4]=h.at<float>(1,1);
       h_arr[5]=h.at<float>(1,2);
       h_arr[6]=h.at<float>(2,0);
       h_arr[7]=h.at<float>(2,1);
       h_arr[8]=h.at<float>(2,2);

        cur_score = evaluateMatch(f1,f2,matches,h_arr);
        if(cur_score < best_score){

            best_score = cur_score;
            best_h = h;
            cout<<"best score: "<< best_score<<endl;
        }


       isSolved1 = 0;
       isSolved2 = 0;

   }

   h = best_h;
   cout<<"The matrix h:"<<endl;
   cout<<h.at<float>(0,0)<<", "<<h.at<float>(0,1)<<", "<<h.at<float>(0,2)<<endl;
   cout<<h.at<float>(1,0)<<", "<<h.at<float>(1,1)<<", "<<h.at<float>(1,2)<<endl;
   cout<<h.at<float>(2,0)<<", "<<h.at<float>(2,1)<<", "<<h.at<float>(2,2)<<endl;
  // @@@ TODO
 // assert ( 0 ); // Remove when ready
}
*/

 //this version assumes 4 points only
// The resulting matrix is a 3x3 homography matrix.  You may find cvSolve
// (CV_SVD option) useful for solving the least squares problem.
void computeHomography ( const std:: vector<Feature> &f1,
                         const std:: vector<Feature> &f2,
                         const std:: vector<FeatureMatch> &matches, //this should be 4 matches long
                         cv::Mat &   h )
{


    //Declare X
    cv::Mat X1(8, 1, CV_32FC1);
    cv::Mat X2(8, 1, CV_32FC1);


    //Points from f2
    float x0 = 0.0f;
    float x1 = 0.0f;
    float x2 = 0.0f;
    float x3 = 0.0f;
    float y0 = 0.0f;
    float y1 = 0.0f;
    float y2 = 0.0f;
    float y3 = 0.0f;

    x0 =  (float) f2[matches[0].id2-1].x;//
    y0 =  (float) f2[matches[0].id2-1].y;//

    x1 =  (float) f2[matches[1].id2-1].x;//
    y1 =  (float) f2[matches[1].id2-1].y;//

    x2 =  (float) f2[matches[2].id2-1].x;//
    y2 =  (float) f2[matches[2].id2-1].y;//

    x3 =  (float) f2[matches[3].id2-1].x;//
    y3 =  (float) f2[matches[3].id2-1].y;//


    //Make A1 (matrix 8x8)
    cv::Mat A1 = (cv::Mat_<float>(8, 8) <<
                 0, 0, 1, 0, 0, 0, 0, 0,
                 1, 0, 1, 0, 0, 0, -x1, 0,
                 1, 1, 1, 0, 0, 0, -x2, -x2,
                 0, 1, 1, 0, 0, 0, 0, -x3,
                 0, 0, 0, 0, 0, 1, 0, 0,
                 0, 0, 0, 1, 0, 1, -y1, 0,
                 0, 0, 0, 1, 1, 1, -y2, -y2,
                 0, 0, 0, 0, 1, 1, 0, -y3);

    //Make B (column 8x1)
    cv::Mat B1 = (cv::Mat_<float>(8, 1) <<
                 x0, x1, x2, x3, y0, y1, y2, y3);

    //A=source matrix, B= right hand part of linear system, solves A*X-B=0,
    //X (column 8x1)=>H(sq. matrix, with lower right corner = 1
    //use the example of Case 1 in ProjectiveMappings
    CvMat cvA1 = A1;
    CvMat cvB1 = B1;
    CvMat cvX1 = X1;
    const CvMat* ptrcvA1 = &cvA1;
    const CvMat* ptrcvB1 = &cvB1;
    CvMat* ptrcvX1 = &cvX1;
    cvSolve(ptrcvA1, ptrcvB1, ptrcvX1, CV_SVD);
    X1 = cv::cvarrToMat(ptrcvX1).clone();



    x0 =  (float) f1[matches[0].id1-1].x;//
    y0 =  (float) f1[matches[0].id1-1].y;//39;//

    x1 =  (float) f1[matches[1].id1-1].x;//123;//
    y1 =  (float) f1[matches[1].id1-1].y;//104;//

    x2 =  (float) f1[matches[2].id1-1].x;//123;//
    y2 =  (float) f1[matches[2].id1-1].y;//377;//

    x3 =  (float) f1[matches[3].id1-1].x;//39;//
    y3 =  (float) f1[matches[3].id1-1].y;//416;//


    //Make A2 (matrix 8x8)
    cv::Mat A2 = (cv::Mat_<float>(8, 8) <<
                 0, 0, 1, 0, 0, 0, 0, 0,
                 1, 0, 1, 0, 0, 0, -x1, 0,
                 1, 1, 1, 0, 0, 0, -x2, -x2,
                 0, 1, 1, 0, 0, 0, 0, -x3,
                 0, 0, 0, 0, 0, 1, 0, 0,
                 0, 0, 0, 1, 0, 1, -y1, 0,
                 0, 0, 0, 1, 1, 1, -y2, -y2,
                 0, 0, 0, 0, 1, 1, 0, -y3);

    //Make B (column 8x1)
    cv::Mat B2 = (cv::Mat_<float>(8, 1) <<
                 x0, x1, x2, x3, y0, y1, y2, y3);

    //A=source matrix, B= right hand part of linear system, solves A*X-B=0,
    //X (column 8x1)=>H(sq. matrix, with lower right corner = 1
    //use the example of Case 2 in ProjectiveMappings
    CvMat cvA2 = A2;
    CvMat cvB2 = B2;
    CvMat cvX2 = X2;
    const CvMat* ptrcvA2 = &cvA2;
    const CvMat* ptrcvB2 = &cvB2;
    CvMat* ptrcvX2 = &cvX2;
    cvSolve(ptrcvA2, ptrcvB2, ptrcvX2, CV_SVD);
    X2 = cv::cvarrToMat(ptrcvX2).clone();


    //Convert X to H
    cv::Mat H1 = (cv::Mat_<float>(3, 3) <<
                    X1.at<float>(0,0), X1.at<float>(1,0), X1.at<float>(2,0),
                    X1.at<float>(3,0), X1.at<float>(4,0), X1.at<float>(5,0),
                    X1.at<float>(6,0), X1.at<float>(7,0), 1);
    cv::Mat H2 = (cv::Mat_<float>(3, 3) <<
                    X2.at<float>(0,0), X2.at<float>(1,0), X2.at<float>(2,0),
                    X2.at<float>(3,0), X2.at<float>(4,0), X2.at<float>(5,0),
                    X2.at<float>(6,0), X2.at<float>(7,0), 1);
    //H = H2^(-1)*H1
    //use the example of Case 3 in ProjectiveMappings
    CvMat cvH2 = H2;
    CvMat* ptrcvH2 = &cvH2;
    cv::Mat H = (cv::Mat_<float>(3, 3) <<
                    0,0,0,
                    0,0,0,
                    0,0,0);
    CvMat cvH = H;
    CvMat* ptrcvH = &cvH;
/*
    cout<<"The matrix H2 before inversion:"<<endl;
    cout<<H2.at<float>(0,0)<<", "<<H2.at<float>(0,1)<<", "<<H2.at<float>(0,2)<<endl;
    cout<<H2.at<float>(1,0)<<", "<<H2.at<float>(1,1)<<", "<<H2.at<float>(1,2)<<endl;
    cout<<H2.at<float>(2,0)<<", "<<H2.at<float>(2,1)<<", "<<H2.at<float>(2,2)<<endl;
*/
    cvInvert(ptrcvH2,ptrcvH2,CV_SVD);

    H2 = cv::cvarrToMat(ptrcvH2).clone();
    h = H1*H2;
    /*
    cout<<"Multiplied H2^-1*H1"<<endl;
    cout<<"The matrix H1:"<<endl;
    cout<<H1.at<float>(0,0)<<", "<<H1.at<float>(0,1)<<", "<<H1.at<float>(0,2)<<endl;
    cout<<H1.at<float>(1,0)<<", "<<H1.at<float>(1,1)<<", "<<H1.at<float>(1,2)<<endl;
    cout<<H1.at<float>(2,0)<<", "<<H1.at<float>(2,1)<<", "<<H1.at<float>(2,2)<<endl;
    cout<<"The matrix H2:"<<endl;
    cout<<H2.at<float>(0,0)<<", "<<H2.at<float>(0,1)<<", "<<H2.at<float>(0,2)<<endl;
    cout<<H2.at<float>(1,0)<<", "<<H2.at<float>(1,1)<<", "<<H2.at<float>(1,2)<<endl;
    cout<<H2.at<float>(2,0)<<", "<<H2.at<float>(2,1)<<", "<<H2.at<float>(2,2)<<endl;
    cout<<"The matrix H= H2*H1:"<<endl;
    cout<<h.at<float>(0,0)<<", "<<h.at<float>(0,1)<<", "<<h.at<float>(0,2)<<endl;
    cout<<h.at<float>(1,0)<<", "<<h.at<float>(1,1)<<", "<<h.at<float>(1,2)<<endl;
    cout<<h.at<float>(2,0)<<", "<<h.at<float>(2,1)<<", "<<h.at<float>(2,2)<<endl;




    cout<<"x,y in Image 2: "<<f2[matches[0].id2-1].x<<", "<<f2[matches[0].id2-1].y<<endl;
    float xNew = 0.0f;
    float yNew = 0.0f;
    float h_arr[9];
    h_arr[0]=h.at<float>(0,0);
    h_arr[1]=h.at<float>(0,1);
    h_arr[2]=h.at<float>(0,2);
    h_arr[3]=h.at<float>(1,0);
    h_arr[4]=h.at<float>(1,1);
    h_arr[5]=h.at<float>(1,2);
    h_arr[6]=h.at<float>(2,0);
    h_arr[7]=h.at<float>(2,1);
    h_arr[8]=h.at<float>(2,2);
    applyHomography((float)f1[matches[0].id1-1].x, (float)f1[matches[0].id1-1].y, xNew, yNew, h_arr);
    cout<<"x,y in from homography: "<<xNew<<", "<<yNew<<endl;
    cout<<"x,y in Image 2: "<<f2[matches[1].id2-1].x<<", "<<f2[matches[1].id2-1].y<<endl;
    applyHomography((float)f1[matches[1].id1-1].x, (float)f1[matches[1].id1-1].y, xNew, yNew, h_arr);
    cout<<"x,y in from homography: "<<xNew<<", "<<yNew<<endl;
    cout<<"x,y in Image 2: "<<f2[matches[2].id2-1].x<<", "<<f2[matches[2].id2-1].y<<endl;
    applyHomography((float)f1[matches[2].id1-1].x, (float)f1[matches[2].id1-1].y, xNew, yNew, h_arr);
    cout<<"x,y in from homography: "<<xNew<<", "<<yNew<<endl;
    cout<<"x,y in Image 2: "<<f2[matches[3].id2-1].x<<", "<<f2[matches[3].id2-1].y<<endl;
    applyHomography((float)f1[matches[3].id1-1].x, (float)f1[matches[3].id1-1].y, xNew, yNew, h_arr);
    cout<<"x,y in from homography: "<<xNew<<", "<<yNew<<endl;
    */
   // @@@ TODO
  // assert ( 0 ); // Remove when ready

}



//If you find a need for a byte array from your floating point images,
//here's some code you may use.
// Converts a 32-bit float image (values assumed in [0,1] — TODO confirm
// against callers) into an 8-bit byte image by scaling by 255.
void convertToByteImage ( IplImage * floatImage, IplImage * byteImage )
{
  assert ( floatImage->depth == IPL_DEPTH_32F );
  // BUG FIX: the destination of a byte conversion must be an 8-bit image.
  // The original asserted IPL_DEPTH_32F here, which contradicts both the
  // function's name and the 255x scaling below.
  assert ( byteImage->depth == IPL_DEPTH_8U );
  cvConvertScale ( floatImage, byteImage, 255, 0 );
}

// Transform point by homography.
// Maps the point (x, y) through the 3x3 homography stored row-major in
// h[0..8] and writes the perspective-divided result to (xNew, yNew).
// In homogeneous terms: [x', y', w]^T = H * [x, y, 1]^T, then divide by w.
void applyHomography ( float   x,
                       float   y,
                       float & xNew,
                       float & yNew,
                       float   h [9] )
{
  // Numerators of the projected coordinates.
  const float numX = h[0] * x + h[1] * y + h[2];
  const float numY = h[3] * x + h[4] * y + h[5];
  // Homogeneous scale factor (perspective divisor).
  const float w    = h[6] * x + h[7] * y + h[8];

  xNew = numX / w;
  yNew = numY / w;
}
