/*
 * CrossCheckingPanograph.cpp
 *
 *  Created on: 08/09/2011
 *      Author: kimdongback
 */

#include "CrossCheckingPanograph.h"

#include <iomanip>
#include <limits>

using namespace std;
using namespace cv;

/**
 * Computes the component-wise minimum and maximum over a list of 2-D points.
 *
 * @param corners  input points (e.g. perspective-transformed image corners)
 * @param min_vec  output: per-component minimum over all points
 * @param max_vec  output: per-component maximum over all points
 *
 * If corners is empty, min_vec is left at +DBL_MAX and max_vec at -DBL_MAX.
 */
void getMinMaxVectors(vector< Vec2d > &corners, Vec2d & min_vec, Vec2d & max_vec)
{
    // Start min at the largest and max at the *most negative* representable
    // double.  Note: DBL_MIN (the previous initializer) is the smallest
    // positive double, not the most negative one — it would yield a wrong
    // maximum whenever all coordinates are negative.
    min_vec = Vec2d( std::numeric_limits<double>::max(),
                     std::numeric_limits<double>::max());
    max_vec = Vec2d(-std::numeric_limits<double>::max(),
                    -std::numeric_limits<double>::max());
    for(int i = 0; i < (int)corners.size(); i++)
    {
        const Vec2d & corner = corners.at(i);
        if(corner[0] < min_vec[0])
            min_vec[0] = corner[0];
        if(corner[1] < min_vec[1])
            min_vec[1] = corner[1];
        if(corner[0] > max_vec[0])
            max_vec[0] = corner[0];
        if(corner[1] > max_vec[1])
            max_vec[1] = corner[1];
    }
}

// Constructs the panograph builder; the input images are copied into the
// m_images member and processed later by analyze().
CrossCheckingPanograph::CrossCheckingPanograph(const vector<cv::Mat> & images) : m_images(images) {}

// Destructor: all members clean themselves up (cv::Mat is reference counted),
// so nothing explicit to release here.
CrossCheckingPanograph::~CrossCheckingPanograph() {}

/**
 * @brief Incrementally stitches m_images into a panograph.
 *
 * Pass 1: for every input image, compute a grayscale copy, SURF keypoints
 * and SURF descriptors, stored in m_grayscales / m_keypoints / m_descriptors.
 *
 * Pass 2: starting from the first image as the canvas, repeatedly
 *  - radius-match the canvas descriptors against the next image in both
 *    directions and keep only cross-checked (mutual) matches,
 *  - estimate a RANSAC homography from those matches,
 *  - enlarge the canvas so the warped image fits, warp the new image onto it,
 *  - write the match visualization and the growing panograph to images/*.jpg.
 * An image with 4 or fewer cross-checked matches is skipped (a homography
 * needs more than 4 correspondences to be meaningful here).
 */
void CrossCheckingPanograph::analyze()
{
    cv::Ptr<cv::FeatureDetector> featureDetector = FeatureDetector::create("SURF");
    cv::SurfDescriptorExtractor surf;

    // ---- Pass 1: per-image grayscale, key points and descriptors ---------
    cout << "  Analyzing images..." << endl;

    for(size_t i=0; i < m_images.size(); i++)
    {
        cv::Mat temp;
        cv::Mat descriptor;
        vector<cv::KeyPoint> keypoints;

        // Grayscale copy used for detection/description.
        // NOTE(review): cv::imread produces BGR images, so CV_BGR2GRAY would
        // normally be the correct code here — confirm how m_images are loaded.
        cvtColor(m_images[i], temp, CV_RGB2GRAY);
        m_grayscales.push_back(temp);

        featureDetector->detect(temp, keypoints);
        m_keypoints.push_back(keypoints);

        cout << "    image " << setw(3) << right << (i + 1) << "... "
             << setw(6) << right << keypoints.size() << " features detected." << endl;

        surf.compute(temp, keypoints, descriptor);
        m_descriptors.push_back(descriptor);
    }

    cv::BruteForceMatcher<cv::L2<float> > descriptorMatcher;
    size_t cap = 300;   // keep at most this many best matches per direction

    // ---- Pass 2: incremental panograph ------------------------------------

    cout << "Panograph image..." << endl;

    cv::Mat currentImage;        // running panograph canvas
    cv::Mat currentGrayscale;
    cv::Mat currentDescriptor;   // features of the canvas, refreshed each pass
    vector<cv::KeyPoint> currentKeypoints;

    for(size_t i=0; i < m_images.size(); i++)
    {
        if ( i == 0 )
        {
            // Seed the canvas with the first image; its features are computed
            // at the bottom of this iteration.
            m_images[0].copyTo(currentImage);
        }
        else
        {
            vector<DMatch> single_matches;
            vector<DMatch> single_reverse_matches;
            vector<DMatch> common_matches;
            vector< vector<DMatch> > matches;
            vector< vector<DMatch> > reverseMatches;

            // Match in both directions so we can cross-check below.
            descriptorMatcher.radiusMatch(currentDescriptor, m_descriptors[i], matches, 0.1);
            descriptorMatcher.radiusMatch(m_descriptors[i], currentDescriptor, reverseMatches, 0.1);

            cout << "  Matching " << i << " ..." << endl;
            cout << "    matches                = " << matches.size() << endl;
            cout << "    reverse matches        = " << reverseMatches.size() << endl;

            // Keep only the best (first) candidate per query point.
            for(size_t k = 0; k < matches.size(); k++)
            {
                vector<DMatch> point_matches = matches.at(k);

                if(point_matches.size() > 0)
                {
                    single_matches.push_back(point_matches.at(0));
                }
            }

            for(size_t k = 0; k < reverseMatches.size(); k++)
            {
                vector<DMatch> point_matches = reverseMatches.at(k);

                if(point_matches.size() > 0)
                {
                    single_reverse_matches.push_back(point_matches.at(0));
                }
            }

            cout << "    single matches         = " << single_matches.size() << endl;
            cout << "    single reverse matches = " << single_reverse_matches.size() << endl;

            // Cap each direction at the `cap` best matches (DMatch orders by
            // distance, so nth_element keeps the closest ones up front).
            if ( single_matches.size() > cap )
            {
                std::nth_element(single_matches.begin(), single_matches.begin()+(cap-1), single_matches.end());
                single_matches.erase(single_matches.begin()+cap, single_matches.end());
            }

            if ( single_reverse_matches.size() > cap )
            {
                std::nth_element(single_reverse_matches.begin(), single_reverse_matches.begin()+(cap-1), single_reverse_matches.end());
                single_reverse_matches.erase(single_reverse_matches.begin()+cap, single_reverse_matches.end());
            }

            // Cross-check: keep a match only if the reverse direction agrees
            // on the same keypoint pair.
            for(size_t k=0; k < single_matches.size(); k++)
            {
                for(size_t m=0; m < single_reverse_matches.size(); m++)
                {
                    if ( single_matches[k].queryIdx == single_reverse_matches[m].trainIdx
                      && single_matches[k].trainIdx == single_reverse_matches[m].queryIdx)
                    {
                        common_matches.push_back(single_matches[k]);
                    }
                }
            }

            if ( common_matches.size() > 4 )
            {
                // Debug output: visualize the surviving matches.
                cv::Mat imageResult;
                cv::drawMatches(currentImage, currentKeypoints,
                                m_images[i], m_keypoints[i],
                                common_matches, imageResult,
                                cv::Scalar(255, 255, 255));

                vector<int> params;
                params.push_back(CV_IMWRITE_JPEG_QUALITY);
                params.push_back(100);

                stringstream ss;

                ss << "images/alex_feature_detection_" << i << ".jpg";

                imwrite(ss.str(), imageResult,params);

                // Collect point correspondences: canvas (query) -> image i (train).
                vector<Point2f> image_i, image_j;

                for(size_t k=0; k < common_matches.size(); k++)
                {
                    image_i.push_back(currentKeypoints[common_matches[k].queryIdx].pt);
                    image_j.push_back(m_keypoints[i][common_matches[k].trainIdx].pt);
                }

                vector<unsigned char> match_mask;

                // H maps canvas coordinates to image-i coordinates.
                cv::Mat homography = findHomography(image_i, image_j, CV_RANSAC, 3, match_mask);

                // Project image i's corners into canvas space (hence inv())
                // to find out how far the warped image sticks out.
                vector<Vec2d> corners;
                corners.push_back(Vec2d(0,0));
                corners.push_back(Vec2d(m_images[i].cols-1,0));
                corners.push_back(Vec2d(0,m_images[i].rows-1));
                corners.push_back(Vec2d(m_images[i].cols-1,m_images[i].rows-1));

                vector<Vec2d> trafo_corners(4);
                perspectiveTransform(corners, trafo_corners, homography.inv());

                Vec2d min_vec, max_vec;
                getMinMaxVectors(trafo_corners,  min_vec, max_vec);

                // shift: extra space needed above/left of the canvas origin;
                // padding: extra space needed below/right of the canvas.
                Vec2i shift,padding;
                if(min_vec[1] < 0)
                    shift[1] = ceil(fabs(min_vec[1]));
                else
                    shift[1] = 0;

                if(min_vec[0] < 0)
                    shift[0] = ceil(fabs(min_vec[0]));
                else
                    shift[0] = 0;

                // The last valid row index is rows-1, so the overhang is
                // ceil(max) - (rows - 1).  BUGFIX: the original used
                // "ceil(max) - rows - 1", undershooting by 2 — for
                // max_vec ~ rows this produced a NEGATIVE padding, shrinking
                // out_img below currentImage and blowing up the ROI below.
                if(max_vec[1] >= currentImage.rows)
                    padding[1] = ceil(max_vec[1]) - (currentImage.rows - 1);
                else
                    padding[1]= 0;

                if(max_vec[0] >= currentImage.cols)
                    padding[0] = ceil(max_vec[0]) - (currentImage.cols - 1);
                else
                    padding[0]= 0;

                Size out_size(shift[0]+padding[0]+currentImage.cols,shift[1]+padding[1]+currentImage.rows);

                Mat out_img = Mat::zeros(out_size,CV_8UC3);

                // Paste the existing canvas at its shifted position.
                Mat first_part = out_img(Range(shift[1],shift[1]+currentImage.rows),Range(shift[0],shift[0]+currentImage.cols));
                currentImage.copyTo(first_part);

                // Fold the shift into the homography: shifted_H.inv() first
                // maps image i into old-canvas space, then translates by
                // +shift into the enlarged canvas.
                Mat T = Mat::eye(Size(3,3),CV_64FC1);
                T.at<double>(0,2) = -shift[0];
                T.at<double>(1,2) = -shift[1];
                Mat shifted_H =  homography * T ; // care opencv multiplies the wrong way!!!

                // BORDER_TRANSPARENT keeps the already-pasted canvas pixels
                // where the warped image has no data.
                warpPerspective(m_images[i], out_img, shifted_H.inv(), out_size, INTER_CUBIC,BORDER_TRANSPARENT);

                stringstream sp;

                sp << "images/alex_panograph_" << i << ".jpg";
                imwrite(sp.str(), out_img,params);

                out_img.copyTo(currentImage);
            }
        }

        // Recompute the canvas features for the next iteration; this also
        // primes currentKeypoints/currentDescriptor after the i == 0 seed.
        cvtColor(currentImage, currentGrayscale, CV_RGB2GRAY);
        featureDetector->detect(currentGrayscale, currentKeypoints);
        surf.compute(currentGrayscale, currentKeypoints, currentDescriptor);

    }
}
