#include "Analyser.h"
#include "ImageContainer.h"
#include "ImageAnalyzer.h"

#include <sys/types.h>
#include <dirent.h>
#include <errno.h>
#include <qfile.h>

#include "Geometry/v3d_metricbundle.h"
#include "Math/v3d_linear.h"
#include "Math/v3d_optimization.h"
#include <limits>

#include "Geometry/v3d_distortion.h"
#include "Geometry/v3d_cameramatrix.h"

using namespace std;
using namespace cv;

#define MIN_MATCHES 20
#if defined(WIN32) || defined(_WIN32)
# define FILE_SEP "\\"
#else
# define FILE_SEP "/"
#endif

#define PILLARS 0
#define WEAK_INIT 1

// Constructs the analyser with the image file list and the indices of the
// two images used to seed the initial two-view reconstruction.
// All matching/track statistics counters start at zero.
Analyser::Analyser(const QList<QString> & filelist, const QList<int> & initList)
    : m_files(filelist)
    , m_initial_files(initList)
{
    m_calls_ctr = 0;

    // Match-quality bookkeeping.
    m_total_number_of_consistently_cross_checked_matches = 0;
    m_total_number_of_consistent_RANSAC_matches = 0;

    // Inlier/outlier classification counters and their track-size sums
    // (used to report average track length per class).
    m_num_inliers = 0;
    m_num_soft_inliers = 0;
    m_num_outliers = 0;
    m_sum_track_size_inliers = 0;
    m_sum_track_size_soft_inliers = 0;
    m_sum_track_size_outliers = 0;
}

void Analyser::evaluateMatchingAccuracy()
{
    ImageContainer ic;
    QString dataset("templeRing");

    int number_of_images = 4;
    std::string input_path("../../datasets/"+dataset.toStdString());//templeRingNotRot");//templeRing
    ic.loadImages(input_path,".png");

    std::vector<cv::Mat*> images = ic.getAllImages();

    QString filename("../../evaluation/SIFT_" + dataset+ "_"+QString::number(number_of_images) +".txt");
    QFile file(filename);
    QTextStream out( &file );

    out << "Evaluation of speed and accuracy!\n" << endl;

    if (!file.open(QIODevice::WriteOnly | QIODevice::Text))
    {
        cout << filename.toStdString() << "  couldn't be opened. -> ABORTING!" << endl;
        return;
    }

    out << "number of test images: " << number_of_images << endl;
    out << endl;
    for(size_t i = 0; i < number_of_images;i++)
    {
        cv::Mat* img_ptr = images.at(i);
        if(img_ptr != NULL)
        {
            m_images.append(new AnalyzedImage(img_ptr));
        }
    }
    cout << flush;
    out << "detection time: " << ImageAnalyzer::getInstance().getTotalDetectionTime() << endl;
    out << "number of features: " << ImageAnalyzer::getInstance().getTotalNumFeatures() << endl;
    out << "descriptor extraction time: " << ImageAnalyzer::getInstance().getTotelDescTime() << endl;
    out << endl;


    extractTracks();
    out << "number of consistently cross checked matches: " << m_total_number_of_consistently_cross_checked_matches << endl;
    out << "number of consistent RANSAC matches: " << m_total_number_of_consistent_RANSAC_matches << endl;
    out << "number of tracks: " << m_tracks.size() << endl;
    out << endl;
    int num_consistent_tracks = 0;
    for(int i = 0; i < m_tracks.size() ; i++)
    {
        if(!m_tracks[i]->isInconsistent())
            num_consistent_tracks++;
    }
    out << "number of consistent tracks: " << num_consistent_tracks << endl;

    out << endl;

    loadParameters(QString(input_path.c_str()));
    estimateAll3DPoints();
    out << "number of true inliers: " <<   m_num_inliers << endl;
    out << "inlier track size sum:"  << m_sum_track_size_inliers << endl;
    out << "average inlier track size: " << ((double)m_sum_track_size_inliers)/((double)m_num_inliers) << endl;

    out << endl;

    out << "number of soft inliers: " <<   m_num_soft_inliers << endl;
    out << "soft inlier track size sum:"  << m_sum_track_size_soft_inliers << endl;
    out << "average soft inlier track size: " << ((double)m_sum_track_size_soft_inliers)/((double)m_num_soft_inliers) << endl;

    out << endl;
    out << "number of outliers: " <<   m_num_outliers << endl;
    out << "outlier track size sum:"  << m_sum_track_size_outliers << endl;
    out << "average outlier track size: " << ((double)m_sum_track_size_outliers)/((double)m_num_outliers) << endl;
    file.close();
}

// End-to-end test against the NotreDame dataset (Bundler format):
// parses list.txt (image names) and notredame.out (ground-truth cameras and
// 3D points with their view lists), selects the two most strongly connected
// cameras plus a hand-picked set of additional views, then runs the
// incremental reconstruction pipeline on that subset.
void Analyser::testNotreDame()
{

    // obtaining list of images
    QFile file("../../datasets/NotreDame/list.txt");

    if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
    {
        cout << "NotreDame/list.txt couldn't be opened." << endl;
    }

    QTextStream in (&file);
    QString line = in.readLine();

    m_files.clear();

    while(!(line.isNull()))
    {
        m_files.append("../../datasets/NotreDame/"+line);
        line = in.readLine();
    }

    file.close();

    // Bundler reconstruction file: header, camera blocks, then point blocks.
    QFile dataFile("../../datasets/NotreDame/notredame.out");

    if (!dataFile.open(QIODevice::ReadOnly | QIODevice::Text))
    {
        cout << "NotreDame/notredame.out couldn't be opened." << endl;
    }

    QTextStream dataIn (&dataFile);
    QString data;

    data = dataIn.readLine(); // consume the comment

    data = dataIn.readLine(); // numbers

    QStringList list;

    list = data.split(' ');

    // Header line: "<num_cameras> <num_points>"
    int numCamera = list.at(0).toInt();
    int numPoint = list.at(1).toInt();

    // collecting cameras
    // Each camera occupies 5 lines: "f k1 k2", three rotation rows, translation.
    QString cameraData[5];

    for(int i=0; i < numCamera; i++)
    {
        cameraData[0] = dataIn.readLine();
        cameraData[1] = dataIn.readLine();
        cameraData[2] = dataIn.readLine();
        cameraData[3] = dataIn.readLine();
        cameraData[4] = dataIn.readLine();

        // K param

        QStringList fList = cameraData[0].split(' ');
        double f = fList.at(0).toDouble();
        double k1 = fList.at(1).toDouble();  // radial distortion coefficients
        double k2 = fList.at(2).toDouble();
        // Intrinsics with focal length on the diagonal; principal point left
        // at (0,0) here (it is reset to the image center in loadNotreDameImages).
        double F[9] = {0, 0, 0, 0, 0, 0, 0, 0, 1};
        F[0] = F[4] = f;

        // R param

        QStringList r1List = cameraData[1].split(' ');
        QStringList r2List = cameraData[2].split(' ');
        QStringList r3List = cameraData[3].split(' ');
        double R[9];

        R[0] = r1List.at(0).toDouble();
        R[1] = r1List.at(1).toDouble();
        R[2] = r1List.at(2).toDouble();
        R[3] = r2List.at(0).toDouble();
        R[4] = r2List.at(1).toDouble();
        R[5] = r2List.at(2).toDouble();
        R[6] = r3List.at(0).toDouble();
        R[7] = r3List.at(1).toDouble();
        R[8] = r3List.at(2).toDouble();

        // T param

        QStringList tList = cameraData[4].split(' ');
        double T[3];

        T[0] = tList.at(0).toDouble();
        T[1] = tList.at(1).toDouble();
        T[2] = tList.at(2).toDouble();

        // NOTE(review): these Mats wrap the loop-local arrays F/R/T without
        // copying; this assumes the Parameters constructor deep-copies the
        // matrices, otherwise the stored headers dangle after each iteration
        // — verify against Parameters.
        cv::Mat kMat = cv::Mat(3, 3, CV_64FC1, F);
        cv::Mat rMat = cv::Mat(3, 3, CV_64FC1, R);
        cv::Mat tMat = cv::Mat(3, 1, CV_64FC1, T);

        Parameters param(kMat, rMat, tMat,k1,k2);

        m_test_parameters.append(param);
        m_test_counter.append(0);  // per-camera observation counter, filled below
    }

    // Each point occupies 3 lines: position, color, view list.
    QString pointData[3];

    for(int i=0; i < numPoint; i++)
    {
        pointData[0] = dataIn.readLine();
        pointData[1] = dataIn.readLine();
        pointData[2] = dataIn.readLine();

        Track * pTrack = new Track();

        // Position
        double p[3];
        QStringList pList = pointData[0].split(' ');

        p[0] = pList.at(0).toDouble();
        p[1] = pList.at(1).toDouble();
        p[2] = pList.at(2).toDouble();

        // NOTE(review): p3d wraps the loop-local array p — assumes
        // Track::set3DPoint clones the data; verify.
        cv::Mat p3d = cv::Mat(3, 1, CV_64FC1, p);
        pTrack->set3DPoint(p3d);

        // Color (parsed but currently unused)
        int c[3];
        QStringList cList = pointData[1].split(' ');

        c[0] = cList.at(0).toInt();
        c[1] = cList.at(1).toInt();
        c[2] = cList.at(2).toInt();

        // View list: "<length> (<camera> <key> <x> <y>)*"
        QStringList vList = pointData[2].split(' ');
        int camera;
        int key;
        float x;
        float y;
        int length = vList.at(0).toInt();
        int index = 1;

        // NOTE(review): this inner 'i' shadows the point-loop index above;
        // harmless here but worth renaming.
        for(int i=0; i < length; i++)
        {
            camera = vList.at(index).toInt();
            index++;
            key = vList.at(index).toInt();
            index++;
            x = vList.at(index).toFloat();
            index++;
            y = vList.at(index).toFloat();
            index++;

            TrackDetail td;

            m_test_counter[camera]++;  // count observations per camera
            td.cameraIdx = camera;
            td.keypointIdx = key;
            td.x = x;
            td.y = y;

            pTrack->track_details.append(td);
        }
        m_test_tracks.append(pTrack);
    }

    dataFile.close();

    // Pass 1: the camera that observes the most points overall.
    unsigned int max= 0;
    int max_ind = -1;
    for(int i = 0 ; i < m_test_counter.size() ;i++)
    {
        if(m_test_counter[i] > max)
        {
            max = m_test_counter[i];
            max_ind = i;
        }
        m_test_counter[i] = 0;  // reset for the second pass
    }
    if(max_ind == -1)
    {
        cerr << "There was an ERROR!!!" << endl;
    }

    // Pass 2: count, for every other camera, how many tracks it shares with
    // the winner of pass 1.
    for (int i  = 0; i < m_test_tracks.size(); i++)
    {
        Track * track = m_test_tracks[i];
        if(track->containsTrackDetails(max_ind))
        {
            for(int j = 0; j < track->track_details.size();j++)
            {
                int cam_ind = track->track_details[j].cameraIdx;
                if(cam_ind != max_ind)
                    m_test_counter[cam_ind]++;
            }
        }
    }

    // The camera sharing the most tracks with camera max_ind.
    unsigned int max2= 0;
    int max_ind2 = -1;
    for(int i = 0 ; i < m_test_counter.size() ;i++)
    {
        if(m_test_counter[i] > max2)
        {
            max2 = m_test_counter[i];
            max_ind2 = i;
        }
    }
    //max_ind2 = 81;

    // Tracks visible in both seed cameras (computed but currently unused).
    QList<unsigned int> joined_tracks;
    for (int i  = 0; i < m_test_tracks.size(); i++)
    {
        Track * track = m_test_tracks[i];
        if(track->containsTrackDetails(max_ind) && track->containsTrackDetails(max_ind2))
        {
            joined_tracks.append(i);
        }
    }

    // Image subset: the two seed cameras first, then a hand-picked set.
    m_notre_dame_file_selection.clear();
    m_notre_dame_file_selection.append(max_ind);
    m_notre_dame_file_selection.append(max_ind2);
    int num_pix = 8;
    //int indices[15] = {21,22,75,79,81,144,260,306,414,517,534,557,629 ,690,711};
    //int indices[4] = {21,81,517,711};
    int indices[8] = {21,22,75,81,260,306,414,517};
    for(int i = 0; i < num_pix ; i++)
    {

        m_notre_dame_file_selection.append(indices[i]);
    }

    // Run the incremental reconstruction on the selected subset.
    loadNotreDameImages();
    extractTracks();

    estimate3DPointsNormalized(0,1);
    estimateObjectCenter(0,1);
    rejectOutliers();
    bundleAdjust();
    int next_img_id = getMostRelevantImageID();
    while(next_img_id != -1)//while()
    {

        addImageToModel(next_img_id);
        bundleAdjust(1);  // mode 1: also optimize focal length
        addNewTracksToModel(next_img_id);
        rejectOutliers();
        next_img_id = getMostRelevantImageID();
    }
    cout << "nothing relevant left!" << endl;
    printPointCloud();
    produceCameras();


}

// Loads the selected NotreDame images (their ".rd.jpg" variants —
// presumably the radially-undistorted versions; confirm against the
// dataset) and, for the first two (the seed pair), applies the
// ground-truth pose with the principal point reset to the image center.
void Analyser::loadNotreDameImages()
{
    // NOTE(review): QList::size() returns int; the unsigned loop index causes
    // a signed/unsigned comparison here.
    for(unsigned int i = 0; i < m_notre_dame_file_selection.size(); i++)
    {
        QString filename = m_files[m_notre_dame_file_selection[i]];
        filename.replace(".jpg",".rd.jpg");
        Mat img = imread(filename.toStdString());
        // NOTE(review): AnalyzedImage receives a pointer to this stack-local
        // Mat — assumes its constructor deep-copies the image data; verify.
        m_images.append(new AnalyzedImage(&img));
        if(i < 2)
        {
            // Seed pair: take K/R/T from the ground truth, but move the
            // principal point to the image center (Bundler leaves it at 0,0).
            Parameters params =  m_test_parameters[m_notre_dame_file_selection[i]];
            Mat K = params.getK();
            double ppx = ((double)img.cols)/2.0;
            double ppy = ((double)img.rows)/2.0;
            K.at<double>(0,2) = ppx;
            K.at<double>(1,2) = ppy;
            m_images[m_images.size()-1]->setPose(K,params.getR(),params.getT());
        }
    }
}

// Full structure-from-motion pipeline: load the configured images, build
// feature tracks, initialise a two-view model, then greedily add the most
// promising remaining image, bundle-adjusting and rejecting outliers after
// each step. Finally dumps the point cloud and produces the camera list.
void Analyser::analyse()
{
    // TODO: [AK] Loads up image files as QImage.
    // TODO: [AK] Converts QImage instances into OpenCV images

    // Only consulted when mode == PILLARS to locate the calibration file.
    std::string input_path("../../datasets/templeRingNotRot");//templeRing

    // CLEANUP: removed an unused local vector<cv::Mat*> and the stale
    // commented-out ImageContainer loading code.
    for(int i=0; i < m_files.size(); i++)
    {
        // NOTE(review): AnalyzedImage receives a pointer to this stack-local
        // Mat — assumes its constructor deep-copies the pixel data; verify.
        cv::Mat img = imread(m_files.at(i).toStdString());
        m_images.append(new AnalyzedImage(&img));
    }

    // TODO: [CM] Produces point cloud and cameras
    // Full double precision so the dumped point cloud round-trips exactly.
    cout.precision(numeric_limits<double>::digits10+1);

    extractTracks();

    // PILLARS uses ground-truth calibration from disk; WEAK_INIT assumes a
    // focal length and a canonical first camera.
    int mode = WEAK_INIT;

    if(mode == PILLARS)
        loadParameters(QString(input_path.c_str()));
    init3DModel(mode);
    bundleAdjust();

    // Incrementally extend the model with the image seeing the most tracks.
    int next_img_id = getMostRelevantImageID();
    while(next_img_id != -1)
    {
        addImageToModel(next_img_id);
        bundleAdjust();
        addNewTracksToModel(next_img_id);
        rejectOutliers();
        next_img_id = getMostRelevantImageID();
    }
    cout << "nothing relevant left!" << endl;
    printPointCloud();
    produceCameras();
}

void Analyser::printPointCloud()
{
    cout << "pc=[";
    for(int i = 0; i < m_point_cloud.getPointCount(); i++)
    {
        Point3D pt = m_point_cloud.getPoint(i);
        cout << pt.getX() << ", " << pt.getY() << ", " << pt.getZ() << ";" << endl;
    }
    cout << "];" << endl;
}

void Analyser::addNewTracksToModel(int img_id)
{
    QList<unsigned int > img_ids = getInitialisedImageIds();
    for(int i = 0; i < img_ids.size();i++)
    {
        if(img_ids[i] != img_id)
            estimate3DPointsNormalized(img_ids[i],img_id);
    }
}

// Builds the output Camera list from every pose-initialised image: camera
// position comes from the model translation, orientation from the Euler
// angles of the decomposed [R|T] projection matrix. Also dumps the camera
// translations to stdout as a MATLAB-style matrix.
void Analyser::produceCameras()
{
    m_cameras.clear();
    QList<unsigned int > img_ids = getInitialisedImageIds();
    QList<unsigned int >::iterator it;
    cout << "T=[" << endl;
    for(it = img_ids.begin();it != img_ids.end();++it)
    {
        AnalyzedImage * img = m_images[*it];
        Mat R = img->getR();
        Mat K = img->getK();
        Mat T = img->getT();
        Mat Tvis = img->getModelT();//-((K*R).inv()*T);
        print(Tvis.t()); cout << "; ";
        // Assemble the 3x4 pose matrix [R|T] column-block by column-block.
        Mat RT = Mat::zeros(3,4,CV_64FC1);
        Mat roi = RT(Range(0,3),Range(0,3));
        R.copyTo(roi);
        roi = RT(Range(0,3),Range(3,4));
        T.copyTo(roi);
        // Only eulerAngles is consumed below; the other outputs are required
        // by the OpenCV API signature.
        Mat cameraMatrix,rotMatrix,transVect,rotMatrixX,rotMatrixY,rotMatrixZ,eulerAngles;
        decomposeProjectionMatrix(RT,  cameraMatrix,  rotMatrix,  transVect,  rotMatrixX,  rotMatrixY,  rotMatrixZ,  eulerAngles);
        // NOTE(review): this reassignment of T is never read afterwards —
        // looks like dead code.
        T = img->getModelT();
        Camera cam;
        Vector3D trans(Tvis.at<double>(0),Tvis.at<double>(1),Tvis.at<double>(2));
        cam.setCameraPosition(trans);
        Vector3D angles(eulerAngles.at<double>(0),eulerAngles.at<double>(1),eulerAngles.at<double>(2));
        cam.setCameraRotationVector(angles);
        cam.setFilename(m_files.at(*it));

        m_cameras.append(cam);
    }
    cout << "];"<< endl;

    /*    //should be
    cout << "Ttrue=[" << endl;
    for(int i = 0; i < img_ids.size() ; i++ )
    {

        Mat R = m_parameters[img_ids[i]]->getR();
        Mat K = m_parameters[img_ids[i]]->getK();
        Mat T = m_parameters[img_ids[i]]->getT();
        Mat Tvis = -((K*R).inv()*T);
        print(Tvis.t()); cout << "; ";
    }
    cout << "];"<< endl;*/
}



void Analyser::addImageToModel(int img_id)
{
    int closest_id = getClosestImageID(img_id);
    AnalyzedImage* closest_img = m_images[closest_id];
    AnalyzedImage* img = m_images[img_id];
    img->setPose(closest_img->getK(),closest_img->getR(),closest_img->getT());
    // Parameters * params = m_parameters[img_id];
    //img->setPose(params->getK(),params->getR(),params->getT());
}

int Analyser::getClosestImageID(int img_id)
{
    AnalyzedImage* img = m_images[img_id];
    QList<unsigned int> track_ids = img->kp2trackID.values();
    QList<unsigned int> img_ids = getInitialisedImageIds();
    QList<unsigned int> rating;
    for(int i = 0; i< img_ids.size();i++)
    {
        rating.push_back(0);
    }
    for(int i = 0; i< img_ids.size();i++)
    {
        for(int j = 0 ; j < track_ids.size();j++)
        {
            if( m_tracks[track_ids[j]]->isVisible(img_ids[i]))
                rating[i]++;
        }
    }

    unsigned int max_val = 0;
    int max_ind = -1;
    for(int i = 0; i < rating.size();i++)
    {
        if(rating[i] > max_val)
        {
            max_val = rating[i];
            max_ind = img_ids[i];
        }
    }
    if(max_ind == -1)
    {
        cerr << "getClosestImageID: Obviously wrong call!!! " << endl;
    }
    return max_ind;
}

/*void Analyser::addImageToModel(int img_id)
{
    Mat X,Y,W,A;
    getCorrespondences(img_id,X,Y,W,A);

    Mat A_inv = A.inv(DECOMP_SVD);
    Mat Px = (A_inv*X).t();
    Mat Py = (A_inv*Y).t();
    Mat Pw = (A_inv*W).t();
    Mat P = Mat::zeros(3,4,CV_64FC1);
    Mat roi = P(Range(0,1),Range(0,4));
    Px.copyTo(roi);
    roi = P(Range(1,2),Range(0,4));
    Py.copyTo(roi);
    roi = P(Range(2,3),Range(0,4));
    Pw.copyTo(roi);


    Parameters * params = m_parameters[img_id];
    Mat K = params->getK();
    Mat R = params->getR();
    Mat T = params->getT();
    Mat RT = Mat::zeros(3,4,CV_64FC1);
    roi = RT(Range(0,3),Range(0,3));
    R.copyTo(roi);
    roi = RT(Range(0,3),Range(3,4));
    T.copyTo(roi);
    Mat P_true = K*RT;
    // P_true = P_true / P_true.at<double>(2,3);
    cout <<  "true P : " << endl;
    print(P_true);
    cout << "new P :" << endl;
    print(P);
    Mat cameraMatrix,rotMatrix,transVect;
    decomposeProjectionMatrix(P,  cameraMatrix,  rotMatrix,  transVect);
    cout <<  "true K : " << endl;
    print(K);
    cout << "new K :" << endl;
    print(cameraMatrix);
    cout <<  "true R : " << endl;
    print(R);
    cout << "new R :" << endl;
    print(rotMatrix);
    cout <<  "true T : " << endl;
    print(T);
    cout << "new T :" << endl;
    print(transVect);


}*/

void Analyser::getCorrespondences(int img_id,cv::Mat & X,cv::Mat & Y,cv::Mat & W,cv::Mat & A)
{

    vector<Point2f> img_pts;
    vector<Point3d> pts_3D;
    for(int i= 0; i < m_point_cloud.getPointCount();i++)
    {
        Point3D pt = m_point_cloud.getPoint(i);

        Track * track = m_tracks[pt.getTrackId()];
        if(track->isVisible(img_id))
        {
            unsigned int kp_id = track->getKeyPointID(img_id);
            Point2f img_pt = m_images[img_id]->keypoints[kp_id].pt;
            img_pts.push_back(img_pt);
            Point3d pt_3D(pt.getX(),pt.getY(),pt.getZ());
            pts_3D.push_back(pt_3D);
        }
    }
    int num_points = img_pts.size();
    X = Mat::zeros(num_points,1,CV_64FC1);
    Y = Mat::zeros(num_points,1,CV_64FC1);
    W = Mat::ones(num_points,1,CV_64FC1);
    A = Mat::ones(num_points,4,CV_64FC1);
    for(int i = 0 ; i < num_points; i++)
    {
        X.at<double>(i) = img_pts[i].x;
        Y.at<double>(i) = img_pts[i].y;
        A.at<double>(i,0) = pts_3D[i].x;
        A.at<double>(i,1) = pts_3D[i].y;
        A.at<double>(i,2) = pts_3D[i].z;
    }
}



int Analyser::getMostRelevantImageID()
{

    m_relevance.clear();
    for(int i = 0 ; i < m_images.size();i++)
    {
        m_relevance.append(0);
    }

    for(int i = 0 ; i < m_point_cloud.getPointCount(); i++)
    {
        Point3D pt = m_point_cloud.getPoint(i);
        Track * track =  m_tracks[pt.getTrackId()];
        if(!track->isInconsistent())
        {
            QList<unsigned int> img_ids;
            track->getImageIDs(img_ids);
            for(int j = 0; j < img_ids.size();j++)
            {

                if(!m_images[img_ids[j]]->isPoseSet())
                    m_relevance[img_ids[j]]++;
            }
        }
    }

    int max_val = 0;
    int max_ind = -1;
    for(int i = 0 ; i < m_images.size();i++)
    {
        if(m_relevance[i] > max_val)
        {
            max_val = m_relevance[i];
            max_ind = i;
        }
    }

    if(max_val < 20)
    {
        cout << "No good images left!"  << endl;
        cout << "highest score was: "<< max_val << endl;
        return -1;
    }
    return max_ind;

}

// Loads per-image calibration (K, R, T) from the "*par.txt" file found in
// folder_path and appends one Parameters object per line to m_parameters.
// Expected line format: <name> k0..k8 r0..r8 t0..t2 (22 tokens).
// Returns false when the file is missing, unopenable, or empty.
bool Analyser::loadParameters(QString folder_path)
{
    QString file_name(getParameterFileName(folder_path.toStdString(),"par.txt").c_str() );
    if(file_name.size() == 0)
    {
        cerr << "Cannot find parameter file!" << endl;
        return false;
    }
    QFile f( file_name );

    if( !f.exists() )
    {
        // It does not exist
        std::cerr << "The file does not exist." << std::endl;

        return false;
    }

    // It exists, open it
    if( !f.open(  QIODevice::ReadOnly ) )
    {
        // It could not open
        std::cerr << "Failed to open." << std::endl;

        return false;
    }

    if(f.atEnd())
    {
        std::cerr << "File empty." << std::endl;
        return false;
    }
    // First line is a header (image count) — consume and ignore it.
    QString header = f.readLine();

    while (!f.atEnd())
    {
        QString line = f.readLine();
        QStringList params_chars = line.split(' ');
        int num_elems = 22;
        if(params_chars.size() != num_elems)
        {
            // Malformed/trailing line: stop parsing (matches original
            // behaviour of silently ending on the first bad line).
            break;
        }
        // Token 0 is the image name; 1-9 are K, 10-18 are R, 19-21 are T.
        double k_params[9];
        for(int i = 0; i < 9; i++ )
        {
            k_params[i] = params_chars[i+1].toDouble();
        }
        double r_params[9];
        for(int i = 0; i < 9; i++ )
        {
            r_params[i] = params_chars[i+10].toDouble();
        }
        double t_params[3];
        for(int i = 0; i < 3; i++ )
        {
            t_params[i] = params_chars[i+19].toDouble();
        }
        // These Mat headers wrap the loop-local stack arrays above without
        // copying the data.
        Mat K = Mat(3,3, CV_64F, k_params );
        Mat R = Mat(3,3, CV_64F, r_params );
        Mat T = Mat(3,1, CV_64F, t_params );

        // BUG FIX: clone before storing. cv::Mat(rows,cols,type,data) does
        // NOT own/copy 'data', so the previously stored headers pointed at
        // stack memory that is reused every iteration and dies when this
        // function returns. Cloning gives each Parameters its own storage.
        m_parameters.append(new Parameters(K.clone(), R.clone(), T.clone()));
    }

    f.close();
    return true;
}

// Scans directory 'dir' and returns "<dir>/<entry>" for the first directory
// entry whose name ends with 'file_ending'; returns an empty string when the
// directory cannot be opened or no entry matches.
string Analyser::getParameterFileName(string dir, std::string file_ending)
{
    DIR * dir_handle = opendir(dir.c_str());
    if(dir_handle == NULL)
    {
        cout << "Error(" << errno << ") opening " << dir << endl;
        return string();
    }

    string match;
    struct dirent * entry;
    while((entry = readdir(dir_handle)) != NULL)
    {
        const string entry_name(entry->d_name);
        if(endingEquals(entry_name, file_ending))
        {
            match = dir + FILE_SEP + entry_name;
            break;
        }
    }

    closedir(dir_handle);
    return match;
}

// Returns true when 'query_name' ends with 'ending' (exact, case-sensitive
// suffix match); always false when the suffix is longer than the name.
bool Analyser::endingEquals(string query_name, string ending)
{
    const size_t name_len = query_name.length();
    const size_t suffix_len = ending.size();
    if(suffix_len > name_len)
        return false;

    // Compare the trailing suffix_len characters directly.
    const string tail = query_name.substr(name_len - suffix_len);
    return tail == ending;
}


// Returns the indices of all images whose camera pose has been set
// (i.e. that are already part of the reconstruction).
QList<unsigned int> Analyser::getInitialisedImageIds()
{
    QList<unsigned int> list;
    // FIX: QList::size() returns int; an int index avoids the
    // signed/unsigned comparison the original unsigned counter caused.
    for (int i = 0 ; i < m_images.size(); i++)
    {
        if(m_images[i]->isPoseSet())
            list.append(i);
    }
    return list;
}

// Converts a 3x3 V3D matrix into an equivalent double-precision cv::Mat.
cv::Mat makeMat(V3D::Matrix3x3d m_v3d)
{
    Mat result = Mat::zeros(3,3,CV_64FC1);
    for(int row = 0; row < 3; ++row)
    {
        for(int col = 0; col < 3; ++col)
        {
            result.at<double>(row, col) = m_v3d[row][col];
        }
    }
    return result;
}

// Converts a V3D 3-vector into a 3x1 double-precision cv::Mat.
cv::Mat makeMat(V3D::Vector3d vec)
{
    Mat column = Mat::zeros(3,1,CV_64FC1);
    for(int i = 0; i < 3; ++i)
    {
        column.at<double>(i) = vec[i];
    }
    return column;
}

// Converts a 3x3 cv::Mat of doubles into a V3D::Matrix3x3d.
// Emits an error and returns a default matrix on dimension mismatch.
V3D::Matrix3x3d makeMatrix3x3d(cv::Mat mat)
{
    const bool is_3x3 = (mat.cols == 3 && mat.rows == 3);
    if(!is_3x3)
    {
        cerr << "ERROR: makeMatrix3x3d: WRONG DIMENSIONS!" << endl;
        return V3D::Matrix3x3d();
    }

    V3D::Matrix3x3d converted;
    for(int row = 0; row < 3; ++row)
    {
        for(int col = 0; col < 3; ++col)
        {
            converted[row][col] = mat.at<double>(row, col);
        }
    }
    return converted;
}

// Converts a 3x1 or 1x3 cv::Mat of doubles into a V3D::Vector3d.
// Emits an error and returns a default vector on dimension mismatch.
V3D::Vector3d makeVector3d(Mat mat)
{
    const bool is_row_vector = (mat.rows == 1 && mat.cols == 3);
    const bool is_col_vector = (mat.rows == 3 && mat.cols == 1);
    if(!is_row_vector && !is_col_vector)
    {
        cerr << "ERROR: makeVector3d: WRONG DIMENSIONS!" << endl;
        return V3D::Vector3d();
    }

    V3D::Vector3d converted;
    for(int i = 0; i < 3; ++i)
    {
        converted[i] = mat.at<double>(i);
    }
    return converted;
}

// Runs SSBA metric bundle adjustment over all pose-initialised cameras and
// the current point cloud, then writes the optimized poses and 3D points
// back. ba_mode == 1 restricts the optimization to focal length (used right
// after a new camera is added); otherwise the full metric bundle is run.
void Analyser::bundleAdjust(int ba_mode)
{

    cout << "bundle Adjust!" << endl;
    QList<unsigned int> initialised_img_ids = getInitialisedImageIds();

    int num_points = m_point_cloud.getPointCount();
    int num_cams = initialised_img_ids.size();

    vector<V3D::StdDistortionFunction> distortions(num_cams); // might need initialization
    vector<double> origFocalLengths(num_cams);
    vector<V3D::CameraMatrix> cams(num_cams);

    double avgFocalLength = 0.0;

    // Convert each camera to V3D form, normalizing every K to unit focal
    // length (SSBA expects normalized intrinsics; the focal length is kept
    // aside to de-normalize afterwards).
    for (int i = 0; i < num_cams; ++i)
    {


        unsigned int img_id= initialised_img_ids[i];
        AnalyzedImage * img = m_images[img_id];


        V3D::Matrix3x3d KMat = makeMatrix3x3d(img->getK());

        double const f0 = KMat[0][0];
        V3D::Matrix3x3d Knorm = KMat;
        // Normalize the intrinsic to have unit focal length.
        V3D::scaleMatrixIP(1.0/f0, Knorm);
        Knorm[2][2] = 1.0;

        origFocalLengths[i] = f0;
        cams[i].setIntrinsic(Knorm);
        V3D::Matrix3x3d R = makeMatrix3x3d(img->getR());
        cams[i].setRotation(R);


        V3D::Vector3d T = makeVector3d(img->getT());
        cams[i].setTranslation(T);
        avgFocalLength += f0;

    }

    avgFocalLength /= num_cams;
    cout << "mean focal length = " << avgFocalLength << endl;

    // Measurement tables in SSBA's flat format: measurements[k] is an image
    // observation belonging to camera correspondingView[k] of 3D point
    // correspondingPoint[k].
    vector<V3D::Vector3d > Xs(num_points);
    vector<V3D::Vector2d > measurements;
    vector<int> correspondingView;
    vector<int> correspondingPoint;


    for(int i= 0; i < m_point_cloud.getPointCount() ; i++)
    {
        Point3D pt = m_point_cloud.getPoint(i);

        Xs[i][0] = pt.getX();
        Xs[i][1] = pt.getY();
        Xs[i][2] = pt.getZ();

        // Collect this point's observation in every initialised camera,
        // normalizing pixel coordinates by that camera's focal length to
        // match the normalized intrinsics above.
        QList<unsigned int> image_ids;
        Track * track = m_tracks[pt.getTrackId()];
        track->getImageIDs(image_ids);
        for(int j = 0; j < initialised_img_ids.size(); j++)
        {
            unsigned int img_id= initialised_img_ids[j];
            double const f0 = origFocalLengths[j];
            if(image_ids.contains(img_id))
            {
                Point2f img_pt = m_images[img_id]->keypoints[track->getKeyPointID(img_id)].pt;
                V3D::Vector2d normed_pt = V3D::Vector2d(img_pt.x/f0,img_pt.y/f0);
                measurements.push_back(normed_pt);
                correspondingView.push_back(j);
                correspondingPoint.push_back(i);
            }
        }


    }






    V3D::optimizerVerbosenessLevel = 1;
    // 2-pixel inlier threshold, expressed in normalized image coordinates.
    double const inlierThreshold = 2.0 / avgFocalLength;
    int mode =V3D::FULL_BUNDLE_METRIC;
    if(ba_mode == 1)
        mode = V3D::FULL_BUNDLE_FOCAL_LENGTH;//;FULL_BUNDLE_METRIC
    V3D::VaryingInternalsMetricBundleOptimizer opt(mode, inlierThreshold, distortions, cams, Xs,
                                                   measurements, correspondingView, correspondingPoint);

    opt.maxIterations = 2000;
    opt.minimize();
    cout << "optimizer status = " << opt.status << endl;

    /*if(opt.status == V3D::LEVENBERG_OPTIMIZER_TIMEOUT)
     {
         cout << "No real improvement -> not changing parameters!"  << endl;
         return; // there was nothing to minize
     }*/


    // Write the optimized camera poses back, rescaling each K by the
    // original focal length to undo the normalization.
    for(int  i = 0; i < num_cams ; i++)
    {
        unsigned int img_id= initialised_img_ids[i];
        AnalyzedImage * img = m_images[img_id];
        V3D::Matrix3x3d Knew = cams[i].getIntrinsic();
        V3D::scaleMatrixIP(origFocalLengths[i], Knew);
        Knew[2][2] = 1.0;
        Mat K = makeMat(Knew);
        Mat R = makeMat(cams[i].getRotation());
        Mat T = makeMat(cams[i].getTranslation());
        cout << " K: " << endl; print(K);print(img->getK());
        cout << " R: " << endl; print(R);print(img->getR());
        cout << " T: " << endl; print(T);print(img->getT());
        img->setPose(K,R,T);
    }
    // cout << "Updated cams!" << endl;
    // cout << "pc=[" << endl;
    // Write the optimized 3D points back into the point cloud.
    for(int i= 0; i < num_points; i++)
    {

        //cout << "o: [" << m_point_cloud.getPoint(i).getX() << ", "<< m_point_cloud.getPoint(i).getY() << ", "<< m_point_cloud.getPoint(i).getZ() << "]" <<endl;
        //    cout << Xs[i][0] << ", "<<  Xs[i][1]<< ", "<< Xs[i][2]<< ";" <<endl;
        m_point_cloud.updatePoint(i,Xs[i][0],Xs[i][1],Xs[i][2]);
    }
    // cout << "];" << endl;

    // cout << "Updated points!" <<  endl;

}

void Analyser::init3DModel(int mode)
{
    if(m_images.size() < 2)
    {
        cerr << "Dont have enough images!!!!" << endl;
        return;
    }
    if(mode == PILLARS)
    {
        QPair<unsigned int,unsigned int> max_pair = getPairWithMostMatches();
        cout << "maxpair is: " << max_pair.first << " and " << max_pair.second << endl;
        unsigned int img_ind1 = max_pair.first;
        unsigned int img_ind2 = max_pair.second;


        m_images[img_ind1]->setPose(m_parameters[img_ind1]);
        m_images[img_ind2]->setPose(m_parameters[img_ind2]);

        estimate3DPointsNormalized(img_ind1,img_ind2);
    }
    else if(mode == WEAK_INIT)
    {

        unsigned int img_ind1 = m_initial_files.at(0);
        unsigned int img_ind2 = m_initial_files.at(1);
        double f_assumed = 1800;//1952.0;
        double k_params1[9] = {f_assumed,0,((double)m_images[img_ind1]->bw_image.cols)/2.0,0,f_assumed,((double)m_images[img_ind1]->bw_image.rows)/2.0,0,0,1};
        double k_params2[9] = {f_assumed,0,((double)m_images[img_ind2]->bw_image.cols)/2.0,0,f_assumed,((double)m_images[img_ind2]->bw_image.rows)/2.0,0,0,1};
        //double k_params[9] = {f_assumed,0,1157.0,0,f_assumed,772.5,0,0,1};
        //double f_assumed = 1832.0;
        //double k_params[9] = {f_assumed,0,1160.3,0,f_assumed,761.3,0,0,1};
        Mat K1 = Mat(3,3, CV_64F, k_params1 );
        cout << "K1 : " << endl;
        print(K1);
        Mat K2 =  Mat(3,3, CV_64F, k_params2 );
        Mat R1 = Mat::eye(3,3,CV_64FC1);
        Mat T1 = Mat::zeros(3,1,CV_64FC1);
        m_images[img_ind1]->setPose(K1,R1,T1);
        //Mat M = K*R;
        Mat t2 = Mat::zeros(3,1,CV_64FC1);
        t2.at<double>(0) = -3;

        Mat R_est = Mat::eye(3,3,CV_64FC1);
        Mat T2 = - R_est * t2;
        /*
        //____________________________



        Mat F = Mat::zeros(3,3,CV_64FC1);

        AnalyzedImage  first_image = *(m_images[img_ind1]);
        AnalyzedImage second_image = *(m_images[img_ind2]);

        vector<DMatch> matches;
        ImageAnalyzer::getInstance().match(first_image,second_image,matches);
        vector<Point2f> matched_pts1;
        vector<Point2f> matched_pts2;
        matches2points(m_images.at(img_ind2)->keypoints ,m_images.at(img_ind1)->keypoints,matches , matched_pts2, matched_pts1,K1,K2);

        vector<unsigned char> match_mask;
        F = findFundamentalMat(matched_pts1, matched_pts2, FM_RANSAC, 3/f_assumed,0.99, match_mask);


        cout <<  "F: " << endl;
        print(F);





        //correctF(F); //WHY???
        print(F);
        Mat W,U,Vt;
        SVD::compute( F, W, U, Vt );
        Mat e2 = U.col(2);
        cout << "e2: " << endl;
        print(e2);
        cout << "epitest: " << endl;
        print(e2.t()*F);
        Vec3d epipole2 = U.col(2);
        Mat skewsym_epi = createSkewSymmetricMatrix(epipole2);
        Mat M2 = skewsym_epi*F;
        cout << "M2: " << endl;
        print(M2);


        // e2 = e2/cv::norm(e2);
        Mat R_ass = Mat::ones(3,3,CV_64FC1);
        Mat R_ass_v = R_ass.reshape(0,9);
        Mat M2_v = M2.reshape(0,9);

        // M2_v = M2_v/cv::norm(M2_v);
        Mat A = Mat::zeros(9,4,CV_64FC1);
        Mat roi = A(Range(0,9),Range(0,1));
        M2_v.copyTo(roi);
        roi =A(Range(0,3),Range(1,2));
        e2.copyTo(roi);
        roi =A(Range(3,6),Range(2,3));
        e2.copyTo(roi);
        roi =A(Range(6,9),Range(3,4));
        e2.copyTo(roi);

        cout << "A: " << endl;
        print(A);
        Mat A_inner = A.t()*A;
        // double m2_val = A_inner.at<double>(0,0);
        //Mat A_inner_inv =Mat::eye(4,4,CV_64FC1);
        // A_inner_inv.at<double>(0,0) = 1.0/ m2_val;
        cout <<"A_inner="<< endl;
        print(A_inner);
        Mat A_middle = A*(A_inner.inv());
        cout << "here2!"<< endl;
        Mat A_outer = (A_middle*A.t());
        cout << "here3!"<< endl;
        print(A_outer);
        print(R_ass_v);
        //Mat Proj = (A*((A.t()*A).inv())*A.t());
        //) cout << "Proj:" << endl;
        // print(Proj);
        Mat R_vec = A_outer *R_ass_v;
        Mat R_est = R_vec.reshape(0,3);

        cout << "R_est:" << endl;
        print(R_est);

        // double R_vals[9] =   { 0.195702914727429  , 0.198319733435131,  -0.178200310192610,
        //                       -0.183402119151040  , 0.596984755249129 ,  0.096511073909671,
        //                       -0.584242119264824  , 0.295965744829991  , 0.824076955659064};

        //  R_est = Mat(3,3, CV_64F, R_vals );
        Mat T2 = -(R_est) * t2;

        double scale_trans = T2.at<double>(0)/e2.at<double>(0);
        cout << "T2 want: "<< endl;
        print(T2);
        T2 = e2 * scale_trans;
        cout << "T2 get: " << endl;

        print(T2);
*/
        //return;
        //_________________________________
        m_images[img_ind2]->setPose(K2,R_est,T2);
        estimate3DPointsNormalized(img_ind1,img_ind2);

        AnalyzedImage  first_image = *(m_images[img_ind1]);
        AnalyzedImage second_image = *(m_images[img_ind2]);

        vector<DMatch> matches;
        ImageAnalyzer::getInstance().match(first_image,second_image,matches);
        vector<Point2f> matched_pts1;
        vector<Point2f> matched_pts2;
        matches2points(m_images.at(img_ind2)->keypoints ,m_images.at(img_ind1)->keypoints,matches , matched_pts2, matched_pts1);

        vector<unsigned char> match_mask;
        Mat H = findHomography(matched_pts1, matched_pts2, CV_RANSAC, 3, match_mask);

        Mat sum_of_points = Mat::zeros(3,1,CV_64FC1);
        unsigned int num_plane_points = 0;
        for(int k = 0; k < (int)match_mask.size() ; k++)
        {
            if(match_mask.at(k) != 0)
            {
                const DMatch & dmatch = matches[k];

                int first_ind = dmatch.queryIdx;

                if(m_images.at(img_ind1)->kp2trackID.contains(first_ind))
                {
                    //cout << "contains!" << endl;
                    unsigned int trackID = m_images.at(img_ind1)->kp2trackID.value(first_ind);
                    Track * track = m_tracks[trackID];
                    if(track->isInit())
                    {
                        num_plane_points++;
                        sum_of_points += track->get3DPoint();
                    }
                }
            }
        }

        m_object_center = sum_of_points/num_plane_points;
        m_max_distance= sqrt( pow(m_object_center.at<double>(0),2) + pow(m_object_center.at<double>(1),2)+ pow(m_object_center.at<double>(2),2) );

        cout << "m_object_center: "<< endl;
        print(m_object_center);

        rejectOutliers();


    }


}

void Analyser::estimateObjectCenter(unsigned int img_ind1,unsigned int img_ind2)
{
    AnalyzedImage  first_image = *(m_images[img_ind1]);
    AnalyzedImage second_image = *(m_images[img_ind2]);

    vector<DMatch> matches;
    ImageAnalyzer::getInstance().match(first_image,second_image,matches);
    vector<Point2f> matched_pts1;
    vector<Point2f> matched_pts2;
    matches2points(m_images.at(img_ind2)->keypoints ,m_images.at(img_ind1)->keypoints,matches , matched_pts2, matched_pts1);

    vector<unsigned char> match_mask;
    Mat H = findHomography(matched_pts1, matched_pts2, CV_RANSAC, 3, match_mask);

    Mat sum_of_points = Mat::zeros(3,1,CV_64FC1);
    unsigned int num_plane_points = 0;
    for(int k = 0; k < (int)match_mask.size() ; k++)
    {
        if(match_mask.at(k) != 0)
        {
            const DMatch & dmatch = matches[k];

            int first_ind = dmatch.queryIdx;

            if(m_images.at(img_ind1)->kp2trackID.contains(first_ind))
            {
                //cout << "contains!" << endl;
                unsigned int trackID = m_images.at(img_ind1)->kp2trackID.value(first_ind);
                Track * track = m_tracks[trackID];
                if(track->isInit())
                {
                    num_plane_points++;
                    sum_of_points += track->get3DPoint();
                }
            }
        }
    }

    m_object_center = sum_of_points/num_plane_points;
    m_max_distance= sqrt( pow(m_object_center.at<double>(0),2) + pow(m_object_center.at<double>(1),2)+ pow(m_object_center.at<double>(2),2) );

    cout << "m_object_center: "<< endl;
    print(m_object_center);

}

void Analyser::rejectOutliers()
{
    for(int i = 0; i < m_point_cloud.getPointCount();i++)
    {
        Point3D pt = m_point_cloud.getPoint(i);
        double distance = sqrt( pow(m_object_center.at<double>(0)-pt.getX(),2) + pow(m_object_center.at<double>(1)-pt.getY(),2)+ pow(m_object_center.at<double>(2)-pt.getZ(),2) );
        if(distance > m_max_distance)
        {
            m_tracks[pt.getTrackId()]->setInconsistent();
            m_point_cloud.removePoint(i--);
        }
    }
}

void Analyser::estimate3DPointsNormalizedNotreDame(unsigned int img_ind1,unsigned int img_ind2, QList<unsigned int> & track_ids)
{
    string relative_path =  "../../datasets/NotreDame/";
    Mat image1 = imread(string(relative_path+m_files[img_ind1].toStdString()));

    imwrite(string(relative_path+"chosen/"+m_files[img_ind1].toStdString()), image1);
    Mat image2 = imread(string(relative_path+m_files[img_ind2].toStdString()));
    imwrite(string(relative_path+"chosen/"+m_files[img_ind2].toStdString()), image2);

    Mat K1 = m_test_parameters[img_ind1].getK();
    double k1_1 = m_test_parameters[img_ind1].getK1();
    double k1_2 = m_test_parameters[img_ind1].getK2();
    //K1.at<double>(0,2) = ((double)image1.cols)/2.0;
    // K1.at<double>(1,2) = ((double)image1.rows)/2.0;
    Mat K2 = m_test_parameters[img_ind2].getK();

    //  K2.at<double>(0,2) = ((double)image2.cols)/2.0;
    //  K2.at<double>(1,2) = ((double)image2.rows)/2.0;
    Mat R1  = m_test_parameters[img_ind1].getR();
    Mat R2 = m_test_parameters[img_ind2].getR();
    Mat T1 = m_test_parameters[img_ind2].getT();
    Mat T2 = m_test_parameters[img_ind2].getT();
    Mat K1_inv = (K1).inv();
    Mat K2_inv = (K2).inv();

    Mat P1 = Mat::zeros(3,4,CV_64FC1);
    Mat roi = P1(Range(0,3),Range(0,3));
    R1.copyTo(roi);
    roi = P1(Range(0,3),Range(3,4));
    T1.copyTo(roi);

    Mat P2 = Mat::zeros(3,4,CV_64FC1);
    roi = P2(Range(0,3),Range(0,3));
    R2.copyTo(roi);
    roi = P2(Range(0,3),Range(3,4));
    T2.copyTo(roi);

    Mat M1i = (R1).inv();
    Mat M2i = (R2).inv();
    Mat t1 = -(M1i* T1);
    Mat t2 = -(M2i* T2);
    Mat I2 = Mat::eye(3,3,CV_64FC1)*2;
    // cout << "pc=[" ;
    // QList<Point3D>  model_points;
    for(int i= 0; i < (int)track_ids.size();i++)
    {
        Track* track = m_test_tracks[track_ids[i]];

        Mat true_pt = track->get3DPoint();
        Mat true_pt_h = Mat::ones(4,1,CV_64FC1);
        roi = true_pt_h(Range(0,3),Range(0,1));
        true_pt.copyTo(roi);

        Mat ms_pt1 = R1*true_pt + T1;

        ms_pt1 = - ms_pt1/ms_pt1.at<double>(2);
        Mat p = Mat::zeros(2,1,CV_64FC1);
        p.at<double>(0) = ms_pt1.at<double>(0) ;
        p.at<double>(1) = ms_pt1.at<double>(1) ;

        double norm_pt1 = cv::norm(p);
        double dist1 = 1.0 + k1_1*pow(norm_pt1,2)+k1_2 * pow(norm_pt1,4);
        double f = K1.at<double>(0);
        ms_pt1 = f *dist1* ms_pt1;
        Mat true_pt1 = P1*true_pt_h;
        true_pt1 = true_pt1/true_pt1.at<double>(2);
        Mat true_pt2 = P2*true_pt_h;

        true_pt2 = true_pt2/true_pt2.at<double>(2);
        Point2f p1ih =  track->getKeyPoint(img_ind1);
        //  kps1.push_back(m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)]);
        Point2f p2ih =  track->getKeyPoint(img_ind2);
        //  kps2.push_back(m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)]);
        //matches.push_back(DMatch(i,i,1));

        cout << ms_pt1.at<double>(0) << ", " << p1ih.x << ", " << true_pt1.at<double>(0) << ", " << p2ih.x << ", " << true_pt2.at<double>(0) << "; \n";
        cout << ms_pt1.at<double>(1) << ", " << p1ih.y << ", " << true_pt1.at<double>(1) << ", " << p2ih.y << ", " << true_pt2.at<double>(1) << endl;
        double p1vals[3] = {p1ih.x,p1ih.y,1.0};
        Mat p1 = Mat(3,1, CV_64F, p1vals );
        p1 = K1_inv*p1;
        double p2vals[3] = {p2ih.x,p2ih.y,1.0};
        Mat p2 = Mat(3,1, CV_64F, p2vals );
        p2 = K2_inv*p2;

        //print(img_pts1[i]);
        Mat D1,D2;
        cv::normalize(M1i* p1,D1);
        cv::normalize( M2i * p2,D2);

        Mat S1 = I2 - D1*D1.t() - D2*D2.t();

        Mat t1td1= t1.t()*D1;
        double t1td1_d = t1td1.at<double>(0);
        Mat t2td2 = t2.t()*D2;
        double t2td2_d = t2td2.at<double>(0);
        Mat S2 = t1 + t2 -t1td1_d*D1 -t2td2_d*D2;

        Mat X3D = S1.inv() * S2;
        cout << "projected point:" << endl;
        print(X3D);
        cout << "true point:" << endl;
        print(true_pt);
        //print(X3D);
        // if(X3D.at<double>(0) >= bot.x && X3D.at<double>(0) <= top.x && X3D.at<double>(1) >= bot.y && X3D.at<double>(1) <= top.y && X3D.at<double>(2) >= bot.z && X3D.at<double>(2) <= top.z )
        /*  {
            Point3D model_point(X3D.at<double>(0),X3D.at<double>(1),X3D.at<double>(2));
            model_point.setTrackId(track_ids[i]);
            m_point_cloud.append(model_point);
            track->set3DPoint(X3D);
        }*/
        //  cout << X3D.at<double>(0) << ", " << X3D.at<double>(1)  << ", "<< X3D.at<double>(2)  << ";\n";
    }
    // cout << "];" << endl;

    //Mat out;
    // drawMatches(m_images[img_ind1]->rgb_image, kps1,m_images[img_ind2]->rgb_image, kps2, matches, out);
    //imwrite(string("/media/Volume/Personal/UNI/australia/CV/malab_tests/out.jpg"), out);

    // m_point_cloud.setPoints(model_points);

}

void Analyser::estimateAll3DPoints()
{
    // Triangulates every consistent track from ALL of its observations via
    // the least-squares intersection of viewing rays:
    //   X = S1^-1 * S2,  S1 = sum_i (I - Di*Di^T),
    //                    S2 = sum_i (ti - (ti . Di) * Di)
    // and classifies each point as inlier / soft inlier / outlier against a
    // hard-coded bounding box, updating the corresponding counters and
    // track-size accumulators. Points are printed as a MATLAB matrix "pc".

    // NOTE(review): presumably the ground-truth bounding box of the
    // "templeRing" dataset -- confirm before reusing on other data.
    Point3d bot(-0.023121, -0.038009, -0.091940);
    Point3d top(0.078626, 0.121636 ,-0.017395);
    // Soft inliers may lie up to 10% of the box extent outside the box.
    double percentage = 0.1;
    Point3d margin((top.x-bot.x)*percentage,(top.y-bot.y)*percentage,(top.z-bot.z)*percentage);
    Point3d soft_bot = bot - margin;
    Point3d soft_top = top + margin;



    Mat I = Mat::eye(3,3,CV_64FC1);
    cout << "pc=[" ;
    for(int i= 0; i < (int)m_tracks.size();i++)
    {
        Track* track = m_tracks[i];
        if(track->isInconsistent())
            continue;

        // Accumulate the normal equations over all observations of the track.
        Mat S1 = Mat::zeros(3,3,CV_64FC1);
        Mat S2 = Mat::zeros(3,1,CV_64FC1);
        for(int j = 0; j < track->track_.size();j++)
        {
            // (image index, keypoint index) of this observation
            QPair<unsigned int, unsigned int> ukpid = track->track_[j];
            AnalyzedImage * img = m_images[ukpid.first];
            Parameters * params = m_parameters[ukpid.first];

            Mat K = params->getK();

            Mat R = params->getR();
            // camera center t = -R^-1 * T
            Mat t = -R.inv()*params->getT();
            Point2f pih = img->keypoints[ukpid.second].pt;
            double pvals[3] = {pih.x,pih.y,1.0};
            Mat p = Mat(3,1, CV_64F, pvals );
            // normalized image coordinates
            p = K.inv() * p;
            // D: unit viewing ray of this observation in world coordinates
            Mat D;
            cv::normalize(R.inv()*p,D);
            S1 += (I - D*D.t());
            Mat ttd= t.t()*D;
            double ttd_d = ttd.at<double>(0);
            S2 += t - ttd_d*D;
        }


        // Solve the accumulated normal equations for the 3D point.
        Mat X3D = S1.inv() * S2;

        cout << X3D.at<double>(0) << ", " << X3D.at<double>(1)  << ", "<< X3D.at<double>(2)  << ";\n";
        //print(X3D);
        // Classify against the hard box, then the margin-expanded soft box.
        if(X3D.at<double>(0) >= bot.x && X3D.at<double>(0) <= top.x && X3D.at<double>(1) >= bot.y && X3D.at<double>(1) <= top.y && X3D.at<double>(2) >= bot.z && X3D.at<double>(2) <= top.z )
        {
            m_num_inliers++;
            m_sum_track_size_inliers += track->track_.size();


        }
        else if(X3D.at<double>(0) >= soft_bot.x && X3D.at<double>(0) <= soft_top.x && X3D.at<double>(1) >= soft_bot.y && X3D.at<double>(1) <= soft_top.y && X3D.at<double>(2) >= soft_bot.z && X3D.at<double>(2) <= soft_top.z )
        {
            m_num_soft_inliers++;
            m_sum_track_size_soft_inliers += track->track_.size();

        }
        else
        {
            m_num_outliers++;
            m_sum_track_size_outliers += track->track_.size();
        }
        /* Point3D model_point(X3D.at<double>(0),X3D.at<double>(1),X3D.at<double>(2));
        model_point.setColor(r,g,b);
        model_point.setTrackId(track_ids[i]);
        m_point_cloud.append(model_point);
        track->set3DPoint(X3D);*/

    }
    cout << "];" << endl;

}


void Analyser::estimate3DPointsNormalized(unsigned int img_ind1,unsigned int img_ind2 )
{
    cout << "projecting: " << img_ind1 << " / " << img_ind2 << endl;
    if(!m_images[img_ind1]->isPoseSet() || !m_images[img_ind2]->isPoseSet())
    {
        cerr << "Cannot estimate3DPoints without init of cams!!!" << endl;
        return;
    }
    QList<unsigned int> track_ids =  getJoinedTracks(img_ind1,img_ind2);

    Mat K1_inv = (m_images[img_ind1]->getK()).inv();
    Mat K2_inv = (m_images[img_ind2]->getK()).inv();

    Point3d bot(-0.023121, -0.038009, -0.091940);
    Point3d top(0.078626, 0.121636 ,-0.017395);

    vector<KeyPoint> kps1;
    vector<KeyPoint> kps2;
    vector<DMatch> matches;

    Mat M1i = (m_images[img_ind1]->getR()).inv();
    Mat M2i = (m_images[img_ind2]->getR()).inv();
    Mat t1 = -(M1i* m_images[img_ind1]->getT());
    Mat t2 = -(M2i* m_images[img_ind2]->getT());
    Mat I2 = Mat::eye(3,3,CV_64FC1)*2;
    // cout << "pc=[" ;
    // QList<Point3D>  model_points;
    for(int i= 0; i < (int)track_ids.size();i++)
    {
        Track* track = m_tracks[track_ids[i]];
        Point2f p1ih =  m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)].pt;
        kps1.push_back(m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)]);
        Point2f p2ih =  m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)].pt;
        kps2.push_back(m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)]);
        matches.push_back(DMatch(i,i,1));
        double p1vals[3] = {p1ih.x,p1ih.y,1.0};
        Mat p1 = Mat(3,1, CV_64F, p1vals );
        p1 = K1_inv*p1;
        double p2vals[3] = {p2ih.x,p2ih.y,1.0};
        Mat p2 = Mat(3,1, CV_64F, p2vals );
        p2 = K2_inv*p2;

        //print(img_pts1[i]);
        Mat D1,D2;
        cv::normalize(M1i* p1,D1);
        cv::normalize( M2i * p2,D2);

        Mat S1 = I2 - D1*D1.t() - D2*D2.t();

        Mat t1td1= t1.t()*D1;
        double t1td1_d = t1td1.at<double>(0);
        Mat t2td2 = t2.t()*D2;
        double t2td2_d = t2td2.at<double>(0);
        Mat S2 = t1 + t2 -t1td1_d*D1 -t2td2_d*D2;

        Mat X3D = S1.inv() * S2;
        //print(X3D);
        //if(X3D.at<double>(0) >= bot.x && X3D.at<double>(0) <= top.x && X3D.at<double>(1) >= bot.y && X3D.at<double>(1) <= top.y && X3D.at<double>(2) >= bot.z && X3D.at<double>(2) <= top.z )
        float r = 1;
        float g = 0;
        float b = 0;
        /* if(m_calls_ctr == 0)
            r = 1;
        else if(m_calls_ctr == 1)
            g = 1;
        else if(m_calls_ctr == 2)
            b = 1;*/

        Point3D model_point(X3D.at<double>(0),X3D.at<double>(1),X3D.at<double>(2));
        model_point.setColor(r,g,b);
        model_point.setTrackId(track_ids[i]);
        m_point_cloud.append(model_point);
        track->set3DPoint(X3D);

        //  cout << X3D.at<double>(0) << ", " << X3D.at<double>(1)  << ", "<< X3D.at<double>(2)  << ";\n";
    }
    // cout << "];" << endl;

    /*  Mat out;
    drawMatches(m_images[img_ind1]->rgb_image, kps1,m_images[img_ind2]->rgb_image, kps2, matches, out);
    imwrite(string("/media/Volume/Personal/UNI/australia/CV/malab_tests/out.jpg"), out);*/

    // m_point_cloud.setPoints(model_points);
    m_calls_ctr++;
}

void Analyser::estimate3DPointsNormalizedParallel(unsigned int img_ind1,unsigned int img_ind2 )
{
    cout << "projecting: " << img_ind1 << " / " << img_ind2 << endl;
    if(!m_images[img_ind1]->isPoseSet() || !m_images[img_ind2]->isPoseSet())
    {
        cerr << "Cannot estimate3DPoints without init of cams!!!" << endl;
        return;
    }
    QList<unsigned int> track_ids =  getJoinedTracks(img_ind1,img_ind2);

    Mat K1= m_images[img_ind1]->getK();
    double f = K1.at<double>(0);
    Mat K2 = m_images[img_ind2]->getK();

    Point3d bot(-0.023121, -0.038009, -0.091940);
    Point3d top(0.078626, 0.121636 ,-0.017395);

    vector<KeyPoint> kps1;
    vector<KeyPoint> kps2;
    vector<DMatch> matches;

    Mat M1i = (m_images[img_ind1]->getR()).inv();
    Mat M2i = (m_images[img_ind2]->getR()).inv();
    Mat t1 = -(M1i* m_images[img_ind2]->getT());
    Mat t2 = -(M2i* m_images[img_ind2]->getT());
    Mat diff_t = t1-t2;
    double baseline = sqrt(pow(diff_t.at<double>(0),2)+pow(diff_t.at<double>(1),2)+pow(diff_t.at<double>(2),2));

    for(int i= 0; i < (int)track_ids.size();i++)
    {
        Track* track = m_tracks[track_ids[i]];
        Point2f p1ih =  m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)].pt;
        kps1.push_back(m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)]);
        Point2f p2ih =  m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)].pt;
        kps2.push_back(m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)]);
        matches.push_back(DMatch(i,i,1));

        double disparity = sqrt(pow((double)(p1ih.x-p2ih.x),2.0)+pow((double)(p1ih.y+p2ih.y),2.0));
        double p1vals[3] = {p1ih.x-K1.at<double>(0,2),p1ih.y-K1.at<double>(1,2),f};
        Mat p1 = Mat(3,1, CV_64F, p1vals );
        //double p2vals[3] = {p2ih.x-K2.at<double>(0,2),p2ih.y-K2.at<double>(1,2),f};
        // Mat p2 = Mat(3,1, CV_64F, p2vals );


        //double z = baseline*f/disparity;


        Mat X3D = (baseline/disparity) * p1;
        //print(X3D);
        //if(X3D.at<double>(0) >= bot.x && X3D.at<double>(0) <= top.x && X3D.at<double>(1) >= bot.y && X3D.at<double>(1) <= top.y && X3D.at<double>(2) >= bot.z && X3D.at<double>(2) <= top.z )
        float r = 1;
        float g = 0;
        float b = 0;

        Point3D model_point(X3D.at<double>(0),X3D.at<double>(1),X3D.at<double>(2));
        model_point.setColor(r,g,b);
        model_point.setTrackId(track_ids[i]);
        m_point_cloud.append(model_point);
        track->set3DPoint(X3D);

        //  cout << X3D.at<double>(0) << ", " << X3D.at<double>(1)  << ", "<< X3D.at<double>(2)  << ";\n";
    }

    /*  Mat out;
    drawMatches(m_images[img_ind1]->rgb_image, kps1,m_images[img_ind2]->rgb_image, kps2, matches, out);
    imwrite(string("/media/Volume/Personal/UNI/australia/CV/malab_tests/out.jpg"), out);*/

    // m_point_cloud.setPoints(model_points);

}


void Analyser::estimate3DPoints(unsigned int img_ind1,unsigned int img_ind2)
{
    // Triangulates a 3D point for every track shared by the two images via
    // the least-squares intersection of viewing rays (midpoint method),
    // working directly from the projection matrices P = [M | p4]:
    //   X = S1^-1 * S2 with S1 = 2I - D1*D1^T - D2*D2^T
    //                       S2 = t1 + t2 - (t1.D1)*D1 - (t2.D2)*D2.
    // Unlike estimate3DPointsNormalized(), image points are only shifted by
    // the principal point here, not fully normalized by K.
    // Requires both camera poses to be set.
    if(!m_images[img_ind1]->isPoseSet() || !m_images[img_ind2]->isPoseSet())
    {
        cerr << "Cannot estimate3DPoints without init of cams!!!" << endl;
        return;
    }
    Mat P1 = m_images[img_ind1]->getP();
    Mat P2 = m_images[img_ind2]->getP();
    QList<unsigned int> track_ids =  getJoinedTracks(img_ind1,img_ind2);

    Point2f pp1 = m_images[img_ind1]->getPrincipalPoint();
    Point2f pp2 = m_images[img_ind2]->getPrincipalPoint();

    // kps/matches are only consumed by the commented-out debug drawing at
    // the bottom of this function.
    vector<KeyPoint> kps1;
    vector<KeyPoint> kps2;
    vector<DMatch> matches;

    // Camera centers ti = -Mi^-1 * p4 from the projection matrices.
    Mat M1i = P1(Range(0,3),Range(0,3)).inv();
    Mat M2i = P2(Range(0,3),Range(0,3)).inv();
    Mat t1 = -(M1i* P1(Range(0,3),Range(3,4)));
    Mat t2 = -(M2i* P2(Range(0,3),Range(3,4)));
    Mat I2 = Mat::eye(3,3,CV_64FC1)*2;
    // cout << "pc=[" ;
    // QList<Point3D>  model_points;
    for(int i= 0; i < (int)track_ids.size();i++)
    {
        Track* track = m_tracks[track_ids[i]];
        Point2f p1ih =  m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)].pt;
        kps1.push_back(m_images[img_ind1]->keypoints[track->getKeyPointID(img_ind1)]);
        Point2f p2ih =  m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)].pt;
        kps2.push_back(m_images[img_ind2]->keypoints[track->getKeyPointID(img_ind2)]);
        matches.push_back(DMatch(i,i,1));
        // Image points shifted by the principal point (homogeneous).
        double p1vals[3] = {p1ih.x-pp1.x,p1ih.y-pp1.y,1.0};
        Mat p1 = Mat(3,1, CV_64F, p1vals );
        double p2vals[3] = {p2ih.x-pp2.x,p2ih.y-pp2.y,1.0};
        Mat p2 = Mat(3,1, CV_64F, p2vals );

        // Unit viewing rays in world coordinates.
        //print(img_pts1[i]);
        Mat D1,D2;
        cv::normalize(M1i* p1,D1);
        cv::normalize( M2i * p2,D2);

        Mat S1 = I2 - D1*D1.t() - D2*D2.t();

        Mat t1td1= t1.t()*D1;
        double t1td1_d = t1td1.at<double>(0);
        Mat t2td2 = t2.t()*D2;
        double t2td2_d = t2td2.at<double>(0);
        Mat S2 = t1 + t2 -t1td1_d*D1 -t2td2_d*D2;

        // Solve for the 3D point, store it on the track and in the cloud.
        Mat X3D = S1.inv() * S2;
        Point3D model_point(X3D.at<double>(0),X3D.at<double>(1),X3D.at<double>(2));
        model_point.setTrackId(track_ids[i]);
        m_point_cloud.append(model_point);
        track->set3DPoint(X3D);
        //  cout << X3D.at<double>(0) << ", " << X3D.at<double>(1)  << ", "<< X3D.at<double>(2)  << ";\n";
    }
    // cout << "];" << endl;

    /* Mat out;
    drawMatches(m_images[img_ind1]->rgb_image, kps1,m_images[img_ind2]->rgb_image, kps2, matches, out);
    imwrite(string("/media/Volume/Personal/UNI/australia/CV/malab_tests/out.jpg"), out);*/

    // m_point_cloud.setPoints(model_points);

}

QList<unsigned int> Analyser::getJoinedTracks(int ind1,int ind2)
{
    // Returns the IDs of all tracks visible in both images, excluding
    // tracks that are flagged inconsistent or already triangulated (init).
    AnalyzedImage* img1 = m_images.at(ind1);
    AnalyzedImage* img2 = m_images.at(ind2);

    // Intersect the track-ID sets of both images.
    QSet<unsigned int> ids1 = QSet<unsigned int>::fromList(img1->kp2trackID.values());
    QSet<unsigned int> ids2 = QSet<unsigned int>::fromList(img2->kp2trackID.values());
    const QList<unsigned int> candidates = (ids1.intersect(ids2)).toList();

    // Keep only tracks still eligible for triangulation.
    QList<unsigned int> joined_ids;
    for(int k = 0; k < candidates.size(); ++k)
    {
        Track * track = m_tracks[candidates[k]];
        if(!track->isInconsistent() && !track->isInit())
            joined_ids.append(candidates[k]);
    }
    return joined_ids;
}

void Analyser::extractTracks()
{
    // Builds feature tracks across all image pairs:
    //  1. For every pair (i,j) match descriptors via ImageAnalyzer.
    //  2. Filter the matches with a RANSAC fundamental-matrix estimate.
    //  3. Chain surviving matches into tracks (one track = one scene point
    //     seen in several images) using the per-image kp2trackID maps.
    //  4. Fill m_track_matrix, whose entry (a,b) counts the tracks shared
    //     by images a and b (consumed by getPairWithMostMatches()).
    cout << "Number of images: " << m_images.size() << endl;
    for(int i = 0; i <m_images.size();i++)
    {
        cout << "processing image " << i << endl;
        // NOTE(review): these dereferences copy the AnalyzedImage objects;
        // presumably required by ImageAnalyzer::match's signature -- verify.
        AnalyzedImage  first_image =*( m_images[i]);
        for(int j = i+1; j<m_images.size();j++)
        {
            AnalyzedImage  second_image = *(m_images[j]);
            vector<DMatch> matches;
            ImageAnalyzer::getInstance().match(first_image,second_image,matches);
            m_total_number_of_consistently_cross_checked_matches += matches.size();

            vector<Point2f> matched_pts1;
            vector<Point2f> matched_pts2;
            // train = keypoints of image j, query = keypoints of image i,
            // so queryIdx below indexes image i and trainIdx image j.
            matches2points(m_images[j]->keypoints ,m_images[i]->keypoints,matches , matched_pts2, matched_pts1);

            // Epipolar outlier rejection: nonzero match_mask entries are
            // RANSAC inliers of the fundamental matrix.
            vector<unsigned char> match_mask;
            Mat F = findFundamentalMat(matched_pts1, matched_pts2, FM_RANSAC, 1,0.99, match_mask);
/*
            /// DEBUG OUTPUT START
            Mat output_img;
            vector<char> signed_mask( match_mask.begin(), match_mask.end() );
            drawMatches(m_images.at(i)->rgb_image, m_images.at(i)->keypoints, m_images.at(j)->rgb_image,  m_images.at(j)->keypoints, matches, output_img,Scalar::all(-1),Scalar::all(-1),signed_mask);
            vector<int> params;
            params.push_back(CV_IMWRITE_JPEG_QUALITY);
            params.push_back(100);
            stringstream ss;
            ss << "/media/Volume/Personal/UNI/australia/CV/malab_tests/matches" << i << j << ".jpg";

            imwrite(ss.str(), output_img,params);
            // imwrite(string("/home/dbkim/matches.jpg"), output_img,params);
            /// DEBUG OUTPUT END*/

            //Mat H = findHomography(matched_pts1, matched_pts2, CV_RANSAC, 3, match_mask);
            // cout << "F: " << endl;
            //print(F);
            //cout << "H: " << endl;
            //print(H);
            // Count the RANSAC inliers for this pair.
            int correct_matches = 0;
            for(int k = 0; k < (int)match_mask.size() ; k++)
            {
                if(match_mask[k] != 0)
                    correct_matches++;
            }
            m_total_number_of_consistent_RANSAC_matches += correct_matches;
            // Only pairs with enough inliers contribute tracks.
            if(correct_matches >= MIN_MATCHES)
            {
                //cout << "correct_matches: " << correct_matches << endl;
                // Remember F for this image pair.
                m_images[i]->imageID2F.insert(j,F.clone());
                // cout << "m_images.at(i)->imageID2F.size(): " << m_images.at(i)->imageID2F.size() << endl;
                for(int k = 0; k < (int)match_mask.size() ; k++)
                {
                    if(match_mask[k] != 0)
                    {
                        const DMatch & dmatch = matches[k];

                        int first_ind = dmatch.queryIdx;
                        int second_ind = dmatch.trainIdx;

                        // Three cases: extend an existing track from image i,
                        // extend one from image j, or start a new track.
                        if(m_images[i]->kp2trackID.contains(first_ind))
                        {
                            //cout << "contains!" << endl;
                            // Only extend if image j's keypoint is not yet
                            // claimed by another track.
                            if(!m_images[j]->kp2trackID.contains(second_ind))
                            {
                                unsigned int trackID = m_images[i]->kp2trackID.value(first_ind);
                                // Track * track = m_tracks.at(trackID);
                                m_tracks[trackID]->addKeypoint(j,second_ind);
                                m_images[j]->kp2trackID.insert(second_ind,trackID);
                            }
                        }
                        else if(m_images[j]->kp2trackID.contains(second_ind))
                        {
                            if(!m_images[i]->kp2trackID.contains(first_ind))
                            {
                                unsigned int trackID =m_images[j]->kp2trackID.value(second_ind);
                                //Track * track = m_tracks.at(trackID);
                                m_tracks[trackID]->addKeypoint(i,first_ind);
                                m_images[i]->kp2trackID.insert(first_ind,trackID);
                            }
                        }
                        else
                        {
                            // Neither keypoint belongs to a track yet:
                            // create a fresh two-view track.
                            Track * track = new Track();
                            track->addKeypoint(i,first_ind);
                            track->addKeypoint(j,second_ind);
                            m_tracks.append(track);
                            unsigned int trackID = m_tracks.size()-1;
                            m_images[i]->kp2trackID.insert(first_ind,trackID);
                            m_images[j]->kp2trackID.insert(second_ind,trackID);

                        }
                    }
                }
            }
        }
    }


    cout << "num tracks: " << m_tracks.size()  << endl;
    // Build the symmetric co-occurrence matrix: for every consistent track,
    // increment the counter of each ordered pair of images that observe it.
    m_track_matrix = Mat::zeros(m_images.size(),m_images.size(),CV_32SC1);
    for(int i = 0; i < m_tracks.size();i++)
    {
        Track * track = m_tracks.at(i);
        // cout << "track inconsistent: " << track->isInconsistent()  <<  " size: " << track->size() << endl;
        if(!track->isInconsistent())
        {
            QList<unsigned int> image_ids;
            track->getImageIDs(image_ids);
            for(int j = 0 ; j < image_ids.size(); j++)
            {
                for(int k = 0 ; k < image_ids.size(); k++)
                {
                    if(j != k)
                    {
                        m_track_matrix.at<int>(image_ids.at(j),image_ids.at(k)) = m_track_matrix.at<int>(image_ids.at(j),image_ids.at(k)) +1;
                    }
                }
            }
        }
    }
}

void Analyser::correctF(Mat & F)
{
    // Enforce the rank-2 constraint on the fundamental matrix: recompose F
    // from its SVD with the smallest singular value set to zero.
    Mat W,U,Vt;
    SVD::compute( F, W, U, Vt );
    cout << "U: " << endl;
    print(U);
    // Keep only the two largest singular values; the third stays zero.
    Mat singular_values = Mat::zeros(3,3,CV_64FC1);
    singular_values.at<double>(0,0) = W.at<double>(0);
    singular_values.at<double>(1,1) = W.at<double>(1);
    F = U*singular_values*Vt;
}
void Analyser::print(cv::Mat mat)
{
    // Pretty-prints a CV_64FC1 matrix in MATLAB-style bracket notation;
    // matrices of any other type are silently ignored.
    if(mat.type() != CV_64FC1)
        return;

    cout << "[ ";
    for(int row = 0; row < mat.rows; ++row)
    {
        for(int col = 0; col < mat.cols; ++col)
        {
            // separator before every element except the first in a row
            if(col > 0)
                cout << ", ";
            cout << mat.at<double>(row,col);
        }
        // row separator (not after the last row)
        if(row < mat.rows-1)
            cout << ";\n ";
    }
    cout << "]\n" << flush;
}

Mat Analyser::createSkewSymmetricMatrix(Vec3d vec)
{
    // Builds the 3x3 skew-symmetric (cross-product) matrix [v]_x such that
    // [v]_x * w == v x w for any 3-vector w.
    Mat skew = (Mat_<double>(3,3) <<
                    0.0, -vec[2],  vec[1],
                 vec[2],     0.0, -vec[0],
                -vec[1],  vec[0],     0.0);
    return skew;
}
QPair<unsigned int, unsigned int> Analyser::getPairWithMostMatches()
{
    // Scans the track co-occurrence matrix for the image pair that shares
    // the most tracks, skipping pairs whose cameras are (nearly)
    // co-located, since those are degenerate for triangulation.
    QPair<unsigned int, unsigned int> best_pair;
    unsigned int best_count = 0;
    for(int i = 0; i < m_track_matrix.rows; i++)
    {
        for(int j =0; j< m_track_matrix.cols;j++)
        {
            const unsigned int count = (unsigned int) m_track_matrix.at<int>(i,j);
            if(count <= best_count)
                continue;
            // Reject pairs with an almost-zero baseline (L1 norm of the
            // translation difference).
            Mat baseline = m_parameters[i]->getT() - m_parameters[j]->getT();
            const double dist = fabs(baseline.at<double>(0)) + fabs(baseline.at<double>(1)) + fabs(baseline.at<double>(2));
            if(dist <= 0.0001)
                continue;
            best_count = count;
            best_pair.first = i;
            best_pair.second = j;
        }
    }
    return best_pair;
}

void Analyser::matches2points(const std::vector<cv::KeyPoint>& train, const std::vector<cv::KeyPoint>& query,
                              const std::vector<cv::DMatch>& matches, std::vector<cv::Point2f>& pts_train,
                              std::vector<Point2f>& pts_query)
{
    // Converts a match list into two aligned point vectors: entry k of
    // pts_query / pts_train is the matched keypoint location in the query /
    // train image for matches[k].
    pts_train.clear();
    pts_query.clear();
    pts_train.reserve(matches.size());
    pts_query.reserve(matches.size());

    for (size_t k = 0; k < matches.size(); ++k)
    {
        const DMatch & m = matches[k];
        pts_query.push_back(query[m.queryIdx].pt);
        pts_train.push_back(train[m.trainIdx].pt);
    }
}

void Analyser::matches2points(const std::vector<cv::KeyPoint>& train, const std::vector<cv::KeyPoint>& query,
                              const std::vector<cv::DMatch>& matches, std::vector<cv::Point2f>& pts_train,
                              std::vector<Point2f>& pts_query, cv::Mat & Ktrain, cv::Mat & Kquery)
{
    // Like the two-argument overload, but additionally transforms each
    // keypoint into normalized image coordinates via the inverse of the
    // respective camera matrix (K^-1 * [x y 1]^T).
    Mat Kt_inv = Ktrain.inv();
    Mat Kq_inv = Kquery.inv();
    pts_train.clear();
    pts_query.clear();
    pts_train.reserve(matches.size());
    pts_query.reserve(matches.size());

    for (size_t i = 0; i < matches.size(); i++)
    {
        const DMatch & dmatch = matches[i];

        Point2f query_pt = query[dmatch.queryIdx].pt;
        double pq_vals[3] = {query_pt.x,query_pt.y,1.0};
        Mat pq = Mat(3,1,CV_64FC1,pq_vals);
        pq = Kq_inv*pq;
        Point2f train_pt = train[dmatch.trainIdx].pt;
        double pt_vals[3] = {train_pt.x,train_pt.y,1.0};
        Mat pt = Mat(3,1,CV_64FC1,pt_vals);
        pt = Kt_inv*pt;
        // BUGFIX: the y coordinate of the query point used index 0 (the x
        // component) instead of index 1.
        pts_query.push_back(Point2f((float)pq.at<double>(0),(float)pq.at<double>(1)));
        pts_train.push_back(Point2f((float)pt.at<double>(0),(float)pt.at<double>(1)));
    }
}

