#ifndef SFMER_H_INCLUDED
#define SFMER_H_INCLUDED

#include "util/timer.h"
#include "util/math.h"
#include "util/bitmap.h"
#include "util/random.h"
#include "util/misc.h"
#include "util/func.h"
#include "util/cmdLine.h"
#include "base/image.h"
#include "base/point3d.h"
#include "base/triangulation.h"
#include "base/camera.h"
#include "base/pose.h"
#include "base/projection.h"
#include "base/homography_matrix.h"
#include "base/cost_functions.h"
#include "base/database_cache.h"
#include "base/database.h"
#include "base/triplet.h"
#include "optim/bundle_adjustment.h"
#include "estimators/rotation_averaging.h"
#include "estimators/similarity_transform_.h"
#include "estimators/homography_matrix_.h"
#include "estimators/triangulation_.h"
#include "estimators/pose.h"
#include "estimators/p3p_Ke.h"
#include "tracks/tracks.hpp"
#include "optim/random_sampler.h"
#include <boost/graph/subgraph.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/config.hpp>
#include <boost/graph/connected_components.hpp>
#include <boost/program_options.hpp>
#include <iostream>
#include <fstream>
#include <vector>
#include <unordered_set>
#include <omp.h>
#include "sfm/SfM.h"

namespace RVG
{
// Incremental Structure-from-Motion engine.
// Pipeline: LoadSQLDataBase() -> SeedReconstruction() -> repeated
// ImageAddition() / TriangulateImages_RANSAC() / bundle adjustment,
// followed by result export. All state (cameras, images, tracks) is
// held in the public members below.
class SfMer
{
public:
    struct Options
    {
        // minimum number of feature matches in an epipolar edge
        uint32_t minimum_edge_inlier = 15;

        // minimum number of 2D-3D correspondences required to attempt P3P
        uint32_t minimum_P3P_matches = 24;

        // minimum number of P3P RANSAC inliers to accept a pose
        uint32_t minimum_P3P_inliers = 24;

        // K-cover parameter used by the tracks-selection step
        uint32_t Kcover_ = 256;

        // minimum number of matches in initial pair
        uint32_t minimum_initial_pair_inlier = 100;

        // maximum reprojection error (pixels) for a valid 3D point
        double maximum_projection_error = 12.0;

        // minimum triangulation (ray) angle in degrees
        double minimum_tri_angle = 1.5;

        // minimum number of views a track must be visible in
        uint32_t minimum_visible_views = 2;

        // minimum number of RANSAC trials
        uint32_t minimum_ransac_times = 64;

        // maximum allowed rotation change (degrees) during pose refinement
        double maximum_rotation_angle = 45.0;

        // minimum required P3P inlier ratio
        double P3P_inlier_ratio = 0.3;

        // maximum number of cameras added per registration round
        int maximum_camera_registration = 1024;

        // maximum number of registration attempts per image
        uint32_t maximum_register_times_ = 1024;

        // whether bundle adjustment also optimizes principal points
        bool bundle_principle_points = false;

        // extract per-track RGB color for final output (0 = plain white)
        int extract_color_ = 1;

        // enable tracks selection
        bool tracks_selection_ = true;

        // maximum number of bundle-adjustment rounds
        size_t baTimes_ = 12;

        // rotation-averaging configuration id
        int RAconf_ = 0;

        // community (cluster) SfM mode and the community to reconstruct
        bool community = false;
        int community_id = -1;

        // used for robust registration
        double addRatio_points = 0.3;
        double addRatio_votes = 0.3;

        // # of prev images and # of prev 3D points
        size_t num_of_prev_images = 0;
        size_t num_of_prev_points = 0;

        // number of spanning trees peeled off when sparsifying the view-graph
        int nMST = 128;

        // pre-calibration / GPS handling switches
        bool preCalibration = false;
        bool transGPS = false;
        bool simiGPS = false;
        bool gpsPrior = false;
        double gpsPresion = 15.0;   // assumed GPS precision in meters -- TODO confirm units

        // gnss
        bool GNSS_Flag = false;
    };

    // data initialization
    SfMer();
    ~SfMer();
    bool LoadSQLDataBase();

    // pre calibration
    bool SetPrecalibration();
    bool ResetConfiguration();

    // seed reconstruction
    size_t FindInitialPair();
    bool SeedReconstruction();
    bool ReconstructPair(image_pair_t& pairID);

    // similarity transformation: aligns the reconstruction frame to GPS
    Eigen::Vector2d TransformFrame2GPS(double gps_error, bool skip_final_check = false);

    // camera registration
    bool ImageAddition();
    bool RegisterNextImage_P3P(const image_t image_id);

    // triangulation and point filtering
    bool TriangulateTrack(int& track_id, Eigen::Vector3d& pointXYZ, double& maxError);
    bool TriangulateImages_RANSAC(bool selection = true);
    size_t FilterScenePoints();
    void Normalize();

    // bundle adjustment
    bool BundleAdjustment_Seed();
    bool BundleAdjustment_AngleAxis(const bool constantK, const bool constantR, const bool constantT, size_t iterTimes = 10);
    bool LocalBundleAdjustment(const bool constantK, const bool constantR, const bool constantT, size_t iterTimes = 10);

    // output result
    void ExportResult(const int iteration);
    void ExportReport();
    void ExportPLY(const std::string& path);
    void ExportCHN(const std::string& path, const std::string& list_path, bool pointFlag);
    void ExportPLYWithCameraPose(const std::string& path);
    void ExportRunningTimes();

    // file / directory names used for input and output
    string SfMDir;
    string KFile;
    string PreExtrinsic;
    string PreIntrinsic;
    string EGFile;
    string communityFile;
    string DatabasePath;
    string logName;

	// Non-owning pointers to database-backed inputs/outputs; set by the caller.
	REC3D::ImageInfos* imageInfos;
	REC3D::ImagePairMatches* imagePairMatches;
	REC3D::SparsePoints* sparsePoints;

    Options options_;
    int local_iteration;
    int global_iteration;
    double timeCost = 0.0;
    double repro_RMS = 0.0;     // root-mean-square reprojection error
    double repro_median = 0.0;  // median reprojection error
    Eigen::Vector3d imgGPSMean;

    // used for SfM
    std::map<camera_t, class Camera> cameras_; // camera intrinsic parameters
    std::map<image_t, class Image> images_; // image info and camera extrinsic parameters
    std::map<image_t, std::set<image_t>> imgNeighbors; // view-graph adjacency (well-matched pairs)
    std::map<image_t, Eigen::Vector3d> imgGPS; // per-image GPS position (when available)
    std::map<int, bool> effImage_;  // used for CSfM
    std::map<int, int> img_cam_id_; // clustering camera intrinsic parameters
    std::vector<int> calibImages;   // id of calibrated cameras

    std::map<int, double> points_track_error;
    std::set<image_t> refineCams;
    std::map<int, bool> preCam;
    std::map<int, int> indexID; // original image ID -> internal dense index

    std::map<image_pair_t, FeatureMatches> ImgMatches; // image feature matches
    std::vector<image_pair_t> PossibleInitialPair; // ranked seed-pair candidates
    std::map<image_pair_t, double> candiPairs; // candidate pair -> #matches

    std::vector<int> num_reg_trials_; // times of registeration, for the outer-loop convergence
    std::vector<track> tracks_; // tracks produced by union-find algorithm
    std::map<int, Point3D> effTracks_; // currently effective tracks in the iteration of outer-loop
    std::map<int, uint32_t> trackEffV;
    std::unordered_map<int, Eigen::Vector3ub> tracks_color; // rgb color for tracks
    std::map<int, int> validView;

    // location error
    std::vector<Eigen::Vector3d> globalGPSerror;
    std::vector<int> calibSeq;
    std::map<int, double> globalGPSerrorMean;
    std::map<int, double> globalGPSerrorMedian;
    std::map<int, int> oriID; // internal dense index -> original image ID
    // records the running-time
    std::map<string, double> runningTimes;
};


/// Default constructor: zeroes counters, sets default file names and clears
/// the GPS mean. Fix: the raw pointer members were previously left
/// uninitialized (indeterminate) until the caller assigned them, which made
/// any early access (including the destructor's NULL-ing) undefined behavior.
SfMer::SfMer()
{
    local_iteration = 0;
    global_iteration = 0;
    PreIntrinsic = "";
    PreExtrinsic = "";
    SfMDir = "SfM/";
    EGFile = "EG.txt";
    communityFile = "";
    DatabasePath = "";
    imgGPSMean(0) = 0.0, imgGPSMean(1) = 0.0, imgGPSMean(2) = 0.0;
    timeCost = 0.0;
    logName="";

    // Fix: initialize the non-owning database pointers; they are set by the
    // caller before LoadSQLDataBase() is invoked.
    imageInfos = nullptr;
    imagePairMatches = nullptr;
    sparsePoints = nullptr;
}

/// Destructor: drops the non-owning database pointers and releases the track
/// containers. The pointed-to REC3D structures are owned elsewhere and are
/// deliberately not deleted here.
SfMer::~SfMer()
{
    // Clear the (non-owning) references first.
    imageInfos = nullptr;
    imagePairMatches = nullptr;
    sparsePoints = nullptr;

    // Explicitly release track storage.
    effTracks_.clear();
    tracks_color.clear();
    tracks_.clear();
}

/// Loading MySQL database
bool SfMer::LoadSQLDataBase()
{ 
	std::map<double, int> cameras;
    int iCam = 0;
    for(auto& imgI : *imageInfos)
    {
		auto& curImg = imgI;
        double focalL = (curImg.fx + curImg.fy) / 2.0;

        Camera camD;
        camD.SetCameraId(boost::lexical_cast<camera_t>(cameras.size()));
        camD.SetModelIdFromName("RADIAL");
        camD.SetWidth(curImg.width);
        camD.SetHeight(curImg.height);
        camD.Params().push_back(focalL);
        if (curImg.u > 0 && curImg.v > 0)
        {
            camD.Params().push_back(curImg.u);
            camD.Params().push_back(curImg.v);
        }
        else
        {
            camD.Params().push_back((curImg.width) / 2.0);
            camD.Params().push_back((curImg.height) / 2.0);
        }
        camD.Params().push_back(curImg.k1);
        camD.Params().push_back(curImg.k2);
        camD.Params().shrink_to_fit();
        camD.SetPriorFocalLength(true);
        //camD.SetFocalLength(focalL);
        //camD.SetPriorFocalLength(focalL);
        //camD.SetPrincipalPointX(curImg.width / 2.0);
        //camD.SetPrincipalPointY(curImg.height / 2.0);

        double tripleID = 0 ;
        if(ComputeTripleID(camD.Width(), camD.Height(), camD.FocalLength()) < std::numeric_limits<double>::max())
            tripleID = ComputeTripleID(camD.Width(), camD.Height(), camD.FocalLength());
        else
            return false;

        if(cameras.count(tripleID) > 0)
            img_cam_id_[iCam] = cameras[tripleID];
        else
        {
            /*class Camera camera;
            // ID
            camera.SetCameraId(boost::lexical_cast<camera_t>(cameras.size()));

            // MODEL
            camera.SetModelIdFromName("RADIAL");
            // WIDTH
            camera.SetWidth(camD.Width());
            // HEIGHT
            camera.SetHeight(camD.Height());
            // PARAMS
            camera.Params().push_back(camD.FocalLength());
            camera.Params().push_back((camD.Width()) / 2.0);
            camera.Params().push_back((camD.Height()) / 2.0);
            camera.Params().push_back(0.0);
            camera.Params().push_back(0.0);
            camera.Params().shrink_to_fit();

            camera.SetPriorFocalLength(true);
            camera.SetFocalLength(camD.FocalLength());
            camera.SetPriorFocalLength(camD.FocalLength());*/
            cameras_[boost::lexical_cast<camera_t>(cameras.size())] = camD;
            cameras[tripleID] = boost::lexical_cast<int>(cameras.size());
            img_cam_id_[iCam] = cameras[tripleID];
        }

        iCam++;
    }

    if(cameras_.size() <= (size_t)50)
    {
        options_.bundle_principle_points = true;
        LOG(INFO) << "principle points are refined" << std::endl;
    }
    else
    {
        LOG(INFO) << "principle points are not going to be refined" << std::endl;
    }

    /// map imageID
    for (int i = 0; i < (int)((*imageInfos).size()); i++)
	{
		indexID[(*imageInfos)[i].ID] = i;
		oriID[i] = (*imageInfos)[i].ID;

         if((*imageInfos)[i].gps_x != DBL_MAX || (*imageInfos)[i].gps_y != DBL_MAX || (*imageInfos)[i].gps_z != DBL_MAX){
            imgGPS[i] = Eigen::Vector3d((*imageInfos)[i].gps_x, (*imageInfos)[i].gps_y, (*imageInfos)[i].gps_z);
        }
	}    

    if(imgGPS.size() >= 3){
        options_.gpsPrior = true;

        // for(auto& gps : imgGPS){
        //     imgGPSMean += 1.0/imgGPS.size() * gps.second;
        // }

        // for(auto& gps : imgGPS){
        //     gps.second = gps.second - imgGPSMean;
        // }
        // LOG(INFO) << "mean gps: " << imgGPSMean(0) << " " << imgGPSMean(1) << " " << imgGPSMean(2) << std::endl;
    }
        

    /// Set Images
    for(auto& img : *imageInfos)
    {
        image_t imgId = indexID[img.ID];

        Image imgT;
        images_[imgId] = imgT;
        images_[imgId].SetCameraId(img_cam_id_[imgId]);
        images_[imgId].SetUp(cameras_[img_cam_id_[imgId]]);
        images_[imgId].SetImageId(imgId);
    }
    num_reg_trials_.resize(images_.size(), 0);
    LOG(INFO) << images_.size() << " images are loaded with " << cameras_.size() << " intrinsic camera model" << std::endl;

    /// Read Keypoints
    for(auto& img : *imageInfos)
    {
        auto& features = img.keys;
        std::vector<Eigen::Vector2d> points(features.size());
		int i = 0;
		for (auto& fea : features)
		{
			points[i++] = Eigen::Vector2d(fea.x, fea.y);
		}

        images_[indexID[img.ID]].SetPoints2D(points);
        images_[indexID[img.ID]].color2D_.resize(features.size());
		i = 0;
		for (auto& fea : features)
		{
			images_[indexID[img.ID]].color2D_[i++] = Eigen::Vector3ub(fea.r, fea.g, fea.b);
		}
    }

    /// Load FeatureMatches
    LOG(INFO) << "Loading matches..." << std::endl;
    std::vector<std::pair<image_pair_t, TwoViewGeometry>> image_pairs;
    for(auto& pairM : *imagePairMatches)
    {
        auto& match = pairM;
        int id1 = indexID[match.image_id1];
        int id2 = indexID[match.image_id2];
        image_pair_t pairID = ComputePairID(id1, id2);
        TwoViewGeometry twoGeo;
		twoGeo.inlier_matches.resize(match.keyMatches.size());
		for (int i = 0; i < match.keyMatches.size(); i++)
		{
			twoGeo.inlier_matches[i].point2D_idx1 = match.keyMatches[i].keyIdx1;
			twoGeo.inlier_matches[i].point2D_idx2 = match.keyMatches[i].keyIdx2;
		}
        twoGeo.F_num_inliers = match.keyMatches.size();
        twoGeo.E_num_inliers = twoGeo.F_num_inliers;

        if(match.keyMatches.size() >= options_.minimum_edge_inlier)
            image_pairs.push_back(std::make_pair(pairID, twoGeo));
        
        if(match.keyMatches.size() >= 30){
            imgNeighbors[id1].insert(id2);
            imgNeighbors[id2].insert(id1);
        }
    }

	(*imagePairMatches).clear();
    
    for(auto& imgPair : image_pairs)
    {
        image_pair_t& pairID = imgPair.first;
        TwoViewGeometry& twoViewG = imgPair.second;
        image_t imgId1 = pairID / kMaxNumImages;
        image_t imgId2 = pairID % kMaxNumImages;
        image_pair_t pairNewID = RVG::ComputePairID(imgId1, imgId2);

        ImgMatches[pairNewID] = twoViewG.inlier_matches;
    }
    LOG(INFO) << ImgMatches.size() << " pairwise feature matches in database\n";

    /// Estimate the view-graph
    LOG(INFO) << "Estimating View-graph..." << std::endl;
    std::set<image_pair_t> image_pair_selection;
    std::vector<image_pair_t> top5MST;
    int nOMST = 3;
    {
        /// discover how many images in the cc
        LOG(INFO) << options_.nMST << " MST are going to select" << std::endl;
        for(int i = 0 ; i < options_.nMST; i++)
        {
            graph_t g;
            for (auto& epi : ImgMatches)
            {
                if(image_pair_selection.count(epi.first) > 0)
                    continue;
                int imgI = epi.first / kMaxNumImages, imagJ = epi.first % kMaxNumImages;
                boost::add_edge(imgI, imagJ, -1.0 * (epi.second.size()), g);
            }
            std::vector<edge_t> spanningTree;
            boost::kruskal_minimum_spanning_tree(g, std::back_inserter(spanningTree));

            for (std::vector<edge_t>::const_iterator ei=spanningTree.begin(); ei!=spanningTree.end(); ++ei)
            {
                const edge_t& edge = *ei;
                image_pair_t pairID = ComputePairID(edge.m_source, edge.m_target);
                image_pair_selection.insert(pairID);
                if(i < nOMST && top5MST.size() <= 100000)
                {
                    top5MST.push_back(pairID);
                }
            }
            LOG(INFO) << i << " [ " << spanningTree.size() << "] ";
            if(spanningTree.size() == 0)
                break;
        }
        LOG(INFO) << std::endl;
    }

    std::vector<image_pair_t> image_pair_keys;
    for(auto& pairID : image_pair_selection)
    {
        image_pair_keys.push_back(pairID);
    }
    LOG(INFO) << "Pairwise Matches Loading in " << images_.size() << " images, selected " << image_pair_keys.size() << " edges"<< std::endl;

    for(auto& eg : top5MST)
    {
        if(ImgMatches[eg].size() >= options_.minimum_initial_pair_inlier)
            candiPairs[eg] = ImgMatches[eg].size();
    }

    /// Update ImgMatches
    std::map<image_pair_t, FeatureMatches> ImgMatches_NMSTs;
    for(auto& epi: image_pair_keys)
    {
        ImgMatches_NMSTs[epi] = ImgMatches[epi];
    }
    ImgMatches.clear();
    ImgMatches.swap(ImgMatches_NMSTs);
    LOG(INFO) << ImgMatches.size() << " pairwise matches are used for SfM\n";

    /// Build Tracks
    LOG(INFO) << "Generating tracks (Union-Find Algorithm)" << std::endl;
    tracks::STLMAPTracks map_tracks;
    tracks::TracksBuilder tracksBuilder;
    tracksBuilder.Build(ImgMatches);
    tracksBuilder.Filter();
    tracksBuilder.ExportToSTL(map_tracks);
    Transform2LocalTracks(map_tracks, tracks_, images_, ImgMatches);

    if(options_.extract_color_)
    {
        for (int imgID = 0; imgID < static_cast<int>(images_.size()); ++imgID)
        {
            Image& img = images_[imgID];
            for (const auto& trackMap: img.visible_tracks_)
            {
                int featureID = trackMap.first;
                int trackID = trackMap.second;
                if(tracks_color.count(trackID) > 0)
                    continue;
                else
                    tracks_color[trackID] = img.color2D_[featureID];
            }
        }
        LOG(INFO) << tracks_color.size() << " tracks are colored" << std::endl;
    }
    else
    {
        for(int i = 0; i < (int)tracks_.size(); i++)
            tracks_color[i] = Eigen::Vector3ub(255, 255, 255);
    }    
    return true;
}


/// Reconstructs the seed pair `pairID`: estimates the relative pose with
/// RANSAC, triangulates all shared correspondences, and on success registers
/// both images and the surviving 3D points into the global state.
/// Returns false when the two-view reconstruction is too weak (no or too few
/// effective tracks, low median ray angle, or too few surviving inliers).
bool SfMer::ReconstructPair(image_pair_t& pairID)
{
    image_t image_id1 = static_cast<image_t>(pairID / kMaxNumImages);
    image_t image_id2 = static_cast<image_t>(pairID % kMaxNumImages);

    // Deliberate copies: poses are written back into images_ only on success.
    Image image1 = images_[image_id1];
    Image image2 = images_[image_id2];
    Camera camera1 = cameras_[image1.CameraId()];
    Camera camera2 =  cameras_[image2.CameraId()];

    std::vector<Eigen::Vector2d> points1;
    std::vector<Eigen::Vector2d> points1_normal;
    points1.reserve(image1.NumPoints2D());
    for (const auto& point : image1.Points2D())
    {
        points1.push_back(point.XY());
    }

    std::vector<Eigen::Vector2d> points2;
    std::vector<Eigen::Vector2d> points2_normal;
    points2.reserve(image2.NumPoints2D());
    for (const auto& point : image2.Points2D())
    {
        points2.push_back(point.XY());
    }

    // Relative pose via RANSAC two-view geometry.
    TwoViewGeometry two_view_geometry;
    FeatureMatches& matches = ImgMatches[pairID];
    TwoViewGeometry::Options two_view_geometry_options;
    two_view_geometry_options.ransac_options.max_error = 4.0;
    two_view_geometry_options.ransac_options.max_num_trials = 512;
    two_view_geometry_options.ransac_options.min_num_trials = options_.minimum_ransac_times;
    two_view_geometry_options.ransac_options.confidence = 0.999999;
    two_view_geometry_options.ransac_options.min_inlier_ratio = 0.1;
    two_view_geometry.EstimateWithRelativePose(camera1, points1, camera2, points2, matches, two_view_geometry_options);

    // Fix the gauge: image1 at the origin, image2 at the estimated relative
    // pose with a unit-norm baseline.
    image1.Qvec() = ComposeIdentityQuaternion();
    image1.Tvec() = Eigen::Vector3d(0, 0, 0);
    image2.Qvec() = two_view_geometry.qvec;
    image2.NormalizeQvec();
    two_view_geometry.tvec = two_view_geometry.tvec / two_view_geometry.tvec.norm();
    image2.Tvec() = two_view_geometry.tvec;

    const Eigen::Matrix3x4d proj_matrix1 = image1.ProjectionMatrix();
    const Eigen::Matrix3x4d proj_matrix2 = image2.ProjectionMatrix();
    const Eigen::Vector3d proj_center1 = image1.ProjectionCenter();
    const Eigen::Vector3d proj_center2 = image2.ProjectionCenter();

    FeatureMatches& corrs = ImgMatches[pairID];

    // Add 3D point tracks.
    int countInitialTracks = 0;
    effTracks_.clear();

    std::vector<double> pointErrors;
    std::vector<double> rayAngles;
    for (size_t i = 0; i < corrs.size(); ++i)
    {
        const point2D_t point2D_idx1 = corrs[i].point2D_idx1;
        const point2D_t point2D_idx2 = corrs[i].point2D_idx2;

        // Triangulate in normalized camera coordinates.
        const Eigen::Vector2d point1_N = camera1.ImageToWorld(image1.Point2D(point2D_idx1).XY());
        const Eigen::Vector2d point2_N = camera2.ImageToWorld(image2.Point2D(point2D_idx2).XY());
        const Eigen::Vector3d& xyz = TriangulatePoint(proj_matrix1, proj_matrix2, point1_N, point2_N);
        double tri_angle = CalculateTriangulationAngle(proj_center1, proj_center2, xyz);

        // Worst reprojection error over the two views.
        const Eigen::Vector2d point_1 = image1.Point2D(point2D_idx1).XY();
        const Eigen::Vector2d point_2 = image2.Point2D(point2D_idx2).XY();
        double reproError1 = CalculateReprojectionError(point_1, xyz, proj_matrix1, camera1);
        double reproError2 = CalculateReprojectionError(point_2, xyz, proj_matrix2, camera2);
        double maxError = reproError1 > reproError2 ? reproError1 : reproError2;
        pointErrors.push_back(maxError);
        rayAngles.push_back(RadToDeg(tri_angle));

        // Both observations must belong to the same union-find track.
        if(image1.visible_tracks_.count(point2D_idx1) > 0 && image2.visible_tracks_.count(point2D_idx2) > 0)
        {
            if(image1.visible_tracks_[point2D_idx1] == image2.visible_tracks_[point2D_idx2])
                countInitialTracks++;
        }
        else
        {
            continue;
        }

        // Accept the point when it is in front of both cameras, the ray angle
        // is wide enough, and the reprojection error is small enough.
        if (RadToDeg(tri_angle) >= options_.minimum_tri_angle && HasPointPositiveDepth(proj_matrix1, xyz)
            && HasPointPositiveDepth(proj_matrix2, xyz)
            && maxError <= options_.maximum_projection_error)
        {
            int trackID1 = image1.visible_tracks_[point2D_idx1];
            int trackID2 = image2.visible_tracks_[point2D_idx2];
            if(trackID1 == trackID2)
            {
                effTracks_[trackID1] = Point3D();
                effTracks_[trackID1].SetXYZ(xyz);

                if(image1.color2D_.size() > 0)
                    effTracks_[trackID1].SetColor(image1.color2D_[point2D_idx1]);
                else
                    effTracks_[trackID1].SetColor(Eigen::Vector3ub(255, 255, 255));
            }
        }
    }
    LOG(INFO) << effTracks_.size() << " effective tracks, from initial " << countInitialTracks  << " tracks" << std::endl;

    // Fix: bail out before taking the median of empty vectors -- the original
    // indexed rayAngles/pointErrors unconditionally, which is an out-of-bounds
    // read when the pair has no correspondences.
    if (pointErrors.empty() || rayAngles.empty())
    {
        effTracks_.clear();
        return false;
    }

    std::sort(rayAngles.begin(), rayAngles.end());
    std::sort(pointErrors.begin(), pointErrors.end());
    LOG(INFO) << "median rays angle: " << rayAngles[rayAngles.size() / 2] << ", median repro-error: " << pointErrors[pointErrors.size() / 2] << std::endl;

    if(effTracks_.size() < (size_t)options_.minimum_initial_pair_inlier || effTracks_.size() < 0.3 * countInitialTracks
        || rayAngles[rayAngles.size() / 2] < options_.minimum_tri_angle)
    {
        effTracks_.clear();
        return false;
    }
    else
    {
        /// Register the triangulated 3D points on every observing image.
        for(auto& trackSeed : effTracks_)
        {
            track& curT = tracks_[trackSeed.first];
            for(auto& imgV : curT.view2D)
            {
                class Image& img = images_[imgV.first];
                img.SetPoint3DForPoint2D(imgV.second, trackSeed.first);
                img.point3D_visibility_pyramid_.SetPoint(img.Point2D(imgV.second).X(), img.Point2D(imgV.second).Y());
            }
        }
        // Commit both poses into the global state.
        calibImages.push_back(image_id1);
        calibImages.push_back(image_id2);
        images_[image_id1].SetRegistered(true);
        images_[image_id2].SetRegistered(true);
        images_[image_id1].Qvec() = ComposeIdentityQuaternion();
        images_[image_id1].Tvec() = Eigen::Vector3d(0, 0, 0);
        images_[image_id2].Qvec() = image2.Qvec();
        images_[image_id2].Tvec() = image2.Tvec();
        // Keep the angle-axis representation (used by BA) in sync with Qvec.
        double* angleaxis1 = images_[image_id1].angleAxis.data();
        double* qvec1 = images_[image_id1].Qvec().data();
        ceres::QuaternionToAngleAxis(qvec1, angleaxis1);
        double* angleaxis2 = images_[image_id2].angleAxis.data();
        double* qvec2 = images_[image_id2].Qvec().data();
        ceres::QuaternionToAngleAxis(qvec2, angleaxis2);

        num_reg_trials_[image_id1]++;
        num_reg_trials_[image_id2]++;
        return true;
    }
}

bool SfMer::ResetConfiguration()
{
    for(auto& trackSeed : effTracks_)
    {
        track& curT = tracks_[trackSeed.first];
        for(auto& imgV : curT.view2D)
        {
            class Image& img = images_[imgV.first];
            img.ResetPoint3DForPoint2D(imgV.second);
            img.point3D_visibility_pyramid_.ResetPoint(img.Point2D(imgV.second).X(), img.Point2D(imgV.second).Y());
        }
    }

    effTracks_.clear();
    num_reg_trials_.clear();
    num_reg_trials_.resize(images_.size(), 0);

    for(auto& img : images_)
    {
       img.second.SetRegistered(false);
    }
    calibImages.clear();
    return true;
}

/// Picks the initial image pair and bootstraps the reconstruction. Candidate
/// pairs (from candiPairs) are ranked by the smaller neighborhood size of the
/// two views, then tried in order: each is reconstructed and bundle-adjusted;
/// the first pair that survives both steps becomes the seed.
/// Returns true once a seed reconstruction succeeds, false otherwise.
bool SfMer::SeedReconstruction()
{
    {
        // Rank candidate pairs by min(#neighbors of view1, #neighbors of view2).
        std::vector<std::pair<image_pair_t, double>> pairWeight;
        for(auto& candi : candiPairs)
        {
            image_t id1, id2;
            Database::PairIdToImagePair(candi.first, &id1, &id2);
            double weight = std::min(imgNeighbors[id1].size(), imgNeighbors[id2].size());
            pairWeight.push_back(std::make_pair(candi.first, weight));
        }
        std::sort(pairWeight.begin(), pairWeight.end(), comEdges);

        for(auto& candi : pairWeight)
        {
            PossibleInitialPair.push_back(candi.first);
        }
        LOG(INFO) << PossibleInitialPair.size() << " possible pairs" << std::endl;

        // Fix: the original printed a fixed 10 entries and read out of bounds
        // when fewer than 10 candidate pairs exist.
        const size_t numToPrint = std::min<size_t>(10, PossibleInitialPair.size());
        for(size_t i = 0; i < numToPrint; i++)
        {
            image_t id1, id2;
            Database::PairIdToImagePair(PossibleInitialPair[i], &id1, &id2);
            double weight = std::min(imgNeighbors[id1].size(), imgNeighbors[id2].size());
            LOG(INFO) << "(" << id1 << ", " << id2 << ", " << weight
                      << ") ";
        }
        LOG(INFO) << std::endl;
    }

    // Try candidates in ranked order until one reconstructs and bundle-adjusts.
    bool seedSuc = false;
    for(int tri = 0; tri < static_cast<int>(PossibleInitialPair.size()); tri++)
    {
        image_pair_t& pairID = PossibleInitialPair[tri];
        image_t image_id1 = pairID / kMaxNumImages, image_id2 = pairID % kMaxNumImages;
        LOG(INFO) << image_id1 << " " << image_id2 << " " << ImgMatches[pairID].size()
                  << std::endl;

        if(ImgMatches[pairID].size() < options_.minimum_edge_inlier)
            continue;

        if(!ReconstructPair(pairID))
            continue;
        else
            seedSuc = true;

        /// BA the seed reconstruction; roll back and keep trying on failure.
        if(seedSuc)
        {
            if(!BundleAdjustment_Seed())
            {
                LOG(INFO) << "seed reconstruction in " << image_id1 << " " << image_id2  << " after BA is not robust!" << std::endl;
                seedSuc = false;
                ResetConfiguration();
                continue;
            }
            else
            {
                ExportPLY(SfMDir + "/Seed/seed.ply");
                ExportCHN(SfMDir + "/Seed/seed.out", SfMDir + "/Seed/seed.list", false);
                break;
            }
        }
    }
    return seedSuc;
}

bool SfMer::RegisterNextImage_P3P(const image_t image_id)
{
    Image& image = images_[image_id];
    Camera camera = cameras_[image.CameraId()];
    CHECK(!image.IsRegistered()) << "Image cannot be registered multiple times";
    num_reg_trials_[image_id] += 1;

    // Check if enough 2D-3D correspondences.
    if (image.NumPoints3D() < static_cast<size_t>(options_.minimum_P3P_matches))
    {
        return false;
    }

    //////////////////////////////////////////////////////////////////////////////
    // Search for 2D-3D correspondences
    //////////////////////////////////////////////////////////////////////////////

    std::vector<std::pair<point2D_t, point3D_t>> tri_corrs;
    std::vector<Eigen::Vector2d> tri_points2D;
    std::vector<Eigen::Vector3d> tri_points3D;

    for (point2D_t point2D_idx = 0; point2D_idx < image.NumPoints2D(); ++point2D_idx)
    {
        const Point2D& point2D = image.Point2D(point2D_idx);
        std::unordered_set<point3D_t> point3D_ids;

        if (!point2D.HasPoint3D())
        {
            continue;
        }

        // Avoid duplicate correspondences.
        if (point3D_ids.count(point2D.Point3DId()) > 0)
        {
            continue;
        }

        const Point3D& point3D = effTracks_[point2D.Point3DId()];
        tri_corrs.emplace_back(point2D_idx, point2D.Point3DId());
        point3D_ids.insert(point2D.Point3DId());
        tri_points2D.push_back(point2D.XY());
        tri_points3D.push_back(point3D.XYZ());
    }

    if (tri_points2D.size() < static_cast<size_t>(options_.minimum_P3P_matches))
    {
        return false;
    }

    //////////////////////////////////////////////////////////////////////////////
    // 2D-3D estimation
    //////////////////////////////////////////////////////////////////////////////

    AbsolutePoseEstimationOptions abs_pose_options;
    abs_pose_options.num_threads = 8;
    abs_pose_options.num_focal_length_samples = 30;
    abs_pose_options.min_focal_length_ratio = 0.1;
    abs_pose_options.max_focal_length_ratio = 10;
    abs_pose_options.ransac_options.max_error = 8.0;
    abs_pose_options.ransac_options.min_inlier_ratio = 0.1;
    abs_pose_options.ransac_options.min_num_trials = options_.minimum_ransac_times * 4;
    abs_pose_options.estimate_focal_length = false;
    abs_pose_options.ransac_options.confidence = 0.999999;

    AbsolutePoseRefinementOptions abs_pose_refinement_options;
    abs_pose_refinement_options.refine_focal_length = false;
    abs_pose_refinement_options.refine_extra_params = false;

    std::vector<char> inlier_mask;
    //////////////////////////////////////////////////////////////////////////////
    // P3P
    //////////////////////////////////////////////////////////////////////////////
    size_t P3Pinlier_num = 0;
    double P3Pinlier_ratio = 0.0;
    double track_ratio = 0.0;
    bool P3P_estimate = false, P3PinlierRatioFlag = false, P3PestimateFlag = false;
    Eigen::Vector4d Qvec_P3P = image.Qvec();
    Eigen::Vector3d Tvec_P3P = image.Tvec();
    if(tri_points2D.size() >= static_cast<size_t>(options_.minimum_P3P_matches))
    {
        if (EstimateAbsolutePose(abs_pose_options, tri_points2D, tri_points3D,
                                 &Qvec_P3P, &Tvec_P3P, &camera, &P3Pinlier_num,
                                 &inlier_mask))
        {
            P3PestimateFlag = true;
        }

        P3Pinlier_ratio = static_cast<double>(P3Pinlier_num) / static_cast<double>(tri_points2D.size());
        track_ratio =  static_cast<double>(tri_points2D.size()) / static_cast<double>(image.visible_tracks_.size());
        if (P3Pinlier_num >= options_.minimum_P3P_inliers && P3Pinlier_ratio >= options_.P3P_inlier_ratio)
        {
            P3PinlierRatioFlag = true;
        }

        Qvec_P3P = Qvec_P3P / Qvec_P3P.norm();
        Eigen::Matrix3d RP3P = QuaternionToRotationMatrix(Qvec_P3P);
        Eigen::Vector3d Cvec_P3P = -RP3P.transpose() * Tvec_P3P;
        LOG(INFO) << "P3P: " << P3Pinlier_num << " from " << tri_points3D.size() << " (" << P3Pinlier_ratio * 100 << "%)"
                  << " (" << track_ratio * 100 << "%)" << std::endl
                  << RP3P << std::endl
                  << Cvec_P3P(0) << " " << Cvec_P3P(1) << " " << Cvec_P3P(2) << std::endl;

        if(P3PestimateFlag && P3PinlierRatioFlag)
        {
            P3P_estimate = true;
            LOG(INFO) << "P3P works successfully" << std::endl;
        }
        else
        {
            P3P_estimate = false;
            LOG(INFO) << "P3P fails" << std::endl;
        }
    }

    if(P3P_estimate) // both right
    {
        Eigen::Vector4d Qvec_P3P_ORI = Qvec_P3P;
        Eigen::Vector3d Tvec_P3P_ORI = Tvec_P3P;
        image.Qvec() = Qvec_P3P;
        image.Tvec() = Tvec_P3P;

        /// refine the pose
        bool refineFlag = true;
        Eigen::Matrix3d Rpre = QuaternionToRotationMatrix(image.Qvec());
        if (!RefineAbsolutePose(abs_pose_refinement_options, inlier_mask,
                            tri_points2D, tri_points3D, &image.Qvec(),
                            &image.Tvec(), &camera))
        {
            refineFlag = false;
        }
        else
        {
            image.NormalizeQvec();
            Eigen::Vector3d Crefine = -QuaternionToRotationMatrix(image.Qvec()).transpose() * image.Tvec();
            LOG(INFO) << "After refinement: " << std::endl
                  << QuaternionToRotationMatrix(image.Qvec()) << std::endl
                  << Crefine(0) << " " << Crefine(1) << " " << Crefine(2) << std::endl;

            Eigen::Matrix3d Rpost = QuaternionToRotationMatrix(image.Qvec());
            double angleChange = ComputeRotationError(Rpre, Rpost);
            LOG(INFO) << "Rotation angleChange: " << angleChange << std::endl;

            if(angleChange >= options_.maximum_rotation_angle) /// check the rotation change
                refineFlag = false;
        }

        if(refineFlag)
        {
            double* qvec_ = image.Qvec().data();
            double* angle_axis_ = image.AngleAxis().data();
            ceres::QuaternionToAngleAxis(qvec_, angle_axis_);
            return true;
        }
        else
        {
            LOG(INFO) << "Reset to  original estimation" << std::endl;
            image.Qvec() = Qvec_P3P_ORI;
            image.Tvec() = Tvec_P3P_ORI;
            double* qvec_ = image.Qvec().data();
            double* angle_axis_ = image.AngleAxis().data();
            ceres::QuaternionToAngleAxis(qvec_, angle_axis_);
            return false;
        }
    }
    else
    {
        return false;
    }
}

bool SfMer::ImageAddition()
{
    std::vector<std::pair<image_t, float>> image_ranks;
    // Append images that have not failed to register before.
    for (auto& image : images_)
    {
        // Skip images that are already registered.
        if (image.second.IsRegistered())
        {
            continue;
        }

        // Only consider images with a sufficient number of visible points.
        if (image.second.NumPoints3D() < static_cast<size_t>(options_.minimum_P3P_matches))
        {
            continue;
        }

        // Only try registration for a certain maximum number of times.
        const size_t num_reg_trials = num_reg_trials_[image.first];
        if (num_reg_trials > static_cast<size_t>(options_.maximum_register_times_))
        {
            LOG(INFO) << "image: " << image.first << " has been registered too many times" << std::endl;
            continue;
        }

        const float rankScore = static_cast<float>(image.second.Point3DVisibilityScore());
        //const float rankScore = static_cast<float>(image.second.NumPoints3D());
        image_ranks.emplace_back(image.first, rankScore);
    }
    LOG(INFO) << image_ranks.size() << " images are preparing to be registered\n";

    std::sort(image_ranks.begin(), image_ranks.end(),
              [](const std::pair<image_t, float>& image1,
                 const std::pair<image_t, float>& image2)
    {
        return image1.second > image2.second;
    });

    std::vector<image_t> next_images;
    size_t num_add_ = options_.maximum_camera_registration > (int)image_ranks.size() ? (int)image_ranks.size() : options_.maximum_camera_registration;
    if(num_add_ == 0)
        return false;

    for(auto& rankM : image_ranks)
    {
        if(rankM.second <= options_.addRatio_points * image_ranks[0].second)
        {
            break;
        }
        else
        {
            next_images.push_back(rankM.first);
            if(next_images.size() > num_add_)
                break;
        }
    }
    LOG(INFO) << next_images.size() <<" cameras are going to be registered: \n" << std::endl;

    /// add images
    bool registerFlag = false;
    int sucImage = 0;
    refineCams.clear();

    for (int reg_trial = 0; reg_trial < (int)(next_images.size()); ++reg_trial)
    {
        const image_t next_image_id = next_images[reg_trial];
        Image& next_image = images_[next_image_id];
        {
            PrintHeading1(StringPrintf("Registering image #%d (%d)", next_image_id, calibImages.size() + 1));
            LOG(INFO) << StringPrintf("  => Image sees %d / %d points, score = %d", next_image.NumPoints3D(),
                                  next_image.visible_tracks_.size(), next_image.Point3DVisibilityScore())
                      << std::endl;

            if(RegisterNextImage_P3P(next_image_id))
            {
                calibImages.push_back(next_image_id);
                next_image.SetRegistered(true);
                registerFlag = true;
                sucImage++;
                refineCams.insert(next_image_id);
            }
        }
    }

    std::set<image_t> neighborCams1;
    /// first level
    for(auto& imgID : refineCams)
    {
        if(imgNeighbors.count(imgID) == 0)
            continue;

        for(auto& neighID : imgNeighbors[imgID])
        {
            Image& img = images_[neighID];
            if(img.IsRegistered())
                neighborCams1.insert(neighID);
        }
    }
    LOG(INFO) << "Level1: " << neighborCams1.size() << " cameras are added\n";

    /// second level
    std::set<image_t> neighborCams2;
    for(auto& imgID : neighborCams1)
    {
         if(imgNeighbors.count(imgID) == 0)
            continue;

        for(auto& neighID : imgNeighbors[imgID])
        {
            class Image& img = images_[neighID];
            if(img.IsRegistered())
                neighborCams2.insert(neighID);
        }
    }
    LOG(INFO) << "Level2: " << neighborCams2.size() << " cameras are added\n";

    /// second level
    std::set<image_t> neighborCams3;
    /*for(auto& imgID : neighborCams2)
    {
         if(imgNeighbors.count(imgID) == 0)
            continue;

        for(auto& neighID : imgNeighbors[imgID])
        {
            class Image& img = images_[neighID];
            if(img.IsRegistered())
                neighborCams3.insert(neighID);
        }
    }
    LOG(INFO) << "Level3: " << neighborCams3.size() << " cameras are added\n";*/

    for(auto& imgID : neighborCams1)
    {
        refineCams.insert(imgID);
    }
    for(auto& imgID : neighborCams2)
    {
        refineCams.insert(imgID);
    }
    for(auto& imgID : neighborCams3)
    {
        refineCams.insert(imgID);
    }

    double percentage = (double)sucImage / (double)(next_images.size());
    LOG(INFO) << sucImage << " images are added from " << next_images.size() << " images, " << percentage * 100 << "%" << std::endl;

    if(sucImage == 0)
    {
        return false;
    }

    if((options_.RAconf_ != 0) && (percentage <= 0.5))
    {
        PrintHeading1(StringPrintf("Supervised SfM ends"));
        options_.RAconf_ = 0;
        return true;
    }
    else
    {
        return registerFlag;
    }
}

bool SfMer::TriangulateTrack(int& track_id, Eigen::Vector3d& pointXYZ, double& maxError)
{
    track feature_track = tracks_[track_id];

    // compute the effective number of calibrated cameras
    size_t num_calibrated_cameras = 0;
    for(const auto& v2D : feature_track.view2D)
    {
        Image& image = images_[v2D.first];
        if(!image.IsRegistered())
            continue;
        else
            num_calibrated_cameras++;
    }

    if(num_calibrated_cameras < options_.minimum_visible_views)
        return false;

    // compute the 3D position of track
    EstimateTriangulationOptions tri_options;
    tri_options.min_tri_angle = DegToRad(options_.minimum_tri_angle);
    tri_options.residual_type = TriangulationEstimator::ResidualType::REPROJECTION_ERROR;
    tri_options.ransac_options.max_error = std::sqrt(options_.maximum_projection_error);
    tri_options.ransac_options.confidence = 0.9999;
    tri_options.ransac_options.min_inlier_ratio = 0.02;
    tri_options.ransac_options.max_num_trials = 10000;

    // Setup data for triangulation estimation.
    std::vector<TriangulationEstimator::PointData> point_data;
    point_data.resize(num_calibrated_cameras);
    std::vector<TriangulationEstimator::PoseData> pose_data;
    pose_data.resize(num_calibrated_cameras);

    int indexData = 0;
    for(const auto& v2D : feature_track.view2D)
    {
        Image& image = images_[v2D.first];
        Camera& cam = cameras_[image.CameraId()];
        if(!image.IsRegistered())
                continue;

        point_data[indexData].point = image.Point2D(v2D.second).XY();
        point_data[indexData].point_normalized = cam.ImageToWorld(image.Point2D(v2D.second).XY());
        pose_data[indexData].proj_matrix = image.ProjectionMatrix();
        pose_data[indexData].proj_center = image.ProjectionCenter();
        pose_data[indexData].camera = &cam;
        indexData++;
    }

    // Enforce exhaustive sampling for small track lengths.
    const size_t kExhaustiveSamplingThreshold = 15;
    if (point_data.size() <= kExhaustiveSamplingThreshold)
    {
      tri_options.ransac_options.min_num_trials = NChooseK(point_data.size(), 2);
    }

    // Estimate triangulation.
    std::vector<char> inlier_mask;
    if (!EstimateTriangulation(tri_options, point_data, pose_data, &inlier_mask,
                               &pointXYZ)) {
      return false;
    }

    std::vector<double> residuals;
    TriangulationEstimator TE;
    TE.Residuals(point_data, pose_data, pointXYZ, &residuals);
    //std::sort(residuals.begin(), residuals.end());
    //maxError = residuals[residuals.size() - 1];
    maxError = *std::max_element(residuals.begin(), residuals.end());

    if (maxError <= 10.0 * options_.maximum_projection_error)
    {
        return true;
    }
    else
    {
        maxError = 0;
        return false;
    }
}

/// Re-triangulate all tracks visible from the currently calibrated images
/// using RANSAC, then (when selection is true) run a greedy K-cover track
/// selection so every covered image keeps roughly options_.Kcover_
/// supporting tracks, and finally re-assign the surviving tracks to their
/// 2D observations.
/// @param selection  whether to perform the K-cover track selection.
/// @return true if the new effective-track set is stable relative to the
///         previous one (intersection-over-union >= 0.9), false otherwise
///         (or when no image is covered at all).
bool SfMer::TriangulateImages_RANSAC(bool selection)
{
    LOG(INFO) << calibImages.size() << " images are going to triangulate (RANSAC-based Triangulation)" << std::endl;
    trackEffV.clear();
    for(size_t i = 0; i < tracks_.size(); i++){
        trackEffV[i] = 0;
    }

    /// Count, for every track, how many calibrated images observe it.
    for(const auto& imgID: calibImages)
    {
        Image& image = images_[imgID];
        for(auto& vTrack : image.visible_tracks_){
            trackEffV[vTrack.second]++;
        }
    }

    /// Undo the registration of the previously effective 3D points.
    size_t pre_size = effTracks_.size();
    std::set<int> preTrackID;
    for(auto& trackSeed : effTracks_)
    {
        track& curT = tracks_[trackSeed.first];
        preTrackID.insert(trackSeed.first);
        for(auto& imgV : curT.view2D){
            Image& img = images_[imgV.first];
            img.ResetPoint3DForPoint2D(imgV.second);
            img.point3D_visibility_pyramid_.ResetPoint(img.Point2D(imgV.second).X(), img.Point2D(imgV.second).Y());
        }
    }
    effTracks_.clear();

    /// Tracks observed by enough calibrated views are triangulation candidates.
    std::vector<size_t> index_tracks;
    for(size_t iT = 0; iT < tracks_.size(); iT++) {
        if(trackEffV[iT] >= options_.minimum_visible_views) {
            index_tracks.push_back(iT);
        }
    }

    std::vector<double> track_error;
    track_error.resize(tracks_.size(), 0.0);
    std::vector<Eigen::Vector3d> sceneX(tracks_.size(), Eigen::Vector3d(0, 0, 0));

    /// Triangulate the candidates in parallel. TriangulateTrack leaves a
    /// non-zero residual in track_error on success and resets it to 0 on
    /// failure, which serves as the validity flag below.
    #pragma omp parallel for
    for(int iT = 0; iT < (int)index_tracks.size(); iT++) {
        int trackID = index_tracks[iT];
        TriangulateTrack(trackID, sceneX[trackID], track_error[trackID]);
    }

    for(int validT = 0; validT < (int)track_error.size(); validT++) {
        if(track_error[validT] != 0.0) {
            effTracks_[validT] = Point3D();
            effTracks_[validT].SetXYZ(sceneX[validT]);
            effTracks_[validT].SetColor(Eigen::Vector3ub(255, 255, 255));
        }
    }
    sceneX.clear();
    LOG(INFO) << index_tracks.size() << " tracks are checked, " << effTracks_.size() << " effective tracks" << std::endl;

    /// Build the per-view track constraints used by the tracks selection.
    std::set<int> postTrackID;
    std::vector<std::pair<int, Eigen::Vector2d>> trackViews;
    std::map<int, std::set<int>> viewConstraints;
    std::map<image_t, bool> viewCalibrate;
    for(auto& viewID : images_)
    {
        if(viewID.second.IsRegistered())
            viewCalibrate[viewID.first] = true;
    }

    for(auto& trackSeed : effTracks_)
    {
        track& curT = tracks_[trackSeed.first];
        double viewEff = 0.0;
        for(auto& imgV : curT.view2D)
        {
            Image& img = images_[imgV.first];
            viewConstraints[imgV.first].insert(trackSeed.first);

            if(img.IsRegistered())
                viewEff++;
        }
        trackViews.push_back(std::make_pair(trackSeed.first, Eigen::Vector2d(viewEff, track_error[trackSeed.first])));
    }

    /// Images covered by enough tracks (plus all calibrated images).
    std::set<int> coverImages;
    std::set<image_t> newCalibImages;
    std::vector<int> nextViewList;
    for(auto& viewID : images_)
    {
        if(viewConstraints[viewID.first].size() >= (int)(options_.minimum_P3P_matches))
        {
            coverImages.insert(viewID.first);

            if(viewCalibrate.count(viewID.first) == 0){
                newCalibImages.insert(viewID.first);
            }
        }

        if(viewCalibrate[viewID.first] == true){
            coverImages.insert(viewID.first);
        }
    }
    LOG(INFO) << coverImages.size() << " images are covered" << std::endl;

    if(coverImages.size() == 0){
        LOG(INFO) << "no images are covered by the current tracks" << std::endl;
        return false;
    }

    /// Report the initial covering statistics. min_id/max_id are now
    /// initialized so the log is well-defined even when no view reaches
    /// minimum_P3P_matches (they were read uninitialized before).
    int min_id = -1, max_id = -1;
    size_t min_times = 1000, max_times = 0;
    for(auto& view_c : viewConstraints){
        if(view_c.second.size() < options_.minimum_P3P_matches)
            continue;

        if(view_c.second.size() < min_times){
            min_id = view_c.first;
            min_times = view_c.second.size();
        }

        if(view_c.second.size() > max_times){
            max_id = view_c.first;
            max_times = view_c.second.size();
        }
    }
    LOG(INFO) << "Initial effective tracks:" << std::endl;
    LOG(INFO) << "Minimal covering times: " << min_times << " for calibrated image " << min_id << std::endl;
    LOG(INFO) << "Maximal covering times: " << max_times << " for calibrated image " << max_id << std::endl;

    std::map<int, bool> tracksSelection;
    if(selection) {
        size_t cover_k = options_.Kcover_ >= max_times ? max_times : options_.Kcover_;

        /// Remaining cover budget per image; images leave the map once covered.
        std::map<int, int> Kcover;
        for(auto& viewID : coverImages) {
            Kcover[viewID] = static_cast<int>(cover_k);
        }

        /// Views with at most cover_k tracks keep all of their tracks.
        for(auto& view_c : viewConstraints){
            if(view_c.second.size() <= cover_k && Kcover.count(view_c.first) > 0){
                Kcover.erase(view_c.first);

                for(auto& track_id : view_c.second){
                    tracksSelection[track_id] = true;
                }
            }
        }

        /// Charge the already-selected tracks against the remaining budgets.
        for(auto& select_track : tracksSelection){
            track& cur_ = tracks_[select_track.first];

            for(const auto& view : cur_.view2D) {
                if(Kcover.count(view.first) > 0) {
                    Kcover[view.first]--;

                    if(Kcover[view.first] == 0)
                        Kcover.erase(view.first);
                }
            }
        }
        LOG(INFO) << Kcover.size() << " images' tracks are carefully selected" << std::endl;

        /// Rank every effective track by how many still-uncovered views it
        /// supports. (Renamed from "trackViews", which shadowed the outer
        /// variable of the same name.)
        std::vector<std::pair<int, Eigen::Vector2i>> coverTrackViews;
        std::map<int, int> eff_track_view;
        for(auto& trackSeed : effTracks_) {
            track& curT = tracks_[trackSeed.first];
            int view_eff = 0, view_new_cover = 0;

            for(auto& imgV : curT.view2D) {
                Image& img = images_[imgV.first];
                // BUGFIX: was "Kcover.count(img.ImageId() > 0)", which looked
                // up the boolean value 0/1 as a map key instead of the image
                // id, so view_eff (and the selection ranking) was wrong.
                if(Kcover.count(img.ImageId()) > 0)
                    view_eff++;

                if(newCalibImages.count(img.ImageId()) > 0)
                    view_new_cover++;
            }
            coverTrackViews.push_back(std::make_pair(trackSeed.first, Eigen::Vector2i(view_eff, view_new_cover)));
            eff_track_view[trackSeed.first] = view_eff;
        }
        std::sort(coverTrackViews.begin(), coverTrackViews.end(), compView);

        /// Greedy K-cover: repeatedly satisfy the least-covered view first.
        if(Kcover.size() != 0)
        {
            int extra_select = 0;
            while(Kcover.size() != 0){
                std::vector<std::pair<int, int>> rank_cover;
                for(auto& cover : Kcover){
                    rank_cover.push_back(std::make_pair(cover.first, cover.second));
                }

                std::sort(rank_cover.begin(), rank_cover.end(), comImageWeight); // find the minimal cover
                int refer_id = rank_cover[0].first;

                std::vector<std::pair<int, int>> rank_track;
                for(auto& track_id : viewConstraints[refer_id]){
                    if(tracksSelection.count(track_id) > 0)
                        continue;

                    rank_track.push_back(std::make_pair(track_id, eff_track_view[track_id]));
                }
                std::sort(rank_track.begin(), rank_track.end(), comImageWeight);

                for(auto& track_r : rank_track){
                    tracksSelection[track_r.first] = true;
                    track& cur_ = tracks_[track_r.first];

                    for(const auto& view : cur_.view2D) {
                        if(Kcover.count(view.first) > 0) {
                             Kcover[view.first] -= 1;
                        }
                    }

                    if(Kcover[refer_id] == 0) // current images have been covered
                        break;
                }

                std::vector<int> erase_ids;
                for(auto& kc : Kcover){
                    if(kc.second <= 0){
                        erase_ids.push_back(kc.first);
                    }
                }

                for(auto& kc : erase_ids){
                    Kcover.erase(kc);
                }
                extra_select++;
            }
            LOG(INFO) << extra_select << " iterations are performed in tracks selection " << std::endl;
        }

        /// Recount the final covering times per image for reporting.
        for(auto& viewID : coverImages)
            Kcover[viewID] = 0;

        std::map<int, Point3D> effe_Tracks;
        for(auto& trackSeed : effTracks_) {
            if(tracksSelection.count(trackSeed.first) == 0)
                continue;

            effe_Tracks[trackSeed.first] = trackSeed.second;
            track& curT = tracks_[trackSeed.first];

            for(auto& imgV : curT.view2D) {
                if(Kcover.count(imgV.first) > 0) {
                    Kcover[imgV.first]++;
                }
            }
        }

        /// find min/max cover (ids initialized for well-defined logging)
        int minK = 100000, maxK = 0;
        int view_min = -1, view_max = -1;
        for(const auto& viewID : coverImages){
            if(Kcover[viewID] < minK) {
                minK = Kcover[viewID];
                view_min = viewID;
            }

            if(Kcover[viewID] > maxK) {
                maxK = Kcover[viewID];
                view_max = viewID;
            }

        }
        LOG(INFO) << "After track selection:" << std::endl;
        LOG(INFO) << "Minimal covering times: " << minK << " for calibrated image " << view_min << std::endl;
        LOG(INFO) << "Maximal covering times: " << maxK << " for calibrated image " << view_max << std::endl;

        effTracks_.clear();
        for(auto& trackSeed : effe_Tracks) {
            effTracks_[trackSeed.first] = trackSeed.second;
        }
        LOG(INFO) << "Tracks Selection is done, K = " << options_.Kcover_ << std::endl;
    }
    else
    {
        LOG(INFO) << "All tracks are selected";
    }

    /// Re-assign the surviving tracks to their 2D observations.
    for(const auto& trackSeed : effTracks_)
    {
        if(selection == true)
        {
            if(tracksSelection.count(trackSeed.first) == 0)
                continue;
        }
        postTrackID.insert(trackSeed.first);
        track& curT = tracks_[trackSeed.first];
        for(auto& imgV : curT.view2D)
        {
            Image& img = images_[imgV.first];
            img.SetPoint3DForPoint2D(imgV.second, trackSeed.first);
            img.point3D_visibility_pyramid_.SetPoint(img.Point2D(imgV.second).X(), img.Point2D(imgV.second).Y());
        }
    }
    LOG(INFO) << "Image Distribution is done" << std::endl;

    /// Intersection-over-union between the previous and new track sets.
    std::vector<int> id_intersection;
    std::set_intersection(preTrackID.begin(), preTrackID.end(), postTrackID.begin(), postTrackID.end(), std::back_inserter(id_intersection));

    std::vector<int> id_union;
    std::set_union(preTrackID.begin(), preTrackID.end(), postTrackID.begin(), postTrackID.end(), std::back_inserter(id_union));

    double ratioIU = 1.0;
    if(id_union.size() != (size_t)0)
    {
        ratioIU = static_cast<double>(id_intersection.size()) / static_cast<double>(id_union.size());
    }

    LOG(INFO) << "EffectiveTracks: Pre (" << pre_size << "), Now (" << effTracks_.size() << "), ratioIU : " << ratioIU * 100 << " %"<< std::endl;
    if(selection)
        LOG(INFO) << tracksSelection.size() << " tracks are selected for BA" << std::endl;

    // A high overlap means the triangulation has stabilized.
    if(ratioIU < 0.9)
        return false;
    else
        return true;
}

/// Global bundle adjustment over all calibrated images, with rotations
/// parameterized as angle-axis vectors.
/// @param constantK   unused in this function -- NOTE(review): confirm
///                    whether intrinsics were meant to be fixed by it.
/// @param constantR   if true, all camera rotations are held constant;
///                    otherwise only the best-constrained camera's rotation
///                    is fixed to anchor the gauge.
/// @param constantT   unused in this function -- NOTE(review): confirm
///                    (the LocalBundleAdjustment overload does use it).
/// @param iterTimes   maximum number of solver and linear-solver iterations.
/// @return false if the problem has no residuals, true otherwise. On
///         success, quaternions are re-synced from the optimized angle-axis
///         values and the scene is normalized/filtered.
bool SfMer::BundleAdjustment_AngleAxis(const bool constantK, const bool constantR, const bool constantT, size_t iterTimes)
{
    std::unique_ptr<ceres::Problem> problem_;
    problem_.reset(new ceres::Problem());

    int fixedRIndex = 0;
    int maxResidualCount = 0;

    // Collect the distinct physical cameras used by the calibrated images.
    std::set<int> cameraID;
    for(auto view : calibImages)
    {
        int camID = img_cam_id_[view];
        cameraID.insert(camID);
    }

    // Register one 5-parameter intrinsics block per camera.
    for(auto viewID : cameraID)
    {
        Camera& camera = cameras_[viewID];
        double* camera_params = camera.ParamsData();
        problem_->AddParameterBlock(camera_params, 5);

        // Keep the principal point (parameter indices 1 and 2) constant
        // unless the options explicitly allow bundling it.
        if(options_.bundle_principle_points == false)
        {
            std::vector<int> constant_idxs;
            constant_idxs.push_back(1);
            constant_idxs.push_back(2);
            ceres::SubsetParameterization* cam_parameterization = new ceres::SubsetParameterization(5, constant_idxs);
            problem_->SetParameterization(camera_params, cam_parameterization);
        }
    }

    // Add one robustified reprojection residual per observed 3D point.
    for(auto view : calibImages)
    {
        Image& img = images_[view];
        Camera& camera = cameras_[img.CameraId()];
        double* camera_params = camera.ParamsData();

        double* tvec_data = img.Tvec().data();
        problem_->AddParameterBlock(tvec_data, 3);

        double* angle_axis = img.AngleAxis().data();
        problem_->AddParameterBlock(angle_axis, 3);

        int countResidual = 0;
        // One shared Huber loss per image; ceres::Problem takes ownership.
        ceres::LossFunction* loss_function = new ceres::HuberLoss(8.0);
        for (const Point2D& point2D : img.Points2D())
        {
            if (!point2D.HasPoint3D())
            {
                continue;
            }
            countResidual++;
            Point3D & point3D = effTracks_[point2D.Point3DId()];

            ceres::CostFunction* cost_function = BundleAdjustmentAACostFunction<class RadialCameraModel>::Create(point2D.XY());
            problem_->AddResidualBlock(cost_function, loss_function, angle_axis, tvec_data, point3D.XYZ().data(), camera_params);
        }

        // Remember the image with the most residuals: its rotation is fixed
        // below (when !constantR) to remove the gauge freedom.
        if(countResidual > maxResidualCount)
        {
            maxResidualCount = countResidual;
            fixedRIndex = view;
        }

        // Report weakly constrained images.
        if(countResidual < 2 * (int)options_.minimum_visible_views)
        {
            LOG(INFO) << view << "(" << countResidual << ") " << "\t";
        }
    }
    LOG(INFO) << std::endl;

    std::set<image_t> fixedIDs;
    // keep some rotations constant
    if(constantR)
    {
        for(const auto& view : calibImages)
        {
            double* angle_axis = images_[view].AngleAxis().data();
            problem_->SetParameterBlockConstant(angle_axis);
        }
        LOG(INFO) << calibImages.size() << " camera rotations are fixed " << std::endl;
    }
    else
    {
        // Fix only the best-constrained rotation as the gauge anchor.
        Image& img = images_[fixedRIndex];
        double* angle_axis = img.AngleAxis().data();
        problem_->SetParameterBlockConstant(angle_axis);
        fixedIDs.insert(fixedRIndex);
        LOG(INFO) << "camera: " << fixedRIndex << " id fixed in the BA, with " << maxResidualCount << " constraints" << std::endl;
    }

    if (problem_->NumResiduals() == 0)
        return false;
    else
        LOG(INFO) << "Residuals number: " << problem_->NumResiduals() << std::endl;

    // Empirical choice of linear solver based on the problem size.
    ceres::Solver::Options solver_options;
    solver_options.minimizer_progress_to_stdout = true;
    const size_t kMaxNumImagesDirectDenseSolver = 50;
    const size_t kMaxNumImagesDirectSparseSolver = 2000;
    const size_t num_images = calibImages.size();
    if (num_images <= kMaxNumImagesDirectDenseSolver)
    {
        solver_options.linear_solver_type = ceres::DENSE_SCHUR;
    }
    else if (num_images <= kMaxNumImagesDirectSparseSolver)
    {
        solver_options.preconditioner_type = ceres::JACOBI;
        solver_options.linear_solver_type = ceres::SPARSE_SCHUR;
    }
    else
    {
        // Indirect sparse (preconditioned CG) solver.
        solver_options.linear_solver_type = ceres::ITERATIVE_SCHUR;
        solver_options.preconditioner_type = ceres::SCHUR_JACOBI;
    }

    solver_options.num_threads = omp_get_max_threads();
    //solver_options.num_linear_solver_threads = omp_get_max_threads();
    solver_options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
    //solver_options.sparse_linear_algebra_library_type = ceres::CX_SPARSE;
    solver_options.max_num_iterations = iterTimes;
    solver_options.max_linear_solver_iterations = iterTimes;

    std::string error;
    CHECK(solver_options.IsValid(&error)) << error;

    ceres::Solver::Summary summary_;
    ceres::Solve(solver_options, problem_.get(), &summary_);

    if (solver_options.minimizer_progress_to_stdout)
    {
        LOG(INFO) << std::endl;
    }
    PrintHeading2("Bundle adjustment report");
    PrintSolverSummary(summary_);

    // Sync the quaternions with the optimized angle-axis parameters.
    for(const auto& view : calibImages)
    {
        Image& img = images_[view];
        double* angleAxis = img.AngleAxis().data();
        ceres::AngleAxisToQuaternion(angleAxis, &img.qvec_(0));
        img.NormalizeQvec();
    }

    // NOTE(review): normalization is skipped when transGPS is true --
    // presumably the GPS transform fixes the gauge; confirm.
    if(options_.transGPS == false) // gps has transform
        Normalize();

    FilterScenePoints();
    return true;
}

/// Local bundle adjustment: the full problem over all calibrated images is
/// built, but every camera NOT in refineCams is held constant, so only the
/// recently added cameras (and their collected neighbors) are optimized.
/// @param constantK   unused in this function -- NOTE(review): confirm
///                    whether intrinsics were meant to be fixed by it.
/// @param constantR   if true, all camera rotations are held constant;
///                    otherwise only the best-constrained rotation is fixed.
/// @param constantT   if true, all camera translations are held constant.
/// @param iterTimes   maximum number of solver and linear-solver iterations.
/// @return false if the problem has no residuals, true otherwise.
bool SfMer::LocalBundleAdjustment(const bool constantK, const bool constantR, const bool constantT, size_t iterTimes)
{
    std::unique_ptr<ceres::Problem> problem_;
    problem_.reset(new ceres::Problem());

    int fixedRIndex = 0;
    int maxResidualCount = 0;

    // Collect the distinct physical cameras used by the calibrated images.
    std::set<int> cameraID;
    for(auto view : calibImages)
    {
        int camID = img_cam_id_[view];
        cameraID.insert(camID);
    }
    LOG(INFO) << refineCams.size() << " cameras are going to be refined\n";

    // Register one 5-parameter intrinsics block per camera.
    for(auto viewID : cameraID)
    {
        Camera& camera = cameras_[viewID];
        double* camera_params = camera.ParamsData();
        problem_->AddParameterBlock(camera_params, 5);

        // Keep the principal point (parameter indices 1 and 2) constant
        // unless the options explicitly allow bundling it.
        if(options_.bundle_principle_points == false)
        {
            std::vector<int> constant_idxs;
            constant_idxs.push_back(1);
            constant_idxs.push_back(2);
            ceres::SubsetParameterization* cam_parameterization = new ceres::SubsetParameterization(5, constant_idxs);
            problem_->SetParameterization(camera_params, cam_parameterization);
        }
    }

    // Add one robustified reprojection residual per observed 3D point.
    for(auto view : calibImages)
    {
        Image& img = images_[view];
        Camera& camera = cameras_[img.CameraId()];
        double* camera_params = camera.ParamsData();

        double* tvec_data = img.Tvec().data();
        problem_->AddParameterBlock(tvec_data, 3);

        double* angle_axis = img.AngleAxis().data();
        problem_->AddParameterBlock(angle_axis, 3);

        int countResidual = 0;
        // One shared Huber loss per image; ceres::Problem takes ownership.
        ceres::LossFunction* loss_function = new ceres::HuberLoss(8.0);
        for (const Point2D& point2D : img.Points2D())
        {
            if (!point2D.HasPoint3D())
            {
                continue;
            }
            countResidual++;
            Point3D & point3D = effTracks_[point2D.Point3DId()];

            ceres::CostFunction* cost_function = BundleAdjustmentAACostFunction<class RadialCameraModel>::Create(point2D.XY());
            problem_->AddResidualBlock(cost_function, loss_function, angle_axis, tvec_data, point3D.XYZ().data(), camera_params);
        }

        // Remember the image with the most residuals: its rotation is fixed
        // below (when !constantR) to remove the gauge freedom.
        if(countResidual > maxResidualCount)
        {
            maxResidualCount = countResidual;
            fixedRIndex = view;
        }

        // Report weakly constrained images.
        if(countResidual < 2 * (int)options_.minimum_visible_views)
        {
            LOG(INFO) << view << "(" << countResidual << ") " << "\t";
        }
    }
    LOG(INFO) << std::endl;

    std::set<image_t> fixedIDs;
    // keep some rotations constant
    if(constantR)
    {
        // NOTE(review): all calibImages rotations are fixed here, but the
        // log below reports refineCams.size() -- confirm which is intended.
        for(const auto& view : calibImages)
        {
            double* angle_axis = images_[view].AngleAxis().data();
            problem_->SetParameterBlockConstant(angle_axis);
        }
        LOG(INFO) << refineCams.size() << " camera rotations are fixed " << std::endl;
    }
    else
    {
        // Fix only the best-constrained rotation as the gauge anchor.
        Image& img = images_[fixedRIndex];
        double* angle_axis = img.AngleAxis().data();
        problem_->SetParameterBlockConstant(angle_axis);
        fixedIDs.insert(fixedRIndex);
        LOG(INFO) << "camera: " << fixedRIndex << " id fixed in the BA, with " << maxResidualCount << " constraints" << std::endl;
    }

    if(constantT)
    {
        for(const auto& view : calibImages)
        {
            double* tvec_data = images_[view].Tvec().data();
            problem_->SetParameterBlockConstant(tvec_data);
        }
        LOG(INFO) << calibImages.size() << " camera tranlations are fixed " << std::endl;
    }

    // Freeze every camera outside the refine set; only refineCams move.
    // NOTE(review): the counter starts at 2 rather than 0 -- presumably to
    // account for otherwise-anchored cameras; confirm the intended count.
    int nFixedCameras = 2;
    for(auto& view : calibImages)
    {
        if(fixedIDs.count(view) > 0)
            continue;

        if(refineCams.count(view) == 0)
        {
            Image& img = images_[view];
            double* angle_axis = img.AngleAxis().data();
            double* tvec_data = img.Tvec().data();
            problem_->SetParameterBlockConstant(angle_axis);
            problem_->SetParameterBlockConstant(tvec_data);
            nFixedCameras++;
        }
    }
    LOG(INFO) << nFixedCameras << " cameras are fixed to speed-up the BA" << std::endl;


    if (problem_->NumResiduals() == 0)
        return false;
    else
        LOG(INFO) << "Residuals number: " << problem_->NumResiduals() << std::endl;

    // Empirical choice of linear solver based on the problem size.
    ceres::Solver::Options solver_options;
    solver_options.minimizer_progress_to_stdout = true;
    const size_t kMaxNumImagesDirectDenseSolver = 50;
    const size_t kMaxNumImagesDirectSparseSolver = 2000;
    const size_t num_images = calibImages.size();
    if (num_images <= kMaxNumImagesDirectDenseSolver)
    {
        solver_options.linear_solver_type = ceres::DENSE_SCHUR;
    }
    else if (num_images <= kMaxNumImagesDirectSparseSolver)
    {
        solver_options.preconditioner_type = ceres::JACOBI;
        solver_options.linear_solver_type = ceres::SPARSE_SCHUR;
    }
    else
    {
        // Indirect sparse (preconditioned CG) solver.
        solver_options.linear_solver_type = ceres::ITERATIVE_SCHUR;
        solver_options.preconditioner_type = ceres::SCHUR_JACOBI;
    }

    solver_options.num_threads = omp_get_max_threads();
    //solver_options.num_linear_solver_threads = omp_get_max_threads();
    solver_options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
    //solver_options.sparse_linear_algebra_library_type = ceres::CX_SPARSE;
    solver_options.max_num_iterations = iterTimes;
    solver_options.max_linear_solver_iterations = iterTimes;

    std::string error;
    CHECK(solver_options.IsValid(&error)) << error;

    ceres::Solver::Summary summary_;
    ceres::Solve(solver_options, problem_.get(), &summary_);

    if (solver_options.minimizer_progress_to_stdout)
    {
        LOG(INFO) << std::endl;
    }
    PrintHeading2("Bundle adjustment report");
    PrintSolverSummary(summary_);

    // Sync the quaternions with the optimized angle-axis parameters.
    for(const auto& view : calibImages)
    {
        Image& img = images_[view];
        double* angleAxis = img.AngleAxis().data();
        ceres::AngleAxisToQuaternion(angleAxis, &img.qvec_(0));
        img.NormalizeQvec();
    }

    FilterScenePoints();
    return true;
}

bool SfMer::BundleAdjustment_Seed()
{
    // Runs a bundle adjustment over the current seed reconstruction with
    // intrinsics held fixed. Returns false when there are no residuals, when
    // too many points are filtered afterwards, or when the solver needed an
    // unusually large number of iterations; otherwise returns whether the
    // solution is usable.
    std::unique_ptr<ceres::Problem> problem_;
    problem_.reset(new ceres::Problem());

    // Collect the distinct camera ids used by the calibrated images.
    std::set<int> cameraID;
    for(auto view : calibImages)
    {
        int camID = img_cam_id_[view];
        cameraID.insert(camID);
    }

    // Intrinsics (5 parameters per camera) are added but kept constant
    // during the seed adjustment.
    for(auto viewID : cameraID)
    {
        Camera& camera = cameras_[viewID];
        double* camera_params = camera.ParamsData();
        problem_->AddParameterBlock(camera_params, 5);
        problem_->SetParameterBlockConstant(camera_params);
    }

    int countR = 0;
    int countResidual = 0;
    for(const auto& view : calibImages)
    {
        Image& img = images_[view];
        img.NormalizeQvec();

        Camera& camera = cameras_[img.CameraId()];
        double* camera_params = camera.ParamsData();

        double* tvec_data = img.Tvec().data();
        problem_->AddParameterBlock(tvec_data, 3);

        // Rotation is parameterized as an angle-axis vector (3 parameters).
        double* angle_axis = img.AngleAxis().data();
        problem_->AddParameterBlock(angle_axis, 3);

        // Fix the rotation of the first image to remove part of the gauge
        // freedom of the problem.
        if(countR == 0)
        {
            problem_->SetParameterBlockConstant(angle_axis);
            countR++;
        }

        // One residual block per 2D observation that has an associated 3D
        // point; a Huber loss makes the adjustment robust to outliers.
        for (const Point2D& point2D : img.Points2D())
        {
            if (!point2D.HasPoint3D())
                continue;

            countResidual++;
            Point3D & point3D = effTracks_[point2D.Point3DId()];
            ceres::LossFunction* loss_function = new ceres::HuberLoss(5.0);
            ceres::CostFunction* cost_function = BundleAdjustmentAACostFunction<class RadialCameraModel>::Create(point2D.XY());
            problem_->AddResidualBlock(cost_function, loss_function, angle_axis, tvec_data, point3D.XYZ().data(), camera_params);
        }
    }

    if (problem_->NumResiduals() == 0)
    {
        return false;
    }

    LOG(INFO) << "Residuals number: " << problem_->NumResiduals() << " " << countResidual << std::endl;
    // Seed problems are small, so a direct dense Schur solver is used.
    ceres::Solver::Options solver_options;
    solver_options.minimizer_progress_to_stdout = true;
    solver_options.linear_solver_type = ceres::DENSE_SCHUR;
    solver_options.num_threads = omp_get_max_threads();
//    solver_options.num_linear_solver_threads = omp_get_max_threads();
    solver_options.max_num_iterations = 100;
    solver_options.max_linear_solver_iterations = 100;

    std::string error;
    CHECK(solver_options.IsValid(&error)) << error;

    ceres::Solver::Summary summary_;
    ceres::Solve(solver_options, problem_.get(), &summary_);

    if (solver_options.minimizer_progress_to_stdout)
    {
        LOG(INFO) << std::endl;
    }
    PrintHeading2("Bundle adjustment report");
    PrintSolverSummary(summary_);

    // Propagate the optimized angle-axis rotations back into the image
    // quaternions.
    for(const auto& view : calibImages)
    {
        Image& img = images_[view];
        double* angleAxis = img.AngleAxis().data();
        ceres::AngleAxisToQuaternion(angleAxis, &img.qvec_(0));
        img.NormalizeQvec();
    }
    Normalize();

    int numOutliers = FilterScenePoints();
    LOG(INFO) << numOutliers << " points are filtered, from " << countResidual / 2 << " points" << std::endl;

    // Reject the seed when more than ~30% of the points were filtered out or
    // when the solver needed 20 or more steps to converge.
    if(numOutliers > 0.3 * countResidual / 2.0 || (summary_.num_successful_steps + summary_.num_unsuccessful_steps) >= 20)
    {
        return false;
    }
    else
        return summary_.IsSolutionUsable();
}

size_t SfMer::FilterScenePoints()
{
    std::vector<int> deleteID;
    std::vector<double> reproErrors;
    std::vector<double> reproErrors_;
    for(auto& point3D : effTracks_)
    {
        /// check reprojection error and cheriaty
        track& tr = tracks_[point3D.first];
        bool reproFlag = true;
        std::vector<double> reproCur;
        for(const auto& imgV: tr.view2D)
        {
            Image& img = images_[imgV.first];
            img.NormalizeQvec();

            if(img.IsRegistered())
            {
                Camera& cam = cameras_[img.CameraId()];
                Eigen::Matrix3x4d proj_matrix = img.ProjectionMatrix();

                double reproError = CalculateReprojectionError(img.Point2D(imgV.second).XY(), point3D.second.XYZ(), proj_matrix, cam);
                if (!HasPointPositiveDepth(proj_matrix, point3D.second.XYZ()))
                {
                    reproFlag = false;
                    break;
                }

                double error_threshold = std::max(cam.Width(), cam.Height()) / 1024.0 * 4.0;
                if(reproError >= error_threshold)
                {
                    reproFlag = false;
                    break;
                }
                reproCur.push_back(reproError);
            }
        }

        /// check tri-angle
        Eigen::Vector3d proj_center1;
        Eigen::Vector3d proj_center2;
        bool angleFlag = false;
        std::vector<int> View;
        for(const auto& imgV : tr.view2D)
        {
            View.push_back(imgV.first);
        }

        int countEffView = 0;
        for(int i1 = 0; i1 < static_cast<int>(View.size()) - 1; i1++)
        {
            if(images_[View[i1]].IsRegistered())
            {
                countEffView++;
            }
        }

        for(int i1 = 0; i1 < static_cast<int>(View.size()) - 1; i1++)
        {
            Image& img1 = images_[View[i1]];
            if(!img1.IsRegistered())
                continue;

            for(int i2 = i1 + 1; i2 < static_cast<int>(View.size()); i2++)
            {
                Image& img2 = images_[View[i2]];
                if(!img2.IsRegistered())
                    continue;

                proj_center1 = img1.ProjectionCenter();
                proj_center2 = img2.ProjectionCenter();
                const double tri_angle = CalculateTriangulationAngle(proj_center1, proj_center2, point3D.second.XYZ());

                if(calibImages.size() == 2)
                {
                    if(RadToDeg(tri_angle) >= options_.minimum_tri_angle)
                    {
                        angleFlag = true;
                        break;
                    }
                }
                else
                {
                    if(RadToDeg(tri_angle) >= options_.minimum_tri_angle)
                    {
                        angleFlag = true;
                        break;
                    }
                }
            }

            if(angleFlag)
                break;
        }

        if(!reproFlag || !angleFlag)
        {
            for(const auto& imgV : tr.view2D)
            {
                Image& img = images_[imgV.first];
                img.ResetPoint3DForPoint2D(imgV.second);
                img.point3D_visibility_pyramid_.ResetPoint(img.Point2D(imgV.second).X(), img.Point2D(imgV.second).Y());
            }
            deleteID.push_back(point3D.first);
        }
        else
        {
            double sum = 0.0;
            for(auto& re : reproCur){
                sum += re;
                reproErrors.push_back(re);
            }
            sum = sum / reproCur.size();
            reproErrors_.push_back(sum);
            points_track_error[point3D.first] = sum;
        }
    }

    double sum = 0.0;
    for(auto& re : reproErrors_)
        sum += re*re;

    repro_RMS = sqrt(sum / reproErrors_.size()); 
    std::sort(reproErrors_.begin(), reproErrors_.end());
    repro_median = reproErrors_[reproErrors_.size() / 2];

    for(int iD = 0; iD < static_cast<int>(deleteID.size()); iD++)
    {
        effTracks_.erase(deleteID[iD]);
    }

    LOG(INFO) << "# of effective tracks: " << reproErrors_.size() << ", repro-error RMS: " << repro_RMS
              << ", repro-error median: " << repro_median
              << std::endl;
    return deleteID.size();
}

/// Estimates a 3D similarity transform from the reconstruction frame to the
/// GPS frame with LO-RANSAC and, if the fit is acceptable, applies it to the
/// registered camera poses, the prior rotations of all images and the scene
/// points. This function is only performed after seed reconstruction:
/// we only need to transform the rotation matrix and 3D scene points.
/// @param gps_error        RANSAC inlier threshold (GPS units).
/// @param skip_final_check apply the transform even when the median residual
///                         exceeds 4 * gps_error.
/// @return (mean residual, median residual) when the transform was applied,
///         (0, 0) otherwise (too few GPS priors or the fit was rejected).
Eigen::Vector2d SfMer::TransformFrame2GPS(double gps_error, bool skip_final_check)
{
    // Split the calibrated images into those with and without a GPS prior.
    std::vector<image_t> calibGPS;
    std::vector<image_t> calib_noGPS;
    for (size_t i = 0; i < calibImages.size(); ++i)
    {
        image_t indexI = calibImages[i];
        if(imgGPS.count(indexI) == 0)
            calib_noGPS.push_back(indexI);
        else
            calibGPS.push_back(indexI);
    }
    LOG(INFO) << calibGPS.size() << " images has priori GPS" << std::endl;

    // A similarity transform needs at least 4 correspondences.
    if(calibGPS.size() <= 3)
        return Eigen::Vector2d(0.0, 0.0);

    // Correspondences: reconstructed projection centers -> GPS positions.
    std::vector<Eigen::Vector3d> src_mat;
    std::vector<Eigen::Vector3d> dst_mat;
    for (size_t i = 0; i < calibGPS.size(); ++i)
    {
        image_t indexI = calibGPS[i];
        src_mat.push_back(images_[indexI].ProjectionCenter());
        dst_mat.push_back(imgGPS[indexI]);
    }

    RANSACOptions options;
    options.max_error = gps_error;
    options.min_inlier_ratio = 0.01;

    LORANSAC<SimilarityTransformEstimator<3>, SimilarityTransformEstimator<3>>  ransac(options);
    const auto report = ransac.Estimate(src_mat, dst_mat);
    Eigen::Matrix<double, 3, 4> model = report.model;
    LOG(INFO) << "# of Inliers in similarity transformation: " << report.support.num_inliers << std::endl;
    LOG(INFO) << "gps threshold for inlier determination: " << options.max_error << std::endl;
    LOG(INFO) << "Model: \n" << model << std::endl;

    // Residual statistics over all GPS correspondences (not only inliers).
    double sumRes = 0.0;
    std::vector<double> residuals;
    residuals.resize(calibGPS.size());
    for (size_t i = 0; i < src_mat.size(); ++i)
    {
        Eigen::Vector3d dst_transformed = model * src_mat[i].homogeneous();
        residuals[i] = (dst_mat[i] - dst_transformed).norm();
        sumRes += residuals[i];
    }
    std::sort(residuals.begin(), residuals.end());
    LOG(INFO) << "Median Error of Transformation: " << residuals[residuals.size() / 2] << std::endl;
    LOG(INFO) << "Mean Error of Transformation: " << sumRes / residuals.size() << std::endl;
    LOG(INFO) << "Max Error of Transformation: " << residuals[residuals.size() - 1] << std::endl;

    if(residuals[residuals.size() / 2] <= gps_error * 4.0 || skip_final_check) // options_.gpsPresion
    {
        // The linear part of the model is a scaled rotation; extract the
        // scale from a row norm and divide it out to get the pure rotation.
        // Both are constant over the loops below (they were previously
        // recomputed per image).
        const double Scale = model.block<1, 3>(0, 0).norm();
        Eigen::Matrix3d Rtrans = model.topLeftCorner<3, 3>();
        Rtrans *= 1.0 / Scale;

        for (size_t i = 0; i < calibImages.size(); ++i)
        {
            image_t indexI = calibImages[i];
            Eigen::Vector3d srcC = images_[indexI].ProjectionCenter();
            Eigen::Vector3d dstC = model * srcC.homogeneous();

            // Rotate the camera orientation into the GPS frame and rebuild
            // the translation from the transformed projection center.
            Eigen::Matrix3d Rsrc;
            double* Rsrc_ptr = &Rsrc(0, 0);
            ceres::AngleAxisToRotationMatrix(images_[indexI].AngleAxis().data(), Rsrc_ptr);

            Eigen::Matrix3d Rdst = Rsrc * Rtrans.transpose();
            images_[indexI].SetQvec(RotationMatrixToQuaternion(Rdst));
            images_[indexI].SetTvec(-1.0 * Rdst * dstC);

            double* Rdst_ptr = &Rdst(0, 0);
            RotationMatrixToAngleAxis(Rdst_ptr, images_[indexI].AngleAxis().data());
        }

        // Rotate the prior orientations of *all* images into the new frame.
        for(auto& img: images_)
        {
            Image& curImg = img.second;
            Eigen::Matrix3d Rprior = QuaternionToRotationMatrix(curImg.QvecPrior());
            Eigen::Matrix3d Rprior_new = Rprior * Rtrans.transpose();
            curImg.SetQvecPrior(RotationMatrixToQuaternion(Rprior_new));
        }

        // Map the scene points into the GPS frame.
        for(auto& tr : effTracks_){
            tr.second.SetXYZ(model * tr.second.XYZ().homogeneous());
        }
        return Eigen::Vector2d(sumRes / residuals.size(), residuals[residuals.size() / 2]);
    }
    else
        return Eigen::Vector2d(0, 0);
}

void SfMer::Normalize()
{
    std::unordered_map<class Image*, Eigen::Vector3d> proj_centers;
    for (size_t i = 0; i < calibImages.size(); ++i)
    {
        class Image& image = images_[calibImages[i]];
        const Eigen::Vector3d proj_center = image.ProjectionCenter();
        proj_centers[&image] = proj_center;
    }

    // Coordinates of image centers or point locations.
    std::vector<float> coords_x;
    std::vector<float> coords_y;
    std::vector<float> coords_z;
    coords_x.reserve(proj_centers.size());
    coords_y.reserve(proj_centers.size());
    coords_z.reserve(proj_centers.size());
    for (const auto& proj_center : proj_centers)
    {
        coords_x.push_back(static_cast<float>(proj_center.second(0)));
        coords_y.push_back(static_cast<float>(proj_center.second(1)));
        coords_z.push_back(static_cast<float>(proj_center.second(2)));
    }

    // Determine robust bounding box and mean.
    std::sort(coords_x.begin(), coords_x.end());
    std::sort(coords_y.begin(), coords_y.end());
    std::sort(coords_z.begin(), coords_z.end());

    const size_t P0 = static_cast<size_t>((coords_x.size() > 3) ? 0.1 * (coords_x.size() - 1) : 0);
    const size_t P1 = static_cast<size_t>((coords_x.size() > 3) ? 0.9 * (coords_x.size() - 1) : coords_x.size() - 1);

    const Eigen::Vector3d bbox_min(coords_x[P0], coords_y[P0], coords_z[P0]);
    const Eigen::Vector3d bbox_max(coords_x[P1], coords_y[P1], coords_z[P1]);

    Eigen::Vector3d mean_coord(0, 0, 0);
    for (size_t i = P0; i <= P1; ++i)
    {
        mean_coord(0) += coords_x[i];
        mean_coord(1) += coords_y[i];
        mean_coord(2) += coords_z[i];
    }
    mean_coord /= P1 - P0 + 1;

    // Calculate scale and translation, such that
    // translation is applied before scaling.
    const double old_extent = (bbox_max - bbox_min).norm();
    double scale;
    if (old_extent < std::numeric_limits<double>::epsilon())
    {
        scale = 1;
    }
    else
    {
        scale = 10.0 / old_extent;
    }

    const Eigen::Vector3d translation = mean_coord;

    // Transform images.
    for (auto& elem : proj_centers)
    {
        elem.second -= translation;
        elem.second *= scale;
        const Eigen::Quaterniond quat(elem.first->Qvec(0), elem.first->Qvec(1),
                                      elem.first->Qvec(2), elem.first->Qvec(3));
        elem.first->SetTvec(quat * -elem.second);
    }

    // Transform points.
    for (auto& point3D : effTracks_)
    {
        point3D.second.XYZ() -= translation;
        point3D.second.XYZ() *= scale;
    }
}

// Loads pre-calibrated camera poses (and optionally intrinsics) and marks the
// corresponding images as registered. Two sources are supported:
//  1. GNSS/IMU metadata carried in *imageInfos (when options_.GNSS_Flag);
//  2. a bundler-style extrinsic file (PreExtrinsic), an optional image-index
//     mapping file, and an optional per-image intrinsic file (PreIntrinsic).
// Returns true when a pre-calibration source was consumed, false otherwise.
bool SfMer::SetPrecalibration()
{
    if(options_.GNSS_Flag == true){
        int countPre = 0;
        // Mean position over the valid entries, used to re-center the
        // coordinates so the numbers stay small.
        Eigen::Vector3d mean(0,0,0);
        size_t n_images = (*imageInfos).size();
        int validNum = 0;
        for(auto& imgInfo : *imageInfos){
            // An entry is valid when it carries a nonzero position and a
            // full 3x3 rotation matrix (9 coefficients).
            if(imgInfo.cx != 0.0 && imgInfo.cy != 0.0 && imgInfo.cz != 0.0 && imgInfo.R.size() == 9){
                mean(0) += 1.0 / n_images * imgInfo.cx;
                mean(1) += 1.0 / n_images * imgInfo.cy;
                mean(2) += 1.0 / n_images * imgInfo.cz;
                validNum++;
            }
        }
        // The sums above were divided by n_images; rescale so the mean is
        // taken over the valid entries only.
        double s = (double)n_images / (double)validNum;
        mean *= s;

        for(auto& imgInfo : *imageInfos){
            // LOG(INFO) << imgInfo.ID << " " << indexID[imgInfo.ID] << " "
            //           << imgInfo.cx << " " << imgInfo.cy << " " << imgInfo.cz
            //           << " "
            //           << imgInfo.R.size()
            //           << std::endl;

            if(imgInfo.cx != 0.0 && imgInfo.cy != 0.0 && imgInfo.cz != 0.0 && imgInfo.R.size() == 9){
                Image& img = images_[indexID[imgInfo.ID]];
                img.SetRegistered(true);
                calibImages.push_back(img.ImageId());

                // Rebuild the 3x3 rotation matrix from the flat 9-vector
                // (row-major order).
                Eigen::Matrix3d Ri;
                Ri(0, 0) = imgInfo.R.at(0);
                Ri(0, 1) = imgInfo.R.at(1);
                Ri(0, 2) = imgInfo.R.at(2);
                Ri(1, 0) = imgInfo.R.at(3);
                Ri(1, 1) = imgInfo.R.at(4);
                Ri(1, 2) = imgInfo.R.at(5);
                Ri(2, 0) = imgInfo.R.at(6);
                Ri(2, 1) = imgInfo.R.at(7);
                Ri(2, 2) = imgInfo.R.at(8);
                img.SetQvec(RotationMatrixToQuaternion(Ri));
                //LOG(INFO) << Ri << std::endl;
                RotationMatrixToAngleAxis(&(Ri(0, 0)), &(img.angleAxis(0)));

                // Re-centered camera center; translation t = -R * C.
                Eigen::Vector3d Ci(imgInfo.cx - mean(0), imgInfo.cy - mean(1), imgInfo.cz - mean(2));
                Eigen::Vector3d Ti = -1.0*Ri*Ci;
                img.SetTvec(Ti);

                countPre++;
                preCam[imgInfo.ID] = true;
            }
        }
        (*imageInfos).clear();
        LOG(INFO) << countPre << " images are pre-calibrated" << std::endl;
        return true;
    }

    ifstream preCalib;
    preCalib.open(PreExtrinsic);

    ifstream preCalibK;
    preCalibK.open(PreIntrinsic);

    if(preCalib.is_open())
    {
        // Optional mapping from the index used in the pre-calibration file to
        // the internal image index; defaults to the identity mapping.
        ifstream indexCam;
        indexCam.open("indexPreCalibration.txt");
        std::map<int, int> indexPre;
        if(indexCam.is_open())
        {
            int indexI, indexJ;
            while(indexCam >> indexI >> indexJ)
            {
                indexPre[indexI] = indexJ;
            }
            LOG(INFO) << indexPre.size() << " images are index-calibrated" << std::endl;
            indexCam.close();
        }
        else
        {
            for(size_t i = 0; i < images_.size(); i++)
                indexPre[i] = i;
            LOG(INFO) << "Default: " << indexPre.size() << " images are index-calibrated" << std::endl;
        }

        // Skip the 4-token header line, then read the image/point counts.
        string ss;
        preCalib >> ss >> ss >> ss >> ss;

        int nImages, nPoints;
        preCalib >> nImages >> nPoints;
        Eigen::Matrix3d D = Eigen::Matrix3d::Identity(); /// transform to the world-to-camera coordinate system
        D(1, 1) = -1;
        D(2, 2) = -1;
        int countPre = 0;
        for(int i = 0; i < nImages; i++)
        {
            // Per image: focal + 2 distortion coefficients, then R (3 rows)
            // and t. Intrinsics read here are discarded (see preCalibK below).
            Eigen::Matrix3d Ri;
            Eigen::Vector3d Ti;
            double focal, k1, k2;
            preCalib >> focal >> k1 >> k2;
            preCalib >> Ri(0, 0) >> Ri(0, 1) >> Ri(0, 2);
            preCalib >> Ri(1, 0) >> Ri(1, 1) >> Ri(1, 2);
            preCalib >> Ri(2, 0) >> Ri(2, 1) >> Ri(2, 2);
            preCalib >> Ti(0) >> Ti(1) >> Ti(2);

            Ri = D * Ri;
            Ti = D * Ti;

            // A zero translation marks an uncalibrated entry in the file.
            if(indexPre.count(i) > 0 && Ti.norm() != 0)
            {
                Image& img = images_[indexPre[i]];
                img.SetRegistered(true);
                calibImages.push_back(img.ImageId());
                img.SetQvec(RotationMatrixToQuaternion(Ri));
                img.SetTvec(Ti);
                RotationMatrixToAngleAxis(&(Ri(0, 0)), &(img.angleAxis(0)));
                countPre++;
                preCam[indexPre[i]] = true;
            }
        }
        preCalib.close();

        // Optional per-image intrinsics: id (1-based), focal, principal
        // point, two radial distortion coefficients.
        if(preCalibK.is_open())
        {
            int imgid;
            double focal, k1, k2, px, py;
            while(preCalibK >> imgid >> focal >> px >> py >> k1 >> k2)
            {
                Image& img = images_[imgid - 1];
                Camera& cam = cameras_[img.CameraId()];
                std::vector<double> camInfo;
                camInfo.push_back(focal);
                camInfo.push_back(px);
                camInfo.push_back(py);
                camInfo.push_back(k1);
                camInfo.push_back(k2);
                cam.SetParams(camInfo);
            }
            preCalibK.close();
        }

        LOG(INFO) << countPre << " images are pre-calibrated" << std::endl;
        return true;
    }
    else
    {
        return false;
    }
}

void SfMer::ExportReport()
{
    std::ofstream sta_result;
    sta_result.open(SfMDir + "/report.txt");
    auto t = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
    sta_result << "Number of photos: " << images_.size() << std::endl;

    if(imgGPS.size() >= 3){
        sta_result << "Mean of GPS: " << std::setprecision(10) << imgGPSMean(0) << " " << imgGPSMean(1) << " " << imgGPSMean(2) << std::endl;
    }

    sta_result << "Processing Date: " << std::put_time(std::localtime(&t), "%Y-%m-%d %X") << std::endl;
    sta_result << "Processing time: " << (int)(timeCost / 3600) << "h" << (timeCost - (int)(timeCost / 3600) * 3600) / 60 << "mins" << std::endl;
        
    sta_result << "Dataset: " << calibImages.size() << " out of " << images_.size() << " photos calibrated (" 
                   << (double)(calibImages.size()) / images_.size() * 100 << "%)" << std::endl; 

    std::vector<int> keypoints_num;
    std::vector<int> tiepoints_num;
    std::vector<int> tracks_num;
    for(auto& eid : effTracks_){
        tracks_num.push_back(tracks_[eid.first].view2D.size());
    }
    for(auto& img : images_){
        keypoints_num.push_back(img.second.NumPoints2D());
        tiepoints_num.push_back(img.second.NumPoints3D());
    }
    std::sort(keypoints_num.begin(), keypoints_num.end());
    std::sort(tiepoints_num.begin(), tiepoints_num.end());
    std::sort(tracks_num.begin(), tracks_num.end());
    sta_result << "Keypoints: Median of " << keypoints_num[keypoints_num.size() / 2] << " keypoints per image" << std::endl;
    sta_result << "Tie points: " << effTracks_.size() << " points, with a median of " << tiepoints_num[tiepoints_num.size() / 2] << " keypoints per photo" << std::endl;
    sta_result << "Reprojection error(RMS): " << repro_RMS << " pixels" << std::endl;

    std::map<camera_t, std::vector<std::pair<double, double>>> camera_params;
    for(auto& img : images_){
        camera_t camid = img.second.CameraId();
        Camera& cam = cameras_[camid];
        camera_params[camid].push_back(std::make_pair((*imageInfos)[img.second.ImageId()].fx, cam.FocalLength()));
        camera_params[camid].push_back(std::make_pair(cam.Width() / 2.0, cam.PrincipalPointX()));
        camera_params[camid].push_back(std::make_pair(cam.Height() / 2.0, cam.PrincipalPointY()));
        camera_params[camid].push_back(std::make_pair((*imageInfos)[img.second.ImageId()].k1, cam.Params(3)));
        camera_params[camid].push_back(std::make_pair((*imageInfos)[img.second.ImageId()].k2, cam.Params(4)));
    }
    
    sta_result << "Camera calibration result:" << std::endl;
    for(auto& cam : camera_params){
        camera_t camid = cam.first;
        std::vector<std::pair<double, double>>& params = cam.second;
        Camera& camC = cameras_[camid];
        sta_result << "Camera: " << camid << std::endl;
        sta_result << "Image Dimensions: " << camC.Width() << "x" << camC.Height() << " pixels" << std::endl;
        sta_result << "FocalLength(previous, optimized, difference): " << params[0].first << " " << params[0].second << " " << params[0].second - params[0].first << std::endl;
        sta_result << "PrincipalPointX(previous, optimized, difference): " << params[1].first << " " << params[1].second << " " << params[1].second - params[1].first << std::endl;
        sta_result << "PrincipalPointY(previous, optimized, difference): " << params[2].first << " " << params[2].second << " " << params[2].second - params[2].first << std::endl;
        sta_result << "K1(previous, optimized, difference): " << params[3].first << " " << params[3].second << " " << params[3].second - params[3].first << std::endl;
        sta_result << "K2(previous, optimized, difference): " << params[4].first << " " << params[4].second << " " << params[4].second - params[4].first << std::endl;
    }

    if (imgGPS.size() > 0)
    {
        sta_result << "Difference of input and output camera positions:" << std::endl;
        std::vector<double> dis_gps;
        for (auto &img : imgGPS)
        {
            image_t imgid = img.first;
            Eigen::Vector3d imggps = img.second;
            Image &imgC = images_[imgid];

            double gps_error = (imgC.ProjectionCenter() - imggps).norm();
            dis_gps.push_back(gps_error);

            sta_result << imggps[0] << " " << imggps[1] << " " << imggps[2] << " "
                       << imgC.ProjectionCenter()[0] << " " << imgC.ProjectionCenter()[1] << " " << imgC.ProjectionCenter()[2]
                       << " " << gps_error << std::endl;
        }
        std::sort(dis_gps.begin(), dis_gps.end());
        sta_result << "minimum distance: " << dis_gps[0] << " meters, maximum distance: " << dis_gps[dis_gps.size() - 1]
                   << " meters, median distance: " << dis_gps[dis_gps.size() / 2]
                   << " meters" << std::endl;
    }

    sta_result << "Tie points result:" << std::endl;
    sta_result << "Number of points: " << effTracks_.size() << std::endl;
    sta_result << "Median number of points per photo: " << tiepoints_num[tiepoints_num.size() / 2] << std::endl;
    sta_result << "Median number of photos per point: " << tracks_num[tracks_num.size() / 2] << std::endl;
    sta_result << "Median Reprojection Error: " << repro_median << std::endl;
    sta_result << "RMS of Rprojection Error: " << repro_RMS << std::endl;
    sta_result << "Location(x, y, z),  # of photos, reprojection error" << std::endl;
    for(auto& eid : effTracks_){
        sta_result << eid.second.X() << " " << eid.second.Y() << " " << eid.second.Z() << " " << tracks_[eid.first].view2D.size() << " " << points_track_error[eid.first] << std::endl;
    }
    sta_result.close();
}

/// Writes the current reconstruction to disk.
/// @param iteration  -1: registration snapshot (Reg/SfM_Reg_<N>.{ply,out});
///                   -2: final result (Final/SfM_Final.*);
///                   otherwise: intermediate output named after the iteration
///                   number and the current number of calibrated images.
void SfMer::ExportResult(const int iteration)
{
    if(iteration == -1)
    {
        // snprintf (instead of sprintf) guards against buffer overflow.
        char RegName[128], RegOut[128];
        snprintf(RegName, sizeof(RegName), "/Reg/SfM_Reg_%d.ply", (int)calibImages.size());
        snprintf(RegOut, sizeof(RegOut), "/Reg/SfM_Reg_%d.out", (int)calibImages.size());
        ExportPLYWithCameraPose(SfMDir + RegName);
        ExportCHN(SfMDir + RegOut, SfMDir + "/Reg/SfM_Reg.list", false);
        return;
    }

    if(iteration == -2)
    {
        ExportPLYWithCameraPose(SfMDir + "/Final/SfM_Final.ply");
        ExportCHN(SfMDir + "/Final/SfM_Final.out", SfMDir + "/Final/SfM_Final.list", false);
        return;
    }

    char plyName[128], outName[128];
    snprintf(plyName, sizeof(plyName), "/Reg/_%02d_%02d.ply", iteration, (int)calibImages.size());
    snprintf(outName, sizeof(outName), "/Reg/_%02d_%02d.out", iteration, (int)calibImages.size());
    ExportPLYWithCameraPose(SfMDir + plyName);
    // The list name is constant; no formatting buffer needed.
    ExportCHN(SfMDir + outName, SfMDir + "/Reg/SfM_Add.list", true);
}

/// Writes the scene points plus the calibrated camera centers (green) to an
/// ASCII PLY file. Outside the final pass (global_iteration != -1) only points
/// seen by at least three views (trackEffV >= 3) are exported.
void SfMer::ExportPLYWithCameraPose(const std::string& path)
{
    std::ofstream fileX(path.c_str(), std::ios::trunc);
    CHECK(fileX.is_open());

    fileX << "ply" << std::endl;
    fileX << "format ascii 1.0" << std::endl;

    // Count the points that will actually be written so the header's vertex
    // count matches the body.
    size_t countID;
    if(global_iteration != -1)
    {
        countID = 0;
        for (const auto& point3D : effTracks_)
        {
            if(trackEffV[point3D.first] >= 3)
                countID++;
        }
    }
    else
    {
        countID = effTracks_.size();
    }

    fileX << "element vertex " << calibImages.size() + countID << std::endl;
    fileX << "property float x" << std::endl;
    fileX << "property float y" << std::endl;
    fileX << "property float z" << std::endl;
    // "uchar" is the standard PLY name for 8-bit properties; the previous
    // "uint8" is a non-standard alias rejected by some readers and was
    // inconsistent with ExportPLY().
    fileX << "property uchar red" << std::endl;
    fileX << "property uchar green" << std::endl;
    fileX << "property uchar blue" << std::endl;
    fileX << "end_header" << std::endl;

    // Point vertices with their track colors; Length accumulates the total
    // track length for the average-length log message below.
    size_t Length = 0;
    for (const auto& point3D : effTracks_)
    {
        Length += tracks_[point3D.first].view2D.size();
        if(global_iteration == -1 || trackEffV[point3D.first] >= 3)
        {
            fileX << point3D.second.X() << " ";
            fileX << point3D.second.Y() << " ";
            fileX << point3D.second.Z() << " ";

            Eigen::Vector3ub& color = tracks_color[point3D.first];
            fileX << static_cast<int>(color(0)) << " ";
            fileX << static_cast<int>(color(1)) << " ";
            fileX << static_cast<int>(color(2)) << std::endl;
        }
    }

    // Camera centers C = -R^T * t, colored green.
    for (int iM = 0; iM < static_cast<int>(calibImages.size()); iM++)
    {
        Eigen::Matrix3d R = RVG::QuaternionToRotationMatrix(images_[calibImages[iM]].Qvec());
        Eigen::Vector3d T = images_[calibImages[iM]].Tvec();
        Eigen::Vector3d C = -R.transpose() * T;

        fileX << C(0) << " ";
        fileX << C(1) << " ";
        fileX << C(2) << " ";
        fileX << 0 << " " << 255 << " " << 0 << std::endl;
    }

    if(effTracks_.empty())
        LOG(INFO) << "Average length of the selected tracks is: " << 0 << std::endl;
    else
        LOG(INFO) << "Average length of the selected tracks is: " << (double)Length / (double)effTracks_.size() << std::endl;
    fileX.close();
}

/// Writes all scene points plus the calibrated camera centers (green) to an
/// ASCII PLY file at the given path.
void SfMer::ExportPLY(const std::string& path)
{
    std::ofstream file(path.c_str(), std::ios::trunc);
    CHECK(file.is_open());

    file << "ply" << std::endl;
    file << "format ascii 1.0" << std::endl;
    file << "element vertex " << effTracks_.size() + calibImages.size() << std::endl;
    file << "property float x" << std::endl;
    file << "property float y" << std::endl;
    file << "property float z" << std::endl;
    file << "property uchar red" << std::endl;
    file << "property uchar green" << std::endl;
    file << "property uchar blue" << std::endl;
    file << "end_header" << std::endl;

    // Point vertices with their track colors; Length accumulates the total
    // track length for the average-length log message.
    size_t Length = 0;
    for (const auto& point3D : effTracks_)
    {
        Length += tracks_[point3D.first].view2D.size();
        file << point3D.second.X() << " ";
        file << point3D.second.Y() << " ";
        file << point3D.second.Z() << " ";
        Eigen::Vector3ub& color = tracks_color[point3D.first];
        file << static_cast<int>(color(0)) << " ";
        file << static_cast<int>(color(1)) << " ";
        file << static_cast<int>(color(2)) << std::endl;
    }

    if(effTracks_.empty())
        LOG(INFO) << "Average length of the selected tracks is: " << 0 << std::endl;
    else
        LOG(INFO) << "Average length of the selected tracks is: " << (double)Length / (double)effTracks_.size() << std::endl;

    // Camera centers C = -R^T * t, colored green. These must be written even
    // when there are no points: the header above already promised
    // calibImages.size() extra vertices, and the original early return in the
    // empty case produced a malformed PLY (declared vertices never written).
    for (int iM = 0; iM < static_cast<int>(calibImages.size()); iM++)
    {
        Eigen::Matrix3d R = RVG::QuaternionToRotationMatrix(images_[calibImages[iM]].Qvec());
        Eigen::Vector3d T = images_[calibImages[iM]].Tvec();
        Eigen::Vector3d C = -R.transpose() * T;
        file << C(0) << " ";
        file << C(1) << " ";
        file << C(2) << " ";
        file << static_cast<int>(0) << " ";
        file << static_cast<int>(255) << " ";
        file << static_cast<int>(0) << std::endl;
    }

    file << std::endl;
    file.close();
}

void SfMer::ExportCHN(const std::string& path, const std::string& list_path, bool pointFlag)
{
    // Export the reconstruction in Bundler v0.3 layout:
    //   - `path`       : cameras (f, k1, k2 / R / t) followed by the points,
    //   - `list_path`  : "<image name> <registered 0|1>" per image,
    //   - SfMDir/cameras.txt : raw per-camera intrinsic parameters.
    // When `pointFlag` is true, only the camera section is written and the
    // function returns before the point section.
    std::ofstream file(path.c_str(), std::ios::trunc);
    CHECK(file.is_open());

    std::ofstream list_file(list_path.c_str(), std::ios::trunc);
    CHECK(list_file.is_open());

    file << "# Bundle file v0.3" << std::endl;
    file << images_.size() << " " << effTracks_.size() << std::endl;

    string cameraIntrinsic = SfMDir + "/cameras.txt";
    std::ofstream cfile(cameraIntrinsic.c_str(), std::ios::trunc);
    CHECK(cfile.is_open());

    // global_iteration == -1 appears to mark the final pass, where the
    // per-image metadata is additionally filled into *imageInfos.
    if (global_iteration == -1)
        (*imageInfos).resize(images_.size());

    for (int image_id = 0; image_id < (int)images_.size(); image_id++)
    {
        const class Image& image = images_[image_id];
        const class Camera& camera = cameras_[image.CameraId()];

        // cameras.txt always receives the raw parameter vector, whether or
        // not the image is registered.
        cfile << oriID[image_id] << " " << camera.Params(0) << " " << camera.Params(1) << " " << camera.Params(2)
              << " " << camera.Params(3) << " " << camera.Params(4) << std::endl;

        if (!image.IsRegistered())
        {
            // Unregistered images get an all-zero camera block so that the
            // file still contains images_.size() entries, as promised above.
            file << 0.0 << " " << 0.0 << " " << 0.0 << std::endl;
            file << 0.0 << " " << 0.0 << " " << 0.0 << std::endl;
            file << 0.0 << " " << 0.0 << " " << 0.0 << std::endl;
            file << 0.0 << " " << 0.0 << " " << 0.0 << std::endl;
            file << 0.0 << " " << 0.0 << " " << 0.0 << std::endl;
            list_file << image.Name() << " " << (int)(0) << std::endl;
            if (global_iteration == -1)
            {
                (*imageInfos)[image_id].ID = oriID[image.ImageId()];
                (*imageInfos)[image_id].valid = 0;
                (*imageInfos)[image_id].fx = camera.Params(0);
                (*imageInfos)[image_id].fy = camera.Params(0);
                (*imageInfos)[image_id].u = camera.Params(1);
                (*imageInfos)[image_id].v = camera.Params(2);
                (*imageInfos)[image_id].k1 = camera.Params(3);
                (*imageInfos)[image_id].k2 = camera.Params(4);
            }
            continue;
        }

        // Initialize with safe defaults so an unexpected camera model can
        // never leave these values uninitialized (the original chain had no
        // final `else`, which made reading f/k1/k2 undefined behavior for
        // any other model).
        double f = camera.MeanFocalLength();
        double k1 = 0.0;
        double k2 = 0.0;
        if (camera.ModelId() == SimplePinholeCameraModel::model_id ||
                camera.ModelId() == PinholeCameraModel::model_id)
        {
            f = camera.MeanFocalLength();
            k1 = 0.0;
            k2 = 0.0;
        }
        else if (camera.ModelId() == SimpleRadialCameraModel::model_id)
        {
            f = camera.Params(SimpleRadialCameraModel::focal_length_idxs[0]);
            k1 = camera.Params(SimpleRadialCameraModel::extra_params_idxs[0]);
            k2 = 0.0;
        }
        else if (camera.ModelId() == RadialCameraModel::model_id)
        {
            f = camera.Params(RadialCameraModel::focal_length_idxs[0]);
            k1 = camera.Params(RadialCameraModel::extra_params_idxs[0]);
            k2 = camera.Params(RadialCameraModel::extra_params_idxs[1]);
        }

        file << f << " " << k1 << " " << k2 << std::endl;

        // Bundler's camera convention has y up and z pointing toward the
        // viewer, hence the sign flips on the second/third rows of R and on
        // t_y/t_z.
        const Eigen::Matrix3d R = image.RotationMatrix();
        file << R(0, 0) << " " << R(0, 1) << " " << R(0, 2) << std::endl;
        file << -R(1, 0) << " " << -R(1, 1) << " " << -R(1, 2) << std::endl;
        file << -R(2, 0) << " " << -R(2, 1) << " " << -R(2, 2) << std::endl;

        file << image.Tvec(0) << " ";
        file << -image.Tvec(1) << " ";
        file << -image.Tvec(2) << std::endl;

        list_file << image.Name() << " " << (int)(1) << std::endl;

        if (global_iteration == -1)
        {
            (*imageInfos)[image_id].ID = oriID[image.ImageId()];
            (*imageInfos)[image_id].valid = 1;
            (*imageInfos)[image_id].fx = camera.Params(0);
            (*imageInfos)[image_id].fy = camera.Params(0);
            (*imageInfos)[image_id].u = camera.Params(1);
            (*imageInfos)[image_id].v = camera.Params(2);
            (*imageInfos)[image_id].k1 = camera.Params(3);
            (*imageInfos)[image_id].k2 = camera.Params(4);
            (*imageInfos)[image_id].cx = image.ProjectionCenter()(0);
            (*imageInfos)[image_id].cy = image.ProjectionCenter()(1);
            (*imageInfos)[image_id].cz = image.ProjectionCenter()(2);
            // Row-major flatten of the (un-flipped) rotation matrix.
            for (int i = 0; i < 3; i++)
                for (int j = 0; j < 3; j++)
                    (*imageInfos)[image_id].R.push_back(R(i, j));
        }
    }
    cfile.close();

    if (pointFlag)
    {
        file.close();
        list_file.close();
        return;
    }

    // Rebuild the sparse point buffers from scratch. `tracks` must be
    // cleared together with the others: it is filled pairwise with `views`
    // below, and leaving stale entries would desynchronize the two arrays
    // on repeated calls (the original code omitted this clear).
    sparsePoints->Xs.clear();
    sparsePoints->Xnormals.clear();
    sparsePoints->Colors.clear();
    sparsePoints->views.clear();
    sparsePoints->tracks.clear();
    std::vector<Eigen::Vector3f>& Xs = sparsePoints->Xs;
    std::vector<Eigen::Vector3f>& Xnormals = sparsePoints->Xnormals;
    std::vector<Eigen::Vector3f>& Colors = sparsePoints->Colors;
    std::vector<unsigned int>& views = sparsePoints->views;
    std::vector<unsigned int>& tracks = sparsePoints->tracks;
    for (const auto& point3D : effTracks_)
    {
        file << point3D.second.XYZ()(0) << " ";
        file << point3D.second.XYZ()(1) << " ";
        file << point3D.second.XYZ()(2) << std::endl;
        Xs.push_back(Eigen::Vector3f(point3D.second.XYZ()(0), point3D.second.XYZ()(1), point3D.second.XYZ()(2)));
        //Xnormals.push_back(Eigen::Vector3f(0.0, 0.0, 0.0));

        std::ostringstream line;
        track& curT = tracks_[point3D.first];

        // First pass: count the registered views observing this track.
        int countView = 0;
        std::vector<unsigned int> visViews;
        std::vector<unsigned int> visTracks;
        Eigen::Vector3ub color = tracks_color[point3D.first];
        Colors.push_back(Eigen::Vector3f(color(0), color(1), color(2)));

        for (const auto& track_el : curT.view2D)
        {
            const class Image& image = images_[track_el.first];
            if (!image.registered_)
                continue;
            else
            {
                countView++;
                visViews.push_back(image.image_id_);
                visTracks.push_back(track_el.second);
            }
        }
        if (global_iteration == -1)
        {
            // Length-prefixed layout: count, then the ids, in both arrays.
            views.push_back(countView);
            tracks.push_back(countView);
            for (auto& v : visViews)
            {
                views.push_back(oriID[v]);
            }
            for (auto& t : visTracks)
            {
                tracks.push_back(t);
            }
        }

        file << static_cast<int>(color(0)) << " ";
        file << static_cast<int>(color(1)) << " ";
        file << static_cast<int>(color(2)) << std::endl;
        line << countView << " ";

        // Second pass: emit the per-view observations.
        for (const auto& track_el : curT.view2D)
        {
            const Image& image = images_[track_el.first];
            const Camera& camera = cameras_[image.CameraId()];

            if (!image.registered_)
                continue;
            // Bundler output assumes image coordinate system origin
            // in the lower left corner of the image with the center of
            // the lower left pixel being (0, 0). Our coordinate system
            // starts in the upper left corner with the center of the
            // upper left pixel being (0.5, 0.5).

            const Point2D& point2D = image.Point2D(track_el.second);

            line << track_el.first << " ";
            line << track_el.second << " ";
            line << point2D.X() - camera.Params(1) << " ";
            line << camera.Params(2) - point2D.Y() << " ";
        }

        // Drop the trailing separator before writing the line (countView is
        // always emitted first, so the buffer is never empty here).
        std::string line_string = line.str();
        line_string = line_string.substr(0, line_string.size() - 1);

        file << line_string << std::endl;
    }
    file.close();
    list_file.close();
}

void SfMer::ExportRunningTimes()
{
    string timesFileName("Running-times.txt");
    timesFileName = SfMDir + "/" + timesFileName;

    ofstream times;
    times.open(timesFileName);
    
    timeCost = 0.0;
    for(auto& rt : runningTimes)
    {
        times << rt.first << " : " << rt.second << " [seconds]" << std::endl;
        timeCost += rt.second;
    }
    times << "Time-cost of SfM: " << timeCost << " [seconds]" << std::endl;
    times.close();
}

}// RVG namespace
#endif // SFMER_H_INCLUDED
