#include "Tracking.h"
#include <opencv2/core/core.hpp>
#include <chrono>

#include "g2o/g2o/core/block_solver.h"
#include "g2o/g2o/core/optimization_algorithm_levenberg.h"
#include "g2o/g2o/core/optimization_algorithm_gauss_newton.h"
#include "g2o/g2o/core/optimization_algorithm_dogleg.h"
#include "g2o/g2o/solvers/linear_solver_eigen.h"
#include "g2o/g2o/types/types_six_dof_expmap.h"
#include "g2o/g2o/core/robust_kernel_impl.h"
#include "g2o/g2o/solvers/linear_solver_dense.h"
#include "g2o/g2o/types/types_seven_dof_expmap.h"

#include "Converter.h"


#define 	TH_HIGH    500


using namespace std;
using namespace cv;


// Destructor. Nothing to release here: the map, drawers, viewer and local
// mapper are owned and destroyed by the caller that wired them in.
Tracking::~Tracking()
{

}


// Construct the tracker.
// @param pMap        global map (not owned; must outlive this Tracking).
// @param pMapDrawer  drawer used to publish the current camera pose (not owned).
// m_MinFrames/m_MaxFrames bound how often new keyframes may be inserted
// (see NeedNewKeyFrame conditions c1a/c1b).
Tracking::Tracking(Map* pMap, MapDrawer* pMapDrawer):
	m_Map(pMap),
	m_MinFrames(0),
	m_MaxFrames(20),
	m_MapDrawer(pMapDrawer),
	m_Viewer(NULL)
{

}

static int test_count = 0;   // retained: counter for (currently disabled) debug dumps

/**
 * @brief Match the keypoints of a keyframe against a frame by brute-force
 *        descriptor matching, then keep only the matches that are inliers
 *        of a RANSAC-estimated homography.
 *
 * @param pKF                keyframe providing map points and descriptors.
 * @param F                  current frame to match against.
 * @param vpMapPointMatches  output, sized to F.m_keyPointsNumber; entry i is
 *                           the matched MapPoint for frame keypoint i, or NULL.
 * @return number of accepted (inlier, non-bad) map-point matches.
 */
int Tracking::SearchByBruteForce(KeyFrame *pKF, Frame &F, std::vector<MapPoint*> &vpMapPointMatches)
{
	int nmatches = 0;
	const vector<MapPoint*> vpMapPointsKF = pKF->GetMapPointMatches();

	vpMapPointMatches = vector<MapPoint*>(F.m_keyPointsNumber, static_cast<MapPoint*>(NULL));

	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
	std::vector<DMatch> matches;

	matcher->match(pKF->m_Descriptors, F.m_DescriptorsLeft, matches);

	// Collect the matched pixel coordinates for the homography fit.
	vector<Point2f> KFPoint, FPoint;
	KFPoint.reserve(matches.size());
	FPoint.reserve(matches.size());
	for(size_t i=0; i<matches.size(); i++)
	{
		KFPoint.push_back(pKF->m_vectorKeyPoints[matches[i].queryIdx].pt);
		FPoint.push_back(F.m_vectorKeyPointsLeft[matches[i].trainIdx].pt);
	}

	// cv::findHomography requires at least 4 point pairs and throws otherwise;
	// the old code called it unconditionally and could crash on sparse matches.
	if(KFPoint.size() < 4)
	{
		return 0;
	}

	vector<char> inLiners;
	Mat homography = findHomography(KFPoint, FPoint, inLiners, RANSAC, 30.0);

	if(homography.empty())
	{
		return 0;
	}

	// Accept only matches marked as homography inliers whose keyframe
	// keypoint is associated with a valid (non-bad) map point.
	for(size_t i=0; i<KFPoint.size(); i++)
	{
		if(inLiners[i] != 0)
		{
			MapPoint* pMP = vpMapPointsKF[matches[i].queryIdx];

			if(!pMP)
				continue;

			if(pMP->isBadMapPoint())
				continue;

			vpMapPointMatches[matches[i].trainIdx] = pMP;
			nmatches++;
		}
	}

	return nmatches;
}

// Optimize the pose of a single frame (motion-only bundle adjustment).
// Builds a g2o graph with one SE3 vertex (the frame pose) and one unary
// reprojection edge per matched map point (monocular or stereo), then runs
// four Levenberg-Marquardt rounds, re-classifying edges as inlier/outlier
// after each round via a chi-square test.
// @param pFrame  frame whose m_RTcw is used as the initial estimate and is
//                overwritten with the optimized pose (via SetPose).
// @return number of inlier correspondences after the last round, or 0 if
//         there were fewer than 3 correspondences to begin with.
int Tracking::PoseOptimization(Frame *pFrame)
{
    // g2o ownership chain: the optimizer owns the algorithm, which owns the
    // block solver, which owns the linear solver — no manual deletes needed.
    g2o::SparseOptimizer optimizer;
    g2o::BlockSolver_6_3::LinearSolverType * linearSolver;

    linearSolver = new g2o::LinearSolverDense<g2o::BlockSolver_6_3::PoseMatrixType>();

    g2o::BlockSolver_6_3 * solver_ptr = new g2o::BlockSolver_6_3(linearSolver);

    g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
    optimizer.setAlgorithm(solver);

    int nInitialCorrespondences=0;

    // Set Frame vertex
    g2o::VertexSE3Expmap * vSE3 = new g2o::VertexSE3Expmap();
    vSE3->setEstimate(Converter::toSE3Quat(pFrame->m_RTcw));
    vSE3->setId(0);
    vSE3->setFixed(false);
    optimizer.addVertex(vSE3);

    // Set MapPoint vertices
    const int N = pFrame->m_keyPointsNumber;

    vector<g2o::EdgeSE3ProjectXYZOnlyPose*> vpEdgesMono;
    vector<size_t> vnIndexEdgeMono;
    vpEdgesMono.reserve(N);
    vnIndexEdgeMono.reserve(N);

    vector<g2o::EdgeStereoSE3ProjectXYZOnlyPose*> vpEdgesStereo;
    vector<size_t> vnIndexEdgeStereo;
    vpEdgesStereo.reserve(N);
    vnIndexEdgeStereo.reserve(N);

    // Huber kernel deltas: sqrt of the chi-square 95% thresholds for
    // 2 DOF (mono) and 3 DOF (stereo) residuals.
    const float deltaMono = sqrt(5.991);
    const float deltaStereo = sqrt(7.815);


    {
    // Map points may be modified concurrently; hold the global map-point
    // mutex while reading their world positions.
    unique_lock<mutex> lock(MapPoint::g_GlobalMutex);

    for(int i=0; i<N; i++)
    {
        MapPoint* pMP = pFrame->m_vectorMapPoints[i];
        if(pMP)
        {
            // Monocular observation
            // (no valid right-image coordinate for this keypoint)
            if(pFrame->m_vectorURight[i]<0)
            {
                nInitialCorrespondences++;
                pFrame->m_vectorOutlier[i] = false;

                Eigen::Matrix<double,2,1> obs;
                const cv::KeyPoint &kpUn = pFrame->m_vectorKeyPointsLeftUn[i];
                obs << kpUn.pt.x, kpUn.pt.y;

                g2o::EdgeSE3ProjectXYZOnlyPose* e = new g2o::EdgeSE3ProjectXYZOnlyPose();

                e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0)));
                e->setMeasurement(obs);
                // NOTE(review): fixed information weight; upstream ORB-SLAM
                // uses the per-octave inverse sigma here — confirm intent.
                const float invSigma2 = 0.69f;
                e->setInformation(Eigen::Matrix2d::Identity()*invSigma2);

                g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
                e->setRobustKernel(rk);
                rk->setDelta(deltaMono);

                e->fx = pFrame->m_fx;
                e->fy = pFrame->m_fy;
                e->cx = pFrame->m_cx;
                e->cy = pFrame->m_cy;
                cv::Mat Xw = pMP->GetWorldPos();
                e->Xw[0] = Xw.at<float>(0);
                e->Xw[1] = Xw.at<float>(1);
                e->Xw[2] = Xw.at<float>(2);

				//printf("Monocular x:%f, y:%f, z:%f\n", e->Xw[0], e->Xw[1], e->Xw[2]);

                optimizer.addEdge(e);

                vpEdgesMono.push_back(e);
                vnIndexEdgeMono.push_back(i);
            }
            else  // Stereo observation
            {
                nInitialCorrespondences++;
                pFrame->m_vectorOutlier[i] = false;

                //SET EDGE
                // 3D measurement: left pixel (u,v) plus right-image u coordinate.
                Eigen::Matrix<double,3,1> obs;
                const cv::KeyPoint &kpUn = pFrame->m_vectorKeyPointsLeftUn[i];
                const float &kp_ur = pFrame->m_vectorURight[i];
                obs << kpUn.pt.x, kpUn.pt.y, kp_ur;

                g2o::EdgeStereoSE3ProjectXYZOnlyPose* e = new g2o::EdgeStereoSE3ProjectXYZOnlyPose();

                e->setVertex(0, dynamic_cast<g2o::OptimizableGraph::Vertex*>(optimizer.vertex(0)));
                e->setMeasurement(obs);
                // NOTE(review): same fixed weight as the mono case — confirm.
                const float invSigma2 = 0.69f;
                Eigen::Matrix3d Info = Eigen::Matrix3d::Identity()*invSigma2;
                e->setInformation(Info);

                g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
                e->setRobustKernel(rk);
                rk->setDelta(deltaStereo);

                e->fx = pFrame->m_fx;
                e->fy = pFrame->m_fy;
                e->cx = pFrame->m_cx;
                e->cy = pFrame->m_cy;
                e->bf = pFrame->m_baselineFx;
                cv::Mat Xw = pMP->GetWorldPos();
                e->Xw[0] = Xw.at<float>(0);
                e->Xw[1] = Xw.at<float>(1);
                e->Xw[2] = Xw.at<float>(2);

				//printf("Stereo x:%f, y:%f, z:%f\n", e->Xw[0], e->Xw[1], e->Xw[2]);

                optimizer.addEdge(e);

                vpEdgesStereo.push_back(e);
                vnIndexEdgeStereo.push_back(i);
            }
        }

    }
    }


    // A pose cannot be constrained by fewer than 3 correspondences.
    if(nInitialCorrespondences<3)
        return 0;

    // We perform 4 optimizations, after each optimization we classify observation as inlier/outlier
    // At the next optimization, outliers are not included, but at the end they can be classified as inliers again.
    const float chi2Mono[4]={5.991,5.991,5.991,5.991};
    const float chi2Stereo[4]={7.815,7.815,7.815, 7.815};
    const int its[4]={10,10,10,10};    

    int nBad=0;
    for(size_t it=0; it<4; it++)
    {

        // Each round restarts from the frame's current pose estimate and
        // optimizes only level-0 edges (outliers were pushed to level 1).
        vSE3->setEstimate(Converter::toSE3Quat(pFrame->m_RTcw));
        optimizer.initializeOptimization(0);
        int optRet = optimizer.optimize(its[it]);

		//cout << "optimizer return :" << optRet << endl;

        nBad=0;
        for(size_t i=0, iend=vpEdgesMono.size(); i<iend; i++)
        {
            g2o::EdgeSE3ProjectXYZOnlyPose* e = vpEdgesMono[i];

            const size_t idx = vnIndexEdgeMono[i];

            // Outlier edges were excluded from the last round, so their
            // error is stale; recompute before re-testing them.
            if(pFrame->m_vectorOutlier[idx])
            {
                e->computeError();
            }

            const float chi2 = e->chi2();

            if(chi2>chi2Mono[it])
            {                
                pFrame->m_vectorOutlier[idx]=true;
                e->setLevel(1);
                nBad++;
            }
            else
            {
                pFrame->m_vectorOutlier[idx]=false;
                e->setLevel(0);
            }

            // Final round runs without robust kernels for a sharper estimate.
            if(it==2)
                e->setRobustKernel(0);
        }

        for(size_t i=0, iend=vpEdgesStereo.size(); i<iend; i++)
        {
            g2o::EdgeStereoSE3ProjectXYZOnlyPose* e = vpEdgesStereo[i];

            const size_t idx = vnIndexEdgeStereo[i];

            if(pFrame->m_vectorOutlier[idx])
            {
                e->computeError();
            }

            const float chi2 = e->chi2();

            if(chi2>chi2Stereo[it])
            {
                pFrame->m_vectorOutlier[idx]=true;
                e->setLevel(1);
                nBad++;
            }
            else
            {                
                e->setLevel(0);
                pFrame->m_vectorOutlier[idx]=false;
            }

            if(it==2)
                e->setRobustKernel(0);
        }

        // Too few edges left to make further rounds meaningful.
        if(optimizer.edges().size()<10)
            break;
    }    

    // Recover optimized pose and return number of inliers
    g2o::VertexSE3Expmap* vSE3_recov = static_cast<g2o::VertexSE3Expmap*>(optimizer.vertex(0));
    g2o::SE3Quat SE3quat_recov = vSE3_recov->estimate();
    cv::Mat pose = Converter::toCvMat(SE3quat_recov);
    pFrame->SetPose(pose);

    return nInitialCorrespondences-nBad;
}

// Initialize the map from the first stereo frame: the frame becomes the
// origin keyframe, and every keypoint with a valid depth is triangulated
// into a new MapPoint. Sets m_init to true on completion.
void Tracking::StereoInitialization()
{
	// Set Frame pose to the origin
	m_CurrentFrame.SetPose(cv::Mat::eye(4,4,CV_32F));

	// Create KeyFrame
	KeyFrame* pKFini = new KeyFrame(m_CurrentFrame, m_Map);
	
	// Create MapPoints and asscoiate to KeyFrame
	
	for(int i=0; i<m_CurrentFrame.m_keyPointsNumber; i++)
	{
		float z = m_CurrentFrame.m_vectorDepth[i];
		if(z > 0)
		{
			// Back-project keypoint i to a 3D world point using its depth.
			cv::Mat x3D = m_CurrentFrame.UnprojectStereo(i);
			MapPoint* pNewMP = new MapPoint(x3D,pKFini, m_Map);
			pNewMP->AddObservation(pKFini,i);
			pKFini->AddMapPoint(pNewMP,i);
			pNewMP->ComputeDistinctiveDescriptors();
			pNewMP->UpdateNormalAndDepth();
			m_Map->AddMapPoint(pNewMP);

			m_CurrentFrame.m_vectorMapPoints[i] = pNewMP;
		}
	}
	
	cout << "New map created with " << m_Map->MapPointsSize() << " points" << endl;

	m_LocalMapper->InsertKeyFrame(pKFini);

	// Bootstrap the tracking state: the initial keyframe is the reference
	// for both the tracker and the current frame, and seeds the local map.
	m_LastFrame = Frame(m_CurrentFrame);
	m_LastKeyFrameId = m_CurrentFrame.m_frameId;

	m_LastKeyFrame = pKFini;

	m_vectorLocalKeyFrames.push_back(pKFini);
	m_vectorLocalMapPoints = m_Map->GetAllMapPoints();
	m_ReferenceKF = pKFini;
	m_CurrentFrame.m_ReferenceKF = pKFini;

	m_Map->SetReferenceMapPoints(m_vectorLocalMapPoints);
	m_Map->m_vectorKeyFrameOrigins.push_back(pKFini);

	m_MapDrawer->SetCurrentCameraPose(m_CurrentFrame.m_RTcw);

	m_init = true;
}


/**
 * @brief Refresh the last frame before tracking against it.
 *
 * Re-anchors the last frame's pose to its reference keyframe's (possibly
 * optimized) pose, then creates temporary "visual odometry" MapPoints for
 * unmatched keypoints with valid depth, closest first: all points closer
 * than m_ThDepth are inserted, and at least the 100 closest overall.
 */
void Tracking::UpdateLastFrame()
{
    // Update pose according to reference keyframe
    KeyFrame* pRef = m_LastFrame.m_ReferenceKF;
    m_LastFrame.SetPose(pRef->GetPose());

    // Create "visual odometry" MapPoints
    // We sort points according to their measured depth by the stereo/RGB-D sensor
    vector<pair<float,int> > vDepthIdx;
    vDepthIdx.reserve(m_LastFrame.m_keyPointsNumber);
    for(int i=0; i<m_LastFrame.m_keyPointsNumber;i++)
    {
        float z = m_LastFrame.m_vectorDepth[i];
        if(z>0)
        {
            vDepthIdx.push_back(make_pair(z,i));
        }
    }

    if(vDepthIdx.empty())
        return;

    sort(vDepthIdx.begin(),vDepthIdx.end());

    // We insert all close points (depth < m_ThDepth).
    // If less than 100 close points, we insert the 100 closest ones.
    int nPoints = 0;
    for(size_t j=0; j<vDepthIdx.size();j++)
    {
        int i = vDepthIdx[j].second;

        bool bCreateNew = false;

        // Create a point only if the keypoint has no map point yet, or its
        // map point was never observed by any keyframe.
        MapPoint* pMP = m_LastFrame.m_vectorMapPoints[i];
        if(!pMP)
            bCreateNew = true;
        else if(pMP->Observations()<1)
        {
            bCreateNew = true;
        }

        if(bCreateNew)
        {
            cv::Mat x3D = m_LastFrame.UnprojectStereo(i);
            MapPoint* pNewMP = new MapPoint(x3D,m_Map,&m_LastFrame,i);

            m_LastFrame.m_vectorMapPoints[i]=pNewMP;

            nPoints++;
        }
        else
        {
            nPoints++;
        }

        // Stop once we are beyond the close-depth threshold AND have
        // accumulated at least 100 points.
        if(vDepthIdx[j].first>m_ThDepth && nPoints>100)
            break;
    }
}


/**
 * @brief Track the current frame against the last frame.
 *
 * Initializes the current pose from the last frame, matches map points by
 * projection, optimizes the pose, and discards optimization outliers.
 *
 * @return true if enough matches (>= 15) and enough post-optimization
 *         map inliers (>= 5) were found.
 */
bool Tracking::TrackReferenceKeyFrame()
{
	UpdateLastFrame();

	// Predict the current pose from the last frame and match map points by
	// projecting them into the current frame. SearchByProjection writes the
	// matches directly into m_CurrentFrame.m_vectorMapPoints.
	m_CurrentFrame.SetPose(m_LastFrame.m_RTcw);
	int nmatches = SearchByProjection(m_CurrentFrame, m_LastFrame, 7, false);

	if(nmatches < 15)
	{
		cout << "frame id:" << m_CurrentFrame.m_frameId << endl;
		cout << "mpReferenceKF kp size:" << m_ReferenceKF->m_keyPointsNumber << ", mCurrentFrame kp size:" << m_CurrentFrame.m_keyPointsNumber << endl;
		cout << " SearchByProjection nmatches:" << nmatches << " < 15" << endl;
		return false;
	}

	// BUGFIX: the previous code assigned a never-populated (empty) local
	// vector to m_CurrentFrame.m_vectorMapPoints here, clobbering the matches
	// found by SearchByProjection and shrinking the vector to size 0, which
	// made the per-keypoint loops below read out of bounds. The assignment
	// has been removed; the matches set by SearchByProjection are kept.

	std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
	PoseOptimization(&m_CurrentFrame);
	std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
	double ttrack = std::chrono::duration_cast<std::chrono::duration<double>>(t2 - t1).count();
	cout << "PoseOptimization--------ttrack:" << ttrack * 1e3 << " ms" << endl;

	// Discard outliers flagged by the pose optimization and count the
	// matches that correspond to real (observed) map points.
	int nmatchesMap = 0;
	for(int i = 0; i < m_CurrentFrame.m_keyPointsNumber; i++)
	{
		if(m_CurrentFrame.m_vectorMapPoints[i])
		{
			if(m_CurrentFrame.m_vectorOutlier[i])
			{
				m_CurrentFrame.m_vectorMapPoints[i] = static_cast<MapPoint*>(NULL);
				m_CurrentFrame.m_vectorOutlier[i] = false;
			}
			else if(m_CurrentFrame.m_vectorMapPoints[i]->Observations() > 0)
			{
				nmatchesMap++;
			}
		}
	}

	if(nmatchesMap < 5)
	{
		cout << "frame id:" << m_CurrentFrame.m_frameId << ",nmatches:" << nmatches <<  endl;
		cout << "mpReferenceKF kp size:" << m_ReferenceKF->m_keyPointsNumber << ", mCurrentFrame kp size:" << m_CurrentFrame.m_keyPointsNumber << endl;
		cout << "nmatchesMap : " << nmatchesMap << " < 5" << endl;
		return false;
	}

	return true;
}

// Refine the current pose against the local map: rebuild the local
// keyframe/point sets, match additional local points by projection,
// re-optimize the pose, and update per-point statistics.
// @return true if at least 3 inlier matches survive.
bool Tracking::TrackLocalMap()
{
	//update local Map
	// This is for visualization
    m_Map->SetReferenceMapPoints(m_vectorLocalMapPoints);

	//update mpReferenceKF
	// Order matters: UpdateLocalPoints iterates the keyframes selected by
	// UpdateLocalKeyFrames, and SearchLocalPoints uses the resulting points.
	UpdateLocalKeyFrames();
	UpdateLocalPoints();

	SearchLocalPoints();

	// Optimize Pose
    PoseOptimization(&m_CurrentFrame);

	m_MatchesInliers = 0;

	// Update MapPoints Statistics
	// Inliers increase their "found" count; outliers are dropped from the frame.
	for(int i=0; i<m_CurrentFrame.m_keyPointsNumber; i++)
	{
		if(m_CurrentFrame.m_vectorMapPoints[i])
		{
			if(!m_CurrentFrame.m_vectorOutlier[i])
			{
				m_CurrentFrame.m_vectorMapPoints[i]->IncreaseFound();
				m_MatchesInliers++;
			}
			else
			{
				m_CurrentFrame.m_vectorMapPoints[i] = static_cast<MapPoint*>(NULL);
			}
		}
	}

	//cout << "TrackLocalMap mnMatchesInliers : " << mnMatchesInliers << endl;
	if(m_MatchesInliers < 3)
	{
		cout << "mnMatchesInliers : " << m_MatchesInliers << " < 3" << endl;
		return false;
	}

	return true;
}


// Hamming distance between two 256-bit descriptors stored as 8 x 32-bit words.
// Uses the classic SWAR (parallel bit-count) popcount on each XOR'd word.
// NOTE(review): this assumes binary descriptors (e.g. ORB/BRIEF); if the
// matcher elsewhere uses float descriptors with L2 norm, this distance is
// not meaningful for them — confirm which descriptor type is in use.
static int DescriptorDistance(const cv::Mat &a, const cv::Mat &b)
{
    const int *pa = a.ptr<int32_t>();
    const int *pb = b.ptr<int32_t>();

    int dist=0;

    for(int i=0; i<8; i++, pa++, pb++)
    {
        // Differing bits between the two descriptor words.
        unsigned  int v = *pa ^ *pb;
        // SWAR popcount: sum bits in 2-, then 4-bit groups, then fold the
        // per-byte counts into the top byte and shift them down.
        v = v - ((v >> 1) & 0x55555555);
        v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
        dist += (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
    }

    return dist;
}


/**
 * @brief Match map points of the last frame to keypoints of the current
 *        frame by projecting them with the current pose estimate.
 *
 * For every non-outlier map point of LastFrame, project it into
 * CurrentFrame, search candidate keypoints in a window of radius th*5,
 * and accept the best Hamming-distance candidate if its distance <= 100.
 * Matches are written into CurrentFrame.m_vectorMapPoints.
 *
 * @param CurrentFrame  frame receiving the matches (pose must be set).
 * @param LastFrame     frame providing the map points.
 * @param th            search-window scale factor.
 * @param bMono         kept for interface compatibility; currently unused
 *                      (the forward/backward octave logic was removed as it
 *                      selected identical search calls in every branch).
 * @return number of matches found.
 */
int Tracking::SearchByProjection(Frame &CurrentFrame, const Frame &LastFrame, const float th, const bool bMono)
{
    int nmatches = 0;

    // Current camera pose: world -> camera rotation and translation.
    const cv::Mat Rcw = CurrentFrame.m_RTcw.rowRange(0,3).colRange(0,3);
    const cv::Mat tcw = CurrentFrame.m_RTcw.rowRange(0,3).col(3);

    for(int i=0; i<LastFrame.m_keyPointsNumber; i++)
    {
        MapPoint* pMP = LastFrame.m_vectorMapPoints[i];

        if(!pMP)
            continue;

        if(LastFrame.m_vectorOutlier[i])
            continue;

        // Project the world point into the current camera.
        cv::Mat x3Dw = pMP->GetWorldPos();
        cv::Mat x3Dc = Rcw*x3Dw+tcw;

        const float xc = x3Dc.at<float>(0);
        const float yc = x3Dc.at<float>(1);
        const float invzc = 1.0/x3Dc.at<float>(2);

        // Point behind the camera.
        if(invzc<0)
            continue;

        float u = CurrentFrame.m_fx*xc*invzc+CurrentFrame.m_cx;
        float v = CurrentFrame.m_fy*yc*invzc+CurrentFrame.m_cy;

        // Reject projections outside the image bounds.
        if(u<CurrentFrame.m_MinX || u>CurrentFrame.m_MaxX)
            continue;
        if(v<CurrentFrame.m_MinY || v>CurrentFrame.m_MaxY)
            continue;

        // Search candidate keypoints in a window around the projection.
        float radius = th * 5;
        vector<size_t> vIndices2 = CurrentFrame.GetFeaturesInArea(u, v, radius);

        if(vIndices2.empty())
            continue;

        const cv::Mat dMP = pMP->GetDescriptor();

        int bestDist = 256;
        int bestIdx2 = -1;

        for(vector<size_t>::const_iterator vit=vIndices2.begin(), vend=vIndices2.end(); vit!=vend; vit++)
        {
            const size_t i2 = *vit;

            // Skip keypoints already matched to an observed map point.
            if(CurrentFrame.m_vectorMapPoints[i2])
                if(CurrentFrame.m_vectorMapPoints[i2]->Observations()>0)
                    continue;

            // For stereo keypoints, also check the right-image coordinate
            // against the expected disparity.
            if(CurrentFrame.m_vectorURight[i2]>0)
            {
                const float ur = u - CurrentFrame.m_baselineFx*invzc;
                const float er = fabs(ur - CurrentFrame.m_vectorURight[i2]);
                if(er>radius)
                    continue;
            }

            const cv::Mat &d = CurrentFrame.m_DescriptorsLeft.row(i2);

            const int dist = DescriptorDistance(dMP,d);

            if(dist<bestDist)
            {
                bestDist=dist;
                bestIdx2=i2;
            }
        }

        // Accept only reasonably close descriptor matches.
        if(bestDist<=100)
        {
            CurrentFrame.m_vectorMapPoints[bestIdx2]=pMP;
            nmatches++;
        }
    }

    return nmatches;
}


/**
 * @brief Match a set of (local-map) map points to the frame's keypoints.
 *
 * For each map point flagged as in-view (m_TrackInView, filled by
 * Frame::isInFrustum), search keypoints around its projected position
 * within a radius scaled by the viewing angle and th, and bind the best
 * descriptor match if its distance is below TH_HIGH.
 *
 * @param F            frame receiving the matches.
 * @param vpMapPoints  candidate map points (entries may be NULL).
 * @param th           radius scale factor.
 * @return number of new matches written into F.m_vectorMapPoints.
 */
int Tracking::SearchByProjection(Frame &F, const std::vector<MapPoint*> &vpMapPoints, const float th)
{
	int nmatches=0;

	for(size_t iMP=0; iMP<vpMapPoints.size(); iMP++)
	{
		MapPoint* pMP = vpMapPoints[iMP];

		// BUGFIX: guard against NULL entries before dereferencing; the old
		// code read pMP->m_TrackInView unconditionally.
		if(!pMP)
			continue;

		if(!pMP->m_TrackInView)
			continue;

		if(pMP->isBadMapPoint())
			continue;

		// The size of the window will depend on the viewing direction.
		float r = RadiusByViewingCos(pMP->m_TrackViewCos);

		r = r * th;

		const vector<size_t> vIndices =
				F.GetFeaturesInArea(pMP->m_TrackProjX, pMP->m_TrackProjY, r);

		if(vIndices.empty())
		{
			continue;
		}

		const cv::Mat MPdescriptor = pMP->GetDescriptor();

		int bestDist = INT32_MAX;
		int bestDist2 = INT32_MAX;
		int bestIdx = -1 ;

		// Get best and second matches with near keypoints
		for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
		{
			const size_t idx = *vit;

			// Skip keypoints already bound to an observed map point.
			if(F.m_vectorMapPoints[idx])
			{
				if(F.m_vectorMapPoints[idx]->Observations() > 0)
				{
					continue;
				}
			}

			// Stereo keypoints must also agree on the right-image coordinate.
			if(F.m_vectorURight[idx] > 0)
			{
				const float er = fabs(pMP->m_TrackProjXR - F.m_vectorURight[idx]);
				if(er > r)
				{
					continue;
				}
			}

			const cv::Mat &d = F.m_DescriptorsLeft.row(idx);

			// NOTE(review): cv::norm returns a double (L2) that is truncated
			// to int here — verify this is the intended descriptor metric.
			const int dist = cv::norm(MPdescriptor,d);

			if(dist < bestDist)
			{
				bestDist2 = bestDist;
				bestDist = dist;
				bestIdx = idx;
			}
			else if(dist < bestDist2)
			{
				bestDist2 = dist;
			}
		}

		// Accept the best candidate below the distance threshold.
		// (bestDist <= TH_HIGH implies at least one candidate was scored,
		// so bestIdx is valid here.)
		if(bestDist<=TH_HIGH)
		{
			F.m_vectorMapPoints[bestIdx] = pMP;
			nmatches++;
		}
	}

	return nmatches;
}

// Promote the current frame to a keyframe, create close-depth map points
// for unmatched keypoints (all points closer than m_ThDepth, and at least
// the 100 closest), and hand the keyframe to the local mapper.
void Tracking::CreateNewKeyFrame()
{
	// If the local mapper cannot be prevented from stopping, skip insertion.
	if(!m_LocalMapper->SetNotStop(true))
	{
		return;
	}

	KeyFrame* pKF = new KeyFrame(m_CurrentFrame, m_Map);
	m_ReferenceKF = pKF;
	m_CurrentFrame.m_ReferenceKF = pKF;

	m_CurrentFrame.UpdatePoseMatrices();

	//We sort points by the measured depth by the stereo/RGBD sensor.
	// We create all those MapPoints whose depth < mThDepth.
	// If there are less than 100 close points we create the 100 closest.
	vector<pair<float,int> > vDepthIdx;
	vDepthIdx.reserve(m_CurrentFrame.m_keyPointsNumber);

	for(int i=0; i<m_CurrentFrame.m_keyPointsNumber; i++)
	{
		float z = m_CurrentFrame.m_vectorDepth[i];
		if(z>0)
		{
			vDepthIdx.push_back(make_pair(z,i));
		}
	}

	if(!vDepthIdx.empty())
	{
		sort(vDepthIdx.begin(),vDepthIdx.end());
		int nPoints = 0;

		for(size_t j=0; j<vDepthIdx.size();j++)
		{
			int i = vDepthIdx[j].second;
			bool bCreateNew = false;

			// Create a new point only if the keypoint is unmatched, or its
			// map point was never observed by any keyframe.
			MapPoint* pMP = m_CurrentFrame.m_vectorMapPoints[i];
			if(!pMP)
			{
				bCreateNew = true;
			}
			else if(pMP->Observations()<1)
			{
				bCreateNew = true;
				m_CurrentFrame.m_vectorMapPoints[i] = static_cast<MapPoint*>(NULL);
			}

			if(bCreateNew)
			{
				cv::Mat x3D = m_CurrentFrame.UnprojectStereo(i);
				MapPoint* pNewMP = new MapPoint(x3D, pKF, m_Map);
				pNewMP->AddObservation(pKF,i);
				pKF->AddMapPoint(pNewMP,i);
				pNewMP->ComputeDistinctiveDescriptors();
				pNewMP->UpdateNormalAndDepth();
				m_Map->AddMapPoint(pNewMP);

				m_CurrentFrame.m_vectorMapPoints[i] = pNewMP;
				nPoints++;
			}
			else
			{
				nPoints++;
			}

			// Stop once beyond the close-depth threshold with >100 points.
			if(vDepthIdx[j].first > m_ThDepth && nPoints > 100)
			{
				break;
			}
		}
	}


	m_LocalMapper->InsertKeyFrame(pKF);

    m_LocalMapper->SetNotStop(false);

	m_LastKeyFrameId = m_CurrentFrame.m_frameId;
	m_LastKeyFrame = pKF;
}


/**
 * @brief Rebuild the local keyframe set for the current frame.
 *
 * Every keyframe observing one of the current frame's map points is
 * included; the keyframe sharing the most points becomes the reference
 * keyframe. The set is then expanded with covisibility neighbors,
 * spanning-tree children, and parents of the initially selected keyframes
 * (capped at ~80 keyframes).
 */
void Tracking::UpdateLocalKeyFrames()
{
	// Each map point votes for the keyframes in which it has been observed.
	map<KeyFrame*,int> keyframeCounter;
	for(int i=0; i<m_CurrentFrame.m_keyPointsNumber; i++)
	{
		if(m_CurrentFrame.m_vectorMapPoints[i])
		{
			MapPoint* pMP = m_CurrentFrame.m_vectorMapPoints[i];
			if(!pMP->isBadMapPoint())
			{
				const map<KeyFrame*,size_t> observations = pMP->GetObservations();
				for(map<KeyFrame*,size_t>::const_iterator it=observations.begin(), end=observations.end(); it!=end; it++)
				{
					keyframeCounter[it->first]++;
				}
			}
			else
			{
				// Prune bad map points from the frame as we go.
				m_CurrentFrame.m_vectorMapPoints[i]=NULL;
			}
		}
	}

	if(keyframeCounter.empty())
	{
		return;
	}

	int max=0;
	KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);
	m_vectorLocalKeyFrames.clear();
	m_vectorLocalKeyFrames.reserve(3 * keyframeCounter.size());

	// All keyframes that observe a map point are included in the local map.
	// Also track which keyframe shares the most points.
	for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), End=keyframeCounter.end(); it!=End; it++)
	{
		KeyFrame* pKF = it->first;
		if(pKF->isBadKeyFrame())
		{
			continue;
		}

		if(it->second > max)
		{
			max = it->second;
			pKFmax = pKF;
		}

		m_vectorLocalKeyFrames.push_back(it->first);
		// Mark so the expansion phase below does not add duplicates.
		pKF->m_TrackReferenceForFrame = m_CurrentFrame.m_frameId;
	}

	// Include some not-already-included keyframes that are neighbors of the
	// already-included ones.
	// BUGFIX: the old loop iterated with iterators while push_back-ing into
	// m_vectorLocalKeyFrames, which invalidates them on reallocation (UB).
	// Iterate by index over the INITIAL contents only, matching the original
	// intent of expanding just the directly-voting keyframes.
	const size_t nInitialKFs = m_vectorLocalKeyFrames.size();
	for(size_t k=0; k<nInitialKFs; k++)
	{
		// Limit the number of keyframes.
		if(m_vectorLocalKeyFrames.size()>80)
		{
			break;
		}

		KeyFrame* pKF = m_vectorLocalKeyFrames[k];

		// Add at most one unvisited covisibility neighbor per keyframe.
		const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
		for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
		{
			KeyFrame* pNeighKF = *itNeighKF;
			if(!pNeighKF->isBadKeyFrame())
			{
				if(pNeighKF->m_TrackReferenceForFrame != m_CurrentFrame.m_frameId)
				{
					m_vectorLocalKeyFrames.push_back(pNeighKF);
					pNeighKF->m_TrackReferenceForFrame = m_CurrentFrame.m_frameId;
					break;
				}
			}
		}

		// Add at most one unvisited spanning-tree child per keyframe.
		const set<KeyFrame*> spChilds = pKF->GetChilds();
		for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
		{
			KeyFrame* pChildKF = *sit;
			if(!pChildKF->isBadKeyFrame())
			{
				if(pChildKF->m_TrackReferenceForFrame != m_CurrentFrame.m_frameId)
				{
					m_vectorLocalKeyFrames.push_back(pChildKF);
					pChildKF->m_TrackReferenceForFrame = m_CurrentFrame.m_frameId;
					break;
				}
			}
		}

		KeyFrame* pParent = pKF->GetParent();
		if(pParent)
		{
			if(pParent->m_TrackReferenceForFrame != m_CurrentFrame.m_frameId)
			{
				m_vectorLocalKeyFrames.push_back(pParent);
				pParent->m_TrackReferenceForFrame = m_CurrentFrame.m_frameId;
				// Preserved from the original: adding a parent stops the
				// whole expansion loop, not just this keyframe's expansion.
				break;
			}
		}
	}

	if(pKFmax)
	{
		m_ReferenceKF = pKFmax;
		m_CurrentFrame.m_ReferenceKF = m_ReferenceKF;
	}
}


void Tracking::UpdateLocalPoints()
{
	m_vectorLocalMapPoints.clear();
	for(vector<KeyFrame*>::const_iterator itKF=m_vectorLocalKeyFrames.begin(), itEndKF=m_vectorLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
	{
		KeyFrame* pKF = *itKF;
		const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();
		for(vector<MapPoint*>::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++)
		{
			MapPoint* pMP = *itMP;
			if(!pMP)
			{
				continue;
			}

			if(pMP->m_TrackReferenceForFrameId == m_CurrentFrame.m_frameId)
			{
				continue;
			}

			if(!pMP->isBadMapPoint())
			{
				m_vectorLocalMapPoints.push_back(pMP);
				pMP->m_TrackReferenceForFrameId = m_CurrentFrame.m_frameId;
			}
		}
	}

}

void Tracking::SearchLocalPoints()
{
	// Do not search map points already matched
	for(vector<MapPoint*>::iterator vit=m_CurrentFrame.m_vectorMapPoints.begin(), vend=m_CurrentFrame.m_vectorMapPoints.end(); vit!=vend; vit++)
	{
		MapPoint* pMP = *vit;
		if(pMP)
		{
			 if(pMP->isBadMapPoint())
			 {
				*vit = static_cast<MapPoint*>(NULL);
			 }
			 else
			 {
				pMP->IncreaseVisible();
				pMP->m_LastFrameSeenId = m_CurrentFrame.m_frameId;
				pMP->m_TrackInView = false;
			 }
		}
	}

	int nToMatch=0;
	// Project points in frame and check its visibility
	for(vector<MapPoint*>::iterator vit=m_vectorLocalMapPoints.begin(), vend=m_vectorLocalMapPoints.end(); vit!=vend; vit++)
	{
		MapPoint* pMP = *vit;
		if(pMP->m_LastFrameSeenId == m_CurrentFrame.m_frameId)
		{
			continue;
		}

		if(pMP->isBadMapPoint())
		{
			continue;
		}

		// Project (this fills MapPoint variables for matching)
		if(m_CurrentFrame.isInFrustum(pMP,0.5))
		{
			pMP->IncreaseVisible();
			nToMatch++;
		}
	}

	//cout << "SearchLocalPoints +++++nToMatch:" << nToMatch << ",mvpLocalMapPoints size:" << mvpLocalMapPoints.size() << endl;
	if(nToMatch>0)
	{
		SearchByProjection(m_CurrentFrame, m_vectorLocalMapPoints, 10);
	}
}


// Search-window radius as a function of the viewing-angle cosine:
// near-frontal views (cos > 0.998) get a tighter radius.
float Tracking::RadiusByViewingCos(const float &viewCos)
{
    return (viewCos > 0.998) ? 2.5 : 4.0;
}


// Main per-frame entry point of the tracker.
// Builds a Frame from the rectified stereo pair, initializes the map on the
// first call, and afterwards tracks against the last frame and the local
// map, inserting a new keyframe when needed.
// @param imRectLeft/imRectRight  rectified stereo images.
// @param timestamp               frame timestamp.
// @param K, distCoef             camera intrinsics / distortion.
// @param bf                      baseline times focal length.
// @param thDepth                 close/far depth threshold.
void Tracking::track(const cv::Mat &imRectLeft, const cv::Mat &imRectRight, const double &timestamp, cv::Mat &K, cv::Mat &distCoef, const float &bf, float thDepth)
{
	m_ThDepth = thDepth;
	m_CurrentFrame = Frame(imRectLeft, imRectRight, timestamp, K, distCoef, bf,thDepth);

	if(!m_init)
	{
		StereoInitialization();
	}
	else
	{
		bool ret;
		ret = TrackReferenceKeyFrame();

		m_CurrentFrame.m_ReferenceKF = m_ReferenceKF;

		if(ret == true)
		{
			//update mpReferenceKF
			ret = TrackLocalMap();

			if(ret == true)
			{
				// Publish the refined pose for visualization.
				m_MapDrawer->SetCurrentCameraPose(m_CurrentFrame.m_RTcw);
			}
		}

		// NOTE(review): keyframe insertion is attempted even when tracking
		// failed above — confirm this is intentional.
		if(NeedNewKeyFrame())
		{
			CreateNewKeyFrame();	
		}

		m_LastFrame = Frame(m_CurrentFrame);
	}
}

/**
 * @brief Decide whether the current frame should become a keyframe.
 *
 * Mirrors the ORB-SLAM-style policy: insert when enough frames have passed
 * (or the mapper is idle), tracking is weak, or many close points are not
 * yet in the map — provided the local mapper can accept the keyframe.
 *
 * @return true if a new keyframe should be created now.
 */
bool Tracking::NeedNewKeyFrame()
{
	// If Local Mapping is freezed by a Loop Closure do not insert keyframes
	if(m_LocalMapper->isStopped() || m_LocalMapper->stopRequested())
	{
		return false;
	}

	int nKFs = m_Map->KeyFramesSize();

	// Tracked MapPoints in the reference keyframe; require fewer
	// observations per point while the map is still small.
	int nMinObs = 3;
	if(nKFs <= 2)
	{
		nMinObs=2;
	}

	int nRefMatches = m_ReferenceKF->TrackedMapPoints(nMinObs);

	// Local Mapping accept keyframes?
	bool bLocalMappingIdle = m_LocalMapper->AcceptKeyFrames();

	// Check how many "close" points are being tracked and how many could be potentially created.
	int nNonTrackedClose = 0;
	int nTrackedClose= 0;
	for(int i =0; i<m_CurrentFrame.m_keyPointsNumber; i++)
	{
		if(m_CurrentFrame.m_vectorDepth[i] > 0 && m_CurrentFrame.m_vectorDepth[i] < m_ThDepth)
		{
			if(m_CurrentFrame.m_vectorMapPoints[i] && !m_CurrentFrame.m_vectorOutlier[i])
			{
				nTrackedClose++;
			}
			else
			{
				nNonTrackedClose++;
			}
		}
	}

	bool bNeedToInsertClose = (nTrackedClose < 60) && (nNonTrackedClose > 30);

	// Thresholds
	float thRefRatio = 0.9f;

	// Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
	bool c1a = m_CurrentFrame.m_frameId >= m_LastKeyFrameId + m_MaxFrames;

	// Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
	bool c1b = (m_CurrentFrame.m_frameId >= m_LastKeyFrameId + m_MinFrames && bLocalMappingIdle);

	// Condition 1c: tracking is weak
	bool c1c = m_MatchesInliers < nRefMatches * 0.75 || bNeedToInsertClose;

	// Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
	bool c2 = ((m_MatchesInliers < nRefMatches * thRefRatio || bNeedToInsertClose) && m_MatchesInliers > 2);

	if(!((c1a||c1b||c1c) && c2))
	{
		return false;
	}

	// If the mapping accepts keyframes, insert keyframe.
	// Otherwise send a signal to interrupt BA and insert only if the
	// mapper's queue is short enough.
	if(bLocalMappingIdle)
	{
		return true;
	}

	m_LocalMapper->InterruptBA();
	return m_LocalMapper->KeyframesInQueue() < 3;
}


// Wire in the local mapper (not owned; must outlive this Tracking).
void Tracking::SetLocalMapper(LocalMapping* pLocalMapper)
{
	m_LocalMapper = pLocalMapper;
}

// Wire in the viewer (not owned; may be NULL if running headless).
void Tracking::SetViewer(Viewer* pViewer)
{
	m_Viewer = pViewer;
}

