//
// Created by zss on 18-4-18.
//
#include "ORBmatcher.h"
#include "Map.h"
#include "System.h"
#include "Object.h"

#include <cmath>
#include<stdint-gcc.h>

namespace myslam
{
    // Hamming-distance thresholds for accepting ORB descriptor matches
    // (256-bit descriptors, so distances lie in [0, 256]).
    const int ORBmatcher::TH_HIGH = 100; // permissive threshold (projection-based searches)
    const int ORBmatcher::TH_LOW = 50; // strict threshold (BoW / triangulation searches)
    const int ORBmatcher::HISTO_LENGTH = 30; // bin count of the rotation-consistency histogram

    // @param nnratio  best/second-best distance ratio used in nearest-neighbour tests.
    // @param checkOri whether to enforce rotation consistency via the orientation histogram.
    ORBmatcher::ORBmatcher(float nnratio, bool checkOri): mfNNratio(nnratio), mbCheckOrientation(checkOri),num_object_point(0)
    {
    }

    // Track-local-map search: project each candidate local MapPoint into frame F
    // and associate it with the best-matching unassigned ORB keypoint.
    //
    // @param F            current frame (its mvpMapPoints is filled with matches).
    // @param vpMapPoints  candidate local MapPoints; only those flagged
    //                     mbTrackInView (set by Frame::isInFrustum) are tried.
    // @param mpMap        map, used to look up object centres for the semantic check.
    // @param th           window-radius multiplier (1.0 = no widening).
    // @return             number of MapPoints newly associated to keypoints of F.
    int ORBmatcher::SearchByProjection(Frame &F, const vector<MapPoint*> &vpMapPoints,Map *mpMap, const float th)//tracklocalmap--search local points
    {
        int nmatches = 0;
        const bool bFactor = th != 1.0;

        for (size_t iMP = 0; iMP < vpMapPoints.size(); iMP++) {
            MapPoint *pMP = vpMapPoints[iMP];

            // Skip points that do not project into the current view.
            if (!pMP->mbTrackInView)
                continue;
            if (pMP->isBad())
                continue;

            const int &nPredictedLevel = pMP->mnTrackScaleLevel;

            // The size of the search window depends on the viewing direction.
            float r = RadiusByViewingCos(pMP->mTrackViewCos);
            if (bFactor)
                r *= th;

            // Keypoints near the projected position, restricted to the predicted
            // scale level and the one below it.
            const vector<size_t> vIndices =
                    F.GetFeaturesInArea(pMP->mTrackProjX, pMP->mTrackProjY, r * F.mvScaleFactors[nPredictedLevel],
                                        nPredictedLevel - 1, nPredictedLevel);
            if (vIndices.empty())
                continue;

            const cv::Mat MPdescriptor = pMP->GetDescriptor();

            int bestDist = 256;
            int bestLevel = -1;
            int bestDist2 = 256;
            int bestLevel2 = -1;
            int bestIdx = -1;

            // Get best and second-best descriptor matches among the nearby keypoints.
            for (vector<size_t>::const_iterator vit = vIndices.begin(), vend = vIndices.end(); vit != vend; vit++) {
                const size_t idx = *vit;

                // Keypoint already bound to an observed MapPoint: leave it alone.
                if (F.mvpMapPoints[idx])
                    if (F.mvpMapPoints[idx]->Observations() > 0)
                        continue;

                // For stereo keypoints also check the right-image reprojection error.
                if (F.mvuRight[idx] > 0) {
                    const float er = std::fabs(pMP->mTrackProjXR - F.mvuRight[idx]);
                    if (er > r * F.mvScaleFactors[nPredictedLevel])
                        continue;
                }

                const cv::Mat &d = F.mDescriptors.row(idx);
                const int dist = DescriptorDistance(MPdescriptor, d);

                if (dist < bestDist) {
                    bestDist2 = bestDist;
                    bestDist = dist;
                    bestLevel2 = bestLevel;
                    bestLevel = F.mvKeysUn[idx].octave;
                    bestIdx = idx;
                } else if (dist < bestDist2) {
                    bestLevel2 = F.mvKeysUn[idx].octave;
                    bestDist2 = dist;
                }
            }

            if (bestDist <= TH_HIGH) {
                // Ratio test, applied only when best and runner-up share a scale level.
                if (bestLevel == bestLevel2 && bestDist > mfNNratio * bestDist2)
                    continue;

                // If the matched keypoint falls inside a detected 2D object box and
                // its stereo depth agrees with the object's centre depth, tag the
                // keypoint with that object's id.
                for (size_t i = 0; i < F.objects.size(); ++i) {
                    if (F.mvKeysUn[bestIdx].pt.x > F.objects[i]->left &&
                        F.mvKeysUn[bestIdx].pt.x < F.objects[i]->right &&
                        F.mvKeysUn[bestIdx].pt.y > F.objects[i]->top &&
                        F.mvKeysUn[bestIdx].pt.y < F.objects[i]->bottom) {
                        float z = F.mvDepth[bestIdx];
                        if (z > 0)
                        {
                            cv::Mat x3D = F.UnprojectStereo(bestIdx);
                            float depth = x3D.at<float>(2, 0);
                            float middle_point = F.objects[i]->_Pos.at<float>(2, 0);
                            // BUGFIX: was plain abs(), which can resolve to the
                            // integer overload and truncate the float difference.
                            if (std::fabs(depth - middle_point) < 0.1) {
                                F.mvKeysUn[bestIdx].class_id = F.objects[i]->mnId;
                                break;
                            }
                        }
                    }
                }

                // Semantic consistency: if the keypoint belongs to an object but the
                // MapPoint has rarely (<=10 times) been seen on that object, require
                // that at least one object previously associated with the MapPoint
                // lies within 0.6 m of the keypoint's object centre; otherwise the
                // match is discarded as an object-level mismatch.
                bool bDiscard = false;
                if (F.mvKeysUn[bestIdx].class_id != -1 && !pMP->object_id_vector.empty() )
                {
                    map<int, int>::iterator sit = pMP->object_id_vector.find(F.mvKeysUn[bestIdx].class_id);
                    if (sit != pMP->object_id_vector.end() && sit->second > 10)
                    {
                        // Seen on this object often enough: accept without distance check.
                    }
                    else
                    {
                        vector<cv::Mat> vAssociatedCentres; // centres of objects this MapPoint was seen on
                        cv::Mat keypointObjectCentre;       // centre of the keypoint's object
                        map<int, int> msp = pMP->object_id_vector;
                        for (size_t i = 0; i < mpMap->objs_real.size(); ++i)
                        {
                            if (mpMap->objs_real[i]->mnId == F.mvKeysUn[bestIdx].class_id)
                                keypointObjectCentre = mpMap->objs_real[i]->_Pos;
                            for (map<int, int>::iterator sit1 = msp.begin(), send = msp.end(); sit1 != send; sit1++)
                            {
                                if (mpMap->objs_real[i]->mnId == sit1->first)
                                    vAssociatedCentres.push_back(mpMap->objs_real[i]->_Pos);
                            }
                        }
                        if (!vAssociatedCentres.empty() && !keypointObjectCentre.empty())
                        {
                            int nClose = 0;
                            for (size_t i = 0; i < vAssociatedCentres.size(); ++i)
                            {
                                double dx = vAssociatedCentres[i].at<float>(0, 0) - keypointObjectCentre.at<float>(0, 0);
                                double dy = vAssociatedCentres[i].at<float>(1, 0) - keypointObjectCentre.at<float>(1, 0);
                                double dz = vAssociatedCentres[i].at<float>(2, 0) - keypointObjectCentre.at<float>(2, 0);
                                // One associated object within 0.6 m is enough.
                                if (std::sqrt(dx * dx + dy * dy + dz * dz) <= 0.6)
                                {
                                    nClose++;
                                    break;
                                }
                            }
                            if (nClose == 0)
                                bDiscard = true;
                        }
                    }
                    if (bDiscard)
                        continue;
                }

                F.mvpMapPoints[bestIdx] = pMP;
                nmatches++;
            }
        }

        return nmatches;
    }

    // Search-window radius (pixels) as a function of the viewing-angle cosine:
    // points seen almost head-on (cos > 0.998) get the tight 2.5 px window,
    // any other viewing direction the wider 4.0 px window.
    float ORBmatcher::RadiusByViewingCos(const float &viewCos)
    {
        return (viewCos > 0.998) ? 2.5f : 4.0f;
    }


    // Used by SearchForTriangulation: check whether kp2 lies close enough to the
    // epipolar line induced in the second image by kp1 and the fundamental
    // matrix F12, with a tolerance scaled by kp2's pyramid-level variance.
    bool ORBmatcher::CheckDistEpipolarLine(const cv::KeyPoint &kp1,const cv::KeyPoint &kp2,const cv::Mat &F12,const KeyFrame* pKF2)//SearchForTriangulation
    {
        // Epipolar line in the second image: l = x1' * F12 = [la lb lc].
        const float la = kp1.pt.x * F12.at<float>(0,0) + kp1.pt.y * F12.at<float>(1,0) + F12.at<float>(2,0);
        const float lb = kp1.pt.x * F12.at<float>(0,1) + kp1.pt.y * F12.at<float>(1,1) + F12.at<float>(2,1);
        const float lc = kp1.pt.x * F12.at<float>(0,2) + kp1.pt.y * F12.at<float>(1,2) + F12.at<float>(2,2);

        // Point-line residual and the squared norm of the line normal.
        const float num = la * kp2.pt.x + lb * kp2.pt.y + lc;
        const float den = la * la + lb * lb;

        // Degenerate line: cannot evaluate the distance, reject the pair.
        if (den == 0)
            return false;

        // Squared point-to-line distance, gated by the chi-square threshold 3.84
        // (95% confidence, 1 DOF) scaled with the keypoint's level sigma^2.
        const float dsqr = num * num / den;

        return dsqr < 3.84 * pKF2->mvLevelSigma2[kp2.octave];
    }

    // Match frame F's keypoints against keyframe pKF's MapPoints using the DBoW2
    // vocabulary: only descriptors indexed under the same vocabulary node are
    // compared, which drastically limits the number of distance computations.
    //
    // @param pKF                keyframe providing MapPoints and descriptors.
    // @param F                  current frame to match against.
    // @param vpMapPointMatches  output, sized F.N: MapPoint matched to each
    //                           keypoint of F, or NULL.
    // @return number of accepted matches (after the optional rotation check).
    int ORBmatcher::SearchByBoW(KeyFrame* pKF,Frame &F, vector<MapPoint*> &vpMapPointMatches)
    {
        const vector<MapPoint*> vpMapPointsKF = pKF->GetMapPointMatches();    // all MapPoints of the keyframe (mvpMapPoints)
        vpMapPointMatches = vector<MapPoint*>(F.N,static_cast<MapPoint*>(NULL));// initialise output: no matches yet
        const DBoW2::FeatureVector &vFeatVecKF = pKF->mFeatVec;
        int nmatches=0;
        // Histogram of angular offsets, used to discard matches that disagree
        // with the dominant rotation between the two images.
        vector<int> rotHist[HISTO_LENGTH];
        for(int i=0;i<HISTO_LENGTH;i++)
            rotHist[i].reserve(500);
        // NOTE(review): inherited from upstream ORB-SLAM2 — with rot in [0,360)
        // this factor maps angles into bins 0..12 only, not all HISTO_LENGTH
        // bins (HISTO_LENGTH/360.0f would cover the full circle). Verify before
        // changing, as the three-maxima filter still works with this binning.
        const float factor = 1.0f/HISTO_LENGTH;

        // We perform the matching over ORB that belong to the same vocabulary node (at a certain level)
        DBoW2::FeatureVector::const_iterator KFit = vFeatVecKF.begin();// keyframe feature vector: begin
        DBoW2::FeatureVector::const_iterator Fit = F.mFeatVec.begin();// frame feature vector: begin
        DBoW2::FeatureVector::const_iterator KFend = vFeatVecKF.end();// keyframe feature vector: end
        DBoW2::FeatureVector::const_iterator Fend = F.mFeatVec.end();// frame feature vector: end

// Step 1: advance both feature vectors in lockstep, pairing up entries that
// belong to the same vocabulary node.
        while(KFit != KFend && Fit != Fend)
        {
            if(KFit->first == Fit->first)// same vocabulary word: compare descriptors under it
            {
                const vector<unsigned int> vIndicesKF = KFit->second;
                const vector<unsigned int> vIndicesF = Fit->second;
// Step 2: iterate over the keyframe's MapPoints indexed under this node.
                for(size_t iKF=0; iKF<vIndicesKF.size(); iKF++)
                {
                    const unsigned int realIdxKF = vIndicesKF[iKF];
                    MapPoint* pMP = vpMapPointsKF[realIdxKF];// MapPoint observed at this keyframe feature
                    if(!pMP)
                        continue;

                    if(pMP->isBad())
                        continue;

                    const cv::Mat &dKF= pKF->mDescriptors.row(realIdxKF);// descriptor of that keyframe feature

                    int bestDist1=256;
                    int bestIdxF =-1 ;
                    int bestDist2=256;
// Step 3: scan the frame's keypoints under the same node for the best match.
                    for(size_t iF=0; iF<vIndicesF.size(); iF++)
                    {
                        const unsigned int realIdxF = vIndicesF[iF];
                        // Already matched to some MapPoint: skip to save time.
                        if(vpMapPointMatches[realIdxF])
                            continue;

                        const cv::Mat &dF = F.mDescriptors.row(realIdxF);

                        const int dist =  DescriptorDistance(dKF,dF);// Hamming distance between descriptors
// Step 4: track the best and second-best distances.
                        if(dist<bestDist1)
                        {
                            bestDist2=bestDist1;
                            bestDist1=dist;
                            bestIdxF=realIdxF;
                        }
                        else if(dist<bestDist2)
                        {
                            bestDist2=dist;
                        }
                    }
// Step 5: accept only if below TH_LOW and passing the nearest-neighbour ratio test.
                    if(bestDist1<=TH_LOW)
                    {
                        if(static_cast<float>(bestDist1)<mfNNratio*static_cast<float>(bestDist2))
                        {
// Step 6: record the MapPoint for the matched frame keypoint.
                            vpMapPointMatches[bestIdxF]=pMP;
                            const cv::KeyPoint &kp = pKF->mvKeysUn[realIdxKF];
                            if(mbCheckOrientation)
                            {
                                // Bin the angular offset for the rotation-consistency filter.
                                float rot = kp.angle-F.mvKeys[bestIdxF].angle;
                                if(rot<0.0)
                                    rot+=360.0f;
                                int bin = round(rot*factor);
                                if(bin==HISTO_LENGTH)
                                    bin=0;
                                assert(bin>=0 && bin<HISTO_LENGTH);
                                rotHist[bin].push_back(bestIdxF);
                            }
                            nmatches++;
                        }
                    }

                }

                KFit++;
                Fit++;
            }
            else if(KFit->first < Fit->first)
            {
                // Keyframe iterator is behind: jump it forward to the frame's node.
                KFit = vFeatVecKF.lower_bound(Fit->first);
            }
            else
            {
                // Frame iterator is behind: jump it forward to the keyframe's node.
                Fit = F.mFeatVec.lower_bound(KFit->first);
            }
        }


        if(mbCheckOrientation)
        {
            int ind1=-1;
            int ind2=-1;
            int ind3=-1;
            // Find the three most populated rotation bins.
            ComputeThreeMaxima(rotHist,HISTO_LENGTH,ind1,ind2,ind3);

            for(int i=0; i<HISTO_LENGTH; i++)
            {
                if(i==ind1 || i==ind2 || i==ind3)
                    continue;
                // Drop matches outside the dominant rotation bins.
                for(size_t j=0, jend=rotHist[i].size(); j<jend; j++)
                {
                    vpMapPointMatches[rotHist[i][j]]=static_cast<MapPoint*>(NULL);
                    nmatches--;
                }
            }
        }

        return nmatches;
    }

    // Used by Sim3 computation / loop closing: project a set of MapPoints into
    // keyframe pKF through the similarity transform Scw (scaled rotation +
    // translation) and match each to the most similar unmatched keypoint.
    //
    // @param pKF       target keyframe.
    // @param Scw       4x4 similarity transform world->camera (rotation scaled by s).
    // @param vpPoints  candidate MapPoints to project.
    // @param vpMatched in/out, indexed by keypoint: already-found and newly-found matches.
    // @param th        search radius multiplier (pixels, scaled per predicted level).
    // @return number of new matches added to vpMatched.
    int ORBmatcher::SearchByProjection(KeyFrame* pKF, cv::Mat Scw, const vector<MapPoint*> &vpPoints, vector<MapPoint*> &vpMatched, int th)//computesim3
    {
        // Get Calibration Parameters for later projection
        const float &fx = pKF->fx;
        const float &fy = pKF->fy;
        const float &cx = pKF->cx;
        const float &cy = pKF->cy;

        // Decompose Scw: recover the scale from the first row's norm, then strip
        // it to obtain a pure rotation, metric translation and camera centre.
        cv::Mat sRcw = Scw.rowRange(0,3).colRange(0,3);
        const float scw = sqrt(sRcw.row(0).dot(sRcw.row(0)));
        cv::Mat Rcw = sRcw/scw;
        cv::Mat tcw = Scw.rowRange(0,3).col(3)/scw;
        cv::Mat Ow = -Rcw.t()*tcw;

        // Set of MapPoints already found in the KeyFrame
        set<MapPoint*> spAlreadyFound(vpMatched.begin(), vpMatched.end());
        spAlreadyFound.erase(static_cast<MapPoint*>(NULL));

        int nmatches=0;

        // For each Candidate MapPoint Project and Match
        for(int iMP=0, iendMP=vpPoints.size(); iMP<iendMP; iMP++)
        {
            MapPoint* pMP = vpPoints[iMP];

            // Discard Bad MapPoints and already found
            if(pMP->isBad() || spAlreadyFound.count(pMP))
                continue;

            // Get 3D Coords.
            cv::Mat p3Dw = pMP->GetWorldPos();

            // Transform into Camera Coords.
            cv::Mat p3Dc = Rcw*p3Dw+tcw;

            // Depth must be positive
            if(p3Dc.at<float>(2)<0.0)
                continue;

            // Project into Image
            const float invz = 1/p3Dc.at<float>(2);
            const float x = p3Dc.at<float>(0)*invz;
            const float y = p3Dc.at<float>(1)*invz;

            const float u = fx*x+cx;
            const float v = fy*y+cy;

            // Point must be inside the image
            if(!pKF->IsInImage(u,v))
                continue;

            // Depth must be inside the scale invariance region of the point
            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();
            cv::Mat PO = p3Dw-Ow;
            const float dist = cv::norm(PO);

            if(dist<minDistance || dist>maxDistance)
                continue;

            // Viewing angle must be less than 60 deg
            cv::Mat Pn = pMP->GetNormal();

            if(PO.dot(Pn)<0.5*dist)
                continue;

            // Predict the pyramid level the point should appear at given its distance.
            int nPredictedLevel = pMP->PredictScale(dist,pKF);

            // Search in a radius
            const float radius = th*pKF->mvScaleFactors[nPredictedLevel];

            const vector<size_t> vIndices = pKF->GetFeaturesInArea(u,v,radius);

            if(vIndices.empty())
                continue;

            // Match to the most similar keypoint in the radius
            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = 256;
            int bestIdx = -1;
            for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
            {
                const size_t idx = *vit;
                // Keypoint already matched: skip.
                if(vpMatched[idx])
                    continue;

                const int &kpLevel= pKF->mvKeysUn[idx].octave;

                // Keypoint must sit on the predicted level or the one below it.
                if(kpLevel<nPredictedLevel-1 || kpLevel>nPredictedLevel)
                    continue;

                const cv::Mat &dKF = pKF->mDescriptors.row(idx);

                const int dist = DescriptorDistance(dMP,dKF);

                if(dist<bestDist)
                {
                    bestDist = dist;
                    bestIdx = idx;
                }
            }

            // Accept only under the strict threshold (no ratio test here).
            if(bestDist<=TH_LOW)
            {
                vpMatched[bestIdx]=pMP;
                nmatches++;
            }

        }

        return nmatches;
    }

    // Used by Sim3 computation: match MapPoints seen in keyframe pKF1 with those
    // seen in keyframe pKF2 via the shared DBoW2 vocabulary nodes, applying the
    // nearest-neighbour ratio test and (optionally) rotation consistency.
    //
    // @param pKF1/pKF2   keyframes to match.
    // @param vpMatches12 output, indexed like pKF1's keypoints: MapPoint of pKF2
    //                    matched to each MapPoint of pKF1, or NULL.
    // @return number of accepted matches.
    int ORBmatcher::SearchByBoW(KeyFrame *pKF1, KeyFrame *pKF2, vector<MapPoint *> &vpMatches12)//computesim3
    {
        const vector<cv::KeyPoint> &vKeysUn1 = pKF1->mvKeysUn;
        const DBoW2::FeatureVector &vFeatVec1 = pKF1->mFeatVec;
        const vector<MapPoint*> vpMapPoints1 = pKF1->GetMapPointMatches();
        const cv::Mat &Descriptors1 = pKF1->mDescriptors;

        const vector<cv::KeyPoint> &vKeysUn2 = pKF2->mvKeysUn;
        const DBoW2::FeatureVector &vFeatVec2 = pKF2->mFeatVec;
        const vector<MapPoint*> vpMapPoints2 = pKF2->GetMapPointMatches();
        const cv::Mat &Descriptors2 = pKF2->mDescriptors;

        vpMatches12 = vector<MapPoint*>(vpMapPoints1.size(),static_cast<MapPoint*>(NULL));
        vector<bool> vbMatched2(vpMapPoints2.size(),false);

        // Rotation histogram for the orientation-consistency filter.
        vector<int> rotHist[HISTO_LENGTH];
        for(int i=0;i<HISTO_LENGTH;i++)
            rotHist[i].reserve(500);

        const float factor = 1.0f/HISTO_LENGTH;

        int nmatches = 0;

        // Walk both feature vectors in lockstep over shared vocabulary nodes.
        DBoW2::FeatureVector::const_iterator f1it = vFeatVec1.begin();
        DBoW2::FeatureVector::const_iterator f2it = vFeatVec2.begin();
        DBoW2::FeatureVector::const_iterator f1end = vFeatVec1.end();
        DBoW2::FeatureVector::const_iterator f2end = vFeatVec2.end();

        while(f1it != f1end && f2it != f2end)
        {
            if(f1it->first == f2it->first)
            {
                for(size_t i1=0, iend1=f1it->second.size(); i1<iend1; i1++)
                {
                    const size_t idx1 = f1it->second[i1];

                    MapPoint* pMP1 = vpMapPoints1[idx1];
                    if(!pMP1)
                        continue;
                    if(pMP1->isBad())
                        continue;

                    const cv::Mat &d1 = Descriptors1.row(idx1);

                    int bestDist1=256;
                    int bestIdx2 =-1 ;
                    int bestDist2=256;

                    // Best and second-best matches among pKF2's features in this node.
                    for(size_t i2=0, iend2=f2it->second.size(); i2<iend2; i2++)
                    {
                        const size_t idx2 = f2it->second[i2];

                        MapPoint* pMP2 = vpMapPoints2[idx2];

                        // Skip features already claimed or without a MapPoint.
                        if(vbMatched2[idx2] || !pMP2)
                            continue;

                        if(pMP2->isBad())
                            continue;

                        const cv::Mat &d2 = Descriptors2.row(idx2);

                        int dist = DescriptorDistance(d1,d2);

                        if(dist<bestDist1)
                        {
                            bestDist2=bestDist1;
                            bestDist1=dist;
                            bestIdx2=idx2;
                        }
                        else if(dist<bestDist2)
                        {
                            bestDist2=dist;
                        }
                    }

                    // Strict threshold plus nearest-neighbour ratio test.
                    if(bestDist1<TH_LOW)
                    {
                        if(static_cast<float>(bestDist1)<mfNNratio*static_cast<float>(bestDist2))
                        {
                            vpMatches12[idx1]=vpMapPoints2[bestIdx2];
                            vbMatched2[bestIdx2]=true;

                            if(mbCheckOrientation)
                            {
                                // Bin the angular offset for the rotation check.
                                float rot = vKeysUn1[idx1].angle-vKeysUn2[bestIdx2].angle;
                                if(rot<0.0)
                                    rot+=360.0f;
                                int bin = round(rot*factor);
                                if(bin==HISTO_LENGTH)
                                    bin=0;
                                assert(bin>=0 && bin<HISTO_LENGTH);
                                rotHist[bin].push_back(idx1);
                            }
                            nmatches++;
                        }
                    }
                }

                f1it++;
                f2it++;
            }
            else if(f1it->first < f2it->first)
            {
                // First iterator is behind: jump forward to the shared node.
                f1it = vFeatVec1.lower_bound(f2it->first);
            }
            else
            {
                f2it = vFeatVec2.lower_bound(f1it->first);
            }
        }

        if(mbCheckOrientation)
        {
            int ind1=-1;
            int ind2=-1;
            int ind3=-1;

            ComputeThreeMaxima(rotHist,HISTO_LENGTH,ind1,ind2,ind3);

            for(int i=0; i<HISTO_LENGTH; i++)
            {
                if(i==ind1 || i==ind2 || i==ind3)
                    continue;
                for(size_t j=0, jend=rotHist[i].size(); j<jend; j++)
                {
                    vpMatches12[rotHist[i][j]]=static_cast<MapPoint*>(NULL);// discard matches outside the dominant rotation bins
                    nmatches--;
                }
            }
        }

        return nmatches;
    }

    // Used when creating new MapPoints: find matches between keypoints of pKF1
    // and pKF2 that are NOT yet associated to a MapPoint, using the DBoW2
    // vocabulary for candidate generation and the epipolar constraint (via F12)
    // for geometric validation. Matched pairs are later triangulated.
    //
    // @param pKF1/pKF2     keyframes to match.
    // @param F12           fundamental matrix between pKF1 and pKF2.
    // @param vMatchedPairs output list of (idx1, idx2) keypoint index pairs.
    // @param bOnlyStereo   if true, only consider keypoints with a stereo match.
    // @return number of matched pairs.
    int ORBmatcher::SearchForTriangulation(KeyFrame *pKF1, KeyFrame *pKF2, cv::Mat F12,
                                           vector<pair<size_t, size_t> > &vMatchedPairs, const bool bOnlyStereo)//create new mappoints
    {
        const DBoW2::FeatureVector &vFeatVec1 = pKF1->mFeatVec;
        const DBoW2::FeatureVector &vFeatVec2 = pKF2->mFeatVec;

        //Compute epipole in second image
        cv::Mat Cw = pKF1->GetCameraCenter();
        cv::Mat R2w = pKF2->GetRotation();
        cv::Mat t2w = pKF2->GetTranslation();
        cv::Mat C2 = R2w*Cw+t2w;
        const float invz = 1.0f/C2.at<float>(2);
        const float ex =pKF2->fx*C2.at<float>(0)*invz+pKF2->cx;
        const float ey =pKF2->fy*C2.at<float>(1)*invz+pKF2->cy;

        // Find matches between not tracked keypoints
        // Matching speed-up by ORB Vocabulary
        // Compare only ORB that share the same node

        int nmatches=0;
        vector<bool> vbMatched2(pKF2->N,false);
        vector<int> vMatches12(pKF1->N,-1);

        // Rotation histogram for the orientation-consistency filter.
        vector<int> rotHist[HISTO_LENGTH];
        for(int i=0;i<HISTO_LENGTH;i++)
            rotHist[i].reserve(500);

        const float factor = 1.0f/HISTO_LENGTH;

        DBoW2::FeatureVector::const_iterator f1it = vFeatVec1.begin();
        DBoW2::FeatureVector::const_iterator f2it = vFeatVec2.begin();
        DBoW2::FeatureVector::const_iterator f1end = vFeatVec1.end();
        DBoW2::FeatureVector::const_iterator f2end = vFeatVec2.end();

        while(f1it!=f1end && f2it!=f2end)
        {
            if(f1it->first == f2it->first)
            {
                for(size_t i1=0, iend1=f1it->second.size(); i1<iend1; i1++)
                {
                    const size_t idx1 = f1it->second[i1];

                    MapPoint* pMP1 = pKF1->GetMapPoint(idx1);

                    // If there is already a MapPoint skip
                    if(pMP1)
                        continue;

                    const bool bStereo1 = pKF1->mvuRight[idx1]>=0;

                    if(bOnlyStereo)
                        if(!bStereo1)
                            continue;

                    const cv::KeyPoint &kp1 = pKF1->mvKeysUn[idx1];

                    const cv::Mat &d1 = pKF1->mDescriptors.row(idx1);

                    // Start at TH_LOW so only matches under the strict
                    // threshold can ever be accepted.
                    int bestDist = TH_LOW;
                    int bestIdx2 = -1;

                    for(size_t i2=0, iend2=f2it->second.size(); i2<iend2; i2++)
                    {
                        size_t idx2 = f2it->second[i2];

                        MapPoint* pMP2 = pKF2->GetMapPoint(idx2);

                        // If we have already matched or there is a MapPoint skip
                        if(vbMatched2[idx2] || pMP2)
                            continue;

                        const bool bStereo2 = pKF2->mvuRight[idx2]>=0;

                        if(bOnlyStereo)
                            if(!bStereo2)
                                continue;

                        const cv::Mat &d2 = pKF2->mDescriptors.row(idx2);

                        const int dist = DescriptorDistance(d1,d2);

                        if(dist>TH_LOW || dist>bestDist)
                            continue;

                        const cv::KeyPoint &kp2 = pKF2->mvKeysUn[idx2];

                        // Monocular-monocular pairs: reject keypoints too close
                        // to the epipole, where triangulation is ill-conditioned.
                        if(!bStereo1 && !bStereo2)
                        {
                            const float distex = ex-kp2.pt.x;
                            const float distey = ey-kp2.pt.y;
                            if(distex*distex+distey*distey<100*pKF2->mvScaleFactors[kp2.octave])
                                continue;
                        }

                        // Keep the candidate only if it satisfies the epipolar constraint.
                        if(CheckDistEpipolarLine(kp1,kp2,F12,pKF2))
                        {
                            bestIdx2 = idx2;
                            bestDist = dist;
                        }
                    }

                    if(bestIdx2>=0)
                    {
                        const cv::KeyPoint &kp2 = pKF2->mvKeysUn[bestIdx2];
                        vMatches12[idx1]=bestIdx2;
                        nmatches++;

                        if(mbCheckOrientation)
                        {
                            // Bin the angular offset for the rotation check.
                            float rot = kp1.angle-kp2.angle;
                            if(rot<0.0)
                                rot+=360.0f;
                            int bin = round(rot*factor);
                            if(bin==HISTO_LENGTH)
                                bin=0;
                            assert(bin>=0 && bin<HISTO_LENGTH);
                            rotHist[bin].push_back(idx1);
                        }
                    }
                }

                f1it++;
                f2it++;
            }
            else if(f1it->first < f2it->first)
            {
                f1it = vFeatVec1.lower_bound(f2it->first);
            }
            else
            {
                f2it = vFeatVec2.lower_bound(f1it->first);
            }
        }

        if(mbCheckOrientation)
        {
            int ind1=-1;
            int ind2=-1;
            int ind3=-1;

            ComputeThreeMaxima(rotHist,HISTO_LENGTH,ind1,ind2,ind3);

            for(int i=0; i<HISTO_LENGTH; i++)
            {
                if(i==ind1 || i==ind2 || i==ind3)
                    continue;
                // Drop matches outside the three dominant rotation bins.
                for(size_t j=0, jend=rotHist[i].size(); j<jend; j++)
                {
                    vMatches12[rotHist[i][j]]=-1;
                    nmatches--;
                }
            }

        }

        // Collect the surviving index pairs.
        vMatchedPairs.clear();
        vMatchedPairs.reserve(nmatches);

        for(size_t i=0, iend=vMatches12.size(); i<iend; i++)
        {
            if(vMatches12[i]<0)
                continue;
            vMatchedPairs.push_back(make_pair(i,vMatches12[i]));
        }

        return nmatches;
    }

    // Used by local mapping (search in neighbours): project each candidate
    // MapPoint into keyframe pKF. If the projection lands on a keypoint that
    // already has a MapPoint, the two MapPoints are fused (the one with fewer
    // observations is replaced); otherwise the candidate is added as a new
    // observation of pKF.
    //
    // @param pKF         keyframe to fuse into.
    // @param vpMapPoints candidate MapPoints from neighbouring keyframes.
    // @param th          search radius multiplier.
    // @return number of fused/added points.
    int ORBmatcher::Fuse(KeyFrame *pKF, const vector<MapPoint *> &vpMapPoints, const float th)//search in neighbors
    {
        cv::Mat Rcw = pKF->GetRotation();
        cv::Mat tcw = pKF->GetTranslation();

        const float &fx = pKF->fx;
        const float &fy = pKF->fy;
        const float &cx = pKF->cx;
        const float &cy = pKF->cy;
        const float &bf = pKF->mbf;

        cv::Mat Ow = pKF->GetCameraCenter();

        int nFused=0;

        const int nMPs = vpMapPoints.size();

        for(int i=0; i<nMPs; i++)
        {
            MapPoint* pMP = vpMapPoints[i];

            if(!pMP)
                continue;

            if(pMP->isBad() || pMP->IsInKeyFrame(pKF))// bad, or already observed by this keyframe
                continue;

            cv::Mat p3Dw = pMP->GetWorldPos();
            cv::Mat p3Dc = Rcw*p3Dw + tcw;

            // Depth must be positive
            if(p3Dc.at<float>(2)<0.0f)
                continue;

            const float invz = 1/p3Dc.at<float>(2);
            const float x = p3Dc.at<float>(0)*invz;
            const float y = p3Dc.at<float>(1)*invz;

            const float u = fx*x+cx;
            const float v = fy*y+cy;

            // Point must be inside the image
            if(!pKF->IsInImage(u,v))
                continue;

            // Expected right-image coordinate for the stereo reprojection check.
            const float ur = u-bf*invz;

            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();
            cv::Mat PO = p3Dw-Ow;
            const float dist3D = cv::norm(PO);

            // Depth must be inside the scale pyramid of the image
            if(dist3D<minDistance || dist3D>maxDistance )
                continue;

            // Viewing angle must be less than 60 deg
            cv::Mat Pn = pMP->GetNormal();

            if(PO.dot(Pn)<0.5*dist3D)
                continue;

            int nPredictedLevel = pMP->PredictScale(dist3D,pKF);

            // Search in a radius
            const float radius = th*pKF->mvScaleFactors[nPredictedLevel];

            const vector<size_t> vIndices = pKF->GetFeaturesInArea(u,v,radius);

            if(vIndices.empty())
                continue;

            // Match to the most similar keypoint in the radius

            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = 256;
            int bestIdx = -1;
            for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
            {
                const size_t idx = *vit;

                const cv::KeyPoint &kp = pKF->mvKeysUn[idx];

                const int &kpLevel= kp.octave;

                // Keypoint must be on the predicted level or the one below it.
                if(kpLevel<nPredictedLevel-1 || kpLevel>nPredictedLevel)
                    continue;

                if(pKF->mvuRight[idx]>=0)
                {
                    // Check reprojection error in stereo (chi-square, 3 DOF: u, v, ur).
                    const float &kpx = kp.pt.x;
                    const float &kpy = kp.pt.y;
                    const float &kpr = pKF->mvuRight[idx];
                    const float ex = u-kpx;
                    const float ey = v-kpy;
                    const float er = ur-kpr;
                    const float e2 = ex*ex+ey*ey+er*er;

                    if(e2*pKF->mvInvLevelSigma2[kpLevel]>7.8)
                        continue;
                }
                else
                {
                    // Monocular reprojection error (chi-square, 2 DOF: u, v).
                    const float &kpx = kp.pt.x;
                    const float &kpy = kp.pt.y;
                    const float ex = u-kpx;
                    const float ey = v-kpy;
                    const float e2 = ex*ex+ey*ey;

                    if(e2*pKF->mvInvLevelSigma2[kpLevel]>5.99)
                        continue;
                }

                const cv::Mat &dKF = pKF->mDescriptors.row(idx);

                const int dist = DescriptorDistance(dMP,dKF);

                if(dist<bestDist)
                {
                    bestDist = dist;
                    bestIdx = idx;
                }
            }

            // If there is already a MapPoint replace, otherwise add new measurement
            if(bestDist<=TH_LOW)
            {
                // The matched keypoint may already carry a MapPoint (one slot per
                // keypoint; NULL or bad when unassigned).
                MapPoint* pMPinKF = pKF->GetMapPoint(bestIdx);
                if(pMPinKF)
                {
                    if(!pMPinKF->isBad())
                    {
                        // Keep whichever point has more observations; Replace()
                        // substitutes the callee's point and marks the caller bad.
                        if(pMPinKF->Observations()>pMP->Observations())
                        {
                            pMP->Replace(pMPinKF);
                        }

                        else
                        {
                            pMPinKF->Replace(pMP);
                        }

                    }
                }
                else
                {
                    // Free slot: register the candidate as an observation of pKF.
                    pMP->AddObservation(pKF,bestIdx);
                    pKF->AddMapPoint(pMP,bestIdx);
                }
                nFused++;
            }
        }

        return nFused;
    }

    // Loop-closing variant of Fuse: projects the candidate MapPoints through
    // the similarity transform Scw into keyframe pKF. For each accepted match
    // it either records the existing keyframe point that should replace the
    // candidate (in vpReplacePoint, indexed like vpPoints) or binds the
    // candidate to a free keypoint slot. Returns the number of fused points.
    int ORBmatcher::Fuse(KeyFrame *pKF, cv::Mat Scw, const vector<MapPoint *> &vpPoints, float th, vector<MapPoint *> &vpReplacePoint)
    {
        // Calibration of the target keyframe, used for the projection below.
        const float &fx = pKF->fx;
        const float &fy = pKF->fy;
        const float &cx = pKF->cx;
        const float &cy = pKF->cy;

        // Split Scw = [s*R | s*t] into rotation, translation and camera centre.
        // The scale s is recovered from the norm of the first rotation row.
        cv::Mat sRcw = Scw.rowRange(0,3).colRange(0,3);
        const float scw = sqrt(sRcw.row(0).dot(sRcw.row(0)));
        cv::Mat Rcw = sRcw/scw;
        cv::Mat tcw = Scw.rowRange(0,3).col(3)/scw;
        cv::Mat Ow = -Rcw.t()*tcw;

        // Points already attached to the keyframe must not be fused again.
        const set<MapPoint*> spAlreadyFound = pKF->GetMapPoints();

        int nFused = 0;
        const int nPoints = vpPoints.size();

        // Project every candidate map point and look for a descriptor match.
        for(int iMP=0; iMP<nPoints; iMP++)
        {
            MapPoint* pMP = vpPoints[iMP];

            if(pMP->isBad() || spAlreadyFound.count(pMP))
                continue;

            // Bring the point into the camera frame; reject if behind it.
            cv::Mat p3Dw = pMP->GetWorldPos();
            cv::Mat p3Dc = Rcw*p3Dw+tcw;
            if(p3Dc.at<float>(2)<0.0f)
                continue;

            // Pinhole projection into the image plane.
            const float invz = 1.0/p3Dc.at<float>(2);
            const float x = p3Dc.at<float>(0)*invz;
            const float y = p3Dc.at<float>(1)*invz;
            const float u = fx*x+cx;
            const float v = fy*y+cy;

            if(!pKF->IsInImage(u,v))
                continue;

            // Distance to the camera centre must lie inside the point's
            // scale-invariance band.
            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();
            cv::Mat PO = p3Dw-Ow;
            const float dist3D = cv::norm(PO);
            if(dist3D<minDistance || dist3D>maxDistance)
                continue;

            // Viewing direction must be within 60 degrees of the point normal.
            cv::Mat Pn = pMP->GetNormal();
            if(PO.dot(Pn)<0.5*dist3D)
                continue;

            const int nPredictedLevel = pMP->PredictScale(dist3D,pKF);

            // Collect keypoint candidates inside a scale-dependent window.
            const float radius = th*pKF->mvScaleFactors[nPredictedLevel];
            const vector<size_t> vIndices = pKF->GetFeaturesInArea(u,v,radius);
            if(vIndices.empty())
                continue;

            // Keep the candidate with the smallest descriptor distance,
            // restricted to octaves compatible with the predicted level.
            const cv::Mat dMP = pMP->GetDescriptor();
            int bestDist = INT_MAX;
            int bestIdx = -1;
            for(size_t ic=0; ic<vIndices.size(); ic++)
            {
                const size_t idx = vIndices[ic];
                const int &kpLevel = pKF->mvKeysUn[idx].octave;
                if(kpLevel<nPredictedLevel-1 || kpLevel>nPredictedLevel)
                    continue;

                const cv::Mat &dKF = pKF->mDescriptors.row(idx);
                const int dist = DescriptorDistance(dMP,dKF);
                if(dist<bestDist)
                {
                    bestDist = dist;
                    bestIdx = idx;
                }
            }

            // Accept only sufficiently close descriptor matches.
            if(bestDist<=TH_LOW)
            {
                MapPoint* pMPinKF = pKF->GetMapPoint(bestIdx);
                if(pMPinKF)
                {
                    // Keypoint already has a map point: remember it so the
                    // caller can perform the replacement later.
                    if(!pMPinKF->isBad())
                        vpReplacePoint[iMP] = pMPinKF;
                }
                else
                {
                    // Free slot: attach the candidate point to the keyframe.
                    pMP->AddObservation(pKF,bestIdx);
                    pKF->AddMapPoint(pMP,bestIdx);
                }
                nFused++;
            }
        }

        return nFused;
    }

    // Sim3-guided matching between two keyframes for loop closing.
    //
    // Given the similarity transform (s12, R12, t12) mapping camera-2
    // coordinates into camera-1 coordinates, each keyframe's map points are
    // projected into the other image and matched by descriptor distance
    // inside a window of radius th*scaleFactor. Only mutually consistent
    // pairs (KF1->KF2 and KF2->KF1 agree) are written into vpMatches12,
    // which is indexed by KF1 keypoint index. Returns the number of new
    // agreed matches.
    //
    // NOTE(review): pKF1's intrinsics are used for projections into BOTH
    // keyframes (see the second loop) — assumes both cameras share the same
    // calibration; confirm against the capture setup.
    int ORBmatcher::SearchBySim3(KeyFrame *pKF1, KeyFrame *pKF2, vector<MapPoint*> &vpMatches12,
                                 const float &s12, const cv::Mat &R12, const cv::Mat &t12, const float th)
    {
        const float &fx = pKF1->fx;
        const float &fy = pKF1->fy;
        const float &cx = pKF1->cx;
        const float &cy = pKF1->cy;

        // Camera 1 pose (world -> camera 1).
        cv::Mat R1w = pKF1->GetRotation();
        cv::Mat t1w = pKF1->GetTranslation();

        // Camera 2 pose (world -> camera 2).
        cv::Mat R2w = pKF2->GetRotation();
        cv::Mat t2w = pKF2->GetTranslation();

        // Similarity transforms between the two cameras:
        // sR12/t12 maps camera-2 coords into camera-1; sR21/t21 is its inverse.
        cv::Mat sR12 = s12*R12;
        cv::Mat sR21 = (1.0/s12)*R12.t();
        cv::Mat t21 = -sR21*t12;

        const vector<MapPoint*> vpMapPoints1 = pKF1->GetMapPointMatches();
        const int N1 = vpMapPoints1.size();

        const vector<MapPoint*> vpMapPoints2 = pKF2->GetMapPointMatches();
        const int N2 = vpMapPoints2.size();

        // Flags marking keypoints already matched via the input vpMatches12,
        // so they are skipped in both search directions below.
        vector<bool> vbAlreadyMatched1(N1,false);
        vector<bool> vbAlreadyMatched2(N2,false);

        for(int i=0; i<N1; i++)
        {
            MapPoint* pMP = vpMatches12[i];
            if(pMP)
            {
                vbAlreadyMatched1[i]=true;
                int idx2 = pMP->GetIndexInKeyFrame(pKF2);
                if(idx2>=0 && idx2<N2)
                    vbAlreadyMatched2[idx2]=true;
            }
        }

        // vnMatch1[i1] = best KF2 keypoint index for KF1 map point i1 (or -1);
        // vnMatch2[i2] = best KF1 keypoint index for KF2 map point i2 (or -1).
        vector<int> vnMatch1(N1,-1);
        vector<int> vnMatch2(N2,-1);

        // Transform from KF1 to KF2 and search
        for(int i1=0; i1<N1; i1++)
        {
            MapPoint* pMP = vpMapPoints1[i1];

            if(!pMP || vbAlreadyMatched1[i1])
                continue;

            if(pMP->isBad())
                continue;

            // World -> camera 1, then camera 1 -> camera 2 via the Sim3.
            cv::Mat p3Dw = pMP->GetWorldPos();
            cv::Mat p3Dc1 = R1w*p3Dw + t1w;
            cv::Mat p3Dc2 = sR21*p3Dc1 + t21;

            // Depth must be positive
            if(p3Dc2.at<float>(2)<0.0)
                continue;

            const float invz = 1.0/p3Dc2.at<float>(2);
            const float x = p3Dc2.at<float>(0)*invz;
            const float y = p3Dc2.at<float>(1)*invz;

            const float u = fx*x+cx;
            const float v = fy*y+cy;

            // Point must be inside the image
            if(!pKF2->IsInImage(u,v))
                continue;

            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();
            const float dist3D = cv::norm(p3Dc2);

            // Depth must be inside the scale invariance region
            if(dist3D<minDistance || dist3D>maxDistance )
                continue;

            // Compute predicted octave
            const int nPredictedLevel = pMP->PredictScale(dist3D,pKF2);

            // Search in a radius
            const float radius = th*pKF2->mvScaleFactors[nPredictedLevel];

            const vector<size_t> vIndices = pKF2->GetFeaturesInArea(u,v,radius);

            if(vIndices.empty())
                continue;

            // Match to the most similar keypoint in the radius
            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = INT_MAX;
            int bestIdx = -1;
            for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
            {
                const size_t idx = *vit;

                const cv::KeyPoint &kp = pKF2->mvKeysUn[idx];

                // Candidate octave must be compatible with the predicted one.
                if(kp.octave<nPredictedLevel-1 || kp.octave>nPredictedLevel)
                    continue;

                const cv::Mat &dKF = pKF2->mDescriptors.row(idx);

                const int dist = DescriptorDistance(dMP,dKF);

                if(dist<bestDist)
                {
                    bestDist = dist;
                    bestIdx = idx;
                }
            }

            if(bestDist<=TH_HIGH)
            {
                vnMatch1[i1]=bestIdx;   // record the KF1 -> KF2 candidate
            }
        }

        // Transform from KF2 to KF1 and search
        for(int i2=0; i2<N2; i2++)
        {
            MapPoint* pMP = vpMapPoints2[i2];

            if(!pMP || vbAlreadyMatched2[i2])
                continue;

            if(pMP->isBad())
                continue;

            // World -> camera 2, then camera 2 -> camera 1 via the Sim3.
            cv::Mat p3Dw = pMP->GetWorldPos();
            cv::Mat p3Dc2 = R2w*p3Dw + t2w;
            cv::Mat p3Dc1 = sR12*p3Dc2 + t12;

            // Depth must be positive
            if(p3Dc1.at<float>(2)<0.0)
                continue;

            const float invz = 1.0/p3Dc1.at<float>(2);
            const float x = p3Dc1.at<float>(0)*invz;
            const float y = p3Dc1.at<float>(1)*invz;

            const float u = fx*x+cx;
            const float v = fy*y+cy;

            // Point must be inside the image
            if(!pKF1->IsInImage(u,v))
                continue;

            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();
            const float dist3D = cv::norm(p3Dc1);

            // Depth must be inside the scale pyramid of the image
            if(dist3D<minDistance || dist3D>maxDistance)
                continue;

            // Compute predicted octave
            const int nPredictedLevel = pMP->PredictScale(dist3D,pKF1);

            // Search in a radius of 2.5*sigma(ScaleLevel)
            const float radius = th*pKF1->mvScaleFactors[nPredictedLevel];

            const vector<size_t> vIndices = pKF1->GetFeaturesInArea(u,v,radius);

            if(vIndices.empty())
                continue;

            // Match to the most similar keypoint in the radius
            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = INT_MAX;
            int bestIdx = -1;
            for(vector<size_t>::const_iterator vit=vIndices.begin(), vend=vIndices.end(); vit!=vend; vit++)
            {
                const size_t idx = *vit;

                const cv::KeyPoint &kp = pKF1->mvKeysUn[idx];

                // Candidate octave must be compatible with the predicted one.
                if(kp.octave<nPredictedLevel-1 || kp.octave>nPredictedLevel)
                    continue;

                const cv::Mat &dKF = pKF1->mDescriptors.row(idx);

                const int dist = DescriptorDistance(dMP,dKF);

                if(dist<bestDist)
                {
                    bestDist = dist;
                    bestIdx = idx;
                }
            }

            if(bestDist<=TH_HIGH)
            {
                vnMatch2[i2]=bestIdx;   // record the KF2 -> KF1 candidate
            }
        }

        // Cross-check: accept a pair only when the two directions agree,
        // i.e. KF1 point i1 matched KF2 keypoint idx2 AND the KF2 point at
        // idx2 matched back to KF1 keypoint i1.
        int nFound = 0;

        for(int i1=0; i1<N1; i1++)
        {
            int idx2 = vnMatch1[i1];

            if(idx2>=0)
            {
                int idx1 = vnMatch2[idx2];
                if(idx1==i1)
                {
                    vpMatches12[i1] = vpMapPoints2[idx2];
                    nFound++;
                }
            }
        }

        return nFound;
    }

    // Track-with-motion-model matcher: projects the map points observed in
    // LastFrame into CurrentFrame (using the current pose estimate) and pairs
    // them with nearby ORB keypoints.
    //
    // Parameters:
    //   CurrentFrame - frame being tracked; accepted matches are written into
    //                  CurrentFrame.mvpMapPoints (and matched keypoints may
    //                  get their class_id tagged from a detected object).
    //   LastFrame    - previous frame supplying map points and keypoints.
    //   mpMap        - map holding reconstructed objects (objs_real), used to
    //                  reject matches whose object labels disagree.
    //   th           - search-window scale factor.
    // Returns the number of matches surviving all checks, including the final
    // rotation-consistency filter.
    int ORBmatcher::SearchByProjection(Frame &CurrentFrame, const Frame &LastFrame,Map *mpMap, const float th)//trackmotion
    {
        int nmatches = 0;

        // Step 1: rotation histogram for the orientation-consistency check.
        vector<int> rotHist[HISTO_LENGTH];
        for(int i=0;i<HISTO_LENGTH;i++)
            rotHist[i].reserve(500);
        const float factor = 1.0f/HISTO_LENGTH;

        // Current-frame pose and camera centre.
        const cv::Mat Rcw = CurrentFrame.mTcw.rowRange(0,3).colRange(0,3);
        const cv::Mat tcw = CurrentFrame.mTcw.rowRange(0,3).col(3);
        const cv::Mat twc = -Rcw.t()*tcw;
        // Last-frame pose.
        const cv::Mat Rlw = LastFrame.mTcw.rowRange(0,3).colRange(0,3);
        const cv::Mat tlw = LastFrame.mTcw.rowRange(0,3).col(3);
        const cv::Mat tlc = Rlw*twc+tlw;

        // Did the camera move forward or backward by more than the baseline?
        // (Both false for negligible motion.)
        const bool bForward = tlc.at<float>(2)>CurrentFrame.mb ;
        const bool bBackward = -tlc.at<float>(2)>CurrentFrame.mb;

        // Step 2: iterate over every keypoint/map-point pair of the last frame.
        for(int i=0; i<LastFrame.N; i++)
        {
            MapPoint* pMP = LastFrame.mvpMapPoints[i];

            if(!pMP)                    // no map point bound to this keypoint
                continue;
            if(LastFrame.mvbOutlier[i]) // rejected by the pose optimization
                continue;

            // Step 3: project the map point onto the current image plane.
            cv::Mat x3Dw = pMP->GetWorldPos();
            cv::Mat x3Dc = Rcw*x3Dw+tcw;

            const float xc = x3Dc.at<float>(0);
            const float yc = x3Dc.at<float>(1);
            const float zc = x3Dc.at<float>(2);

            // Reject points at or behind the camera BEFORE dividing: the old
            // code computed 1/z first, so z==0 produced inf/NaN coordinates
            // that could slip through the bounds checks below.
            if(zc<=0.0f)
                continue;
            const float invzc = 1.0f/zc;

            float u = CurrentFrame.fx*xc*invzc+CurrentFrame.cx;
            float v = CurrentFrame.fy*yc*invzc+CurrentFrame.cy;

            if(u<CurrentFrame.mnMinX || u>CurrentFrame.mnMaxX)
                continue;
            if(v<CurrentFrame.mnMinY || v>CurrentFrame.mnMaxY)
                continue;

            // Step 4: gather candidate keypoints around the projection.
            // Larger octave = smaller image. Moving forward enlarges features
            // so they appear at coarser octaves (nCurOctave >= nLastOctave);
            // moving backward is the reverse.
            int nLastOctave = LastFrame.mvKeys[i].octave;

            // Window size grows with the octave's scale factor.
            float radius = th*CurrentFrame.mvScaleFactors[nLastOctave];

            vector<size_t> vIndices2;
            if(bForward)
                vIndices2 = CurrentFrame.GetFeaturesInArea(u,v, radius, nLastOctave);
            else if(bBackward)
                vIndices2 = CurrentFrame.GetFeaturesInArea(u,v, radius, 0, nLastOctave);
            else
                vIndices2 = CurrentFrame.GetFeaturesInArea(u,v, radius, nLastOctave-1, nLastOctave+1);

            if(vIndices2.empty())
                continue;

            // Step 5: keep the candidate with the smallest descriptor distance.
            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = 256;
            int bestIdx2 = -1;

            for(vector<size_t>::const_iterator vit=vIndices2.begin(), vend=vIndices2.end(); vit!=vend; vit++)
            {
                const size_t i2 = *vit;

                // Skip keypoints already bound to an observed map point.
                if(CurrentFrame.mvpMapPoints[i2])
                    if(CurrentFrame.mvpMapPoints[i2]->Observations()>0)
                        continue;

                if(CurrentFrame.mvuRight[i2]>0)
                {
                    // Stereo/RGB-D: the right-image coordinate must also lie
                    // within the search radius.
                    const float ur = u - CurrentFrame.mbf*invzc;
                    const float er = fabs(ur - CurrentFrame.mvuRight[i2]);
                    if(er>radius)
                        continue;
                }

                const cv::Mat &d = CurrentFrame.mDescriptors.row(i2);
                const int dist = DescriptorDistance(dMP,d);
                if(dist<bestDist)
                {
                    bestDist=dist;
                    bestIdx2=i2;
                }
            }

            if(bestDist>TH_HIGH)    // no acceptable descriptor match
                continue;

            // Tag the matched keypoint with the id of the detected object its
            // pixel falls into, provided its depth agrees with the object
            // centre. (Loop index renamed: the old code shadowed the outer
            // frame index 'i' here.)
            for (size_t iObj = 0; iObj < CurrentFrame.objects.size(); ++iObj) {
                if (CurrentFrame.mvKeysUn[bestIdx2].pt.x > CurrentFrame.objects[iObj]->left &&
                        CurrentFrame.mvKeysUn[bestIdx2].pt.x < CurrentFrame.objects[iObj]->right &&
                        CurrentFrame.mvKeysUn[bestIdx2].pt.y > CurrentFrame.objects[iObj]->top &&
                        CurrentFrame.mvKeysUn[bestIdx2].pt.y < CurrentFrame.objects[iObj]->bottom)
                {
                    float z = CurrentFrame.mvDepth[bestIdx2];
                    if (z > 0)
                    {
                        cv::Mat x3D = CurrentFrame.UnprojectStereo(bestIdx2);
                        float depth = x3D.at<float>(2, 0);
                        float middle_point = CurrentFrame.objects[iObj]->_Pos.at<float>(2, 0);
                        // fabs, not abs: the int overload of abs would truncate
                        // the float difference to 0 and accept every depth.
                        if (fabs(depth - middle_point) < 0.1) {
                            CurrentFrame.mvKeysUn[bestIdx2].class_id = CurrentFrame.objects[iObj]->mnId;
                            break;
                        }
                    }
                }
            }

            // If the map point and the keypoint carry different object ids,
            // reject the match when the two objects' centres are more than
            // 0.6 apart in space: they cannot be the same physical object.
            if(pMP->object_id!=CurrentFrame.mvKeysUn[bestIdx2].class_id&&pMP->object_id!=-1&&CurrentFrame.mvKeysUn[bestIdx2].class_id!=-1)
            {
                cv::Mat Mapppoint;
                cv::Mat Keypoint;
                bool bConflict=false;
                for (size_t k = 0; k < mpMap->objs_real.size(); ++k)
                {
                    if(mpMap->objs_real[k]->mnId==pMP->object_id)
                        Mapppoint=mpMap->objs_real[k]->_Pos;
                    if(mpMap->objs_real[k]->mnId==CurrentFrame.mvKeysUn[bestIdx2].class_id)
                        Keypoint=mpMap->objs_real[k]->_Pos;
                }
                if(!Mapppoint.empty()&&!Keypoint.empty())
                {
                    double dx=Mapppoint.at<float>(0,0)-Keypoint.at<float>(0,0);
                    double dy=Mapppoint.at<float>(1,0)-Keypoint.at<float>(1,0);
                    double dz=Mapppoint.at<float>(2,0)-Keypoint.at<float>(2,0);
                    if(sqrt(dx*dx+dy*dy+dz*dz)>0.6)
                        bConflict=true;
                }
                if(bConflict)
                    continue;
            }

            // Accept: bind the last frame's map point to the current keypoint.
            CurrentFrame.mvpMapPoints[bestIdx2]=pMP;
            nmatches++;

            if(mbCheckOrientation)
            {
                // Bin the angle difference between the paired keypoints.
                float rot = LastFrame.mvKeysUn[i].angle-CurrentFrame.mvKeysUn[bestIdx2].angle;
                if(rot<0.0)
                    rot+=360.0f;
                int bin = round(rot*factor);
                if(bin==HISTO_LENGTH)
                    bin=0;
                assert(bin>=0 && bin<HISTO_LENGTH);
                rotHist[bin].push_back(bestIdx2);
            }
        }

        // Step 6: rotation-consistency filter — keep only matches whose
        // rotation falls into the three most populated histogram bins
        // (ComputeThreeMaxima also drops runner-up bins below 10% of the top).
        if(mbCheckOrientation)
        {
            int ind1=-1;
            int ind2=-1;
            int ind3=-1;
            ComputeThreeMaxima(rotHist,HISTO_LENGTH,ind1,ind2,ind3);

            for(int i=0; i<HISTO_LENGTH; i++)
            {
                if(i!=ind1 && i!=ind2 && i!=ind3)
                {
                    // Everything in a non-dominant bin is an outlier.
                    for(size_t j=0, jend=rotHist[i].size(); j<jend; j++)
                    {
                        CurrentFrame.mvpMapPoints[rotHist[i][j]]=static_cast<MapPoint*>(NULL);
                        nmatches--;
                    }
                }
            }
        }
        return nmatches;
    }

    // Relocalisation matcher: projects the map points of candidate keyframe
    // pKF into CurrentFrame and searches a window around each projection for
    // the best ORB descriptor match (threshold ORBdist), skipping points in
    // sAlreadyFound. A rotation-consistency filter prunes the result.
    // Returns the surviving match count.
    int ORBmatcher::SearchByProjection(Frame &CurrentFrame, KeyFrame *pKF, const set<MapPoint*> &sAlreadyFound, const float th , const int ORBdist)
    {
        int nmatches = 0;

        // Current camera pose and centre in world coordinates.
        const cv::Mat Rcw = CurrentFrame.mTcw.rowRange(0,3).colRange(0,3);
        const cv::Mat tcw = CurrentFrame.mTcw.rowRange(0,3).col(3);
        const cv::Mat Ow = -Rcw.t()*tcw;

        // Rotation histogram for the orientation-consistency check.
        vector<int> rotHist[HISTO_LENGTH];
        for(int b=0;b<HISTO_LENGTH;b++)
            rotHist[b].reserve(500);
        const float factor = 1.0f/HISTO_LENGTH;

        const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();

        for(size_t i=0, iend=vpMPs.size(); i<iend; i++)
        {
            MapPoint* pMP = vpMPs[i];

            // Only live points that were not already matched elsewhere.
            if(!pMP || pMP->isBad() || sAlreadyFound.count(pMP))
                continue;

            // Project the point into the current image.
            cv::Mat x3Dw = pMP->GetWorldPos();
            cv::Mat x3Dc = Rcw*x3Dw+tcw;

            const float xc = x3Dc.at<float>(0);
            const float yc = x3Dc.at<float>(1);
            const float invzc = 1.0/x3Dc.at<float>(2);

            const float u = CurrentFrame.fx*xc*invzc+CurrentFrame.cx;
            const float v = CurrentFrame.fy*yc*invzc+CurrentFrame.cy;

            if(u<CurrentFrame.mnMinX || u>CurrentFrame.mnMaxX)
                continue;
            if(v<CurrentFrame.mnMinY || v>CurrentFrame.mnMaxY)
                continue;

            // The point's distance from the camera centre must lie inside
            // its scale-invariance band.
            cv::Mat PO = x3Dw-Ow;
            float dist3D = cv::norm(PO);

            const float maxDistance = pMP->GetMaxDistanceInvariance();
            const float minDistance = pMP->GetMinDistanceInvariance();

            if(dist3D<minDistance || dist3D>maxDistance)
                continue;

            int nPredictedLevel = pMP->PredictScale(dist3D,&CurrentFrame);

            // Candidate keypoints inside a scale-dependent window.
            const float radius = th*CurrentFrame.mvScaleFactors[nPredictedLevel];
            const vector<size_t> vIndices2 = CurrentFrame.GetFeaturesInArea(u, v, radius, nPredictedLevel-1, nPredictedLevel+1);
            if(vIndices2.empty())
                continue;

            // Best descriptor match among still-unmatched candidates.
            const cv::Mat dMP = pMP->GetDescriptor();

            int bestDist = 256;
            int bestIdx2 = -1;

            for(size_t ic=0; ic<vIndices2.size(); ic++)
            {
                const size_t i2 = vIndices2[ic];
                if(CurrentFrame.mvpMapPoints[i2])
                    continue;

                const cv::Mat &d = CurrentFrame.mDescriptors.row(i2);
                const int dist = DescriptorDistance(dMP,d);
                if(dist<bestDist)
                {
                    bestDist=dist;
                    bestIdx2=i2;
                }
            }

            if(bestDist<=ORBdist)
            {
                CurrentFrame.mvpMapPoints[bestIdx2]=pMP;
                nmatches++;

                if(mbCheckOrientation)
                {
                    // Bin the angle difference for the consistency filter.
                    float rot = pKF->mvKeysUn[i].angle-CurrentFrame.mvKeysUn[bestIdx2].angle;
                    if(rot<0.0)
                        rot+=360.0f;
                    int bin = round(rot*factor);
                    if(bin==HISTO_LENGTH)
                        bin=0;
                    assert(bin>=0 && bin<HISTO_LENGTH);
                    rotHist[bin].push_back(bestIdx2);
                }
            }
        }

        // Drop matches outside the three dominant rotation bins.
        if(mbCheckOrientation)
        {
            int ind1=-1;
            int ind2=-1;
            int ind3=-1;

            ComputeThreeMaxima(rotHist,HISTO_LENGTH,ind1,ind2,ind3);

            for(int b=0; b<HISTO_LENGTH; b++)
            {
                if(b==ind1 || b==ind2 || b==ind3)
                    continue;
                for(size_t j=0, jend=rotHist[b].size(); j<jend; j++)
                {
                    CurrentFrame.mvpMapPoints[rotHist[b][j]]=NULL;
                    nmatches--;
                }
            }
        }

        return nmatches;
    }

    // Finds the indices of the three fullest bins of an L-bin histogram.
    // ind2/ind3 are reset to -1 when their bins hold fewer than 10% of the
    // winner's votes; output parameters are untouched for empty bins, so
    // callers should initialise them to -1.
    void ORBmatcher::ComputeThreeMaxima(vector<int>* histo, const int L, int &ind1, int &ind2, int &ind3)
    {
        // Sizes of the three fullest bins seen so far, in descending order.
        int best1 = 0;
        int best2 = 0;
        int best3 = 0;

        for(int bin=0; bin<L; bin++)
        {
            const int votes = histo[bin].size();
            if(votes>best1)
            {
                // New overall winner: shift the previous top two down.
                best3 = best2;
                best2 = best1;
                best1 = votes;
                ind3 = ind2;
                ind2 = ind1;
                ind1 = bin;
            }
            else if(votes>best2)
            {
                best3 = best2;
                best2 = votes;
                ind3 = ind2;
                ind2 = bin;
            }
            else if(votes>best3)
            {
                best3 = votes;
                ind3 = bin;
            }
        }

        // Discard runner-up bins holding fewer than 10% of the winner's votes.
        if(best2<0.1f*(float)best1)
        {
            ind2 = -1;
            ind3 = -1;
        }
        else if(best3<0.1f*(float)best1)
        {
            ind3 = -1;
        }
    }

    // Hamming distance between two 256-bit ORB descriptors, processed as
    // eight 32-bit words with a parallel (SWAR) bit count.
    int ORBmatcher::DescriptorDistance(const cv::Mat &a, const cv::Mat &b)
    {
        const int *wordsA = a.ptr<int32_t>();
        const int *wordsB = b.ptr<int32_t>();

        int dist = 0;

        for(int w=0; w<8; w++)
        {
            // Differing bits of this word; popcount them without a loop
            // (classic "bit hacks" parallel count).
            unsigned int bits = wordsA[w] ^ wordsB[w];
            bits = bits - ((bits >> 1) & 0x55555555);
            bits = (bits & 0x33333333) + ((bits >> 2) & 0x33333333);
            dist += (((bits + (bits >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
        }

        return dist;
    }
}
