/**
* This file is part of MultiCol-SLAM
*
* Copyright (C) 2015-2016 Steffen Urban <urbste at googlemail.com>
* For more information see <https://github.com/urbste/MultiCol-SLAM>
*
* MultiCol-SLAM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* MultiCol-SLAM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with MultiCol-SLAM . If not, see <http://www.gnu.org/licenses/>.
*/

/*
* MultiCol-SLAM is based on ORB-SLAM2 which was also released under GPLv3
* For more information see <https://github.com/raulmur/ORB_SLAM2>
* Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
*/

#include <opencv2/opencv.hpp>


#include "cTracking.h"


#include "cORBmatcher.h"
#include "cMultiFramePublisher.h"
#include "cConverter.h"
#include "cMap.h"
#include "cMultiInitializer.h"
#include "cOptimizer.h"
#include "cSim3Solver.h"
// #include "cam_system_omni.h"
#include "direct_method.h"

#include "misc.h"
#ifdef _WIN32
#include <conio.h>
#endif
#include <chrono>
#include <iomanip>   // std::put_time
#include <iostream>
#include <memory>

#include <mysql/mysql.h>
#include <sstream>

namespace MultiColSLAM
{
using namespace std;

/**
 * @brief cTracking 类构造函数
 *
 * 初始化 cTracking 类对象，设置相关成员变量的初始值，加载 SLAM 设置等。
 *
 * @param pSys cSystem 对象指针，用于系统相关操作
 * @param pVoc ORBVocabulary 对象指针，用于 ORB 词汇表的创建和管理
 * @param pFramePublisher cMultiFramePublisher 对象指针，用于帧的发布
 * @param pMapPublisher cMapPublisher 对象指针，用于地图的发布
 * @param pMap cMap 对象指针，用于地图的管理和操作
 * @param pKFDB cMultiKeyFrameDatabase 对象指针，用于关键帧数据库的管理
 * @param camSystem_ cMultiCamSys_ 类型的相机系统对象
 * @param settingsPath_ 设置文件路径
 */
/**
 * @brief cTracking constructor.
 *
 * Wires the camera system into the frame/map publishers, initializes the
 * per-camera bookkeeping (tracked-point counters, active flags, state
 * history), reads the SLAM settings file and creates one mdBRIEF extractor
 * pair (regular + denser initialization extractor) per camera.
 *
 * @param pSys            cSystem pointer for system-level operations
 * @param pVoc            ORB vocabulary used for BoW conversion
 * @param pFramePublisher frame publisher/drawer
 * @param pMapPublisher   map publisher/drawer
 * @param pMap            global map
 * @param pKFDB           keyframe database (relocalisation / loop closing)
 * @param camSystem_      calibrated multi-camera system (copied into the tracker)
 * @param settingsPath_   path to the OpenCV YAML/XML settings file
 */
cTracking::cTracking(
	cSystem* pSys,
	ORBVocabulary* pVoc,
	cMultiFramePublisher *pFramePublisher,
	cMapPublisher *pMapPublisher,
	cMap *pMap,
	cMultiKeyFrameDatabase* pKFDB,
	cMultiCamSys_ camSystem_,
	std::string settingsPath_) :
	mState(NO_IMAGES_YET),
	mpORBVocabulary(pVoc),
	mpSystem(pSys),
	mpFramePublisher(pFramePublisher),
	mpMapPublisher(pMapPublisher),
	mpKeyFrameDB(pKFDB),
	mpMap(pMap),
	mnLastRelocFrameId(0),
	mbPublisherStopped(false),
	mbReseting(false),
	mbForceRelocalisation(false),
	mbMotionModel(false),
	settingsPath(settingsPath_),
	camSystem(camSystem_),
	curBaseline2MKF(0.0),
	finished(false),
	grab(true),
	loopAndMapperSet(false)
{

	pFramePublisher->SetMCS(&camSystem); // hand the camera system to the frame publisher
	mpMapPublisher->SetMCS(camSystem.Get_All_M_c()); // hand the per-camera poses to the map publisher
	// number of cameras in the rig
	numberCameras = static_cast<int>(camSystem.Get_All_M_c().size()); // camera count
	// numberCameras = 2;
	// activeCameras = static_cast<std::vector<bool>>(GetCameraActive());
	// Per-camera bookkeeping: zero tracked-point counters, mark every camera
	// active and fill its state history with 1 ("good").
	historyState.resize(numberCameras);
	for (int i = 0; i < numberCameras; i++) {
		pointsNumOfCam.push_back(0);
		activeCameras.push_back(true);
		for (int j = 0; j < historySize; j++) {
			historyState[i].push_back(1);
		}
	}

	/////////////
	// load slam settings
	/////////////
	cv::FileStorage slamSettings(settingsPath, cv::FileStorage::READ);
	double fps = slamSettings["Camera.fps"]; // camera frame rate from the settings file
    if (fps==0)
        fps = 25; // fall back to 25 fps when the setting is missing or zero

	// Derive keyframe-insertion / relocalisation-check frame windows from the fps.
    // Max/Min Frames to insert keyframes and to check relocalisation
	mMinFrames = cvRound(fps / 3);
    mMaxFrames = cvRound(2*fps / 3);

    std::cout << "Camera Parameters: " << endl;
	std::cout << "- fps: " << fps << endl;

	// Color-order flag (RGB vs BGR) from the settings file.
	int nRGB = slamSettings["Camera.RGB"];
    mbRGB = nRGB;

    if(mbRGB)
		std::cout << "- color order: RGB (ignored if grayscale)" << endl;
    else
		std::cout << "- color order: BGR (ignored if grayscale)" << endl;

	// Feature-extractor parameters.
    // Load ORB parameters
	int featDim = (int)slamSettings["extractor.descSize"];
	int nFeatures = (int)slamSettings["extractor.nFeatures"];
	float fScaleFactor = slamSettings["extractor.scaleFactor"];
	int nLevels = (int)slamSettings["extractor.nLevels"];
	int fastTh = (int)slamSettings["extractor.fastTh"];
	int Score = (int)slamSettings["extractor.nScoreType"];

	assert(Score == 1 || Score == 0); // 0 = HARRIS, 1 = FAST (see printout below)

	this->use_mdBRIEF = false;
	bool learnMasks = false;

	int usemd = (int)slamSettings["extractor.usemdBRIEF"];
	this->use_mdBRIEF = static_cast<bool>(usemd);
	int masksL = (int)slamSettings["extractor.masks"];
	learnMasks = static_cast<bool>(masksL);

	// One extractor pair per camera in the multi-camera system.
	mp_mdBRIEF_extractorOct.resize(numberCameras);
	mp_mdBRIEF_init_extractorOct.resize(numberCameras);

	int useAgast = (int)slamSettings["extractor.useAgast"];
	int fastAgastType = (int)slamSettings["extractor.fastAgastType"];
	int descSize = (int)slamSettings["extractor.descSize"];

	assert(descSize == 16 || descSize == 32 || descSize == 64); // descriptor size in bytes

	std::cout << endl << "Extractor Parameters: " << endl;
	std::cout << "- Number of Features: " << nFeatures << endl;
	std::cout << "- Scale Levels: " << nLevels << endl;
	std::cout << "- Scale Factor: " << fScaleFactor << endl;
	std::cout << "- Fast Threshold: " << fastTh << endl;
	std::cout << "- Learn Masks: " << learnMasks << endl;
	std::cout << "- Descriptor Size (byte): " << descSize << endl;
	std::cout << "- Use AGAST: " << useAgast << endl;
	std::cout << "- FAST/AGAST Type: " << fastAgastType << endl;

	if (Score == 0)
		std::cout << "- Score: HARRIS" << endl;
	else
		std::cout << "- Score: FAST" << endl;

	// Create the mdBRIEF extractors for every camera. The init extractor uses
	// twice the features and a lower FAST threshold (5) for denser detection.
	// NOTE(review): the extractors are allocated with `new` and stored as raw
	// pointers; no matching delete is visible in this file - confirm ownership
	// and cleanup elsewhere (otherwise this leaks on destruction).
	for (int c = 0; c < numberCameras; ++c)
	{
		mp_mdBRIEF_extractorOct[c] = new mdBRIEFextractorOct(nFeatures,
			fScaleFactor, nLevels, 25, 0, Score,
			32, fastTh, (bool)useAgast, fastAgastType,this->use_mdBRIEF, learnMasks, descSize);

		mp_mdBRIEF_init_extractorOct[c] = new mdBRIEFextractorOct(2 * nFeatures,
			fScaleFactor, nLevels, 25, 0, Score,
			32, 5, (bool)useAgast, fastAgastType, this->use_mdBRIEF, learnMasks, descSize);
	}

	
    // ORB extractor for initialization
    // Initialization uses only points from the finest scale level
	// Motion-model flag from the settings file.
	int nMotion = slamSettings["UseMotionModel"];
    mbMotionModel = nMotion;

	// Report the motion-model state.
    if(mbMotionModel)
    {
        mVelocity = cv::Matx44d::eye();
		std::cout << endl << "Motion Model: Enabled" << endl << endl;
    }
    else
		std::cout << endl << "Motion Model: Disabled (not recommended, change settings UseMotionModel: 1)" << endl << endl;

	//allPoses = std::vector<cv::Matx61d>(nrImages2Track);
	//allPosesBool = std::vector<bool>(nrImages2Track);
	//nrTrackedPts = std::vector<int>(nrImages2Track);
	//inlierRatio = std::vector<double>(nrImages2Track);
}

// Wire in the local mapping thread this tracker feeds keyframes to.
void cTracking::SetLocalMapper(cLocalMapping *pLocalMapper)
{
	this->mpLocalMapper = pLocalMapper;
}

// Wire in the loop-closing thread used by the tracker.
void cTracking::SetLoopClosing(cLoopClosing *pLoopClosing)
{
	this->mpLoopClosing = pLoopClosing;
}

// Replace the keyframe database used for relocalisation queries.
void cTracking::SetKeyFrameDatabase(cMultiKeyFrameDatabase *pKFDB)
{
	this->mpKeyFrameDB = pKFDB;
}

// Wire in the viewer used for visualization.
void cTracking::SetViewer(cViewer *pViewer)
{
	this->mpViewer = pViewer;
}

/**
 * @brief 获取摄像头数量
 *
 * 返回当前对象中的摄像头数量。
 *
 * @return 返回摄像头数量，类型为int。
 */
int cTracking::GetNrCams()
{
	// return 2;
	// 原始cam
	return this->numberCameras;
}
// 抓取图像集并进行处理
/**
 * @brief Entry point for a new synchronized image set: builds the current
 *        multi-frame, wires matcher properties once, and runs tracking.
 *
 * Fix: the frame-construction time was computed in seconds
 * (std::chrono::duration<double>::count()) but printed with an "ms" label;
 * it is now converted to milliseconds so the printed value matches the unit.
 *
 * @param imgSet    one image per camera of the rig
 * @param timestamp capture timestamp of the set
 * @param MultiSLAM system handle, forwarded to Track() (used on tracking loss)
 * @return pose of the current frame after tracking
 */
cv::Matx44d cTracking::GrabImageSet(const std::vector<cv::Mat>& imgSet,
	const double& timestamp, MultiColSLAM::cSystem &MultiSLAM)
{
	std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();

	// Copy the input images (single copy-construction instead of the former
	// resize-then-assign, which allocated twice).
	std::vector<cv::Mat> convertedImages = imgSet;

	// Choose the extractor set by tracker state: once WORKING/LOST the regular
	// extractors are used, otherwise the denser initialization extractors.
	if (mState == WORKING || mState == LOST)
		mCurrentFrame = cMultiFrame(convertedImages,
		timestamp, mp_mdBRIEF_extractorOct, mpORBVocabulary, 
		camSystem, imgCounter - 1);
	else
		mCurrentFrame = cMultiFrame(convertedImages,
		timestamp, mp_mdBRIEF_init_extractorOct, mpORBVocabulary, 
		camSystem, imgCounter - 1);

	// One-time wiring: the mapper and loop closer need the descriptor
	// dimensions and mask flag of the frames they will receive.
	if (!loopAndMapperSet)
	{
		mpLocalMapper->SetMatcherProperties(mCurrentFrame.DescDims(),
			mCurrentFrame.HavingMasks());
		mpLoopClosing->SetMatcherProperties(mCurrentFrame.DescDims(),
			mCurrentFrame.HavingMasks());
		loopAndMapperSet = true;
	}

	std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
	// duration<double> counts seconds; convert to milliseconds to match the label.
	double ttrack = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count() * 1000.0;
	std::cout << "数据处理时间: " << ttrack << "ms"  <<  std::endl;

	// Run the tracking state machine on the new frame.
	Track(MultiSLAM); 
	// Return the (possibly updated) pose of the current frame.
	return mCurrentFrame.GetPose();
}

// Set the active flag of camera c; out-of-range indices are silently ignored.
void cTracking::SetCameraActive(int c, bool active) {
	const bool validIndex = (c >= 0) && (c < numberCameras);
	if (validIndex)
		activeCameras[c] = active;
}

// Query the active flag of camera c; invalid indices report false.
bool cTracking::IsCameraActive(int c) {
	if (c < 0 || c >= numberCameras)
		return false;
	return activeCameras[c];
}

// 获取所有相机的活跃状态
std::vector<bool> cTracking::GetCameraActive() {
	return activeCameras;
}

void saveTrajectoryToDatabase(const std::string filePath, std::tm timestamp);
	

void saveTrajectoryToDatabase(const std::string filePath, std::tm timestamp) {
    
	const string host = "localhost";
	const string user = "root";
	const string password = "lsr666966";
	const string db = "slam";
	const unsigned int port = 3306;

	MYSQL* conn = mysql_init(nullptr);
	if (!mysql_real_connect(conn, host.c_str(), user.c_str(), password.c_str(), db.c_str(), port, NULL, 0)) {
		std::cerr << "连接数据库失败" << std::endl;
		return;
	}

	std::ostringstream oss;
	oss << std::put_time(&timestamp, "%Y-%m-%d %H:%M:%S");
	string gTime = oss.str();
	string sql = "INSERT INTO trajInfo (name, gTime) VALUES ('" + filePath + "', '" + gTime + "')";
	if (mysql_query(conn, sql.c_str())) {
		std::cerr << "插入数据失败: " << mysql_error(conn) << std::endl;
	}

}

/**
 * @brief Maintains the adaptive fusion weights for the feature-based and
 *        direct-method pose estimates and returns the selected pose.
 *
 * Updates the member weights w_d (direct) and w_f = 1 - w_d from the
 * direct-method residual and the match count, and logs both. The weighted
 * fusion itself is currently disabled: the feature-based pose is returned
 * unchanged (see the return statement).
 *
 * Fixes: removed the unused `match_ratio` computation, which divided by
 * `total_features` (division by zero when no features were extracted) and
 * never influenced the weights; the dead `pose_fused` computation is kept
 * only as a commented template for re-enabling fusion.
 *
 * @param pose_feature    pose estimated by the feature-based tracker
 * @param pose_direct     pose estimated by the direct method
 * @param num_matches     number of tracked feature matches
 * @param total_features  total extracted features (currently unused; kept for
 *                        interface compatibility)
 * @param direct_residual photometric residual of the direct method
 * @return the feature-based pose (fusion disabled)
 */
cv::Matx44d cTracking::AdaptiveWeightedFusion(const cv::Matx44d& pose_feature, 
	const cv::Matx44d& pose_direct,
	int num_matches,
	int total_features,
	double direct_residual) {

	// Thresholds on the direct-method residual and the match count.
	const double residual_threshold = 40000.0;   // direct-method residual threshold
	const double match_ratio_threshold = 50;     // feature match-count threshold

	cout << "direct_residual: " << direct_residual << ", num_matches: " << num_matches << ", total_features: " << total_features << endl;

	// Dynamic weight update: trust the direct method only when its residual is
	// low; grow its weight slowly when features are plentiful as well.
	if (direct_residual < residual_threshold && num_matches < match_ratio_threshold) {
		w_d = 0.2;
	} else if (direct_residual >= residual_threshold && num_matches >= match_ratio_threshold) {
		w_d = 0.0;
	} else if (direct_residual < residual_threshold) {
		w_d += 0.01;
	} else {
		w_d = 0.0;
	}
	w_f = 1.0 - w_d;

	cout << "w_d: " << w_d << ", w_f: " << w_f << endl;

	// Weighted fusion, kept for when fusion is re-enabled:
	// cv::Matx44d pose_fused = w_d * pose_direct + w_f * pose_feature;
	return pose_feature; // fusion disabled: return the feature-based pose
}

/**
 * @brief Main per-frame tracking dispatcher (state machine).
 *
 * While NOT_INITIALIZED/INITIALIZING it drives map initialization; otherwise
 * it estimates the current pose (motion model, previous frame, or
 * relocalisation), tracks the local map, decides on keyframe insertion,
 * handles the LOST state (saves the trajectory, attempts recovery, resets a
 * young map), updates the motion-model velocity and the frame drawer.
 *
 * @param MultiSLAM system handle; used to save the trajectory when tracking is lost
 * @return always true (tracking success is reflected in mState, not the return value)
 */
bool cTracking::Track(MultiColSLAM::cSystem &MultiSLAM)
{
    // Depending on the state of the Tracker we perform different tasks
    if (mState == NO_IMAGES_YET)
        mState = NOT_INITIALIZED;

    mLastProcessedState = mState;

    if (mState == NOT_INITIALIZED)
    {
        FirstInitialization();
    }
    else if (mState == INITIALIZING)
    {
        Initialize();
    }
    else
    {
        // System is initialized. Track Frame.
		bool bOK = false;
		//if (_kbhit())
		//{
		//	int ch;
		//	ch = _getch();
		//	if (ch == 114)
		//	{
		//		ForceRelocalisation();
		//	}
		//}

		// //////////////////////////////////////////////////////////
		// Multi-camera switching strategy
		// 1. Before tracking, reset the per-camera feature-point counters.
		for (size_t i = 0; i < numberCameras; ++i) {
			pointsNumOfCam[i] = 0;
		}
		// Count the current frame's non-outlier keypoints per camera.
		// NOTE(review): only the outlier flag is checked here, not whether a
		// map point is actually associated at index i - confirm intended.
		for (size_t i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i) {
			if (!mCurrentFrame.mvbOutlier[i]) {
				int idxC = mCurrentFrame.keypoint_to_cam.find(i)->second; // camera owning keypoint i
				++pointsNumOfCam[idxC];
			}
		}

		// // 2. Iterate the cameras and set the active state from the counts.
		// // todo: is the point-count threshold reasonable?
		// // The initialization code selects a bestCam - maybe reuse that here?

		// for (int c = 0; c < numberCameras; ++c) {
		// 	cout << "pointsNumOfCam[" << c << "]：" << pointsNumOfCam[c] << endl;
		// 	if (pointsNumOfCam[c] < 404)
		// 		SetCameraActive(c, false);
		// 	else
		// 		SetCameraActive(c, true);
		// }

		// ////////////////////////////////////////////////////////////////////

        // Initial Camera Pose Estimation from Previous Frame (Motion Model or Coarse) or Relocalisation
        if (mState == WORKING && !RelocalisationRequested())
        {
			// Track against the previous frame when the motion model is off,
			// the map is still tiny, or we relocalised very recently.
			if (!mbMotionModel || 
				 mpMap->KeyFramesInMap() < 2  ||
				 mCurrentFrame.mnId < mnLastRelocFrameId + 2)
			{
				cout << "从前一帧1..." << endl;
				bOK = TrackPreviousFrame();
			}
            else
            {
				cout << "运动模型..." << endl;
				bOK = TrackWithMotionModel();
				if (!bOK) {
					// Motion-model tracking failed - fall back to the previous frame.
					cout << "从前一帧2..." << endl;
					bOK = TrackPreviousFrame();		
				}
            }
        }
        else
        {
            bOK = Relocalisation();
        }
		// If we have an initial estimation of the camera pose and matching. Track the local map.
		if (bOK)
		{
			cout << "跟踪局部地图..." << endl;
			bOK = TrackLocalMap();
		}

        // If tracking were good, check if we insert a keyframe
        if (bOK)
        {
			// Publish the current camera pose.
            mpMapPublisher->SetCurrentCameraPose(mCurrentFrame.GetPose());
			// count tracked points in each cam
			//CountNumberTrackedPointsPerCam();
			// Insert a new keyframe if the heuristics ask for one.
			if (NeedNewKeyFrame())
				CreateNewKeyFrame();
            // We allow points with high innovation (considererd outliers by the Huber Function)
            // pass to the new keyframe, so that bundle adjustment will finally decide
            // if they are outliers or not. We don't want next frame to estimate its position
            // with those points so we discard them in the frame.
			// Detach map points flagged as outliers from the current frame so
			// the next frame will not use them for pose estimation.
            for(size_t i=0; i < mCurrentFrame.mvbOutlier.size();i++)
            {
				// Keypoint i is an outlier with an associated map point:
                if(mCurrentFrame.mvpMapPoints[i] && mCurrentFrame.mvbOutlier[i])
                    mCurrentFrame.mvpMapPoints[i] = NULL; // drop the association; BA decides later
            }
        }

        if(bOK)
            mState = WORKING;
        else
            mState = LOST;

		cout << "mState：" << mState << endl;
        // Reset if the camera get lost soon after initialization
        if (mState == LOST)
        {	
			cout << "LOST..." << endl;
			// Save the camera trajectory under a "trajYYYYMMDDHHMMSS.txt" name.
			auto now = std::chrono::system_clock::now();
			auto time_t_now = std::chrono::system_clock::to_time_t(now);
			std::tm tm_now = *std::localtime(&time_t_now);

			// Format the timestamp as "YYYYMMDDHHMMSS".
			std::ostringstream oss;
			oss << std::put_time(&tm_now, "%Y%m%d%H%M%S");
			std::string timestamp = oss.str();

			std::string directory = "/home/zc/slam/code/my-proj/slam/result/";
			std::string fileName = "traj" + timestamp + ".txt";
			std::string fullPath = directory + fileName;
			MultiSLAM.SaveMKFTrajectoryLAFIDA(fullPath);
			// Record the trajectory file in the database.
			saveTrajectoryToDatabase(fileName, tm_now);

			// (added) Try to recover tracking with the remaining active cameras.
			// NOTE(review): TrackLocalMap() takes no camera argument, so every
			// iteration performs the same full-map tracking attempt - confirm
			// this per-camera loop behaves as intended.
			for (int c = 0; c < numberCameras; ++c) {
				if (IsCameraActive(c)) {
					// Attempt tracking with this active camera.
					if (TrackLocalMap()) {
						mState = WORKING; // recovered - resume normal tracking
						return true;
					}
				}
			}

            if (mpMap->KeyFramesInMap() < 2) // fewer than 2 keyframes in the map
            {
				cout << "在地图中关键帧数量小于2个...进入重置.." <<endl;
                Reset();
                return true;
            }
        }

        // Update motion model
        if (mbMotionModel)
        {
			if (bOK)
            {
				// Velocity = inv(lastPose) * currentPose, i.e. the relative
				// transform from the last frame to the current one.
				cv::Matx44d LastTwc = cConverter::invMat(mLastFrame.GetPose());
				//cv::Matx44d current = mCurrentFrame.GetPose().inv();
				//mVelocity = current * LastTwc;
				mVelocity = LastTwc*mCurrentFrame.GetPose(); // original formulation
				// mVelocity = mCurrentFrame.GetPose() * LastTwc;
            }
            else
                mVelocity = cv::Matx44d::eye();
        }

        mLastFrame = cMultiFrame(mCurrentFrame);
     }       

    // Update drawer
    mpFramePublisher->Update(this);
	return true;
}

void cTracking::CountNumberTrackedPointsPerCam()
{
	int nrCams = camSystem.GetNrCams();
	// 初始化每个相机的跟踪点数向量
	nbTrackedPtsInCam = std::vector<int>(nrCams);
	// 初始化跟踪点比例向量
	nbTrackedRatios = std::vector<double>();
	// 初始化每个相机的跟踪点数为0
	for (int c = 0; c < nrCams; ++c)
		nbTrackedPtsInCam[c] = 0;

	// 遍历当前帧的地图点
	for (size_t i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i)
		// 如果当前地图点存在
		if (mCurrentFrame.mvpMapPoints[i])
			// 增加对应相机的跟踪点数
			++nbTrackedPtsInCam[mCurrentFrame.keypoint_to_cam.find(i)->second];

	// 计算比例
	// calc ratios
	for (int c1 = 0; c1 < nrCams; ++c1)
	{
		for (int c2 = c1 + 1; c2 < nrCams; ++c2)
		{
			if ((double)nbTrackedPtsInCam[c2] > 0)
				// 将c1相机与c2相机的跟踪点数比例加入向量
				nbTrackedRatios.push_back((double)
					nbTrackedPtsInCam[c1] / (double)nbTrackedPtsInCam[c2]);
			if ((double)nbTrackedPtsInCam[c1] > 0)
				// 将c2相机与c1相机的跟踪点数比例加入向量
				nbTrackedRatios.push_back((double)
					nbTrackedPtsInCam[c2] / (double)nbTrackedPtsInCam[c1]);
		}
	}
}

void cTracking::FirstInitialization()
{
    //We ensure a minimum ORB features to continue, otherwise discard frame
    if (mCurrentFrame.mvKeys.size() > 100)
    {
		fill(mvIniMatches.begin(), mvIniMatches.end(), -1);
		mInitialFrame = cMultiFrame(mCurrentFrame);
		mLastFrame = cMultiFrame(mCurrentFrame);
        mvbPrevMatched.resize(mCurrentFrame.mvKeys.size());
		for (size_t i = 0; i < mCurrentFrame.mvKeys.size(); ++i)
            mvbPrevMatched[i] = 
				cv::Vec2d(mCurrentFrame.mvKeys[i].pt.x, mCurrentFrame.mvKeys[i].pt.y);

		// 创建多相机初始化实例对象
		mpInitializer = new cMultiInitializer(mCurrentFrame, 1.0, 200);
		mState = INITIALIZING;   
    }
}

/**
 * @brief Attempts map initialization from the initial and current frame.
 *
 * Resets the state to NOT_INITIALIZED when the current frame has too few
 * keypoints or too few correspondences with the initial frame. Otherwise runs
 * the multi-camera initializer and, on success, invalidates the matches that
 * could not be triangulated and creates the initial map.
 */
void cTracking::Initialize()
{	
	// Guard: too few keypoints in the current frame - restart initialization.
	if (mCurrentFrame.mvKeys.size() <= 50)
	{
		fill(mvIniMatches.begin(), mvIniMatches.end(), -1);
		mState = NOT_INITIALIZED;
		return;
	}

	// Search correspondences between the initial and the current frame.
	cORBmatcher matcher(0.9, checkOrientation, mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());
	int nmatches = matcher.SearchForInitialization(mInitialFrame,
		mCurrentFrame,
		mvbPrevMatched,
		mvIniMatches,50);

	// Guard: not enough correspondences - restart initialization.
	if (nmatches < 80)
	{
		mState = NOT_INITIALIZED;
		return;
	}

	cv::Matx33d Rcw;             // current camera rotation
	cv::Vec3d tcw;               // current camera translation
	vector<bool> vbTriangulated; // per-match triangulation success flags
	int leadingCam = 0;

	// Run the initializer; bail out (keeping the state) if it fails.
	if (!mpInitializer->Initialize(mCurrentFrame, mvIniMatches, Rcw, tcw,
		mvIniP3D, vbTriangulated, leadingCam))
		return;

	// Invalidate matches that could not be triangulated.
	for (size_t i = 0, iend = mvIniMatches.size(); i < iend; ++i)
	{
		if (mvIniMatches[i] >= 0 && !vbTriangulated[i])
		{
			mvIniMatches[i] = -1;
			--nmatches;
		}
	}

	// Build the initial map from the surviving matches.
	CreateInitialMap(Rcw, tcw, leadingCam);
}

/**
 * @brief Builds the initial map from the initial and current frame.
 *
 * Derives the multi-camera system poses from the single-camera initialization
 * result, creates the first two keyframes, the triangulated map points and
 * their observations, searches additional observations of those points in the
 * non-leading cameras (descriptor matching + epipolar check), runs several
 * global bundle adjustments and hands both keyframes to the local mapper.
 * On success the tracker state becomes WORKING.
 *
 * @param Rcw        rotation of the current leading camera w.r.t. world
 * @param tcw        translation of the current leading camera w.r.t. world
 * @param leadingCam index of the camera used by the initializer
 */
void cTracking::CreateInitialMap(cv::Matx33d &Rcw, cv::Vec3d &tcw, int leadingCam)
{
	// Set Frame Poses
	// we have to calculate the multi cam sys poses from the single camera poses
	cv::Matx44d invMc = mInitialFrame.camSystem.Get_M_t() *
		cConverter::invMat(mInitialFrame.camSystem.Get_M_c(leadingCam));
	cv::Matx44d Mc = mInitialFrame.camSystem.Get_M_c(leadingCam);
	//cv::Matx44d invMc = Mc.inv();
	mInitialFrame.SetPose(invMc);  // pose of the initial frame
	cv::Matx44d invCurr = cConverter::Rt2Hom(Rcw, tcw)*invMc;
	mCurrentFrame.SetPose(invCurr); // inverse!

	// Create KeyFrames from the initial and the current frame.
	cMultiKeyFrame* pKFini = new cMultiKeyFrame(mInitialFrame, mpMap, mpKeyFrameDB);
	pKFini->imageId = mInitialFrame.GetImgCnt();
	cMultiKeyFrame* pKFcur = new cMultiKeyFrame(mCurrentFrame, mpMap, mpKeyFrameDB);
	pKFcur->imageId = mCurrentFrame.GetImgCnt();

	pKFini->ComputeBoW();  // bag-of-words vector of the initial keyframe
	pKFcur->ComputeBoW();  // bag-of-words vector of the current keyframe

	// Insert KFs in the map
	mpMap->AddKeyFrame(pKFini);
	mpMap->AddKeyFrame(pKFcur);

    // Create MapPoints and asscoiate to keyframes
	for (size_t i = 0; i < mvIniMatches.size(); ++i)
    {
        if (mvIniMatches[i] < 0)
            continue;

        //Create MapPoint from the triangulated world position.
        cv::Vec3d worldPos(mvIniP3D[i]);

        cMapPoint* pMP = new cMapPoint(worldPos,pKFcur,mpMap);
		// assign mappoint to keyframes
        pKFini->AddMapPoint(pMP,i);
        pKFcur->AddMapPoint(pMP,mvIniMatches[i]);
		// add observation to mappoints
        pMP->AddObservation(pKFini,i);
        pMP->AddObservation(pKFcur,mvIniMatches[i]);
		// compute some statistics about the mappoint
        pMP->ComputeDistinctiveDescriptors(pKFcur->HavingMasks());
		cv::Mat desc = pMP->GetDescriptor();
		pMP->UpdateCurrentDescriptor(desc);

        pMP->UpdateNormalAndDepth();

        //Fill Current Frame structure
        mCurrentFrame.mvpMapPoints[mvIniMatches[i]] = pMP;

        //Add to Map
        mpMap->AddMapPoint(pMP);

    }

    // Update Connections between the two keyframes (covisibility graph).
    pKFini->UpdateConnections();
    pKFcur->UpdateConnections();

	// Abort and reset if the current keyframe tracks too few map points.
    if(pKFcur->TrackedMapPoints() < 15)
    {
        Reset();
        return;
    }

	vector<cMapPoint*> vpAllMapPoints1 = pKFini->GetMapPointMatches();
	vector<cMapPoint*> vpAllMapPoints2 = pKFcur->GetMapPointMatches();

	// First global bundle adjustment on the two-keyframe map.
	cOptimizer::GlobalBundleAdjustment(mpMap, true);

	cORBmatcher tempMatcher(0.8, checkOrientation, 
		pKFcur->DescDims(), pKFcur->HavingMasks());
	vector<double> scales;
	vector<cv::Vec3d> ptsBefore;
	vector<cv::Vec3d> ptsAfter;
	// Search additional observations of the initial keyframe's map points in
	// all cameras except the leading one.
	for (int c = 0; c < pKFcur->camSystem.GetNrCams(); ++c)
	{
		// skip the same cam
		// we just want to search in all other cams
		if (c == leadingCam)
			continue;

		// Relative orientation from the leading camera to camera c.
		cv::Matx44d relOri = Mc * cConverter::invMat(mInitialFrame.camSystem.Get_M_c(c));
		cv::Matx33d Rrel = cConverter::Hom2R(relOri);
		cv::Vec3d trel = cConverter::Hom2T(relOri);
		// essential matrix for epipolar test
		cv::Matx33d E12 = ComputeE(relOri);

		// loop through map points, projecting each into camera c
		for (size_t iMP = 0; iMP < vpAllMapPoints1.size(); ++iMP)
		{
			cMapPoint* pMP = vpAllMapPoints1[iMP];

			// NOTE(review): GetKeyPoint(iMP) is called before the null check
			// below - harmless if GetKeyPoint is index-only, but confirm.
			cv::KeyPoint ptMatch1 = pKFini->GetKeyPoint(iMP);

			if (!vpAllMapPoints1[iMP])
				continue;

			cv::Vec2d uv;
			cv::Vec3d wp = pMP->GetWorldPos();
			cv::Vec4d wp4 = cConverter::toVec4d(wp);
			// Project the map point into camera c.
			pKFini->camSystem.WorldToCamHom_fast(c, wp4, uv);
			// test if the point even projects into the mirror mask
			if (!pKFini->camSystem.GetCamModelObj(c).isPointInMirrorMask(uv(0), uv(1), 0))
				continue;

			// if yes, then get all the features in that area
			vector<size_t> vIndices =
				pKFini->GetFeaturesInArea(c, uv(0), uv(1), 50);
			if (vIndices.empty())
				continue;

			// get descriptor of point in the leading cam
			int idxDescLast = pKFini->cont_idx_to_local_cam_idx.find(iMP)->second;
			//cv::Mat descMP1 = pKFini->GetDescriptor(leadingCam, idxDescLast);
			const uint64_t* descMP = pKFini->GetDescriptorRowPtr(leadingCam, idxDescLast);
			const uint64_t* descMP_mask = 0;
			if (pKFini->HavingMasks())
				descMP_mask = pKFini->GetDescriptorMaskRowPtr(leadingCam, idxDescLast);

			// match to descriptors in area
			int bestDist = INT_MAX;
			int bestIdx2 = -1;
			// match the descriptor of the current image point to all points in the area
			for (vector<size_t>::iterator vit = vIndices.begin(), vend = vIndices.end();
				vit != vend; ++vit)
			{
				size_t i2 = *vit;
				int idxDescCurr = pKFini->cont_idx_to_local_cam_idx.find(i2)->second;
				//cv::Mat d = pKFini->GetDescriptor(c, idxDescCurr);
				const uint64_t* d = pKFini->GetDescriptorRowPtr(c, idxDescCurr);

				int dist = 0;
				if (pKFini->HavingMasks())
				{
					// NOTE(review): this fetches the candidate mask with
					// GetDescriptorRowPtr, while the analogous loop for pKFcur
					// below uses GetDescriptorMaskRowPtr - looks like a
					// copy-paste slip; confirm which accessor is intended.
					const uint64_t* d_mask = pKFini->GetDescriptorRowPtr(c, idxDescCurr);
					dist = DescriptorDistance64Masked(descMP, d, descMP_mask, d_mask, pKFini->DescDims());
				}
				else dist = DescriptorDistance64(descMP, d, pKFini->DescDims());

				if (dist < bestDist)
				{
					bestDist = dist;
					bestIdx2 = i2;
				}
			}
			// NOTE(review): bestIdx2 is -1 if no candidate improved on
			// INT_MAX; GetKeyPoint(-1) would be invalid - confirm that a best
			// match is always found when vIndices is non-empty.
			cv::KeyPoint ptMatch2 = pKFini->GetKeyPoint(bestIdx2);

			cv::Vec3d Xl1 = pKFini->GetKeyPointRay(iMP);
			cv::Vec3d Xl2 = pKFini->GetKeyPointRay(bestIdx2);
			// Accept the match if the descriptor distance and the epipolar
			// constraint both pass.
			if (bestDist <= tempMatcher.TH_HIGH_ && CheckDistEpipolarLine(Xl1, Xl2, E12, 1e-2))
			{
				cv::Vec3d wp = pMP->GetWorldPos();

				pMP->AddObservation(pKFini, bestIdx2);
				// because we can have multiple observations per mappoint,
				pKFini->AddMapPoint(pMP, bestIdx2);
				mInitialFrame.mvpMapPoints[bestIdx2] = pMP;
			}
		}
	}

	// Second global bundle adjustment after adding cross-camera observations.
	cOptimizer::GlobalBundleAdjustment(mpMap, false);

	vpAllMapPoints1 = pKFini->GetMapPointMatches();

	// test reprojection: same cross-camera search for the current keyframe.
	for (int c = 0; c < pKFcur->camSystem.GetNrCams(); ++c)
	{
		cv::Matx44d relOri = Mc * cConverter::invMat(mCurrentFrame.camSystem.Get_M_c(c));
		cv::Matx33d Rrel = cConverter::Hom2R(relOri);
		cv::Vec3d trel = cConverter::Hom2T(relOri);
		// essential matrix for epipolar test
		cv::Matx33d E12 = ComputeE(relOri);

		// loop through map points
		for (size_t iMP = 0; iMP < vpAllMapPoints2.size(); ++iMP)
		{
			cMapPoint* pMP = vpAllMapPoints2[iMP];

			if (!pMP)
				continue;

			cv::KeyPoint ptMatch1 = pKFcur->GetKeyPoint(iMP);
			cv::Vec3d pos = pMP->GetWorldPos();
			if (vpAllMapPoints2[iMP])
			{
				// Only search in active non-leading cameras.
				if (!(c == leadingCam) && IsCameraActive(c))
				{
					cv::Vec2d uv;
					cv::Vec3d wp = pMP->GetWorldPos();
					cv::Vec4d wp4 = cv::Vec4d(wp(0), wp(1), wp(2), 1.0);
					pKFcur->camSystem.WorldToCamHom_fast(c, wp4, uv);
					// test if the point even projects into the mirror mask
					if (!pKFcur->camSystem.GetCamModelObj(c).isPointInMirrorMask(uv(0), uv(1), 0))
						continue;

					// if yes, then get all the features in that area
					vector<size_t> vIndices =
						pKFcur->GetFeaturesInArea(c, uv(0), uv(1), 50);
					if (vIndices.empty())
						continue;
					// get descriptor of point in the leading cam
					int idxDescLast = pKFcur->cont_idx_to_local_cam_idx.find(iMP)->second;
					//cv::Mat descMP1 = pKFcur->GetDescriptor(leadingCam, idxDescLast);
					const uint64_t* descMP = pKFcur->GetDescriptorRowPtr(leadingCam, idxDescLast);
					const uint64_t* descMP_mask = 0;
					if (pKFcur->HavingMasks())
						descMP_mask = pKFcur->GetDescriptorMaskRowPtr(leadingCam, idxDescLast);

					// match to descriptors in area
					int bestDist = INT_MAX;
					int bestIdx2 = -1;
					// match the descriptor of the current image point to all points in the area
					for (vector<size_t>::iterator vit = vIndices.begin(), vend = vIndices.end();
						vit != vend; ++vit)
					{
						size_t i2 = *vit;
						int idxDescCurr = pKFcur->cont_idx_to_local_cam_idx.find(i2)->second;
						//cv::Mat d = pKFcur->GetDescriptor(c, idxDescCurr);
						const uint64_t* d = pKFcur->GetDescriptorRowPtr(c, idxDescCurr);
						int dist = 0;
						if (pKFcur->HavingMasks())
						{
							const uint64_t* d_mask = pKFcur->GetDescriptorMaskRowPtr(c, idxDescCurr);
							dist = DescriptorDistance64Masked(descMP, d, descMP_mask, d_mask, pKFcur->DescDims());
						}
						else
							dist = DescriptorDistance64(descMP, d, pKFcur->DescDims());

						if (dist < bestDist)
						{
							bestDist = dist;
							bestIdx2 = i2;
						}
					}
					cv::KeyPoint ptMatch2 = pKFcur->GetKeyPoint(bestIdx2);

					cv::Vec3d Xl1 = pKFcur->GetKeyPointRay(iMP);
					cv::Vec3d Xl2 = pKFcur->GetKeyPointRay(bestIdx2);
					bool epiDist = CheckDistEpipolarLine(Xl1, Xl2, E12, 1e-2);

					if (bestDist <= tempMatcher.TH_HIGH_ && epiDist)
					{
						// add observation
						pMP->AddObservation(pKFcur, bestIdx2);
						pKFcur->AddMapPoint(pMP, bestIdx2);
						mCurrentFrame.mvpMapPoints[bestIdx2] = pMP;
						pMP->ComputeDistinctiveDescriptors(pKFcur->HavingMasks());
					}
				}
			}

		}
	}

	// Final (short, 5-iteration) global bundle adjustment.
	cOptimizer::GlobalBundleAdjustment(mpMap, false, 5);

	// Hand both keyframes to the local mapper.
    mpLocalMapper->InsertMultiKeyFrame(pKFini);
	mpLocalMapper->InsertMultiKeyFrame(pKFcur);

	// Write the optimized keyframe poses back into the frames.
	cv::Matx44d iniPose = pKFini->GetPose();
	mInitialFrame.SetPose(iniPose);
	// NOTE(review): curPose is taken from pKFini, not pKFcur - this looks
	// like a copy-paste slip (the current frame gets the initial keyframe's
	// pose); confirm whether pKFcur->GetPose() was intended.
	cv::Matx44d curPose = pKFini->GetPose();
	mCurrentFrame.SetPose(curPose);
	mCurrentFrame.mvpMapPoints = pKFcur->GetMapPointMatches();

    mLastFrame = cMultiFrame(mCurrentFrame);
    mnLastKeyFrameId = mCurrentFrame.mnId;
    mpLastKeyFrame = pKFcur;

	// add local keyframes for the tracker
    mvpLocalKeyFrames.push_back(pKFcur);
    mvpLocalKeyFrames.push_back(pKFini);
    mvpLocalMapPoints = mpMap->GetAllMapPoints();
    mpReferenceKF = pKFcur;

    mpMap->SetReferenceMapPoints(mvpLocalMapPoints);

    mpMapPublisher->SetCurrentCameraPose(pKFcur->GetPose());

    mState = WORKING; // initialization finished - start normal tracking
}

/**
 * 在当前帧中跟踪上一帧的特征点，通过初步匹配、姿态优化和剔除外点来更新当前帧的特征点匹配关系。
 * 如果匹配的特征点数量足够多，则返回 true，否则返回 false。
 */
bool cTracking::TrackPreviousFrame()
{
	// 创建一个 ORB 特征匹配器对象
	cORBmatcher matcher(0.8, checkOrientation, 
		mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());
    vector<cMapPoint*> vpMapPointMatches;

    // Search first points at coarse scale levels to get a rough initial estimate
	// 在粗略的尺度级别上搜索初始匹配点，获得粗略的初始估计
    int minOctave = 0;
    int maxOctave = mCurrentFrame.mvScaleFactors.size() - 1;
    if (mpMap->KeyFramesInMap() > 5)
		minOctave = maxOctave / 2 + 1;

	// 设置当前帧的初始位姿为上一帧的位姿
	cv::Matx44d pose = mLastFrame.GetPose();
	mCurrentFrame.SetPose(pose);
	// 在初始尺度范围内进行窗口搜索，获得初步的特征点匹配
    int nmatches = matcher.WindowSearch(mLastFrame, mCurrentFrame,
		60, vpMapPointMatches, minOctave);

    // If not enough matches, search again without scale constraint
	// 如果匹配的特征点数量不够多，则再次在初始尺度范围内进行窗口搜索
    if (nmatches < 10)
    {
        nmatches = matcher.WindowSearch(mLastFrame,mCurrentFrame,50,vpMapPointMatches,0);
        if (nmatches < 10)
        {
            vpMapPointMatches = vector<cMapPoint*>
				(mCurrentFrame.mvpMapPoints.size(), static_cast<cMapPoint*>(NULL));
            nmatches = 0;
        }
    }
	// 将匹配结果保存到当前帧的地图点结构中
	mCurrentFrame.mvpMapPoints = vpMapPointMatches;
	// 位姿优化
	double inliers = 0.0;
	cOptimizer::PoseOptimization(&mCurrentFrame, inliers);

	// Discard outliers
	// 剔除异常点
	for (size_t i = 0; i < mCurrentFrame.mvbOutlier.size(); ++i)
	{
		if (mCurrentFrame.mvbOutlier[i])
		{
			mCurrentFrame.mvpMapPoints[i] = NULL;
			mCurrentFrame.mvbOutlier[i] = false;
			--nmatches;
		}
	}

	// 通过投影再次搜索匹配点
	nmatches +=
		matcher.SearchByProjection(mLastFrame, mCurrentFrame, 40, vpMapPointMatches);
	// 更新当前帧的地图点结构
    mCurrentFrame.mvpMapPoints = vpMapPointMatches;
	// 如果匹配点仍不足，返回 false
    if (nmatches < 10)
        return false;

   // Optimize pose again with all correspondences
   // 使用所有匹配点再次进行位姿优化
	cOptimizer::PoseOptimization(&mCurrentFrame, inliers);

    // Discard outliers
	// 剔除外点
	for (size_t i = 0; i < mCurrentFrame.mvbOutlier.size(); ++i)
	{
		if (mCurrentFrame.mvbOutlier[i])
		{
			mCurrentFrame.mvpMapPoints[i] = NULL;
			mCurrentFrame.mvbOutlier[i] = false;
			--nmatches;
		}
	}
    return nmatches >= 3; // 如果匹配点数量大于等于6，返回 true，否则返回 false
}

// Build a binary mask that is white (255) inside a filled circle of the given
// radius centered in a width x height image, and black (0) everywhere else.
cv::Mat createCircularMask(int width, int height, int radius) {
    cv::Mat circleMask = cv::Mat::zeros(height, width, CV_8U); // all-black background
    const cv::Point midPoint(width / 2, height / 2);
    cv::circle(circleMask, midPoint, radius, cv::Scalar(255), -1); // thickness -1 => filled
    return circleMask;
}

// Compute the 256-bin histogram of channel 0 of `image`, counting only the
// pixels selected by `mask` (non-zero entries).
//
// @param image  Input image; channel 0 is histogrammed (the whole image for a
//               grayscale input, the blue channel for a BGR input).
// @param mask   8-bit single-channel mask of the same size as `image`.
// @return       256x1 CV_32F histogram of un-normalized bin counts.
cv::Mat computeHistogramWithMask(const cv::Mat &image, const cv::Mat &mask) {
    int histSize = 256;            // one bin per 8-bit intensity level
    float range[] = {0, 256};
    const float *histRange = range;
    int channel = 0;               // histogram channel 0 only (same as before)
    bool uniform = true, accumulate = false;

    cv::Mat hist;
    // FIX: select channel 0 via calcHist's `channels` parameter instead of
    // cv::split()-ing the image first — identical result without allocating
    // per-channel copies on every call.
    cv::calcHist(&image, 1, &channel, mask, hist, 1, &histSize, &histRange, uniform, accumulate);
    return hist;
}

// Track the current frame against the previous frame using a constant-velocity
// motion model fused with a direct (photometric) pose estimate.
//
// Pipeline:
//  1. Per-camera exposure assessment via a circularly-masked grayscale histogram.
//  2. Per-camera availability bookkeeping from feature counts and exposure.
//  3. Direct multi-layer photometric pose estimation (block-matching depth).
//  4. Adaptive fusion of the motion-model pose and the direct pose.
//  5. Projection-based matching and pose optimization with outlier rejection.
//
// @return true if at least 6 inlier matches survive the optimization.
bool cTracking::TrackWithMotionModel()
{
	std::chrono::steady_clock::time_point begin;
	std::chrono::steady_clock::time_point end;
	cORBmatcher matcher(0.8, checkOrientation, 
		mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());
    vector<cMapPoint*> vpMapPointMatches;

	// Time the whole pose-estimation stage.
	std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();

	// //////////////////////////////////////////////////////////////
	// Multi-camera switching strategy: judge each camera's exposure quality
	// from its image histogram.
	std::cout << "开始计算图像直方图..." << std::endl;
	// isExposed[cam] == 1.0 flags a badly exposed image, 0.0 a normal one.
    std::vector<double> isExposed(GetNrCams(), 0.0);
	for (int camIdx = 0; camIdx < GetNrCams(); ++camIdx)
    {
        cv::Mat img = mCurrentFrame.images[camIdx];
        if (img.empty())
        {
            std::cout << "相机 " << camIdx << " 图像为空！" << std::endl;
            continue;
        }

        // Convert to grayscale if the image is color.
        cv::Mat gray;
        if (img.channels() == 3)
            cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
        else
            gray = img.clone();

		// Restrict the histogram to a centered circular region.
		int radius = std::min(gray.cols, gray.rows) / 2; // circular mask radius
		cv::Mat mask = createCircularMask(gray.cols, gray.rows, radius);
		cv::Mat hist = computeHistogramWithMask(gray, mask);

        // L1-normalize so the bins can be read as pixel fractions.
        cv::normalize(hist, hist, 1, 0, cv::NORM_L1);

        // Fraction of very bright (>200) and very dark (<50) pixels.
        double sumOverexposed = 0, sumUnderexposed = 0;
        for (int i = 200; i < 256; ++i)
            sumOverexposed += hist.at<float>(i);
        for (int i = 0; i < 50; ++i)
            sumUnderexposed += hist.at<float>(i);

        // The lower the score, the worse the exposure problem.
        double exposureScore = 1.0 - std::max(sumOverexposed, sumUnderexposed);
        isExposed[camIdx] = exposureScore < 0.5 ? 1.0 : 0.0; // 1.0: badly exposed, 0.0: normal

        std::cout << "相机 " << camIdx << " 曝光评价分数: " << exposureScore << std::endl;
    }

	// Maintain a per-camera availability history.
	for (int c = 0; c < GetNrCams(); c++) {
        int pointsNum = pointsNumOfCam[c]; // number of extracted features in this camera
        bool IsExposed = isExposed[c];     // true (1.0) means badly exposed
		int tempState = 0;

		std::cout << "Camera " << c << ": Points: " << pointsNum << ", Exposed: " << IsExposed << std::endl;

        // Tentative state: 0 = unusable (few features AND bad exposure), 1 = usable.
        if (pointsNum < featureThreshold && IsExposed) {
            tempState = 0;
        } else {
            tempState = 1;
        }

        // Keep the history window bounded.
		cout << "size: " << historyState[c].size() << std::endl;
        if (historyState[c].size() >= historySize) {
            historyState[c].pop_front();
        }
        // NOTE(review): the push_back below is disabled, so the history never
        // grows and availableCount stays at 0 — confirm this is intentional.
        // historyState[c].push_back(tempState);

        // Count how often the camera was usable within the window.
        int availableCount = std::count(historyState[c].begin(), historyState[c].end(), 1);
        if (availableCount >= historySize) {
            SetCameraActive(c, true); // restore the camera
        } else if (availableCount < historySize && pointsNum < featureThreshold && IsExposed) {
            SetCameraActive(c, false); // disable the camera
        }
    }

	// //////////////////////////////////////////////////////////////
	// Direct (photometric) pose estimation for every camera.
    std::vector<Eigen::Isometry3d> T_direct; // one estimated pose per camera
	// Accumulated photometric residual.
	// FIX: this was declared uninitialized and then read via '+=' inside the
	// loop below — undefined behavior; it must start at zero.
	double direct_cost = 0.0;
	for (int i = 0; i < GetNrCams(); i++) {
		cv::RNG rng;
		int nPoints = 2000;
		int boarder = 20;
		VecVector2d pixels_ref;
		vector<double> depth_ref;

		// Block-matching stereo between the last and current image of camera i
		// to obtain a (pseudo) disparity map.
		cv::Ptr<cv::StereoBM> stereoBM = cv::StereoBM::create(16, 9); // numDisparities, blockSize
		stereoBM->setBlockSize(15);
		stereoBM->setPreFilterSize(7);
		stereoBM->setPreFilterCap(31);
		// FIX: was setMinDisparity(1e-9); the parameter is an int, so the
		// value was silently truncated to 0 — write the intent explicitly.
		stereoBM->setMinDisparity(0);
		stereoBM->setNumDisparities(64);
		stereoBM->setTextureThreshold(2048);
		stereoBM->setUniquenessRatio(15);
		stereoBM->setSpeckleWindowSize(100);
		stereoBM->setSpeckleRange(16);
		stereoBM->setDisp12MaxDiff(1);

		// Compute the disparity map for camera i.
		cv::Mat disparityPic;
		stereoBM->compute(mLastFrame.images[i], mCurrentFrame.images[i], disparityPic);

		// Normalize to 8 bit for the per-pixel access below.
		cv::normalize(disparityPic, disparityPic, 0, 255, cv::NORM_MINMAX, CV_8U);

		// Sample random pixels away from the border and convert their
		// disparity to depth.
		const double min_disparity = 1e-9;  // guards against division by zero
		for (int j = 0; j < nPoints; j++) {
			int x = rng.uniform(boarder, mLastFrame.images[i].cols - boarder);  // don't pick pixels close to the border
			int y = rng.uniform(boarder, mLastFrame.images[i].rows - boarder);
			int disparity = disparityPic.at<uchar>(y, x);
			double depth = fx * baseline / std::max((double)disparity, min_disparity); // disparity to depth
			depth_ref.push_back(depth);
			pixels_ref.push_back(Eigen::Vector2d(x, y));
		}

		// Estimate the relative pose of the current image w.r.t. the reference.
		Eigen::Isometry3d T_cur_ref = Eigen::Isometry3d::Identity();

		std::cout << "计算视差图完成..." << std::endl;

		DirectMethod directObj;
		direct_cost += directObj.DirectPoseEstimationMultiLayer(mLastFrame.images[i], mCurrentFrame.images[i], pixels_ref, depth_ref, T_cur_ref);
		T_direct.push_back(T_cur_ref);
	}

	direct_cost /= GetNrCams();	// mean photometric residual over all cameras

	// Lift the lead camera's direct pose into the global MCS frame.
	cv::Matx44d invMc = mLastFrame.camSystem.Get_M_t() *
		cConverter::invMat(mLastFrame.camSystem.Get_M_c(0));
	Eigen::Isometry3d leadCamPose = T_direct[0];
	Eigen::Matrix3d R_eigen = leadCamPose.rotation();
    cv::Matx33d Rcw;
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 3; ++j) {
            Rcw(i, j) = R_eigen(i, j);
        }
    }
	Eigen::Vector3d t_eigen = leadCamPose.translation();
	cv::Vec3d tcw(t_eigen.x(), t_eigen.y(), t_eigen.z());
	cv::Matx44d invCurr = cConverter::Rt2Hom(Rcw, tcw)*invMc;
	std::cout << "invCurr:" << invCurr << std::endl;

	// ////////////////////////////////////////////////////////////////////////

    // Compute the current pose from the constant-velocity motion model.
	cv::Matx44d pose = mLastFrame.GetPose()*mVelocity;
	std::cout << "feature: " << pose << std::endl;

	// Pose fusion: weight the feature-based and direct poses adaptively.
	int nmatches = matcher.SearchByProjection(mCurrentFrame, mLastFrame, 50);
	// Average feature count across cameras.
	int total_features = 0;
	// FIX: loop index was size_t compared against the int GetNrCams().
	for (int i = 0; i < GetNrCams(); ++i) {
		total_features += pointsNumOfCam[i];
	}
	cout << "total_features:" << total_features << std::endl;
	total_features = total_features / GetNrCams();
	cv::Matx44d pose_fused = AdaptiveWeightedFusion(invCurr, pose, nmatches, total_features, direct_cost);
	cout << "pose_fused:" << pose_fused << std::endl;

	// Trust the feature-based pose alone when its fusion weight dominates.
	if (w_f > 0.7)
		mCurrentFrame.SetPose(pose);
	else
		mCurrentFrame.SetPose(pose_fused);

	std::cout << "mCurrentFrame.GetPose():" << mCurrentFrame.GetPose() << std::endl;

	std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
	double ttrack = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
	std::cout << "位姿估计时间: " << ttrack << "ms" << std::endl;

	// Reset the current frame's map points before the projection search.
    fill(mCurrentFrame.mvpMapPoints.begin(),
		mCurrentFrame.mvpMapPoints.end(),static_cast<cMapPoint*>(NULL));

	begin = std::chrono::steady_clock::now();
    // Project the map points seen in the previous frame into the current one.
	nmatches = matcher.SearchByProjection(mCurrentFrame, mLastFrame, 50);
	end = std::chrono::steady_clock::now();

	// Optimize the pose over the found correspondences.
	double inliers= 0.0;
    cOptimizer::PoseOptimization(&mCurrentFrame, inliers);

    // Discard the outliers flagged by the optimization.
	for (size_t i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i)
    {
        if (mCurrentFrame.mvpMapPoints[i])
        {
            if (mCurrentFrame.mvbOutlier[i])
            {
                mCurrentFrame.mvpMapPoints[i] = NULL;
                mCurrentFrame.mvbOutlier[i] = false;
				--nmatches;
            }
        }
    }
	cout << "discard outliers nmatches:" << nmatches <<endl;
	// Tracking succeeds with at least 6 surviving matches.
    return nmatches >= 6;
}
// Track the local map: refresh the local keyframes and map points, search
// additional correspondences inside the camera frustums, then optimize the
// current frame's pose and update per-point statistics.
// todo...
bool cTracking::TrackLocalMap()
{
    // Tracking from previous frame or relocalisation was succesfull and we have an estimation
    // of the camera pose and some map points tracked in the frame.
    // Update Local Map and Track

    // Update Local Map
	// Refresh the local keyframes and the local map points.
    UpdateReference();

    // Search Local MapPoints
	// Search local map points that fall inside the camera frustums.
	int nrPoints = SearchReferencePointsInFrustum();

    // Optimize Pose
	// Optimize the current frame's pose over all correspondences.
	double inliers = 0.0;
	mnMatchesInliers = cOptimizer::PoseOptimization(&mCurrentFrame, inliers);

    // Update MapPoints Statistics
	// Update the per-map-point statistics.
	for (size_t i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i)
	{
		if (mCurrentFrame.mvpMapPoints[i])
		{
			// Outliers do not count towards the tracked points.
			if (mCurrentFrame.mvbOutlier[i])
			{
				--nrPoints;
			}
			// Inliers: bump the "found" counter and refresh the descriptor.
			else
			{
				mCurrentFrame.mvpMapPoints[i]->IncreaseFound(); // one more frame found this point
				// Refresh the point's current descriptor from the observing camera.
				int idxC = mCurrentFrame.keypoint_to_cam.find(i)->second;
				int descIdx = mCurrentFrame.cont_idx_to_local_cam_idx.find(i)->second;
				cv::Mat desc = mCurrentFrame.mDescriptors[idxC].row(descIdx);
				mCurrentFrame.mvpMapPoints[i]->UpdateCurrentDescriptor(desc);
			}
		}
	}
	// Baseline between the current frame and the reference keyframe
	// (used by NeedNewKeyFrame to gate keyframe insertion).
	curBaseline2MKF = cv::norm(cConverter::Hom2T(mCurrentFrame.GetPose()) -
		cConverter::Hom2T(mpReferenceKF->GetPose()));

    // Decide if the tracking was succesful
    // More restrictive if there was a relocalization recently
	// NOTE(review): both failure branches below were changed to `return true`
	// (the original `return false` is kept commented out), so tracking is
	// never reported lost from this function — confirm this is intentional.
	if (mCurrentFrame.mnId < mnLastRelocFrameId + mMaxFrames && mnMatchesInliers < 15)  // original threshold
        // return false;
		return true;


	if (mnMatchesInliers < 80) { // 50: outdoor_large_loop
		// Count the inlier map points per camera.
		for (size_t i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i) {
			if (!mCurrentFrame.mvbOutlier[i]) {
				int idxC = mCurrentFrame.keypoint_to_cam.find(i)->second; // camera index of keypoint i
				++pointsNumOfCam[idxC];
			}
		}

		// NOTE(review): per-camera activation based on these counts was
		// prototyped here but is currently disabled.
	}

	// Too few inliers would normally mean tracking is lost (see NOTE above).
	if (mnMatchesInliers < 15)  // original threshold
        // return false;
		return true;
	else
	{
		// Otherwise record the pose and statistics and report success.
		allPoses.push_back(hom2cayley(mCurrentFrame.GetPose()));
		allPosesBool.push_back(true);
		nrTrackedPts.push_back(mnMatchesInliers);
		inlierRatio.push_back(inliers);
		return true;
	}
}

// Decide whether the current frame should be promoted to a new keyframe.
//
// A keyframe is inserted when enough frames have passed since the last one,
// tracking has weakened relative to the reference keyframe, the baseline to
// the reference keyframe is large enough, and the local mapper can accept it.
bool cTracking::NeedNewKeyFrame()
{
	// No insertion while a loop closure has the local mapper stopped.
	if (mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
		return false;

	// Stay conservative right after a relocalisation (once the map is mature).
	if (mCurrentFrame.mnId < mnLastRelocFrameId + mMaxFrames && mpMap->KeyFramesInMap() > mMaxFrames)
		return false;

	// Map points tracked by the reference keyframe.
	const int nRefMatches = mpReferenceKF->TrackedMapPoints();

	// Is the local mapper ready to take another keyframe?
	const bool mapperIdle = mpLocalMapper->AcceptMultiKeyFrames();

	// Condition 1a: more than "MaxFrames" have passed since the last insertion.
	const bool manyFramesPassed = mCurrentFrame.mnId >= mnLastKeyFrameId + mMaxFrames;
	// Condition 1b: more than "MinFrames" have passed and the mapper is idle.
	const bool minFramesAndIdle = mCurrentFrame.mnId >= mnLastKeyFrameId + mMinFrames && mapperIdle;
	// Condition 2: tracking fewer than 90% of the reference keyframe's points,
	// but still enough inliers to trust the pose.
	const bool trackingWeakened = mnMatchesInliers < (nRefMatches * 0.9) && mnMatchesInliers > 10;

	// Also require a sufficient baseline to the reference keyframe.
	if (!((manyFramesPassed || minFramesAndIdle) && trackingWeakened && curBaseline2MKF > 0.2))
		return false;

	// Insert now if the mapper accepts keyframes; otherwise interrupt the
	// bundle adjustment and retry on a later frame.
	if (mapperIdle)
		return true;

	mpLocalMapper->InterruptBA();
	return false;
}

// Promote the current frame to a keyframe and hand it to the local mapper.
// Also records the insertion point so NeedNewKeyFrame() can rate-limit.
void cTracking::CreateNewKeyFrame()
{
	// FIX: removed the unused local `nrCams` (GetNrCams() result was never read).
	cMultiKeyFrame* pKF = new cMultiKeyFrame(mCurrentFrame, mpMap, mpKeyFrameDB);
	pKF->imageId = mCurrentFrame.GetImgCnt();
	mpLocalMapper->InsertMultiKeyFrame(pKF);

	mnLastKeyFrameId = mCurrentFrame.mnId;
	mpLastKeyFrame = pKF;
}

// 搜索在视野范围内的参考地图点
int cTracking::SearchReferencePointsInFrustum()
{
    // Do not search map points already matched
	// 不搜索已经匹配过的地图点
	int nrMatches = 0;
	for (int i = 0; i < mCurrentFrame.mvpMapPoints.size(); ++i) // 遍历当前帧的所有地图点
    {
		cMapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
        if(pMP) // 如果存在地图点
        {
            if(pMP->isBad())
            {
				mCurrentFrame.mvpMapPoints[i] = NULL;
            }
            else
            {
				// 原始
                pMP->IncreaseVisible(); // 增加地图点的可见次数
                pMP->mnLastFrameSeen = mCurrentFrame.mnId; // 更新地图点的最后一帧观测到的帧号

				int cam = mCurrentFrame.keypoint_to_cam.find(i)->second; // 获取特征点所属的相机索引
				pMP->mbTrackInView[cam] = false; // 将地图点在该相机中的跟踪状态置为false
				++nrMatches; // 匹配计数器加1


				// 修改
				// int cam = mCurrentFrame.keypoint_to_cam.find(i)->second; // 获取特征点所属的相机索引
				// if (IsCameraActive(cam)) {
				// 	pMP->IncreaseVisible(); // 增加地图点的可见次数
				// 	pMP->mnLastFrameSeen = mCurrentFrame.mnId; // 更新地图点的最后一帧观测到的帧号
				// 	pMP->mbTrackInView[cam] = false; // 将地图点在该相机中的跟踪状态置为false
				// 	++nrMatches; // 匹配计数器加1
				// }
				
            }
        }
    }

    int nToMatch=0; // 待匹配的地图点数量

    // Project points in frame and check its visibility
	// 在当前帧中投影地图点，并检查其可见性
    for(vector<cMapPoint*>::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end();
		vit != vend; ++vit)
    {
        cMapPoint* pMP = *vit;
        if(pMP->mnLastFrameSeen == mCurrentFrame.mnId) // 如果地图点在当前帧中已经被观测到
            continue;
        if(pMP->isBad())
            continue;        
        // Project (this fills MapPoint variables for matching)
		// project it in each camera
		// 投影地图点至当前帧（这一步填充了地图点的匹配变量）
        // 在每个相机中投影地图点
		for (int c = 0; c < mCurrentFrame.camSystem.GetNrCams(); ++c)
		{
			// 原始
			if (mCurrentFrame.isInFrustum(c, pMP, 0.3)) // 如果地图点在相机视锥体内可见
			// 修改 IsCameraActive(c)
			// if (IsCameraActive(c) && mCurrentFrame.isInFrustum(c, pMP, 0.3)) // 如果地图点在相机视锥体内可见
			{
				pMP->IncreaseVisible(); // 增加地图点的可见次数
				++nToMatch; // 待匹配的地图点数量+1
			}
		}
	}    

    if (nToMatch > 0) // 如果有待匹配的地图点
    {
		cORBmatcher matcher(0.8, checkOrientation, mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());
        int th = 3; // 设置匹配阈值
        // If the camera has been relocalised recently, perform a coarser search
        if (mCurrentFrame.mnId < mnLastRelocFrameId+2)
            th = 3;
        nrMatches += matcher.SearchByProjection(mCurrentFrame, mvpLocalMapPoints, th); // 根据投影进行匹配
    }

	return nrMatches;
}
// Update the local map: refresh the local keyframes first, then the map
// points they observe, and publish the points for visualization.
void cTracking::UpdateReference()
{    
    // This is for visualization
	// Publish the local map points as the reference set for display.
    mpMap->SetReferenceMapPoints(mvpLocalMapPoints);

    // Update
	// Keyframes must be refreshed before the points they contribute.
    UpdateReferenceKeyFrames();
    UpdateReferencePoints();
}

// 更新参考地图点
void cTracking::UpdateReferencePoints()
{
    mvpLocalMapPoints.clear();
	// 遍历局部关键帧，更新局部地图点
	for (vector<cMultiKeyFrame*>::iterator itKF = mvpLocalKeyFrames.begin(),
		itEndKF = mvpLocalKeyFrames.end(); itKF != itEndKF; ++itKF)
    {
		// 获取关键帧的地图点匹配
		cMultiKeyFrame* pKF = *itKF;
        vector<cMapPoint*> vpMPs = pKF->GetMapPointMatches();

		// 遍历关键帧的地图点匹配，更新局部地图点
        for(vector<cMapPoint*>::iterator itMP = vpMPs.begin(), itEndMP = vpMPs.end(); 
			itMP != itEndMP; ++itMP)
        {
            cMapPoint* pMP = *itMP;
            if(!pMP)
                continue;
            if(pMP->mnTrackReferenceForFrame == mCurrentFrame.mnId) // 如果地图点已经被当前帧标记为参考点，跳过
                continue;
            if(!pMP->isBad()) // 如果地图点没有被标记为坏点
            {
                mvpLocalMapPoints.push_back(pMP); // 将地图点添加到局部地图点容器中
                pMP->mnTrackReferenceForFrame = mCurrentFrame.mnId; // 标记地图点为当前帧的参考点
            }
        }
    }
}
// Rebuild the list of local keyframes from the map points tracked in the
// current frame, and select the keyframe that shares the most points as the
// new reference keyframe.
void cTracking::UpdateReferenceKeyFrames()
{
    // Each map point found in the current frame votes for the keyframes
    // in which it has been observed.
	map<cMultiKeyFrame*, int> keyframeCounter;
	for (size_t i = 0, iend = mCurrentFrame.mvpMapPoints.size(); i<iend; ++i)
    {
        if (mCurrentFrame.mvpMapPoints[i])
        {
            cMapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
            if (!pMP->isBad())
            {
				// One vote per observing keyframe.
				map<cMultiKeyFrame*, std::vector<size_t>> observations = pMP->GetObservations();
				for (map<cMultiKeyFrame*, std::vector<size_t>>::iterator it = observations.begin(),
					itend = observations.end(); it != itend; it++)
				{
					keyframeCounter[it->first]++;
				}
            }
            else
            {
                mCurrentFrame.mvpMapPoints[i] = NULL;
            }
        }
    }

    int max = 0;
	cMultiKeyFrame* pKFmax = NULL;

    mvpLocalKeyFrames.clear();
	mvpLocalKeyFramesCovWeights.clear();
	mvpLocalKeyFramesDistance2Frame.clear();
    mvpLocalKeyFrames.reserve(3*keyframeCounter.size());

    // All keyframes that observe enough map points join the local map.
	// Also track which keyframe shares the most points.
	for (map<cMultiKeyFrame*, int>::iterator it = keyframeCounter.begin(), 
		itEnd = keyframeCounter.end();it != itEnd; ++it)
    {
		cMultiKeyFrame* pKF = it->first;
		pKF->SetReference(false); // set reference to false, only to display reference
		// The vote threshold controls how fast the tracker "forgets" map
		// points that are probably not visible any more: the higher the
		// threshold, the smaller the local map.
		if (it->second > 4)
		{
			if (pKF->isBad())
				continue;

			if (it->second > max)
			{
				max = it->second;
				pKFmax = pKF;
			}
			// Distance between the current frame and this keyframe.
			double 	curBaseline2MKF = cv::norm(cConverter::Hom2T(mCurrentFrame.GetPose()) -
				cConverter::Hom2T(pKF->GetPose()));

			// Store covisibility weight, keyframe pointer and distance.
			mvpLocalKeyFramesCovWeights.push_back(it->second);
			mvpLocalKeyFrames.push_back(it->first);
			mvpLocalKeyFramesDistance2Frame.push_back(curBaseline2MKF);

			pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
		}
    }

	mpReferenceKF->SetReference(true);
	// FIX: only replace the reference keyframe when a best candidate exists.
	// The unconditional assignment could set mpReferenceKF to NULL when no
	// keyframe passed the vote threshold, crashing later dereferences
	// (e.g. the baseline computation in TrackLocalMap).
	if (pKFmax)
		mpReferenceKF = pKFmax;
}

/**
 * @brief Relocalise the tracker after tracking loss.
 *
 * Performed when tracking is lost, and forced at some stages of loop closing.
 * Candidate keyframes are matched against the current frame by BoW; for each
 * promising candidate a non-central (multi-camera) GP3P RANSAC estimates the
 * pose, which is refined with GPnP and pose optimization.
 *
 * @return true if relocalisation succeeded, false otherwise
 */
bool cTracking::Relocalisation()
{
    // Compute Bag of Words Vector
    mCurrentFrame.ComputeBoW();

    // Relocalisation is performed when tracking is lost and forced at some stages during loop closing
    // Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
    vector<cMultiKeyFrame*> vpCandidateKFs;
	if (!RelocalisationRequested())
        vpCandidateKFs = mpKeyFrameDB->DetectRelocalisationCandidates(&mCurrentFrame);
    else // Forced Relocalisation: Relocate against local window around last keyframe
    {
		std::unique_lock<std::mutex> lock(mMutexForceRelocalisation);
        mbForceRelocalisation = false;
        vpCandidateKFs.reserve(10);
        vpCandidateKFs = mpLastKeyFrame->GetBestCovisibilityKeyFrames(5);
        vpCandidateKFs.push_back(mpLastKeyFrame);
		
    }

    if(vpCandidateKFs.empty())
        return false;

    const int nKFs = vpCandidateKFs.size();
    // We perform first an ORB matching with each candidate
    // If enough matches are found we setup a PnP solver
	cORBmatcher matcher(0.9, checkOrientation, mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());

	// Per-candidate data for the non-central absolute-pose solver:
	// bearing vectors, world points and the camera index of each match.
	vector<opengv::bearingVectors_t> matchedBearingVecs(nKFs);
	vector<opengv::points_t> points3D(nKFs);
	opengv::translations_t camOffsets = camSystem.Get_All_t_c_ogv();
	opengv::rotations_t camRotations = camSystem.Get_All_R_c_ogv();
	std::vector<std::vector<int> > camCorrespondences(nKFs);

	vector<int> indices_ransac_to_mp(nKFs);
	vector<vector<int>> mvKeyPointIndices(nKFs, vector<int>());

    vector<vector<cMapPoint*> > vvpMapPointMatches;
    vvpMapPointMatches.resize(nKFs);

    vector<bool> vbDiscarded;
    vbDiscarded.resize(nKFs);

    int nCandidates = 0;

	// Stage 1: BoW matching against each candidate; keep candidates with
	// enough matches and collect their solver inputs.
	for (size_t i = 0; i < vpCandidateKFs.size(); ++i)
    {
        cMultiKeyFrame* pKF = vpCandidateKFs[i];
        if (pKF->isBad())
            vbDiscarded[i] = true;
        else
        {
            int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
            if (nmatches < 15)
            {
                vbDiscarded[i] = true;
                continue;
            }
            else
            {
				// bearing vectors
				opengv::bearingVectors_t mvP2D; 
				// 3D Points
				opengv::points_t mvP3Dw;
				// get points
				int idx = 0;
				for (size_t j = 0, iend = vvpMapPointMatches[i].size(); j < iend; ++j)
				{
					cMapPoint* pMP = vvpMapPointMatches[i][j];

					if (pMP)
					{
						if (!pMP->isBad())
						{
							// Keypoint ray in the camera frame -> bearing vector.
							const cv::Vec3d &kpRay = mCurrentFrame.mvKeysRays[j];
							mvP2D.push_back(opengv::bearingVector_t(kpRay(0), kpRay(1), kpRay(2)));

							cv::Vec3d Pos = pMP->GetWorldPos();
							mvP3Dw.push_back(opengv::point_t(Pos(0), Pos(1), Pos(2)));
							mvKeyPointIndices[i].push_back(j);
							// Which camera of the rig observed this keypoint.
							int cam = mCurrentFrame.keypoint_to_cam.find(j)->second;
							camCorrespondences[i].push_back(cam);
							++idx;
						}
					}
				}
				matchedBearingVecs[i] = mvP2D;
				points3D[i] = mvP3Dw;
				// setup an adapter for each keyframe we are trying

				++nCandidates;
            }
        }        
    }

    bool bMatch = false;
	cORBmatcher matcher2(0.9, checkOrientation, mCurrentFrame.DescDims(), mCurrentFrame.HavingMasks());

	// Stage 2: for each surviving candidate run GP3P RANSAC, refine with
	// GPnP and pose optimization; accept the first candidate with enough
	// inliers.
	for (size_t i = 0; i < vpCandidateKFs.size(); i++)
	{
		if (vbDiscarded[i])
			continue;

		vector<bool> vbInliers;
		vector<int> inliers;

		int nInliers;
		bool bNoMore;

		// Non-central absolute-pose adapter covering all cameras of the rig.
		opengv::absolute_pose::NoncentralAbsoluteAdapter adapter(
			matchedBearingVecs[i],
			camCorrespondences[i],
			points3D[i],
			camOffsets,
			camRotations);
// Undefine the Windows min/max macros, which would otherwise break the
// template arguments below.
#undef max
#undef min
		opengv::sac::Ransac < opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem >
			ransac;
		std::shared_ptr<opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem>
			absposeproblem_ptr(
			new opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem(
			adapter,
			opengv::sac_problems::absolute_pose::AbsolutePoseSacProblem::GP3P));
		ransac.sac_model_ = absposeproblem_ptr;
		ransac.threshold_ = 0.0001;
		ransac.max_iterations_ = 150;

		ransac.computeModel();
		inliers = ransac.inliers_;
		opengv::transformation_t trafo = ransac.model_coefficients_;
		

		// If Ransac reaches max. iterations discard keyframe
		if (ransac.iterations_ >= ransac.max_iterations_)
		{
			vbDiscarded[i] = true;
			--nCandidates;
		}
		else
		{
			// Refine the pose on the RANSAC inlier set with GPnP.
			trafo = opengv::absolute_pose::gpnp(adapter, ransac.inliers_);

			cv::Matx44d trafoOut = cConverter::ogv2ocv(trafo);

			mCurrentFrame.SetPose(trafoOut);
			set<cMapPoint*> sFound;

			// Keep only the inlier correspondences in the current frame.
			for (int ii = 0; ii < mCurrentFrame.mvpMapPoints.size(); ++ii)
				mCurrentFrame.mvpMapPoints[ii] = NULL;

			for (size_t j = 0; j < inliers.size(); ++j)
			{
				mCurrentFrame.mvpMapPoints[mvKeyPointIndices[i][inliers[j]]] =
					vvpMapPointMatches[i][mvKeyPointIndices[i][inliers[j]]];
				sFound.insert(vvpMapPointMatches[i][mvKeyPointIndices[i][inliers[j]]]);
			}
			// NOTE(review): this `double inliers` shadows the `vector<int>
			// inliers` declared above; the vector is not used past this point.
			double inliers = 0.0;
			int nGood = cOptimizer::PoseOptimization(&mCurrentFrame, inliers);

			if (nGood < 10)
				continue;

			// Drop the outliers flagged by the optimization.
			for (size_t io = 0, ioend = mCurrentFrame.mvbOutlier.size(); io < ioend; ++io)
				if (mCurrentFrame.mvbOutlier[io])
					mCurrentFrame.mvpMapPoints[io] = NULL;

			// If the pose is supported by enough inliers stop ransacs and continue
			if (nGood >= 10)
			{
				bMatch = true;
				break;
			}
		}

	}

    if(!bMatch)
    {
        return false;
    }
    else
    {
        mnLastRelocFrameId = mCurrentFrame.mnId;
        return true;
    }

}

// Request a forced relocalisation on the next tracking iteration.
// Thread-safe; also records the current frame id as the last reloc frame,
// which makes keyframe insertion more conservative for a while.
void cTracking::ForceRelocalisation()
{
    std::unique_lock<std::mutex> lock(mMutexForceRelocalisation);
    mbForceRelocalisation = true;
    mnLastRelocFrameId = mCurrentFrame.mnId;
}

// Thread-safe query: has a forced relocalisation been requested?
bool cTracking::RelocalisationRequested()
{
    std::unique_lock<std::mutex> lock(mMutexForceRelocalisation);
    return mbForceRelocalisation;
}

// Reset the whole SLAM system: stop the viewer, reset local mapping and loop
// closing, clear the keyframe database and the map, restart the id counters,
// and wipe all per-run tracking statistics.
void cTracking::Reset()
{
	this->mpViewer->RequestStop();

	cout << "System Reseting" << endl;
	// Wait until the viewer thread has actually stopped.
	while (!mpViewer->isStopped())
		std::this_thread::sleep_for(std::chrono::milliseconds(1000));

	// Reset Local Mapping
	cout << "Reseting Local Mapper...";
	mpLocalMapper->RequestReset();
	cout << " done" << endl;

	// Reset Loop Closing
	cout << "Reseting Loop Closing...";
	mpLoopClosing->RequestReset();
	cout << " done" << endl;

	// Clear BoW Database
	cout << "Reseting Database...";
	mpKeyFrameDB->clear();
	cout << " done" << endl;

	// Clear Map (this erases MapPoints and KeyFrames)
	cout << "Clearing map...";
	mpMap->clear();
	cout << " done" << endl;

	// Restart the id counters and the tracking state machine.
	cMultiKeyFrame::nNextId = 0;
	cMultiFrame::nNextId = 0;
	mState = NO_IMAGES_YET;

	if (mpInitializer)
	{
		delete mpInitializer;
		mpInitializer = static_cast<cMultiInitializer*>(NULL);
	}

	mpViewer->Release();

	// Reset statistics, otherwise stale poses would be evaluated.
	// FIX: removed a dead store that refilled allPosesBool with `false`
	// immediately before clearing it.
	allPoses.clear();
	allPosesBool.clear();
	nrTrackedPts.clear();
	inlierRatio.clear();
	timingFeatureExtraction.clear();
	timingTrackLocalMap.clear();
	timingInitalPoseEst.clear();
}

void cTracking::CheckResetByPublishers()
{
    bool bReseting = false;

    {
        std::unique_lock<std::mutex> lock(mMutexReset);
        bReseting = mbReseting;
    }

    if(bReseting)
    {
        std::unique_lock<std::mutex> lock(mMutexReset);
        mbPublisherStopped = true;
    }

    // Hold until reset is finished
    while(1)
    {
        {
			std::unique_lock<std::mutex> lock(mMutexReset);
            if(!mbReseting)
            {
                mbPublisherStopped=false;
                break;
            }
        }
		std::this_thread::sleep_for(std::chrono::milliseconds(500));
    }
}

// Returns true once the tracking loop has finished.
bool cTracking::CheckFinished()
{
	return finished;
}
}