// Copyright 2008 Isis Innovation Limited
#include "OpenGL.h"
#include "Tracker.h"
#include "MEstimator.h"
#include "ShiTomasi.h"
#include "PatchFinder.h"
#include "TrackerData.h"
#include "MarkerData.h"
#include "MiniPatch.h"
#include "RandomHelpers.h"
#include "RelocaliserSBI.h"
#include "RelocaliserSCARF.h"

#include <cvd/utility.h>
#include <cvd/gl_helpers.h>
#include <cvd/fast_corner.h>
#include <cvd/vision.h>

#include <cvd/vector_image_ref.h>
#include <cvd/image_interpolate.h>

#include <TooN/wls.h>
#include <TooN/helpers.h>

#include <TooN/Cholesky.h>

#include <gvars3/instances.h>
#include <gvars3/GStringUtil.h>
#include "external/ARToolKit/ARToolKitPlus/TrackerMultiMarkerImpl.h"
#include "external/ARToolKit/ARToolKitPlus/arMulti.h"

#include <fstream>
#include <fcntl.h>




#include <opencv/cv.h>
//#include <opencv/cvaux.h>
#include <opencv/highgui.h>

#if CVD_HAVE_XMMINTRIN
#include <tmmintrin.h>
#endif

using namespace CVD;
using namespace std;

#include <opencv/highgui.h>

using namespace CVD;
using namespace std;
using namespace GVars3;

// The constructor mostly sets up internal reference variables
// to the other classes..
Tracker::Tracker(const ATANCamera &c, Scarf &s, MapMaker &mm) : 
  mMapMaker(mm),
  mCamera(c),
  mCameraSBI(c),
  mScarf(s),
  mDynamicObject(c)
{
  //Test mios
  // Debug state for the (currently commented-out) optical-flow drawing code.
  D.init = false;


  // GUI commands are queued by the static GUICommandCallBack and handled in
  // the tracker's own thread at the end of each frame (GUICommandHandler).
  GUI.RegisterCommand("Reset", GUICommandCallBack, this);
  GUI.RegisterCommand("KeyPress", GUICommandCallBack, this);

  mpSBILastFrame = NULL;
  mpSBIThisFrame = NULL;
  mCurrentKF.reset();
  mnFrame=0;

  // The small-blurry-image rotation estimator works on a 1/16-scale image.
  mCameraSBI.SetImageSize(mCamera.GetImageSize()/16);

  // Register tunable parameters with GVars3; values can be overridden from
  // the configuration file or the console. The defaults of 0 for the FAST
  // thresholds mean "use the library/default threshold" unless configured.
  GV3::Register(mgvnLevel1ThreshFAST, "Tracker.Level1ThreshFAST", 0, SILENT);
  GV3::Register(mgvnLevel2ThreshFAST, "Tracker.Level2ThreshFAST", 0, SILENT);
  GV3::Register(mgvnLevel3ThreshFAST, "Tracker.Level3ThreshFAST", 0, SILENT);
  GV3::Register(mgvnLevel4ThreshFAST, "Tracker.Level4ThreshFAST", 0, SILENT);
  GV3::Register(mgvnUseRelocaliserSBI, "Tracker.UseRelocaliserSBI", 0, SILENT);
  GV3::Register(mgvnMinTrackedPoints, "Tracker.MinTrackedPoints", 50, SILENT);
  GV3::Register(mgvnGlobalSE3, "System.GlobalSE3", 0, SILENT);
  GV3::Register(mgvnGlobalSO3, "System.GlobalSO3", 0, SILENT);
  GV3::Register(mgvdMaxRotationError, "System.MaxRotationError", 3.0, SILENT);
  GV3::Register(mgvdMaxTranslationError, "System.MaxTranslationError", 3.0, SILENT);
  GV3::Register(mgvnDrawFASTCorners, "Tracker.DrawFASTCorners", 0, SILENT);
  GV3::Register(mgvdMinInitBaseline, "Tracker.MinInitBaseline", 0.5, SILENT);

  // Cache the gvar values in plain members; the gvars are only read here,
  // so runtime changes to them after construction take no effect.
  nLevelThreshFAST[0] = *mgvnLevel1ThreshFAST;
  nLevelThreshFAST[1] = *mgvnLevel2ThreshFAST;
  nLevelThreshFAST[2] = *mgvnLevel3ThreshFAST;
  nLevelThreshFAST[3] = *mgvnLevel4ThreshFAST;
  mnMinTrackedPoints = *mgvnMinTrackedPoints;
  mnGlobalSE3 = *mgvnGlobalSE3;
  mnGlobalSO3 = *mgvnGlobalSO3;
  mdMaxRotationError = *mgvdMaxRotationError;
  mdMaxTranslationError = *mgvdMaxTranslationError;
  mnDrawFASTCorners = *mgvnDrawFASTCorners;
  mdMinInitBaseline = *mgvdMinInitBaseline;

  // Select the relocalisation backend: small-blurry-image (PTAM-style) or
  // SCARF-feature-based. Owned by this class; freed in the destructor.
  if(*mgvnUseRelocaliserSBI)
    mpRelocaliser = new RelocaliserSBI(c);
  else
    mpRelocaliser = new RelocaliserSCARF(c);

	// ARToolKitPlus multi-marker tracker, used only for map initialisation
	// (see TrackMarker in TrackFrame). Sized to the full camera image.
	mpMultiMarkerTracker = new ARToolKitPlus::TrackerMultiMarkerImpl<6,6,6,1,16>(mCamera.GetImageSize()[0], mCamera.GetImageSize()[1]);
	mpMultiMarkerTracker->setPixelFormat(ARToolKitPlus::PIXEL_FORMAT_LUM);

	// load calibration file (check config file!!!)
  static gvar3<string> gvsArtkCameraConfigFile("MultiMarkerTracker.ArtkCameraConfigFile", "config/firefly_mv_4mm_default.cal", SILENT);
  static gvar3<string> gvsArtkMarkerConfigFile("MultiMarkerTracker.ArtkMarkerConfigFile", "config/artk_marker_default.cfg", SILENT);
	// NOTE(review): init failure is only reported to stdout; the tracker keeps
	// running with an uninitialised marker tracker -- confirm this is intended.
	if(!mpMultiMarkerTracker->init(gvsArtkCameraConfigFile->c_str(), gvsArtkMarkerConfigFile->c_str(), 1.0f, 1000.0f)){
		std::cout << "ERROR: Could not initialize the ARToolkit tracker" << std::endl;
		std::cout << "\t Tried to load camera calibration file: " << *gvsArtkCameraConfigFile << std::endl;
		std::cout << "\t Tried to load marker config file: " << *gvsArtkMarkerConfigFile << std::endl;
	}
  else
  {
		std::cout << "Loaded camera calibration file: " << *gvsArtkCameraConfigFile << " " << std::endl;
		std::cout << "Loaded marker config file: " << *gvsArtkMarkerConfigFile << std::endl;
  }

	// set ARToolKit parameters
  // NOTE(review): default is given as the string "100" for an int gvar --
  // GVars3 parses it, but an int literal would be clearer.
  static gvar3<int> gvnArtkThreshold("MultiMarkerTracker.ArtkThreshold", "100", SILENT);
	mpMultiMarkerTracker->setBorderWidth(0.25f);
	mpMultiMarkerTracker->setThreshold(*gvnArtkThreshold);
	mpMultiMarkerTracker->setUndistortionMode(ARToolKitPlus::UNDIST_LUT);
	mpMultiMarkerTracker->setPixelFormat(ARToolKitPlus::PIXEL_FORMAT_LUM);
	mpMultiMarkerTracker->setMarkerMode(ARToolKitPlus::MARKER_ID_SIMPLE);
	mpMultiMarkerTracker->setPoseEstimator(ARToolKitPlus::POSE_ESTIMATOR_RPP);
	// Auto-threshold adapts to lighting; it overrides the fixed threshold above.
	mpMultiMarkerTracker->activateAutoThreshold(true);

  // Most of the initialisation is done in Reset()
  Reset();
}

// Releases the heap objects owned by the tracker.
Tracker::~Tracker()
{
  delete mpRelocaliser;
  delete mpMultiMarkerTracker;
  // Fix: the small blurry images allocated in TrackFrame() were never freed,
  // leaking two SmallBlurryImage objects per Tracker lifetime. Both pointers
  // are initialised to NULL in the constructor, so delete is always safe.
  delete mpSBILastFrame;
  delete mpSBIThisFrame;
}

// Resets the tracker, wipes the map.
// This is the main Reset-handler-entry-point of the program! Other classes' resets propagate from here.
// It's always called in the Tracker's thread, often as a GUI command.
void Tracker::Reset(bool bCompleteReset)
{
  // Wipe per-track state so the next frame starts from scratch.
  mbDidCoarse = false;                 // coarse tracking stage was not used
  mbUserPressedSpacebar = false;       // pending user keyframe/init request
  mTrackingQuality = BAD;              // assume the worst until tracking succeeds
  mnLostFrames = 0;
  mdMSDScaledVelocityMagnitude = 0;
  mnLastKeyFrameDropped = -20;         // negative so the spacing test in TrackFrame
                                       // (mnFrame - mnLastKeyFrameDropped > 20) passes early
  mv6CameraVelocity = Zeros;           // motion model velocity
  mbJustRecoveredSoUseCoarse = false;
  mnShowScale = 0;
  mdInitBaseline = 0.0;
  mvInitialKeyFrames.clear();          // keyframes collected for map initialisation
  mmMarkerMeasurements.clear();        // ARToolKit marker observations for init

  // A complete reset also drops the current keyframe; a partial reset keeps it.
  if(bCompleteReset)
  {
    mCurrentKF.reset();
  }

  // Tell the MapMaker to reset itself.. 
  // this may take some time, since the mapmaker thread may have to wait
  // for an abort-check during calculation, so sleep while waiting.
  // MapMaker will also clear the map.
  mMapMaker.RequestReset(bCompleteReset);
  // Busy-wait (with a short sleep) until the mapmaker thread acknowledges.
  while(!mMapMaker.ResetDone())
#ifndef WIN32
	  usleep(10);
#else
	  Sleep(1);
#endif
}

struct Sort_Scores2 {
  bool operator()(const std::pair<CVD::ImageRef,int> &left, std::pair<CVD::ImageRef,int> &right) {
    return left.second > right.second;
  }
} SortScores2 ;

//
//void Tracker::DrawOpticalFlow(CVD::Image<CVD::byte> &CurrentImage, SE3GaussianPDF &se3PdfGfromC, std::vector<TooN::Vector<3> > &vTest)
//{
//  vTest.clear();
//  cout << "Entra en Draw" << endl;
//  double ImgDistMaxThreshold = 0.2*fmax(640,480);
// //   double ImgDistMinThreshold = 0.05*fmax(640,480);
//  cv::Mat CurrentFrameMat = cv::Mat(640,480, CV_8UC1, CurrentImage.data(),CurrentImage.row_stride());
//  D.CurrentFrame.reset(new KeyFrame());
//  D.CurrentFrame->MakeKeyFrame_Lite(CurrentImage, nLevelThreshFAST, se3PdfGfromC);
//  cout << "Pasa inicializacion" << endl;
//  if (!D.init)
//  {
//    D.index = 0;
//    D.init = true;
//  }
//  else
//  {
//    D.index++;
//    cout << "Entra en bucle" << endl;
//    std::vector<cv::Point2f> CurrFrameFeaturePos;
//    CurrFrameFeaturePos.clear();
//    std::vector<unsigned char> status;
//    std::vector<float> err;
//    cv::Size winSize(15,15);
//    int critCOUNT = cv::TermCriteria::COUNT;
//    int critEPS = 2;
//    cv::TermCriteria criteria(critCOUNT + critEPS ,30,0.01);
//
//    int igual1= D.OldFrameMat.at(30,30);
//    int igual2 = CurrentFrameMat.at(30,30);
//    bool igual = (igual1==igual2);
//    cout << "Son iguales" << igual <<  endl;
//    //TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01);
//    cv::calcOpticalFlowPyrLK(D.OldFrameMat,CurrentFrameMat,D.OldFrameFeaturePos,CurrFrameFeaturePos,status,err,winSize,3,criteria,0.5,0);
//    cout << "Ha calculado optical flow" << D.OldFrameFeaturePos.size() <<"-" <<CurrFrameFeaturePos.size() <<  endl;
//
//    glPointSize(5);
//    glLineWidth(2);
//    glEnable(GL_POINT_SMOOTH);
//    glEnable(GL_LINE_SMOOTH);
//    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//    glEnable(GL_BLEND);
//    //glBegin(GL_LINES);
//    glBegin(GL_LINES);
//
//    TooN::Vector<2> vec1,vec2,vec2_a;
//    double ImgDist;
//    cout << "Dibuando " << D.OldFrameFeaturePos.size() << "puntos "<< endl;
//    for(int i=0;i<(int)D.OldFrameFeaturePos.size();i++)
//    {
//      vec1[0] = D.OldFrameFeaturePos[i].x;
//      vec1[1] = D.OldFrameFeaturePos[i].y;
//      vec2[0] = CurrFrameFeaturePos[i].x;
//      vec2[1] = CurrFrameFeaturePos[i].y;
//      cout << "Coord"<<"(" << vec1[0] <<","<< vec1[1]<<")."<<"(" << vec2[0] <<","<< vec2[1]<<")."  <<  endl;
//      ImgDist = sqrt(pow(vec1[0]-vec2[0],2)+pow(vec1[1]-vec2[1],2));
//      if (1)//ImgDist < ImgDistMaxThreshold)
//      {
//
//        glColor3f(0,1,0);
//        glVertex(vec1);
//        glColor3f(0,0,1);
//        glVertex(vec2);
//
//      }
//    }
//    vec1[0] = 3;
//    vec1[1] = 2;
//    vec2[0] = 150+D.index;
//    vec2[1] = 150+D.index;
//
//    glColor3f(0,1,0);
//    glVertex(vec1);
//    glColor3f(0,0,1);
//    glVertex(vec2);
//    glEnd();
//    cout << "terminado de dibujar" << endl;
//    /*
//    glPointSize(5);
//    glLineWidth(2);
//    glEnable(GL_POINT_SMOOTH);
//    glEnable(GL_LINE_SMOOTH);
//    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
//    glEnable(GL_BLEND);
//    //glBegin(GL_LINES);
//    glBegin(GL_LINES);
//    TooN::Vector<2> vec1,vec2,vec2_a;
//
//    vec1[0] = 3;
//    vec1[1] = 2;
//    vec2[0] = 150+D.index;
//    vec2[1] = 150+D.index;
//
//    glColor3f(0,1,0);
//    glVertex(vec1);
//    glColor3f(0,0,1);
//    glVertex(vec2);
//
//    glEnd();
//*/
//
//  }
//  //Update
//  cout << "Entra en update" << endl;
//  int NFeatures = 1000;
//  D.OldFrameMat = CurrentFrameMat;
////  cout << "Antes de nonmax" << endl;
////  CVD::fast_corner_detect_9(CurrentImage,D.CurrentFrame->aLevels[0].vCorners,40);
////  cout << "Hace 9 nonmax" << endl;
////  CVD::fast_nonmax_with_scores(CurrentImage,D.CurrentFrame->aLevels[0].vCorners,10,D.CurrentFrame->aLevels[0].vMaxCorners);
////  cout << "Hace scores" << endl;
////  std::partial_sort(D.CurrentFrame->aLevels[0].vMaxCorners.begin(), D.CurrentFrame->aLevels[0].vMaxCorners.begin()+NFeatures, D.CurrentFrame->aLevels[0].vMaxCorners.end(), SortScores2);
//  D.OldFrameFeaturePos.clear();
//  cv::Point2f auxPoint;
//  for(int i = 1;i<640;i=i+30)
//  {
//    for (int j = 1;j<480;j=j+30)
//    {
//    auxPoint.x = (float)i;//D.CurrentFrame->aLevels[0].vMaxCorners[i].first.x;
//    auxPoint.y = (float)j;//D.CurrentFrame->aLevels[0].vMaxCorners[i].first.y;
//    D.OldFrameFeaturePos.push_back(auxPoint);
//    }
//  }
//  if (D.index>100)
//    D.index = 0;
//  cout << "LLega al final" << endl;
//}


// TrackFrame is called by System.cc with each incoming video frame.
// It figures out what state the tracker is in, and calls appropriate internal tracking
// functions. bDraw tells the tracker whether it should output any GL graphics
// or not (it should not draw, for example, when AR stuff is being shown.)




// Processes one video frame: builds a keyframe struct from the image, tracks
// against the map if one exists (or tries to initialise one from markers if
// not), attempts recovery when lost, and services queued GUI commands.
// Returns true only when the map was successfully tracked this frame.
bool Tracker::TrackFrame(uint64_t timestamp, CVD::Image<CVD::byte> &greyFrame, CVD::Image<CVD::Rgb<CVD::byte> > &colourFrame, bool bDraw, SE3GaussianPDF &se3PdfGfromC, std::vector<TooN::Vector<3> > &vTest)
{
 // std::cout << "TrackFrame" << std::endl;
  bool bTracking = false;     // return value: did we track against the map?
  mbDraw = bDraw;
  mMessageForUser.str("");   // Wipe the user message clean
  if(mMapMaker.IsPaused())
    mMessageForUser << "**MapMaker Paused** ";

  // Build the pyramid/corner keyframe structure for this frame.
  mCurrentKF.reset(new KeyFrame());

  mCurrentKF->MakeKeyFrame_Lite(greyFrame, nLevelThreshFAST, se3PdfGfromC);

  // Update the small images for the rotation estimator
  if(!mpSBIThisFrame)
  {
    // First frame ever: both "last" and "this" SBI come from this frame.
    mpSBIThisFrame = new SmallBlurryImage(*mCurrentKF);
    mpSBILastFrame = new SmallBlurryImage(*mCurrentKF);
  }
  else
  {
    // Rotate the pair: this frame's SBI becomes "last" next time round.
    delete  mpSBILastFrame;
    mpSBILastFrame = mpSBIThisFrame;
    mpSBIThisFrame = new SmallBlurryImage(*mCurrentKF);
  }
  
  // From now on we only use the keyframe struct!
  mnFrame++;

  if(mbDraw)
  {
    // Draw the raw greyscale image, then (optionally) the FAST corners of
    // each pyramid level in a distinct colour, scaled back to level-0 coords.
    glRasterPos2i(0,0);
    glDrawPixels(mCurrentKF->aLevels[0].im.size().x, mCurrentKF->aLevels[0].im.size().y, GL_LUMINANCE, GL_UNSIGNED_BYTE, mCurrentKF->aLevels[0].im.data());
    if(mnDrawFASTCorners)
  	{
	    glColor3f(1,0,1);  glPointSize(1); glBegin(GL_POINTS);
	    for(unsigned int i=0; i<mCurrentKF->aLevels[0].vCorners.size(); i++) 
	      glVertex(mCurrentKF->aLevels[0].vCorners[i]);
	    glEnd();
	    glColor3f(0,0,1);  glPointSize(1); glBegin(GL_POINTS);
	    for(unsigned int i=0; i<mCurrentKF->aLevels[1].vCorners.size(); i++) 
	      glVertex(mCurrentKF->aLevels[1].vCorners[i]*2);
	    glEnd();
	    glColor3f(0,1,0);  glPointSize(1); glBegin(GL_POINTS);
	    for(unsigned int i=0; i<mCurrentKF->aLevels[2].vCorners.size(); i++) 
	      glVertex(mCurrentKF->aLevels[2].vCorners[i]*4);
	    glEnd();
	    glColor3f(1,0,0);  glPointSize(1); glBegin(GL_POINTS);
	    for(unsigned int i=0; i<mCurrentKF->aLevels[3].vCorners.size(); i++) 
	      glVertex(mCurrentKF->aLevels[3].vCorners[i]*8);
	    glEnd();
	  }
  }

  // Decide what to do - if there is a map, try to track the map ...
  if(mMapMaker.GetMap()->IsGood())
  {
    // Firstly, compute relative motion from external measurements
    //if(mnGlobalSE3 || mnGlobalSO3)
      mCurrentKF->ComputeGlobalFfromC(mMapMaker.GetGfromF());

    if(!mnLostFrames)//if(mnLostFrames < 3)  // .. but only if we're not lost!
	  {
      // Predict pose from the motion model, track, then update the model.
      ApplyMotionModel(timestamp);
      TrackMap(mMapMaker.GetMap());
      UpdateMotionModel();

	    AssessTrackingQuality();  //  Check if we're lost or if tracking is poor.
	  
      mMapMaker.UpdateTrackerPosition(mse3CamFromWorld);

      // Build the status line shown to the user.
      mMessageForUser << "Tracking Map, quality ";
      if(mTrackingQuality == GOOD)  
      {
        mMessageForUser << "good.";
      }
      else if(mTrackingQuality == DODGY)
      { 
        mMessageForUser << "poor.";
      }
      else if(mTrackingQuality == BAD)  
         mMessageForUser << "bad.";

      mMessageForUser << " Found:";
      for(int i=0; i<LEVELS; i++) 
        mMessageForUser << " " << manMeasFound[i] << "/" << manMeasAttempted[i];

      // Map statistics are read under the keyframe shared lock.
      boost::shared_lock< boost::shared_mutex > lockKeyFrames(mMapMaker.GetMap()->mKeyFramesLock);
      mMessageForUser << " Map: " << mMapMaker.GetMap()->msLocalMapPoints.size() << "/" << mMapMaker.GetMap()->mvAllMapPoints.size() << "P, " << mMapMaker.GetMap()->msLocalOptimKeyFrames.size() << "KF";
      lockKeyFrames.unlock();
      
      mDynamicObject.NewFrame(timestamp,mMapMaker.MostRecentKeyFrame(), mCurrentKF, vTest);

      /*if(mbDraw)
      {
        RenderMesh(mMapMaker.GetMap());
	      RenderGrid(); 
      }*/

      double dMaxKFDistWiggleMult = 1.0;
      if(mnThisFramePoints < mnMinTrackedPoints) // If we desparately need a new keyframe for tracking relax the keyframe spacing constraint a little...
        dMaxKFDistWiggleMult = 0.5;
	  
	    // Heuristics to check if a key-frame should be added to the map: (NewKeyFrame() || mMapMaker.NeedNewKeyFrame(mCurrentKF))
	    if(mTrackingQuality==GOOD && mMapMaker.NeedNewKeyFrame(mCurrentKF, dMaxKFDistWiggleMult) && mMapMaker.QueueSize()<3 && mnFrame-mnLastKeyFrameDropped > 20)
      {
        mMessageForUser << " Adding key-frame.";
        AddCurrentKeyFrameToMap(colourFrame);
      }
      else
        DeleteCurrentKeyFrame(); // Not added to map so we do not need it anymore

      bTracking = true;
	  }
    else  // what if there is a map, but tracking has been lost?
	  {
	    mMessageForUser << "** Attempting recovery **.";
	    AttemptRecovery(colourFrame);
	  }
  }
  else // If there is no map, try to make one.
  {
    // Accumulate marker-based keyframes until they span a sufficient baseline,
    // then hand them to the MapMaker for stereo initialisation.
    TrackMarker(timestamp, greyFrame, colourFrame);




    mdInitBaseline = MaxBaseline(mvInitialKeyFrames);
    if(mdInitBaseline > mdMinInitBaseline)
    {
      // NOTE(review): 'successful' is never checked -- initialisation failure
      // is silently discarded along with the collected keyframes; confirm.
      bool successful = mMapMaker.InitFromKeyFrames(mvInitialKeyFrames, mmMarkerMeasurements, mse3CamFromWorld);
      mvInitialKeyFrames.clear();
      mmMarkerMeasurements.clear();
    }
  }

  // GUI interface
  // Handle commands queued by the GUI callback, in the tracker's own thread.
  while(!mvQueuedCommands.empty())
  {
    GUICommandHandler(mvQueuedCommands.begin()->sCommand, mvQueuedCommands.begin()->sParams);
    mvQueuedCommands.erase(mvQueuedCommands.begin());
  }
  //Only to "non real test"
   //  DrawOpticalFlow(greyFrame,se3PdfGfromC, vTest);

  return bTracking;
}

// Try to relocalise in case tracking was lost.
// Try to relocalise in case tracking was lost.
// Picks a reference keyframe near the tracker's last known position, asks the
// relocaliser for a pose estimate, and if one is found re-runs TrackMap from
// that pose. Returns true only if post-recovery tracking quality is GOOD.
bool Tracker::AttemptRecovery(CVD::Image<CVD::Rgb<CVD::byte> > &colour)
{
  // bOutsideMap would flag a GPS position outside the mapped area; the code
  // computing it is currently disabled, so it stays false.
  bool bOutsideMap = false;
  boost::shared_ptr<KeyFrame> pMapKF;

  // If we have a GPS fix for this frame, then update pMapKF
  /*if(mnGlobalSE3 && mCurrentKF->mse3PdfFfromC.has_translation())
  {
    std::pair<double, boost::shared_ptr<KeyFrame> > pClosest;
    SE3<> se3WfromC = mMapMaker.GetFfromW().inverse() * mCurrentKF->mse3PdfFfromC.get_mean();
    Vector<3> v3MapPos = se3WfromC.get_translation();
    boost::shared_lock< boost::shared_mutex > lockKeyFrames(mMapMaker.GetMap()->mKeyFramesLock);
    mMapMaker.GetMap()->mrtAllKeyFrames.NearestNeighbour(v3MapPos.get_data_ptr(), boost::shared_ptr<KeyFrame>(), pClosest, 2);
    lockKeyFrames.unlock();
    pMapKF = pClosest.second;

    Vector<3> v3Diff = pMapKF->se3CfromW.inverse().get_translation() - v3MapPos;
    double dDistance = sqrt(v3Diff[0]*v3Diff[0] + v3Diff[1]*v3Diff[1]);

    if(mMapMaker.IsDistanceToNearestKeyFrameExcessive(dDistance))
      bOutsideMap = true;
  }
  else*/
    pMapKF = mMapMaker.NearestKeyFrameToTrackerPosition();

  if(bOutsideMap)
  {
    //Reset(false); // Reset but preserve any existing map(s)
    return false;
  }
  else
  {
    mCurrentKF->MakeKeyFrame_Rest(mScarf);  // This populates the Candidates list (of robust features), which is required for relocalisation.
    bool bRelocGood = mpRelocaliser->AttemptRecovery(mMapMaker.GetMap(), mCurrentKF, pMapKF); //Attempt local area recovery first...
    if(!bRelocGood)
    {
      DeleteCurrentKeyFrame(); // Not added to map so we do not need it anymore
      return false;
    }
    else
    {
      // Adopt the relocaliser's pose, zero the motion model, and flag the
      // next TrackMap call to use the coarse stage.
      SE3<> se3Best = mpRelocaliser->BestPose();
      mse3CamFromWorld = mse3StartPos = se3Best;
      mv6CameraVelocity = Zeros;
      mbJustRecoveredSoUseCoarse = true;

      // Refresh the local map around the recovered pose before tracking.
      mMapMaker.ForceLocalMapUpdate(mse3CamFromWorld);

      TrackMap(mMapMaker.GetMap());
      AssessTrackingQuality();

      DeleteCurrentKeyFrame(); // Not added to map so we do not need it anymore

      if(mTrackingQuality == GOOD)
      {
        //mnLastKeyFrameDropped = mnFrame; // TODO: was not in PTAM tracker, added so cannot add keyframe until we are sure tracking is stable...
        return true;
      }
      else
        return false;
    }
  }
}

// Draw the reference grid to give the user an idea of whether tracking is OK or not.
void Tracker::RenderGrid()
{
  // The colour of the ref grid shows if the coarse stage of tracking was used
  // (it's turned off when the camera is sitting still to reduce jitter.)
  if(mbDidCoarse)
    glColor4f(.0, 0.5, .0, 0.6);
  else
    glColor4f(0,0,0,0.6);
  
  // The grid is projected manually, i.e. GL receives projected 2D coords to draw.
  int nHalfCells = 8;
  int nTot = nHalfCells * 2 + 1;
  Image<Vector<2> >  imVertices(ImageRef(nTot,nTot));
  for(int i=0; i<nTot; i++)
    for(int j=0; j<nTot; j++)
      {
	Vector<3> v3;
	v3[0] = (i - nHalfCells) * 0.1;
	v3[1] = (j - nHalfCells) * 0.1;
	v3[2] = 0.0;
	Vector<3> v3Cam = mse3CamFromWorld * v3;
	if(v3Cam[2] < 0.001)
	  v3Cam[2] = 0.001;
	imVertices[i][j] = mCamera.Project(project(v3Cam));
      }
  glEnable(GL_LINE_SMOOTH);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
  glLineWidth(2);
  for(int i=0; i<nTot; i++)
    {
      glBegin(GL_LINE_STRIP);
      for(int j=0; j<nTot; j++)
	glVertex(imVertices[i][j]);
      glEnd();
      
      glBegin(GL_LINE_STRIP);
      for(int j=0; j<nTot; j++)
	glVertex(imVertices[j][i]);
      glEnd();
    };
  
  glLineWidth(1);
  glColor3f(1,0,0);
}


//Draw a mesh of the surface using adaptive compactly supported radial basis functions (ACSRBF)
// Renders the reconstructed surface mesh as wireframe triangles, manually
// projecting each face vertex into the current camera view. Reads the mesh
// under the map's mesh shared lock.
void Tracker::RenderMesh()
{
  glEnable(GL_LINE_SMOOTH);
  glEnable(GL_BLEND);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
  glLineWidth(2);

  //Need to search for bounds in x,y,z to consider, determine equivalent bounds in i,j,k and then search for faces

  boost::shared_lock< boost::shared_mutex > lockMesh(mMapMaker.GetMap()->mMeshLock);

  std::vector<boost::shared_ptr<Cube> > cubes;
  unsigned int cube_N = mMapMaker.GetMap()->mesh.GetAllCubes(cubes);

  for(unsigned int i = 0; i < cube_N; ++i)
  {
    // NOTE(review): cubes[i]->faces[j] is dereferenced many times per face;
    // caching the face in a local reference would avoid repeated indirection.
    for(unsigned int j = 0; j < cubes[i]->faces.size(); ++j)
    {
      // Gather the triangle's three vertices plus its centre and normal tip
      // (the normal is drawn 0.05 units out from the centre, when enabled).
      Vector<3> v3Vertex0 = makeVector(cubes[i]->faces[j]->x1, cubes[i]->faces[j]->y1, cubes[i]->faces[j]->z1);
      Vector<3> v3Vertex1 = makeVector(cubes[i]->faces[j]->x2, cubes[i]->faces[j]->y2, cubes[i]->faces[j]->z2);
      Vector<3> v3Vertex2 = makeVector(cubes[i]->faces[j]->x3, cubes[i]->faces[j]->y3, cubes[i]->faces[j]->z3);
      Vector<3> v3Center = makeVector(cubes[i]->faces[j]->xc, cubes[i]->faces[j]->yc, cubes[i]->faces[j]->zc);
      Vector<3> v3Normal = makeVector(cubes[i]->faces[j]->xn, cubes[i]->faces[j]->yn, cubes[i]->faces[j]->zn);

      // Transform from world to camera frame.
      v3Vertex0 = mse3CamFromWorld * v3Vertex0;
      v3Vertex1 = mse3CamFromWorld * v3Vertex1;
      v3Vertex2 = mse3CamFromWorld * v3Vertex2;
      v3Normal = mse3CamFromWorld * (v3Center + 0.05*v3Normal);
      v3Center = mse3CamFromWorld * v3Center;

	    // Clamp depths so points at/behind the camera still project sanely.
	    if(v3Vertex0[2] < 0.001)
	      v3Vertex0[2] = 0.001;
	    if(v3Vertex1[2] < 0.001)
	      v3Vertex1[2] = 0.001;
	    if(v3Vertex2[2] < 0.001)
	      v3Vertex2[2] = 0.001;
	    if(v3Center[2] < 0.001)
	      v3Center[2] = 0.001;
	    if(v3Normal[2] < 0.001)
	      v3Normal[2] = 0.001;

      Vector<2> imVertex0 = mCamera.Project(project(v3Vertex0));
      Vector<2> imVertex1 = mCamera.Project(project(v3Vertex1));
      Vector<2> imVertex2 = mCamera.Project(project(v3Vertex2));
      Vector<2> imCenter = mCamera.Project(project(v3Center));
      Vector<2> imNormal = mCamera.Project(project(v3Normal));

      // Green intensity encodes the cube's score (scaled by 1/300).
      glColor4f(.0, cubes[i]->score/300.0, .0, 0.6);//0.5
      // Closed triangle outline: repeat the first vertex to close the strip.
      glBegin(GL_LINE_STRIP);
      glVertex(imVertex0);
      glVertex(imVertex1);
      glVertex(imVertex2);
      glVertex(imVertex0);
      glEnd();

      /*glColor4f(0.5, .0, .0, 0.6);
      glBegin(GL_LINES);
      glVertex(imCenter);
      glVertex(imNormal);
      glEnd();*/

      /*glColor4f(0.5, .0, .0, 0.6);
      glBegin(GL_POINTS);
      if(faces[i]->b1)
        glVertex(imVertex0);
      if(faces[i]->b2)
        glVertex(imVertex1);
      if(faces[i]->b3)
        glVertex(imVertex2);
      glEnd();*/
    }
  }
  lockMesh.unlock();
  glLineWidth(1);
  glColor3f(1,0,0);
}


// GUI interface. Stuff commands onto the back of a queue so the tracker handles
// them in its own thread at the end of each frame. Note the charming lack of
// any thread safety (no lock on mvQueuedCommands).
void Tracker::GUICommandCallBack(void* ptr, string sCommand, string sParams)
{
  Command c;
  c.sCommand = sCommand;
  c.sParams = sParams;
  ((Tracker*) ptr)->mvQueuedCommands.push_back(c);
}

// This is called in the tracker's own thread.
void Tracker::GUICommandHandler(string sCommand, string sParams)  // Called by the callback func..
{
  if(sCommand=="Reset")
  {
    Reset();
    return;
  }

  // KeyPress commands are issued by GLWindow
  if(sCommand=="KeyPress")
  {
    if(sParams == "Space")
	  {
	    mbUserPressedSpacebar = true;
	  }
    else if(sParams == "r")
	  {
	    Reset();
	  }
    else if(sParams == "s")
	  {
      mMapMaker.GetMap()->vSnapshotPoses.push_back(pair<string,SE3<> >(mpRelocaliser->Type(), mse3CamFromWorld));
	  }
    else if(sParams == "q" || sParams == "Escape")
	  {
	    GUI.ParseLine("quit");
	  }
    return;
  }
  
  cout << "! Tracker::GUICommandHandler: unhandled command "<< sCommand << endl;
  exit(1);
}; 

// Returns the largest pairwise horizontal baseline (x-y plane only; the z
// component is deliberately ignored, matching the 2D distance used in the
// recovery code -- TODO confirm this is intentional) between the camera
// centres of the given key-frames. Used to decide when enough parallax has
// accumulated for map initialisation.
double Tracker::MaxBaseline(std::vector<boost::shared_ptr<KeyFrame> > &vKeyFrames)
{
  const size_t nKF = vKeyFrames.size();   // size_t avoids signed/unsigned comparison

  // Precompute each camera centre once: the original recomputed
  // se3CfromW.inverse() O(n^2) times inside the pair loop.
  std::vector<Vector<3> > vCentres;
  vCentres.reserve(nKF);
  for(size_t i = 0; i < nKF; ++i)
    vCentres.push_back(vKeyFrames[i]->se3CfromW.inverse().get_translation());

  // Find maximum baseline over all pairs.
  double dMaxBaseline = 0.0;
  for(size_t i = 0; i < nKF; ++i)
  {
    for(size_t j = i+1; j < nKF; ++j)
    {
      Vector<3> v3Diff = vCentres[i] - vCentres[j];
      double dBaseline = sqrt(v3Diff[0]*v3Diff[0] + v3Diff[1]*v3Diff[1]);
      if(dBaseline > dMaxBaseline)
        dMaxBaseline = dBaseline;
    }
  }

  return dMaxBaseline;
}

// Returns the smallest horizontal baseline (x-y plane only, consistent with
// MaxBaseline) between kf's camera centre and any key-frame in vKeyFrames.
// Returns numeric_limits<double>::max() if vKeyFrames is empty.
double Tracker::ClosestKeyFrame(boost::shared_ptr<KeyFrame> kf, std::vector<boost::shared_ptr<KeyFrame> > &vKeyFrames)
{
  // Hoist the query camera centre: it is loop-invariant, but the original
  // recomputed it (including an SE3 inverse) on every iteration.
  const Vector<3> v3KfCentre = kf->se3CfromW.inverse().get_translation();

  // Find minimum baseline (size_t avoids a signed/unsigned comparison).
  double dMinBaseline = std::numeric_limits<double>::max();
  for(size_t i = 0; i < vKeyFrames.size(); ++i)
  {
    Vector<3> v3Diff = vKeyFrames[i]->se3CfromW.inverse().get_translation() - v3KfCentre;
    double dBaseline = sqrt(v3Diff[0]*v3Diff[0] + v3Diff[1]*v3Diff[1]);
    if(dBaseline < dMinBaseline)
      dMinBaseline = dBaseline;
  }

  return dMinBaseline;
}

// TrackMap is the main purpose of the Tracker.
// It first projects all map points into the image to find a potentially-visible-set (PVS);
// Then it tries to find some points of the PVS in the image;
// Then it updates camera pose according to any points found.
// Above may happen twice if a coarse tracking stage is performed.
// Finally it updates the tracker's current-frame-KeyFrame struct with any
// measurements made.
// A lot of low-level functionality is split into helper classes:
// class TrackerData handles the projection of a MapPoint and stores intermediate results;
// class PatchFinder finds a projected MapPoint in the current-frame-KeyFrame.
void Tracker::TrackMap(Map *pMap)
{
  // Some accounting which will be used for tracking quality assessment:
  for(int i=0; i<LEVELS; i++)
    manMeasAttempted[i] = manMeasFound[i] = 0;
  
  // The Potentially-Visible-Set (PVS) is split into pyramid levels.
  vector<boost::shared_ptr<TrackerData> > avPVS[LEVELS]; 
  for(int i=0; i<LEVELS; i++)
    avPVS[i].reserve(500);

  // Shared (read) lock on the map's point set -- the MapMaker thread may be
  // mutating it concurrently. Held only while the PVS is built below.
  boost::shared_lock< boost::shared_mutex > lockMapPoints(pMap->mMapPointsLock);

  for(std::set<boost::shared_ptr<MapPoint> >::iterator it = pMap->msLocalMapPoints.begin(); it != pMap->msLocalMapPoints.end(); ++it)
  {
    boost::shared_ptr<MapPoint> p = *it;
    boost::shared_ptr<TrackerData> TData(new TrackerData(p));
      
    // Project according to current view, and if it's not in the image, skip.
    TData->Project(mse3CamFromWorld, mCamera); 
    if(!TData->bInImage)
      continue;   

    // Calculate camera projection derivatives of this point.
    TData->GetDerivsUnsafe(mCamera);

    // And check what the PatchFinder (included in TrackerData) makes of the mappoint in this view..
    TData->nSearchLevel = TData->Finder.CalcSearchLevelAndWarpMatrix(TData->Point, mse3CamFromWorld, TData->m2CamDerivs);
    if(TData->nSearchLevel == -1)
	    continue;   // a negative search pyramid level indicates an inappropriate warp for this view, so skip.

    // Otherwise, this point is suitable to be searched in the current image! Add to the PVS.
    TData->bSearched = false;
    TData->bFound = false;
    avPVS[TData->nSearchLevel].push_back(TData);
  };

  // PVS built; release the read lock before the (potentially slow) image search.
  lockMapPoints.unlock();
  
  // Next: A large degree of faffing about and deciding which points are going to be measured!
  // First, randomly shuffle the individual levels of the PVS.
  for(int i=0; i<LEVELS; i++)
    random_shuffle(avPVS[i].begin(), avPVS[i].end());

  // The next two data structs contain the list of points which will next 
  // be searched for in the image, and then used in pose update.
  vector<boost::shared_ptr<TrackerData> > vNextToSearch;
  vector<boost::shared_ptr<TrackerData> > vIterationSet;
  
  // Tunable parameters to do with the coarse tracking stage:
  static gvar3<unsigned int> gvnCoarseMin("Tracker.CoarseMin", 20, SILENT);   // Min number of large-scale features for coarse stage
  static gvar3<unsigned int> gvnCoarseMax("Tracker.CoarseMax", 60, SILENT);   // Max number of large-scale features for coarse stage
  static gvar3<unsigned int> gvnCoarseRange("Tracker.CoarseRange", 30, SILENT);       // Pixel search radius for coarse features
  static gvar3<int> gvnCoarseSubPixIts("Tracker.CoarseSubPixIts", 8, SILENT); // Max sub-pixel iterations for coarse features
  static gvar3<int> gvnCoarseDisabled("Tracker.DisableCoarse", 0, SILENT);    // Set this to 1 to disable coarse stage (except after recovery)
  static gvar3<double> gvdCoarseMinVel("Tracker.CoarseMinVelocity", 0.006, SILENT);  // Speed above which coarse stage is used.
  
  unsigned int nCoarseMax = *gvnCoarseMax;
  unsigned int nCoarseRange = *gvnCoarseRange;
  
  mbDidCoarse = false;

  // Set of heuristics to check if we should do a coarse tracking stage.
  bool bTryCoarse = true;
  if(*gvnCoarseDisabled || mdMSDScaledVelocityMagnitude < *gvdCoarseMinVel  || nCoarseMax == 0)
    bTryCoarse = false;
  if(mbJustRecoveredSoUseCoarse)
  {
    // After relocalisation the pose is uncertain, so force a coarse stage
    // with doubled budget and search radius.
    bTryCoarse = true;
    nCoarseMax *=2;
    nCoarseRange *=2;
    mbJustRecoveredSoUseCoarse = false;
  };
      
  // If we do want to do a coarse stage, also check that there's enough high-level 
  // PV map points. We use the lowest-res two pyramid levels (LEVELS-1 and LEVELS-2),
  // with preference to LEVELS-1.
  if(bTryCoarse && avPVS[LEVELS-1].size() + avPVS[LEVELS-2].size() > *gvnCoarseMin )
    {
      // Now, fill the vNextToSearch struct with an appropriate number of 
      // TrackerDatas corresponding to coarse map points! This depends on how many
      // there are in different pyramid levels compared to CoarseMin and CoarseMax.
      
      if(avPVS[LEVELS-1].size() <= nCoarseMax) 
	{ // Fewer than CoarseMax in LEVELS-1? then take all of them, and remove them from the PVS list.
	  vNextToSearch = avPVS[LEVELS-1];
	  avPVS[LEVELS-1].clear();
	}
      else
	{ // ..otherwise choose nCoarseMax at random, again removing from the PVS list.
	  // (The levels were shuffled above, so taking the first nCoarseMax is random.)
	  for(unsigned int i=0; i<nCoarseMax; i++)
	    vNextToSearch.push_back(avPVS[LEVELS-1][i]);
	  avPVS[LEVELS-1].erase(avPVS[LEVELS-1].begin(), avPVS[LEVELS-1].begin() + nCoarseMax);
	}
      
      // If didn't source enough from LEVELS-1, get some from LEVELS-2... same as above.
      if(vNextToSearch.size() < nCoarseMax)
	{
	  unsigned int nMoreCoarseNeeded = nCoarseMax - vNextToSearch.size();
	  if(avPVS[LEVELS-2].size() <= nMoreCoarseNeeded)
	    {
	      vNextToSearch = avPVS[LEVELS-2];
	      avPVS[LEVELS-2].clear();
	    }
	  else
	    {
	      for(unsigned int i=0; i<nMoreCoarseNeeded; i++)
		vNextToSearch.push_back(avPVS[LEVELS-2][i]);
	      avPVS[LEVELS-2].erase(avPVS[LEVELS-2].begin(), avPVS[LEVELS-2].begin() + nMoreCoarseNeeded);
	    }
	}
      // Now go and attempt to find these points in the image!
      unsigned int nFound = SearchForPoints(vNextToSearch, nCoarseRange, *gvnCoarseSubPixIts);
      vIterationSet = vNextToSearch;  // Copy over into the to-be-optimised list.
      if(nFound >= *gvnCoarseMin)  // Were enough found to do any meaningful optimisation?
	{
	  mbDidCoarse = true;
	  for(int iter = 0; iter<10; iter++) // If so: do ten Gauss-Newton pose updates iterations.
	    {
	      if(iter != 0)
		{ // Re-project the points on all but the first iteration.
		  for(unsigned int i=0; i<vIterationSet.size(); i++)
		    if(vIterationSet[i]->bFound)  
		      vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
		}
	      for(unsigned int i=0; i<vIterationSet.size(); i++)
		if(vIterationSet[i]->bFound)
		  vIterationSet[i]->CalcJacobian();
	      double dOverrideSigma = 0.0;
	      // Hack: force the MEstimator to be pretty brutal 
	      // with outliers beyond the fifth iteration.
	      // NOTE(review): the coarse stage clamps sigma-squared to 1.0 here,
	      // while the fine stage below uses 16.0 -- confirm this asymmetry
	      // is intentional.
	      if(iter > 5)
		dOverrideSigma = 1.0;
	      
	      // Calculate and apply the pose update...
	      Vector<6> v6Update = 
		CalcPoseUpdate(vIterationSet, dOverrideSigma);
	      mse3CamFromWorld = SE3<>::exp(v6Update) * mse3CamFromWorld;
	    };
	}
    };
  
  // So, at this stage, we may or may not have done a coarse tracking stage.
  // Now do the fine tracking stage. This needs many more points!
  
  int nFineRange = 10;  // Pixel search range for the fine stage. 
  if(mbDidCoarse)       // Can use a tighter search if the coarse stage was already done.
    nFineRange = 5;
  
  // What patches shall we use this time? The high-level ones are quite important,
  // so do all of these, with sub-pixel refinement.
  {
    int l = LEVELS - 1;
    for(unsigned int i=0; i<avPVS[l].size(); i++)
      avPVS[l][i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
    SearchForPoints(avPVS[l], nFineRange, 8);
    for(unsigned int i=0; i<avPVS[l].size(); i++)
      vIterationSet.push_back(avPVS[l][i]);  // Again, plonk all searched points onto the (maybe already populated) vIterationSet.
  };
  
  // All the others levels: Initially, put all remaining potentially visible patches onto vNextToSearch.
  vNextToSearch.clear();
  for(int l=LEVELS - 2; l>=0; l--)
    for(unsigned int i = 0; i < avPVS[l].size(); i++)
      vNextToSearch.push_back(avPVS[l][i]);
  
  // But we haven't got CPU to track _all_ patches in the map - arbitrarily limit 
  // ourselves to 1000, and choose these randomly.
  static gvar3<int> gvnMaxPatchesPerFrame("Tracker.MaxPatchesPerFrame", 1000, SILENT);
  // NOTE(review): this subtraction mixes int and size_t; the < 0 clamp below
  // handles the usual wrap-around, but confirm vIterationSet can never exceed
  // INT_MAX (it realistically cannot with the 1000-patch budget).
  int nFinePatchesToUse = *gvnMaxPatchesPerFrame - vIterationSet.size();
  if(nFinePatchesToUse < 0)
    nFinePatchesToUse = 0;
  if((int) vNextToSearch.size() > nFinePatchesToUse)
    {
      random_shuffle(vNextToSearch.begin(), vNextToSearch.end());
      vNextToSearch.resize(nFinePatchesToUse); // Chop!
    };
  
  // If we did a coarse tracking stage: re-project and find derivs of fine points
  if(mbDidCoarse)
    for(unsigned int i = 0; i < vNextToSearch.size(); i++)
      vNextToSearch[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
  
  // Find fine points in image:
  SearchForPoints(vNextToSearch, nFineRange, 0);
  // And attach them all to the end of the optimisation-set.
  for(unsigned int i = 0; i < vNextToSearch.size(); i++)
    vIterationSet.push_back(vNextToSearch[i]);
  
  // Again, ten gauss-newton pose update iterations.
  Vector<6> v6LastUpdate;
  v6LastUpdate = Zeros;
  for(int iter = 0; iter<10; iter++)
  {
    bool bNonLinearIteration; // For a bit of time-saving: don't do full nonlinear
                              // reprojection at every iteration - it really isn't necessary!
    if(iter == 0 || iter == 4 || iter == 9)
      bNonLinearIteration = true;   // Even this is probably overkill, the reason we do many
    else                            // iterations is for M-Estimator convergence rather than 
      bNonLinearIteration = false;  // linearisation effects.
    
    if(iter != 0)   // Either way: first iteration doesn't need projection update.
    {
      if(bNonLinearIteration)
      {
        for(unsigned int i=0; i<vIterationSet.size(); i++)
	        if(vIterationSet[i]->bFound)
	          vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
      }
      else
      {
        // Cheap linear approximation: shift the projections by the last
        // pose update instead of re-projecting.
        for(unsigned int i=0; i<vIterationSet.size(); i++)
        	if(vIterationSet[i]->bFound)
	          vIterationSet[i]->LinearUpdate(v6LastUpdate);
      };
	}
      
    if(bNonLinearIteration)
	    for(unsigned int i=0; i<vIterationSet.size(); i++)
	      if(vIterationSet[i]->bFound)
	        vIterationSet[i]->CalcJacobian();

    // Again, an M-Estimator hack beyond the fifth iteration.
    double dOverrideSigma = 0.0;
    if(iter > 5)
    	dOverrideSigma = 16.0;
    
    // Calculate and update pose; also store update vector for linear iteration updates.
    // Outliers are only marked on the final iteration, and only while the
    // MapMaker is running.
    Vector<6> v6Update = CalcPoseUpdate(vIterationSet, dOverrideSigma, iter==9 && !mMapMaker.IsPaused());
    mse3CamFromWorld = SE3<>::exp(v6Update) * mse3CamFromWorld;
    v6LastUpdate = v6Update;
  };
  
  if(mbDraw)
  {
    // Draw the tracked points: per-level colour for normal points, white for
    // fixed points. Reverse iteration draws fine (low-level) points on top.
    glPointSize(6);
    glEnable(GL_BLEND);
    glEnable(GL_POINT_SMOOTH);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glBegin(GL_POINTS);
    for(vector<boost::shared_ptr<TrackerData> >::reverse_iterator it = vIterationSet.rbegin(); it!= vIterationSet.rend(); it++)
	  {
	    if(! (*it)->bFound)
	      continue;

      if(! (*it)->Point->bFixed)
  	    glColor(gavLevelColors[(*it)->nSearchLevel]);
      else
  	    glColor(makeVector(1.0,1.0,1.0));
  
	    glVertex((*it)->v2Image);
	  }
    glEnd();
    glDisable(GL_BLEND);
  }
  
  // Update the current keyframe with info on what was found in the frame.
  // Strictly speaking this is unnecessary to do every frame, it'll only be
  // needed if the KF gets added to MapMaker. Do it anyway.
  // Export pose to current keyframe:
  mCurrentKF->se3CfromW = mse3CamFromWorld;
  
  // Record successful measurements. Use the KeyFrame-Measurement struct for this.
  mCurrentKF->mMeasurements.clear();
  for(vector<boost::shared_ptr<TrackerData> >::iterator it = vIterationSet.begin(); it!= vIterationSet.end(); it++)
  {
    if(! (*it)->bFound)
      continue;
    Measurement m;
    m.v2RootPos = (*it)->v2Found;
    m.nLevel = (*it)->nSearchLevel;
    m.bSubPix = (*it)->bDidSubPix; 
    m.Source = Measurement::SRC_TRACKER;
    mCurrentKF->mMeasurements[(*it)->Point] = m;
  }
  
  // Finally, find the mean scene depth from tracked features
  {
    double dSum = 0;
    double dSumSq = 0;
    mnThisFramePoints = 0;
    for(vector<boost::shared_ptr<TrackerData> >::iterator it = vIterationSet.begin(); it!= vIterationSet.end(); it++)
    {
      if((*it)->bFound)
	    {
	      double z = (*it)->v3Cam[2];
	      dSum+= z;
	      dSumSq+= z*z;
	      mnThisFramePoints++;
	    };
    }
    // Only trust the depth statistics with a reasonable sample size;
    // otherwise the keyframe keeps its previous values.
    // Sigma is the population std-dev via E[z^2] - E[z]^2.
    if(mnThisFramePoints > 20)
    {
	    mCurrentKF->dSceneDepthMean = dSum/mnThisFramePoints;
	    mCurrentKF->dSceneDepthSigma = sqrt((dSumSq / mnThisFramePoints) - (mCurrentKF->dSceneDepthMean) * (mCurrentKF->dSceneDepthMean));
    }
  }
}

// Find points in the image. Uses the PatchFinder struct stored in TrackerData
int Tracker::SearchForPoints(vector<boost::shared_ptr<TrackerData> > &vTD, int nRange, int nSubPixIts)
{
  int nFound = 0;

  // Attempt to locate each candidate map point in the current frame.
  for(unsigned int i = 0; i < vTD.size(); i++)
  {
    TrackerData &td = *vTD[i];
    PatchFinder &finder = td.Finder;

    // Build the warped template for this view; a bad template means the
    // point cannot be matched in this frame at all.
    finder.MakeTemplateCoarseCont(td.Point);
    if(finder.TemplateBad())
    {
      td.bInImage = td.bPotentiallyVisible = td.bFound = false;
      continue;
    }

    manMeasAttempted[finder.GetLevel()]++;  // Stats for tracking quality assessment

    // Coarse search at FAST-corner locations within nRange pixels.
    bool bCoarseFound = finder.FindPatchCoarse(ir(td.v2Image), *mCurrentKF, nRange);
    td.bSearched = true;
    if(!bCoarseFound)
    {
      td.bFound = false;
      continue;
    }

    td.bFound = true;
    td.dSqrtInvNoise = (1.0 / finder.GetLevelScale());
    nFound++;
    manMeasFound[finder.GetLevel()]++;

    if(nSubPixIts > 0)
    {
      // Refine the coarse match to sub-pixel accuracy. If the iterations
      // fail to converge, the patch location is probably very dubious --
      // revoke the match and undo the accounting above.
      td.bDidSubPix = true;
      finder.MakeSubPixTemplate();
      if(!finder.IterateSubPixToConvergence(*mCurrentKF, nSubPixIts))
      {
        td.bFound = false;
        nFound--;
        manMeasFound[finder.GetLevel()]--;
        continue;
      }
      td.v2Found = finder.GetSubPixPos();
    }
    else
    {
      td.v2Found = finder.GetCoarsePosAsVector();
      td.bDidSubPix = false;
    }
  }

  return nFound;
};


//Calculate a pose update 6-vector from a bunch of image measurements.
//User-selectable M-Estimator.
//Normally this robustly estimates a sigma-squared for all the measurements
//to reduce outlier influence, but this can be overridden if
//dOverrideSigma is positive. Also, bMarkOutliers set to true
//records any instances of a point being marked an outlier measurement
//by the Tukey MEstimator.
Vector<6> Tracker::CalcPoseUpdate(vector<boost::shared_ptr<TrackerData> > vTD, double dOverrideSigma, bool bMarkOutliers)
{
  // Select the M-estimator from the gvar setting; unknown names fall back
  // to Tukey (and the setting is rewritten so the warning prints once).
  static gvar3<string> gvsEstimator("TrackerMEstimator", "Tukey", SILENT);
  int nEstimator;
  if(*gvsEstimator == "Tukey")
    nEstimator = 0;
  else if(*gvsEstimator == "Cauchy")
    nEstimator = 1;
  else if(*gvsEstimator == "Huber")
    nEstimator = 2;
  else
  {
    cout << "Invalid TrackerMEstimator, choices are Tukey, Cauchy, Huber" << endl;
    nEstimator = 0;
    *gvsEstimator = "Tukey";
  }

  // Covariance-scaled reprojection error of every found measurement; the
  // squared magnitudes feed the estimator's sigma-squared computation.
  vector<double> vdErrorSquared;
  for(vector<boost::shared_ptr<TrackerData> >::iterator it = vTD.begin(); it != vTD.end(); ++it)
  {
    TrackerData &td = **it;
    if(!td.bFound)
      continue;
    td.v2Error_CovScaled = td.dSqrtInvNoise * (td.v2Found - td.v2Image);
    vdErrorSquared.push_back(td.v2Error_CovScaled * td.v2Error_CovScaled);
  }

  // No valid measurements? Return a null update.
  if(vdErrorSquared.empty())
    return makeVector( 0,0,0,0,0,0);

  // Robust sigma-squared estimate, unless the caller forced a value.
  double dSigmaSquared;
  if(dOverrideSigma > 0)
    dSigmaSquared = dOverrideSigma;
  else
  {
    switch(nEstimator)
    {
      case 1:  dSigmaSquared = Cauchy::FindSigmaSquared(vdErrorSquared); break;
      case 2:  dSigmaSquared = Huber::FindSigmaSquared(vdErrorSquared);  break;
      default: dSigmaSquared = Tukey::FindSigmaSquared(vdErrorSquared);  break;
    }
  }

  // Reweighted least squares over all found measurements.
  WLS<6> wls;
  wls.add_prior(100.0); // Stabilising prior
  for(vector<boost::shared_ptr<TrackerData> >::iterator it = vTD.begin(); it != vTD.end(); ++it)
  {
    TrackerData &td = **it;
    if(!td.bFound)
      continue;

    Vector<2> &v2Err = td.v2Error_CovScaled;
    double dErrorSq = v2Err * v2Err;
    double dWeight;
    switch(nEstimator)
    {
      case 1:  dWeight = Cauchy::Weight(dErrorSq, dSigmaSquared); break;
      case 2:  dWeight = Huber::Weight(dErrorSq, dSigmaSquared);  break;
      default: dWeight = Tukey::Weight(dErrorSq, dSigmaSquared);  break;
    }

    // Inlier/outlier accounting, only really works for cut-off estimators such as Tukey.
    if(dWeight == 0.0)
    {
      if(bMarkOutliers)
        td.Point->nMEstimatorOutlierCount++;
      continue;
    }
    if(bMarkOutliers)
      td.Point->nMEstimatorInlierCount++;

    Matrix<2,6> &m26Jac = td.m26Jacobian;
    wls.add_mJ(v2Err[0], td.dSqrtInvNoise * m26Jac[0], dWeight); // These two lines are currently
    wls.add_mJ(v2Err[1], td.dSqrtInvNoise * m26Jac[1], dWeight); // the slowest bit of poseits
  }

  wls.compute();
  return wls.get_mu();
}


Vector<6> Tracker::CalcPoseUpdate(vector<boost::shared_ptr<MarkerData> > vMD, double dOverrideSigma)
{
  // Select the M-estimator from the gvar setting; unknown names fall back
  // to Tukey (and the setting is rewritten so the warning prints once).
  static gvar3<string> gvsEstimator("TrackerMEstimator", "Tukey", SILENT);
  int nEstimator;
  if(*gvsEstimator == "Tukey")
    nEstimator = 0;
  else if(*gvsEstimator == "Cauchy")
    nEstimator = 1;
  else if(*gvsEstimator == "Huber")
    nEstimator = 2;
  else
  {
    cout << "Invalid TrackerMEstimator, choices are Tukey, Cauchy, Huber" << endl;
    nEstimator = 0;
    *gvsEstimator = "Tukey";
  }

  // Reprojection error of every found marker corner (no covariance scaling
  // for marker measurements); squared magnitudes feed the sigma estimate.
  vector<double> vdErrorSquared;
  for(vector<boost::shared_ptr<MarkerData> >::iterator it = vMD.begin(); it != vMD.end(); ++it)
  {
    MarkerData &md = **it;
    if(!md.bFound)
      continue;
    md.v2Error_CovScaled = (md.v2Found - md.v2Image);
    vdErrorSquared.push_back(md.v2Error_CovScaled * md.v2Error_CovScaled);
  }

  // No valid measurements? Return a null update.
  if(vdErrorSquared.empty())
    return makeVector( 0,0,0,0,0,0);

  // Robust sigma-squared estimate, unless the caller forced a value.
  double dSigmaSquared;
  if(dOverrideSigma > 0)
    dSigmaSquared = dOverrideSigma;
  else
  {
    switch(nEstimator)
    {
      case 1:  dSigmaSquared = Cauchy::FindSigmaSquared(vdErrorSquared); break;
      case 2:  dSigmaSquared = Huber::FindSigmaSquared(vdErrorSquared);  break;
      default: dSigmaSquared = Tukey::FindSigmaSquared(vdErrorSquared);  break;
    }
  }

  // Reweighted least squares over all found corners.
  WLS<6> wls;
  wls.add_prior(100.0); // Stabilising prior
  for(vector<boost::shared_ptr<MarkerData> >::iterator it = vMD.begin(); it != vMD.end(); ++it)
  {
    MarkerData &md = **it;
    if(!md.bFound)
      continue;

    Vector<2> &v2Err = md.v2Error_CovScaled;
    double dErrorSq = v2Err * v2Err;
    double dWeight;
    switch(nEstimator)
    {
      case 1:  dWeight = Cauchy::Weight(dErrorSq, dSigmaSquared); break;
      case 2:  dWeight = Huber::Weight(dErrorSq, dSigmaSquared);  break;
      default: dWeight = Tukey::Weight(dErrorSq, dSigmaSquared);  break;
    }

    // Zero weight marks the corner as an outlier (only really works for
    // cut-off estimators such as Tukey) and revokes its found flag.
    if(dWeight == 0.0)
    {
      md.bFound = false;
      continue;
    }

    Matrix<2,6> &m26Jac = md.m26Jacobian;
    wls.add_mJ(v2Err[0], m26Jac[0], dWeight); // These two lines are currently
    wls.add_mJ(v2Err[1], m26Jac[1], dWeight); // the slowest bit of poseits
  }

  wls.compute();
  return wls.get_mu();
}


// Just add the current velocity to the current pose.
// N.b. timestamp is in ms
void Tracker::ApplyMotionModel(uint64_t timestamp)
{
  mse3StartPos = mse3CamFromWorld;
  mStartTime = mCamFromWorldTime;
  mCamFromWorldTime = timestamp;
  Vector<6> v6Velocity = mv6CameraVelocity;

  CalcSBIRotation();
  v6Velocity.slice<3,3>() = mv6SBIRot.slice<3,3>();
  v6Velocity[0] = 0.0;
  v6Velocity[1] = 0.0;

  SE3<> se3Temp = SE3<>::exp(v6Velocity);
  mse3CamFromWorld = se3Temp * mse3StartPos;
};


// The motion model is entirely the tracker's, and is kept as a decaying
// constant velocity model.
void Tracker::UpdateMotionModel()
{
  SE3<> se3NewFromOld = mse3CamFromWorld * mse3StartPos.inverse();
  Vector<6> v6Vel = SE3<>::ln(se3NewFromOld);
  Vector<6> v6OldVel = mv6CameraVelocity;
  
  mv6CameraVelocity = 0.9 * (0.5 * v6Vel + 0.5 * v6OldVel);
  mdVelocityMagnitude = sqrt(mv6CameraVelocity * mv6CameraVelocity);
  
  // Also make an estimate of this which has been scaled by the mean scene depth.
  // This is used to decide if we should use a coarse tracking stage.
  // We can tolerate more translational vel when far away from scene!
  Vector<6> v6 = mv6CameraVelocity;
  v6.slice<0,3>() *= 1.0 / mCurrentKF->dSceneDepthMean;
  mdMSDScaledVelocityMagnitude = sqrt(v6*v6);
}

// Time to add a new keyframe? The MapMaker handles most of this.
// Hand the current keyframe (with a colour copy of the frame attached) over
// to the MapMaker, then release our reference so a fresh KF is built next frame.
void Tracker::AddCurrentKeyFrameToMap(CVD::Image<CVD::Rgb<CVD::byte> > &colour)
{
  // Attach the colour image before ownership passes to the MapMaker.
  mCurrentKF->MakeKeyFrame_Colour(colour);
  mMapMaker.AddKeyFrame(mCurrentKF);
  // Drop our shared_ptr; the MapMaker now owns the keyframe.
  mCurrentKF.reset();
  mnLastKeyFrameDropped = mnFrame; //TODO: do not do this? ... was in original PTAM tracker but if when moving fast does not allow keyframes to be created close enough together.
}

// Discard the current keyframe without passing it to the MapMaker; a new one
// will be constructed for the next frame.
void Tracker::DeleteCurrentKeyFrame()
{
  mCurrentKF.reset();
}

// Some heuristics to decide if tracking is any good, for this frame.
// This influences decisions to add key-frames, and eventually
// causes the tracker to attempt relocalisation.
void Tracker::AssessTrackingQuality()
{
  // Tally the per-level measurement statistics gathered during TrackMap.
  // Levels >= 2 are the coarse "large scale" features, counted separately
  // because losing those is a stronger sign of trouble.
  int nTotalAttempted = 0;
  int nTotalFound = 0;
  int nLargeAttempted = 0;
  int nLargeFound = 0;
  for(int nLevel = 0; nLevel < LEVELS; nLevel++)
  {
    nTotalAttempted += manMeasAttempted[nLevel];
    nTotalFound += manMeasFound[nLevel];
    if(nLevel >= 2)
    {
      nLargeAttempted += manMeasAttempted[nLevel];
      nLargeFound += manMeasFound[nLevel];
    }
  }

  if(nTotalFound == 0 || nTotalAttempted == 0)
    mTrackingQuality = BAD;
  else
  {
    double dTotalFracFound = (double) nTotalFound / nTotalAttempted;

    // With too few large-scale attempts the large-feature ratio is
    // meaningless, so fall back to the overall fraction.
    double dLargeFracFound = dTotalFracFound;
    if(nLargeAttempted > 10)
      dLargeFracFound = (double) nLargeFound / nLargeAttempted;

    static gvar3<double> gvdQualityGood("Tracker.TrackingQualityGood", 0.3, SILENT);
    static gvar3<double> gvdQualityLost("Tracker.TrackingQualityLost", 0.13, SILENT);

    if(dTotalFracFound > *gvdQualityGood)
      mTrackingQuality = GOOD;
    else if(dLargeFracFound < *gvdQualityLost)
      mTrackingQuality = BAD;
    else
      mTrackingQuality = DODGY;
  }

  // A DODGY verdict is downgraded to BAD when the pose estimate has drifted
  // far from every keyframe -- it has probably run away.
  if(mTrackingQuality == DODGY)
  {
    if(mMapMaker.IsDistanceToNearestKeyFrameExcessive(mCurrentKF))
      mTrackingQuality = BAD;
  }

  /*if(mnGlobalSE3 && mCurrentKF->mse3PdfFfromC.has_rotation() && mCurrentKF->mse3PdfFfromC.has_translation())
  {
    SE3GaussianPDF se3PdfWfromC = mCurrentKF->ComputeGlobalWfromC(mMapMaker.GetFfromW());
    const TooN::Matrix<6,6>& m66CovWfromC = se3PdfWfromC.get_covariance();

    Vector<3> v3EpsilonPos = se3PdfWfromC.get_translation() - mCurrentKF->se3CfromW.inverse().get_translation();
    if(fabs(v3EpsilonPos[0]) > mdMaxTranslationError*m66CovWfromC(0,0) || 
       fabs(v3EpsilonPos[1]) > mdMaxTranslationError*m66CovWfromC(1,1) || 
       fabs(v3EpsilonPos[2]) > mdMaxTranslationError*m66CovWfromC(2,2))
      mTrackingQuality = BAD;

    SO3<> so3Relative = mCurrentKF->se3CfromW.get_rotation() * se3PdfWfromC.get_rotation();
    Vector<3> v3EpsilonRot = UnPackSO3_RPY(so3Relative);
    if(fabs(v3EpsilonRot[0]) > mdMaxRotationError*m66CovWfromC(3,3) || 
       fabs(v3EpsilonRot[1]) > mdMaxRotationError*m66CovWfromC(4,4) || 
       fabs(v3EpsilonRot[2]) > mdMaxRotationError*m66CovWfromC(5,5))
      mTrackingQuality = BAD;
  }
  else if((mnGlobalSO3 || mnGlobalSE3) && mCurrentKF->mse3PdfFfromC.has_rotation())
  {
    SE3GaussianPDF se3PdfWfromC = mCurrentKF->ComputeGlobalWfromC(mMapMaker.GetFfromW());
    const TooN::Matrix<6,6>& m66CovWfromC = se3PdfWfromC.get_covariance();

    SO3<> so3Relative = mCurrentKF->se3CfromW.get_rotation() * se3PdfWfromC.get_rotation();
    Vector<3> v3EpsilonRot = UnPackSO3_RPY(so3Relative);
    if(fabs(v3EpsilonRot[0]) > mdMaxRotationError*m66CovWfromC(3,3) || 
       fabs(v3EpsilonRot[1]) > mdMaxRotationError*m66CovWfromC(4,4) || 
       fabs(v3EpsilonRot[2]) > mdMaxRotationError*m66CovWfromC(5,5))
      mTrackingQuality = BAD;
  }*/

  // Count consecutive lost frames; any non-BAD frame resets the counter.
  if(mTrackingQuality == BAD)
    mnLostFrames++;
  else
    mnLostFrames = 0;
}

// Returns the status text accumulated in mMessageForUser for display in the UI.
string Tracker::GetMessageForUser()
{
  return mMessageForUser.str();
}

void Tracker::CalcSBIRotation()
{
  mpSBILastFrame->MakeJacs();
  pair<SE2<>, double> result_pair;
  result_pair = mpSBIThisFrame->IteratePosRelToTarget(*mpSBILastFrame, 6);
  SE3<> se3Adjust = SmallBlurryImage::SE3fromSE2(result_pair.first, mCameraSBI);
  mv6SBIRot = se3Adjust.ln();
}

bool Tracker::TrackMarker(uint64_t timestamp, CVD::Image<CVD::byte> &greyFrame, CVD::Image<CVD::Rgb<CVD::byte> > &colourFrame)
{

  mMessageForUser << "Translate the camera sideways, current maximum baseline " << mdInitBaseline << " need " << mdMinInitBaseline << " to initialise" << endl;
  static bool bTracking = false;

  int marker_num = mpMultiMarkerTracker->calc((const unsigned char*)greyFrame.data(), !bTracking);
  // std::cout << "markernum =   "<< marker_num <<std::endl;
  if (marker_num == 0)
    return false;

  if(!bTracking)  // Not tracking, so need librpp pose estimate to get us started....
  {
    const float *mat = mpMultiMarkerTracker->getModelViewMatrix();

    Matrix<3,3> m33WfromC;
    m33WfromC(0,0) =  mat[4];
    m33WfromC(0,1) =  mat[5];
    m33WfromC(0,2) =  mat[6];

    m33WfromC(1,0) =  mat[0];
    m33WfromC(1,1) =  mat[1];
    m33WfromC(1,2) =  mat[2];

    m33WfromC(2,0) = -mat[8];
    m33WfromC(2,1) = -mat[9];
    m33WfromC(2,2) = -mat[10];

    //set t part to  -(R^T * t) (=invert)
    float x, y, z, roll, pitch, yaw;
    x = -(m33WfromC(0,0)*mat[12] + m33WfromC(0,1)*mat[13] + m33WfromC(0,2)*mat[14])/1000.f;
    y = -(m33WfromC(1,0)*mat[12] + m33WfromC(1,1)*mat[13] + m33WfromC(1,2)*mat[14])/1000.f;
    z = -(m33WfromC(2,0)*mat[12] + m33WfromC(2,1)*mat[13] + m33WfromC(2,2)*mat[14])/1000.f;

    if(x == 0.f && y == 0.f && z == 0.f)
      return false;

    pitch = -asin(m33WfromC(2,0)); //pitch
    roll = atan2(m33WfromC(2,1)/cos(pitch), m33WfromC(2,2)/cos(pitch)); //roll
    yaw = atan2(m33WfromC(1,0)/cos(pitch), m33WfromC(0,0)/cos(pitch)); //yaw

    SE3<> se3WorldFromCam = SE3<>(PackSO3_RPY(roll, pitch, yaw), makeVector(x,y,z));
    mse3CamFromWorld = se3WorldFromCam.inverse();
    bTracking = true;
  }
  else
    ApplyMotionModel(timestamp);

  int *marker_id = NULL;
  ARToolKitPlus::ARMarkerInfo *marker_info = NULL;

  mpMultiMarkerTracker->getDetectedMarkers(marker_id);
  mpMultiMarkerTracker->getDetectedMarkers(marker_info);

  std::map<int, int> marker_id_freq;
  for(int i=0; i<marker_num; i++)
  {
    int m_patt_id = marker_id[i];
    if(m_patt_id >= 0)
    {
      std::map<int, int>::iterator iter = marker_id_freq.find(m_patt_id);
      if(iter == marker_id_freq.end())
        marker_id_freq.insert(std::make_pair<int,int>(m_patt_id,1));
      else
        ((*iter).second)++;
    }
  }

  const ARToolKitPlus::ARMultiMarkerInfoT* config = mpMultiMarkerTracker->getMultiMarkerConfig();
  std::deque<std::pair<int,int> > config_patt_id;
  for(int j=0; j<config->marker_num; j++)
    config_patt_id.push_back(std::make_pair<int,int>(j, config->marker[j].patt_id));

  std::map<int, int> m2c_idx;
  for(int m=0; m<marker_num; m++)
  {
    const int m_patt_id = marker_id[m];
    bool ignore_marker = (m_patt_id < 0);
    std::map<int, int>::iterator m_iter = marker_id_freq.find(m_patt_id);
    if(m_iter != marker_id_freq.end()) ignore_marker |= ((*m_iter).second > 1);
    if(!ignore_marker)
    {
      std::deque<std::pair<int,int> >::iterator c_iter = config_patt_id.begin();
      if(c_iter != config_patt_id.end()) do
      {
        const int patt_id = (*c_iter).second;
        if(marker_id[m] == patt_id)
        {
          m2c_idx.insert(std::make_pair<int,int>(m,(*c_iter).first));
          config_patt_id.erase(c_iter);
          c_iter = config_patt_id.end();
          continue;
        }
        else
        {
          c_iter++;
        }
      }
      while(c_iter != config_patt_id.end());
    }
  }
  const unsigned int n_markers = (unsigned int) m2c_idx.size();
  const unsigned int n_pts = 4*n_markers;

  if(n_markers == 0)
    return false;

  // The next two data structs contain the list of points which will next 
  // be searched for in the image, and then used in pose update.
  int nFineRange = 10;  // Pixel search range for the fine stage. 
  vector<boost::shared_ptr<MarkerData> > vIterationSet;
  vIterationSet.reserve(n_pts);

  // Some accounting which will be used for tracking quality assessment:
  int nTotalAttempted = 0;
  int nTotalFound = 0;

  glPointSize(5);
  glLineWidth(2);
  glEnable(GL_POINT_SMOOTH);
  glEnable(GL_LINE_SMOOTH);
  glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
  glEnable(GL_BLEND);
  glBegin(GL_LINES);
  for(std::map<int, int>::iterator iter = m2c_idx.begin(); iter != m2c_idx.end(); iter++)
  {
    const int m = (*iter).first;
    const int c = (*iter).second;

    const int dir = marker_info[m].dir;
    const int v_idx[4] = {(4-dir)%4, (5-dir)%4, (6-dir)%4, (7-dir)%4};

    for(int i=0; i<4; i++)
    {
      Vector<3> v3WorldPos = makeVector(config->marker[c].pos3d[i][1]/1000.0, config->marker[c].pos3d[i][0]/1000.0, -config->marker[c].pos3d[i][2]/1000.0);
      Vector<2> v2Found = makeVector(marker_info[m].vertex[v_idx[i]][0], marker_info[m].vertex[v_idx[i]][1]);
      boost::shared_ptr<MarkerData> MData(new MarkerData(4*c + i, v3WorldPos, v2Found));

      // Project according to current view, and if it's not in the image, skip.
      if(!MData->Project(mse3CamFromWorld, mCamera))
        continue;

      // Debug visualisation: the found patch location (yellow) and the
      // current projection (red). These glVertex calls rely on a glBegin
      // opened before this chunk; glEnd() below closes it — TODO confirm.
      glColor3f(1,1,0);
      glVertex(v2Found);
      glColor3f(1,0,0);
      glVertex(MData->v2Image);

      // Calculate camera projection derivatives of this point.
      MData->GetDerivsUnsafe(mCamera);

      // Now see if the found and projected point are close together in the image!
      // v2Diff*v2Diff is the TooN dot product, i.e. squared pixel distance;
      // nFineRange is presumably the fine-stage search radius in pixels,
      // declared earlier in this function — verify against the full body.
      nTotalAttempted++;
      Vector<2> v2Diff = MData->v2Image - MData->v2Found;
      if(v2Diff*v2Diff < nFineRange*nFineRange)
      {
        //nTotalFound++;
        // Accepted: this measurement will drive the pose refinement below.
        // (nTotalFound is incremented later, in the scene-depth loop, so that
        // only points still flagged bFound after refinement are counted.)
        MData->bFound = true;
        vIterationSet.push_back(MData);
      }
      else
        MData->bFound = false;
    }
  }
  glEnd();

  // Again, ten gauss-newton pose update iterations.
  // v6LastUpdate caches the previous SE3 update so cheap linearised
  // reprojection can be used on iterations where the full nonlinear
  // projection is skipped.
  Vector<6> v6LastUpdate;
  v6LastUpdate = Zeros;
  for(int iter = 0; iter<10; iter++)
  {
    bool bNonLinearIteration; // For a bit of time-saving: don't do full nonlinear
    // reprojection at every iteration - it really isn't necessary!
    if(iter == 0 || iter == 4 || iter == 9)
      bNonLinearIteration = true;   // Even this is probably overkill, the reason we do many
    else                            // iterations is for M-Estimator convergence rather than 
      bNonLinearIteration = false;  // linearisation effects.

    if(iter != 0)   // Either way: first iteration doesn't need projection update.
    {
      if(bNonLinearIteration)
      {
        // Full reprojection of every point through the updated pose.
        for(unsigned int i=0; i<vIterationSet.size(); i++)
          vIterationSet[i]->ProjectAndDerivs(mse3CamFromWorld, mCamera);
      }
      else
      {
        // Cheap first-order update of the projections using the last
        // pose increment instead of re-projecting from scratch.
        for(unsigned int i=0; i<vIterationSet.size(); i++)
          vIterationSet[i]->LinearUpdate(v6LastUpdate);
      };
    }

    // Jacobians only need recomputing when the projections were redone
    // nonlinearly; otherwise the cached ones remain valid enough.
    if(bNonLinearIteration)
      for(unsigned int i=0; i<vIterationSet.size(); i++)
        vIterationSet[i]->CalcJacobian();

    // Again, an M-Estimator hack beyond the fifth iteration.
    // dOverrideSigma == 0.0 means "let the M-Estimator choose sigma";
    // nonzero values force a fixed (squared) threshold to tighten outlier
    // rejection as the pose converges.
    double dOverrideSigma;
    if(iter == 9) // On the last iteration be brutal!
      dOverrideSigma = 4.0;
    else if(iter > 5) // Past half way be quite strict
      dOverrideSigma = 16.0;
    else // Leave it up to the M-Estimator to decide
      dOverrideSigma = 0.0;

    // Calculate and update pose; also store update vector for linear iteration updates.
    Vector<6> v6Update = CalcPoseUpdate(vIterationSet, dOverrideSigma);
    mse3CamFromWorld = SE3<>::exp(v6Update) * mse3CamFromWorld;
    v6LastUpdate = v6Update;
  };

  // Update the current keyframe with info on what was found in the frame.
  // Strictly speaking this is unnecessary to do every frame, it'll only be
  // needed if the KF gets added to MapMaker. Do it anyway.
  // Export pose to current keyframe:
  mCurrentKF->se3CfromW = mse3CamFromWorld;

  // Finally, find the mean scene depth from tracked features
  {
    double dSum = 0;    // running sum of camera-frame depths z
    double dSumSq = 0;  // running sum of z^2, for the variance below
    mnThisFramePoints = 0;
    for(vector<boost::shared_ptr<MarkerData> >::iterator it = vIterationSet.begin(); it!= vIterationSet.end(); it++)
    {
      if((*it)->bFound)
      {
        double z = (*it)->v3Cam[2];
        dSum+= z;
        dSumSq+= z*z;
        mnThisFramePoints++;
        nTotalFound++;   // final found-count used for the quality decision below
      };
    }
    // Only trust the depth statistics with a reasonable sample size; with
    // <= 20 points the keyframe silently keeps its previous mean/sigma.
    if(mnThisFramePoints > 20)
    {
      mCurrentKF->dSceneDepthMean = dSum/mnThisFramePoints;
      // Sigma via E[z^2] - E[z]^2. NOTE(review): rounding can make this
      // fractionally negative, giving NaN from sqrt — consider clamping to 0.
      mCurrentKF->dSceneDepthSigma = sqrt((dSumSq / mnThisFramePoints) - (mCurrentKF->dSceneDepthMean) * (mCurrentKF->dSceneDepthMean));
    }
  }

  UpdateMotionModel();

  // Classify tracking quality from the fraction of attempted measurements
  // that were actually found. The zero checks also guard the division below.
  if(nTotalFound == 0 || nTotalAttempted == 0)
  {
    mTrackingQuality = BAD;
    bTracking = false;
  }
  else
  {
    double dTotalFracFound = (double) nTotalFound / nTotalAttempted;

    // Tunable thresholds (GVars): absolute minimum point count for GOOD,
    // and the found-fraction bounds separating GOOD / DODGY / BAD.
    static gvar3<int> gvnMinMarkerPoints("Tracker.MinMarkerPoints", 8, SILENT);
    static gvar3<double> gvdMarkerQualityGood("Tracker.MarkerQualityGood", 0.5, SILENT);
    static gvar3<double> gvdMarkerQualityLost("Tracker.MarkerQualityLost", 0.3, SILENT);
    
    if(nTotalFound >= *gvnMinMarkerPoints && dTotalFracFound >= *gvdMarkerQualityGood)
    {
      //std::cout << "GOOD: " << dTotalFracFound << " " << nTotalFound << std::endl;
      mTrackingQuality = GOOD;
      bTracking = true;

      // Keep this frame as an initial keyframe only if it is sufficiently far
      // from all stored ones (0.05 appears to be a pose-distance threshold
      // whose units depend on ClosestKeyFrame's metric — TODO confirm).
      if(ClosestKeyFrame(mCurrentKF, mvInitialKeyFrames) > 0.05) // We cannot keep them all!
      {
        mCurrentKF->MakeKeyFrame_Colour(colourFrame);
        mvInitialKeyFrames.push_back(mCurrentKF);

        // Record each found marker point's image measurement against this
        // keyframe; the world position is stored once, on first observation.
        for(vector<boost::shared_ptr<MarkerData> >::iterator it = vIterationSet.begin(); it!= vIterationSet.end(); it++)
        {
          if((*it)->bFound)
          {
            MarkerMeasurements& mobs = mmMarkerMeasurements[(*it)->nMarkerVertexID];
            if(mobs.mMeasurements.empty())
              mobs.v3WorldPos = (*it)->v3WorldPos;
            // CVD's ir() rounds the sub-pixel location to integer pixel coords.
            mobs.mMeasurements[mCurrentKF] = ir((*it)->v2Found);
          }
        }
      }
    }
    else if(dTotalFracFound < *gvdMarkerQualityLost)
    {
      //std::cout << "BAD: " << dTotalFracFound << " " << nTotalFound << std::endl;
      mTrackingQuality = BAD;
      bTracking = false;
    }
    else
    {
      //std::cout << "DODGY: " << dTotalFracFound << " " << nTotalFound << std::endl;
      mTrackingQuality = DODGY;
      bTracking = true;
    }
  }

  // bTracking is declared before this chunk; true unless quality ended up BAD.
  return bTracking;
}


