#include "SfmPool.h"
#include "..\MathAux\imagelib\triangulate.h"
#include "..\MathAux\Matrix\matrix.h"
#include "..\MathAux\imagelib\qsort.h"
#include "..\MathAux\\imagelib\util.h"
#include <set>

using namespace cv;
using namespace Utils;

#define PROJ_ESTIMATION_THRESHOLD 4.0
#define PROJ_ESTIMATION_THRESHOLD_WEAK 64.0
#define MIN_INLINERS_EST_PROJECTION 6
#define CAMERA_REFINEMENT_THRESHOLD 1.0e-12
#define NUM_STDDEV 2.0

#define MIN_PROJ_ERROR_THRESHOLD 8.0000
#define MAX_PROJ_ERROR_THRESHOLD 16.00000

#define NUM_STDDEV 2.0 

#define F_SCALE 0.001
#define K_SCALE 5.0

// Per-camera context handed to the sba projection callback (ProjectPoints):
// the initial rotation of every camera, on top of which the optimizer's
// rotation parameters are applied as incremental axis-angle updates.
struct SDataForProjection
{
	// Initial rotation matrix per camera.
	// NOTE(review): hard-coded capacity of 17 — RunSFM fills this with
	// NumCameras entries, so more than 17 cameras overruns the array. Confirm
	// an upstream limit guarantees NumCameras <= 17.
	Mat InitR[17];
};

// State shared between CameraRefine and the lmdif residual callback
// (CameraRefineResidual); the callback must be a free function with a fixed
// signature, so context is passed through this file-scope global instead.
struct SDataForCameraRefinement
{
	int NumPoints; // number of 2d-3d correspondences
	int Iteration; // residual-callback invocation counter (diagnostics only)
	double* points;      // packed 3d points x,y,z (not owned)
	double* projections; // packed measured 2d projections x,y (not owned)
	double Focal;        // focal-length prior; used directly when intrinsics are fixed
	Mat RInit;           // rotation the incremental axis-angle update is applied on top of
	bool EstiamteIntrin; // when true, focal + distortion are part of the lmdif state vector
	double ConstrainFocalweight; // weight of the keep-focal-near-prior residual
	double ConstrainRdweight;    // weight of the keep-distortion-near-zero residuals
	double Distoration[2];       // radial distortion prior; used directly when intrinsics are fixed
};
// NOTE(review): global mutable state makes CameraRefine non-reentrant and not
// thread-safe — confirm refinement only ever runs from a single thread.
static SDataForCameraRefinement g_DataForRefinement;

static Mat RotateUpdate(Mat mR, Mat mW);
static void ProjectInternal(Mat dt, Mat R, double *_b, double Focal, double* k, double *p, double KScale);

// Residual callback for lmdif_driver2 (driven by CameraRefine).
// x layout: [0..2] camera-center delta, [3..5] incremental rotation
// (axis-angle); when intrinsics are estimated: [6] focal, [7..8] distortion.
// fvec receives 2 residuals per point, plus (when estimating intrinsics)
// the focal and distortion soft-constraint residuals appended at the end.
static void CameraRefineResidual(const int* m, const int* n, double* x, double* fvec, int* iflag)
{
	const double KScale = 1.0;

	// The pose and intrinsics depend only on x, not on the point index —
	// compute them once instead of rebuilding them for every point.
	Mat dt = INIT_MAT_ARRAY(3, 1, x);
	Mat dw = INIT_MAT_ARRAY(3, 1, x + 3);
	Mat R = RotateUpdate(g_DataForRefinement.RInit, dw);
	double Focal = (g_DataForRefinement.EstiamteIntrin) ? x[6] : g_DataForRefinement.Focal;
	double* k = (g_DataForRefinement.EstiamteIntrin) ? x + 7 : g_DataForRefinement.Distoration;

	for (int i = 0; i < g_DataForRefinement.NumPoints; i++)
	{
		double proj[2];
		double* b = g_DataForRefinement.points + 3 * i;
		ProjectInternal(dt, R, b, Focal, k, proj, KScale);

		// residual = measured - projected
		fvec[2 * i + 0] = g_DataForRefinement.projections[2 * i] - proj[0];
		fvec[2 * i + 1] = g_DataForRefinement.projections[2 * i + 1] - proj[1];
	}

	// Soft constraints keeping the focal near its prior and the distortion
	// near zero; these are constant over the point loop, so write them once
	// (previously rewritten on every loop iteration).
	if (g_DataForRefinement.EstiamteIntrin)
	{
		double FocalDiff = g_DataForRefinement.Focal - x[6];
		fvec[2 * g_DataForRefinement.NumPoints] = g_DataForRefinement.ConstrainFocalweight * FocalDiff;
		fvec[2 * g_DataForRefinement.NumPoints + 1] = -g_DataForRefinement.ConstrainRdweight * x[7];
		fvec[2 * g_DataForRefinement.NumPoints + 2] = -g_DataForRefinement.ConstrainRdweight * x[8];
	}

	g_DataForRefinement.Iteration++;
}

//todo use the CCamera::Project instead of this function
static void ProjectInternal(Mat dt, Mat R, 
							double *_b, double Focal, double* k,
							double *p, double KScale)
{
	Mat b = (Mat_<double>(3,1) << _b[0], _b[1], _b[2]);
	//dt is the camera center - 'c' , so the formula for projection is x = R(X - c)
	Mat b_t = b - dt;
	Mat mbCam = R * b_t;

	double* bCam = GetMatrixDataAsDouble(mbCam);
	//switching the direction of the z axis so multiplying by -1.0
	p[0] = -bCam[0] * Focal / bCam[2];
    p[1] = -bCam[1] * Focal / bCam[2];

	double k1 = k[0] / KScale;
	double k2 = k[1] / KScale;

	double rsq = (p[0] * p[0] + p[1] * p[1]) / (Focal * Focal);
	double factor = 1.0 + k1 * rsq + k2 * rsq* rsq;

	p[0] *= factor;
	p[1] *= factor;
		
}

// Apply the incremental axis-angle rotation mW to mR using Rodrigues'
// formula: dR = I + sin(t)*[n]x + (1 - cos(t))*[n]x^2, result = dR * mR,
// where t = |mW| and n = mW / t.
static Mat RotateUpdate(Mat mR, Mat mW)
{
	double* w = GetMatrixDataAsDouble(mW);
	double theta = sqrt(w[0] * w[0] + w[1] * w[1] + w[2] * w[2]);

	// A zero rotation vector leaves the rotation untouched.
	if (theta == 0.0)
	{
		return mR;
	}

	// Unit rotation axis and its skew-symmetric cross-product matrix [n]x.
	Mat mAxis = mW * (1.0 / theta);
	double* n = GetMatrixDataAsDouble(mAxis);

	double skew[9];
	skew[0] = 0.0;   skew[1] = -n[2];  skew[2] = n[1];
	skew[3] = n[2];  skew[4] = 0.0;    skew[5] = -n[0];
	skew[6] = -n[1]; skew[7] = n[0];   skew[8] = 0.0;

	Mat mSkew = INIT_MAT_ARRAY(3, 3, skew);
	Mat mSkewSq = mSkew * mSkew;

	Mat dR = (EYE(3) + mSkew * sin(theta)) + mSkewSq * (1.0 - cos(theta));

	return dR * mR;
}


//xij is the output 
static void ProjectPoints(int j, int i, double *aj, double *bi, 
			       double *xij, void *adata)
{

	SDataForProjection* pData = (SDataForProjection*)adata;

	double *w, *dt, *k;

	dt = aj + 0;
    w = aj + 3;
	k = aj + 7;
	Mat mW = INIT_MAT(3, 1, w[0], w[1], w[2]);
	Mat Rnew = RotateUpdate(pData->InitR[j], mW);


	Mat t = INIT_MAT(3,1, dt[0], dt[1], dt[2]);
	double Focal = aj[6];
	
	ProjectInternal(t, Rnew, bi, Focal / F_SCALE, k, xij, K_SCALE);
}

// Run one sparse-bundle-adjustment pass over all cameras and 3d points.
// vmask marks which camera sees which point, projections holds the packed 2d
// measurements, points the 3d positions (updated in place). remap is unused
// here but kept for parity with the bundler reference implementation.
void CSfmPool::RunSFM(CamerasVector& cameras, CPointCloud& PointCloud, char* vmask, double* projections, cv::Point3d* points, int NumPoints, int* remap)
{
#define SBA_V121

	double* params = NULL; // parametrs for sfm i.e cam params and points

	double info[10]; // sba diagnostics output (final error, iterations, stop reason, ...)
	double* Vout, *Sout, *Uout, *Wout; // optional covariance outputs — not requested
	Vout = Sout = Uout = Wout = NULL;
	double eps2 = 9.9999999999999998e-013;
	camera_constraints_t *Constraints = NULL;

	int NumCameras = cameras.size();
	int bConstrictions = 1;
	int NumberOfParamsPerCamera = 9; // center(3) + rotation delta(3) + focal(1) + distortion(2)
	int NumberOfCameraParams = NumberOfParamsPerCamera * NumCameras;
	int NumberOfPointsParams = NumPoints * 3;
	int NumberOfParams = NumberOfCameraParams + NumberOfPointsParams;
	int base = NumberOfCameraParams; // the offset of the points params

	params = new double[NumberOfParams];
	VISIO_ASSERT(params);

	/* Fill camera parameters */
	for (int j = 0; j < NumCameras; j++) 
	{
		Mat mCamCenter = cameras[j]->GetCameraCenter();
		double* CamCenter = GetMatrixDataAsDouble(mCamCenter);
		params[NumberOfParamsPerCamera * j + 0] = CamCenter[0]; 
		params[NumberOfParamsPerCamera * j + 1] = CamCenter[1]; 
		params[NumberOfParamsPerCamera * j + 2] = CamCenter[2]; 

		//dw — incremental rotation, starts at zero
		params[NumberOfParamsPerCamera * j + 3] = 0.0;
		params[NumberOfParamsPerCamera * j + 4] = 0.0;
		params[NumberOfParamsPerCamera * j + 5] = 0.0;

		//focal length and distortion are scaled to balance parameter magnitudes
		params[NumberOfParamsPerCamera * j + 6] = cameras[j]->GetFocal() * F_SCALE;
		params[NumberOfParamsPerCamera * j + 7] = cameras[j]->GetDistorationParams(0) * K_SCALE;
		params[NumberOfParamsPerCamera * j + 8] = cameras[j]->GetDistorationParams(1) * K_SCALE;
	}

	/* Fill point parameters */
	for (int j = 0; j < NumPoints; j++)
	{
		params[base + j * 3 + 0] = points[j].x;
		params[base + j * 3 + 1] = points[j].y;
		params[base + j * 3 + 2] = points[j].z;
	}

	//init constraints — the focal/distortion entries need the same scaling as the params
	Constraints = new camera_constraints_t[NumCameras];
		
	for (int i = 0; i < NumCameras; i++)
	{
		Constraints[i].constrained = new char[NumberOfParamsPerCamera];
		Constraints[i].constraints = new double[NumberOfParamsPerCamera];
		Constraints[i].weights = new double[NumberOfParamsPerCamera];
		SCameraConstriants CamConstraints = cameras[i]->GetConstraints();
		memcpy(Constraints[i].constrained, 
			CamConstraints.bConstrianed, NumberOfParamsPerCamera * sizeof(char));
		memcpy(Constraints[i].constraints, 
			CamConstraints.Constraints, NumberOfParamsPerCamera * sizeof(double));
		memcpy(Constraints[i].weights,
			CamConstraints.Weights, NumberOfParamsPerCamera * sizeof(double));

		Constraints[i].constraints[6] *= F_SCALE;
		Constraints[i].weights[6] *= (1.0 / (F_SCALE * F_SCALE));
		
		Constraints[i].constraints[7] *= K_SCALE;
		Constraints[i].weights[7] *= (1.0 / (K_SCALE * K_SCALE));

		Constraints[i].constraints[8] *= K_SCALE;
		Constraints[i].weights[8] *= (1.0 / (K_SCALE * K_SCALE));
	}

	//if there is a problem we probably need to set the constraints see bundler SetCameraConstraitns for how to.
#ifdef SBA_V121
	double opts[6]; // opts[5];
#else
	double opts[3];
#endif

	opts[0] = 1.0e-3;
	opts[1] = 1.0e-10; // 1.0e-15;
	opts[2] = eps2; // 0.0;  // 1.0e-10; // 1.0e-15;

#ifdef SBA_V121
	opts[3] = 1.0e-12;
	// opts[4] = 4.0e-2;
	opts[4] = 0.0;
	opts[5] = 4.0e-2; // change this back to opts[4] for sba v1.2.1
#endif

#define MAX_ITERS 150 // 256
#define VERBOSITY 3

	//fill data for the projection func
	SDataForProjection* pData = new SDataForProjection;

	for (int i = 0; i < NumCameras; i++)
	{
		pData->InitR[i] = cameras[i]->GetParameters().R;
	}

	sba_motstr_levmar(NumPoints, NumCameras, 0, 
		vmask,  params, NumberOfParamsPerCamera, 3, projections, NULL, 2, 
		//remove NULL in prev line for sba v1.2.1
		ProjectPoints, NULL, 
		(void *) (pData),
		MAX_ITERS, VERBOSITY, opts, info,
		bConstrictions, Constraints, // cam constraints
		0,NULL,  // point constraints
		Vout, Sout, Uout, Wout);

	/* Read back the refined camera parameters */
	for (int j = 0; j < NumCameras; j++) 
	{
		double* dt = params + NumberOfParamsPerCamera * j + 0;
		double* w = params + NumberOfParamsPerCamera * j + 3;
		double K[] = {params[NumberOfParamsPerCamera * j + 6] / F_SCALE,  params[NumberOfParamsPerCamera * j + 7] / K_SCALE, params[NumberOfParamsPerCamera * j + 8] / K_SCALE};

		Mat mCamCenter = INIT_MAT(3, 1, dt[0], dt[1], dt[2]); //new camera center

		Mat mW = INIT_MAT(3, 1, w[0], w[1], w[2]);

		Mat Rnew = RotateUpdate(cameras[j]->GetParameters().R, mW);

		Mat mT = Utils::ConvertCamCenterToTranslation(Rnew, mCamCenter);

		cameras[j]->Update(Rnew, mT, K);
	}

	/* Read back the refined points — must be in the same order as they were.
	   BUGFIX: this loop previously sat inside the camera loop (with a
	   shadowed loop variable j), pointlessly re-copying every point once per
	   camera. */
	for (int j = 0; j < NumPoints; j++)
	{
		points[j].x = params[base + j * 3 + 0];
		points[j].y = params[base + j * 3 + 1];
		points[j].z = params[base + j * 3 + 2];
	}

	//BUGFIX: the per-camera constraint arrays (and the Constraints array
	//itself) were leaked on every call
	for (int i = 0; i < NumCameras; i++)
	{
		delete[] Constraints[i].constrained;
		delete[] Constraints[i].constraints;
		delete[] Constraints[i].weights;
	}
	delete[] Constraints;

	delete[] params;
	delete pData;
}

// Compute per-camera reprojection errors, derive an adaptive threshold from
// the error distribution and remove the points exceeding it from the cloud.
// Returns the number of outliers removed.
UINT CSfmPool::RemoveOutliers( CamerasVector& cameras, CPointCloud& PointCloud, char* vmask, double* projections, cv::Point3d* points, int* remap )
{
	OutliersSet outliers;

	UINT NumCameras = cameras.size();
	double DistTotal = 0.0;

	for (UINT i = 0; i < NumCameras; i++) 
	{
		CCamera* pCam = cameras[i];
		CImageData* pImageData = pCam->GetImageData();
		UINT NumKeyPoints = pImageData->GetNumKeyPoints();
		UINT PointtCount = 0; // indexer for dists

		// BUGFIX: size the buffer by the key point count — a guaranteed upper
		// bound on the number of marked key points — instead of
		// GetNumPointsProjected(); if the two ever disagreed the old code
		// either overflowed the buffer or took the median over
		// uninitialized slots.
		double *dists = new double[NumKeyPoints];

		for (UINT KeyPointIndex = 0; KeyPointIndex < NumKeyPoints; KeyPointIndex++ )
		{
			if (pImageData->IsKeyPointMarked(KeyPointIndex))
			{
				int Pointindex = pImageData->GetMark(KeyPointIndex);

				Point3d p = points[remap[Pointindex]];
				//reverse Z axis as per bundler — hence the -1.0
				Point2d projection = pCam->Project(p, true, true) * -1.0;

				Point2d Key = pCam->GetKeyPoint(KeyPointIndex);

				double dist = norm(Key - projection);

				dists[PointtCount] = dist;
				DistTotal += dist;
				PointtCount++;
			}
		}

		//nothing measured for this camera — nothing to classify
		if (PointtCount == 0)
		{
			delete[] dists;
			continue;
		}

        /* Estimate the threshold base from the 80th percentile of the
           distances actually measured */
        double median = kth_element_copy(PointtCount, ceil(0.8 * PointtCount), dists);

        double threshold = 1.2 * NUM_STDDEV * median; /* k * stddev */
        threshold = Clamp(threshold, MIN_PROJ_ERROR_THRESHOLD, MAX_PROJ_ERROR_THRESHOLD);  

		//finally check for outliers — walk the marked key points again in the
		//same order so PointtCount lines up with the dists entries
		PointtCount = 0;
		for (UINT KeyPointIndex = 0; KeyPointIndex < NumKeyPoints; KeyPointIndex++ )
		{
			if (pImageData->IsKeyPointMarked(KeyPointIndex))
			{
				int Pointindex = pImageData->GetMark(KeyPointIndex);
				if (dists[PointtCount] > threshold )
				{
					//remove this point from consideration
					outliers.insert(Pointindex);
				}

				PointtCount++;
			}
		}
        
		delete[] dists;
	}
	/* Remove outlying points */
	PointCloud.RemoveOutliers(outliers);
	return outliers.size();
}

// Run bundle adjustment over the whole pool, discarding outlying points and
// repeating until an adjustment pass produces no further outliers.
// Returns the number of cameras in the pool.
UINT CSfmPool::AddCameras(CamerasVector& cameras, CPointCloud& PointCloud)
{
	CheckAddedIndex(cameras);
	PointCloud.CheckViews();

	UINT NumOutliers = 0;
	int NumCameras = cameras.size();
	int NumPoints = PointCloud.Size();
	int* remap = new int[NumPoints];

	do 
	{
		//TODO: no reason here to use the remapping after removing outliers for now we will use it only to maintain some correspondence with bundler for reference

		//the sba levmar expects that if the vmask for a given (point, cam) pair is zero then there are no projection present.
		//so the number of projections is <= Numpoints*Numcameras
		int NumProjections = PointCloud.GetNumberOfProjections();

		// the view bit mask regarding the 3d points i.e which camera watch which 3d points
		double* projections = new double[NumProjections * 2];
		VISIO_ASSERT(projections);

		char* vmask = new char[NumPoints * NumCameras];
		VISIO_ASSERT(vmask);

		cv::Point3d* points = new cv::Point3d[NumPoints];
		VISIO_ASSERT(points);

		UINT NumActivePoints = PointCloud.GetViewMask(NumCameras, vmask, projections, points, NumProjections, remap);

		RunSFM(cameras, PointCloud, vmask, projections, points, NumActivePoints, remap);
		NumOutliers = RemoveOutliers(cameras, PointCloud, vmask, projections, points, remap);
		PointCloud.UpdatePoints(points, NumPoints, remap);

		delete[] projections;
		delete[] vmask;
		delete[] points;
	} while (NumOutliers > 0);

	delete[] remap;

	return cameras.size();
}

//this will also change the cam matrix if needed
Mat CSfmPool::FixIntrinsincsMatrix(Mat mP, Mat mKInit)
{
	//check parity of negatives values along the diagonal
	Mat mK = mKInit;
	double* KInit = GetMatrixDataAsDouble(mKInit);
	int neg = (KInit[0] < 0.0) + (KInit[4] < 0.0) + (KInit[8] < 0.0);

	//odd case
	if ((neg % 2) == 1)
	{
		//scale matrix by -1.0 so we can handle the even case
		mK = mKInit * -1.0;
		mP = mP * -1.0;
	}

	//even case
	double* K = GetMatrixDataAsDouble(mK);
	Mat mFix = EYE(3);
	double* fix = GetMatrixDataAsDouble(mFix);

	if (K[0] < 0.0 && K[4] < 0.0)
	{
		fix[0] = -1.0f;
		fix[4] = -1.0f;
	}
	else if (K[0] < 0.0)
	{
		fix[0] = -1.0f;
		fix[8] = -1.0f;
	}
	else if (K[4] < 0.0)
	{
		fix[4] = -1.0f;
		fix[8] = -1.0f;
	}

	mK = mK * mFix;

	return mK;
}

// Estimate a new camera's pose from the 2d-3d correspondences it shares with
// the already-reconstructed cloud, then refine it. inliners receives the
// inliner classification of the correspondences for the returned camera.
// The caller owns the returned CCamera.
CCamera* CSfmPool::FindCameraFromProjection(CImageData* pImageData, CCameraMesh* pMesh, const VisiblePointsVector& SharedVisiblePoints, const CPointCloud& PointCloud, SInliners& inliners) 
{
	UINT NumPoints = SharedVisiblePoints.size();
	int NumInliners = 0;
	double* points = new double[NumPoints * 3];
	double* projections = new double[NumPoints * 2];

	//pack the correspondences into flat arrays for the estimation routines
	for (UINT i = 0; i < NumPoints; i++)
	{
		VisablePoint vis = SharedVisiblePoints[i];

		//the 3d point from the point cloud
		Point3d p = PointCloud.Get3DPoint(vis.first->GetMark());
		double* pt = points + 3 * i;
		pt[0] = p.x;
		pt[1] = p.y;
		pt[2] = p.z;

		//its measured projection in the new image
		Point2d proj = pImageData->GetKeyPoint(vis.second);
		double* pr = projections + 2 * i;
		pr[0] = proj.x;
		pr[1] = proj.y;
	}

	//todo remove NumInliners
	SCameraParams NewCamParams = FindCameraParamsFromProjection(points, projections, NumPoints, NumInliners);

	//here we will use the default focal length (as in bundler)
	CCamera* pCam = new CCamera(pMesh, NewCamParams, pImageData);

	inliners = CalculateInlinersOfCamera(pCam, points, projections, NumPoints);

	//Set to default Focal length as per bundler
	pCam->UseDefaulFocalLength();

	RefineCameraParams(pCam, points, projections, inliners);

	delete[] points;
	delete[] projections;

	return pCam;
}

// Estimate a 3x4 projection matrix from 2d-3d correspondences via RANSAC and
// decompose it into intrinsics K, rotation R and translation t.
// NumInliners receives the RANSAC inliner count.
SCameraParams CSfmPool::FindCameraParamsFromProjection( double* points, double* projections, UINT NumPoints, int& NumInliners )
{
	double P[12];
	//all paramaters set run the ransac to find the projection matrix
	NumInliners = find_projection_3x4_ransac(NumPoints, 
		CastToBundlerVec<v3_t>(points), CastToBundlerVec<v2_t>(projections), 
		P, /* 2048 */ 4096 /* 100000 */, 
		PROJ_ESTIMATION_THRESHOLD);

	VISIO_ASSERT(NumInliners >= MIN_INLINERS_EST_PROJECTION );

	double KRInit[9], RInit[9], KInit[9];

	//copy the rows of the result (drop the 4th column to keep the 3x3 KR part)
	for (int row = 0; row < 3; row++)
	{
		memcpy(KRInit + row * 3, P + row * 4, sizeof(double) * 3);
	}

	//do an rq decompose to separate R and K
	Mat mP = INIT_MAT_ARRAY(3,4,P);

	dgerqf_driver(3, 3, KRInit, KInit, RInit);

	//need to fix the intrinisics matrix to have a certain form 
	// NOTE(review): FixIntrinsincsMatrix takes mP by value, so any sign flip
	// it applies to P is lost here — confirm against bundler's behavior.
	Mat mK = FixIntrinsincsMatrix(mP, INIT_MAT_ARRAY(3,3, KInit));

	//init the matrices: P = K [R | t]  =>  K^-1 * P = [R | t]
	Mat mPFixed  = mK.inv() * mP;

	Mat mR = mPFixed(Range(0,3), Range(0,3)).clone();

	Mat t = mPFixed.col(3).clone();

	//scale to normalize the diagonal so K(2,2) becomes 1
	mK = mK * (1.0 / CV_MAT(mK, 2, 2));

	// need to fix intrinsics to use the const focal length

	return SCameraParams(mR, t, mK, mP);
}

// Classify correspondences by reprojection error: entries below
// PROJ_ESTIMATION_THRESHOLD are strong inliners, those below the weak
// threshold are weak inliners (so every strong inliner is also weak).
SInliners CSfmPool::CalculateInlinersOfCamera( const CCamera* pCam, double* points, double* projections, int NumInliners )
{
	SInliners inliners;
	//todo calculate the number of points behind the camera
	for (int idx = 0; idx < NumInliners; idx++)
	{
		Point2d measured = Point2dFromArray(projections + 2 * idx);
		//not sure why we need multiplay by -1.0
		Point2d reprojected = pCam->Project(points + 3 * idx, true, true) * -1.0;

		double err = norm(measured - reprojected);

		if (err < PROJ_ESTIMATION_THRESHOLD)
		{
			inliners.Inliners.push_back(idx);
		}

		if (err < PROJ_ESTIMATION_THRESHOLD_WEAK)
		{
			inliners.WeakInliners.push_back(idx);
		}
	}

	return inliners;
}

// Refine a single camera's pose (and optionally intrinsics) against the given
// 2d-3d correspondences with Levenberg-Marquardt (lmdif_driver2).
// State vector x: [0..2] camera center, [3..5] incremental rotation;
// when bEstimateIntrin: [6] focal, [7..8] radial distortion.
// NOTE(review): communicates with the residual callback through the global
// g_DataForRefinement, so this function is not reentrant.
void CSfmPool::CameraRefine(CCamera* pCam, double* points, double* projections, UINT NumPoints, bool bEstimateIntrin)
{
	Mat mC = pCam->GetCameraCenter();
	double* t = GetMatrixDataAsDouble(mC);
	double x[9] = {0 , 0, 0, 0, 0, 0, 0, 0, 0};
	UINT FocalConstraint = 0;
	UINT EstimateDistortaion = 0;
	UINT NumCameraParams = 0;
	if (bEstimateIntrin)
	{
		//start from the current intrinsics; the rotation delta starts at zero
		double _x[9] = {t[0], t[1], t[2], 0.0, 0.0, 0.0, pCam->GetFocal(), pCam->GetDistorationParams(0), pCam->GetDistorationParams(1) };
		memcpy (x, _x, sizeof(_x));
		FocalConstraint = 1;
		EstimateDistortaion = 1;
		NumCameraParams = 9;
	}
	else
	{
		//pose-only refinement — intrinsics stay fixed at their current values
		double _x[6] = {t[0], t[1], t[2], 0.0, 0.0, 0.0 };
		memcpy (x, _x, sizeof(_x));
		FocalConstraint = 0;
		EstimateDistortaion = 0;
		NumCameraParams = 6;
	}
	

	//init the data for the refinement
	g_DataForRefinement.Focal = pCam->GetFocal();
	g_DataForRefinement.NumPoints = NumPoints;
	g_DataForRefinement.points = points;
	g_DataForRefinement.projections = projections;
	g_DataForRefinement.RInit = pCam->GetParameters().R;
	g_DataForRefinement.Iteration = 0;
	g_DataForRefinement.EstiamteIntrin = bEstimateIntrin;
	memcpy(g_DataForRefinement.Distoration, pCam->GetDistorationParams(), sizeof(g_DataForRefinement.Distoration));
	//soft-constraint weights scale with the point count so they keep up with the data term
	g_DataForRefinement.ConstrainFocalweight = 1.0 * NumPoints * pCam->GetConstraints().Weights[6];
	g_DataForRefinement.ConstrainRdweight = 0.05 * NumPoints;

	//todo: maybe we can do this with member functions removing all this stupid need for global params and static functions
	//residual count: 2 per point + 1 focal constraint + 2 distortion constraints (when estimating intrinsics)
	lmdif_driver2(CameraRefineResidual, 2 * NumPoints + FocalConstraint + 2*EstimateDistortaion, NumCameraParams, x, CAMERA_REFINEMENT_THRESHOLD);

	//do cam update — rebuild R from the solved incremental rotation
	Mat mCamCenter = INIT_MAT_ARRAY(3, 1, x);
	Mat mW = INIT_MAT_ARRAY(3, 1, x + 3);
	Mat NewR = RotateUpdate(pCam->GetParameters().R, mW);

	Mat mT = Utils::ConvertCamCenterToTranslation(NewR, mCamCenter);
	double Intrin[3];

	if (bEstimateIntrin) 
	{
		double _intrin[] = {x[6], x[7], x[8]};
		memcpy(Intrin, _intrin, sizeof(_intrin));
	}
	else
	{
		//intrinsics were not optimized — keep the values we started from
		double _intrin[] = {g_DataForRefinement.Focal, g_DataForRefinement.Distoration[0], g_DataForRefinement.Distoration[1]};
		memcpy(Intrin, _intrin, sizeof(_intrin));
	}

	pCam->Update(NewR, mT, Intrin);
}

//this function will also refine the inliners
InlinersVector CSfmPool::RefineCameraParamsInternal( CCamera* pCam, double* points,double* projections, InlinersVector& inliners, UINT NumInliners )
{
	InlinersVector RefinedInliners = inliners;
	UINT NumInlinersCurr = NumInliners; 
	double* PointsCurr = new double[NumInlinersCurr * 3];
	double* ProjCurr = new double[NumInlinersCurr * 2];

	memcpy(PointsCurr, points, sizeof(double) * 3 * NumInlinersCurr);
	memcpy(ProjCurr, projections, sizeof(double) * 2 * NumInlinersCurr);

	CameraRefine(pCam, PointsCurr, ProjCurr, NumInlinersCurr, false);

	while (1)
	{

		CameraRefine(pCam, PointsCurr, ProjCurr, NumInlinersCurr, true);

		double* PointsNext = new double[NumInlinersCurr * 3];
		double* ProjNext = new double[NumInlinersCurr * 2];
		double error = 0;
		double* errors = new double[NumInlinersCurr];
		InlinersVector inlinersNext;
		int count = 0;

		//todo find a way to use findCamerainlienrs instead of doing this code duplication
		for (int i = 0; i < NumInlinersCurr; i++)
		{
			//reverse Z axis as per bundler
			Point2d p = pCam->Project(PointsCurr + 3*i, true, true) * -1.0;
			Point2d q = Point2dFromArray(ProjCurr + 2 * i);

			double dist = norm(p - q);

			errors[i] = dist;
			error += dist;
		}

		double median = kth_element_copy(NumInlinersCurr, iround(0.95 * NumInlinersCurr), errors);
		double threshold = 1.2 * NUM_STDDEV * median;
		threshold = Clamp(threshold, MIN_PROJ_ERROR_THRESHOLD, MAX_PROJ_ERROR_THRESHOLD);

		for (int i = 0; i < NumInlinersCurr; i++)
		{
			if (errors[i] < threshold)
			{
				inlinersNext.push_back(RefinedInliners[i]);
				memcpy(PointsNext + 3 * count , PointsCurr + 3 * i, sizeof(double) * 3);
				memcpy(ProjNext + 2 * count , ProjCurr  + 2 * i, sizeof(double) * 2);

				count++;
			}
		}

		delete[] PointsCurr;
		delete[] ProjCurr;
		delete[] errors;

		PointsCurr = PointsNext;
		ProjCurr = ProjNext;
		
		if (count == NumInlinersCurr)
		{
			//done removing inlienrs
			break; 
		}

		NumInlinersCurr = count;
		RefinedInliners = inlinersNext;
		
		VISIO_ASSERT(count != 0)
	}

	delete[] PointsCurr;
	delete[] ProjCurr;

	//This might be a bug but is there in bundler
	pCam->ResetDistortationPoly();

	return RefinedInliners;
}

// Refine the camera against its weak inliners; on return inliners.Inliners
// holds the subset of weak inliners that survived the iterative refinement.
void CSfmPool::RefineCameraParams( CCamera* pCam, double* points,double* projections, SInliners& inliners )
{
	//we refine using the weak inliners
	UINT NumWeak = inliners.WeakInliners.size();
	double* WeakPoints = new double[NumWeak * 3];
	double* WeakProjs = new double[NumWeak * 2];

	//gather the weak-inliner correspondences into dense arrays
	for (UINT i = 0; i < NumWeak; i++)
	{
		int src = inliners.WeakInliners[i];
		memcpy(WeakPoints + 3 * i, points + 3 * src, sizeof(double) * 3);
		memcpy(WeakProjs + 2 * i, projections + 2 * src, sizeof(double) * 2);
	}

	inliners.Inliners = RefineCameraParamsInternal(pCam, WeakPoints, WeakProjs, inliners.WeakInliners, NumWeak);

	delete[] WeakPoints;
	delete[] WeakProjs;
}

// Sanity check: every camera's stored added-index must match its position in
// the vector.
void CSfmPool::CheckAddedIndex(CamerasVector& cams) 
{
	for (UINT idx = 0; idx < cams.size(); idx++)
	{
		VISIO_ASSERT(cams[idx]->GetAddedIndex() == idx);
	}
}

void CSfmPool::ReadBundlerParams(double* _params, double* _projections, char* _vmask, int NumParams)
{
	int NumVmask = 0;
	int NumProjections = 0;
	FILE* f = fopen("SFM.out", "r");
	fscanf(f, "%d", &NumVmask);
	for (int i = 0; i < NumVmask; i++)
	{
		int temp = 0;
		fscanf(f, "%d ", &temp);
		_vmask[i] = temp;
	}
	fscanf(f, "%d", &NumProjections);
	for (int i = 0; i < NumProjections; i++)
	{
		fscanf(f, "%lf %lf", &(_projections[2*i]), &(_projections[2*i + 1]));
	}
	for (int i = 0; i < NumParams; i++)
	{
		fscanf(f, "%lf ", &(_params[i]));
	}
	fclose(f);
}