#include "include\NativePTAMSystem.h"

// Shared initialization used by both constructors: resets counters and
// flags and pre-sizes the feature/tracking buffers.
void NativePTAMSystem::commonConstruct()
{
	frameToSkip = 0;
	frameCounter = 0l;
	totalCalibErr = 0;

	// scratch buffers for sprintf-formatted log/status text
	// (released in the destructor)
	buff = new char[1000];
	sbuf = new char[100];

	shouldWriteMessage = true;
	shouldInitPoints = true;
	shouldTrack = false;
	doCapture = false;
	doReset = false;

	// working feature sets, pre-sized to the tracker capacity
	currFeatures = vector<Point2f>(MAX_FEATURES_COUNT);
	features3D = vector<Point3f>(MAX_FEATURES_COUNT);
	featuresTracked = vector<uchar>(MAX_FEATURES_COUNT);
	invisibleFeatures3D = vector<Point3f>();

	// keyframes captured for stereo initialization
	storedFrames = vector<Mat>();
	storedFeatures = vector<vector<Point2f> >();
	storedFrameCount = 0;

	#ifdef USE_NATIVE_CAMERA
	// native capture path: pre-allocate 640x480 working images
	image = Mat(640,480,CV_8UC4);
	frame = Mat(640,480, CV_8UC4);
	gray = Mat(640,480, CV_8UC1);
	prev = Mat(640,480, CV_8UC1);
	captureInitFinished = false;
	captureOpening = false;
	#endif
}

/**
 * Default constructor: no intrinsics available, so the system starts in
 * the uncalibrated state and waits for the calibration flow.
 */
NativePTAMSystem::NativePTAMSystem()
{
	commonConstruct();

	cameraCalibrated = false;
	camera_matrix = Mat();
	distortion_coefficients = Mat();

	currentState = PTAM_STATE_INITIALIZED_WITHOUT_CAM_PARAM;
	messageString = "initialized without camera parameter";
}

/**
 * Construct with pre-computed intrinsics. The arguments are native
 * cv::Mat addresses (passed from Java); their contents are deep-copied,
 * and the system starts directly in the calibrated state.
 */
NativePTAMSystem::NativePTAMSystem(long long addrCameraMatrixMat, long long addrDistortionCoefMat)
{
	commonConstruct();

	Mat* camSrc = (Mat*)addrCameraMatrixMat;
	Mat* distSrc = (Mat*)addrDistortionCoefMat;
	camSrc->copyTo(camera_matrix);
	distSrc->copyTo(distortion_coefficients);

	cameraCalibrated = true;
	currentState = PTAM_STATE_INITIALIZED_WITH_CAM_PARAM;
	messageString = "initialized with camera parameter. Touch to start...";
}

/**
 * Release working images, log buffers and (when built with the native
 * camera) the capture device.
 */
NativePTAMSystem::~NativePTAMSystem()
{
	image.release();
	frame.release();
	gray.release();
	prev.release();

	// buff/sbuf are allocated with new[] in commonConstruct(), so they must
	// be freed with delete[] (the original used plain delete — undefined
	// behavior for array allocations).
	delete[] sbuf;
	delete[] buff;
	buff = NULL;
	sbuf = NULL;

	#ifdef USE_NATIVE_CAMERA
	capture.release();
	#endif
}

// Main per-frame entry point, driven from the Java side.
// Depending on the build flavor it either grabs a frame from the native
// camera (USE_NATIVE_CAMERA) or wraps the gray/color Mats whose native
// addresses are passed in. It then advances the state machine:
// chessboard calibration -> stereo initialization (two keyframes) ->
// continuous tracking. Returns the resulting PTAM_STATE_* value.
int NativePTAMSystem::processFrame(long long addrGray, long long addrFrame)
{

	#ifdef USE_NATIVE_CAMERA

	if (capture.isOpened())
	{

		// grab a frame and derive the gray / RGB working images
		capture >> frame;
		cvtColor(frame, gray, CV_BGR2GRAY);
		cvtColor(frame, image, CV_BGR2RGB);
		image.copyTo(frame);
		//sprintf(buff, "captured frame[0]: %d %d %d", gray.at<uchar>(0,0), gray.at<uchar>(0,1), gray.at<uchar>(0,2));
		//LOG_D("frame", buff);
		captureInitFinished = true;
		captureOpening = false;
	}
	else if (!captureOpening)
	{
		// camera not open yet: request a 640x480 native capture once
		capture.open(CV_CAP_ANDROID + 0);
		capture.set(CV_CAP_PROP_FRAME_WIDTH,640);
		capture.set(CV_CAP_PROP_FRAME_HEIGHT,480);
		captureOpening = true;
		LOG_D("capturing", "opening camera...");
	}

	#else
	// Java-managed camera: the addresses point at native cv::Mat objects
	gray = *((Mat*)addrGray);
	image = *((Mat*)addrFrame);
	image.copyTo(frame);
	#endif

	if (frameToSkip > 0)
	{
		// debounce: skip a few frames right after a user action
		frameToSkip--;
		currentState = PTAM_STATE_FRAME_SKIPPED;
	}
	else
	{
		if (cameraCalibrated)
		{
			currentState = PTAM_STATE_CALIBRATION_COMPLETED;

			if (!shouldTrack)
			{
				// first touch after calibration starts the tracking phase
				if (doCapture)
				{
					shouldTrack = true;
					doCapture = false;
					currentState = PTAM_STATE_STARTING_TRACKING_INIT;
					messageString = "Starting tracking initialization. Touch to get a frame of a flat surface...";
				}
			}

			if (shouldTrack)
			{
				try
				{
					if(shouldInitPoints || (storedFrameCount == 0))
					{
						// (re)detect features while no keyframe is stored yet
						initPoints(gray, currFeatures);
						drawPoints(image, currFeatures, CV_RGB(0,0,255));
						shouldInitPoints = false;
					}
					else
					{
						// NOTE(review): shouldTrunc is computed but never used
						bool shouldTrunc = storedFrameCount < 2;
						// follow features from the previous gray frame
						updatePoints(prev, gray, currFeatures, featuresTracked);

						stringstream st;
						st << countNonZero(Mat(featuresTracked)) << " good points";
						if (shouldWriteMessage)
							putText(image,st.str(),Point(10,15),FONT_HERSHEY_PLAIN,1,Scalar(0,255,0));
					}

					gray.copyTo(prev);

					switch (storedFrameCount)
					{
						case 1:
							// one keyframe stored: visualize correspondence,
							// then intentionally fall through to the shared
							// capture handling below
							drawPointCorrespondence(image, (vector<Point2f>)storedFeatures[0], currFeatures, CV_RGB(0,255,255));
						case 0:
							if (doCapture) {
								// store this frame as a stereo keyframe; the
								// return value is the new stored-frame count
								int storeResult = captureFrameForStereo(frame, currFeatures, features3D, featuresTracked , storedFrames, storedFeatures, referencePoint);

								stringstream st;
								switch (storeResult)
								{
									case 0:
										st << "Frames captured are not good. Please take another frame.";
										currentState = PTAM_STATE_STARTING_TRACKING_INIT;
										break;
									case 1:
										st << "First frame captured. Pan the phone for around 5 cm and touch again...";
										currentState = PTAM_STATE_TRACKING_INIT_NEW_FRAME;
										break;
									default:
										drawPointCorrespondence(image, (vector<Point2f>)storedFeatures[0], (vector<Point2f>)storedFeatures[1], CV_RGB(255,0,0));
										st << "Tracking initialization finished. Starting tracking...";
										currentState = PTAM_STATE_TRACKING_INIT_FINISHED;
										break;
								}
								storedFrameCount = storeResult;
								frameToSkip = FRAME_SKIP_ON_ACTION;
								messageString = st.str();
								doCapture = false;
							}
							break;
						default:
							// two keyframes exist: normal tracking mode.
							// Revive invisible points every 5th frame,
							// (future) triangulate every 20th frame.
							currentState = PTAM_STATE_TRACKING;
							double reprojErr = trackFrame(
									currFeatures, features3D,
									invisibleFeatures3D, featuresTracked,
									(frameCounter % 5 == 0), (frameCounter % 20 == 0));
							drawReprojectedOnImage(image,rV,tV,currFeatures,features3D, referencePoint);
							stringstream st; st << "Tracking: x(" << tV[0] << ") y(" << tV[1] <<") z(" << tV[2];//reprojection error = " << reprojErr;
							messageString = st.str();
							break;
					}

					if (doReset)
					{
						// user requested a reset: re-detect features next frame
						shouldInitPoints = true;
						doReset = false;
					}
				}
				catch(Exception* ex)
				{
					LOG_E("processing", (ex->msg).data());
				}
			}
		}
		else
		{
			// not calibrated yet: run the chessboard calibration flow
			currentState = PTAM_STATE_CALIBRATING_CAMERA;
			messageString = "Touch screen for calibration...";

			if (doCapture)
			{

				int calibState = tryToCalibrate();
				stringstream st;
					st << "Calibrating camera: ";


				if (calibState == CALIB_FRAMES_COUNT)
				{
					currentState = PTAM_STATE_CALIBRATION_COMPLETED;
					st << "Calibration finished! Starting tracking...";
				}
				else
				{
					if (calibState >= 0)
					{
						st << calibState << " frame(s) OK. Waiting for " << CALIB_FRAMES_COUNT - calibState << " more frame(s).";
					}
					else
					{
						// negative return values are CALIB_ERROR_* codes
						switch (calibState)
						{
							case CALIB_ERROR_NO_CHESSBOARD:
								st << " Please show a " << CALIB_CHESSBOARD_SIZE.height << " by " << CALIB_CHESSBOARD_SIZE.width << " chess board pattern.";
								break;
							case CALIB_ERROR_CALIBRATION_INVALID:
								st << " Calibration is invalid. Restarting the calibration process...";
								break;
						}

					}
				}

				messageString = st.str();
				frameToSkip = FRAME_SKIP_ON_ACTION;
				doCapture = false;
			}

		}


	}

	frameCounter++;
	// overlay the status message near the bottom of the frame
	if (shouldWriteMessage) putText(image,messageString,Point(10,image.rows - 15),FONT_HERSHEY_PLAIN,1,Scalar(255,0,0));
	#ifdef USE_NATIVE_CAMERA
	// native camera path: copy the annotated image back into the caller's Mat
	if (captureInitFinished)
	{
		Mat* tmpMat = (Mat*)addrFrame;
		image.copyTo(*tmpMat);
	}
	#endif

	return currentState;
}

/*
 *
 * Functions that got broken up
 *
 */

/**
 * Detect up to MAX_FEATURES_COUNT Shi-Tomasi corners in the gray frame
 * and store them in `features`.
 */
void NativePTAMSystem::initPoints(Mat inputFrame, vector<Point2f>& features)
{
	const double qualityLevel = 0.01f;
	const double minDistance = 2.0;
	goodFeaturesToTrack(inputFrame, features, MAX_FEATURES_COUNT, qualityLevel, minDistance);
}

/**
 * Advance the feature positions from the previous gray frame to the
 * current one via pyramidal Lucas-Kanade optical flow. Positions are
 * updated in place only for points the flow actually tracked;
 * featuresTracked receives the per-point tracking status.
 */
void NativePTAMSystem::updatePoints(Mat prevFrame, Mat currFrame, vector<Point2f>& features, vector<uchar>& featuresTracked)
{
	vector<Point2f> flowedPoints;
	vector<float> flowError;

	calcOpticalFlowPyrLK(prevFrame, currFrame, features, flowedPoints, featuresTracked, flowError);

	for (size_t i = 0; i < flowedPoints.size(); i++)
	{
		if (!featuresTracked[i])
			continue;
		features[i] = flowedPoints[i];
	}
}

// Track one frame: prune features that lost optical-flow tracking (parking
// their 3D points in invisibleFeatures3d), estimate the camera pose via
// findExtrinsics, and — when shouldReproject is set — either revive
// invisible points that reproject inside the view, or prune 2D/3D matches
// with large reprojection error. Returns the error computed by
// keepGood2D3DMatch when that path ran, otherwise -1.
double NativePTAMSystem::trackFrame(vector<Point2f>& features, vector<Point3f>& features3d, vector<Point3f>& invisibleFeatures3d, vector<uchar>& featuresTracked, bool shouldReproject, bool shouldTriangulate)
{
	double reprojErr = -1;

	if (countNonZero(Mat(featuresTracked)) == features.size())
	{
		// every feature survived optical flow; nothing to prune
		LOG_D("trackFrame","points looking good'");
	}
	else
	{
		// compact features/features3d down to the tracked ones and move the
		// 3D points of lost features into the invisible list
		vector<Point2f> tempFeatures;
		vector<Point3f> tempFeatures3d;
		for (int i = 0; i < features.size(); i++)
		{
			if (featuresTracked[i])
			{
				tempFeatures.push_back((Point2f)features[i]);
				tempFeatures3d.push_back((Point3f)features3d[i]);
			}
			else
			{
				invisibleFeatures3d.push_back((Point3f)features3d[i]);
			}
		}

		features.clear();
		features3d.clear();
		featuresTracked.clear();

		for (int i = 0; i < tempFeatures.size(); i ++)
		{
			features.push_back((Point2f)tempFeatures[i]);
			features3d.push_back((Point3f)tempFeatures3d[i]);
			featuresTracked.push_back(1);
		}

		LOG_D("trackFrame","Santa came to town: bad points exist");
	}

	// pose estimation from the surviving 2D-3D matches (updates rV/tV)
	findExtrinsics(features, features3d, rV,tV);

	if (shouldReproject)
	{
		//once every 5 frames:
		//try to reproject some of the "invisible" features back into the tracking

		if (invisibleFeatures3d.size() > 0)
		{
			vector<Point2f> revived2d;
			vector<Point3f> revived3d;
			reprojectInvisibles(invisibleFeatures3d, revived2d, revived3d, rV, tV);

			if (revived3d.size() > 0)
			{
				// NOTE(review): %d with size_t arguments is fine on 32-bit
				// Android but undefined on LP64 targets
				sprintf(buff, "%d point(s) get revived!", revived2d.size());
				LOG_D("trackFrame", buff);
				for (int i = 0; i < revived2d.size(); i++)
				{
					features.push_back((Point2f)revived2d[i]);
					features3d.push_back((Point3f)revived3d[i]);
					featuresTracked.push_back(1);
				}
			}
			else
			{
				// nothing revived: prune 2D/3D pairs whose reprojection
				// drifted too far and park their 3D points as invisible
				vector<Point3f> removed3d;
				reprojErr = keepGood2D3DMatch(features, features3d, rV, tV, removed3d);
				if (removed3d.size() > 0)
				{
					sprintf(buff, "%d 3d point is not good.", removed3d.size());
					LOG_D("trackFrame", buff);
					for (int i = 0; i < removed3d.size(); i++)
					{
						invisibleFeatures3d.push_back((Point3f)removed3d[i]);
					}

					sprintf(buff, "Now I have %d features %d 3d features %d invisibles", features.size(), features3d.size(), invisibleFeatures3d.size());
					// NOTE(review): no trailing semicolon — presumably LOG_D
					// is a macro that tolerates/supplies it; confirm
					LOG_D("trackFrame", buff)
				}
			}
		}
	}

	if(shouldTriangulate) {
		//every 20 frames, try to triangulate more features using this view
		//and the view from 20 frames ago
		//TODO: do this.
	}

	return reprojErr;
}

// Store the current frame and its feature list as a stereo keyframe.
// After the second keyframe: prune both stored feature lists down to the
// points still tracked, triangulate them (stereoInit), and — when at
// least 10 points survive — restart tracking with only those points and
// seed the pose by solving extrinsics for both views.
// Returns the resulting number of stored keyframes:
//   0 - triangulation produced too few points, everything was reset,
//   1 - first keyframe stored, waiting for the second,
//   2 - both keyframes stored and the world was initialized.
int NativePTAMSystem::captureFrameForStereo(Mat newFrame, vector<Point2f>& features, vector<Point3f>& features3d, vector<uchar>& featuresTracked, vector<Mat>& storedFrames, vector<vector<Point2f> >& storedFeatures, Point3f& refPoint)
{

	storedFrames.push_back(Mat(newFrame));
	storedFeatures.push_back(vector<Point2f>());

	int storedCount = storedFrames.size();

	// copy the current feature positions into the new keyframe slot
	for (int i = 0; i < features.size(); i++)
	{
		((vector<Point2f>&)storedFeatures[storedCount - 1]).push_back((Point2f)features[i]);
	}

	// NOTE(review): %d with size_t arguments — OK on 32-bit, UB on LP64
	sprintf(buff, "Stored %d points in frame %d / %d",  ((vector<Point2f>&)storedFeatures[storedCount - 1]).size(), storedCount - 1, storedFrames.size());
	LOG_D("captureFrameForStereo", buff);

	if(storedCount == 2) {
		// keep in BOTH keyframes only the points that survived tracking.
		// NOTE(review): assumes featuresTracked indexes both stored lists
		// identically (same length and ordering) — confirm with callers.
		for (int i = 0; i < 2; i++)
		{
			vector<Point2f> temp2d = storedFeatures[i];
			((vector<Point2f>&)storedFeatures[i]).clear();
			for (int j = 0; j < temp2d.size(); j++)
			{
				if (featuresTracked[j])
				{
					((vector<Point2f>&)storedFeatures[i]).push_back((Point2f)temp2d[j]);
				}
			}
		}

		LOG_D("captureframeforstereo","points ready :)");

		//triangulate points
		stereoInit((vector<Point2f>&)storedFeatures[0], (vector<Point2f>&)storedFeatures[1], features3d, refPoint);

		//now track only the good & triangulated points
		int numPts = ((vector<Point2f>&)storedFeatures[1]).size();

		sprintf(buff, "numPts: %d", numPts);
		LOG_D("captureFrameForStereo", buff);

		if (numPts < 10)
		{
			// too few triangulated points: discard everything and restart
			storedFrames.clear();
			storedFeatures.clear();
			storedCount = 0;
		}
		else
		{
			// restart tracking using only the triangulated points
			features.clear();
			featuresTracked.clear();
			for (int i = 0; i < numPts; i++)
			{
				features.push_back((Point2f)storedFeatures[1][i]);
				featuresTracked.push_back(1);
			}

			//estimate 2nd keyframe extrinsics to have a good starting point
			//of the camera position
			findExtrinsics((vector<Point2f>&)storedFeatures[0],features3d,rV,tV);
			findExtrinsics((vector<Point2f>&)storedFeatures[1],features3d,rV,tV);
		}

	}
	return storedCount;

}

/**
 * Initialize the world from the two stored stereo views: estimate the
 * plane normal (uV) from the homography between the undistorted point
 * sets, triangulate 3D points, and keep only the pairs that survived
 * triangulation. On return features1/features2/features3d contain the
 * surviving matches and refPoint holds the centroid of the triangulated
 * 3D points.
 */
void NativePTAMSystem::stereoInit(vector<Point2f>& features1, vector<Point2f>& features2, vector<Point3f>& features3d, Point3f& refPoint) {

	vector<Point2f> undistFeatures1, undistFeatures2;

	undistortPoints(features1,undistFeatures1,camera_matrix,distortion_coefficients);
	undistortPoints(features2,undistFeatures2,camera_matrix,distortion_coefficients);
	LOG_D("stereoinit", "undistorted points");

	//Mat H = findHomography(pts1M,pts2M,CV_RANSAC,10.0);
	Mat H = findHomography(undistFeatures1,undistFeatures2,CV_LMEDS);
	double* ptH = (double*)H.ptr(0);

	// H is 3x3 (exactly 9 doubles): log ptH[0..8] only — the original also
	// passed ptH[9], an out-of-bounds read
	sprintf(buff, "Homography:\n%.3f %.3f %.3f\n%.3f %.3f %.3f\n%.3f %.3f %.3f\n",
		ptH[0],ptH[1],ptH[2],ptH[3],ptH[4],ptH[5],ptH[6],ptH[7],ptH[8]);
	//warpPerspective(frame1,image,H,frame1.size(),INTER_LINEAR);
	LOG_D("stereoinit",buff);

	// approximate plane normal derived from the homography's first row
	uV = Vec3d(3.0*(1.0-ptH[0]),3.0*ptH[1],3.0*ptH[2]);

	// normalize to unit length
	double s = sqrt(uV.ddot(uV));
	uV[0] /= s;
	uV[1] /= s;
	uV[2] /= s;
	sprintf(buff,"normal %.3f %.3f %.3f\n",uV[0],uV[1],uV[2]);
	LOG_D("stereoinit",buff);

	vV = Vec3d(0.0,-1.0,0.0);

	vector<uchar> isTriangulated;
	triangulate(undistFeatures1, undistFeatures2, features3d, isTriangulated);

	//retain only good points
	vector<Point3f> temp3d = features3d;
	vector<Point2f> temp2d_1 = features1;
	vector<Point2f> temp2d_2 = features2;
	features1.clear();
	features2.clear();
	features3d.clear();
	refPoint = Point3f(0,0,0);
	for (int i = 0; i < isTriangulated.size(); i++)
	{
		if (isTriangulated[i])
		{
			features1.push_back((Point2f)temp2d_1[i]);
			features2.push_back((Point2f)temp2d_2[i]);
			features3d.push_back((Point3f)temp3d[i]);
			refPoint = refPoint + temp3d[i];
		}
	}

	// average over the points actually summed (the original divided by the
	// total candidate count, biasing the centroid toward the origin) and
	// guard against division by zero when nothing triangulated
	int goodCount = (int)features3d.size();
	if (goodCount > 0)
	{
		refPoint.x = refPoint.x / (float)goodCount;
		refPoint.y = refPoint.y / (float)goodCount;
		refPoint.z = refPoint.z / (float)goodCount;
	}

	sprintf(buff, "triangulated %d points", goodCount);
	LOG_D("stereoinit",buff);

}


// Triangulate matched (already undistorted) point pairs from the two
// stereo keyframes using a fixed camera geometry: first camera at the
// identity pose and the second translated 5 units along -x, both looking
// down the -z axis. Each triangulated point is reprojected into both
// views; points whose reprojection error exceeds 1px (or is non-finite,
// see isBadMove) get a dummy 3D entry and isTriangulated[i] = 0, so
// points3dOut ends up with one entry per input pair, index-aligned with
// the inputs. Accepted points are scaled down by s = 7.
void NativePTAMSystem::triangulate(vector<Point2f>& points1, vector<Point2f>& points2, vector<Point3f>& points3dOut, vector<uchar>& isTriangulated)
{

	points3dOut.clear();

	//projection matrices
	double P1d[12] = {	-1,0,0,0,
						0,1,0,0,
						0,0,1,0 };	//Identity, but looking into -z axis
	Mat P1m(3,4,CV_64FC1,P1d);

	double P2d[12] = {	-1,0,0,-5,
						0,1,0,0,
						0,0,1,0 };  //Identity rotation, 5cm -x translation, looking into -z axis
	Mat P2m(3,4,CV_64FC1,P2d);

	// NOTE(review): fixed 1000-float buffer holds at most 250 homogeneous
	// points (4 rows) — confirm MAX_FEATURES_COUNT stays <= 250 or that
	// triangulatePoints reallocates outTM rather than writing in place
	float _d[1000] = {0.0f};
	Mat outTM(4,points1.size(),CV_32FC1,_d);

	triangulatePoints(P1m, P2m, points1, points2, outTM);

	/*
	//triangulation: learnt from college
	stringstream st;
	st << "Point1_size:" << points1.size() << " | points2_size:" << points2.size();
	LOG_D("triangulate", st.str().data());

	Mat A = Mat(4,4,CV_64FC1,_d);
	Mat U = Mat(), D= Mat(), V= Mat();

	for (int i = 0; i < points1.size(); i++)
	{

		int k = 0;
		for (int j = 0; j < 3; j++)
			A.at<double>(j,k) = P1m.at<double>(j,0) - ((Point2d)(points1[i])).x * P1m.at<double>(j,2);
		k++;
		for (int j = 0; j < 3; j++)
			A.at<double>(j,k) = P1m.at<double>(j,1) - ((Point2d)(points1[i])).y * P1m.at<double>(j,2);
		k++;
		for (int j = 0; j < 3; j++)
			A.at<double>(j,k) = P2m.at<double>(j,0) - ((Point2d)(points2[i])).x * P2m.at<double>(j,2);
		k++;
		for (int j = 0; j < 3; j++)
			A.at<double>(j,k) = P2m.at<double>(j,1) - ((Point2d)(points2[i])).y * P2m.at<double>(j,2);
		k++;

		SVD::compute(A, D, U, V);
		Mat pt = V.col(V.cols - 1);
		pt = pt / (pt.at<double>(3));
		sprintf(buff, "triangulated point[%d]: %.3f %.3f %.3f %.3f\n",i,pt.at<double>(0),pt.at<double>(1),pt.at<double>(2), pt.at<double>(3));
		LOG_D("triangulate", buff);
		for (int j = 0; j < 4; j++)
		{
			outTM.at<float>(j,i) = (float)pt.at<double>(j);
		}
	}
	A.release();
	U.release();
	V.release();
	D.release();
	 */
	//triangulatePoints();


	LOG_D("triangulate", "triangulated")
	vector<Point2f> projPoints[2] = {points1,points2};

	// scratch buffers for the reprojection check below; `twoD`/`threeD`
	// are Mat headers over these arrays, so writing the arrays updates
	// the Mats and vice versa
	double point2D_dat[3] = {0};
	double point3D_dat[4] = {0};
	Mat twoD(3,1,CV_64FC1,point2D_dat);
	Mat threeD(4,1,CV_64FC1,point3D_dat);

	Mat P[2] = {Mat(P1m),Mat(P2m)};
	int pointCount = outTM.cols;

	isTriangulated = vector<uchar>(pointCount);

	//scan all points, reproject 3D->2D, and keep only good ones
	//points1Proj.clear();
	for(int i=0;i<pointCount;i++)
	{

		// dehomogenize the triangulated point (divide by W)
		double W = outTM.at<float>(3,i);

		point3D_dat[0] = outTM.at<float>(0,i) / W;
		point3D_dat[1] = outTM.at<float>(1,i) / W;
		point3D_dat[2] = outTM.at<float>(2,i) / W;
		point3D_dat[3] = 1;

		bool push = true;

		// !!! Project this point for each camera //
		for( int currCamera = 0; currCamera < 2; currCamera++ )
		{
			//cvmMul(projMatrs[currCamera], &point3D, &point2D);
			twoD = P[currCamera] * threeD;

			float x,y;
			float xr,yr,wr;
			x = (float)(((Point2f)(projPoints[currCamera][i])).x);
			y = (float)(((Point2f)(projPoints[currCamera][i])).y);

			wr = (float)point2D_dat[2];
			xr = (float)(point2D_dat[0]/wr);
			yr = (float)(point2D_dat[1]/wr);

			// distance between observed and reprojected position
			float deltaX,deltaY;
			deltaX = (float)fabs(x-xr);
			deltaY = (float)fabs(y-yr);


			if(isBadMove(deltaX, 1.f) || isBadMove(deltaY, 1.f))
			{
				push = false;
			}
		}

		if(push) {
			// A good 3D reconstructed point, add to known world points

			// empirical scale factor to bring points into the working units
			double s = 7;
			Point3f point3f ((float)point3D_dat[0]/s, (float)point3D_dat[1]/s, (float)point3D_dat[2]/s);
			//sprintf(buff, "Good point: %.3f %.3f %.3f\n",point3f.x ,point3f.y, point3f.z);
			//LOG_D("triangulate", buff);

			//push real data
			points3dOut.push_back(point3f);
			isTriangulated[i] = 1;
		} else {

			//add dummy data
			points3dOut.push_back(Point3f());
			isTriangulated[i] = 0;
		}

	}
	LOG_D("triangulate", "finished");

}

/**
 * find extrinsic parameters of this camera according to the detected points
 *
 * Solves PnP for the given 2D-3D correspondences. When rv/tv already hold
 * a 3-element pose it is used as the initial guess (useApprox = true);
 * otherwise they are reallocated, rvec is seeded with a 180-degree flip
 * about the X axis, and no initial guess is used.
 * NOTE(review): Mat(rv)/Mat(tv) appear to wrap the vectors' storage
 * without copying, so solvePnP writes its result straight into rv/tv —
 * confirm for the OpenCV version in use.
 */
void NativePTAMSystem::findExtrinsics(vector<Point2f>& features, vector<Point3f>& features3d,  vector<double>& rv, vector<double>& tv) {
	//estimate extrinsics for these points

	bool useApprox = true;
	Mat rvec, tvec;
	if (rv.size() != 3)
	{
		rv = vector<double>(3);
		rvec = Mat(rv);
		// seed rotation: 180 degrees about X (camera looks down -z)
		double _d[9] = {1,0,0,
						0,-1,0,
						0,0,-1};
		Rodrigues(Mat(3,3,CV_64FC1,_d),rvec);
		useApprox = false;
	}
	else
	{
		rvec = Mat(rv);
	}

	if (tv.size() != 3)
	{
		tv = vector<double>(3);
		useApprox = false;
	}
	tvec = Mat(tv);

	//sprintf(buff, "received %d image points, %d object ", features.size(), features3d.size());
	//LOG_D("extrinsics", buff);

	bool solvePnPSucceeded = solvePnP(features3d,features,camera_matrix,distortion_coefficients,rvec,tvec,useApprox);

	//LOG_D("findExtrinsics", solvePnPSucceeded?"solved PnP":"could not solve PNP");

	// NOTE(review): %d with size_t (.size()) args — OK on 32-bit, UB on LP64
	sprintf(buff, "frame extrinsic:[rvec(%d): %.3f %.3f %.3f][tvec(%d): %.3f %.3f %.3f]",
			rv.size(), rv[0],rv[1],rv[2],
			tv.size(), tv[0],tv[1],tv[2]);
	LOG_D("findExtrinsics", buff);

	//Mat rotM(3,3,CV_64FC1); ///,_r);
	//Rodrigues(rvec,rotM);
}

/**
 * Draw tracking diagnostics onto `image`: each 3D feature reprojected
 * with the pose (rv, tv) as a yellow circle, the tracked 2D feature as an
 * X marker, and a red line showing the reprojection error between them.
 * When DRAW_TEST_TRIANGLE is defined, also renders a small fixed 3D shape
 * around refPoint to make the estimated pose visible.
 */
void NativePTAMSystem::drawReprojectedOnImage(Mat& image, vector<double>& rv, vector<double>& tv, vector<Point2f>& features, vector<Point3f>& features3d, Point3f refPoint)
{
	vector<Point2f> imagePoints;

	// cast sizes to int: %d with a size_t argument is undefined on LP64
	sprintf(buff, "%d features %d 3d features", (int)features.size(), (int)features3d.size());
	LOG_D("drawreproj", buff);

	projectPoints(features3d,Mat(rv),Mat(tv),camera_matrix,distortion_coefficients,imagePoints);

	for (int i = 0; i < imagePoints.size(); i++)
	{
		// reprojected 3D point
		circle(image, (Point2f)(imagePoints[i]), 2, CV_RGB(255,255,0));
		// X marker at the tracked 2D feature
		line(image,
				Point2f( ((Point2f)features[i]).x + 2, ((Point2f)features[i]).y + 2),
				Point2f( ((Point2f)features[i]).x - 2, ((Point2f)features[i]).y - 2),
				CV_RGB(128,128,0));
		line(image,
				Point2f( ((Point2f)features[i]).x + 2, ((Point2f)features[i]).y - 2),
				Point2f( ((Point2f)features[i]).x - 2, ((Point2f)features[i]).y + 2),
				CV_RGB(128,128,0));
		// error vector between reprojection and tracked position
		line(image, (Point2f)imagePoints[i], (Point2f)features[i], CV_RGB(255,0,0));
	}

	#ifdef DRAW_TEST_TRIANGLE
	{
		// project a fixed 3D shape centered on the reference point
		vector<Point3f> vertices;
		vector<Point2f> verticesProj;
		vertices.push_back(refPoint);
		vertices.push_back(Point3f(0,0,2) + refPoint);
		vertices.push_back(Point3f(2,2,0) + refPoint);
		vertices.push_back(Point3f(-2,2,0) + refPoint);
		vertices.push_back(Point3f(2,-2,0) + refPoint);
		vertices.push_back(Point3f(-2,-2,0) + refPoint);
		projectPoints(vertices, Mat(rv), Mat(tv), camera_matrix, distortion_coefficients, verticesProj);

		line(image, (Point2f)verticesProj[1], (Point2f)verticesProj[0], CV_RGB(0,255,255));
		for (int i = 2; i < 6; i++)
		{
			line(image, (Point2f)verticesProj[i], (Point2f)verticesProj[0], CV_RGB(0,255,255));
			line(image, (Point2f)verticesProj[i], (Point2f)verticesProj[1], CV_RGB(0,0,255));
			line(image, (Point2f)verticesProj[i], (Point2f)verticesProj[i == 5?2:(i+1)], CV_RGB(0,0,255));
		}
	}
	#endif

}

/**
 * Check which tracked 2D points still align with the reprojection of
 * their 3D feature under the pose (rv, tv). Pairs whose reprojection
 * distance exceeds 10px have their 3D point appended to
 * features3dRemoved; when any were removed, features/features3d are
 * rebuilt with only the surviving pairs.
 * Returns the sum of the per-point reprojection distances.
 */
double NativePTAMSystem::keepGood2D3DMatch(vector<Point2f>& features, vector<Point3f>& features3d, vector<double>& rv, vector<double>& tv, vector<Point3f>& features3dRemoved)
{
	// reproject every current 3D feature with the given pose
	vector<Point2f> reprojected;
	projectPoints(features3d, Mat(rv), Mat(tv), camera_matrix, distortion_coefficients, reprojected);

	vector<Point2f> kept2d;
	vector<Point3f> kept3d;
	double distanceSum = 0;

	int pairCount = features.size();
	for (int i = 0; i < pairCount; i++)
	{
		double dx = (double)(((Point2f)(reprojected[i])).x - ((Point2f)(features[i])).x);
		double dy = (double)(((Point2f)(reprojected[i])).y - ((Point2f)(features[i])).y);
		double dist = sqrt(dx*dx + dy*dy);
		distanceSum += dist;

		if (dist > 10.0)
		{
			// drifted too far from its reprojection: drop this pair
			features3dRemoved.push_back((Point3f)features3d[i]);
		}
		else
		{
			kept2d.push_back((Point2f)features[i]);
			kept3d.push_back((Point3f)features3d[i]);
		}
	}

	if (features3dRemoved.size() > 0)
	{
		// rebuild the tracked sets from the survivors only
		features.clear();
		features3d.clear();
		for (size_t i = 0; i < kept2d.size(); i++)
		{
			features.push_back((Point2f)kept2d[i]);
			features3d.push_back((Point3f)kept3d[i]);
		}
	}
	return distanceSum;
}

/**
 * Try to bring "invisible" (previously lost) 3D features back into the
 * tracked set. Every invisible point is projected with the current pose
 * (rv, tv); points that land inside the frame are returned through
 * reproj2dPoints / reproj3dPoints and removed from invisible3dFeatures,
 * the rest remain invisible.
 */
void NativePTAMSystem::reprojectInvisibles(vector<Point3f>& invisible3dFeatures, vector<Point2f>& reproj2dPoints, vector<Point3f>& reproj3dPoints, vector<double> rv, vector<double> tv)
{

	vector<Point2f> imagePoints;

	projectPoints(invisible3dFeatures, Mat(rv), Mat(tv), camera_matrix, distortion_coefficients, imagePoints);

	Rect frameRect (Point(0,0),FRAME_SIZE);

	vector<Point3f> newInvisibleList;
	for(int i = 0 ; i < invisible3dFeatures.size(); i++) {
		if(((Point2f)imagePoints[i]).inside(frameRect)) {
			//only use points that are inside the frame...

			reproj2dPoints.push_back((Point2f)imagePoints[i]);			//2d for tracking
			reproj3dPoints.push_back((Point3f)invisible3dFeatures[i]);	//3d for pose estim

		} else {
			newInvisibleList.push_back((Point3f)invisible3dFeatures[i]);
		}
	}

	// replace the original element-by-element copy with a single assignment
	invisible3dFeatures = newInvisibleList;

	// cast to int: %d with a size_t argument is undefined on LP64 targets
	sprintf(buff, "revived %d points", (int)reproj2dPoints.size());
	LOG_D("invisible", buff);

}

// Run one calibration step on the current gray frame: detect a chessboard,
// accumulate its corners in calibPoints, and once CALIB_FRAMES_COUNT views
// have been collected run calibrateCamera.
// Returns:
//   CALIB_ERROR_NO_CHESSBOARD        - no chessboard found in this frame,
//   1..CALIB_FRAMES_COUNT-1          - number of good views collected so far,
//   CALIB_FRAMES_COUNT               - calibration solved and sanity-checked,
//   CALIB_ERROR_CALIBRATION_INVALID  - checkRange failed; views discarded.
int NativePTAMSystem::tryToCalibrate()
{

	int returnState = 0;

	//Get chessboard corners
	bool found = false;
	vector<Point2f> pointbuf;

	sprintf(buff, "%d == %d, (%d %d)",CV_8UC1, gray.type(), gray.cols, gray.rows);
	LOG_E("calib", buff);
	found = findChessboardCorners(gray, CALIB_CHESSBOARD_SIZE, pointbuf, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

	if (found)
	{

		calibPoints.push_back(pointbuf);

		if (calibPoints.size() >= CALIB_FRAMES_COUNT)
		{
			//calibrate camera
			// start from an identity-like intrinsic guess; seed fx with the
			// aspect ratio for non-square frames
			camera_matrix = Mat::eye(3,3, CV_64F);
			if (gray.cols != gray.rows)
			{
				double aspectRatio = (double) gray.rows / (double) gray.cols;
				camera_matrix.at<double>(0,0) = aspectRatio;
			}
			distortion_coefficients = Mat::zeros(5,1, CV_64F);

			// one identical planar chessboard model per captured view
			vector< vector<Point3f> > objectPoints;

			vector<Point3f> initObjPts;

	        for( int i = 0; i < CALIB_CHESSBOARD_SIZE.height; i++ )
	            for( int j = 0; j < CALIB_CHESSBOARD_SIZE.width; j++ )
	            	initObjPts.push_back(Point3f(float(j),float(i), 0));


	        while (objectPoints.size() != calibPoints.size())
	        {
	        	objectPoints.push_back(vector<Point3f>(initObjPts));
	        }

	        vector<Mat> rvecs, tvecs;
	        double rms = calibrateCamera(objectPoints, calibPoints, gray.size(), camera_matrix, distortion_coefficients, rvecs, tvecs, CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);

	        if (checkRange(camera_matrix) && checkRange(distortion_coefficients))
	        {
	        	cameraCalibrated = true;

		        //Compute reprojection Errors;
		        vector<float> reprojErrs;
		        vector<Point2f> imagePoints2;

		        // NOTE(review): the loop variable below shadows this `i`
		        int i, totalPoints = 0;
		        double totalErr = 0, err;
		        for (int i = 0; i < objectPoints.size(); i++)
		        	reprojErrs.push_back(0);

		        // per-view RMS error plus the overall RMS over all corners
		        for( i = 0; i < (int)objectPoints.size(); i++ )
		        {
		            projectPoints(Mat(objectPoints[i]), Mat(rvecs[i]), Mat(tvecs[i]),
		            		camera_matrix, distortion_coefficients, imagePoints2);
		            err = norm(Mat(calibPoints[i]), Mat(imagePoints2), CV_L2);
		            int n = (int)((vector<Point3f>)(objectPoints[i])).size();
		            reprojErrs[i] = (float)std::sqrt(err*err/n);
		            totalErr += err*err;
		            totalPoints += n;
		        }

		        totalCalibErr =  std::sqrt(totalErr/totalPoints);
		        returnState = CALIB_FRAMES_COUNT;

	        }
	        else
	        {
	        	// implausible result: drop the collected views and restart
	        	calibPoints.clear();
	        	returnState = CALIB_ERROR_CALIBRATION_INVALID;
	        }

		}
		else
		{
			returnState = calibPoints.size();
		}
	}
	else
		returnState = CALIB_ERROR_NO_CHESSBOARD;

	return returnState;
}

/**
 * Draw both point sets and a line between each corresponding pair:
 * first points green, second points blue, connecting line in `color`.
 * Robustness fix: iterate only over pairs present in BOTH lists — the
 * original looped over second.size() while indexing first[i], which can
 * read out of bounds when the lists differ in length.
 */
void NativePTAMSystem::drawPointCorrespondence(Mat& image, vector<Point2f> first, vector<Point2f> second, Scalar color)
{
	size_t pairCount = first.size() < second.size() ? first.size() : second.size();
	for (size_t i = 0; i < pairCount; i ++)
	{
		Point pt1 = Point(
				((Point2f)(first[i])).x,
				((Point2f)(first[i])).y);
		Point pt2 = Point(
				((Point2f)(second[i])).x,
				((Point2f)(second[i])).y);

		circle(image,pt1,2,CV_RGB(0,255,0),CV_FILLED);
		circle(image,pt2,2,CV_RGB(0,0,255),CV_FILLED);
		line(image, pt1, pt2, color);
	}
}

/**
 * Draw every point in `points` as a small filled circle of `color`.
 * BUG FIX: the original iterated over `points` but indexed the
 * currFeatures member, silently ignoring its parameter; it now draws the
 * points it was given (current call sites pass currFeatures, so their
 * output is unchanged).
 */
void NativePTAMSystem::drawPoints(Mat&image, vector<Point2f> points, Scalar color)
{
	for (size_t i = 0; i < points.size(); i++)
	{
		Point pt = Point(((Point2f)points[i]).x, ((Point2f)points[i]).y);
		circle(image,pt,2,color,CV_FILLED);
	}
}

/*
 *
 * Interface to get extrinsics
 *
 */

bool NativePTAMSystem::getExtrinsics(long long addrMatRv, long long addrMatTv)
{
	Mat* inRvMat = (Mat*)addrMatRv;
	Mat* inTvMat = (Mat*)addrMatTv;

	if (inRvMat != NULL && inTvMat != NULL)
	{
		Mat(rV).copyTo(*inRvMat);
		Mat(tV).copyTo(*inTvMat);
		//LOG_D("extr", "copied");
		return true;
	}
	else return false;
}

bool NativePTAMSystem::getUV(long long addrMatUV, long long addrMatVV)
{
	Mat* inUVMat = (Mat*)addrMatUV;
	Mat* inVVMat = (Mat*)addrMatVV;

	if (inUVMat != NULL && inVVMat != NULL)
	{
		Mat(uV).copyTo(*inUVMat);
		Mat(vV).copyTo(*inVVMat);
		return true;
	}
	else return false;
}

bool NativePTAMSystem::getReferencePoint(long long addrRefPt)
{
	Mat* inRefPt = (Mat*)addrRefPt;
	if (inRefPt!=NULL)
	{
		if (inRefPt->type() == CV_32FC1)
		{
			inRefPt->at<float>(0) = referencePoint.x;
			inRefPt->at<float>(1) = referencePoint.y;
			inRefPt->at<float>(2) = referencePoint.z;
		}
		else if (inRefPt->type() == CV_32FC3)
		{
			inRefPt->at<Vec3f>(0)[0] = referencePoint.x;
			inRefPt->at<Vec3f>(0)[1] = referencePoint.y;
			inRefPt->at<Vec3f>(0)[2] = referencePoint.z;
		}
		return true;
	}
	else return false;
}

/**
 * Return a copy of the current user-facing status message.
 */
string NativePTAMSystem::getOutputString()
{
	string copyOfMessage = messageString;
	return copyOfMessage;
}

bool NativePTAMSystem::getCameraMatrix(long long addrCameraMatrixMat, long long addrDistortionCoefMat)
{
	bool success = cameraCalibrated;
	if (cameraCalibrated)
	{
		Mat* inCamMat = (Mat*)addrCameraMatrixMat;
		Mat* inDistCoef = (Mat*)addrDistortionCoefMat;

		if (inCamMat != NULL && inDistCoef != NULL)
		{
			camera_matrix.copyTo(*inCamMat);
			distortion_coefficients.copyTo(*inDistCoef);
		}
		else success = false;
	}

	return success;
}

void NativePTAMSystem::captureOnNextFrame()
{
	doCapture = true;
}

void NativePTAMSystem::resetOnNextFrame()
{
	doReset = true;
}

/**
 * A displacement is "bad" when it exceeds the threshold or is not a
 * finite number (NaN / infinity).
 */
bool NativePTAMSystem::isBadMove(float in, float threshold)
{
	return (in > threshold) || isnan(in) || isinf(in);
}

// Enable/disable drawing of the status text onto the output frame.
void NativePTAMSystem::setShouldWriteMessage(bool shouldWriteMessage)
{
	this->shouldWriteMessage = shouldWriteMessage;
}

/*
 *
 * Helper function to help vector resize
 *
 */

/**
 * Resize `v` to exactly `new_size` elements. Slots added when growing are
 * filled with `initValue`; excess elements are dropped when shrinking.
 * Replaces the original hand-rolled pop_back/push_back loops with the
 * equivalent std::vector::resize overload.
 */
template <class T>
void vector_resize(std::vector<T> &v, unsigned int new_size, T initValue)
{
	v.resize(new_size, initValue);
}
