/*
 *  tracker.cpp
 *  TrackingProject
 *
 *  Created by Bassel Saba on 5/28/12.
 *  Copyright 2012 Technion. All rights reserved.
 *
 */

#include "Tracker.h"

#include <math.h>

#include <algorithm>
#include <list>
#include <memory>
#include <vector>

#include <opencv2/contrib/contrib.hpp>

// BUG FIX: these two lines were written as "#include RT_SZ 6" /
// "#include PT_SZ 3", which are not valid preprocessor directives.
// They are clearly intended to be macro constants.
#define RT_SZ 6
#define PT_SZ 3

map<int, KeyPoint> convertToMap(vector<KeyPoint> keypoints);
Mat getFundamentalMatrix(map<int, KeyPoint>& keypoints1,
		map<int, KeyPoint>& keypoints2);
Mat convertFeaturesToMat(vector<KeyPoint>& keypoints);
double getMedian(vector<double>& vals);
// Euclidean distance between two 2-D points.
// Uses plain multiplications instead of pow(x, 2): pow is a general
// transcendental and is much slower for small integer exponents.
double getDistance(Point2f pt1, Point2f pt2) {
	double dx = pt1.x - pt2.x;
	double dy = pt1.y - pt2.y;
	return sqrt(dx * dx + dy * dy);
}
// Collects the mapped KeyPoint values of 'keypoints' into a vector,
// preserving the map's (ascending key) iteration order.
vector<KeyPoint> getValuesVector(map<int, KeyPoint>& keypoints) {
	vector<KeyPoint> values;
	values.reserve(keypoints.size());
	map<int, KeyPoint>::iterator pos = keypoints.begin();
	while (pos != keypoints.end()) {
		values.push_back(pos->second);
		++pos;
	}
	return values;
}

/**
 * Main tracking loop.
 *
 * Assumes myCalibrateCamera() (or equivalent setup) already ran: it relies
 * on the member 'cap' being an open capture, 'prev_frame' holding the last
 * grayscale frame, 'frames' containing at least one Frame with keypoints
 * and per-point data, and 'last_key' / 'frame_index' being initialized.
 *
 * Per input frame it:
 *  1. re-locates every keypoint of the previous frame via normalized
 *     cross-correlation template matching inside a search ROI;
 *  2. drops matches whose NCC score is below 'ncc_threshold';
 *  3. drops matches whose frame-to-frame displacement deviates from the
 *     median displacement by 3 pixels or more;
 *  4. re-detects features when fewer than Tracker::min_features survive;
 *  5. stores survivors as a new Frame, shows them, and appends an
 *     annotated frame to "tracking_result.avi".
 */
void Tracker::track() {
	//VideoCapture cap;
	Mat frame;
	//int frame_index = 1;
	vector<KeyPoint> keypoints;
	//if filename is an empty string, take input from the camera
	/*
	 if (filename.length() == 0) {
	 cap.open(0);
	 } else {
	 cap.open(filename);
	 }

	 if (!cap.isOpened()) // check if we succeeded
	 throw "cannot open video capture";
	 Mat edges;

	 cap >> frame;
	 cvtColor(frame, frame, CV_BGR2GRAY);

	 //vector<vector<KeyPoint>	> framesKeypoints;
	 GoodFeaturesToTrackDetector surf(50);

	 vector<KeyPoint> temp_keypoints;
	 surf.detect(frame, keypoints);
	 for (vector<KeyPoint>::iterator it = keypoints.begin(); it
	 != keypoints.end(); it++) {
	 if (!(it->pt.x < 0 || it->pt.x > frame.rows || it->pt.y < 0 || it->pt.y
	 > frame.cols)) {
	 //keypoints.erase(it);
	 temp_keypoints.push_back(*it);

	 }
	 }

	 keypoints = temp_keypoints;
	 map<int, KeyPoint> frame_keypoints = convertToMap(keypoints);
	 map<int,pair<float,float> > first_offsets;
	 //int last_key = keypoints.size();
	 //TODO: this needs adjusting
	 map<int, Mat> first_frame_keypoints_templates;
	 for (map<int, KeyPoint>::iterator it = frame_keypoints.begin(); it
	 != frame_keypoints.end(); it++) {
	 Rect template_rec = getRectangle(it->second.pt.x, it->second.pt.y,
	 TEMPLATE_SIZE, TEMPLATE_SIZE, frame.size());
	 first_frame_keypoints_templates[it->first] = Mat(frame, template_rec);
	 first_offsets[it->first]  = pair<float,float>(it->second.pt.x - template_rec.x,it->second.pt.y - template_rec.y);
	 }
	 Frame* firstInsertedFrame = new Frame(frame_keypoints,
	 first_frame_keypoints_templates,first_offsets);

	 frames.push_back(firstInsertedFrame);

	 SiftDescriptorExtractor extractor;
	 Mat* lastDescMat = new Mat();
	 Mat* newDescMat = new Mat();
	 extractor.compute(frame, keypoints, *lastDescMat);
	 firstInsertedFrame->descriptors = lastDescMat;

	 Frame* lastFrameWithDesc = firstInsertedFrame;

	 */
	// Output video: MJPG @ 25 fps, sized to match the calibration frames.
	VideoWriter writer(string("tracking_result.avi"),
			CV_FOURCC('M', 'J', 'P', 'G'), 25,
			cvSize(prev_frame.cols, prev_frame.rows));

	cout << "(" << prev_frame.size().width << "x" << prev_frame.size().height
			<< ")" << endl;
	/*Mat first_frame = frame;
	 */
	//Mat prev_frame = frame;
	// NOTE(review): 'test' and 'pd' below are never used in live code —
	// they look like debugging leftovers.
	double test = 1;
	int dropped_ncc = 0;
	int dropped_maxdist = 0;
	PointData& pd = *frames.front()->keypoints_data[0];
	for (;;) {
		if (!cap.grab()) {
			break;
		}
		//cout << "heloooo" << endl;
		cap.retrieve(frame); // get a new frame from camera
		cvtColor(frame, frame, CV_BGR2GRAY);
		Size frameSize = frame.size();

		vector<double> distances; //distances of keypoints from last frame to current frame
		map<int, KeyPoint>& lastKeypoints = frames.back()->keypoints;
	//	map<int, Mat> keypointsTemplates = frames.back()->keypoints_template;
		//map<int, pair<float, float> > keypointsOffsets =
			//	frames.back()->keypoints_offsets;
		map<int, KeyPoint> newKeypoints;
		map<int, KeyPoint>& firstKeypoints = frames.front()->keypoints;
		map<int, PointDataP>& keypointsData = frames.back()->keypoints_data;
		// Stage 1: re-locate each keypoint of the previous frame by
		// template matching inside a ROI around its last position.
		for (map<int, KeyPoint>::iterator it = lastKeypoints.begin();
				it != lastKeypoints.end(); it++) {
			KeyPoint current_keypoint = it->second;
			//KeyPoint first_keypoint = firstKeypoints[it->first];
			// NOTE(review): 'octave' is assigned but never used below.
			int octave = TEMPLATE_SIZE;
			/*Rect template_rec = getRectangle(first_keypoint.pt.x,
			 first_keypoint.pt.y, TEMPLATE_SIZE, TEMPLATE_SIZE,
			 frameSize);
			 */
			Rect roiRec = getRectangle(current_keypoint.pt.x,
					current_keypoint.pt.y, ROI_SIZE, ROI_SIZE, frameSize);
			Rect prev_template_rec = getRectangle(current_keypoint.pt.x,
					current_keypoint.pt.y, TEMPLATE_SIZE, TEMPLATE_SIZE,
					frameSize);
			// NOTE(review): cvCreateImage allocates an IplImage that Mat
			// wraps without taking ownership, so these headers are likely
			// leaked every iteration — and matchTemplate reallocates the
			// result anyway, so the pre-allocation looks unnecessary.
			// Confirm before changing.
			Mat res = cvCreateImage(
					cvSize(
							roiRec.width - keypointsData[it->first]->templateWindow.cols
									+ 1,
							roiRec.height - keypointsData[it->first]->templateWindow.rows
									+ 1), IPL_DEPTH_32F, 1);
			Mat res1 = cvCreateImage(
					cvSize(
							roiRec.width - keypointsData[it->first]->templateWindow.cols
									+ 1,
							roiRec.height - keypointsData[it->first]->templateWindow.rows
									+ 1), IPL_DEPTH_32F, 1);
			// Debug view of the template currently being searched for.
			imshow("test",keypointsData[it->first]->templateWindow);
			// Match the stored (first-seen) template against the ROI ...
			matchTemplate(Mat(frame, roiRec),
					keypointsData[it->first]->templateWindow, res,/*CV_TM_CCORR_NORMED*/
					CV_TM_CCOEFF_NORMED);
			// ... and also the previous-frame patch (res1 is currently
			// unused — see the commented-out threshold check below).
			matchTemplate(Mat(frame, roiRec),
					Mat(prev_frame, prev_template_rec), res1,/*CV_TM_CCORR_NORMED*/
					CV_TM_CCOEFF_NORMED);
			Point minloc, maxloc;
			double minval, maxval;

			// Best NCC response location, mapped back into full-frame
			// coordinates via the ROI origin plus the stored template
			// offsets of this point.
			minMaxLoc(res, &minval, &maxval, &minloc, &maxloc, Mat());
			KeyPoint newLocatedKeypoint = current_keypoint;
			newLocatedKeypoint.pt.x = maxloc.x + roiRec.x
					+ keypointsData[it->first]->offsets.first;
			newLocatedKeypoint.pt.y = maxloc.y + roiRec.y
					+ keypointsData[it->first]->offsets.second;
			//testing
			/*if (it->first == 10) {
			 imshow( "Display window" , res);
			 imshow( "prev_frame" , Mat(prev_frame, template_rec));
			 imshow( "current_frame" , Mat(frame,roiRec));
			 cvWaitKey(10000);
			 }*/
			/*
			 if(it->first == 10){
			 Mat tmpMat(frame,roiRec);
			 tmpMat = tmpMat.clone();
			 double tmpX = maxloc.x;
			 double tmpY = maxloc.y;
			 rectangle(tmpMat,Point(tmpX,tmpY),Point(tmpX + template_rec.width,tmpY + template_rec.height),cvScalar(255, 255, 255, 0));
			 imshow( "Display window",  Mat(prev_frame, template_rec));
			 imshow( "Display window1", tmpMat);
			 imshow( "Display window2" , res);
			 cout << "score:" << maxval << endl;
			 cvWaitKey(10000);
			 }*/
			// Keep the match only if its NCC score clears the threshold.
			if (maxval
					>= ncc_threshold /*|| res1.at<float>(maxloc) >= ncc_threshold*/) {
				newKeypoints[it->first] = newLocatedKeypoint;
				distances.push_back(
						getDistance(current_keypoint.pt,
								newLocatedKeypoint.pt));
			} else {
				dropped_ncc++;
				//keypointsTemplates.erase(it->first);
				//keypointsOffsets.erase(it->first);
			}

		}

		//cout << "test = " << test;
		// Stage 2: reject outliers — keep only points whose displacement
		// is within 3 px of the median displacement of all matches.
		map<int, KeyPoint> afterMedianKeyPoints;
		double median_distance = getMedian(distances);
		//cout << "avg = " << median_distance << endl;
		for (map<int, KeyPoint>::iterator it = newKeypoints.begin();
				it != newKeypoints.end(); it++) {
			if (abs(
					getDistance(it->second.pt, lastKeypoints[it->first].pt)
							- median_distance) < 3) {
				afterMedianKeyPoints[it->first] = it->second;

			} else {
				/*cout << abs(getDistance(it->second.pt,lastKeypoints[it->first].pt) - median_distance) << endl;
				 */
				//cout << "median " << median_distance << ",d= "
				//		<< getDistance(it->second.pt,
				//				lastKeypoints[it->first].pt) << endl;
				dropped_maxdist++;
				//keypointsTemplates.erase(it->first);
				//keypointsOffsets.erase(it->first);
			}
		}

		// Carry each survivor's PointData over and bump its frame count.
		// NOTE(review): PointDataP appears to be a shared pointer, so
		// frames_n++ mutates data shared with earlier frames — confirm
		// this is intended.
		map<int, PointDataP> newKeypointsData;
		for (map<int, KeyPoint>::iterator it = afterMedianKeyPoints.begin();
				it != afterMedianKeyPoints.end(); it++) {
			newKeypointsData[it->first] = keypointsData[it->first];
			newKeypointsData[it->first]->frames_n++;
		}
		map<int, KeyPoint>& newFilteredKeyPoints = afterMedianKeyPoints;
		/*
		 Mat fundamentalMatrix = getFundamentalMatrix(lastKeypoints,newKeypoints);
		 vector<Point2f> lastPointsVec;
		 for (map<int,KeyPoint>::iterator it1 = lastKeypoints.begin() ;  it1 != lastKeypoints.end();it1++) {
		 if (newKeypoints.find(it1->first) != newKeypoints.end()) {
		 lastPointsVec.push_back(it1->second.pt);
		 }
		 }
		 vector<Vec3f> epipolarLines;
		 computeCorrespondEpilines(lastPointsVec,1,fundamentalMatrix,epipolarLines);
		 int i = 0;
		 Mat frameWithEpipolarLines;
		 frame.copyTo(frameWithEpipolarLines);
		 double tempMax = 0;
		 for (map<int,KeyPoint>::iterator it1 = newKeypoints.begin() ; i < lastKeypoints.size() && it1 != newKeypoints.end();it1++,i++) {
		 Vec3f line = epipolarLines[i];
		 double fundamentalScore = (line[0]*((it1->second).pt.x) + line[1]*((it1->second).pt.y) + line[2])/sqrt(line[0]*line[0] + line[1]*line[1]);
		 static int maria = 10;
		 if (fundamentalScore > tempMax) {
		 tempMax = fundamentalScore;
		 }
		 if (fundamentalScore < 2.8) {
		 //cout << "score = " << fundamentalScore << endl;
		 newFilteredKeyPoints[it1->first] = it1->second;
		 //maria--;
		 }
		 //newFilteredKeyPoints[it1->first] = it1->second;
		 KeyPoint x = it1->second;
		 Point x2 = Point(x.pt.x+10, 0);
		 x2.y = -(line[0]*x2.x+line[2])/line[1];
		 cv::line(frameWithEpipolarLines, Point(x.pt.x,x.pt.y),  x2,Scalar(255, 255, 255));


		 }
		 //imshow( "Display window1", frameWithEpipolarLines);
		 //cvWaitKey(10000);
		 */
		//add new features if needed
		// Stage 3: replenish features when too few remain. Newly detected
		// corners are accepted only if their KD-tree distance to every
		// surviving keypoint is >= 12 (presumably squared-pixel units from
		// FLANN's default L2 metric — confirm) and they lie in-frame.
		if (newFilteredKeyPoints.size() < Tracker::min_features) {
			cout << "hereee" << endl;
			keypoints.clear();
			GoodFeaturesToTrackDetector surf1(100);
			surf1.detect(frame, keypoints);
			vector<KeyPoint> features_temp = getValuesVector(
					newFilteredKeyPoints);
			Mat features = convertFeaturesToMat(features_temp);
			Mat query = convertFeaturesToMat(keypoints);
			flann::KDTreeIndexParams indexParams(5);
			flann::Index kdtree(features, indexParams);
			// NOTE(review): indices/dists are sized by features_temp.size()
			// but the query has keypoints.size() rows — verify knnSearch
			// resizes them, otherwise this looks like a buffer-size bug.
			Mat indices(features_temp.size(), 1, CV_32S);
			Mat dists(features_temp.size(), 1, CV_32F);
			kdtree.knnSearch(query, indices, dists, 1,
					cv::flann::SearchParams(64));
			for (int k = 0; k < dists.rows; k++) {
				if (dists.at<float>(k, 0) >= 12
						&& !(keypoints[k].pt.x < 0
								|| keypoints[k].pt.x > frame.cols - 1
								|| keypoints[k].pt.y < 0
								|| keypoints[k].pt.y > frame.rows - 1) /* min distance threshold */) {
					//cout << frame.rows;
					//cout << frame.cols;
					alive_keypoints_c++;
					// New point: fresh id, template patch and sub-patch
					// offsets stored for future matching.
					newFilteredKeyPoints[last_key++] = keypoints[k];
					Rect template_rec = getRectangle(keypoints[k].pt.x,
							keypoints[k].pt.y, TEMPLATE_SIZE, TEMPLATE_SIZE,
							frame.size());
					/*keypointsTemplates[last_key - 1] = Mat(frame, template_rec);
					keypointsOffsets[last_key - 1] = pair<float, float>(
							keypoints[k].pt.x - template_rec.x,
							keypoints[k].pt.y - template_rec.y);*/
					//keypointsTemplates
					newKeypointsData[last_key - 1] = PointDataP(
							new PointData(Mat(frame, template_rec),
									pair<float, float>(
											keypoints[k].pt.x - template_rec.x,
											keypoints[k].pt.y
													- template_rec.y)));
				}
			}
		}

		// NOTE(review): Frame is heap-allocated and pushed into 'frames';
		// assuming Tracker's destructor owns and deletes them — confirm.
		Frame* currentFrame = new Frame(newFilteredKeyPoints, newKeypointsData);
		frames.push_back(currentFrame);

		/*
		 if (frame_index % 5 == 0) {
		 vector<KeyPoint> keypointsVector = getValuesVector(newKeypoints);
		 extractor.compute(frame,keypointsVector,*newDescMat);
		 currentFrame->descriptors = newDescMat;
		 map<int,KeyPoint>::iterator it = newKeypoints.begin();
		 for (int k = 0; k < newDescMat->rows; k++,it++) {
		 float keyPointScore = 0;
		 for (int l = 0; l < newDescMat->cols; l++) {
		 keyPointScore+= pow(newDescMat->at<float>(k,l) - lastDescMat->at<float>(k,l),2);
		 }
		 keyPointScore = sqrt(keyPointScore);
		 keyPointScore/= newDescMat->cols;
		 if (keyPointScore > 2.3) {
		 currentFrame->keypoints.erase(it->first);
		 }
		 // cout << tempResult << endl;
		 }

		 //TODO: make checking
		 lastFrameWithDesc->descriptors = NULL;
		 lastFrameWithDesc = currentFrame;
		 Mat* temp = newDescMat;
		 newDescMat = lastDescMat;
		 lastDescMat = temp;
		 }*/

		// Visualize and record the surviving keypoints.
		Mat frameWithKeypoints;
		drawKeypoints(frame, getValuesVector(newFilteredKeyPoints),
				frameWithKeypoints);
		imshow("Display window", frameWithKeypoints);
		writer << frameWithKeypoints;
		cvWaitKey(33);
		// NOTE(review): Mat assignment shares the pixel buffer; if
		// cap.retrieve reuses it next iteration, prev_frame would alias
		// the new frame — frame.clone() may be intended here. Confirm.
		prev_frame = frame;

		//prev_frame = frame;
		frame_index++;
	}
	cout << frame_index << endl;
	//printDataToFile("camera.txt","points.txt");
	cout << dropped_ncc << " dropped by ncc threshold" << endl;
	cout << dropped_maxdist << " dropped by max distance limit" << endl;
	//delete newDescMat;
	//delete lastDescMat;
	return;
}

// Packs keypoint (x, y) coordinates into an Nx2 CV_32F matrix, one row per
// keypoint — the layout expected by cv::flann::Index.
// Uses size_t for the index to avoid the signed/unsigned comparison with
// keypoints.size().
Mat convertFeaturesToMat(vector<KeyPoint>& keypoints) {
	Mat features((int) keypoints.size(), 2, CV_32F);
	for (size_t i = 0; i < keypoints.size(); i++) {
		features.at<float>((int) i, 0) = keypoints[i].pt.x;
		features.at<float>((int) i, 1) = keypoints[i].pt.y;
	}
	return features;
}

void Tracker::printDataToFile(const char* camera_filename,
		const char* points_filename) {
	//not efficient function just for testing
	Frame& first_frame = *frames[0];
	Frame& q_frame = *frames[frames.size() / 4];
	Frame& last_frame = *frames[frames.size() - 1];
	Frame& tq_frame = *frames[3 * frames.size() / 4];
	FILE* camera_file = fopen(camera_filename, "w");
	if (!camera_file) {
		cerr << "Error opening camera file";
		exit(1);
	}
	FILE* points_file = fopen(points_filename, "w");
	if (!points_file) {
		cerr << "Error opening ponints file";
		exit(1);
	}
	for (map<int, KeyPoint>::iterator it = first_frame.keypoints.begin();
			it != first_frame.keypoints.end(); it++) {
		if (last_frame.keypoints.count(it->first)) {
			Point2f pt1 = first_frame.keypoints[it->first].pt;
			Point2f pt2 = last_frame.keypoints[it->first].pt;
			Point2f pt3 = q_frame.keypoints[it->first].pt;
			Point2f pt4 = tq_frame.keypoints[it->first].pt;
			fprintf(points_file, "%f %f %f 4 0 %f %f 1 %f %f 2 %f %f 3 %f %f\n",
					1.0, 1.0, 1.0, pt1.x, pt1.y, pt3.x, pt3.y, pt4.x, pt4.y,
					pt2.x, pt2.y);
		}
		/*
		 string projections;
		 for (int i = 0; i < frames.size(); i++) {
		 Frame* current = frames[i];

		 }*/
	}
	for (int i = 0; i < 4; i++) {
		for (int j = 0; j < 7; j++) {
			fprintf(camera_file, "%f ", 1.0);
		}
		fprintf(camera_file, "\n");
	}
	fclose(points_file);
	fclose(camera_file);
}

/**
 * Estimates the fundamental matrix between two tracked keypoint sets.
 *
 * Only keypoints whose ids appear in both maps are used as
 * correspondences. RANSAC is run with a 3-pixel epipolar distance and
 * 0.99 confidence. (findFundamentalMat may return an empty Mat when there
 * are too few correspondences for RANSAC.)
 */
Mat getFundamentalMatrix(map<int, KeyPoint>& keypoints1,
		map<int, KeyPoint>& keypoints2) {
	vector<Point2f> points1;
	vector<Point2f> points2;

	// Pair up points by shared keypoint id. (Removed the unused
	// 'point_count' local and the redundant keypoints1 lookup.)
	for (map<int, KeyPoint>::iterator it = keypoints1.begin();
			it != keypoints1.end(); it++) {
		if (keypoints2.find(it->first) != keypoints2.end()) {
			points1.push_back(it->second.pt);
			points2.push_back(keypoints2[it->first].pt);
		}
	}
	return findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
}

/**
 * Returns the median of 'vals', sorting the vector in place as a side
 * effect. For an even count, the median is the mean of the two middle
 * elements. Returns 0.0 for an empty vector (the original indexed
 * vals[size / 2 - 1], which underflows size_t when size == 0).
 * The unused 'sum' accumulation loop was dead code and has been removed.
 */
double getMedian(std::vector<double>& vals) {
	size_t size = vals.size();
	if (size == 0) {
		return 0.0; // guard against out-of-bounds access below
	}
	std::sort(vals.begin(), vals.end());

	if (size % 2 == 0) {
		return (vals[size / 2 - 1] + vals[size / 2]) / 2;
	}
	return vals[size / 2];
}

// Builds a rectangle centred on (x, y), extending x_offset horizontally
// and y_offset vertically, clipped to the frame.
// NOTE: the bottom edge is clamped to height - 1 while the right edge is
// clamped to width — that asymmetry is preserved from the original.
Rect Tracker::getRectangle(double x, double y, double x_offset, int y_offset,
		Size frameSize) {
	double top = y - y_offset;
	double bottom = y + y_offset;
	double left = x - x_offset;
	double right = x + x_offset;

	// Clip each edge against the frame boundary.
	top = (top <= 0) ? 0 : top;
	left = (left <= 0) ? 0 : left;
	bottom = (bottom >= frameSize.height - 1) ? frameSize.height - 1 : bottom;
	right = (right >= frameSize.width) ? frameSize.width : right;

	return Rect(left, top, right - left, bottom - top);
}

// Assigns each keypoint a sequential integer id (starting at 0, in vector
// order) and returns the resulting id -> KeyPoint map.
map<int, KeyPoint> convertToMap(vector<KeyPoint> keypoints) {
	map<int, KeyPoint> indexed;
	int id = 0;
	for (vector<KeyPoint>::const_iterator it = keypoints.begin();
			it != keypoints.end(); ++it, ++id) {
		indexed[id] = *it;
	}
	return indexed;
}

/**
 * Interactive chessboard camera calibration and tracker bootstrap.
 *
 * Opens 'filename' (or the default camera when the name is empty), looks
 * for an 8x6 inner-corner chessboard every 10th frame until n_boards = 10
 * complete boards are collected, then runs cv::calibrateCamera with a
 * fixed aspect ratio.
 *
 * Side effects: pushes one Frame per accepted board into 'frames' (the
 * chessboard corners become the initial tracked keypoints, with known 3-D
 * positions on the z = 0 board plane), fills 'points_3d', 'prev_frame',
 * 'frame_index', 'last_key', 'alive_keypoints_c', stores the intrinsics
 * in 'k', assigns per-frame R/t, and initializes 'minimizer' with k.
 *
 * Keyboard: 'p' pauses/resumes, ESC aborts.
 *
 * @throws const char* when the capture cannot be opened.
 */
void Tracker::myCalibrateCamera() {
	int board_w = 8; // Board width in squares
	int board_h = 6; // Board height
	int n_boards = 10; // Number of boards
	int board_n = board_w * board_h;
	Size board_sz = Size(board_w, board_h);

	// Allocate Sotrage
	vector<vector<Point2f> > image_points;
	vector<vector<Point3f> > object_points;
	Mat intrinsic_matrix = Mat(3, 3, CV_32FC1);
	Mat distortion_coeffs = Mat(5, 1, CV_32FC1);

	//for syncing with our data structures for tracking later on
	// NOTE(review): 'keypoints_template' and 'keypoints_offsets' are never
	// written below (superseded by PointData); kept for reference only.
	map<int, KeyPoint> keypoints;
	map<int, Mat> keypoints_template;
	map<int, pair<float, float> > keypoints_offsets;

	vector<Point2f> corners;
	// NOTE(review): 'corner_count' and 'gray_image' are unused leftovers.
	int corner_count;
	int successes = 0;
	int step, frame = 0;
	Mat image, gray_image;
	int skip = 10;
	//VideoCapture cap;
	//if filename is an empty string, take input from the camera
	if (filename.length() == 0) {
		cap.open(0);
	} else {
		cap.open(filename);
	}
	if (!cap.isOpened()) // check if we succeeded
		throw "cannot open video capture";

	cap >> image;
	// Collect n_boards complete chessboard views, sampling every
	// 'skip'-th frame of the stream.
	while (successes < n_boards) {
		if (frame++ % skip != 0) {
			cap >> image;
			continue;
		}
		// Find chessboard corners:
		bool found = findChessboardCorners(image, board_sz, corners,
				CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
						+ CALIB_CB_FAST_CHECK);

		// Get subpixel accuracy on those corners
		/*
		 cvtColor(image, gray_image, CV_BGR2GRAY);
		 FindCornerSubPix( gray_image, corners, cvSize( 11, 11 ),
		 cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
		 */

		// Draw it
		//drawChessboardCorners(image, board_sz, corners, found);
		//imshow("Calibration", image);
		// If we got a good board, add it to our data
		// (accept only if every one of the board_n corners was found;
		// note the int vs size_t comparison here).
		if (corners.size() == board_n) {
			vector<Point2f> tmp_image_pts;
			vector<Point3f> tmp_object_pts;
			map<int, PointDataP> keypoints_data;
			map<int, PointDataP>* last_keypoints_data = NULL;
			if(successes > 0){
				last_keypoints_data = &frames.back()->keypoints_data;
			}
			Mat tmp_mat;
			cvtColor(image, tmp_mat, CV_BGR2GRAY);
			// Object points live on the board plane: (row, col, 0) in
			// square units.
			for (int j = 0; j < board_n; ++j) {
				tmp_image_pts.push_back(Point2f(corners[j].x, corners[j].y));
				tmp_object_pts.push_back(
						Point3f(j / board_w, j % board_w, 0.0f));
				if (successes == 0) { //first frame
					// First board: create fresh PointData per corner with
					// its grayscale template patch, patch offsets, and a
					// known (final) 3-D position.
					Rect template_rec = getRectangle(corners[j].x, corners[j].y,
							TEMPLATE_SIZE, TEMPLATE_SIZE, image.size());
					keypoints[j] = KeyPoint(corners[j], 1, 1, 1, 1, 0);

					//keypoints_template[j] = Mat(tmp_mat, template_rec);
					//keypoints_offsets[j]  = pair<float,float>(corners[j].x - template_rec.x,corners[j].y - template_rec.y);
					keypoints_data[j] = PointDataP(
							new PointData(Mat(tmp_mat, template_rec),
									pair<float, float>(
											corners[j].x - template_rec.x,
											corners[j].y - template_rec.y)));
					keypoints_data[j]->status = RECSTATUS_FINAL;
					keypoints_data[j]->pt = Point3f(j / board_w, j % board_w, 0.0f);
					points_3d.push_back(Point3f(j / board_w, j % board_w, 0.0f));

				} else {
					// Later boards: update the corner position and share
					// the existing PointData (PointDataP appears to be a
					// shared pointer, so frames_n++ is visible to all
					// frames holding it — confirm intended).
					keypoints[j] = KeyPoint(corners[j], 1, 1, 1, 1, 0);
					keypoints_data[j] = PointDataP((*last_keypoints_data)[j]);
					keypoints_data[j]->frames_n++;
				}
			}
			// NOTE(review): 'keypoints' is declared outside the loop and
			// overwritten in place each board; assumes Frame copies it.
			Frame* currentFrame = new Frame(keypoints,keypoints_data);

			frames.push_back(currentFrame);
			image_points.push_back(tmp_image_pts);
			object_points.push_back(tmp_object_pts);
			successes++;

		}
		// Pause on 'p' (resume on 'p', abort pause on ESC); ESC aborts
		// the whole calibration.
		int c = waitKey(30);
		if (c == 'p') {
			c = 0;
			while (c != 'p' && c != 27) {
				c = waitKey(250);
			}
		}
		if (c == 27)
			return;
		//TODO: check if last image
		cap >> image; // Get next image
	} // End collection while loop
	// Bootstrap the tracker state used by track().
	last_key = board_n;
	Mat tmp_mat;
	cvtColor(image, tmp_mat, CV_BGR2GRAY);
	prev_frame = tmp_mat;
	frame_index = successes;
	cout << "successes : " << successes << endl;
	// Seed focal lengths; CV_CALIB_FIX_ASPECT_RATIO keeps fx/fy ratio.
	intrinsic_matrix.at<float>(0, 0) = 1.0;
	intrinsic_matrix.at<float>(1, 1) = 1.0;
	vector<Mat> rvecs = vector<Mat>(n_boards);
	vector<Mat> tvecs = vector<Mat>(n_boards);
	calibrateCamera(object_points, image_points, image.size(), intrinsic_matrix,
			distortion_coeffs, rvecs, tvecs, CV_CALIB_FIX_ASPECT_RATIO);
	// Assign per-view extrinsics. Assumes frames.size() == n_boards here
	// (true when the loop above ran to completion) — confirm.
	for(int i = 0;i< frames.size(); i++){
		frames[i]->R = rvecs[i];
		frames[i]->t = tvecs[i];
	}
	k = intrinsic_matrix;
	minimizer.initWithK(k);
	alive_keypoints_c += board_n;
	cout << "Intrinsic params:" << endl << intrinsic_matrix << endl;
}

// Bundle-adjustment iteration callback: always tells the solver to keep
// iterating. The iteration index, current error and user data are unused.
bool f(int iteration, double norm_error, void* user_data){
	(void) iteration;
	(void) norm_error;
	(void) user_data;
	return true;
}

/**
 * Prepares the inputs for sparse bundle adjustment (cv::LevMarqSparse):
 * per-frame 2-D projections and a visibility mask for every key id in
 * [0, last_key).
 *
 * NOTE: the bundleAdjust call itself is commented out (and Rs/Ts are left
 * empty because the loop filling them is commented out too), so this
 * function currently has no observable effect.
 */
void Tracker::performSBA(){

	//vector<Point3f> temp_points = points_3d;
	//TODO: make copy of points and R and T
	//prepare R and T
	TermCriteria criteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 70, 1e-10);
	vector<Mat> Rs;
	vector<Mat> Ts;
	vector<vector<int> > visibility;
	vector<Mat> distCoeffs;
//	for(int i = 0 ;i<frames.size();i++){
//		Rs.push_back(frames[i]->R);
//		Ts.push_back(frames[i]->t);
//	}
	//Prepare projections
	vector<vector<Point2f> > projections;
	for(int i = 0 ;i<frames.size();i++){
		// Zero distortion assumed for every frame.
		distCoeffs.push_back((cv::Mat_<double>(4,1) << 0, 0, 0, 0));
		vector<Point2f> temp_projections;
		vector<int> temp_visibility;
		map<int,KeyPoint>& temp_map = frames[i]->keypoints;
		map<int,PointDataP>& temp_data = frames[i]->keypoints_data;
		// A point is "visible" in this frame only if it was tracked here,
		// its 3-D position has been initialized, and it has been seen in
		// at least 2 frames; otherwise a (-1, -1) placeholder is stored.
		// NOTE(review): temp_data[j] uses operator[], which default-inserts
		// a null PointDataP when j is missing from keypoints_data — this
		// assumes keypoints and keypoints_data always share keys; confirm.
		for (int j=0; j<last_key;j++){

			if(temp_map.find(j)==temp_map.end() || temp_data[j]->status == RECSTATUS_NOTINITIALIZED
					|| temp_data[j]->frames_n < 2)
			{
				temp_projections.push_back(Point2f(-1, -1));
				temp_visibility.push_back(0);
			}

			else {
				temp_projections.push_back(temp_map[j].pt);
				temp_visibility.push_back(1);
			}
		}
		projections.push_back(temp_projections);
		visibility.push_back(temp_visibility);
	}
	LevMarqSparse   lms;
	//BundleAdjustCallback
	//lms.bundleAdjust(points_3d,projections,visibility,k,Rs,Ts,distCoeffs,criteria,&f,NULL);

}


