#include "cvxCalib3d.h"
#include "nvMath.h"
#include "cvxMat.h"

using nv::vec2f;
using nv::vec3f;
using cvx::CvxMat_d;

#define CV_SQRT2 1.41421356


// Copy the image points through unchanged.
// NOTE(review): Hartley-style isotropic normalization (translate to the
// centroid, scale so the mean distance from the origin is sqrt(2)) was
// implemented here but deliberately disabled with an early return, leaving
// ~25 lines of unreachable code; the dead code has been removed.  If
// normalization is ever re-enabled, reimplement the centroid/scale steps
// here and guard against an empty input before dividing by pts.size().
static void normalizePoints(const vector<Point2f> &imagePoints, vector<Point2f> &pts)
{
	pts = imagePoints;
}

// Copy the 3D points through unchanged.
// NOTE(review): like the 2D overload, the isotropic normalization
// (translate to the centroid, scale so the mean distance from the origin is
// sqrt(2)) was disabled with an early return, making the remainder of the
// function unreachable; the dead code has been removed.  Reimplement the
// centroid/scale steps here if normalization is re-enabled, guarding
// against an empty input before dividing by pts.size().
void normalizePoints(const vector<Point3f> &imagePoints, vector<Point3f> &pts)
{
	pts = imagePoints;
}


// Zhang-style closed-form camera calibration ("A Flexible New Technique for
// Camera Calibration"): fit a homography H per view between the planar object
// points and that view's image points, stack the linear constraints (rows
// v12 and v11 - v22) on b = [B11 B12 B22 B13 B23 B33] (the image of the
// absolute conic), solve for b as the eigenvector of V'V with the smallest
// eigenvalue, and recover the intrinsics from b.
//
// NOTE(review): this implementation is unfinished/experimental.  The debug
// block after cv::eigen() ends in an unconditional `return true;`, so the
// intrinsic-recovery code below it never executes and cameraMatrix,
// distCoeffs, rvecs, tvecs are never written.  `size` and
// `computerExtrinsic` are unused.
bool CvxCalib3d::closedFormSolution(const vector<Point3f> & objectPoints,
									const vector<vector<Point2f> >& imagePoints,
									const CvSize &size,
									CV_IN_OUT Mat& cameraMatrix,
									CV_IN_OUT Mat& distCoeffs,
									CV_OUT vector<Mat>& rvecs, CV_OUT vector<Mat>& tvecs,
									bool computerExtrinsic)
{

	vector<Mat> Homographyvec;
	//computer homography matrix H (one 3x3 homography per view)
	{
		//normalize object points
		// (normalizePoints is currently a pass-through copy -- normalization
		// is disabled, see its definition above)
		vector<Point3f> normObjPts;
		normalizePoints(objectPoints, normObjPts);
		vector<Point2f> normObj2dPts(normObjPts.size());
		for (int i = 0; i<normObjPts.size(); ++i)
		{
			// drop z: the closed-form method works on the rig plane, so only
			// (x, y) of the object points is used
			normObj2dPts[i] = Point2f(normObjPts[i].x, normObjPts[i].y);
		}

		//normalize image points and computer homography H
		vector<vector<Point2f> > normImagePoints;
		for (int i = 0; i<imagePoints.size(); ++i)
		{
			vector<Point2f> pts;
			normalizePoints(imagePoints[i], pts);
			normImagePoints.push_back(pts);

			// robust homography fit (least-median-of-squares)
			Mat H = cv::findHomography(cv::Mat(normObj2dPts), cv::Mat(pts), CV_LMEDS);

			// debug view of H; not referenced afterwards
			CvxMat_d d_h(&H);
			//check H: reproject every object point through H and compare with
			//the measured image point (residual print is commented out)
			{
				for (int j = 0; j<normObj2dPts.size(); ++j)
				{
					cv::Mat objp = Mat(3, 1, CV_64F);
					objp.at<double>(0, 0) = normObj2dPts[j].x;
					objp.at<double>(1, 0) = normObj2dPts[j].y;
					objp.at<double>(2, 0) = 1.0;

					cv::Mat imgp = H * objp;
					// homogeneous divide
					float x = imgp.at<double>(0, 0)/imgp.at<double>(2, 0);
					float y = imgp.at<double>(1, 0)/imgp.at<double>(2, 0);
				//	printf("delta %f %f\n", pts[j].x - x, pts[j].y - y);
				}
			}
			Homographyvec.push_back(H);
		}
	}

	//computer b
	/************************************************************************/
	/* vij = [
		hi1hj1 , hi1hj2  + hi2 hj1 , hi2 hj2 ,             
		hi3 hj1 + hi1hj3 , hi3 hj2  + hi2 hj3 , hi3 hj3]
	*/
	/************************************************************************/
	//float *vijVec = new float[2*6*Homographyvec.size()];  //v12, v11 - v22
	//memset(vijVec, 0, sizeof(float) * 2 * 6 * Homographyvec.size());

	// two constraint rows per view: v12 (orthogonality of the first two
	// rotation columns) and v11 - v22 (equal norm)
	Mat v = Mat(2*Homographyvec.size(), 6, CV_64F);

	for (int i = 0; i<Homographyvec.size(); ++i)
	{
		// NOTE: h<row><col> here indexes H by (row, col); the vij formulas
		// below consume the COLUMN vectors hi = (h1i, h2i, h3i)
		double h11, h12, h13, h21, h22, h23, h31, h32, h33;
		h11 = Homographyvec[i].at<double>(0, 0);
		h12 = Homographyvec[i].at<double>(0, 1);
		h13 = Homographyvec[i].at<double>(0, 2);

		h21 = Homographyvec[i].at<double>(1, 0);
		h22 = Homographyvec[i].at<double>(1, 1);
		h23 = Homographyvec[i].at<double>(1, 2);

		h31 = Homographyvec[i].at<double>(2, 0);
		h32 = Homographyvec[i].at<double>(2, 1);
		h33 = Homographyvec[i].at<double>(2, 2);

		double v12[6];
		v12[0] = h11 * h21;
		v12[1] = h11 * h22 + h12 * h21;
		v12[2] = h12 * h22;
		v12[3] = h13 * h21 + h11 * h23;
		v12[4] = h13 * h22 + h12 * h23;
		v12[5] = h13 * h23;

		double v11[6];
		v11[0] = h11 * h11;
		v11[1] = h11 * h12 + h12 * h11;
		v11[2] = h12 * h12;
		v11[3] = h13 * h11 + h11 * h13;
		v11[4] = h13 * h12 + h12 * h13;
		v11[5] = h13 * h13;

		double v22[6];
		v22[0] = h21 * h21; 
		v22[1] = h21 * h22 + h22 * h21;
		v22[2] = h22 * h22;
		v22[3] = h23 * h21 + h21 * h23;
		v22[4] = h23 * h22 + h22 * h23;
		v22[5] = h23 * h23;

		for (int j = 0; j<6; ++j)
		{
			v.at<double>(2*i,   j) = v12[j];
			v.at<double>(2*i+1, j) = v11[j] - v22[j];
		}
		
	}

	//test cv::eigen
// 	{
// 		Mat vv = Mat(2, 2, CV_64F);
// 		Mat eigenValue;
// 		Mat eigenVector;
// 
// 		vv.at<double>(0, 0) = 2.0;
// 		vv.at<double>(0, 1) = 1.0;
// 		vv.at<double>(1, 0) = 1.0;
// 		vv.at<double>(1, 1) = 2.0;	
// 
// 		cv::eigen(vv, eigenValue, eigenVector);
// 		
// 		CvxMat_d eigen_cvx(&eigenVector);
// 		for (int i = 0; i<2; ++i)
// 		{
// 			Mat t_eigenvector = Mat(2, 1, CV_64F);
// 			for (int k = 0; k<2; ++k)
// 			{
// 				t_eigenvector.at<double>(k, 0) = eigenVector.at<double>(i, k);
// 			}
// 			float x = t_eigenvector.at<double>(0, 0);
// 			float y = t_eigenvector.at<double>(1, 0);
// 			Mat hxeigenvector = vv * t_eigenvector;
// 			x = hxeigenvector.at<double>(0, 0);
// 			y = hxeigenvector.at<double>(1, 0);
// 			for (int k = 0; k<2; ++k)
// 			{
// 				printf("%f ", hxeigenvector.at<double>(k, 0) - eigenValue.at<double>(i, 0)*eigenVector.at<double>(i, k));
// 			}
// 			printf("\n");
// 		}
// 		return true;
// 	}

	

	// Solve V b = 0 (||b|| = 1) via the symmetric 6x6 normal matrix V'V:
	// b is the eigenvector belonging to the smallest eigenvalue.
	// cv::eigen stores eigenvectors as ROWS ordered by decreasing eigenvalue,
	// so row 5 is the wanted solution (read below at L_b extraction).
	v = v.t()*v;
	Mat eigenVector = Mat(6, 6, CV_64F);
	Mat eigenValue  = Mat(6, 1, CV_64F);
//	std::cout<<v<<endl;
	cv::eigen(v, eigenValue, eigenVector);
//	std::cout<<v<<endl;

	// NOTE(review): debug verification of the eigen decomposition -- prints
	// the residuals of (V'V)x - lambda*x for every eigenpair and then
	// unconditionally returns, making everything below UNREACHABLE.
	{
		for (int i = 0; i<6; ++i)
		{
			Mat t_eigenvector = Mat(6, 1, CV_64F);
			for (int k = 0; k<6; ++k)
			{
				t_eigenvector.at<double>(k, 0) = eigenVector.at<double>(i, k);
			}
			float x = t_eigenvector.at<double>(0, 0);
			float y = t_eigenvector.at<double>(1, 0);
			Mat hxeigenvector = v * t_eigenvector;
			x = hxeigenvector.at<double>(0, 0);
			y = hxeigenvector.at<double>(1, 0);
			for (int k = 0; k<6; ++k)
			{
				printf("%f ", hxeigenvector.at<double>(k, 0) - eigenValue.at<double>(i, 0)*eigenVector.at<double>(i, k));
			}
			printf("\n");
		}
		return true;
	}


	

	// b = eigenvector of the smallest eigenvalue (last row of eigenVector)
	float _b[6];
	for (int i = 0; i<6; ++i)
	{
		_b[i] = eigenVector.at<double>(5, i);
	}
	//computer intrinsic matrix with c = 0

	{
		// Closed-form intrinsics from b (Zhang, Appendix B):
		// v0 = principal point y, lambda = scale, alpha/beita ("beta") =
		// focal lengths fx/fy, c = skew, u0 = principal point x.
		// NOTE(review): results are only stored in locals -- they are never
		// written back to cameraMatrix.
		float B11, B12, B22, B13, B23, B33;
		B11 = _b[0];
		B12 = _b[1];
		B22 = _b[2];
		B13 = _b[3];
		B23 = _b[4];
		B33 = _b[5];

		float v0, lambda, alpha, beita, c, u0;
		v0     = (B12*B13 - B11*B23)/(B11*B22 - B12*B12);
		lambda = B33 - (B13*B13 + v0*(B12*B13 - B11*B23))/B11;
		alpha  = sqrt(lambda/B11);
		beita  = sqrt(lambda*B11/(B11*B22 - B12*B12));
		c  = -B12*alpha*alpha*beita/lambda;
		u0 = c*v0/alpha - B13*alpha*alpha/lambda;

		int test_a = 0;

	}

	//computer rotation and translation
	return true;
}


// Return a 3x3 camera matrix of the requested element type: a converted copy
// of cameraMatrix0 when it is already 3x3, otherwise the identity.
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
{
	Mat result = Mat::eye(3, 3, rtype);
	const bool sameShape = (cameraMatrix0.size() == result.size());
	if (sameShape)
	{
		cameraMatrix0.convertTo(result, rtype);
	}
	return result;
}

// Return an 8-element distortion-coefficient vector of the requested element
// type, oriented the same way (row vs. column) as distCoeffs0.  When
// distCoeffs0 already holds 4, 5 or 8 coefficients they are converted into
// the leading entries; the remaining entries stay zero.
static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
{
	const bool isColumn = (distCoeffs0.cols == 1);
	Mat distCoeffs = Mat::zeros(isColumn ? Size(1, 8) : Size(8, 1), rtype);

	const Size sz = distCoeffs0.size();
	const bool supported =
		sz == Size(1, 4) || sz == Size(4, 1) ||
		sz == Size(1, 5) || sz == Size(5, 1) ||
		sz == Size(1, 8) || sz == Size(8, 1);
	if (supported)
	{
		// View over the leading entries of the zeroed output.
		Mat head(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
		distCoeffs0.convertTo(head, rtype);
	}
	return distCoeffs;
}

// Flatten the per-view point vectors into the single-row matrices expected by
// the legacy C calibration API.
//   objPtMat  : 1 x total, Point3f -- all object points, views concatenated
//   imgPtMat  : 1 x total, Point2f -- all image points, same order
//   imgPtMat2 : optional second image-point set (may be NULL; sized/filled
//               only when non-NULL)
//   npoints   : 1 x nimages, CV_32S -- number of points in each view
// Asserts that every view has matching point counts across the inputs.
static void collectCalibrationData( const vector<vector<Point3f> >& objectPoints,
								   const vector<vector<Point2f> >& imagePoints,
								   const vector<vector<Point2f> >& imagePoints2,
								   Mat& objPtMat, Mat& imgPtMat, Mat* imgPtMat2,
								   Mat& npoints )
{
	size_t i, j = 0, ni = 0, nimages = objectPoints.size(), total = 0;
	CV_Assert(nimages > 0 && nimages == imagePoints.size() &&
		(!imgPtMat2 || nimages == imagePoints2.size()));

	// First pass: validate per-view sizes and count the total point number.
	for( i = 0; i < nimages; i++ )
	{
		ni = objectPoints[i].size();
		CV_Assert(ni == imagePoints[i].size() && (!imgPtMat2 || ni == imagePoints2[i].size()));
		total += ni;
	}

	npoints.create(1, (int)nimages, CV_32S);
	objPtMat.create(1, (int)total, DataType<Point3f>::type);
	imgPtMat.create(1, (int)total, DataType<Point2f>::type);
	Point2f* imgPtData2 = 0;

	if( imgPtMat2 )
	{
		imgPtMat2->create(1, (int)total, DataType<Point2f>::type);
		imgPtData2 = imgPtMat2->ptr<Point2f>();
	}

	Point3f* objPtData = objPtMat.ptr<Point3f>();
	Point2f* imgPtData = imgPtMat.ptr<Point2f>();

	// Second pass: copy each view's points into the flat buffers.
	// j is the write offset; the `j += ni` in the increment uses the ni set
	// at the top of the PREVIOUS iteration's body, so the offset advances by
	// the size of the view just copied.
	for( i = 0; i < nimages; i++, j += ni )
	{
		ni = objectPoints[i].size();
		((int*)npoints.data)[i] = (int)ni;
		std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
		std::copy(imagePoints[i].begin(), imagePoints[i].end(), imgPtData + j);
		if( imgPtMat2 )
			std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
	}
}

/* finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
// Legacy C-API calibration (local port of OpenCV's cvCalibrateCamera2).
// Runs Levenberg-Marquardt over the parameter vector
//   [fx fy cx cy k1 k2 p1 p2 k3 k4 k5 k6 | (rvec, tvec) per view]
// (NINTRINSIC = 12 intrinsic entries, then 6 extrinsic entries per view)
// and returns the RMS reprojection error over all points.
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector";
double CvxCalib3d::cvCalibrateCamera2( const CvMat* objectPoints,
								  const CvMat* imagePoints, const CvMat* npoints,
								  CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
								  CvMat* rvecs, CvMat* tvecs, int flags )
{
	const int NINTRINSIC = 12;		// fx, fy, cx, cy + 8 distortion coeffs
	Ptr<CvMat> matM, _m, _Ji, _Je, _err;
	CvLevMarq solver;
	double reprojErr = 0;

	// A: 3x3 camera matrix backing store; k: distortion coefficients
	double A[9], k[8] = {0,0,0,0,0,0,0,0};
	CvMat matA = cvMat(3, 3, CV_64F, A), _k;
	int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
	double aspectRatio = 0.;

	// 0. check the parameters & allocate buffers
	if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
		!CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
		CV_Error( CV_StsBadArg, "One of required vector arguments is not a valid matrix" );

	if( imageSize.width <= 0 || imageSize.height <= 0 )
		CV_Error( CV_StsOutOfRange, "image width and height must be positive" );

	if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
		(npoints->rows != 1 && npoints->cols != 1) )
		CV_Error( CV_StsUnsupportedFormat,
		"the array of point counters must be 1-dimensional integer vector" );

	nimages = npoints->rows*npoints->cols;
	// element stride through npoints, whether it is a row or a column vector
	npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);

	if( rvecs )
	{
		cn = CV_MAT_CN(rvecs->type);
		if( !CV_IS_MAT(rvecs) ||
			(CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
			((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
			(rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
			CV_Error( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
			"1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
	}

	if( tvecs )
	{
		cn = CV_MAT_CN(tvecs->type);
		if( !CV_IS_MAT(tvecs) ||
			(CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
			((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
			(tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
			CV_Error( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
			"1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
	}

	if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
		CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
		cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
		CV_Error( CV_StsBadArg,
		"Intrinsic parameters must be 3x3 floating-point matrix" );

	if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
		CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
		(distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
		(distCoeffs->cols*distCoeffs->rows != 4 &&
		distCoeffs->cols*distCoeffs->rows != 5 &&
		distCoeffs->cols*distCoeffs->rows != 8) )
		CV_Error( CV_StsBadArg, cvDistCoeffErr );

	for( i = 0; i < nimages; i++ )
	{
		ni = npoints->data.i[i*npstep];
		if( ni < 4 )
		{
			char buf[100];
			sprintf( buf, "The number of points in the view #%d is < 4", i );
			CV_Error( CV_StsOutOfRange, buf );
		}
		maxPoints = MAX( maxPoints, ni );
		total += ni;
	}

	matM = cvCreateMat( 1, total, CV_64FC3 );
	_m = cvCreateMat( 1, total, CV_64FC2 );

	cvConvertPointsHomogeneous( objectPoints, matM );
	cvConvertPointsHomogeneous( imagePoints, _m );

	nparams = NINTRINSIC + nimages*6;
	// Jacobian / error buffers sized for the largest view; their ->rows is
	// shrunk per view inside the optimization loop below.
	_Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 );
	_Je = cvCreateMat( maxPoints*2, 6, CV_64FC1 );
	_err = cvCreateMat( maxPoints*2, 1, CV_64FC1 );
	cvZero( _Ji );

	_k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
	// Fewer than 8 (resp. 5) supplied coefficients: fix the higher-order
	// terms so the solver does not touch them.
	if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
	{
		if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
			flags |= CV_CALIB_FIX_K3;
		flags |= CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;
	}

	// 1. initialize intrinsic parameters & LM solver
	if( flags & CV_CALIB_USE_INTRINSIC_GUESS )
	{
		cvConvert( cameraMatrix, &matA );
		if( A[0] <= 0 || A[4] <= 0 )
			CV_Error( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
		if( A[2] < 0 || A[2] >= imageSize.width ||
			A[5] < 0 || A[5] >= imageSize.height )
			CV_Error( CV_StsOutOfRange, "Principal point must be within the image" );
		if( fabs(A[1]) > 1e-5 )
			CV_Error( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
		if( fabs(A[3]) > 1e-5 || fabs(A[6]) > 1e-5 ||
			fabs(A[7]) > 1e-5 || fabs(A[8]-1) > 1e-5 )
			CV_Error( CV_StsOutOfRange,
			"The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
		A[1] = A[3] = A[6] = A[7] = 0.;
		A[8] = 1.;

		if( flags & CV_CALIB_FIX_ASPECT_RATIO )
			aspectRatio = A[0]/A[4];
		cvConvert( distCoeffs, &_k );
	}
	else
	{
		// No guess: require a planar rig (all z approx. 0), then estimate
		// the intrinsics from per-view homographies.
		CvScalar mean, sdv;
		cvAvgSdv( matM, &mean, &sdv );
		if( fabs(mean.val[2]) > 1e-5 || fabs(sdv.val[2]) > 1e-5 )
			CV_Error( CV_StsBadArg,
			"For non-planar calibration rigs the initial intrinsic matrix must be specified" );
		for( i = 0; i < total; i++ )
			((CvPoint3D64f*)matM->data.db)[i].z = 0.;

		if( flags & CV_CALIB_FIX_ASPECT_RATIO )
		{
			aspectRatio = cvmGet(cameraMatrix,0,0);
			aspectRatio /= cvmGet(cameraMatrix,1,1);
			if( aspectRatio < 0.01 || aspectRatio > 100 )
				CV_Error( CV_StsOutOfRange,
				"The specified aspect ratio (=A[0][0]/A[1][1]) is incorrect" );
		}
		CvxCalib3d::cvInitIntrinsicParams2D( matM, _m, npoints, imageSize, &matA, aspectRatio );
	}

	solver.init( nparams, 0, cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON) );

	{
		// Seed the parameter vector and build the fixed-parameter mask from
		// the CV_CALIB_FIX_* flags (mask[i] == 0 freezes parameter i).
		double* param = solver.param->data.db;
		uchar* mask = solver.mask->data.ptr;

		param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];
		param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];
		param[8] = k[4]; param[9] = k[5]; param[10] = k[6]; param[11] = k[7];

		if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
			mask[0] = mask[1] = 0;
		if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
			mask[2] = mask[3] = 0;
		if( flags & CV_CALIB_ZERO_TANGENT_DIST )
		{
			param[6] = param[7] = 0;
			mask[6] = mask[7] = 0;
		}
		if( !(flags & CV_CALIB_RATIONAL_MODEL) )
			flags |= CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5 + CV_CALIB_FIX_K6;
		if( flags & CV_CALIB_FIX_K1 )
			mask[4] = 0;
		if( flags & CV_CALIB_FIX_K2 )
			mask[5] = 0;
		if( flags & CV_CALIB_FIX_K3 )
			mask[8] = 0;
		if( flags & CV_CALIB_FIX_K4 )
			mask[9] = 0;
		if( flags & CV_CALIB_FIX_K5 )
			mask[10] = 0;
		if( flags & CV_CALIB_FIX_K6 )
			mask[11] = 0;
	}

	// 2. initialize extrinsic parameters (per-view rvec/tvec via PnP)
	for( i = 0, pos = 0; i < nimages; i++, pos += ni )
	{
		CvMat _Mi, _mi, _ri, _ti;
		ni = npoints->data.i[i*npstep];

		cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
		cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );

		cvGetCols( matM, &_Mi, pos, pos + ni );
		cvGetCols( _m, &_mi, pos, pos + ni );

		cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
	}

	// 3. run the optimization
	for(;;)
	{
		const CvMat* _param = 0;
		CvMat *_JtJ = 0, *_JtErr = 0;
		double* _errNorm = 0;
		// updateAlt tells us what to compute this iteration: the Jacobian
		// products (_JtJ/_JtErr non-NULL) and/or just the error norm.
		bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
		double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;

		if( flags & CV_CALIB_FIX_ASPECT_RATIO )
		{
			param[0] = param[1]*aspectRatio;
			pparam[0] = pparam[1]*aspectRatio;
		}

		// Unpack the current solver state into A and k.
		A[0] = param[0]; A[4] = param[1]; A[2] = param[2]; A[5] = param[3];
		k[0] = param[4]; k[1] = param[5]; k[2] = param[6]; k[3] = param[7];
		k[4] = param[8]; k[5] = param[9]; k[6] = param[10]; k[7] = param[11];

		if( !proceed )
			break;

		reprojErr = 0;

		for( i = 0, pos = 0; i < nimages; i++, pos += ni )
		{
			CvMat _Mi, _mi, _ri, _ti, _dpdr, _dpdt, _dpdf, _dpdc, _dpdk, _mp, _part;
			ni = npoints->data.i[i*npstep];

			cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
			cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );

			cvGetCols( matM, &_Mi, pos, pos + ni );
			cvGetCols( _m, &_mi, pos, pos + ni );

			// Shrink the shared buffers to this view's point count and carve
			// out the Jacobian sub-blocks cvProjectPoints2 fills in.
			_Je->rows = _Ji->rows = _err->rows = ni*2;
			cvGetCols( _Je, &_dpdr, 0, 3 );
			cvGetCols( _Je, &_dpdt, 3, 6 );
			cvGetCols( _Ji, &_dpdf, 0, 2 );
			cvGetCols( _Ji, &_dpdc, 2, 4 );
			cvGetCols( _Ji, &_dpdk, 4, NINTRINSIC );
			cvReshape( _err, &_mp, 2, 1 );

			if( _JtJ || _JtErr )
			{
				cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
					(flags & CV_CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
					(flags & CV_CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
					(flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
			}
			else
				cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp );

			// residual: projected - measured (stored back into _err via _mp)
			cvSub( &_mp, &_mi, &_mp );

			if( _JtJ || _JtErr )
			{
				// Accumulate the block-sparse normal equations: the intrinsic
				// block sums over views; each view's extrinsic block and its
				// coupling to the intrinsics occupy their own sub-rectangles.
				cvGetSubRect( _JtJ, &_part, cvRect(0,0,NINTRINSIC,NINTRINSIC) );
				cvGEMM( _Ji, _Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );

				cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,NINTRINSIC+i*6,6,6) );
				cvGEMM( _Je, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );

				cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,0,6,NINTRINSIC) );
				cvGEMM( _Ji, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );

				cvGetRows( _JtErr, &_part, 0, NINTRINSIC );
				cvGEMM( _Ji, _err, 1, &_part, 1, &_part, CV_GEMM_A_T );

				cvGetRows( _JtErr, &_part, NINTRINSIC + i*6, NINTRINSIC + (i+1)*6 );
				cvGEMM( _Je, _err, 1, 0, 0, &_part, CV_GEMM_A_T );
			}

			double errNorm = cvNorm( &_mp, 0, CV_L2 );
			reprojErr += errNorm*errNorm;
		}
		if( _errNorm )
			*_errNorm = reprojErr;
	}

	// 4. store the results
	cvConvert( &matA, cameraMatrix );
	cvConvert( &_k, distCoeffs );

	for( i = 0; i < nimages; i++ )
	{
		CvMat src, dst;
		if( rvecs )
		{
			src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
			if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
			{
				// caller wants full 3x3 rotation matrices: convert via Rodrigues
				dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
					rvecs->data.ptr + rvecs->step*i );
				cvRodrigues2( &src, &matA );
				cvConvert( &matA, &dst );
			}
			else
			{
				dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
					rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
				rvecs->data.ptr + rvecs->step*i );
				cvConvert( &src, &dst );
			}
		}
		if( tvecs )
		{
			src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
			dst = cvMat( 3, 1, CV_MAT_TYPE(tvecs->type), tvecs->rows == 1 ?
				tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
			tvecs->data.ptr + tvecs->step*i );
			cvConvert( &src, &dst );
		}
	}

	// RMS reprojection error over all points
	return std::sqrt(reprojErr/total);
}

double CvxCalib3d::calibrateCamera( const vector<vector<Point3f> >& objectPoints,
						   const vector<vector<Point2f> >& imagePoints,
						   Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
						   vector<Mat>& rvecs, vector<Mat>& tvecs, int flags )
{
	int rtype = CV_64F;
	cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
	distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
	if( !(flags & CALIB_RATIONAL_MODEL) )
		distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);

	size_t i, nimages = objectPoints.size();
	CV_Assert( nimages > 0 );
	Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
	collectCalibrationData( objectPoints, imagePoints, vector<vector<Point2f> >(),
		objPt, imgPt, 0, npoints );
	CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints;
	CvMat _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
	CvMat _rvecM = rvecM, _tvecM = tvecM;

	double reprojErr = CvxCalib3d::cvCalibrateCamera2(&_objPt, &_imgPt, &_npoints, imageSize,
		&_cameraMatrix, &_distCoeffs, &_rvecM,
		&_tvecM, flags );
	rvecs.resize(nimages);
	tvecs.resize(nimages);
	for( i = 0; i < nimages; i++ )
	{
		rvecM.row((int)i).copyTo(rvecs[i]);
		tvecM.row((int)i).copyTo(tvecs[i]);
	}
	return reprojErr;
}

//Mat(const CvMat* m, bool copyData=false);
// Normalize a 1xN CV_64FC2 point matrix in place (Hartley-style): translate
// so the centroid is at the origin, then scale so the mean distance from the
// origin is sqrt(2).  The applied translation and scale are also returned as
// 3x3 homogeneous matrices T and S (full normalization transform = S * T).
static void normalizeData(CvMat *normMat, Mat &T, Mat &S)
{
	Scalar avg_val = cvAvg(normMat);
	// Move the points to their centroid.
	cvSubS(normMat, avg_val, normMat);

	T = Mat::eye(3, 3, CV_64FC1);
	T.at<double>(0, 2) = -avg_val.val[0];
	T.at<double>(1, 2) = -avg_val.val[1];

	// Mean distance from the origin.  Ptr<CvMat> releases the temporaries
	// automatically (no manual cvReleaseMat; consistent with the Ptr<CvMat>
	// usage elsewhere in this file and safe on any early exit).
	Ptr<CvMat> xnormMat = cvCreateMat( 1, normMat->cols, CV_64FC1);
	Ptr<CvMat> ynormMat = cvCreateMat( 1, normMat->cols, CV_64FC1);

	cvSplit(normMat, xnormMat, ynormMat, NULL, NULL);

	cvMul(xnormMat, xnormMat, xnormMat);  //x * x
	cvMul(ynormMat, ynormMat, ynormMat);  //y * y
	cvAdd(xnormMat, ynormMat, ynormMat);  // x*x + y*y

	double d = 0.0;
	for (int i = 0; i<ynormMat->cols; ++i)
	{
		d += sqrt(ynormMat->data.db[i]);
	}
	d /= ynormMat->cols;

	// Guard against a degenerate input where all points coincide (d == 0):
	// leave the scale at identity instead of dividing by zero.
	double scale = d > DBL_EPSILON ? CV_SQRT2/d : 1.0;
	cvScale(normMat, normMat, scale);

	S = Mat::eye(3, 3, CV_64FC1);
	S.at<double>(0, 0) = scale;
	S.at<double>(1, 1) = scale;
}

void CvxCalib3d::cvInitIntrinsicParams2D( const CvMat* objectPoints,
							 const CvMat* imagePoints, const CvMat* npoints,
							 CvSize imageSize, CvMat* cameraMatrix,
							 double aspectRatio )
{
	Ptr<CvMat> matA, _b, _allH, _allK;

	int i, j, pos, nimages, total, ni = 0;
	double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	double H[9], f[2];
	CvMat _a = cvMat( 3, 3, CV_64F, a );
	CvMat matH = cvMat( 3, 3, CV_64F, H );
	CvMat _f = cvMat( 2, 1, CV_64F, f );

	assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
		CV_IS_MAT_CONT(npoints->type) );
	nimages = npoints->rows + npoints->cols - 1;

	if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
		CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
		(CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
		CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
		CV_Error( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );

	if( objectPoints->rows != 1 || imagePoints->rows != 1 )
		CV_Error( CV_StsBadSize, "object points and image points must be a single-row matrices" );

	matA = cvCreateMat( 2*nimages, 2, CV_64F );
	_b = cvCreateMat( 2*nimages, 1, CV_64F );
	a[2] = (imageSize.width - 1)*0.5;
	a[5] = (imageSize.height - 1)*0.5;
	_allH = cvCreateMat( nimages, 9, CV_64F );

	total = cvRound(cvSum(npoints).val[0]);

	// extract vanishing points in order to obtain initial value for the focal length
	for( i = 0, pos = 0; i < nimages; i++, pos += ni )
	{
		double* Ap = matA->data.db + i*4;
		double* bp = _b->data.db + i*2;
		ni = npoints->data.i[i];
		double h[3], v[3], d1[3], d2[3];
		double n[4] = {0,0,0,0};
		CvMat _m, matM;
		cvGetCols( objectPoints, &matM, pos, pos + ni );
		cvGetCols( imagePoints, &_m, pos, pos + ni );

		cvFindHomography( &matM, &_m, &matH );

		//

		memcpy( _allH->data.db + i*9, H, sizeof(H) );

		H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
		H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];

		for( j = 0; j < 3; j++ )
		{
			double t0 = H[j*3], t1 = H[j*3+1];
			h[j] = t0; v[j] = t1;
			d1[j] = (t0 + t1)*0.5;
			d2[j] = (t0 - t1)*0.5;
			n[0] += t0*t0; n[1] += t1*t1;
			n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
		}

		for( j = 0; j < 4; j++ )
			n[j] = 1./sqrt(n[j]);

		for( j = 0; j < 3; j++ )
		{
			h[j] *= n[0]; v[j] *= n[1];
			d1[j] *= n[2]; d2[j] *= n[3];
		}

		Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
		Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
		bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
	}

	cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
	a[0] = sqrt(fabs(1./f[0]));
	a[4] = sqrt(fabs(1./f[1]));
	if( aspectRatio != 0 )
	{
		double tf = (a[0] + a[4])/(aspectRatio + 1.);
		a[0] = aspectRatio*tf;
		a[4] = tf;
	}

	cvConvert( &_a, cameraMatrix );
}