#include "cvxVCamera.h"

// Default constructor: puts every camera parameter into a known default
// state (see initCamera for the actual values).
CvxVCamera::CvxVCamera()
{
	initCamera();
}
// Destructor. No manual cleanup needed: the cv::Mat and vector members
// release their own storage, and the cached element pointers (_fx, _k1, ...)
// are non-owning views into those Mats.
CvxVCamera::~CvxVCamera()
{

}

// Configure calibration flags and the expected image size.
// @param flags     OpenCV calibration flag bits (passed to calibrateCamera).
// @param imageSize image resolution; must be strictly positive in both axes.
// @return true on success, false if imageSize is invalid.
bool CvxVCamera::config(int flags, const CvSize &imageSize)
{
	assert(imageSize.width > 0 && imageSize.height > 0);
	// The assert above is compiled out under NDEBUG, so also reject bad
	// sizes at runtime instead of silently accepting them.
	if (imageSize.width <= 0 || imageSize.height <= 0)
	{
		return false;
	}
	m_flags = flags;
	m_imageSize = imageSize;
	return true;
}

// Accessor: 3x3 intrinsic matrix K (CV_64F; identity until calibrated).
const Mat & CvxVCamera::getIntrinsic(void)
{
	return m_intrinsic;
}
// Accessor: 5x1 distortion coefficients (k1 k2 p1 p2 k3, CV_64F).
const Mat & CvxVCamera::getDistortion(void)
{
	return m_distortion;
}
// Accessor: per-view rotation vectors estimated by calibration.
const vector<Mat> & CvxVCamera::getRotationVecs(void)
{
	return m_rvecs;
}
// Accessor: per-view rotation matrices.
// NOTE(review): nothing in this file populates m_rMats — confirm it is
// filled elsewhere before relying on this accessor.
const vector<Mat> & CvxVCamera::getRotationMatrixes(void)
{
	return m_rMats;
}
// Accessor: per-view translation vectors estimated by calibration.
const vector<Mat> & CvxVCamera::getTranslationVecs(void)
{
	return m_tvecs;
}
// Write the camera intrinsics as plain text to `filename`:
// line 1: fx fy cx cy  k1 k2 p1 p2 k3
// line 2: calibration RMS re-projection error
// On failure to open the file it reports to stderr and returns (no throw).
void CvxVCamera::saveIntrinsic(const string &filename)const
{
	FILE *pf = fopen(filename.c_str(), "w");
	assert(pf);
	if (!pf)
	{
		// fopen can legitimately fail (bad path, permissions); the assert
		// above is a no-op in release builds, so bail out instead of
		// dereferencing a null FILE*.
		fprintf(stderr, "saveIntrinsic: can not open %s for writing\n", filename.c_str());
		return;
	}

	//focal length (stored as double in m_intrinsic; keep full precision
	//instead of narrowing to float as before)
	double fx = m_intrinsic.at<double>(0, 0);
	double fy = m_intrinsic.at<double>(1, 1);
	fprintf(pf, "%8.2f %8.2f ", fx, fy);

	//principal point
	double cx = m_intrinsic.at<double>(0, 2);
	double cy = m_intrinsic.at<double>(1, 2);
	fprintf(pf, "%8.2f %8.2f ", cx, cy);

	//distortion: radial k1, k2, k3 and tangential p1, p2
	double k1 = m_distortion.at<double>(0, 0);
	double k2 = m_distortion.at<double>(1, 0);
	double p1 = m_distortion.at<double>(2, 0);
	double p2 = m_distortion.at<double>(3, 0);
	double k3 = m_distortion.at<double>(4, 0);
	fprintf(pf, "%8.6f %8.6f %8.6f %8.6f %8.6f\n", k1, k2, p1, p2, k3);

	//RMS re-projection error from the last calibration run
	fprintf(pf, "%8.6f\n", m_rms);
	fclose(pf);
}

// Reset every camera parameter to its default and cache raw element
// pointers into the intrinsic/distortion matrices for fast access.
void CvxVCamera::initCamera()
{
	m_intrinsic    = Mat::eye(3, 3, CV_64F);   // K = identity until calibrated
	m_distortion   = Mat::zeros(5, 1, CV_64F); // k1 k2 p1 p2 k3
	m_parameterErr = Mat::zeros(8, 1, CV_64F);
	m_stdDev =       Mat::zeros(2, 1, CV_64F);
	m_aspectRatio = 1.0;
	m_flags = 0;

	// sensor aperture in mm — presumably a 1/3" sensor (4.8x3.6); TODO confirm
	m_apertureWidth = 4.8;
	m_apertureHeight = 3.6;
	m_focalLength = -1.0;  // negative value: presumably "not yet estimated"
	// NOTE(review): this default principal point equals the full image size
	// (1920,1080), not the image centre (960,540) — looks suspicious; confirm.
	m_principalPoint = cvPoint(1920, 1080);
	m_fovx = 0;
	m_fovy = 0;
	m_rms = 0;
	m_imageSize = cvSize(1920, 1080);
	// Cache direct pointers into the Mat buffers above. This must happen
	// AFTER the Mats are (re)allocated: Mat::ptr addresses the current
	// buffer, so re-assigning a Mat invalidates these pointers.
	_fx = (double*)m_intrinsic.ptr(0, 0);
	_fy = (double*)m_intrinsic.ptr(1, 1);
	_cx = (double*)m_intrinsic.ptr(0, 2);
	_cy = (double*)m_intrinsic.ptr(1, 2);
	_k1 = (double*)m_distortion.ptr(0, 0);
	_k2 = (double*)m_distortion.ptr(1, 0);
	_p1 = (double*)m_distortion.ptr(2, 0);
	_p2 = (double*)m_distortion.ptr(3, 0);
	_k3 = (double*)m_distortion.ptr(4, 0);
}

bool CvxVCamera::calibration( const vector<vector<cv::Point3f> > &objectPoints, 
						const vector<vector<cv::Point2f> > &imagePoints,
						const CvSize imageSize, const int flags, Mat& cameraMatrix, Mat& distCoeffs,
						vector<Mat>& rvecs, vector<Mat>& tvecs, double &rms)
{
	rvecs.resize(imagePoints.size());
	tvecs.resize(imagePoints.size());
	for (unsigned int i = 0; i<rvecs.size(); ++i)
	{
		rvecs[i]  = Mat(3, 1, CV_64F);
		tvecs[i]  = Mat(3, 1, CV_64F);
	}
	
	rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
							 		 distCoeffs, rvecs, tvecs, flags);	

	bool ok = cv::checkRange(cameraMatrix) && cv::checkRange(distCoeffs);
	return ok;

}



/************************************************************************/
/* 
			CvxOpenCvCamera
*/
/************************************************************************/

// CvxOpenCvCamera: plain-OpenCV calibration variant. Both setCalibData and
// calib below are unimplemented stubs, so this class is currently inert.
CvxOpenCvCamera::CvxOpenCvCamera()
{

}
CvxOpenCvCamera::~CvxOpenCvCamera()
{

}

// Not implemented: always reports failure without storing any input.
// TODO: mirror CvxDKKCamera::setCalibData when this path is needed.
bool CvxOpenCvCamera::setCalibData(const vector<vector<Point2f>> &ptsVec,  const vector<Point3f> &targetPts, 
								   const vector<IplImage *> &images, const CvxBoard *pBoard)
{
	return false;

}
// Not implemented: always reports failure.
bool CvxOpenCvCamera::calib()
{
	return false;
}


/************************************************************************/
/* 
	   CvxDKKCamera
*/
/************************************************************************/

// CvxDKKCamera: calibration with iterative frontal-image corner refinement
// (see calib below).
CvxDKKCamera::CvxDKKCamera()
{

}
CvxDKKCamera::~CvxDKKCamera()
{
	// Intentionally empty. The IplImage pointers in m_pImages are only
	// borrowed (setCalibData copies the pointers, never clones) — presumably
	// owned by the caller; confirm before adding cleanup here.
}

// Store the calibration input.
// @param ptsVec    per-view detected 2D image points (>= 3 views required)
// @param targetPts 3D board-model points matching each view's 2D points
// @param images    source images; pointers are borrowed, not owned
// @param pBoard    board description used to relocate corners during calib
// @return true if the input is consistent and was stored, false otherwise.
bool CvxDKKCamera::setCalibData(const vector<vector<Point2f>> &ptsVec,  const vector<Point3f> &targetPts, 
				  const vector<IplImage *> &images, const CvxBoard *pBoard)
{
	assert(ptsVec.size() >= 3);
	assert(images.size() == ptsVec.size());
	assert(pBoard);
	// Validate at runtime as well: the asserts vanish under NDEBUG, and
	// ptsVec[0] below would be undefined behavior on an empty vector.
	if (ptsVec.size() < 3 || images.size() != ptsVec.size() || pBoard == NULL)
	{
		return false;
	}
	assert(ptsVec[0].size() == targetPts.size());
	if (ptsVec[0].size() != targetPts.size())
	{
		return false;
	}

	m_imgPts = ptsVec;
	m_objPts = targetPts;
	m_pImages = images;
	// const_cast required because the member is declared non-const while the
	// parameter is const; calib() calls non-const CvxBoard methods on it.
	m_pBoard = const_cast<CvxBoard *>(pBoard);
	return true;
}
bool CvxDKKCamera::calib()
{
	if (m_imgPts.size() < 3)
	{
		return false;
	}
	//estimate initial camera parameters
	vector<vector<Point3f>> objPtsVec(m_imgPts.size(), m_objPts);
	bool isOk = CvxVCamera::calibration(objPtsVec, m_imgPts, m_imageSize, m_flags, m_intrinsic, m_distortion, m_rvecs,
										m_tvecs, m_rms);
	saveIntrinsic(string("initial_intrinsic.txt"));

	//do until convergence
	Mat rmapx = cv::Mat(m_imageSize.height, m_imageSize.width, CV_32FC1);
	Mat rmapy = cv::Mat(m_imageSize.height, m_imageSize.width, CV_32FC1);
	vector<Point3f> objPts;
	int h = m_imageSize.height;
	int w = m_imageSize.width;
	float unitLength = m_pBoard->GetUnitLength();
	for (int y = 0; y<h; ++y)
	{
		for (int x = 0; x<w; ++x)
		{
			float xx = x - unitLength;
			float yy = y - unitLength;
			objPts.push_back(Point3f(xx, yy, 0));
		}
	}
	IplImage *frontaImage = cvCloneImage(m_pImages[0]);
	IplImage *copyImage = cvCloneImage(m_pImages[0]);
	IplImage *showImage = cvCreateImage(cvSize(w/2, h/2), IPL_DEPTH_8U, 3);
	assert(copyImage);
	vector<vector<Point2f>> pts2dFronta;
	for (int i = 0; i<2; ++i)
	{
			
		vector<Point2f> mapPts(m_imageSize.height*m_imageSize.width);
		for (int j = 0; j<m_pImages.size(); ++j)
		{
			//1. undistort and un-project	
			//computer remap			
			cv::projectPoints(objPts, m_rvecs[j], m_tvecs[j], m_intrinsic, m_distortion, mapPts);
			for (int y = 0; y<h; ++y)
			{
				for (int x = 0; x<w; ++x)
				{
					int idx = y * w + x;
					if (mapPts[idx].x < 0)
					{
						mapPts[idx].x = 0;
					}
					if (mapPts[idx].y < 0)
					{
						mapPts[idx].y = 0;
					}
					if (mapPts[idx].x >=w)
					{
						mapPts[idx].x = w-1;
					}
					if (mapPts[idx].y >=h)
					{
						mapPts[idx].y = h-1;
					}
					rmapx.at<float>(y, x) = mapPts[idx].x;
					rmapy.at<float>(y, x) = mapPts[idx].y;
				}
			}
			cvCopy(m_pImages[j], copyImage);
			cv::remap(Mat(copyImage), Mat(frontaImage), rmapx, rmapy, CV_INTER_LINEAR); //CV_INTER_LINEAR
//  			cvResize(frontaImage, showImage);
//  			cvShowImage("frontal image", showImage);
//  			cvShowImage("original image", m_pImages[j]);
//  			cvWaitKey(0);
			
			//2. localize control points in frontal image
			vector<CvxMarker> preMarkers;
			vector<CvxMarker> curMarker;
			vector<Point2f> vpts2d;  //v for virtual
			isOk = m_pBoard->FindCorners(frontaImage, false, preMarkers, curMarker, vpts2d);
			if (isOk)
			{
				//3. re-porject to original image				
				vector<Point3f> vpts3d(vpts2d.size());
				for (int k = 0; k<vpts2d.size(); ++k)
				{
					vpts3d[k].x = vpts2d[k].x - unitLength;
					vpts3d[k].y = vpts2d[k].y - unitLength;
					vpts3d[k].z = 0.0;
				}

				vpts2d.clear();
				cv::projectPoints(vpts3d, m_rvecs[j], m_tvecs[j], m_intrinsic, m_distortion, vpts2d); 
				
//  				cvCopy(m_pImages[j], copyImage);
//  				m_pBoard->DrawCorners(copyImage, vpts2d);
//  				cvResize(copyImage, showImage);
//  				cvShowImage("updated corner position", showImage);
//  				cvWaitKey(0);		
			}
			else
			{
				fprintf(stderr, "can not found points in frontal image!\n");
				vpts2d = m_imgPts[j];
			}
			pts2dFronta.push_back(vpts2d);			
		}		
		//4. parameter fitting and check reprojection error
		m_flags = m_flags | CV_CALIB_USE_INTRINSIC_GUESS;
		isOk = CvxVCamera::calibration(objPtsVec, pts2dFronta, m_imageSize, m_flags, m_intrinsic, m_distortion, 
									   m_rvecs, m_tvecs, m_rms);
		saveIntrinsic(string("update_intrinsic.txt"));
		pts2dFronta.clear();

	}

	if (copyImage)
	{
		cvReleaseImage(&copyImage);
	}
	
	


	return true;
}