#include "CVApp.h"
#include "Util.h"
#include <cmath>
#include <cstdio>
#include <string>
#include <vector>

#define IMPORT_FROM_BUNDLER 1
#define MIN_MATCHES_FOR_CONNECTION 16
// Return the application to a clean state: drop all loaded images and
// reconstructed cameras, and restore the default photo count.
void CCVApp::Reset()
{
	m_Cameras.clear();
	m_Images.clear();
	m_NumberOfPhotos = 2;
}

// Register the mesh used to render cameras and set the defaults used by
// the internal (non-bundler) pipeline.
void CCVApp::Init(CCameraMesh* pCamMesh)
{
	m_bUsePreloadedMatches = true;
	m_NumberOfPhotos = 4;
	m_Meshes.push_back(pCamMesh);
}

void CCVApp::LoadImages()
{
	cv::Mat im[1464-1448+1];

	im[0] = cv::imread("Pics\\IMG_1448.JPG");
	im[1] = cv::imread("Pics\\IMG_1449.JPG");
	im[2] = cv::imread("Pics\\IMG_1450.JPG");
	im[3] = cv::imread("Pics\\IMG_1451.JPG");
	im[4] = cv::imread("Pics\\IMG_1452.JPG");
	im[5] = cv::imread("Pics\\IMG_1453.JPG");
	im[6] = cv::imread("Pics\\IMG_1454.JPG");
	im[7] = cv::imread("Pics\\IMG_1455.JPG");
	im[8] = cv::imread("Pics\\IMG_1456.JPG");
	im[9] = cv::imread("Pics\\IMG_1457.JPG");
	im[10] = cv::imread("Pics\\IMG_1458.JPG");
	im[11] = cv::imread("Pics\\IMG_1459.JPG");
	im[12] = cv::imread("Pics\\IMG_1460.JPG");
	im[13] = cv::imread("Pics\\IMG_1461.JPG");
	im[14] = cv::imread("Pics\\IMG_1462.JPG");
	im[15] = cv::imread("Pics\\IMG_1463.JPG");
	im[16] = cv::imread("Pics\\IMG_1464.JPG");


	for (UINT i = 0; i < m_NumberOfPhotos; i++)
	{
		VISIO_ASSERT(im[i].data);
		m_Images.push_back(im[i]);
	}
}

FILE* CCVApp::GetImageFile(const char* SceneName, char* mode)
{
	string ImageFileName = SceneName;
	ImageFileName += ".img";
	FILE* pImgFile = fopen(ImageFileName.c_str(), mode);
	VISIO_ASSERT(pImgFile);

	return pImgFile;
}


void CCVApp::DumpImageFile(const char* OutFileName)
{
	FILE* pImgFile = CCVApp::GetImageFile(OutFileName, "w");

	fprintf(pImgFile, "%d\n", m_ImageDataBase.GetNumImages());
	for (UINT i = 0; i < m_ImageDataBase.GetNumImages(); i++)
	{
		CImageData* pImgData = m_ImageDataBase.GetImageData(i);
		fprintf(pImgFile, "%s\n", pImgData->GetName().c_str());
	}

	fclose(pImgFile);

	


}

void CCVApp::DumpOutputToFile(const char* OutFileName)
{
	FILE* pOut = fopen(OutFileName, "w");

	fprintf(pOut, "CV Result\n");
	//num cameras, num points 
	fprintf(pOut, "%d %d\n", m_Cameras.size(), m_PointCloud.Size());
	for (CamerasVector::iterator it = m_Cameras.begin(); it != m_Cameras.end(); it++)
	{
		CCamera* CurrCam = *it;
		CurrCam->PrintCameraToFile(pOut);
	}

	m_PointCloud.PrintToFile(pOut);

	fclose(pOut);

	DumpImageFile(OutFileName);
}

void CCVApp::RunPhotoStich( char* SceneParams )
{
	
	CreateImageAndTracksDataBase(SceneParams);
	
	Create3DScenesFromOrderedPhotos();

	string SceneOutput = m_SceneName + ".scene";
	DumpOutputToFile(SceneOutput.c_str());
}

// Build the reference camera at the world origin: identity rotation, zero
// translation, and intrinsics K = diag(f, f, 1) taken from the image's
// focal length. The returned CCamera is heap-allocated; ownership passes
// to the caller.
// NOTE(review): P is created here as a 3x3 zero matrix, while
// GetCameraParamsFromEMatrix builds the projection as ZEROS(3,4) - confirm
// whether 3x3 is intentional or should be the 3x4 projection.
CCamera* CCVApp::InitCanonicalCamera(CImageData* pImageData)
{
	cv::Mat R = EYE(3);
	cv::Mat t(3, 1, CV_64F, cv::Scalar(0));	// zero translation column vector
	cv::Mat K = INIT_MAT(3, 3, pImageData->GetFocal(), 0, 0, 0, pImageData->GetFocal(), 0, 0, 0, 1);
	cv::Mat P = ZEROS(3,3);
	SCameraParams CameraParams(R, t, K , P);
	return new CCamera(GetCameraMesh(), CameraParams, pImageData);
}

// Construct the first two cameras of the reconstruction from an image
// match: the first camera is canonical (identity pose) and the second is
// recovered from the pair's essential matrix.
CamerasVector CCVApp::GetIntialCameraPair( CImageMatch* pMatch )
{
	CamerasVector InitialPair;

	// camera #1: canonical (R = I, t = 0)
	CCamera* pFirstCam = InitCanonicalCamera(pMatch->GetImageData(FIRST));
	InitialPair.push_back(pFirstCam);

	// camera #2: decomposed from E, valid because camera #1 is canonical
	Mat EMatrix = pMatch->GetEMatrix();
	SCameraParams SecondParams = GetCameraParamsFromEMatrix(EMatrix, pFirstCam->GetFocal(), pMatch);
	InitialPair.push_back(new CCamera(GetCameraMesh(), SecondParams, pMatch->GetImageData(SECOND)));

	return InitialPair;
}

// Seed the reconstruction: pick the best initial image pair, build its two
// cameras and triangulate their shared matches into the point cloud.
// Returns the number of cameras created.
UINT CCVApp::InitiliazeFirstPair(CamerasVector& Cameras, CPointCloud& PointCloud)
{
	// the caller must hand us empty containers
	VISIO_ASSERT(Cameras.empty() && PointCloud.empty());

	ImageIndexMatch BestPairIndex = m_ImageDataBase.GetBestInitialPair();
	CImageMatch* pBestMatch = m_ImageDataBase.GetMatchFromIndex(BestPairIndex);

	Cameras = GetIntialCameraPair(pBestMatch);
	PointCloud.CreatePoints(pBestMatch, Cameras);

	return Cameras.size();
}

// Create the initial camera pair and register it (with its triangulated
// points) in the SfM pool; returns the total number of cameras registered.
UINT CCVApp::RegisterFirstPair(CamerasVector& Cameras, CPointCloud& PointCloud)
{
	InitiliazeFirstPair(Cameras, PointCloud);
	UINT NumRegistered = m_SfmPool.AddCameras(Cameras, PointCloud);
	return NumRegistered;
}

// Try to add the next best camera to the reconstruction. On success the
// newly visible points are triangulated and the pool is updated; on
// failure the current camera count is returned unchanged, which the
// caller interprets as "no progress".
UINT CCVApp::RegisterNextCam(CamerasVector& Cameras, CPointCloud& PointCloud)
{
	if (!InitiliazeNextBestCamera(Cameras, PointCloud))
	{
		return Cameras.size();
	}

	PointCloud.AddPoints(Cameras, m_ImageDataBase);
	return m_SfmPool.AddCameras(Cameras, PointCloud);
}

void CCVApp::Create3DScenesFromOrderedPhotos()
{
	m_NumCamera = RegisterFirstPair(m_Cameras, m_PointCloud);

	while (m_NumCamera < m_NumberOfPhotos)
	{
		//todo make sure we actually added a camera
		UINT newCameras = RegisterNextCam(m_Cameras, m_PointCloud);
		if (m_NumCamera == newCameras)
		{
			// no new cameras;
			break;
		}
		m_NumCamera = newCameras;
		CONSOLE(".\n");
	}	
}

//choose which of the rotation matrix is correct based on the correspondence
Mat CCVApp::GetCorrectConfiguration(Mat& t, Mat Ra, Mat Rb, Mat K, CImageMatch* pMatch) const
{
	CTriangulator Triangulator;
	UINT QzPos = 0;
	UINT QzNeg = 0;
	UINT PzPos = 0;
	UINT PzNeg = 0;
	Mat R;

	int NumberOfPoints = (int)pMatch->GetNumMatches();

	double* lPoints = new double[2 * NumberOfPoints];
	double* rPoints = new double[2 * NumberOfPoints];

	cv::Mat K1 = pMatch->GetIntrinMatrixForImage(FIRST);
	cv::Mat K2 = pMatch->GetIntrinMatrixForImage(SECOND);

	pMatch->GetMatchedPointsAsDouble(lPoints, rPoints);

	double* rPointsNorm = new double[NumberOfPoints * 2];
	double* lPointsNorm = new double[NumberOfPoints * 2];

	pMatch->NormalizePoints(NumberOfPoints, rPoints, lPoints, rPointsNorm, lPointsNorm, K1, K2);

	for (UINT i = 0; i < NumberOfPoints; i++)
	{
		double error = 0;
		Point3d Q = Triangulator.TriangulateBundler(lPointsNorm + 2*i, rPointsNorm + 2*i, EYE(3), ZEROS(3, 1), Ra, t, &error);
		Mat P = INIT_MAT(3, 1, Q.x, Q.y, Q.z);
		Mat PQ = Ra * P + t; //tranformed reprojected point

		double Qz = Q.z;
		double Pz = PQ.at<double>(2,0);

		if (Qz > 0)
		{
			QzPos++;
		}
		else
		{
			QzNeg++;
		}

		if (Pz > 0)
		{
			PzPos++;
		}
		else
		{
			PzNeg++;
		}
	}

	if (QzPos < QzNeg && PzPos < PzNeg)
	{
		R = Ra;
	}
	else if (QzPos > QzNeg && PzPos > PzNeg)
	{
		R = Ra;
		t *= -1;
	}
	else
	{
		// check rb
		QzNeg = QzPos = PzPos = PzNeg = 0;

		for (UINT i = 0; i < NumberOfPoints; i++)
		{
			double error = 0;
			Point3d Q = Triangulator.TriangulateBundler(lPointsNorm + 2*i, rPointsNorm + 2*i, EYE(3), ZEROS(3, 1), Rb, t, &error);
			Mat P = INIT_MAT(3, 1, Q.x, Q.y, Q.z);
			Mat PQ = Rb * P + t; //tranformed reprojected point

			double Qz = Q.z;
			double Pz = PQ.at<double>(2,0);

			if (Qz > 0)
			{
				QzPos++;
			}
			else
			{
				QzNeg++;
			}

			if (Pz > 0)
			{
				PzPos++;
			}
			else
			{
				PzNeg++;
			}
		}

		if (QzPos < QzNeg && PzPos < PzNeg)
		{
			R = Rb;
		}
		else if (QzPos > QzNeg && PzPos > PzNeg)
		{
			R = Rb;
			t *= -1;
		}
		else
		{
			VISIO_ASSERT(0);
		}

	}
	delete[] lPoints;
	delete[] rPoints;
	delete[] rPointsNorm;
	delete[] lPointsNorm;

	return R;
}

// Decompose an essential matrix into full camera parameters:
// E = U diag(s,s,0) V^T gives two rotation candidates (U W V^T and
// U W^T V^T) and t = third column of U; the correct configuration is
// selected by the cheirality test. K is built as diag(focal, focal, 1).
// Returns R, t, K and the assembled 3x4 projection [R | t].
SCameraParams CCVApp::GetCameraParamsFromEMatrix(Mat E, double focal, CImageMatch* pMatch) const
{
	cv::Mat Cam = ZEROS(3,4);

	double KDiag[] = {focal , focal , 1.0};
	cv::Mat K = Utils::Diag(3, KDiag);

	Utils::SSVD svd = Utils::ComputeSVD(E, USE_BUNDLE_MATH);

	CONSOLE("\nSVD\n");
	// a perfect essential matrix has two equal non-zero singular values;
	// report their relative spread as a quality metric
	double error = (svd.w.at<double>(0,0) - svd.w.at<double>(1,0)) / svd.w.at<double>(1,0);
	CONSOLE("error: %.9e\n", error);

	//building up the camera matrix from the svd

	// t is the third column of U; clone so the data is continuous
	Mat u3 = svd.u.col(2);
	Mat t = u3.clone();

	Mat W = (Mat_<double>(3,3) << 0, 1, 0, -1, 0, 0, 0, 0, 1);
	Mat Wt = W.t();

	// the two rotation candidates must be proper rotations (det = +1);
	// flip the sign when the determinant is negative (-1.0, not the
	// previous float literal, since the matrices are double precision)
	Mat UWVt = svd.u*W*svd.vt;
	if (cv::determinant(UWVt) < 0)
	{
		UWVt = UWVt*-1.0;
	}

	Mat UWtVt = svd.u*Wt*svd.vt;
	if (cv::determinant(UWtVt) < 0)
	{
		UWtVt = UWtVt*-1.0;
	}

	// pick the candidate passing the cheirality test (may negate t)
	cv::Mat R = GetCorrectConfiguration(t, UWVt, UWtVt, K, pMatch);

	// assemble the 3x4 projection [R | t]
	t.copyTo(Cam.col(3));
	Mat subCam = Cam(Range(0,3), Range(0,3));
	R.copyTo(subCam);
	return SCameraParams(R, t, K, Cam);
}

// The camera display mesh is, by convention, the first entry of m_Meshes.
// Asserts if the list is empty or the first mesh has the wrong type.
CCameraMesh* CCVApp::GetCameraMesh() const
{
	VISIO_ASSERT(!m_Meshes.empty());

	CCameraMesh* pCamMesh = dynamic_cast<CCameraMesh*>(m_Meshes[0]);
	VISIO_ASSERT(pCamMesh);

	return pCamMesh;
}

// Strip the extension from the scene parameter file path
// ("dir\scene.txt" -> "dir\scene"). With no '.' present, find_last_of
// returns npos and substr yields the whole string.
string CCVApp::GetSceneName(string SceneParamsFile)
{
	size_t DotPos = SceneParamsFile.find_last_of(".");
	return SceneParamsFile.substr(0, DotPos);
}

// Return the directory portion of the scene parameter file path -
// everything before the last backslash.
string CCVApp::GetSceneDirectory(string SceneParamsFile)
{
	size_t SlashPos = SceneParamsFile.find_last_of("\\");
	return SceneParamsFile.substr(0, SlashPos);
}

void CCVApp::CreateImageAndTracksDataBase(char* SceneParams)
{
	m_SceneName = GetSceneName(SceneParams);
	m_SceneDirectory = GetSceneDirectory(SceneParams);
	if (IMPORT_FROM_BUNDLER)
	{
		ImportImageAndTracksFromBundler(SceneParams);
	}
	else
	{
		VISIO_ASSERT(0)
		CreateImageAndTracksDataBaseInternal();
	}
}

// Internal (non-Bundler) database construction. Currently disabled: the
// leading VISIO_ASSERT(0) fires before any useful work happens.
// NOTE(review): the ImageMatches pointer is never stored or released here
// - if this path is ever re-enabled, decide who owns the result.
void CCVApp::CreateImageAndTracksDataBaseInternal()
{
	VISIO_ASSERT(0);	// pipeline not ready - Bundler import is used instead
	LoadImages();
	MatchedImages* ImageMatches;
	//getting an ordered set of matched photos
	if (m_bUsePreloadedMatches)
	{
		// reuse matches computed by a previous run
		ImageMatches = m_IMatcher.MatchFromFile(m_Images, "match.out");
	}
	else
	{
		VISIO_ASSERT(0);
		//this uses single precision; the algorithm requires double precision
		ImageMatches = m_IMatcher.Match(m_Images);
	}
}
// Read one whitespace-delimited token from pFile into str and return str
// (convenient for chaining into other calls).
// NOTE: "%s" does not bound the write - callers must supply a buffer large
// enough for the longest token (paths here use MAX_PATH buffers).
char* CCVApp::GetStringFromFile(FILE* pFile, char* str)
{
	// previously the fscanf result was ignored; on a short or corrupt file
	// str stayed uninitialized. Return an empty string instead.
	if (fscanf(pFile, "%s", str) != 1)
	{
		str[0] = '\0';
	}
	return str;
}

void CCVApp::ImportImageAndTracksFromBundler(char* SceneParams)
{
	char FilePath[MAX_PATH] = "";
	FILE* pFileSceneParams = fopen(SceneParams, "r");

	ImageNamesVector ImageNames = LoadImagesNames(GetStringFromFile(pFileSceneParams, FilePath));
	m_NumberOfPhotos = ImageNames.size();
	//loading keypoints
	m_ImageDataBase.LoadImageData(GetStringFromFile(pFileSceneParams, FilePath), ImageNames);

	//loading tracks
	m_TrackDataBase.LoadTracks(GetStringFromFile(pFileSceneParams, FilePath));
	m_ImageDataBase.SetTracks(&m_TrackDataBase);

	//loading the matches and their data
	m_ImageDataBase.LoadMatches(GetStringFromFile(pFileSceneParams, FilePath));
}

// Read an image-name list file: a count on the first line followed by one
// name per line. Returns the names in file order; stops early if the file
// ends before the declared count.
ImageNamesVector CCVApp::LoadImagesNames(char* ImageFileName)
{
	ImageNamesVector names;

	FILE* pImgFile = fopen(ImageFileName, "r");
	VISIO_ASSERT(pImgFile);	// previously unchecked - NULL crashed in fscanf

	UINT NumImages = 0;
	fscanf(pImgFile, "%u" , &NumImages);	// %u: NumImages is unsigned

	for (UINT i = 0; i < NumImages; i++)
	{
		char ImgName[200];
		// the width limit keeps a malformed file from overflowing ImgName
		if (fscanf(pImgFile, "%199s", ImgName) != 1)
		{
			break;	// file ended early - keep what we have
		}
		names.push_back(ImgName);
	}

	fclose(pImgFile);	// the handle was previously leaked

	return names;
}

// Collect every not-yet-registered image that shares at least 75% as many
// tracks with the scene as the best candidate does. The best candidate
// (index, with its SharedVisiblePoints) is always the first entry of the
// returned vector.
CamAddingParamsVector CCVApp::GetNextCams(VisiblePointsVector SharedVisiblePoints, UINT index)
{
	// 75% of the max matches; explicit cast instead of the old implicit
	// double -> UINT conversion
	UINT EproxNumTracks = (UINT)ceil(0.75 * SharedVisiblePoints.size());
	CamAddingParamsVector ret;

	SNewCamAddingParams newParams;
	newParams.index = index;
	newParams.SharedVisiablePoints = SharedVisiblePoints;
	ret.push_back(newParams);

	//todo share code with best next cam
	// UINT loop index: the old int i was compared against unsigned values
	for (UINT i = 0; i < m_ImageDataBase.GetNumImages(); i++)
	{
		if (IsImageAlreadyAdded(i) || i == index)
		{
			continue;
		}

		// construct the temp vector only for candidates we actually test
		VisiblePointsVector tempVisiblePoints;
		UINT NumTracks = m_ImageDataBase.GetNumTracksCorespanding(i, tempVisiblePoints);
		if (NumTracks > EproxNumTracks)
		{
			SNewCamAddingParams tNewParams;
			tNewParams.index = i;
			tNewParams.SharedVisiablePoints = tempVisiblePoints;
			ret.push_back(tNewParams);
		}
	}

	return ret;
}

// Pick the best unregistered image and - if it shares enough points with
// the scene - estimate cameras for it and for any close runners-up,
// appending them to Cameras. Returns false when no image connects to the
// scene strongly enough.
bool CCVApp::InitiliazeNextBestCamera( CamerasVector& Cameras, CPointCloud& PointCloud )
{
	// points visible both in the candidate image and in the scene so far
	VisiblePointsVector SharedVisiblePoints;
	UINT BestIndex = GetNextCamIndex(SharedVisiblePoints);

	if (SharedVisiblePoints.size() < MIN_MATCHES_FOR_CONNECTION)
	{
		return false;	// connection too weak - stop adding cameras
	}

	CamAddingParamsVector CandidateCams = GetNextCams(SharedVisiblePoints, BestIndex);

	for (UINT i = 0; i < CandidateCams.size(); i++)
	{
		CCamera* pNewCam = EstimateNewCam(CandidateCams[i].index, CandidateCams[i].SharedVisiablePoints, Cameras, PointCloud);
		Cameras.push_back(pNewCam);
	}

	return true;
}

// Find the unregistered image that shares the most tracks with the scene;
// its shared visible points are returned through VisiblePoints. Returns
// UINT(-1) (and leaves VisiblePoints empty) when no candidate exists.
UINT CCVApp::GetNextCamIndex( VisiblePointsVector& VisiblePoints )
{
	UINT BestIndex = -1;	// UINT(-1) = "not found" sentinel
	UINT BestNumTracks = 0;

	for (int ImgIdx = 0; ImgIdx < m_ImageDataBase.GetNumImages(); ImgIdx++)
	{
		if (IsImageAlreadyAdded(ImgIdx))
		{
			continue;
		}

		VisiblePointsVector CurrVisiblePoints;
		UINT CurrNumTracks = m_ImageDataBase.GetNumTracksCorespanding(ImgIdx, CurrVisiblePoints);
		if (CurrNumTracks > BestNumTracks)
		{
			BestNumTracks = CurrNumTracks;
			BestIndex = ImgIdx;
			VisiblePoints = CurrVisiblePoints;
		}
	}

	return BestIndex;
}

// True when a camera built from the image at 'index' already exists in
// m_Cameras.
bool CCVApp::IsImageAlreadyAdded( UINT index ) const 
{
	for (UINT CamIdx = 0; CamIdx < m_Cameras.size(); CamIdx++)
	{
		if (m_Cameras[CamIdx]->GetImageIndex() == index)
		{
			return true;
		}
	}

	return false;
}

// Map a set of cameras to the indices of the images they were built from.
CameraIndexVector CCVApp::GetImageIndexVector( CamerasVector& cams ) const
{
	CameraIndexVector indices;

	for (CamerasVector::iterator it = cams.begin(); it != cams.end(); ++it)
	{
		indices.push_back((*it)->GetImageIndex());
	}

	return indices;
}

// Estimate a camera for image 'index' from its correspondences with the
// existing point cloud, then link each strong inlier's cloud point to the
// new camera. Returns the heap-allocated camera.
CCamera* CCVApp::EstimateNewCam( UINT index, VisiblePointsVector& SharedVisiblePoints, CamerasVector& Cameras, CPointCloud& PointCloud )
{
	SInliners inliners;
	CCamera* pNewCam = FindFirstEstimationOfNewCam(index, SharedVisiblePoints, PointCloud, inliners);

	// only the strong inliers of the pose estimation get registered
	InlinersVector& StrongInliners = inliners.Inliners;
	for (UINT i = 0; i < StrongInliners.size(); i++)
	{
		// each inlier indexes one of the shared visible points
		VisablePoint visPoint = SharedVisiblePoints[StrongInliners[i]];
		PointCloud.AddKeyData(visPoint.first->GetMark(), pNewCam, visPoint.second);
	}

	return pNewCam;
}

// Ask the SfM pool for an initial pose estimate of image 'index' from the
// projections of the shared, already-triangulated points; the inlier set
// of the estimation is returned through 'inliners'.
CCamera* CCVApp::FindFirstEstimationOfNewCam( UINT index,const VisiblePointsVector& SharedVisiblePoints,const CPointCloud& PointCloud, SInliners& inliners )
{
	CImageData* pImageData = m_ImageDataBase.GetImageData(index);
	return m_SfmPool.FindCameraFromProjection(pImageData, GetCameraMesh(), SharedVisiblePoints, PointCloud, inliners);
}

// Append a render-mesh instance for every reconstructed camera, plus one
// for the point cloud, to the caller's mesh list.
void CCVApp::LoadMeshes(vector<CMeshInstance*>* pMeshes)
{
	for (UINT i = 0; i < m_Cameras.size(); i++)
	{
		pMeshes->push_back(m_Cameras[i]->CreateMeshInstance());
	}

	pMeshes->push_back(m_PointCloud.CreateMeshInstance());
}