/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "MBOpenCVExtensionFunctions.h"
#include "MBVec2.h"

// Conversion factor between meters and millimeters (sensor pitch is stored in meters).
const double METERS_TO_MILLIMETERS = 1000.0;

// Component indices for MBVec2-style vector access (e.g. point[X], point[Y]).
const MBuint X = 0u;
const MBuint Y = 1u;
const MBuint Z = 2u;

#include <iostream>

using std::cout;
using std::endl;

namespace cv
{
	/**
	 * Variant of cv::initUndistortRectifyMap() that supports project-specific
	 * distortion models.
	 *
	 * For every destination pixel (j, i) the function applies the inverse
	 * rectification transform iR = (Ar * R)^-1 and then the distortion model
	 * selected via projectFormat to obtain the source coordinate (u, v):
	 *   - MB_BASED_PROJECT_FORMAT_ADAMTECH:   photogrammetric (Brown-style) model
	 *     evaluated in millimeters on the sensor plane, using the pixel pitch and
	 *     image size from cameraModel.
	 *   - MB_BASED_PROJECT_FORMAT_RISCAN_PRO: polynomial model in the form given
	 *     by the RiScan Pro dtd file; Riegl version != 0 substitutes atan(r) for
	 *     r^2 (see note in the branch).
	 * The resulting maps are suitable for cv::remap().
	 *
	 * @param _cameraMatrix    3x3 intrinsic matrix A.
	 * @param _distCoeffs      4, 5 or 8 coefficients: k1 k2 p1 p2 [k3 [k4 k5 k6]].
	 * @param _matR            optional 3x3 rectification matrix (identity if empty).
	 * @param _newCameraMatrix optional 3x3 (or 3x4 projection) matrix of the rectified image.
	 * @param size             size of the maps to generate.
	 * @param m1type           CV_16SC2, CV_32FC1 or CV_32FC2; <= 0 selects CV_16SC2.
	 * @param _map1 _map2      output maps for cv::remap().
	 * @param cameraModel      supplies sensor geometry and calibration parameters.
	 * @param projectFormat    selects the distortion model (see above).
	 */
	void myInitUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
		InputArray _matR, InputArray _newCameraMatrix,
		Size size, int m1type, OutputArray _map1, OutputArray _map2,
		const MBCameraModel &cameraModel, const MBBasedProjectFormat &projectFormat)
	{
		Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
		Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

		if( m1type <= 0 )
			m1type = CV_16SC2;
		CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
		_map1.create( size, m1type );
		Mat map1 = _map1.getMat(), map2;
		if( m1type != CV_32FC2 )
		{
			// Two-map representation: map1 holds integer coordinates, map2 the
			// interpolation table indices (or float v for CV_32FC1).
			_map2.create( size, m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
			map2 = _map2.getMat();
		}
		else
			_map2.release();

		Mat_<double> R = Mat_<double>::eye(3, 3);
		Mat_<double> A = Mat_<double>(cameraMatrix), Ar;

		if( newCameraMatrix.data )
			Ar = Mat_<double>(newCameraMatrix);
		else
			Ar = getDefaultNewCameraMatrix( A, size, true );

		if( matR.data )
			R = Mat_<double>(matR);

		if( distCoeffs.data )
			distCoeffs = Mat_<double>(distCoeffs);
		else
		{
			// No coefficients supplied: use an all-zero 8x1 vector (no distortion).
			distCoeffs.create(8, 1, CV_64F);
			distCoeffs = 0.;
		}

		CV_Assert( A.size() == Size(3,3) && A.size() == R.size() );
		CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));
		// iR maps rectified pixel coordinates back to normalized camera coordinates.
		Mat_<double> iR = (Ar.colRange(0,3)*R).inv(DECOMP_LU);
		const double* ir = &iR(0,0);

		double u0 = A(0, 2),  v0 = A(1, 2);
		double fx = A(0, 0),  fy = A(1, 1);

		CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) || 
			distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
			distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1));

		if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
			distCoeffs = distCoeffs.t();

		double k1 = ((double*)distCoeffs.data)[0];
		double k2 = ((double*)distCoeffs.data)[1];
		double p1 = ((double*)distCoeffs.data)[2];
		double p2 = ((double*)distCoeffs.data)[3];
		// Higher-order coefficients are present only for the 5- and 8-element forms.
		double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? ((double*)distCoeffs.data)[4] : 0.;
		double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[5] : 0.;
		double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[6] : 0.;
		double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[7] : 0.;

		// Sensor geometry, converted so the ADAMTECH model can work in millimeters.
		double pixel_width_in_millimeters = METERS_TO_MILLIMETERS * cameraModel.GetIntrinsicParameters().mPixelWidthInMeters;
		double pixel_height_in_millimeters = METERS_TO_MILLIMETERS * cameraModel.GetIntrinsicParameters().mPixelHeightInMeters;
		
		double half_image_width_in_pixels = 0.5 * cameraModel.GetIntrinsicParameters().mImageWidthInPixels;
		double half_image_height_in_pixels = 0.5 * cameraModel.GetIntrinsicParameters().mImageHeightInPixels;

		// Total distortion correction per axis (radial + decentering + affinity).
		double delta_xp;
		double delta_yp;
		double focal_length_millimeters = cameraModel.GetInternalCalibrationParameters().mFocalLengthXInPixels * pixel_width_in_millimeters;
		// Principal point offset relative to the image center, in millimeters.
		double principal_point_x0 = (cameraModel.GetInternalCalibrationParameters().mCenterOfImageXInPixels - half_image_width_in_pixels) * pixel_width_in_millimeters;
		double principal_point_y0 = (cameraModel.GetInternalCalibrationParameters().mCenterOfImageYInPixels - half_image_height_in_pixels) * pixel_height_in_millimeters;

		double radial_distortion_xr;
		double radial_distortion_yr;

		double x_sq;
		double y_sq;
		double r_sq;
		double k_factor;

		double decentering_distortion_xd;
		double decentering_distortion_yd;

		double affinity_distortion_xaff;

		double differential_scale = cameraModel.GetDifferentialScale();
		double non_orthogonality = cameraModel.GetNonOrthogonality();

		double two_x_y;

		// Source-image coordinates computed for the current destination pixel.
		// Initialized so an unhandled projectFormat cannot leave them undefined.
		double u = 0.0;
		double v = 0.0;

		for( int i = 0; i < size.height; i++ )
		{
			float* m1f = (float*)(map1.data + map1.step*i);
			float* m2f = (float*)(map2.data + map2.step*i);
			short* m1 = (short*)m1f;
			ushort* m2 = (ushort*)m2f;
			// Homogeneous coordinates at the start of row i; incremented per column.
			double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8];

			for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] )
			{
				switch (projectFormat)
				{
				case MB_BASED_PROJECT_FORMAT_ADAMTECH:
					{
						// Perspective division: w is the reciprocal of the homogeneous
						// coordinate, so the normalized point is (_x * w, _y * w).
						// NOTE(fix): the previous code used (_x / w, _y / w), which is
						// equivalent to multiplying by _w — the inverse of the intended
						// division and inconsistent with the RISCAN_PRO branch below.
						// The bug was masked whenever R is the identity, since then
						// _w == 1 for every pixel.
						double w = 1.0 / _w;

						MBVec2 point_image_space( _x * w, _y * w );
						
						// Scale to millimeters on the sensor plane.
						point_image_space[X] = focal_length_millimeters * point_image_space[X];
						point_image_space[Y] = focal_length_millimeters * point_image_space[Y];

						x_sq = point_image_space[X] * point_image_space[X];
						y_sq = point_image_space[Y] * point_image_space[Y];

						r_sq = x_sq + y_sq;

						// NOTE(review): the radius is computed before adding the
						// principal point offset, while the distortion terms below use
						// the shifted coordinates — presumably the ADAMTECH convention;
						// confirm against the project format specification.
						point_image_space[X] += principal_point_x0;
						point_image_space[Y] += principal_point_y0;

						// Radial distortion: (k1*r^2 + k2*r^4 + k3*r^6 + k4*r^8).
						k_factor = (((k4 * r_sq + k3) * r_sq + k2) * r_sq + k1) * r_sq;
						radial_distortion_xr = k_factor * point_image_space[X];
						radial_distortion_yr = k_factor * point_image_space[Y];

						two_x_y = 2.0 * point_image_space[X] * point_image_space[Y];

						// Decentering (tangential) distortion, Brown-style.
						decentering_distortion_xd = p1 * (r_sq + 2 * point_image_space[X] * point_image_space[X]) + p2 * two_x_y;
						decentering_distortion_yd = p2 * (r_sq + 2 * point_image_space[Y] * point_image_space[Y]) + p1 * two_x_y;

						// Affinity/shear correction only affects the x axis.
						affinity_distortion_xaff = differential_scale * point_image_space[X] + non_orthogonality * point_image_space[Y];

						delta_xp = radial_distortion_xr + decentering_distortion_xd + affinity_distortion_xaff;
						delta_yp = radial_distortion_yr + decentering_distortion_yd; // affinity_distortion_yaff is always 0.0

						point_image_space[X] -= delta_xp;
						point_image_space[Y] -= delta_yp;

						// Back to pixel coordinates relative to the upper-left origin.
						u = (point_image_space[X] / pixel_width_in_millimeters) + half_image_width_in_pixels;
						v = (point_image_space[Y] / pixel_height_in_millimeters) + half_image_height_in_pixels;
					}
					break;

				case MB_BASED_PROJECT_FORMAT_RISCAN_PRO:
					{
						double w = 1./_w, x = _x*w, y = _y*w;
						double x2 = x*x, y2 = y*y;
						double r2;
						double _2xy = 2*x*y;

						if (cameraModel.GetRieglVersion() == 0u)
						{
							r2 = x2 + y2;
						}
						else
						{
							// Version 1 model: despite the name, r2 holds atan(r) here.
							r2 = atan( sqrt( x2 + y2 ) );
							//cout << "VERSION 1 for Riscan Project!" << endl;
						}
						

						// Changed to the form as it is described in the RiScan Pro dtd-File
						//double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
						double kr = 1 + ((((k4 * r2 + k3) * r2 + k2) * r2 + k1) * r2);

						u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0;
						v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0;
					}
					break;

				default:
					// Unknown project format: fall back to the identity mapping so the
					// generated maps stay well-defined instead of containing garbage.
					u = j;
					v = i;
					break;
				}

				if( m1type == CV_16SC2 )
				{
					// Fixed-point representation: integer part in map1, fractional
					// interpolation-table index in map2.
					int iu = saturate_cast<int>(u*INTER_TAB_SIZE);
					int iv = saturate_cast<int>(v*INTER_TAB_SIZE);
					m1[j*2] = (short)(iu >> INTER_BITS);
					m1[j*2+1] = (short)(iv >> INTER_BITS);
					m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1)));
				}
				else if( m1type == CV_32FC1 )
				{
					m1f[j] = (float)u;
					m2f[j] = (float)v;
				}
				else
				{
					m1f[j*2] = (float)u;
					m1f[j*2+1] = (float)v;
				}
			}
		}
	}
}

void cv::undistortWithOptionalFlipping( InputArray _src, OutputArray _dst, InputArray _cameraMatrix,
	InputArray _distCoeffs, InputArray _newCameraMatrix, const MBCameraModel &cameraModel,
	const MBBasedProjectFormat &projectFormat )
{
	// For ADAMTECH projects the image is flipped vertically up front so that the
	// origin can be treated as the lower-left corner during undistortion.
	Mat original = _src.getMat();
	Mat src = original;

	if (projectFormat == MB_BASED_PROJECT_FORMAT_ADAMTECH)
		cv::flip( original, src, 0 );

	Mat cameraMatrix = _cameraMatrix.getMat();
	Mat distCoeffs = _distCoeffs.getMat();
	Mat newCameraMatrix = _newCameraMatrix.getMat();

	_dst.create( src.size(), src.type() );
	Mat dst = _dst.getMat();

	CV_Assert( dst.data != src.data );

	// Process the image in horizontal stripes (roughly 4096 pixels each) so the
	// undistortion maps stay small regardless of the image height.
	const int stripe_size0 = std::min( std::max( 1, (1 << 12) / std::max( src.cols, 1 ) ), src.rows );
	Mat map1( stripe_size0, src.cols, CV_16SC2 );
	Mat map2( stripe_size0, src.cols, CV_16UC1 );

	Mat_<double> A;
	Mat_<double> Ar;
	Mat_<double> I = Mat_<double>::eye( 3, 3 );

	cameraMatrix.convertTo( A, CV_64F );

	if( distCoeffs.data )
		distCoeffs = Mat_<double>(distCoeffs);
	else
	{
		// No coefficients supplied: fall back to a zero (distortion-free) vector.
		distCoeffs.create( 5, 1, CV_64F );
		distCoeffs = 0.;
	}

	if( newCameraMatrix.data )
		newCameraMatrix.convertTo( Ar, CV_64F );
	else
		A.copyTo( Ar );

	const double v0 = Ar(1, 2);
	for( int y = 0; y < src.rows; y += stripe_size0 )
	{
		const int stripe_size = std::min( stripe_size0, src.rows - y );

		// Shift the principal point so the maps are computed relative to this stripe.
		Ar(1, 2) = v0 - y;

		Mat map1_part = map1.rowRange( 0, stripe_size );
		Mat map2_part = map2.rowRange( 0, stripe_size );
		Mat dst_part = dst.rowRange( y, y + stripe_size );

		myInitUndistortRectifyMap( A, distCoeffs, I, Ar, Size( src.cols, stripe_size ),
			map1_part.type(), map1_part, map2_part, cameraModel, projectFormat );
		remap( src, dst_part, map1_part, map2_part, INTER_LINEAR, BORDER_CONSTANT );
	}

	if (projectFormat == MB_BASED_PROJECT_FORMAT_ADAMTECH)
	{
		// Undo the initial vertical flip so the caller sees the expected orientation.
		Mat result = _dst.getMat();
		cv::flip( result, result, 0 );
	}
}