#include <glog/logging.h>
#include <opencv2/highgui.hpp>
#include "mbt/object3d.hh"
#include "mbt/tracker_df.hh"

//DFTracker::DFTracker(const cv::Matx33f& K, const cv::Matx14f& distCoeffs, std::vector<Object3D*>& objects) 
//	: Tracker(K, distCoeffs, objects)
// Descriptor-fields tracker. Camera intrinsics and the tracked objects are
// handled by the Tracker base; only DF-specific defaults are set here.
DFTracker::DFTracker(const cv::Matx33f& K, std::vector<Object3D*>& objects)
	: Tracker(K, objects) {
	// Zero-mean / unit-variance normalization of the descriptor fields
	// before matching (see EstimatePoses).
	is_normalize_fields = true;
	// Default control-point sampling stride in pixels.
	grid_step = 10;
}

// Toggles tracking of the object at `objectIndex`: initializes it if it is
// not tracked yet, otherwise resets it. The current frame is stored as the
// matching template either way.
// NOTE(review): `undistortFrame` is currently unused here.
void DFTracker::ToggleTracking(cv::Mat& frame, int objectIndex, bool undistortFrame) {
	// The current frame becomes the template for descriptor-field matching.
	templ = frame.clone();

	if (!objects[objectIndex]->isInitialized()) {
		objects[objectIndex]->initialize();
		view->setLevel(0);
	} else {
		objects[objectIndex]->reset();

		// Recompute the global flag: true if any object remains initialized.
		// NOTE(review): this recomputation only happens on the reset path;
		// the initialize path above does not update `initialized` — confirm
		// it is maintained elsewhere.
		initialized = false;
		for (int o = 0; o < objects.size(); o++) {
			initialized |= objects[o]->isInitialized();
		}
	}
}

// Returns `im` blurred with an isotropic Gaussian of standard deviation
// `sigma`. The kernel side is forced odd (GaussianBlur requirement) and
// clamped to at least 5 taps.
cv::Mat SmoothImage(const float sigma, const cv::Mat &im) {
	const int side = std::max(5, 2 * static_cast<int>(sigma) + 1);
	cv::Mat blurred;
	cv::GaussianBlur(im, blurred, cv::Size(side, side), sigma, sigma, cv::BORDER_REFLECT);
	return blurred;
}

std::vector<cv::Mat> SmoothDescriptorFields(const float sigma, const std::vector<cv::Mat> & fields) {
	std::vector<cv::Mat> smoothed_fields(fields.size());

//#pragma omp parallel for
	for(int ch = 0; ch < fields.size(); ++ch){
		smoothed_fields[ch] = SmoothImage(sigma, fields[ch]);}

	return smoothed_fields;
}

// Converts `image` in place to CV_32F, rescaled so its values span [0, 1]
// (min maps to 0, max maps to 1).
void ConvertImageToFloat(cv::Mat& image) {
	double min, max;
	minMaxLoc(image, &min, &max);
	// Guard against constant images: max == min would divide by zero and
	// fill the result with inf/NaN. Fall back to a plain offset instead.
	const double range = max - min;
	const double v = (range > 0.0) ? 1.0 / range : 1.0;
	image.convertTo(image, CV_32F, v, -min * v);
	// Downstream code indexes the buffer with raw pointers.
	CHECK(image.isContinuous());
}

// Collapses an RGB image to a single grayscale channel and rescales it to a
// [0, 1] CV_32F image (via ConvertImageToFloat).
void AcquireGrayscaleImage(const cv::Mat &rgb_image, cv::Mat &gray_image) {
	cv::cvtColor(rgb_image, gray_image, CV_RGB2GRAY, 1);
	ConvertImageToFloat(gray_image);
}

// Normalizes `image` in place to zero mean and (approximately) unit
// variance, computed over the whole image.
void NormalizeImage(cv::Mat &image) {
	cv::Scalar mean, stddev;
	cv::meanStdDev(image, mean, stddev);
	// Guard: a constant image has zero standard deviation; dividing would
	// produce inf/NaN. Leave the scale untouched in that case.
	const double sd = (stddev[0] > 0.0) ? stddev[0] : 1.0;
	image = (image - mean) / sd;
}

// Computes the x and y image derivatives with a 3x3 Scharr mask. The 1/32
// scale normalizes the mask weights so the outputs approximate the true
// gradient magnitude.
void ComputeImageDerivatives(const cv::Mat & image, cv::Mat & imageDx, cv::Mat &imageDy) {
	const int kSameDepth = -1;        // keep the source depth
	const double kScale = 1.0 / 32.0; // Scharr mask normalization
	const double kDelta = 0.0;

	cv::Scharr(image, imageDx, kSameDepth, 1, 0, kScale, kDelta, cv::BORDER_REFLECT);
	cv::Scharr(image, imageDy, kSameDepth, 0, 1, kScale, kDelta, cv::BORDER_REFLECT);
}

// Per-channel wrapper: computes Scharr x/y derivatives for every image in
// `images`, writing the results into the parallel output vectors.
void ComputeImageDerivatives(std::vector<cv::Mat>& images, std::vector<cv::Mat>& dx_images, std::vector<cv::Mat>& dy_images) {
	const size_t channels = images.size();
	dx_images.resize(channels);
	dy_images.resize(channels);

	for (size_t ch = 0; ch < channels; ++ch) {
		ComputeImageDerivatives(images[ch], dx_images[ch], dy_images[ch]);
	}
}

// Builds four non-negative descriptor fields from a grayscale image:
// {positive x-gradient, negative x-gradient, positive y-gradient, negative
// y-gradient}, each multiplied by 10 (a factor for numerical stability,
// with no particular meaning).
// Assumes `gray_image` is CV_32F (as produced by ConvertImageToFloat) —
// TODO(review): confirm at all call sites, the float access relies on it.
void ComputeDescriptorFields(cv::Mat& gray_image, std::vector<cv::Mat>& desc_fields) {
	desc_fields.clear();
	cv::Mat dx, dy;
	ComputeImageDerivatives(gray_image, dx, dy);
	DCHECK(dx.isContinuous());
	DCHECK(dy.isContinuous());

	cv::Size imSize = gray_image.size();
	cv::Mat dxPos(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dxNeg(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dyPos(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dyNeg(imSize, CV_32F, cv::Scalar(0));

	//TODO: add thresholding for eliminating noise
	for (int row = 0; row < gray_image.rows; ++row) {
		// Typed row pointers instead of raw ((float*)data)[...] indexing.
		const float* dx_row = dx.ptr<float>(row);
		const float* dy_row = dy.ptr<float>(row);
		float* dx_pos_row = dxPos.ptr<float>(row);
		float* dx_neg_row = dxNeg.ptr<float>(row);
		float* dy_pos_row = dyPos.ptr<float>(row);
		float* dy_neg_row = dyNeg.ptr<float>(row);

		for (int col = 0; col < gray_image.cols; ++col) {
			const float dxPixel = dx_row[col];
			const float dyPixel = dy_row[col];

			// Split each gradient into its positive and negative part; the
			// other field keeps its zero initialization.
			if (dxPixel > 0)
				dx_pos_row[col] = 10 * dxPixel;
			else
				dx_neg_row[col] = -10 * dxPixel;

			if (dyPixel > 0)
				dy_pos_row[col] = 10 * dyPixel;
			else
				dy_neg_row[col] = -10 * dyPixel;
		}
	}

	desc_fields.push_back(dxPos);
	desc_fields.push_back(dxNeg);
	desc_fields.push_back(dyPos);
	desc_fields.push_back(dyNeg);
}

// One tracking step: builds descriptor fields for the stored template and
// the new frame, samples control points inside the rendered silhouette, and
// runs the Gauss-Newton iterations (Track). The frame then becomes the next
// template.
// NOTE(review): `check_lost` is currently unused.
void DFTracker::EstimatePoses(cv::Mat& frame, bool check_lost) {
	ComputeGradientFields(templ, template_fields);
	ComputeGradientFields(frame, image_fields);

	// Optionally normalize each field to zero mean / unit variance so the
	// residuals are comparable across channels and lighting changes.
	if (is_normalize_fields) {
		for (int i = 0; i < template_fields.size(); ++i) {
			NormalizeImage(template_fields[i]);
			NormalizeImage(image_fields[i]);
		}
	}

	// NOTE(review): grid step is hard-coded to 2 here although the
	// constructor sets the member `grid_step` to 10 — confirm which is
	// intended.
	SampleGridInnerPoints(objects, 2, ctr_pts);

	// NOTE(review): an empty pyramid is passed; Track currently ignores it.
	std::vector<cv::Mat> imagePyramid;
	Track(imagePyramid, objects);

	templ = frame.clone();
}

// Runs a fixed number of Gauss-Newton iterations on the current
// full-resolution descriptor fields.
// NOTE(review): `imagePyramid`, `objects` and `runs` are currently ignored,
// exactly as in the previous hand-unrolled implementation; the iteration
// count is kept at 8 to preserve behavior. A disabled coarse-to-fine
// variant (iterating over Gaussian-smoothed fields via
// SmoothDescriptorFields) was removed as dead code.
void DFTracker::Track(std::vector<cv::Mat>& imagePyramid, std::vector<Object3D*>& objects, int runs) {
	const int kIterations = 8;
	for (int it = 0; it < kIterations; ++it) {
		RunIteration(image_fields, template_fields);
	}
}

// One Gauss-Newton iteration: accumulate the normal equations from the
// descriptor-field residuals, solve for the 6-DoF twist, and apply the
// incremental pose on the left of the current pose of objects[0].
void DFTracker::RunIteration(std::vector<cv::Mat>& image_fields, std::vector<cv::Mat>& template_fields) {
	ComputeImageDerivatives(image_fields, dx_fields, dy_fields);

	// Accumulators for J^T J and J^T r; cv::Matx's default constructor
	// zero-fills, which ComputeJac relies on.
	cv::Matx66f wJTJM;
	cv::Matx61f JTM;
	ComputeJac(image_fields, template_fields, wJTJM, JTM);

	// delta = -(J^T J)^-1 J^T r, mapped to SE(3) via the exponential map.
	const cv::Matx44f update = Transformations::exp(-wJTJM.inv(cv::DECOMP_CHOLESKY) * JTM);
	objects[0]->setPose(update * objects[0]->getPose());
}

// Accumulates the Gauss-Newton normal equations for the 6-DoF pose update:
// wJTJM += J^T J and JTM += J^T err, summed over every descriptor channel
// and every sampled control point.
// Assumes the caller passes zero-initialized matrices (cv::Matx's default
// constructor zero-fills), and that template_points, ctr_pts and
// warped_points are parallel arrays of equal length — TODO(review) confirm.
void DFTracker::ComputeJac(std::vector<cv::Mat>& image_fields, std::vector<cv::Mat>& template_fields, cv::Matx66f& wJTJM, cv::Matx61f& JTM) {
	// Fix(review): removed an unused K_inv copy and a K_inv_data pointer that
	// aliased the temporary returned by K.inv() (dangling immediately).
	cv::Matx33f K = view->GetCalibrationMatrix().get_minor<3, 3>(0, 0);

	float* JT = JTM.val;
	float* wJTJ = wJTJM.val;

	float fx = K(0,0);
	float fy = K(1,1);

	// Project the 3-D control points with the current pose estimate.
	view->ProjectPoints(ctr_pts, objects[0]->getPose(), warped_points);

	for (int ch = 0; ch < image_fields.size(); ++ch)
	for (int i = 0; i < template_points.size(); ++i) {
		// Descriptor-field gradient sampled at the warped point location.
		int warped_idx = dx_fields[ch].cols * warped_points[i].y + warped_points[i].x;
		float dx = ((float*)dx_fields[ch].data)[warped_idx];
		float dy = ((float*)dy_fields[ch].data)[warped_idx];

		// Control point transformed into camera coordinates.
		cv::Vec4f ctr_pt = objects[0]->getPose() * cv::Vec4f(ctr_pts[i].x, ctr_pts[i].y, ctr_pts[i].z, 1);
		float Xc = ctr_pt(0);
		float Yc = ctr_pt(1);
		float Zc = ctr_pt(2);

		// One Jacobian row: image gradient chained with the derivative of the
		// pinhole projection w.r.t. the se(3) twist (J[0..2] rotation,
		// J[3..5] translation).
		float J[6];
		float Zc2 = Zc*Zc;
		J[0] = dx * (-Xc*fx*Yc/Zc2) +     dy * (-fy -Yc*Yc*fy/Zc2);
		J[1] = dx * (fx + Xc*Xc*fx/Zc2) + dy * (Xc*Yc*fy/Zc2);
		J[2] = dx * (-fx*Yc/Zc)+          dy * (Xc*fy/Zc);
		J[3] = dx * (fx/Zc);
		J[4] =                            dy * (fy/Zc);
		J[5] = dx * (-Xc*fx/Zc2) +        dy * (-Yc*fy/Zc2);

		// Residual: template descriptor minus current image descriptor.
		float err = ((float*)template_fields[ch].data)[warped_idx] - ((float*)image_fields[ch].data)[warped_idx];
		for (int n = 0; n < 6; n++) {
			JT[n] += err * J[n];
		}

		// Accumulate only the upper triangle of J^T J; mirrored below.
		for (int n = 0; n < 6; n++)
		for (int m = n; m < 6; m++) {
			wJTJ[n * 6 + m] += J[n] * J[m];
		}
	}

	// J^T J is symmetric: copy the upper triangle into the lower one.
	for (int i = 0; i < wJTJM.rows; i++)
	for (int j = i + 1; j < wJTJM.cols; j++) {
		wJTJM(j, i) = wJTJM(i, j);
	}
}

// Samples a regular grid of 2-D points lying safely inside the rendered
// silhouette of objects[0] (after erosion), stores them in the
// `template_points` member, and back-projects them via the depth buffer
// into the 3-D control points `ctr_pts`.
void DFTracker::SampleGridInnerPoints(std::vector<Object3D*>& objects, int grid_step, std::vector<cv::Point3f>& ctr_pts) {
	// Render all objects' silhouettes, then read back mask and depth.
	view->RenderSilhouette(std::vector<Model*>(objects.begin(), objects.end()), GL_FILL);
	// NOTE(review): masks_img is downloaded but never used below.
	cv::Mat masks_img = view->DownloadFrame(View::MASK);
	cv::Mat depth_img = view->DownloadFrame(View::DEPTH);
	cv::Mat mask_img;
	ConvertMask(depth_img, objects[0]->getModelID(), mask_img);
	cv::imshow("mask_img", mask_img); // debug visualization
	
	// Erode the mask so grid samples keep a margin from the silhouette edge.
	// NOTE(review): despite the name, `dilation_size` parameterizes an
	// erosion kernel here.
	const int dilation_size = 8;
	cv::Mat element = 
	cv::getStructuringElement(
		cv::MORPH_RECT,
		cv::Size(2 * dilation_size + 1, 2 * dilation_size + 1),
		cv::Point(dilation_size, dilation_size));
	cv::Mat fg_mask_erode;
	cv::erode(mask_img, fg_mask_erode, element);
	cv::imshow("fg_mask_erode", fg_mask_erode); // debug visualization

	// Keep every grid_step-th pixel that is still foreground (255).
	template_points.clear();
	for (int y = 0; y < fg_mask_erode.rows; y+= grid_step)
	for (int x = 0; x < fg_mask_erode.cols; x+= grid_step) {
		if (255 == fg_mask_erode.at<uchar>(y,x)) {
			template_points.push_back(cv::Point(x,y));
		}
	}

	// Lift the 2-D samples to 3-D using the depth buffer and current pose.
	view->BackProjectPoints(template_points, depth_img, objects[0]->getPose(), ctr_pts);
}

// Builds the four descriptor fields {dx+, dx-, dy+, dy-} of `image`, each
// scaled by 10 (numerical-stability factor only). Multi-channel input is
// first converted to a normalized [0,1] CV_32F grayscale image.
// NOTE(review): the single-channel path uses `image` as-is; if a caller
// ever passes a CV_8U grayscale image, the float pointer casts below would
// misread the data — confirm all callers pass float images.
void DFTracker::ComputeGradientFields(const cv::Mat& image, std::vector<cv::Mat>& desc_fields) {
	cv::Mat gray_image;
	if (1 != image.channels()) {
		AcquireGrayscaleImage(image, gray_image);
	} else {
		gray_image = image;
	}

	cv::Mat dx, dy;
	ComputeImageDerivatives(gray_image, dx, dy);
	DCHECK(dx.isContinuous());
	DCHECK(dy.isContinuous());

	// Fields start zeroed; only one of each +/- pair is written per pixel.
	cv::Size imSize = gray_image.size();
	cv::Mat dx_pos(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dx_neg(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dy_pos(imSize, CV_32F, cv::Scalar(0));
	cv::Mat dy_neg(imSize, CV_32F, cv::Scalar(0));

	//TODO: use cv::threshold instead of !
	//TODO: add thresholding for eliminating noise
	for (int row = 0; row < gray_image.rows; ++row)
	for (int col = 0; col < gray_image.cols; ++col) {
		float dxPixel = ((float*)dx.data)[dx.cols * row + col];
		float dyPixel = ((float*)dy.data)[dx.cols * row + col];

		//10 is just a factor for numerical stability, 
		//with no particular meaning
		if(dxPixel>0)
			((float*)dx_pos.data)[dx.cols * row + col] = 10*dxPixel;
		else
			((float*)dx_neg.data)[dx.cols * row + col] = -10*dxPixel;

		if(dyPixel>0)
			((float*)dy_pos.data)[dx.cols * row + col] = 10*dyPixel;
		else
			((float*)dy_neg.data)[dx.cols * row + col] = -10*dyPixel;
	}

	// Fixed channel order: {dx+, dx-, dy+, dy-}.
	desc_fields.resize(4);
	desc_fields[0] = dx_pos;
	desc_fields[1] = dx_neg;
	desc_fields[2] = dy_pos;
	desc_fields[3] = dy_neg;
}