#include <ros/ros.h>
#include <math.h>

// Ros
#include <std_msgs/Bool.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/Image.h>
#include <stereo_msgs/DisparityImage.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/image_encodings.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <image_geometry/stereo_camera_model.h>

// Ros -> openCV -> Ros
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>

// PCL
#include <pcl/ros/conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <boost/foreach.hpp>

// Image Publishing for Debug
#include <image_transport/image_transport.h>

// Blob Detection
#include <cmvisionMod/Blobs.h>

// Move Head
#include <actionlib/client/simple_action_client.h>
#include <pr2_controllers_msgs/PointHeadGoal.h>
#include <pr2_controllers_msgs/PointHeadAction.h>

using namespace sensor_msgs;
using namespace stereo_msgs;
using namespace message_filters;


// Action client type used to point the PR2 head at a 3D target.
typedef actionlib::SimpleActionClient<pr2_controllers_msgs::PointHeadAction> PointHeadClient;

// True once a left-camera blob has been stored in lBlob (set in lBlobCallback).
bool hasBlob;
// Gate shared by all callbacks: false while a frame is being processed.
bool canProcessNextImage;
//image_transport::Publisher pub;
// Stereo model fed by camInfoCallback (the hand-rolled projectTo3d* helpers
// are used instead of it for the actual math — see comment near L368).
image_geometry::StereoCameraModel camModel;

// Debug image publishers (currently never advertised — see main).
image_transport::Publisher lPublisher;
image_transport::Publisher rPublisher;
// Publishes std_msgs::Bool on /can_find_next_blob to pause/resume the blob finder.
ros::Publisher blobCanFindMore;
// Largest blob most recently seen in the left / right camera image.
cmvisionMod::Blob lBlob;
cmvisionMod::Blob rBlob;
// Working copy used by the point-cloud callback.
cmvisionMod::Blob biggestBlob;
// Set when lBlob/rBlob hold fresh, unconsumed data ("Recieved" spelling kept:
// these identifiers are referenced throughout the file).
bool lBlobRecieved;
bool rBlobRecieved;

//
// Can use either IplImage or cv::Mat as output.
//   	Bridge from pcl::PointCloud -> cvBridge
//
//IplImage *makeImageFromPointCloud(pcl::PointCloud<pcl::PointXYZRGB>& cloud)
//
// Can use either IplImage or cv::Mat as output.
//   	Bridge from pcl::PointCloud -> cvBridge
//
// Builds a BGR debug image from an organized RGB point cloud.  Pixels whose
// 3D point is NaN (no stereo correspondence) are painted black.
//
cv::Mat makeImageFromPointCloud(pcl::PointCloud<pcl::PointXYZRGB>& cloud)
{
	// cv::Mat's constructor takes (rows, cols, type), i.e. (height, width).
	// The original passed (width, height), transposing the buffer so the
	// .at<>(h,w) accesses below ran out of bounds on non-square clouds.
	cv::Mat image = cv::Mat(cloud.height, cloud.width, CV_8UC3);
	for (unsigned int h = 0; h < cloud.height; h++) {
		for (unsigned int w = 0; w < cloud.width; w++) {
			const pcl::PointXYZRGB& pt = cloud.points[h * cloud.width + w];
			cv::Vec3b& pixel = image.at<cv::Vec3b>(h, w);
			if (pt.x == pt.x) { // x == x is false only when x is NaN
				pixel[0] = pt.b; // OpenCV channel order is BGR
				pixel[1] = pt.g;
				pixel[2] = pt.r;
			} else {
				pixel[0] = 0;
				pixel[1] = 0;
				pixel[2] = 0;
			}
		}
	}

	return image;
}

//
// Generic LookAt Function with frame_id set to wide_stereo_optical_frame
//
// Action client created once in main(); lives for the process lifetime.
PointHeadClient* pointHead;

//
// Generic LookAt Function with frame_id set to wide_stereo_optical_frame.
// Sends a PointHeadAction goal asking the head to look at (x,y,z), rejecting
// NaN coordinates, and blocks up to 2 seconds for the motion to complete.
//
void lookAtPoint(float x, float y, float z) {
	pr2_controllers_msgs::PointHeadGoal goal;

	// v != v is true exactly when v is NaN.
	if ( x!=x || y!=y || z!=z )  {
		ROS_ERROR("Received a NaN");
		return;
	}

	ROS_INFO("Looking At:  (%f,%f,%f)",x,y,z);

	geometry_msgs::PointStamped point;
	point.header.frame_id = "/wide_stereo_optical_frame";
	point.point.x = x;
	point.point.y = y;
	point.point.z = z;
	goal.target = point;

	//goal.min_duration = ros::Duration(0.5);
	goal.max_velocity = 1.0;

	pointHead->sendGoal(goal);
	// Bounded wait so a stalled controller can't hang the callback forever.
	pointHead->waitForResult(ros::Duration(2.0));
}


//
// Point Cloud Blob Tracking Callback
//
//
// Point Cloud Blob Tracking Callback.
// Averages the 3D points inside the tracked left-camera blob's bounding box
// and points the head at the average.
//
void callback(const sensor_msgs::PointCloud2ConstPtr& msg)
{
	ROS_INFO("pointCloud2 Callback");
	// Need a tracked blob and a free processing slot.
	if (!hasBlob || !canProcessNextImage) return;
	canProcessNextImage = false;

	pcl::PointCloud<pcl::PointXYZRGB> newCloud;
	pcl::fromROSMsg(*msg, newCloud);

	biggestBlob = lBlob;

	// Only proceed when the whole bounding box lies strictly inside the image.
	if (biggestBlob.left > 0 && biggestBlob.right < msg->width &&
	    biggestBlob.top > 0 && biggestBlob.bottom < msg->height) {
		ROS_INFO("Outputting 3D Points");
		float sumX = 0.00, sumY = 0.00, sumZ = 0.00;
		int validPoints = 0;

		// Accumulate every finite point inside the blob.
		for (unsigned int col = biggestBlob.left; col < biggestBlob.right; col++) {
			for (unsigned int row = biggestBlob.top; row < biggestBlob.bottom; row++) {
				pcl::PointXYZRGB point = newCloud.at(col, row);
				// p == p is false exactly when p is NaN.
				if (point.x == point.x && point.y == point.y && point.z == point.z) {
					sumX += point.x;
					sumY += point.y;
					sumZ += point.z;
					validPoints++;
				}
			}
		}

		if (validPoints != 0) {
			float avgX = sumX / validPoints;
			float avgY = sumY / validPoints;
			float avgZ = sumZ / validPoints;
			ROS_INFO("\tHeader:  %s",(msg->header.frame_id).c_str());
			ROS_INFO("\tPoint in 3D:  (%f, %f, %f)", avgX, avgY, avgZ);
			lookAtPoint(avgX,avgY,avgZ);
		} else {
			ROS_WARN("No Non-NaN Points Found");
		}
	}

	// Debug-image publishing removed; see makeImageFromPointCloud if needed.

	canProcessNextImage = true;
}

//
//	Right Blob Callback from cmvisionMod
//
//
//	Right Blob Callback from cmvisionMod.
//	Stores the largest-area right-camera blob into the rBlob global.
//
void rBlobCallback(const cmvisionMod::BlobsConstPtr& msg)
{
	// Skip empty messages, skip while processing, and never overwrite an
	// unconsumed blob.
	if (msg->blob_count == 0 || !canProcessNextImage || rBlobRecieved) {
		return;
	}

	// Linear scan for the blob with the largest area.
	unsigned int largest = 0;
	for (unsigned int idx = 1; idx < msg->blob_count; idx++) {
		if (msg->blobs[idx].area > msg->blobs[largest].area) {
			largest = idx;
		}
	}

	rBlob = msg->blobs[largest];

	rBlobRecieved = true;
}

//
//	Left Blob Callback from cmvisionMod
//
//
//	Left Blob Callback from cmvisionMod.
//	Stores the largest-area left-camera blob into the lBlob global and
//	pauses the blob finder until it has been processed.
//
void lBlobCallback(const cmvisionMod::BlobsConstPtr& msg)
{
	// Skip empty messages, skip while processing, and never overwrite an
	// unconsumed blob.
	if (msg->blob_count == 0 || !canProcessNextImage || lBlobRecieved) {
		return;
	}

	// Tell the blob finder to hold off until this blob is consumed.
	std_msgs::Bool publishBlobs;
	publishBlobs.data = false;
	blobCanFindMore.publish(publishBlobs);

	// Linear scan for the blob with the largest area.
	unsigned int largest = 0;
	for (unsigned int idx = 1; idx < msg->blob_count; idx++) {
		if (msg->blobs[idx].area > msg->blobs[largest].area) {
			largest = idx;
		}
	}

	lBlob = msg->blobs[largest];
	ROS_INFO("\tLeft Center:  (%i,%i)",lBlob.x,lBlob.y);

	lBlobRecieved = true;
	hasBlob = true;
}

//
//	Store the Camera Information
//

// Latest synchronized camera infos; read by the projectTo3d* helpers below.
CameraInfoConstPtr leftCamInfo;
CameraInfoConstPtr rightCamInfo;
// Caches both camera infos and updates the stereo camera model.
// Called by the ApproximateTime synchronizer set up in main().
void camInfoCallback(const CameraInfoConstPtr& leftInfo, const CameraInfoConstPtr& rightInfo)
{
	leftCamInfo = leftInfo;
	rightCamInfo = rightInfo;
	camModel.fromCameraInfo(leftInfo,rightInfo);
	return;
}

//
//	Check disparity map if it has a valid value to pull from
//		Stores into val so I don't have to run memcpy etc again
//
// Scratch global: holds the last disparity value read by hasDisparityValue()
// so callers can reuse it without a second memcpy.
float val;
// Returns true when pixel (x,y) of the disparity image holds a valid
// disparity; the value itself is left in the global `val`.
// Assumes the disparity image is 32-bit float — TODO confirm encoding.
bool hasDisparityValue(DisparityImageConstPtr disparity,int x, int y) {
	// Row stride is image.step bytes; pixels are sizeof(float) bytes wide.
	// NOTE(review): .at() bounds-checks only the first byte of the pixel.
	memcpy(&val, &(disparity->image.data.at(y*disparity->image.step + sizeof(float)*x )), sizeof(float));
	if ( val != val ) return false; // NaN check: NaN != NaN
	if ( val < disparity->min_disparity || val > disparity->max_disparity ) return false;

	return true;
}

//
//	Should project a point in an image to a 3D point using the wide_stereo... frame as reference
//
//
//	Projects pixel (i,j) of the LEFT image to a 3D point (x,y,z) in the
//	wide_stereo optical frame: builds the ray through the pixel from the
//	left camera's projection matrix P, then scales it by the
//	disparity-derived depth.
//
inline void projectTo3dLeft(float i, float j, float disparity, DisparityImageConstPtr disparityImage, float &x, float &y, float &z) {
	// Intrinsics from the 3x4 row-major projection matrix P.
	float focalX  = leftCamInfo->P[0];
	float focalY  = leftCamInfo->P[5];
	float centerX = leftCamInfo->P[2];
	float centerY = leftCamInfo->P[6];
	float transX  = leftCamInfo->P[3];
	float transY  = leftCamInfo->P[7];

	// Ray direction through the pixel, z normalized to 1.
	x = ( (i - centerX - transX) / focalX );
	y = ( (j - centerY - transY) / focalY );
	z = 1.0;

	// depth = f * baseline / disparity (standard stereo relation), then
	// scale the unit-length ray by it.
	float rayLength = sqrt(x*x + y*y + 1);
	float depth = disparityImage->f * disparityImage->T / disparity;
	x = depth*x / rayLength;
	y = depth*y / rayLength;
	z = depth*z / rayLength;
}

//
//	Should project a point in an image to a 3D point using the wide_stereo... frame as reference
//
//
//	Projects pixel (i,j) of the RIGHT image to a 3D point (x,y,z): same
//	construction as projectTo3dLeft but using the right camera's
//	projection matrix P.
//
inline void projectTo3dRight(float i, float j, float disparity, DisparityImageConstPtr disparityImage, float &x, float &y, float &z) {
	// Intrinsics from the 3x4 row-major projection matrix P.
	float focalX  = rightCamInfo->P[0];
	float focalY  = rightCamInfo->P[5];
	float centerX = rightCamInfo->P[2];
	float centerY = rightCamInfo->P[6];
	float transX  = rightCamInfo->P[3];
	float transY  = rightCamInfo->P[7];

	// Ray direction through the pixel, z normalized to 1.
	x = ( (i - centerX - transX) / focalX );
	y = ( (j - centerY - transY) / focalY );
	z = 1.0;

	// depth = f * baseline / disparity, then scale the unit-length ray.
	float rayLength = sqrt(x*x + y*y + 1);
	float depth = disparityImage->f * disparityImage->T / disparity;
	x = depth*x / rayLength;
	y = depth*y / rayLength;
	z = depth*z / rayLength;
}

//
//	Callback from disparity
//
//
//	Callback from disparity.
//	Averages the disparity over the tracked left blob, projects the blob
//	center to 3D, points the head there, then (after a pause) tells the
//	blob finder it may publish again.
//
void disparityCallback(const DisparityImageConstPtr& disparity) {
	ROS_INFO("disparityCallback");
	if ( !lBlobRecieved ) return;

	// Average every valid disparity inside the blob's bounding box;
	// hasDisparityValue leaves each value in the global `val`.
	float avgDisparity = 0.0;
	int count = 0;
	for ( unsigned int x = lBlob.left; x < lBlob.right; x++ ) {
		for ( unsigned int y = lBlob.top; y < lBlob.bottom; y++ ) {
			if ( hasDisparityValue(disparity,x,y) ) {
				avgDisparity += val;
				count += 1;
			}
		}
	}

	if ( count > 0 ) {
		float x1, y1, z1;
		avgDisparity = avgDisparity/count;
		ROS_INFO("AvgDisparity:  %f",avgDisparity);
		projectTo3dLeft(lBlob.x, lBlob.y, avgDisparity, disparity, x1,y1,z1);
		ROS_INFO("\tLeft Look At:  (%f, %f, %f)",x1,y1,z1);
		// The right-camera projection was removed: it read rBlob, which is
		// not guaranteed to be set here (rBlobRecieved is never checked),
		// and its outputs were unused.
		lookAtPoint(x1,y1,z1);
	}

	// FIXME(review): this sleep blocks the (single-threaded) callback queue
	// for 10 seconds; a timer would be friendlier.
	ros::Duration(10.0).sleep();

	// Let the blob finder resume publishing.
	std_msgs::Bool publishBlobs;
	publishBlobs.data = true;
	blobCanFindMore.publish(publishBlobs);

	lBlobRecieved = false;
}

//
// Attempting to use camModel to do the above math for me
//	Always returned a point of (0,0,0) or (-0,-0,-0) thus useless
//
/*	if ( !canProcessNextImage || !lBlobRecieved) return;
	canProcessNextImage = false;
	ROS_INFO("Processing Callback");
	ROS_INFO("Recieved lBlob");

	cv::Point2d left( lBlob.x, lBlob.y);
	cv::Point3d xyz;
	float dispAtPixel = disparity->image.data[int(lBlob.x+lBlob.y*disparity->image.width)];

	int count = 0;
	float avgX = 0.0, avgY = 0.0, avgZ = 0.0;
	for ( int x = lBlob.left; x < lBlob.right; x++ ) {
		for ( int y = lBlob.top; y < lBlob.bottom; y++ ) {
			camModel.projectDisparityTo3d(left,dispAtPixel,xyz);
			if ( xyz.x == xyz.x && xyz.y == xyz.y && xyz.z == xyz.z ) {
				avgX += xyz.x;
				avgY += xyz.y;
				avgZ += xyz.z;
				count += 1;
			}
		}
	}
	if ( count > 0 ) {
		lookAtPoint(avgX/count, avgY/count, avgZ/count);		
	} else {
		ROS_INFO("All Points are NaN!!!");
	}


	lBlobRecieved = false;
	canProcessNextImage = true;
}
*/

//
//	Synchronized disparity + blob callback (registration is currently
//	commented out in main).  Picks the largest blob in each camera image,
//	averages disparity over the left blob, projects its center to 3D and
//	points the head there.
//
void disparityAndBlobCallback(const DisparityImageConstPtr& disparity, const cmvisionMod::BlobsConstPtr& leftBlob, const cmvisionMod::BlobsConstPtr& rightBlob)
{
	ROS_ERROR("Disparity And Blob Callback");

	if (leftBlob->blob_count == 0 || rightBlob->blob_count == 0 || !canProcessNextImage) {
		return;
	}

	// Largest-area blob in each image.  The original fused loop compared the
	// right blobs against leftBlob->blobs[i].area (copy-paste bug) and could
	// index leftBlob->blobs out of range when the right image had more blobs.
	unsigned int bestIndexL = 0;
	for ( unsigned int i = 1; i < leftBlob->blob_count; i++ ) {
		if (leftBlob->blobs[bestIndexL].area < leftBlob->blobs[i].area) {
			bestIndexL = i;
		}
	}
	unsigned int bestIndexR = 0;
	for ( unsigned int i = 1; i < rightBlob->blob_count; i++ ) {
		if (rightBlob->blobs[bestIndexR].area < rightBlob->blobs[i].area) {
			bestIndexR = i;
		}
	}

	cmvisionMod::Blob bestBlobLeft = leftBlob->blobs[bestIndexL];
	// NOTE: the right blob (blobs[bestIndexR]) is selected but not yet used;
	// kept for the commented-out right-camera projection below.

	// Average the valid disparities inside the left blob's bounding box;
	// hasDisparityValue leaves each value in the global `val`.
	float avgDisparity = 0.0;
	int count = 0;
	for ( unsigned int x = bestBlobLeft.left; x < bestBlobLeft.right; x++ ) {
		for ( unsigned int y = bestBlobLeft.top; y < bestBlobLeft.bottom; y++ ) {
			if ( hasDisparityValue(disparity,x,y) ) {
				avgDisparity += val;
				count += 1;
			}
		}
	}

	if ( count > 0 ) {
		float x, y, z;
		avgDisparity = avgDisparity/count;
		projectTo3dLeft(bestBlobLeft.x, bestBlobLeft.y, avgDisparity, disparity, x,y,z);
		ROS_ERROR("\tLeft Look At:  (%f, %f, %f)",x,y,z);
		//projectTo3dRight(rightBlob->blobs[bestIndexR].x, rightBlob->blobs[bestIndexR].y, avgDisparity, disparity, x,y,z);
		lookAtPoint(x,y,z);
	} else {
		ROS_ERROR("No Disparity Points Found");
	}
}


// Node entry point: wires up the PointHead action client, the synchronized
// camera-info subscribers, the disparity callback and the cmvision blob
// callbacks, then spins forever.
int main(int argc, char** argv)
{
	// 
	// Must have cmvisionMod running
	// rosrun cmvisionMod cmvisionMod image:="/wide_stereo/left/image_rect_color"
	// ./bin/followBlob runs this
	lBlobRecieved = false;
	rBlobRecieved = false;

	canProcessNextImage = true;
	ros::init(argc, argv, "sub_pcl");
	ros::NodeHandle nh;

	// Create pointHeadClient (typedef'd above so it's a little cleaner)
	pointHead = new PointHeadClient("/head_traj_controller/point_head_action",true);
	if ( !pointHead->waitForServer(ros::Duration(5.0)) ) {
		ROS_WARN("PointHeadClient not running");
	}

	// Subscribe to disparity and camera info
	ros::Subscriber disparity = nh.subscribe<DisparityImage>("/wide_stereo/disparity",1,disparityCallback);
	message_filters::Subscriber<CameraInfo> leftCamInfo(nh, "/wide_stereo/left/camera_info", 1);
	message_filters::Subscriber<CameraInfo> rightCamInfo(nh, "/wide_stereo/right/camera_info", 1);

	// Approximate-time-sync the two camera_info streams so camInfoCallback
	// always receives a matched left/right pair.  The synchronizer and the
	// filter subscribers are locals, so they must outlive ros::spin() below.
	//TimeSynchronizer<CameraInfo, CameraInfo> sync(leftCamInfo, leftCamInfo, 5);
	typedef sync_policies::ApproximateTime<CameraInfo, CameraInfo> camInfoPolicy;
	Synchronizer<camInfoPolicy> sync(camInfoPolicy(5), leftCamInfo, rightCamInfo);
	sync.registerCallback(boost::bind(&camInfoCallback,_1, _2));

	//message_filters::Subscriber<DisparityImage> disp(nh, "/wide_stereo/disparity",1);
	//message_filters::Subscriber<cmvisionMod::Blobs> lBlobSub(nh, "blobsLeft",1);
	//message_filters::Subscriber<cmvisionMod::Blobs> rBlobSub(nh, "blobsRight",1);

	//TimeSynchronizer<DisparityImage, cmvisionMod::Blobs, cmvisionMod::Blobs> sync2(disp, lBlob, rBlob, 100);
	//typedef sync_policies::ApproximateTime<DisparityImage, cmvisionMod::Blobs, cmvisionMod::Blobs> disparityPolicy;
	//Synchronizer<disparityPolicy> sync2(disparityPolicy(15), disp, lBlobSub, rBlobSub);
	//sync2.registerCallback(boost::bind(&disparityAndBlobCallback,_1,_2,_3));

	// Subscribe to cmVision Blobs.  These topics were defined in the .launch file in
	//    FollowBlobs/launch/cmvision.launch
	ros::Subscriber blobDetectorL = nh.subscribe<cmvisionMod::Blobs> ("blobsLeft",1,lBlobCallback);
	ros::Subscriber blobDetectorR = nh.subscribe<cmvisionMod::Blobs> ("blobsRight",1,rBlobCallback);

	// Originally planned to timesync the images and then have it receive the blobs back
	//  	Wasn't happy so I am not doing that anymore.  Plus it probably saves a bit of bandwidth
	//lPublisher = it.advertise("/look_for_blobs_left",1);
	//rPublisher = it.advertise("/look_for_blobs_right",1);
    blobCanFindMore = nh.advertise<std_msgs::Bool>("/can_find_next_blob",1);

	// PointCloud2 Callback.  This works but slow.  Disparity Callback is called 4-6 times more frequently
	// Assume it's because there is a ton of math that goes into this
	//ros::Subscriber pointCloud = nh.subscribe<sensor_msgs::PointCloud2> ("wide_stereo/points2",1,callback);
	
	
	// Debugging purposes
	//pub = it.advertise("out_image_base_topic", 1);

	//nh.advertise("blobDetectedPoint",1);
	// Single-threaded spinner: all callbacks above run sequentially.
	ros::spin();
}
