// Includes:
// --------
#include "geometric_image_filter/geometric_image_filter.h"

#include <algorithm>
#include <cmath>
#include <limits>

// Initialize static variables:
// ---------------------------
// Canonical "up" direction of the ground plane, expressed in the world frame
const tf::Vector3 GeometricImageFilter::groundNormalWorld(0, 0, 1);
// Zero translation, used to strip the translational part from a transform
const tf::Vector3 GeometricImageFilter::noTranslation(0, 0, 0);

// ---------
// Callbacks|
// ------------------------------------------------------------------------------
void GeometricImageFilter::imageCallback(const sensor_msgs::Image::ConstPtr& msg)
// ------------------------------------------------------------------------------
// Callback that receives (ROS) image messages; stores the latest one for
// getMessages() to pick up
{   
	this->imageMsg = msg;
}

// ---------------------------------------------------------------------------------------
void GeometricImageFilter::cameraInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& msg)
// ---------------------------------------------------------------------------------------
// Callback that receives the camera information (intrinsics) message; stores the
// latest one for getMessages() to pick up
{
	this->cameraInfoMsg = msg;
}

// -----------------------------------------------------------------------------------
void GeometricImageFilter::depthCallback(const sensor_msgs::PointCloud2::ConstPtr& msg)
// -----------------------------------------------------------------------------------
// Callback to receive and store the latest depth (point cloud) message
{
	this->depthMsg = msg;
}

// -------
// Helpers|
// -------------------------------------------------------------------------------
void GeometricImageFilter::printObjectRects(vector<TD_DetectionPair>& objectRects)
// -------------------------------------------------------------------------------
// Logs every detection in the given vector (or a placeholder when it is empty)
{
	if(objectRects.size() > 0)
	{
		// NOTE: iterate the vector passed in as the argument -- previously this
		// iterated this->objectRects, silently ignoring the parameter
		for(vector<TD_DetectionPair>::iterator it = objectRects.begin(); it != objectRects.end(); ++it)
		{
			ROS_INFO(" + Face: (x: %i, y: %i, w: %i) | Tracklet #%i", it->first.x, it->first.y, it->first.width, it->second->getId());
		}
	}else
	{
		ROS_INFO(" + Nothing detected");
	}
}

// --------------------------------------------------------------------------
void GeometricImageFilter::printWindowSizeInfo(map<int, double>& windowSizes)
// --------------------------------------------------------------------------
// Logs every configured detection window size alongside its classifier scale
{
	ROS_INFO(" + Window Sizes:");
	map<int, double>::const_iterator entry = windowSizes.begin();
	for(; entry != windowSizes.end(); ++entry)
	{
		ROS_INFO("   - Window size: %i | Window Scale: %f", entry->first, entry->second);
	}
}

// -------------------------------------------------------------------------------------------------------------------------------
void GeometricImageFilter::drawTracklets(TD_TrackletVector& tracklets, ros::Time t, cv::Mat& img, cv::Scalar color, int thickness)
// -------------------------------------------------------------------------------------------------------------------------------
// Outlines the region of interest of every tracklet on the given image
{
	TD_TrackletVector::iterator trackIt;
	for(trackIt = tracklets.begin(); trackIt != tracklets.end(); ++trackIt)
	{
		// Ask the tracklet for its ROI in this camera frame at time t
		cv::Rect roi = (*trackIt)->getROI(this->cameraFrame, t);

		// Draw the ROI's bounding rectangle
		cv::Point topLeft(roi.x, roi.y);
		cv::Point bottomRight(roi.x + roi.width, roi.y + roi.height);
		cv::rectangle(img, topLeft, bottomRight, color, thickness);
	}
}

// --------------------------------------------------------------------------------------------------------------------------------------------
void GeometricImageFilter::drawRectangles(vector<TD_DetectionPair>& rects, cv::Mat& img, cv::Scalar color, int thickness, bool drawAnnotations)
// --------------------------------------------------------------------------------------------------------------------------------------------
// Draws every detection rectangle onto the image; when drawAnnotations is set,
// overlays the (non-zero) tracklet id just below each rectangle's top-left corner
{
	// Draw the rectangles (it->first is the detection's cv::Rect)
	for(vector<TD_DetectionPair>::iterator it = rects.begin(); it != rects.end(); ++ it)
	{	
		// Rectangle
		cv::rectangle(img, cv::Point(it->first.x, it->first.y), 
		              cv::Point(it->first.x + it->first.width, it->first.y + it->first.height),
		  			  color, thickness);
		
		// Tracking annotation (it->second is the associated tracklet)
		if(drawAnnotations)
		{
			// An id of 0 is treated as "no tracklet" and gets no annotation
			int trackletId = it->second->getId();
			if(trackletId)
			{
				// Format the id as "#<id>"
				stringstream ss; 
				ss << "#" << trackletId;
				string annotation(ss.str());
				
				// Legacy C API: wrap the cv::Mat header (no pixel copy) so we can
				// use cvPutText with the preloaded font.
				// NOTE(review): this->font is only initialized in the constructor
				// when displayLocal or saveLocal is set -- confirm callers respect that
				IplImage iplImg = img;
				CvPoint origin = cvPoint(it->first.x, it->first.y + 10);
				cvPutText(&iplImg, annotation.c_str(), origin, &(this->font), CV_RGB(255,0,255));
			}
		}
	}
}

// ----------------------------------------------------------------------------------------------------------
geometry_msgs::Point GeometricImageFilter::centersToPoint(vector<cv::Point>& centers, TD_PointCloud& ptCloud)
// ----------------------------------------------------------------------------------------------------------
// Averages the given pixel centers and returns the 3D point-cloud point at the
// mean pixel. Returns a NaN point when the input vector is empty.
{
	geometry_msgs::Point point;

	// Guard: an empty vector would otherwise cause a divide-by-zero below
	if(centers.empty())
	{
		point.x = point.y = point.z = std::numeric_limits<double>::quiet_NaN();
		return point;
	}

	int size = centers.size();
	double x = 0;
	double y = 0;
	
	// Sum the pixel coordinates
	for(vector<cv::Point>::iterator it = centers.begin(); it != centers.end(); ++it)
	{
		x += it->x; 
		y += it->y; 
	}

	// The mean pixel coordinate
	int u = (int)(x / size);
	int v = (int)(y / size);

	// Look up the 3D point at the mean pixel
	// NOTE(review): assumes (u, v) is inside the cloud's bounds -- TODO confirm
	pcl::PointXYZRGB pt = ptCloud(u, v);

	point.x = pt.x;
	point.y = pt.y;
	point.z = pt.z;
		
	return point;
}

// ----------------------------------------------------
cv::Point GeometricImageFilter::makePoint(int x, int y)
// ----------------------------------------------------
// Convenience helper that builds a cv::Point from its two components
{
	return cv::Point(x, y);
}

// ----
// Core|
// -------------------------------------------------------------------------------
GeometricImageFilter::FilterMessages GeometricImageFilter::getMessages(bool block)
// -------------------------------------------------------------------------------
// This method compiles all of the necessary messages into a struct, blocking as
// necessary. The result is only marked valid when the image, camera info, camera
// pose, and depth messages are all present and "current" (i.e. younger than
// MESSAGE_BUFFER_DURATION_S).
{
	// The struct of requested messages
	FilterMessages msgs;

	while(true)
	{
		// Request the next batch of messages from the bag player
		bag_player::Next next;
		if(!this->nextClient.call(next))
		{
			// Don't retry forever against a dead service once ROS is shutting
			// down -- previously this spun indefinitely on shutdown
			if(!ros::ok())
			{
				break;
			}
			continue;
		}

		// Messages initially invalid 
		msgs.valid = false;

		// Check for an image (set by imageCallback)
		if(this->imageMsg.get() != NULL)
		{
			// Check that the image message is "current"
			ros::Time now = ros::Time::now();
			ros::Time imageStamp = this->imageMsg->header.stamp;
			ros::Duration imageDuration = now - imageStamp;
			if (imageDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
			{
				// Store the image message
				msgs.imageMsg = this->imageMsg;	
				
				// Check for a current camera info message
				if(this->cameraInfoMsg.get() != NULL)
				{
					ros::Time infoStamp = this->cameraInfoMsg->header.stamp;
					ros::Duration infoDuration = now - infoStamp;
					if (infoDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
					{
						// Store the camera info message
						msgs.cameraInfoMsg = this->cameraInfoMsg;

						// Get the camera's pose (may be NULL while TF warms up)
						boost::shared_ptr<CameraPose> cameraPose = this->getCameraPose(this->baseFrame, msgs.cameraInfoMsg->header.frame_id);
						msgs.cameraPose = cameraPose;

						// Check for current depth information; only then is the
						// whole message set usable
						if (cameraPose != NULL && this->depthMsg.get() != NULL)
						{
							ros::Time depthStamp = this->depthMsg->header.stamp;
							ros::Duration depthDuration = now - depthStamp;
							if (depthDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
							{
								// Store the depth message
								msgs.depthMsg = this->depthMsg;
								msgs.valid = true;
							}
						}
					} else{
						ROS_INFO_COND(DEBUGGING, "Camera info message not current");
					}
				}
			}else{
				ROS_INFO_COND(DEBUGGING, "Image message not current");
			}
		}

		// Stop when not blocking, when we have a valid set, or on shutdown
		if(!block || msgs.valid || !ros::ok())
		{
			break;  // All done
		}else
		{
			ros::spinOnce(); // Ensure ROS updates while we block
		}
	}

	return msgs;
}

// --------------------------------------------------------------------------------------------------------------------------
boost::shared_ptr<GeometricImageFilter::CameraPose> GeometricImageFilter::getCameraPose(string baseFrame, string cameraFrame)
// --------------------------------------------------------------------------------------------------------------------------
// Uses TF information to get the camera's current pose (in the real world).
// Returns a NULL pointer when the transforms are not available yet.
{
	boost::shared_ptr<CameraPose> cameraPose;

	try
	{
		// Get the transform from the camera --> base frame (for the camera's origin)
		tf::StampedTransform cbTransform;
		this->transformListener->lookupTransform(baseFrame, cameraFrame, ros::Time(0), cbTransform);
		tf::Vector3 cbTranslation = cbTransform.getOrigin();

		// Get the transform from the base --> camera frame, rotation only
		tf::StampedTransform bcTransform;
		this->transformListener->lookupTransform(cameraFrame, baseFrame, ros::Time(0), bcTransform);
		bcTransform.setOrigin(noTranslation);	

		// Apply the base --> camera rotation to the canonical ground normal (in the world frame)
		tf::Vector3 groundNormalCamera = bcTransform * (this->groundNormalWorld);

		// Build the struct
		cameraPose = boost::shared_ptr<CameraPose>(new CameraPose);
		cameraPose->cameraOrigin = cbTranslation;
		cameraPose->groundNormal = groundNormalCamera;

	}catch(const tf::TransformException& ex) // catch by const reference, not by value
	{
		// Just pass... this exception gets tripped when the node is starting up
		// and the transforms have not been published yet
	}

	return cameraPose;
}

// ------------------------------------------------------------------------------------------------------------------------------------------------------
int GeometricImageFilter::computeMinWindowSize(sensor_msgs::CameraInfo::ConstPtr cameraInfo, const double minAllowedSizeM, const double maxAllowedDepthM)
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// Calculates the minimum window size (in pixels) based on physical parameters:
// the smallest object of interest viewed at the farthest allowed depth
{
	// Create the camera model from the intrinsics message
	image_geometry::PinholeCameraModel cameraModel;
	cameraModel.fromCameraInfo(cameraInfo);

	// Two 3D points separated by the minimum object size, at the maximum depth
	cv::Point3d pt3A(0, 0, maxAllowedDepthM);
	cv::Point3d pt3B(minAllowedSizeM, 0, maxAllowedDepthM);

	// Project the two points into image space
	cv::Point2d pt2A, pt2B;
	cameraModel.project3dToPixel(pt3A, pt2A);
	cameraModel.project3dToPixel(pt3B, pt2B);
	
	// Return the X-distance between them as the minimum window size.
	// std::fabs keeps the computation in floating point; a bare abs() could
	// resolve to the int overload and truncate before taking the absolute value.
	return (int)std::fabs(pt2A.x - pt2B.x);
}

// -----------------------------------------------------------------------------------------------------------------------------------
void GeometricImageFilter::computeWindowSizes(map<int, double>& windowSizes, int minWindowSize, int maxWindowSize, double scaleFactor)
// -----------------------------------------------------------------------------------------------------------------------------------
// Computes all of the window sizes using the minimum/maximum window sizes & a
// scale factor. Each entry maps a pixel window size to its scale relative to the
// size the cascade was trained at.
{
	// Guard: a scale factor <= 1.0 would make the loop below never terminate
	if(scaleFactor <= 1.0)
	{
		ROS_WARN("computeWindowSizes: scale factor must be > 1.0 (got %f); no window sizes computed", scaleFactor);
		return;
	}

	// Initializations 
	double mws = (double)maxWindowSize;
	// The window size (in pixels) the cascade was trained at
	double trainedSize = (double)this->classifier.oldCascade->orig_window_size.height;

	// Grow the window geometrically from the minimum up to (but excluding) the maximum
	for(double ws = (double)minWindowSize; ws < mws; ws *= scaleFactor)
	{
		// Compute the (integer) window size and the scale factor
		int w = (int)ws;
		double sf = ((int)ws / trainedSize);
		windowSizes[w] = sf;
	}
}

// --------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeSizeConstraints(map<int, Interval>& dS, double fX,
												  double minWorldSize, double maxWorldSize, 
												  int minWindowSize, int maxWindowSize)
// --------------------------------------------------------------------------------------------
// Computes the per-window-size depth intervals used in the geometric filtering:
// for each pixel window size w, the feasible depth range is [fX*minSize/w, fX*maxSize/w]
{
	// Start from a clean slate
	dS.clear();	

	// Pre-multiply the focal length by the world-size bounds
	double fxSMin = fX * minWorldSize;
	double fxSMax = fX * maxWorldSize;

	// One depth interval per candidate window size
	for(int windowSize = minWindowSize; windowSize < maxWindowSize; ++windowSize)
	{
		dS[windowSize] = Interval(fxSMin / windowSize, fxSMax / windowSize);
	}		
	return true;
}

// -----------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeVProjection(vector<double>& out, const boost::array<double, 9> k, int height)
// -----------------------------------------------------------------------------------------------------------
// Computes, for every image row v, a projection ratio derived from the inverse
// camera intrinsics; used later when calculating the geometric constraints
{
	// Start from an empty output vector
	out.clear();

	// Rebuild the flat intrinsics array K as a 3x3 matrix and invert it
	btMatrix3x3 intrinsics(k[0], k[1], k[2],
						   k[3], k[4], k[5],
						   k[6], k[7], k[8]);
	btMatrix3x3 kInv = intrinsics.inverse();
	
	// One projected value per image row
	for(int v = 0; v < height; ++v)
	{
		double num = (kInv[1][1] * v) + kInv[1][2];
		double den = (kInv[2][1] * v) + kInv[2][2];
		out.push_back(num / den);
	}

	return true;
}

// ------------------------------------------------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeHeightConstraints(map<int, Interval>& dH, vector<double>& rY, FilterMessages& messages, double minHeightM, double maxHeightM)
// ------------------------------------------------------------------------------------------------------------------------------------------------------------
// Computes the per-image-row depth intervals implied by the feasible object
// heights and the camera's height above the ground plane
{
	// Start from a clean slate
	dH.clear();

	// Height of the camera above the ground, and the offsets to the feasible bounds
	double camHeight = messages.cameraPose->getHeight();
	double minDelta = minHeightM - camHeight;
	double maxDelta = maxHeightM - camHeight;
	
	// The ground plane's normal vector (in the camera frame)
	tf::Vector3& normal = messages.cameraPose->groundNormal;
	
	// One depth interval per image row
	int rows = messages.cameraInfoMsg->height;
	for(int v = 0; v < rows; ++v)
	{
		double denom = (normal[1] * rY[v]) + normal[2];
		double boundA = minDelta / denom;
		double boundB = maxDelta / denom;
	
		// Order the endpoints -- depending on the sign of the denominator
		// either bound may be the smaller one
		dH[v] = Interval(min(boundA, boundB), max(boundA, boundB));
	}

	return true;
}

// ----------------------------------------------------------------------------------------
bool GeometricImageFilter::filterDepthConstraint(Interval& depthConstraint, double maxDepth)
// ----------------------------------------------------------------------------------------
// Returns true when the interval should be filtered out: it is empty, lies
// entirely behind the camera, or starts beyond the maximum usable depth
{
	if(depthConstraint.isEmpty())
	{
		return true; // Degenerate interval
	}
	if(depthConstraint.getHighPoint() < 0)
	{
		return true; // Entirely negative depth
	}
	return depthConstraint.getLowPoint() > maxDepth; // Beyond the usable range
}

// ----------------------------------------------------------------------------------------------------
bool GeometricImageFilter::filterDepthConstraints(map<int, Interval>& depthConstraints, double maxDepth)
// ----------------------------------------------------------------------------------------------------
// Applies the filter to all of the depth constraints, erasing the rejected ones
{
	// Loop through the elements of the map (iterator advanced inside the body)
	for(map<int, Interval>::iterator it = depthConstraints.begin(); it != depthConstraints.end(); )
	{
		if(filterDepthConstraint(it->second, maxDepth))
		{
			// erase(it++): post-increment hands erase() a copy of the iterator
			// while 'it' has already moved on -- erase(it) followed by ++it
			// would increment an invalidated iterator (undefined behavior)
			depthConstraints.erase(it++);
		}
		else
		{
			++it;
		}
	}
	return true;
}


// -------------------------------------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::combineConstraints(map<pair<int,int>, Interval>& dC, map<int, Interval>& dS, map<int, Interval>& dH, unsigned int height)
// -------------------------------------------------------------------------------------------------------------------------------------------------
// Combines the per-row height constraints with the per-window-size constraints:
// for every (row, window size) pair the two intervals are intersected, and only
// intersections that survive the depth filter are kept. (The 'height' parameter
// is currently unused.)
{
	// Start from a clean slate
	dC.clear();

	map<int, Interval>::iterator rowIt;
	map<int, Interval>::iterator sizeIt;

	// Every image row...
	for(rowIt = dH.begin(); rowIt != dH.end(); ++rowIt)
	{
		// ...crossed with every window size
		for(sizeIt = dS.begin(); sizeIt != dS.end(); ++sizeIt)
		{
			// Intersect the two depth ranges
			Interval combined = rowIt->second.intersect(sizeIt->second);

			// Keep only intervals that pass the depth filter
			if(!this->filterDepthConstraint(combined, this->maxDepthM))
			{
				dC[pair<int,int>(rowIt->first, sizeIt->first)] = combined;
			}
		}
	}
	
	return true;
}

// ---------------------------------------------------------------------------------------
void GeometricImageFilter::validateDetections(vector<TD_DetectionPair>& objects, 
											  TD_PointCloud& ptCloud, 
											  map<pair<int,int>, Interval>& dC, 
											  vector<TD_DetectionPair>* falsePositivesOut)
// ---------------------------------------------------------------------------------------
// Double-checks each detection against the combined geometric constraints: the
// median depth inside the rectangle must fall inside the (row, window size)
// interval. Rejected detections are erased (and optionally collected).
{
	// Loop through & double check each of the detections
	for(vector<TD_DetectionPair>::iterator it = objects.begin(); it != objects.end(); /* NOTE: not incrementing */ )
	{
		// Pull out the rectangle's components
		int x = it->first.x;
		int y = it->first.y;
		int w = it->first.width; 
		int h = it->first.height; 

		// First collect the depths in the central region of the rectangle
		// (the outer quarter on each side is skipped to avoid background pixels)
		vector<double> depths; 
		for(int v = y + (h/4); v < y + (h - (h / 4)); ++v)
		{
			for(int u = x + (w / 4); u < x + (w - (w/4)); ++u)
			{
				pcl::PointXYZRGB pt = ptCloud(u,v);
				depths.push_back(pt.z);
			}
		}

		// Median depth. Guard against an empty sample (very small rectangles):
		// previously this dereferenced past-the-end of an empty vector. An empty
		// sample is treated as NaN, which rejects the detection below.
		// NOTE(review): depths may contain NaNs, which technically violates
		// sort's ordering requirements -- consider filtering them out first
		double medianDepth = std::numeric_limits<double>::quiet_NaN();
		if(!depths.empty())
		{
			sort(depths.begin(), depths.end());
			medianDepth = *(depths.begin() + (depths.size() / 2));
		}

		// Calculate the VW pair at the center of the rectangle and get the interval
		pair<int, int> vw(y + (h / 2), w);
		map<pair<int,int>, Interval>::iterator vwIt = dC.find(vw);

		// If the median depth is NaN or does not fall in the interval then eliminate the detection
		if(isnan(medianDepth) || vwIt == dC.end() || !(vwIt->second.contains(medianDepth)))
		{
			// If we're collecting false positives then add this one to the output vector
			if(falsePositivesOut != NULL)
			{
				falsePositivesOut->push_back(*it);
			}
			
			// Optional debugging information
			if(DEBUGGING)
			{
				ROS_INFO(" + Eliminating false positive: [x = %i, y = %i, w = %i, h = %i], d = %f, ", x, y, w, h, medianDepth);
				if(isnan(medianDepth)) ROS_INFO("   - Median depth is NaN");
				else if(vwIt == dC.end()) ROS_INFO("   - No v,w constraint (v = %i , w = %i)", vw.first, vw.second);
				else ROS_INFO("   - Outside of geometric constraint interval: [%f, %f]", vwIt->second.getLowPoint(), vwIt->second.getHighPoint());
			}

			// Actually remove the rectangle from the vector (erase returns the
			// next valid iterator)
			it = objects.erase(it);
		}else
		{
			ROS_INFO_COND(DEBUGGING, " + Face confirmed: [x = %i, y = %i, w = %i, h = %i], d = %f, [%f, %f]", x, y, w, h, medianDepth, 
						  vwIt->second.getLowPoint(), vwIt->second.getHighPoint());
			++it;
		}
	}
}

// ---------------------------------------------------------------------
void GeometricImageFilter::publishDetections(vector<TD_DetectionPair>& objects, 
											 TD_PointCloud& ptCloud, 
											 string& cameraFrame, 
											 ros::Time t)
// ---------------------------------------------------------------------
// Publishes one PositionMeasurement per detection: the 3D point-cloud point at
// the center pixel of each detection rectangle, stamped with the given time/frame
{	
	// Create the message (the header is shared by every detection in this batch)
	people_msgs::PositionMeasurement msg;
	msg.header.stamp = t;
	msg.header.frame_id = cameraFrame;

	// Loop through the detections
	for(vector<TD_DetectionPair>::iterator it = objects.begin(); it != objects.end(); ++it)
	{
		// Get the 3D point at the center pixel of the rectangle (its coordinates
		// may be NaN when there is no depth data at that pixel)
		int u = (int)(it->first.x + (it->first.width/2.0));
		int v = (int)(it->first.y + (it->first.height/2.0));
		pcl::PointXYZRGB pt = ptCloud(u, v);

		// Create a stamped point
		msg.pos.x = pt.x;
		msg.pos.y = pt.y;
		msg.pos.z = pt.z;

		// TODO: There are still some fields that need to be filled in:
		// ------------------------------------------------------------
		// + Header/sequence
		// + Reliability
		// + Name 
		// + Object id
		// ...
		// Note: one message is published per detection (publish is inside the loop)
		this->positionPublisher.publish(msg);
	}
}

// ---------------------------------------------------------------------
void GeometricImageFilter::publishAnnotations(vector<TD_DetectionPair>& objects, 
											 TD_PointCloud& ptCloud, 
											 string& cameraFrame, 
											 ros::Time t)
// ---------------------------------------------------------------------
// Publishes a single AnnotationArray containing one annotation (the 2D
// bounding box) per detection. (ptCloud is currently unused here.)
{	
	// The array message; its header is shared by every annotation
	annotations::AnnotationArray msg;
	msg.header.stamp = t;
	msg.header.frame_id = cameraFrame;

	// One annotation per detection
	vector<TD_DetectionPair>::iterator detIt;
	for(detIt = objects.begin(); detIt != objects.end(); ++detIt)
	{
		const cv::Rect& rect = detIt->first;

		// Copy the bounding box into a fresh annotation
		annotations::Annotation a;
		a.header = msg.header;
		a.x = rect.x;
		a.y = rect.y;
		a.width = rect.width;
		a.height = rect.height;

		msg.annotations.push_back(a);
		
		// TODO: There are still some fields that need to be filled in:
		// ------------------------------------------------------------
		// + Header/sequence
		// + Reliability
		// + Name 
		// + Object id
		// ...
	}

	// Publish the annotation array
	this->annotationPublisher.publish(msg);
}


// -----------------------------------------------------------------------
void GeometricImageFilter::objectDetection(vector<TD_DetectionPair>& objects,
						   cv::Mat& img, 
						   string& cameraFrame,
						   ros::Time cameraTime,
						   TD_PointCloud& ptCloud,
						   TD_TrackletVector& tracklets,
						   map<pair<int,int>, Interval>& dC,
						   int groupThreshold,
						   double eps)
// -----------------------------------------------------------------------
// Perform the actual object detection using the image, the depth information, and
// the physical constraints. The Haar cascade is only run at pixels whose depth
// falls inside the combined (row, window size) constraint AND inside a tracklet's
// region of interest. Detections are grouped per tracklet and appended to
// 'objects'; tracklets are updated and may be erased when they expire.
{
	// OpenCV Declarations (legacy C-API cascade, loaded in the constructor)
	CvHaarClassifierCascade* cascade = this->classifier.oldCascade;
			
	// Calculate the integral image (sum/sqSum/tiltedSum are instance buffers)
	CvMat cImage = img;
	cvIntegral(&cImage, this->sum, this->sqSum, this->tiltedSum);

	// Setup the per-tracklet object vectors (for tracklet updating).
	// NOTE(review): these maps are keyed by iterators into 'tracklets'; this is
	// only safe because the vector is not modified until the grouping loop below
	// -------------------------------------------------------------
	map<TD_TrackletVector::iterator, vector<cv::Rect> > objectRects;
	map<TD_TrackletVector::iterator, vector<cv::Point> > objectCenters;
	map<TD_TrackletVector::iterator, cv::Rect> roi;
	for(TD_TrackletVector::iterator trackIt = tracklets.begin(); trackIt != tracklets.end(); ++trackIt)
	{
		// Create the object vector
		vector<cv::Rect> objectRectVector;
		objectRects[trackIt] = objectRectVector;

		// Object center vector
		vector<cv::Point> objectCenterVector;
		objectCenters[trackIt] = objectCenterVector;

		// Get & store the region of interest (cached so it is looked up once)
		roi[trackIt] = (*trackIt)->getROI(cameraFrame, cameraTime); 
	}

	// Go ahead and allocate some frequently used variables:
	// ----------------------------------------------------
	CvPoint centerPoint;
	pcl::PointXYZRGB ptCloudPoint;
	int roiX, roiY, roiW, roiH;
	pair<int, int> vw;
	map<pair<int,int>, Interval>::iterator vwIt;
	cv::Rect objRect;

	// Loop through the window sizes
	for(map<int, double>::iterator it = this->windowSizes.begin(); it != this->windowSizes.end(); ++it)
	{
		// If we're generating false color images, start a fresh image for this window size
#ifdef FALSE_COLOR
			cv::Mat falseColorImage;
			if(this->generateFalseColorImages)
			{
				falseColorImage = cv::Mat::zeros(img.rows, img.cols, CV_8UC3);
			}
#endif

		// Window size & scale (key = pixel size, value = cascade scale)
		int w = it->first; 
		int halfW = w/2;
		double wScale = it->second;

		// Set the scale in the HaarClassifierCascade
		cvSetImagesForHaarClassifierCascade(cascade, sum, sqSum, tiltedSum, wScale);

		// Loop through the tracklets
		for(TD_TrackletVector::iterator trackIt = tracklets.begin(); trackIt != tracklets.end(); ++trackIt)
		{
			// Alias the tracklet ROI components
			cv::Rect& trackItROI = roi[trackIt];
			roiX = (int)trackItROI.x;
			roiY = (int)trackItROI.y;
			roiW = (int)trackItROI.width;
			roiH = (int)trackItROI.height;

			// Loop through every possible window center vertical component
			// (kept inside the ROI so the whole window fits)
			for(int v = roiY + halfW; v < (roiY + roiH) - halfW; ++v)
			{
				// Lookup the combined depth constraint at v,w
				vw.first = v;
				vw.second = w;
				vwIt = dC.find(vw);
				
				// Make sure we HAVE a combined depth constraint at v,w... otherwise the entire row can be safely eliminated
				if(vwIt != dC.end())
				{
					// Alias the depth interval
					Interval& vwDI = vwIt->second;

					// Loop through every possible window center horizontal component
					for(int u = roiX + halfW; u < (roiX + roiW) - halfW; ++u)
					{
						// Get the depth at u,v
						ptCloudPoint = ptCloud(u,v);

						// Make sure we have depth data
						if(!isnan(ptCloudPoint.z))
						{
							// Check to see if the depth is in the range defined by the mask & the tracklet
							if(vwDI.contains(ptCloudPoint.z) && (*trackIt)->contains(ptCloudPoint))
							{
								// Construct the (top-left) point at which we're going to run the detector
								centerPoint.x = u - halfW;
								centerPoint.y = v - halfW;

#ifdef FALSE_COLOR
								// Ran the detector -- white
								cv::rectangle(falseColorImage, cv::Point(u,v), cv::Point(u,v), cv::Scalar(255,255,255));
#endif

								// Actually run the detector (positive return = detection)
								if(cvRunHaarClassifierCascade(cascade, centerPoint) > 0)
								{
#ifdef FALSE_COLOR
									// Indicate detection
									cv::rectangle(falseColorImage, cv::Point(u,v), cv::Point(u,v), cv::Scalar(0,255,255));
#endif

									// Then we have a detection of the object... store the (square) rectangle
									objRect.x = centerPoint.x;
									objRect.y = centerPoint.y;
									objRect.width = w;
									objRect.height = w;
									objectRects[trackIt].push_back(objRect);
			
									// Also store the center point
									objectCenters[trackIt].push_back(this->makePoint(u, v));
								}
							}
#ifdef FALSE_COLOR
							else if(this->generateFalseColorImages){
								// Wrong scale -- red
								cv::rectangle(falseColorImage, cv::Point(u,v), cv::Point(u,v), cv::Scalar(0,0,255));
							}	
#endif
						}
#ifdef FALSE_COLOR
						else if(this->generateFalseColorImages){
							// No depth -- green
							cv::rectangle(falseColorImage, cv::Point(u,v), cv::Point(u,v), cv::Scalar(0,255,0));
						}
#endif
					}
				}
#ifdef FALSE_COLOR
				else if(this->generateFalseColorImages){
					// Outside of physical parameters -- blue
					cv::rectangle(falseColorImage, cv::Point(halfW,v), cv::Point(img.cols - halfW, v), cv::Scalar(255,0,0));
				}
#endif
			}

		}

#ifdef FALSE_COLOR
		// Write out the false color image
		// NOTE(review): hardcoded absolute output path -- should be a parameter
		if(this->generateFalseColorImages)
		{
			stringstream ss; 
			ss << "/home/walker/Projects/rache/trunk/geometric_image_filter/data/false_color/false_color_" << cameraTime.sec << "w" << w << ".png";
			string outPath = ss.str();
			cv::imwrite(outPath.c_str(), falseColorImage);
		}
#endif
	}

	// Group rectangles & update tracklets
	for(TD_TrackletVector::iterator trackIt = tracklets.begin(); trackIt != tracklets.end(); /* NOTE: Not moving the iterator here */ )
	{
		// Alias the per-tracklet vector (size captured BEFORE grouping, so an
		// "empty after grouping" tracklet still counts as having had detections)
		vector<cv::Rect>& trackletRectVector = objectRects[trackIt];
		int size = trackletRectVector.size();

		// Group the rectangles for this particular tracklet
		cv::groupRectangles(trackletRectVector, groupThreshold, eps);

		// Copy the results into the output vector
		for(vector<cv::Rect>::iterator it = trackletRectVector.begin(); it != trackletRectVector.end(); ++it)
		{
			TD_DetectionPair tdp(*it, *trackIt);
			objects.push_back(tdp);
		}

		// Update the tracklet with the mean detection center when we had
		// detections; otherwise update empty and possibly kill the tracklet
		if(size > 0)
		{
			geometry_msgs::PointStamped ptStamped;
			ptStamped.header.stamp = cameraTime;
			ptStamped.header.frame_id = cameraFrame;
			ptStamped.point = this->centersToPoint(objectCenters[trackIt], ptCloud);

			(*trackIt)->update(ptStamped);
			++trackIt;
		}else{
			(*trackIt)->update();
			if((*trackIt)->killTracklet())
			{
				ROS_INFO("Kill tracklet!");
				// erase() returns the next valid iterator
				trackIt = tracklets.erase(trackIt);
			}else
			{
				++trackIt;
			}
		}
	}
}

// ------------------------------------------------------------
GeometricImageFilter::GeometricImageFilter(ros::NodeHandle& nh)
// ------------------------------------------------------------
// Constructor for the filter: reads parameters, wires up subscribers/publishers,
// loads the classifier cascade, and prepares the optional local display
{
	// Save the node handle 
    this->nodeHandle = nh;

	// Get the parameters (third argument of nh.param is the default value)
	nh.param("debugging", DEBUGGING, DEBUGGING); // TODO: This should be an instance variable
	nh.param("timing", TIMING, TIMING); // TODO: This should be an instance variable too
	nh.param("min_height_m", this->minHeightM, 1.35);
    nh.param("max_height_m", this->maxHeightM, 1.70);
    nh.param("min_size_m", this->minSizeM, 0.05);
    nh.param("max_size_m", this->maxSizeM, 0.17);
	nh.param("jitter_tolerance_m", this->jitterToleranceM, 0.025);
    nh.param("max_depth_m", this->maxDepthM, 8.0);
	nh.param("max_movement_m", this->maxMovementM, 0.20);
    nh.param("window_scale_factor", this->windowScaleFactor, 1.2);
	nh.param("keyframe_rate", this->keyframeRate, 20);
	nh.param("save_local", this->saveLocal, false);
	nh.param("display_local", this->displayLocal, false);
	nh.param("base_frame", this->baseFrame, string("/base_footprint"));
	nh.param("classifier_filename", this->cascadePath, string("data/haarcascade_frontalface_alt.xml"));
	nh.param("publish_annotations", this->doPublishAnnotations, true);

	// Other initializations (the transform listener is freed in the destructor)
	this->generateFalseColorImages = true;
	this->transformListener = new tf::TransformListener(nh, ros::Duration(5.0), true);
	this->framesSinceLastDetection = this->keyframeRate;
	this->trackletId = 1;

	// Subscribe to the necessary topics (latest message cached by each callback)
	this->imageSubscriber = this->nodeHandle.subscribe(IMAGE_TOPIC, 3, &GeometricImageFilter::imageCallback, this);
	this->cameraInfoSubscriber = this->nodeHandle.subscribe(CAMERA_INFO_TOPIC, 3, &GeometricImageFilter::cameraInfoCallback, this);
	this->depthSubscriber = this->nodeHandle.subscribe(STEREO_TOPIC, 3, &GeometricImageFilter::depthCallback, this);

	// Advertise our published topics
	this->positionPublisher = nh.advertise<people_msgs::PositionMeasurement>(POSITION_MEASUREMENT_TOPIC, 10);
	if(this->doPublishAnnotations)
	{
		this->annotationPublisher = nh.advertise<annotations::AnnotationArray>(ANNOTATION_ARRAY_TOPIC, 10);
	}

	// Load the object detector cascade
	// NOTE(review): load failure is not checked here -- objectDetection assumes
	// classifier.oldCascade is valid
	this->classifier.load(this->cascadePath.c_str());

	// If we're displaying the results locally, setup the OpenCV window
	if(this->displayLocal)
	{
		// Open the window
		cv::namedWindow(WINDOW_NAME, CV_WINDOW_AUTOSIZE);
		cvWaitKey(10);
	}

	// Load the annotation font (used by drawRectangles)
	if(this->displayLocal || this->saveLocal)
	{
		cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5);
	}

	// Bag services
	this->nextClient = nh.serviceClient<bag_player::Next>("/bag_player_next");
}

// ------------------------------------------
GeometricImageFilter::~GeometricImageFilter()
// ------------------------------------------
// Filter destructor: releases the resources acquired in the constructor
{
	// Close the display window (if one was opened)
	if(this->displayLocal)
	{
		cvDestroyWindow(WINDOW_NAME);
	}

	// Free the heap-allocated transform listener
	delete this->transformListener;
}

// ------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::updateGeometricConstraints(FilterMessages& messages, int minWindowSize, int maxWindowSize)
// ------------------------------------------------------------------------------------------------------------------
// Recomputes the size-based and height-based depth constraints whenever the camera intrinsics
// or the camera height change, then rebuilds the combined geometric filter if either changed.
//   messages      - latest filter inputs; cameraInfoMsg and cameraPose are read here
//   minWindowSize - smallest detection window (px) used when deriving the size constraints
//   maxWindowSize - largest detection window (px) used when deriving the size constraints
// Returns true if the combined geometric filter (dC) was rebuilt, false otherwise.
{
	// Keep track of whether or not we need to update the filter
	bool updateGeometricFilter = false;

	// Update size constraints as necessary
	// ------------------------------------
	// Only recompute when the intrinsic matrix K changes (or on the very first message)
	if(this->prevCameraInfoMsg.get() == NULL || this->prevCameraInfoMsg->K != messages.cameraInfoMsg->K)
	{
		ROS_INFO_COND(DEBUGGING, " + Computing the range of feasible depths given image window size and feasible object sizes");
		ROS_INFO_COND(DEBUGGING, "   - Minimum feasible object size: %fm", this->minSizeM);
		ROS_INFO_COND(DEBUGGING, "   - Maximum feasible object size: %fm", this->maxSizeM);

		// Compute the size constraints (K[0] is the focal length fx)
		this->computeSizeConstraints(this->dS, messages.cameraInfoMsg->K[0], this->minSizeM, this->maxSizeM, minWindowSize, maxWindowSize);
		this->filterDepthConstraints(this->dS, this->maxDepthM);

		// Update the projection of v into the real world
		this->computeVProjection(this->rY, messages.cameraInfoMsg->K, messages.cameraInfoMsg->height);

		// Update the previous camera info message to reflect the new one
		this->prevCameraInfoMsg = messages.cameraInfoMsg;
		updateGeometricFilter = true;
	}

	// Update height constraints as necessary
	// --------------------------------------
	// BUG FIX: the previous code compared abs(heightDelta) > jitterToleranceM with an
	// unqualified abs(); if that resolves to the C integer abs(int), the fractional part
	// of the (meters-scale) height difference is truncated and sub-meter camera-height
	// changes are silently ignored. Compare the signed delta against +/- tolerance instead,
	// which needs no additional headers and cannot truncate.
	bool updateHeightConstraints = !this->prevCameraPose;
	if(!updateHeightConstraints)
	{
		double heightDeltaM = this->prevCameraPose->getHeight() - messages.cameraPose->getHeight();
		updateHeightConstraints = (heightDeltaM > this->jitterToleranceM) || (heightDeltaM < -this->jitterToleranceM);
	}
	if(updateHeightConstraints)
	{
		// Optional debugging info
		ROS_INFO_COND(DEBUGGING, " + Computing the range of feasible depths given image height and feasible object height");
		ROS_INFO_COND(DEBUGGING, "   - Minimum feasible object height: %fm", this->minHeightM); 
		ROS_INFO_COND(DEBUGGING, "   - Maximum feasible object height: %fm", this->maxHeightM); 
		ROS_INFO_COND(DEBUGGING, "   - Jitter tolerance: %fm", this->jitterToleranceM); 
		ROS_INFO_COND(DEBUGGING, "   - Display results locally: %s", this->displayLocal ? "Yes" : "No"); 
		
		// Compute the height constraints
		this->computeHeightConstraints(this->dH, this->rY, messages, this->minHeightM, this->maxHeightM);
		this->filterDepthConstraints(this->dH, this->maxDepthM);
		
		// Update the previous camera pose
		this->prevCameraPose = messages.cameraPose;				
		updateGeometricFilter = true;
	}

	// Update the combined geometric filter if necessary
	// -------------------------------------------------
	if(updateGeometricFilter)
	{
		ROS_INFO_COND(DEBUGGING, " + Combining the size and height depth constraints");
		this->combineConstraints(this->dC, this->dS, this->dH, messages.cameraInfoMsg->height);
	}

	return updateGeometricFilter;
}

// -----------------------------
void GeometricImageFilter::run()
// -----------------------------
// The main control loop for a filter object.
// Phases: (1) verify the cascade loaded, (2) block for the first message set and derive the
// detection window sizes and scratch buffers from it, (3) loop: fetch messages, refresh the
// geometric constraints, run full detection on keyframes (initializing/matching tracklets)
// or tracking-guided detection otherwise, publish the results, and optionally display/save
// an annotated frame.
{
    // If the classifier hasn't loaded, you can't run the node
    if(this->classifier.empty())
    {
        ROS_ERROR("Unable to load cascade \"%s\"", this->cascadePath.c_str());
        return;
    }

	// Preprocessing
	// -------------
    // Status info
	ROS_INFO_COND(DEBUGGING, " G E O M E T R I C   I M A G E   F I L T E R ");
	ROS_INFO_COND(DEBUGGING, " --------------------------------------------");
	ROS_INFO_COND(DEBUGGING, " + Cascade: %s", this->cascadePath.c_str());
	ROS_INFO_COND(DEBUGGING, " + Blocking for camera info message...");

	// Block for the first set of messages
	FilterMessages messages = this->getMessages(true);	// NOTE: This will block until messages are recieved
	
	// Compute the range of window sizes
	// (max window is bounded by the smaller image dimension)
	int minWindowSizePx = computeMinWindowSize(messages.cameraInfoMsg, this->minSizeM, this->maxDepthM);
	int maxWindowSizePx = min(messages.cameraInfoMsg->width, messages.cameraInfoMsg->height);
	this->computeWindowSizes(this->windowSizes, minWindowSizePx, maxWindowSizePx, this->windowScaleFactor);
	if(DEBUGGING) this->printWindowSizeInfo(this->windowSizes);	

	// Other initializations
	this->cameraFrame = messages.imageMsg->header.frame_id;
	int frameNum = 0; // TODO: Remove this
	vector<TD_DetectionPair> falsePositives;
	TD_TrackletVector newTracklets;

    // Initialize the variables used in calculating integral images (so we don't have to do it again & again)
    // NOTE(review): these cvCreateMat buffers are never released (no cvReleaseMat anywhere in
    // this function or the destructor) — they leak at shutdown; harmless for a long-running
    // node but worth fixing.
    int rows = messages.imageMsg->height;
    int cols = messages.imageMsg->width;
    this->sum = cvCreateMat( rows + 1, cols + 1, CV_32SC1 );
    this->sqSum = cvCreateMat( rows + 1, cols + 1, CV_64FC1 );
    this->tiltedSum = cvCreateMat( rows + 1, cols + 1, CV_32SC1 );

	// Initialize the default tracklet collection
	// (a single static tracklet covering the whole image, used for full detections)
	boost::shared_ptr<Tracklet> defaultTracklet(new StaticTracklet(0, cols, rows));
	this->defaultTracklets.push_back(defaultTracklet);
	
	// Main control loop
	// -----------------
	while(ros::ok())
	{

		// Collect messages:
		// ----------------
		// NOTE(review): this inner `messages` shadows the one declared before the loop;
		// intentional re-fetch each iteration, but the shadowing is easy to misread.
		FilterMessages messages = this->getMessages(true); // (blocking)
	
		// Make sure we have the necessary  messages	
		if(messages.valid)
		{
			// Optional debugging/timing information
			START();

			// Update the geometric constraints:
			// --------------------------------
			this->updateGeometricConstraints(messages, minWindowSizePx, maxWindowSizePx);

			try
			{
				// Convert the image from a ROS message to OpenCV's format
				// -------------------------------------------------------
				// Do the conversion & keep a color copy of the image if we're going to display it later
				cv::Mat img(this->cvBridge.imgMsgToCv(messages.imageMsg, "bgr8"));
				cv::Mat forDisplay;
				if(this->displayLocal || this->saveLocal) 
				{
					forDisplay =  img.clone();
				}

				// Make sure the image is grayscale!
				if (img.channels() > 1)
				{
					cv::Mat temp;
					cv::cvtColor(img, temp, CV_BGR2GRAY);
					img = temp;
				}

				// Convert the sensor_msgs::PointCloud2 to a pcl::PointCloud2<PointXYZRGB> object
				pcl::PointCloud<pcl::PointXYZRGB> ptCloud;
				pcl::fromROSMsg(*(messages.depthMsg), ptCloud);
				
				// Full Detection or tracking?
				// Run a full keyframe detection when enough frames have elapsed or we have no tracklets
				this->objectRects.clear();
				falsePositives.clear();
				if(this->framesSinceLastDetection >= keyframeRate || this->tracklets.size() == 0)
				{
					// Run the *full* object detection:
					// -----------------------------
					objectDetection(this->objectRects, img, cameraFrame, messages.imageMsg->header.stamp, ptCloud, this->defaultTracklets, this->dC, GROUP_THRESHOLD, EPS);
					this->validateDetections(objectRects, ptCloud, dC, &falsePositives);

					// Initialize the tracklets
					// ------------------------
					if(objectRects.size() > 0)
					{
						newTracklets.clear();
						image_geometry::PinholeCameraModel cameraModel;
						cameraModel.fromCameraInfo(messages.cameraInfoMsg);
						for(vector<TD_DetectionPair>::iterator it = this->objectRects.begin(); it != this->objectRects.end(); ++it)
						{
							ROS_INFO("Tracklet Init_1");						

							// Get the depth at the detection rectangle's center pixel
							int u = (int)(it->first.x + (it->first.width/2.0));
							int v = (int)(it->first.y + (it->first.height/2.0));
							pcl::PointXYZRGB pt = ptCloud(u, v);

							ROS_INFO("Tracklet Init_2");						

							// Create a stamped point
							geometry_msgs::PointStamped ptStamped;
							ptStamped.header = messages.imageMsg->header; // Keep the same header as our image
							ptStamped.point.x = pt.x;
							ptStamped.point.y = pt.y;
							ptStamped.point.z = pt.z;

							ROS_INFO("Tracklet Init_3");

							// Does this detection match an existing tracklet?
							// Gather every tracklet within maxMovementM of the detection's 3D point
							vector<pair<double, TD_TrackletVector::iterator> > distances;
							for(TD_TrackletVector::iterator trackIt = this->tracklets.begin(); trackIt != this->tracklets.end(); ++trackIt)
							{
								double distance = (*trackIt)->dist(pt);	
								if(distance <= this->maxMovementM) 
								{
									distances.push_back(pair<double, TD_TrackletVector::iterator>(distance, trackIt));
								}
							}

							ROS_INFO("Tracklet Init_4");

							// Matches an existing tracklet!
							// (pick the nearest candidate; pair comparison orders by distance first)
							if(distances.size() != 0)
							{
								ROS_INFO("Tracklet Init_4.1");

								vector<pair<double, TD_TrackletVector::iterator> >::iterator closest = min_element(distances.begin(), distances.end());
								newTracklets.push_back(*(closest->second));
								it->second = (*(closest->second));
								ROS_INFO_COND(DEBUGGING, "Existing tracklet (#%i) matched!", (*closest->second)->getId());
							}else
							{
								ROS_INFO("Tracklet Init_4.2");

								// Create a new tracklet
								boost::shared_ptr<Tracklet> t(new DynamicTracklet(++trackletId, ptStamped, this->maxMovementM, cameraModel, this->transformListener));
								newTracklets.push_back(t);
								it->second = t;
							}
						
							ROS_INFO("Tracklet Init_5");
						}

						// Replace the old tracklets
						// (unmatched old tracklets are dropped here)
						this->tracklets.clear();
						this->tracklets.insert(this->tracklets.begin(), newTracklets.begin(), newTracklets.end());
					}
					
					// Update the counter
					// (reset the keyframe countdown only if something was detected)
					this->framesSinceLastDetection = (this->objectRects.size() > 0) ? 0 : this->framesSinceLastDetection + 1;
				}else{

					// Run tracking:
					// ------------
					// Optionally draw the tracking rectangles
					if(this->displayLocal) this->drawTracklets(this->tracklets, messages.imageMsg->header.stamp, forDisplay, cv::Scalar(0,0,255), RECT_THICKNESS_PX);

					ROS_INFO_COND(DEBUGGING, " + Tracking: %i", this->framesSinceLastDetection);
					this->framesSinceLastDetection++;

					// Same detection pipeline, but restricted to the current tracklets' regions
					objectDetection(this->objectRects, img, cameraFrame, messages.imageMsg->header.stamp, ptCloud, this->tracklets, this->dC, GROUP_THRESHOLD, EPS);
					this->validateDetections(objectRects, ptCloud, dC, &falsePositives);
				}

				// Publish the results
				// -------------------
				ROS_INFO("Publishing");
				publishDetections(objectRects, ptCloud, cameraFrame, messages.imageMsg->header.stamp);
				if(this->doPublishAnnotations)
				{
					publishAnnotations(objectRects, ptCloud, cameraFrame, messages.imageMsg->header.stamp);
				}

				// Optional debugging/timing information
				// -------------------------------------
				if(DEBUGGING) this->printObjectRects(objectRects);
				if(TIMING){
                    int seq = messages.imageMsg->header.seq;
                    int faces = this->objectRects.size();
					STOP(seq, faces);
				}

				// Optionally visually display the results 
				// ---------------------------------------
				if(this->displayLocal || this->saveLocal)
				{
					// Draw the detection boxes
					this->drawRectangles(objectRects, forDisplay, cv::Scalar(0,255,0), RECT_THICKNESS_PX, true);

					// Draw the false positives
					this->drawRectangles(falsePositives, forDisplay, cv::Scalar(255,0,0), RECT_THICKNESS_PX);
					
					// Show the (modified) image
					if(this->displayLocal)
					{
						cv::imshow(WINDOW_NAME, forDisplay);
						cvWaitKey(10); // This seems to be necessary
					} 
					
					// Save the (modified) image
					if(this->saveLocal)
					{
						char buffer[200]; // TODO: This is a terrible way to do this
						sprintf(buffer, "/home/walker/Projects/rache/trunk/geometric_image_filter/data/frames/frame-%i.png", frameNum++); // TODO: This shouldn't be so hard coded
						cv::imwrite(buffer, forDisplay);
					}
				}
			}catch(sensor_msgs::CvBridgeException error)
			// NOTE(review): exception is caught by value (slicing/copy); prefer catch by const&
			{
				ROS_ERROR("CV Bridge Exception");
			}
		}

		// Update ROS
		ros::spinOnce();
	}
}

// ----------------------------
int main(int argc, char** argv)
// ----------------------------
{
	// Initialize our ROS node
	ros::init(argc, argv, NODE_NAME);
    ros::NodeHandle nodeHandle(NAMESPACE);

    // Start the filter node
	GeometricImageFilter geometricImageFilter(nodeHandle);
	geometricImageFilter.run();
}
