// Includes:
// --------
#include "geometric_image_filter/geometric_image_filter.h"

// Initialize static variables:
// ---------------------------
// The ground plane's normal in the world frame (+Z is "up"), and a zero
// translation used to strip the translational part out of a transform.
const tf::Vector3 GeometricImageFilter::groundNormalWorld(0, 0, 1);
const tf::Vector3 GeometricImageFilter::noTranslation(0, 0, 0);

// ---------
// Callbacks|
// ------------------------------------------------------------------------------
// ------------------------------------------------------------------------------
void GeometricImageFilter::imageCallback(const sensor_msgs::Image::ConstPtr& msg)
// ------------------------------------------------------------------------------
{
	// Cache the latest image; it is consumed later by getMessages()
	imageMsg = msg;
}

// ---------------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------------
void GeometricImageFilter::cameraInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& msg)
// ---------------------------------------------------------------------------------------
{
	// Cache the latest camera info message; it is consumed later by getMessages()
	cameraInfoMsg = msg;
}

// -----------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------
void GeometricImageFilter::stereoCallback(const sensor_msgs::PointCloud2::ConstPtr& msg)
// -----------------------------------------------------------------------------------
{
	// Cache the latest stereo point cloud; it is consumed later by getMessages()
	stereo = msg;
}

// ----
// Core|
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
GeometricImageFilter::FilterMessages GeometricImageFilter::getMessages()
// ---------------------------------------------------------------------
{
	// Gathers the latest cached messages into a FilterMessages struct,
	// discarding any that are older than MESSAGE_BUFFER_DURATION_S.
	// The struct is marked valid only when a current image, a current camera
	// info message, AND a camera pose are all available; stereo data is
	// optional. NOTE: when cameraPose is set it is heap-allocated by
	// getCameraPose() and the caller is responsible for freeing it.
	FilterMessages msgs;
	msgs.valid = false;

	// Check for an image
	if(this->imageMsg.get() != NULL)
	{
		// Check that the image message is "current"
		ros::Time now = ros::Time::now();
		ros::Time imageStamp = this->imageMsg->header.stamp;
		ros::Duration imageDuration = now - imageStamp;
		if (imageDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
		{
			// Store the image message
			msgs.imageMsg = this->imageMsg;

			// Check for a camera info message (also must be "current")
			if(this->cameraInfoMsg.get() != NULL)
			{
				ros::Time infoStamp = this->cameraInfoMsg->header.stamp;
				ros::Duration infoDuration = now - infoStamp;
				if (infoDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
				{
					// Store the camera info message
					msgs.cameraInfoMsg = this->cameraInfoMsg;

					// Get the camera's orientation (may be NULL if the tf lookup failed)
					CameraPose* cameraPose = this->getCameraPose(BASE_FRAME, msgs.cameraInfoMsg->header.frame_id);
					msgs.cameraPose = cameraPose;

					// If we have both of these and a valid camera pose, then we have enough information to continue
					msgs.valid = (cameraPose != NULL);

					// Check for stereo data (which is not strictly necessary)
					if (this->stereo.get() != NULL)
					{
						ros::Time stereoStamp = this->stereo->header.stamp;
						ros::Duration stereoDuration = now - stereoStamp;
						if (stereoDuration.toSec() < MESSAGE_BUFFER_DURATION_S)
						{
							// Store the stereo message
							msgs.stereo = this->stereo;
						}
					}
				} else{
					ROS_INFO_COND(DEBUGGING, "Camera info message not current");
				}
			}
		}else{
			ROS_INFO_COND(DEBUGGING, "Image message not current");
		}
	}

	return msgs;
}

// --------------------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------------------
GeometricImageFilter::CameraPose* GeometricImageFilter::getCameraPose(string baseFrame, string cameraFrame)
// --------------------------------------------------------------------------------------------------------
{
	// Looks up the camera's position in the base frame and the ground plane's
	// normal expressed in the camera frame.
	// Returns a heap-allocated CameraPose (caller takes ownership) or NULL if
	// the tf lookup failed (common while the node is starting up).
	CameraPose* cameraPose = NULL;

	try
	{
		// Allocate the camera pose on the heap
		cameraPose = new CameraPose;

		// Get the transform from the camera --> base frame
		tf::StampedTransform cbTransform;
		this->transformListener.lookupTransform(baseFrame, cameraFrame, ros::Time(0), cbTransform);
		tf::Vector3 cbTranslation = cbTransform.getOrigin();

		// Get the transform from the base --> camera frame; zero out the
		// translation because we only want to rotate a direction vector
		tf::StampedTransform bcTransform;
		this->transformListener.lookupTransform(cameraFrame, baseFrame, ros::Time(0), bcTransform);
		bcTransform.setOrigin(noTranslation);

		// Apply the base --> camera rotation to the canonical ground normal (in the world frame)
		tf::Vector3 groundNormalCamera = bcTransform * (this->groundNormalWorld);

		// Build the struct
		cameraPose->cameraOrigin = cbTranslation;
		cameraPose->groundNormal = groundNormalCamera;

	}catch(const tf::TransformException& ex) // BUG FIX: catch by const reference (was by value, which slices)
	{
		// Just pass... this exception gets tripped when the node is starting up
		if(cameraPose != NULL) delete cameraPose;
		cameraPose = NULL;
	}

	return cameraPose;
}

// -----------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------
int GeometricImageFilter::getMinWindowSize(const boost::array<double, 9> k, const float minAllowedSizeM, const float maxAllowedDepthM)
// -----------------------------------------------------------------------------------------------------------------------------------
{
	// Computes the smallest detection window size (in pixels) that an object of
	// size minAllowedSizeM can occupy: the projected width of such an object at
	// the maximum allowed depth.
	//   k               - row-major 3x3 camera intrinsic matrix K
	//   minAllowedSizeM - smallest feasible object size (meters)
	//   maxAllowedDepthM- largest feasible object depth (meters)
	// Returns the pixel distance between the projections of the two endpoints.

	// Reshape the K array into a btMatrix3x3 so that it can be used
	btMatrix3x3 kMat(k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7], k[8]);

	// Two 3D points at the maximum depth, separated horizontally by the minimum size
	btVector3 pt3A(0, 0, maxAllowedDepthM);
	btVector3 pt3B(minAllowedSizeM, 0, maxAllowedDepthM);

	// Project the two points into homogeneous image coordinates
	btVector3 pt2A = kMat * pt3A;
	btVector3 pt2B = kMat * pt3B;

	// BUG FIX: perform the perspective divide. The original code skipped it,
	// which made the result fx * minAllowedSizeM -- independent of depth
	// (the cx*depth terms cancel) -- instead of fx * size / depth.
	float uA = pt2A.x() / pt2A.z();
	float uB = pt2B.x() / pt2B.z();

	// Pixel distance between the two projected points (fabs: arguments are floats)
	return (int)(fabs(uA - uB));
}

// -----------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------
vector<int> GeometricImageFilter::getWindowSizes(int minWindowSize, int maxWindowSize, float scaleFactor, vector<float>* windowScales)
// -----------------------------------------------------------------------------------------------------------------------------------
{
	// Builds the geometric series of detection window sizes: starting at
	// minWindowSize and multiplying by scaleFactor while below maxWindowSize.
	// If windowScales is non-NULL, it receives (for each size) the ratio of
	// that size to the size the cascade was trained at.
	vector<int> sizes;
	float upperBound = (float)maxWindowSize;
	float trainedSize = (float)this->classifier.oldCascade->orig_window_size.height;

	// Reset the optional output vector
	if(windowScales != NULL)
	{
		windowScales->clear();
	}

	float current = (float)minWindowSize;
	while(current < upperBound)
	{
		// Record the (truncated) window size
		sizes.push_back((int)current);

		// Record the matching scale relative to the trained window size
		if(windowScales != NULL)
		{
			windowScales->push_back((int)current / trainedSize);
		}

		// Grow geometrically
		current *= scaleFactor;
	}
	return sizes;
}

// ---------------------------------------------------------------------------------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeSizeConstraints(map<int, DepthInterval>& dS, FilterMessages& messages, const float minWorldSize, const float maxWorldSize)
// ---------------------------------------------------------------------------------------------------------------------------------------------------------
{
	// For every candidate window size, computes the interval of depths at
	// which an object between minWorldSize and maxWorldSize (meters) would
	// project to that window size (pinhole model: depth = f * S / s_px).
	// Results are stored in dS, keyed by window size. Always returns true.

	// Start from an empty constraint map
	dS.clear();

	// Precompute the focal-length / world-size products
	float focalX = messages.cameraInfoMsg->K[0];
	float minProduct = focalX * minWorldSize;
	float maxProduct = focalX * maxWorldSize;

	// One depth interval per candidate window size
	vector<int>::iterator it;
	for(it = this->windowSizes.begin(); it != this->windowSizes.end(); ++it)
	{
		int ws = *it;
		dS[ws] = DepthInterval(minProduct / ws, maxProduct / ws);
	}
	return true;
}

// ---------------------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------------------
bool GeometricImageFilter::filterDepthConstraint(DepthInterval& depthConstraint, float maxDepth)
// ---------------------------------------------------------------------------------------------
{
	// Returns true when the interval cannot contain any feasible depth:
	// 1.) It lies entirely behind the camera (high endpoint negative)
	// 2.) It begins beyond the maximum depth we care about
	// 3.) It is empty (high endpoint below the low endpoint)
	if(depthConstraint.GetHighPoint() < 0) return true;
	if(depthConstraint.GetLowPoint() > maxDepth) return true;
	return depthConstraint.isEmpty();
}

// ---------------------------------------------------------------------------------------------------------
// ---------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::filterDepthConstraints(map<int, DepthInterval>& depthConstraints, float maxDepth)
// ---------------------------------------------------------------------------------------------------------
{
	// Removes every infeasible interval (see filterDepthConstraint) from the
	// map in place. Always returns true.
	//
	// BUG FIX: std::map::erase invalidates the erased iterator, so the
	// original pattern (erase(it) followed by ++it in a for loop) was
	// undefined behavior. Advance the iterator BEFORE erasing instead.
	map<int, DepthInterval>::iterator it = depthConstraints.begin();
	while(it != depthConstraints.end())
	{
		if(filterDepthConstraint(it->second, maxDepth))
		{
			// Post-increment yields the current position to erase and moves on safely
			depthConstraints.erase(it++);
		}
		else
		{
			++it;
		}
	}
	return true;
}

// -----------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeVProjection(vector<float>& out, const boost::array<double, 9> k, int height)
// -----------------------------------------------------------------------------------------------------------
{
	// For each image row v in [0, height), computes the ratio of the
	// back-projected ray's components using the inverse intrinsic matrix
	// K^-1, and appends it to `out`. Always returns true.

	// Start with an empty output vector
	out.clear();

	// Rebuild K as a 3x3 matrix and invert it
	btMatrix3x3 intrinsics(k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7], k[8]);
	btMatrix3x3 inv = intrinsics.inverse();

	// Row-by-row projection
	for(int v = 0; v < height; ++v)
	{
		float numerator = (inv[1][1] * v) + inv[1][2];
		float denominator = (inv[2][1] * v) + inv[2][2];
		out.push_back(numerator / denominator);
	}

	return true;
}

// --------------------------------------------------------------------------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::computeHeightConstraints(map<int, DepthInterval>& dH, vector<float>& rY, FilterMessages& messages, float minHeightM, float maxHeightM)
// --------------------------------------------------------------------------------------------------------------------------------------------------------------
{
	// For every image row v, computes the interval of depths at which the
	// back-projected ray (rY[v], from computeVProjection) lies between the
	// feasible object heights minHeightM and maxHeightM. Results are stored
	// in dH, keyed by row. Always returns true.

	// Rebuild the per-row height constraints from scratch
	dH.clear();

	// Camera height above the ground, and offsets to the feasible height band
	float camHeight = messages.cameraPose->getHeight();
	float lowOffset = minHeightM - camHeight;
	float highOffset = maxHeightM - camHeight;

	// Ground plane normal, expressed in the camera frame
	tf::Vector3& normal = messages.cameraPose->groundNormal;

	// One depth interval per image row
	int rows = messages.cameraInfoMsg->height;
	for(int v = 0; v < rows; ++v)
	{
		// Project the row's ray direction onto the ground normal
		// NOTE(review): this can be zero for rays parallel to the ground,
		// producing infinite endpoints -- presumably culled downstream by
		// filterDepthConstraints; verify.
		float projection = (normal[1] * rY[v]) + normal[2];

		// Depths at which the ray crosses the min/max height planes
		float dLow = lowOffset / projection;
		float dHigh = highOffset / projection;

		// Order the endpoints and store the interval
		dH[v] = DepthInterval(min(dLow, dHigh), max(dLow, dHigh));
	}

	return true;
}

// -------------------------------------------------------------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------------------------------------------
bool GeometricImageFilter::combineConstraints(map<VWPt, DepthInterval>& dC, map<int, DepthInterval>& dS, map<int, DepthInterval>& dH, unsigned int height)
// -------------------------------------------------------------------------------------------------------------------------------------------------------
{
	// For every (row, window-size) pair, intersects the height-based interval
	// (dH) with the size-based interval (dS) and stores the result in dC when
	// it is feasible. Always returns true.
	// NOTE(review): the `height` parameter is currently unused; kept for
	// interface compatibility.

	// Start from an empty combined-constraint map
	dC.clear();

	// Cross every image row with every window size
	map<int, DepthInterval>::iterator rowIt;
	for(rowIt = dH.begin(); rowIt != dH.end(); ++rowIt)
	{
		map<int, DepthInterval>::iterator sizeIt;
		for(sizeIt = dS.begin(); sizeIt != dS.end(); ++sizeIt)
		{
			// Intersect the two depth intervals
			DepthInterval combined = rowIt->second.Intersect(sizeIt->second);

			// Keep only intervals that survive the feasibility filter
			if(!this->filterDepthConstraint(combined, this->maxDepthM))
			{
				dC[VWPt(rowIt->first, sizeIt->first)] = combined;
			}
		}
	}

	return true;
}

// -------------------------------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------------
void GeometricImageFilter::objectDetection(vector<cv::Rect>& objects, map<VWPt, DepthInterval>& dC, cv::Mat img, 
						   sensor_msgs::PointCloud2::ConstPtr stereo, int width, int height, int groupThreshold, double eps)
// -------------------------------------------------------------------------------------------------------------------------
{
	// Runs the Haar cascade only at (u, v, windowSize) locations whose stereo
	// depth falls inside the combined geometric constraint dC, then groups the
	// resulting detections. Detected rectangles are appended to `objects`.
	// Requires stereo data; does nothing when `stereo` is NULL.
	// NOTE: uses the member matrices sum/sqSum/tiltedSum allocated in run().

	// Make sure we have stereo information
	if(stereo.get() != NULL)
	{
		// OpenCV Declarations
		CvHaarClassifierCascade* cascade = this->classifier.oldCascade;

				
		// Calculate the integral images (plain, squared, tilted) for the cascade
		CvMat image = img;
		cvIntegral(&image, sum, sqSum, tiltedSum);
	
		// Convert the sensor_msgs::PointCloud2 to a pcl::PointCloud<PointXYZRGB>
		pcl::PointCloud<pcl::PointXYZRGB> stereoPtCloud;
		pcl::fromROSMsg(*stereo, stereoPtCloud);

		// Loop through the window sizes
		for(unsigned int i = 0; i < this->windowSizes.size(); ++i)
		{
            // Window size & scale
			int w = this->windowSizes[i];
			int halfW = w/2;
			float wScale = this->windowScales[i];

			// Point the cascade at the integral images at this window scale
			cvSetImagesForHaarClassifierCascade(cascade, sum, sqSum, tiltedSum, wScale);

			// Loop through every possible window center vertical component
			for(int v = halfW; v < height - halfW; ++v)
			{
				// Lookup the combined depth constraint at v,w
				VWPt vw(v,w);
				map<VWPt, DepthInterval>::iterator vwIt = dC.find(vw);
				
				// Make sure we HAVE a combined depth constraint at v,w... otherwise the entire row can be safely eliminated
				if(vwIt != dC.end())
				{
					// Alias the depth interval
					DepthInterval& vwDI = vwIt->second;

					// Loop through every possible window center horizontal component
					for(int u = halfW; u < width - halfW; ++u)
					{
						// Get the stereo depth at the window center (u,v)
						pcl::PointXYZRGB p = stereoPtCloud.at(u,v);
						float d = p.z;

						// Make sure we have stereo data (NaN means no disparity at this pixel)
						if (!isnan(d)){

							// Check to see if the depth is in the range defined by the mask
							if(vwDI.contains(d))
							{
								// Construct the top-left corner at which we're going to run the detector
								CvPoint pt;
								pt.x = u - halfW;
								pt.y = v - halfW;

								// Actually run the detector
								int result = cvRunHaarClassifierCascade(cascade, pt);
								if(result > 0)
								{
									// Then we have a detection of the object... store the rectangle
									cv::Rect objRect;
									objRect.x = pt.x;
									objRect.y = pt.y;
									objRect.width = w;
									objRect.height = w;
									objects.push_back(objRect);
								}
							}		
						}
					}
				}
			}
		}

        // Now we need to group the rectangles of all the objects that we found
        cv::groupRectangles(objects, groupThreshold, eps);
    }
}

// ------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------------------------------
GeometricImageFilter::GeometricImageFilter(ros::NodeHandle& nh, const char* cascadePath, float minHeightM, float maxHeightM,
                                           float minSizeM, float maxSizeM, float maxDepthM,
                                           float windowScaleFactor, bool displayLocal)
// ---------------------------------------------------------------------------------------------------------------------------
{
    // Save the node handle & other parameters
    this->nodeHandle = nh;
    this->minHeightM = minHeightM;
    this->maxHeightM = maxHeightM;
    this->minSizeM = minSizeM;
    this->maxSizeM = maxSizeM;
    this->maxDepthM = maxDepthM;
    this->windowScaleFactor = windowScaleFactor;

	// Initialize pointers to NULL
	this->prevCameraPose = NULL;

	// BUG FIX: the integral-image matrices were previously left uninitialized
	// until run() allocated them; NULL-initialize so the object is in a
	// defined state even if run() is never called.
	this->sum = NULL;
	this->sqSum = NULL;
	this->tiltedSum = NULL;

	// Subscribe to the necessary topics
	this->imageSubscriber = this->nodeHandle.subscribe(IMAGE_TOPIC, 3, &GeometricImageFilter::imageCallback, this);
	this->cameraInfoSubscriber = this->nodeHandle.subscribe(CAMERA_INFO_TOPIC, 3, &GeometricImageFilter::cameraInfoCallback, this);
	this->stereoSubscriber = this->nodeHandle.subscribe(STEREO_TOPIC, 3, &GeometricImageFilter::stereoCallback, this);

	// Load the object detector cascade (emptiness is checked in run())
    this->cascadePath = cascadePath;
	this->classifier.load(cascadePath);

	// If we're displaying the results locally, setup the OpenCV window
	this->displayLocal = displayLocal;
	if(this->displayLocal)
	{
		// Open the window
		cv::namedWindow(WINDOW_NAME, CV_WINDOW_AUTOSIZE);
		cvWaitKey(10);
	}
}

// ------------------------------------------
// ------------------------------------------
GeometricImageFilter::~GeometricImageFilter()
// ------------------------------------------
{
	// BUG FIX: free the last camera pose; it is heap-allocated by
	// getCameraPose() and owned by this object once stored in prevCameraPose,
	// but was never released. (prevCameraPose is NULL-initialized in the
	// constructor, and delete on NULL is a safe no-op.)
	delete this->prevCameraPose;

	// Close the display window (if necessary)
	if(this->displayLocal)
	{
		cvDestroyWindow(WINDOW_NAME);
	}
}

// -----------------------------
// -----------------------------
void GeometricImageFilter::run()
// -----------------------------
{
	// Main entry point: blocks until a valid set of messages arrives, sizes
	// the detection windows and integral-image buffers, then loops forever
	// recomputing the geometric depth constraints (only when the camera info
	// or camera height changes) and running constrained object detection on
	// each incoming frame.

    // If the classifier hasn't loaded, you can't run the node
    if(this->classifier.empty())
    {
        ROS_ERROR("Unable to load cascade \"%s\"", this->cascadePath);
        return;
    }

	// Preprocessing
	// -------------
    // Status info
	ROS_INFO_COND(DEBUGGING, " G E O M E T R I C   I M A G E   F I L T E R ");
	ROS_INFO_COND(DEBUGGING, " --------------------------------------------");
	ROS_INFO_COND(DEBUGGING, " + Cascade: %s", this->cascadePath);

	// Determine the mininum object size in *pixels*
	// NOTE(review): each valid getMessages() call heap-allocates a CameraPose
	// that is never freed in this blocking loop (or at the call just below) --
	// a small startup leak; verify and clean up.
	ROS_INFO_COND(DEBUGGING, " + Blocking for camera info message...");
	while(ros::ok()) { FilterMessages messages = this->getMessages(); if(messages.valid) break;	ros::spinOnce(); } // Block for messages
	FilterMessages messages = this->getMessages();
	int minWindowSizePx = getMinWindowSize(messages.cameraInfoMsg->K, this->minSizeM, this->maxDepthM);
	ROS_INFO_COND(DEBUGGING, " + Minimum window size: %ipx", minWindowSizePx);

	// Compute the range of window sizes (capped by the smaller image dimension)
	int width = messages.cameraInfoMsg->width;
	int height = messages.cameraInfoMsg->height;
	int maxWindowSize = min(width, height);
	this->windowSizes = this->getWindowSizes(minWindowSizePx, maxWindowSize, this->windowScaleFactor, &this->windowScales);
	for(unsigned int i = 0; i < this->windowSizes.size(); ++i)
	{
		int windowSize = this->windowSizes[i];
		float windowScale = this->windowScales[i];
		ROS_INFO_COND(DEBUGGING, "   - Window size: %i | Window Scale: %f", windowSize, windowScale); 
	}

    // Initialize the matrices used in calculating integral images
    // (cvIntegral requires one extra row and column)
    int rows = messages.imageMsg->height;
    int cols = messages.imageMsg->width;
    this->sum = cvCreateMat( rows + 1, cols + 1, CV_32SC1 );
    this->sqSum = cvCreateMat( rows + 1, cols + 1, CV_64FC1 );
    this->tiltedSum = cvCreateMat( rows + 1, cols + 1, CV_32SC1 );

	// Initialize our vector of object rectangles
	vector<cv::Rect> objectRects;

	// Main control loop
	// -----------------
	while(ros::ok())
	{
		// Empty the vector of objects rectangles
		objectRects.clear();

		// Collect messages:
		// ----------------
		FilterMessages messages = this->getMessages();
	
		// Make sure we have the necessary messages	
		if(messages.valid)
		{
			// Optional debugging/timing information
			START();

			// Keep track of whether or not we need to update the filter
			bool updateGeometricFilter = false;

			// Update size constraints as necessary
			// (only when the camera intrinsics change)
			// ------------------------------------
			if(this->prevCameraInfoMsg.get() == NULL || this->prevCameraInfoMsg->K != messages.cameraInfoMsg->K)
			{
				ROS_INFO_COND(DEBUGGING, " + Computing the range of feasible depths given image window size and feasible object sizes");
				ROS_INFO_COND(DEBUGGING, "   - Minimum feasible object size: %fm", this->minSizeM);
				ROS_INFO_COND(DEBUGGING, "   - Maximum feasible object size: %fm", this->maxSizeM);

               
				// Compute the size constraints
				this->computeSizeConstraints(this->dS, messages, this->minSizeM, this->maxSizeM);
				this->filterDepthConstraints(this->dS, this->maxDepthM);

				// Update the projection of v into the real world
				this->computeVProjection(this->rY, messages.cameraInfoMsg->K, messages.cameraInfoMsg->height);

				// Update the previous camera info message to reflect the new one
				this->prevCameraInfoMsg = messages.cameraInfoMsg;
				
				updateGeometricFilter = true;
			}

			// Update height constraints as necessary
			// (only when the camera height changes by more than HEIGHT_EPS_M)
			// --------------------------------------
			if(this->prevCameraPose == NULL || abs(this->prevCameraPose->getHeight() - messages.cameraPose->getHeight()) > HEIGHT_EPS_M)
			{
				// Optional debugging info
				ROS_INFO_COND(DEBUGGING, " + Computing the range of feasible depths given image height and feasible object height");
				ROS_INFO_COND(DEBUGGING, "   - Minimum feasible object height: %fm", this->minHeightM); 
				ROS_INFO_COND(DEBUGGING, "   - Maximum feasible object height: %fm", this->maxHeightM); 
				ROS_INFO_COND(DEBUGGING, "   - Jitter tolerance: %fm", HEIGHT_EPS_M); // TODO: This shouldn't be a hard-coded constant
				ROS_INFO_COND(DEBUGGING, "   - Display results locally: %s", this->displayLocal ? "Yes" : "No"); 
				
				// Compute the height constraints
				this->computeHeightConstraints(this->dH, this->rY, messages, this->minHeightM, this->maxHeightM);
				this->filterDepthConstraints(this->dH, this->maxDepthM);
				
				// Take ownership of the new camera pose, freeing the old one
				if(this->prevCameraPose != NULL)
					delete this->prevCameraPose;
				this->prevCameraPose = messages.cameraPose;				
				
				updateGeometricFilter = true;
			}

			// Update the combined geometric filter if necessary
			// -------------------------------------------------
			if(updateGeometricFilter)
			{
				ROS_INFO_COND(DEBUGGING, " + Combining the size and height depth constraints");
				this->combineConstraints(this->dC, this->dS, this->dH, messages.cameraInfoMsg->height);
			}

			// Convert the image from a ROS message to OpenCV's format
			// -------------------------------------------------------
			try
			{
				// Do the conversion & keep a color copy of the image if we're going to display it later
				IplImage* cvImage = this->cvBridge.imgMsgToCv(messages.imageMsg, "bgr8");
				cv::Mat img(cvImage);
				cv::Mat forDisplay = img.clone();
				
				// If necessary, convert the image to grayscale (the cascade works on single-channel images)
				if (img.channels() > 1)
				{
					cv::Mat temp;
					cv::cvtColor(img, temp, CV_BGR2GRAY);
					img = temp;
				}

				// Run object detection
				// --------------------
				objectDetection(objectRects, this->dC, img, messages.stereo, messages.cameraInfoMsg->width, messages.cameraInfoMsg->height, GROUP_THRESHOLD, EPS);
					
				// Optional debugging/timing information
				// -------------------------------------
				if(DEBUGGING)
				{
					bool foundFace = false;
					for(vector<cv::Rect>::iterator it = objectRects.begin(); it != objectRects.end(); ++it)
					{
						foundFace = true;
						ROS_INFO(" + Face: (x: %i, y: %i, w: %i", it->x, it->y, it->width);
					}

					if(!foundFace) ROS_INFO(" + No face ");
				}
				if(TIMING){
					char buffer[50];
					sprintf(buffer, "%i", messages.imageMsg->header.seq);
					STOP(buffer);
				}

				// Optionally visually display the results 
				// ---------------------------------------
				if(this->displayLocal)
				{
					// Draw the face boxes
					for(vector<cv::Rect>::iterator it = objectRects.begin(); it != objectRects.end(); ++ it)
					{
						cv::rectangle(forDisplay, cv::Point(it->x, it->y), cv::Point(it->x + it->width, it->y + it->height), cv::Scalar(0,255,0), FACE_RECT_THICKNESS_PX);
					}

					// Show the (modified) image
					cv::imshow(WINDOW_NAME, forDisplay);
   					cvWaitKey(10);

                    // Save the image

				}
				
			}catch(sensor_msgs::CvBridgeException error)
			{
				ROS_ERROR("CV Bridge Exception");
			}
						
			// Cleanup to avoid memory leaks: free the pose unless
			// ownership was transferred to prevCameraPose above
			// -----------------------------
			if(this->prevCameraPose != messages.cameraPose)
			{
				delete messages.cameraPose;
			}	
		}

		// Update ROS
		ros::spinOnce();
	}
}

// ----------------------------
// ----------------------------
int main(int argc, char** argv)
// ----------------------------
{
	// Bring up the ROS node
	ros::init(argc, argv, NODE_NAME);
	ros::NodeHandle nodeHandle;

	// Classifier cascade path, with a sensible default
	string cascadePath;
	if(!nodeHandle.getParam("classifier_cascade", cascadePath))
	{
		cascadePath = "data/haarcascade_frontalface_alt.xml";
	}

	// Debugging and local display are switched on by the mere presence
	// of their parameters (the parameter values are ignored)
	DEBUGGING = nodeHandle.hasParam("debugging");
	bool displayLocal = nodeHandle.hasParam("display_local");

	// The feasible object height band is required; bail out if it is missing
	double minHeightM, maxHeightM;
	if(!nodeHandle.getParam("min_height_m", minHeightM) || !nodeHandle.getParam("max_height_m", maxHeightM))
	{
		return 1;
	}

	// Fixed detection parameters
	float minSizeM = 0.05;
	float maxSizeM = 0.17;
	float maxDepthM = 8.0;
	float windowScaleFactor = 1.2;

	// Construct the filter node and hand over control
	GeometricImageFilter geometricImageFilter(nodeHandle, cascadePath.c_str(), minHeightM, maxHeightM, minSizeM, maxSizeM, maxDepthM, windowScaleFactor, displayLocal);
	geometricImageFilter.run();
}
