// Include guard. NOTE: the previous macro (_GEOMETRIC_IMAGE_FILTER_) began
// with an underscore followed by an uppercase letter, a name reserved for the
// implementation by the C++ standard -- renamed to a non-reserved identifier.
#ifndef GEOMETRIC_IMAGE_FILTER_H
#define GEOMETRIC_IMAGE_FILTER_H

// Includes:
// --------
// Project headers
#include "config.h"
#include "interval.h"
#include "timing.h"
#include "tracklet.h"
#include "static_tracklet.h"
#include "dynamic_tracklet.h"
#include "bag_player/Next.h"

// Sys/STL/Boost
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <cmath>
#include <algorithm>
#include <boost/ptr_container/ptr_vector.hpp>

// ROS
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/LaserScan.h>
#include <stereo_msgs/DisparityImage.h>
#include <image_geometry/pinhole_camera_model.h>
#include <tf/transform_listener.h>
#include <cv_bridge/CvBridge.h>
#include <people_msgs/PositionMeasurement.h>
#include <annotations/AnnotationArray.h>

// PCL
#include <pcl/point_types.h>
#include <pcl/io/io.h>
#include <pcl/ros/conversions.h>

// OpenCV 
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>

// Namespaces:
// ----------
using namespace std;

// ---------------------------------------------------------------------------
// GeometricImageFilter
//
// ROS node that runs an OpenCV cascade classifier over incoming camera images
// and prunes the detector's search space using geometric constraints derived
// from the camera pose (TF), depth data (sensor_msgs::PointCloud2) and
// configured physical size/height limits (members min/maxSizeM, min/maxHeightM).
// Validated detections are tracked frame-to-frame via tracklets and published
// as people_msgs::PositionMeasurement and annotations::AnnotationArray
// messages (see positionPublisher / annotationPublisher).
// ---------------------------------------------------------------------------
class GeometricImageFilter
{
	private:
		// ---------------------------------------------------------------
		// CameraPose: the camera's origin and an associated normal vector,
		// calculated from TF frames (see getCameraPose()).
		// ---------------------------------------------------------------
		struct CameraPose
		{
			tf::Vector3 cameraOrigin; 								// The camera's point in 3D space
			tf::Vector3 groundNormal;								// Normal vector paired with the pose (presumably the ground normal -- confirm in getCameraPose())

			double getHeight() const { return cameraOrigin[2]; }	// Height of the camera off the ground (z component of the origin)
		};

		// ---------------------------------------------------------------
		// FilterMessages: bundle of all messages required for one
		// filtering pass (collected by getMessages()).
		// ---------------------------------------------------------------
		struct FilterMessages
		{
			bool valid;												// Indicates whether or not all the required messages were collected
			sensor_msgs::Image::ConstPtr imageMsg;					// The actual image
			sensor_msgs::CameraInfo::ConstPtr cameraInfoMsg;		// The camera information message
			sensor_msgs::PointCloud2::ConstPtr depthMsg;			// The required depth data
			boost::shared_ptr<CameraPose> cameraPose;				// The camera's "pose" --> (origin & normal, calculated from TF frames)
		};

		// Typedefs:
		typedef vector<boost::shared_ptr<Tracklet> > TD_TrackletVector;			// A collection of shared tracklets
		typedef pcl::PointCloud<pcl::PointXYZRGB> TD_PointCloud;				// Colored point cloud used as the depth source
		typedef pair<cv::Rect, boost::shared_ptr<Tracklet> >  TD_DetectionPair;	// A detection rectangle paired with its tracklet

		// Publishers/Subscriptions/Transforms:
		// -----------------------------------
		ros::NodeHandle nodeHandle;									// ROS node handle
		ros::Subscriber imageSubscriber;							// Image source
		ros::Subscriber cameraInfoSubscriber;						// Camera information source
		ros::Subscriber depthSubscriber;							// Point cloud source
		ros::Publisher positionPublisher;							// Publisher for people_msgs::PositionMeasurement messages
		ros::Publisher annotationPublisher;							// Publisher for annotations::AnnotationArray messages
		tf::TransformListener* transformListener;					// Transform frame source (raw pointer -- presumably allocated in the ctor and released in the dtor; confirm in the .cpp)
		sensor_msgs::CvBridge cvBridge;								// ROS Image --> OpenCV Image converter

		// Bag Services
		// ------------
		ros::ServiceClient nextClient;								// Client for the bag_player "Next" service

		// Stored Messages & similar data:
		// ------------------------------
		sensor_msgs::Image::ConstPtr imageMsg;						// Most recent image message
		sensor_msgs::CameraInfo::ConstPtr cameraInfoMsg;			// Most recent camera information message
		sensor_msgs::CameraInfo::ConstPtr prevCameraInfoMsg;		// Previous camera information message
		sensor_msgs::PointCloud2::ConstPtr depthMsg;				// Most recent point cloud message
		boost::shared_ptr<CameraPose> prevCameraPose;				// The last camera pose

		// Physical parameters (suffix M --> meters):
		// -----------------------------------------
		string baseFrame;											// The TF frame used to calculate the camera's pose
		string cameraFrame; 										// The TF frame of the camera generating the images
		double jitterToleranceM, maxDepthM;					 		// Misc. parameters
		double minHeightM, maxHeightM;								// Physical height constraints
		double minSizeM, maxSizeM;									// Physical size constraints
		double maxMovementM; 										// Max expected movement from frame --> frame (for tracking)

		// Filtering variables:
		// -------------------
		static const tf::Vector3 noTranslation, groundNormalWorld;	// Constants (defined in the .cpp)
		map<int, double> windowSizes;								// Window Size --> Window Scale
		map<int, Interval> dS;										// w --> [near, far], size depth constraints
		map<int, Interval> dH;										// v --> [near, far], height depth constraints
		map<pair<int, int>, Interval> dC;							// (v,w) --> [near, far], combination of dS & dH 
		vector<double> rY;											// Projection used in the calculations of dS, dH

		// OpenCV:
		// ------
		double windowScaleFactor;									// The scale factor used to generate window sizes
		string cascadePath;											// Path to the classifier XML file
		cv::CascadeClassifier classifier;							// The classifier itself
		cv::Ptr<CvMat> sum;											// The integral image
		cv::Ptr<CvMat> tiltedSum;									// Calculated when the integral image is calculated
		cv::Ptr<CvMat> sqSum;										// Squared integral image
		CvFont font;												// Legacy OpenCV (C API) font, presumably for drawing text on images

		// Tracking & Detection:
		// --------------------
		int trackletId;												// The ID number of the last tracklet
		vector<TD_DetectionPair> objectRects; 						// The vector of rectangles denoting detections
		TD_TrackletVector defaultTracklets; 						// Should hold 1 tracklet to represent the entire screen
		TD_TrackletVector tracklets;								// The tracklets used in tracking augmented detection
		int framesSinceLastDetection;								// The number of frames since we last had a successful keyframe
		int keyframeRate;											// The max number of frames between keyframes (full detections)

		// Flags
		// -----
		bool displayLocal;											// True --> Draw images locally to the screen
		bool saveLocal;												// True --> Save images to disk
		bool doPublishAnnotations;									// True --> Publish annotation messages
		bool generateFalseColorImages;								// True --> False color images are generated and saved

		// Callbacks (presumably cache each incoming message into the
		// corresponding stored-message member above -- confirm in the .cpp)
		// ----------------------------------------------------------------
		void imageCallback(const sensor_msgs::Image::ConstPtr& msg);
		void cameraInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& msg);
		void depthCallback(const sensor_msgs::PointCloud2::ConstPtr& msg);
		
		// Helper
		// ------
		void printObjectRects(vector<TD_DetectionPair>& objectRects);			// Debug dump of detection rectangles
		void printWindowSizeInfo(map<int, double>& windowSizes);				// Debug dump of the window-size map
		void drawTracklets(TD_TrackletVector& tracklets, ros::Time t, cv::Mat& img, cv::Scalar color, int thickness);
		void drawRectangles(vector<TD_DetectionPair>& rects, cv::Mat& img, cv::Scalar color, int thickness, bool drawAnnotations = false);
		geometry_msgs::Point centersToPoint(vector<cv::Point>& centers, TD_PointCloud& ptCloud);
		inline cv::Point makePoint(int x, int y);

		// Core 
		// ----
		// Collect the message set needed for one pass; 'block' presumably
		// controls whether the call waits for a complete set (confirm in
		// the .cpp -- FilterMessages::valid reports completeness).
		FilterMessages getMessages(bool block);
		// Compute the camera origin & normal from the two TF frames.
		boost::shared_ptr<CameraPose> getCameraPose(string baseFrame, string cameraFrame);
		// Smallest detector window (px) for an object of minAllowedSizeM
		// seen at maxAllowedDepthM, given the camera intrinsics.
		int computeMinWindowSize(sensor_msgs::CameraInfo::ConstPtr cameraInfo, const double minAllowedSizeM, const double maxAllowedDepthM);
		// Fill 'windowSizes' with the ladder of window sizes between the
		// min/max bounds, stepped by scaleFactor.
		void computeWindowSizes(map<int, double>& windowSizes, int minWindowSize, int maxWindowSize, double scaleFactor);
		// dS: per-window-size [near, far] depth interval implied by the
		// physical size limits. (fX: presumably the fx focal length from
		// CameraInfo.) NOTE: fixed parameter-name typo "minWIndowSize".
		bool computeSizeConstraints(map<int, Interval>& dS, double fX, double minWorldSize, double maxWorldSize, int minWindowSize, int maxWindowSize);
		// rY: per-row projection used by the height constraints. k's type
		// matches sensor_msgs::CameraInfo::K (3x3 intrinsics, row-major).
		bool computeVProjection(vector<double>& out, const boost::array<double, 9> k, int height);
		// dH: per-image-row [near, far] depth interval implied by the
		// physical height limits.
		bool computeHeightConstraints(map<int, Interval>& dH, vector<double>& rY, FilterMessages& messages, double minHeightM, double maxHeightM);
		// Apply the max-depth limit to one / all constraint intervals
		// (exact semantics of the bool return: see the .cpp).
		inline bool filterDepthConstraint(Interval& depthConstraint, double maxDepth);
		bool filterDepthConstraints(map<int, Interval>& depthConstraints, double maxDepth);
		// Recompute the constraint maps for the current message set.
		bool updateGeometricConstraints(FilterMessages& messages, int minWindowSize, int maxWindowSize);
		// dC: (v, w) --> combination of the dS and dH intervals.
		bool combineConstraints(map<pair<int,int>, Interval>& dC, map<int, Interval>& dS, map<int, Interval>& dH, unsigned int height);
		
		// Object detection
		// ----------------
		// Reject detections that violate the combined constraints dC;
		// rejected ones are optionally returned via falsePositivesOut.
		void validateDetections(vector<TD_DetectionPair>& objects,
							    TD_PointCloud& ptCloud,
								map<pair<int,int>, Interval>& dC,
								vector<TD_DetectionPair>* falsePositivesOut = NULL);
		// Run the geometry-restricted cascade detection over one image.
		void objectDetection(vector<TD_DetectionPair>& objects,		// The output of the method -- the vector of (grouped) objects
							 cv::Mat& img,                          // The image itself
							 string& cameraFrame,					// The camera's TF frame
							 ros::Time cameraTime,					// The time stamp of the TF transformation  
							 TD_PointCloud& ptCloud, 			    // The depth information
							 TD_TrackletVector& tracklets,                  // Tracking information from previous frames
							 map<pair<int,int>, Interval>& dC,              // The combined geometric constraints
							 int groupThreshold,                            // Grouping parameter
							 double eps);                                   // Grouping parameter

		// Publishing
		// ----------
		void publishDetections(vector<TD_DetectionPair>& objects, TD_PointCloud& ptCloud, string& cameraFrame, ros::Time t);	// people_msgs::PositionMeasurement output
		void publishAnnotations(vector<TD_DetectionPair>& objects, TD_PointCloud& ptCloud, string& cameraFrame, ros::Time t);	// annotations::AnnotationArray output

	public:
		// Public interface
		// ----------------
		GeometricImageFilter(ros::NodeHandle& nh);					// Presumably reads parameters and wires pubs/subs from nh -- confirm in the .cpp
		~GeometricImageFilter();
		void run();													// Main processing loop
};

#endif /* include guard */
