#include "VisionNode.h"
#include "math.h"
#include "../utilities/ObjPosition.h"

#include <map>
#include "../utilities/XmlParser.h"

#define QUEUE_SIZE 5
#define VERTICAL_OFFSET 0.19

// Last computed object position, published on every processed frame.
// NOTE(review): ctor args look like (x, y, dist, size) given the member
// usage below — confirm against ObjPosition's declaration.
ObjPosition global_pos(0.0,0.0,1.0,0.0);

// NOTE(review): these two image pointers are never referenced anywhere
// in this file — presumably leftovers; confirm before removing.
sensor_msgs::Image::ConstPtr rgb_global;
sensor_msgs::Image::ConstPtr depth_global;

// Constructor: loads node parameters, zeroes the HSV color bounds and
// wires up the ROS topics (point-cloud input, position output, color
// selection input).
// NOTE(review): throw(const char*) is a deprecated dynamic exception
// specification (removed in C++17) — kept here because it must match
// the declaration in VisionNode.h, which is outside this file.
VisionNode::VisionNode() throw(const char*) {

	// reading the parameters
	XmlParser::extractNodeData("Vision", m_params);
	m_min_obj_size = atoi(m_params["min_obj_size"].c_str());

	// initialization: bounds start at black (0,0,0) so nothing is
	// detected until a color is set via the SetColor topic
	m_lower_bound_hsv = cvScalar(0, 0, 0);
	m_upper_bound_hsv = cvScalar(0, 0, 0);

	// subscribe to point cloud
    m_cloud_sub = m_node_handler.subscribe("/camera/depth_registered/points", QUEUE_SIZE, &VisionNode::detectObjCB, this);

    // publishes the object position
	m_obj_pos_publisher = m_node_handler.advertise<std_msgs::String>("/VisionNode/ObjPosition", QUEUE_SIZE);

	// subscribe to color setting topic
	m_color_sub = m_node_handler.subscribe("/VisionNode/SetColor", QUEUE_SIZE, &VisionNode::setColorCB, this);
}

// Callback for the SetColor topic: parses six integers out of the
// message and installs them as the new lower/upper HSV detection bounds.
void VisionNode::setColorCB(const std_msgs::String::ConstPtr& msg) {

	// reject empty messages — nothing to parse
	if (msg->data.empty()) {
		ROS_ERROR("VisionNode::setColorCB- EMPTY MESSAGE");
		return;
	}

	ROS_INFO("in VisionNode::setColorCB:  msg->data = %s",msg->data.c_str());

	// extract the two HSV bounds from the message payload
	CvScalar new_lower;
	CvScalar new_upper;
	parseMsg(msg->data, new_lower, new_upper);

	// hand the new bounds over to the detection callback; the scoped
	// block releases the mutex as soon as the members are written
	{
		std::unique_lock<std::mutex> guard(m_mutex);
		m_lower_bound_hsv = new_lower;
		m_upper_bound_hsv = new_upper;
	}
}

// Splits 'msg' on the DELIMITER character set and converts the first six
// tokens into two HSV bounds: tokens 0-2 form the lower bound, 3-5 the
// upper bound. On malformed input the output parameters are left untouched.
//
// Fixes two defects of the strtok-based version:
//  - 'char dataChars[msg.length()]' was a non-standard VLA with no room
//    for the terminating NUL, so strcpy overflowed the buffer by one byte;
//  - fewer than six tokens indexed past the end of 'tokens' (UB).
void VisionNode::parseMsg(string msg, CvScalar& lower_bound_hsv, CvScalar& upper_bound_hsv) {

	// split message by delimiter characters (same char-set semantics as
	// strtok: runs of delimiters are skipped, empty tokens are dropped)
	const string delims(DELIMITER);
	vector<string> tokens;
	string::size_type start = msg.find_first_not_of(delims);
	while (start != string::npos) {
		string::size_type end = msg.find_first_of(delims, start);
		if (end == string::npos) {
			tokens.push_back(msg.substr(start));
			break;
		}
		tokens.push_back(msg.substr(start, end - start));
		start = msg.find_first_not_of(delims, end);
	}

	// guard against malformed messages instead of reading out of range
	if (tokens.size() < 6) {
		ROS_ERROR("VisionNode::parseMsg- expected 6 values, got %lu",
				(unsigned long) tokens.size());
		return;
	}

	// extract data
	lower_bound_hsv = cvScalar(atoi(tokens[0].c_str()),atoi(tokens[1].c_str()),atoi(tokens[2].c_str()));
	upper_bound_hsv = cvScalar(atoi(tokens[3].c_str()),atoi(tokens[4].c_str()),atoi(tokens[5].c_str()));
}

// File-scope holders for the HSV color bounds used by the detection
// callback (filled from the member bounds while holding m_mutex).
CvScalar detect_obj_lower_bound_hsv;
CvScalar detect_obj_upper_bound_hsv;

// Point-cloud callback: converts the organized cloud to an RGB image,
// thresholds it by the currently selected HSV color range, locates the
// largest significant contour, looks up that pixel's 3D coordinates in
// the cloud and publishes "x<DELIM>y<DELIM>dist<DELIM>size".
//
// Fixes: catch std::runtime_error by const reference (was by value) and
// bail out on conversion failure instead of processing a stale m_image;
// drop the unused RNG; snapshot the color bounds into locals instead of
// file-scope globals; guard sqrtf against a negative operand (was able
// to publish NaN); drop the pointless 'static' on the pixel coordinates;
// widen the snprintf buffer so wide values cannot be truncated.
void VisionNode::detectObjCB(const sensor_msgs::PointCloud2ConstPtr& cloud) {

	// convert the organized cloud into an RGB image message
	try {
		pcl::toROSMsg(*cloud, m_image);
	} catch (const std::runtime_error& e) {
		ROS_ERROR_STREAM(
				"Error in converting cloud to image message: " << e.what());
		return; // m_image is stale/invalid - skip this frame
	}

	// acquire RGB image from camera
	cv_bridge::CvImagePtr cv_ptr;
    try {
      cv_ptr = cv_bridge::toCvCopy(m_image, enc::BGR8);
    } catch (cv_bridge::Exception& e) {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
    IplImage input(cv_ptr->image);
    IplImage* img = cvCloneImage(&input);

    // create HSV image out of RGB
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);

    // threshold HSV image by object color
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);

    // snapshot the color bounds under the lock - setColorCB may change
    // them concurrently; local copies avoid holding the lock while the
    // (comparatively slow) image processing runs
    CvScalar lower_bound_hsv;
    CvScalar upper_bound_hsv;
    {
		std::unique_lock<std::mutex> mlock(m_mutex);
		lower_bound_hsv = m_lower_bound_hsv;
		upper_bound_hsv = m_upper_bound_hsv;
    }

    // perform thresholding
    cvInRangeS(imgHSV, lower_bound_hsv, upper_bound_hsv, imgThreshed);

    // cast thresholded image to iterable matrix format
    Mat src = Mat(imgThreshed, true);

    // find contours
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(src, contours, hierarchy, CV_RETR_TREE,
        CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));

    // find the greatest contour above the size threshold - it is taken
    // to describe the object
    double max_area = 0.0;
    int max_index = -1;
	for (unsigned int i = 0; i < contours.size(); ++i) {

		double area = contourArea(contours[i]); // area of current contour

		// skip insignificant contours
		if (area <= m_min_obj_size) {
			continue;
		}

		if (area > max_area) {
			max_area = area;
			max_index = i;
		}
	}

	// set object's size (reset below when nothing was detected)
    global_pos.m_obj_size = max_area;

    // check if object was not detected on current frame
    if (max_index < 0) {

    	// indicate that no object was found
    	global_pos.m_x = ERROR_POSITION;
    	global_pos.m_y = ERROR_POSITION;
    	global_pos.m_dist = ERROR_POSITION;
    	global_pos.m_obj_size = 0.0;

    } else { // object is detected

		// find object's center of mass
		Moments mu = moments(contours[max_index], false);
		Point2f p2f(mu.m10 / mu.m00, mu.m01 / mu.m00);

    	// object position in image (measured in pixels)
    	int imgCol = int(p2f.x);
    	int imgRow = int(p2f.y);

    	// cast data from topic to point cloud and look the pixel up
    	pcl::PointCloud<pcl::PointXYZ> pCloud;
    	pcl::fromROSMsg(*cloud, pCloud);
    	pcl::PointXYZ target = pCloud.at(imgCol, imgRow);

		// set values of object position
    	if (isValidPos(target)) {

    		// set object position
			global_pos.m_x = target.x;
			global_pos.m_y = target.y;

			// squared horizontal distance from camera to object
			float dist_sq = target.z * target.z - target.x * target.x;

			// vertical offset between camera axis and object
			float diff_height = std::abs(VERTICAL_OFFSET - target.y);

			// set driving distance; clamp the operand so a degenerate
			// geometry cannot make sqrtf return NaN
			float drive_sq = dist_sq - diff_height * diff_height;
			global_pos.m_dist = (drive_sq > 0.0f) ? sqrtf(drive_sq) : 0.0f;

    	} else { // object exceeds effective distance

    		// keep last values of x,y; flip dist negative as the marker
    		ROS_ERROR("invalid: %f,%f,%f", target.x, target.y, target.z);
        	global_pos.m_dist = -std::abs(global_pos.m_dist);
    	}
    }

    // release allocated memory for images
    cvReleaseImage(&img);
    cvReleaseImage(&imgHSV);
    cvReleaseImage(&imgThreshed);

    // compose message: "x<DELIM>y<DELIM>dist<DELIM>size"
	char buffer[64]; // 32 could truncate wide ERROR_POSITION values

	snprintf(buffer, sizeof(buffer), "%3.3f%s%3.3f%s%3.3f%s%5.1f",
			global_pos.m_x, DELIMITER, global_pos.m_y, DELIMITER,
			global_pos.m_dist, DELIMITER, global_pos.m_obj_size);
	m_message_to_publish.data = buffer;
	ROS_WARN("Vision node publishes: %s", buffer);

	// publish message with object position
	m_obj_pos_publisher.publish(m_message_to_publish);
}

// A cloud point is usable only when none of its coordinates is NaN
// (invalid depth readings come back as NaN from the sensor driver).
bool VisionNode::isValidPos(pcl::PointXYZ target) {

	return !(isnan(target.x) || isnan(target.y) || isnan(target.z));
}

// Destructor: nothing to release explicitly - the ROS handle members
// tear down their subscriptions/publisher in their own destructors.
VisionNode::~VisionNode() {

}

/**
 * Main function for the vision node (no input arguments).
 *
 * Initializes ROS, constructs the node (which subscribes/advertises its
 * topics) and spins until shutdown. The dynamic exception specification
 * `throw(const char*)` was removed: it is deprecated since C++11,
 * ill-formed in C++17, and had no effect on main anyway.
 */
int main(int argc, char** argv) {

  ros::init(argc, argv, "VisionNode");

  // all of the node's work happens inside its ROS callbacks
  VisionNode vn;

  ros::spin();
  return 0;
}
