#include "pixeltransform.h"

#include <sensor_msgs/image_encodings.h>

// Shared between the static subscriber callback and the lookup methods:
// hasDepthImage flips to true once the first depth frame has been converted,
// depth_cv_ptr holds the most recent depth image (32FC1) from the Kinect.
bool PixelTransform::hasDepthImage = false;
cv_bridge::CvImagePtr PixelTransform::depth_cv_ptr;

/*
 * Callback for depth images from the Kinect.
 *
 * Converts the incoming ROS image to a single-channel 32-bit float OpenCV
 * image and stores it in depth_cv_ptr for the pixel/depth lookups below.
 * Marks hasDepthImage once a frame has been stored successfully.
 */
void PixelTransform::pixelTransformDepthCallback(const sensor_msgs::ImageConstPtr &msg)
{

	try
	{
		depth_cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1);
	}
	catch(const cv_bridge::Exception& e)
	{
		// Log the exception text so the actual failure cause is visible;
		// previously the caught exception was silently discarded.
		ROS_ERROR("cv_bridge conversion to TYPE_32FC1 failed: %s", e.what());
		return;
	}

	hasDepthImage = true;
}

/*
 * Constructor: keeps up to 10 seconds of TF history in the listener and
 * subscribes (queue size 1) to the registered depth topic, which feeds
 * pixelTransformDepthCallback.
 */
PixelTransform::PixelTransform(ros::NodeHandle& node) :
	mTfListener(ros::Duration(10))
{
	sub_depth = node.subscribe("camera/depth_registered/image_rect", 1, pixelTransformDepthCallback);
}

/*
 * Rebuilds the pinhole camera model from a CameraInfo message; the model
 * supplies the intrinsics used by the 3d<->pixel projections below.
 */
void PixelTransform::UpdateModel(const sensor_msgs::CameraInfoConstPtr &camInfo)
{
	//necessary data for depth calculation later on
	mCameraModel.fromCameraInfo(camInfo);
}

/*
 * Projects a 3D point (camera frame) onto the image plane using the
 * current pinhole camera model.
 */
cv::Point2d PixelTransform::transformFrom3dToPixel(cv::Point3d xyzpoint)
{
	const cv::Point2d pixel = mCameraModel.project3dToPixel(xyzpoint);
	return pixel;
}

/*
 * Reads the depth value stored at the given pixel of the latest depth frame.
 *
 * Returns false when no depth frame has arrived yet or when the pixel lies
 * outside the image. NOTE(review): the value itself is not validated, so
 * callers may receive NaN where the sensor had no reading — confirm callers
 * handle that.
 */
bool PixelTransform::Depth(cv::Point2d xypoint, float &depth)
{
	if(!hasDepthImage)
		return false;

	const cv::Mat& depthImage = depth_cv_ptr->image;
	const bool inside = xypoint.x >= 0 && xypoint.y >= 0
		&& xypoint.x < depthImage.cols && xypoint.y < depthImage.rows;
	if(!inside)
		return false;

	// Point2d holds doubles; at<float>(row, col) truncates them to ints.
	depth = depthImage.at<float>(xypoint.y, xypoint.x);
	return true;
}

/*
 * Converts an image pixel to a 3D point in the camera frame.
 *
 * Projects a unit ray through the pixel with the pinhole model and scales
 * it by the measured depth. Returns false when no depth image is available,
 * the pixel is out of bounds, or the depth sample is unusable (non-positive,
 * NaN, or >= 20).
 */
bool PixelTransform::transformFromPixelTo3d(cv::Point2d xypoint, cv::Point3d& xyzpoint)
{
	if(!hasDepthImage)
		return false;

	// Find xyz coordinate of point
	if(xypoint.x >= 0 && xypoint.x < depth_cv_ptr->image.cols && xypoint.y >= 0 && xypoint.y < depth_cv_ptr->image.rows)
	{
		float depth = depth_cv_ptr->image.at<float>(xypoint.y, xypoint.x);
		// A depth of exactly 0 means "no measurement" on the Kinect; the
		// previous ">= 0" accepted it and scaled the ray to the camera
		// origin, reporting success for an invalid sample. NaN samples
		// fail both comparisons and are rejected as before.
		if(depth > 0 && depth < 20)
		{
			xyzpoint = mCameraModel.projectPixelTo3dRay(xypoint);
			xyzpoint *= depth;

			return true;
		}
	}

	return false;
}

/*
 * Transforms a point from the camera's optical frame into "base_link"
 * via TF. Returns false (after logging) when the transform lookup fails.
 */
bool PixelTransform::transformFrom3dToBase(cv::Point3d xyzPoint, geometry_msgs::Point &base_point)
{
	// Stamp the point in the optical frame; time 0 asks TF for the most
	// recent transform it has available.
	geometry_msgs::PointStamped stamped_in;
	stamped_in.header.frame_id = "camera_rgb_optical_frame";
	stamped_in.header.stamp = ros::Time();
	stamped_in.point.x = xyzPoint.x;
	stamped_in.point.y = xyzPoint.y;
	stamped_in.point.z = xyzPoint.z;

	geometry_msgs::PointStamped stamped_out;
	try
	{
		mTfListener.transformPoint("base_link", stamped_in, stamped_out);
	}
	catch(tf::TransformException& ex)
	{
		ROS_ERROR("Received an exception trying to transform a point from \"camera_link\" to \"base_link\": %s", ex.what());
		return false;
	}

	base_point.x = stamped_out.point.x;
	base_point.y = stamped_out.point.y;
	base_point.z = stamped_out.point.z;
	return true;
}

/*
 * Full pixel -> base_link pipeline: looks up the depth for the pixel to
 * build a camera-frame 3D point, then transforms that point into the
 * robot base frame with TF. Returns false if either stage fails.
 */
bool PixelTransform::transformFromPixelToBase(cv::Point2d point, geometry_msgs::Point &base_point)
{
	cv::Point3d cameraPoint;
	return transformFromPixelTo3d(point, cameraPoint)
		&& transformFrom3dToBase(cameraPoint, base_point);
}
