#!/usr/bin/env python

# this file defines a single class, Camera2BoundingBox, which is designed
# to make it easy to mix camera information (from the tf module) with a
# 2D bounding box (perhaps with front face) and produce a world-coordinate
# bounding box, which can be delivered to the 3D intersector.

# When you create an instance of this class, you must provide the names of
# two key entities: the tf name of the camera (which will give us the
# world-coordinate bounding box), and the topic name where you want us
# to publish bounding boxes.  It will set up the ROS tf listener and
# bounding box publisher, and return to you.

# Later, when you have some data from your analysis, you call the object's
# methods to compose the bounding boxes.  Right now, the code only
# handles simple rectangular bounding boxes, aligned with the camera
# axes, but if you look at the implementation below, you will see that
# it can easily be generalized for other possibilities later.

# Also note that when you create the camera, it requires you to define
# the width and height of the picture (for your convenience, I
# automatically normalize the bounding box coordinates you give me).
# This can be changed by altering the xWid and yHei variables in the
# class.  Also note that, by default, the camera has 1:1 aspect ratios
# X:Z and Y:Z; these are probably not correct, and need to be set
# according to the actual camera properties.

# NOTE ABOUT COORDINATES AND ASPECT RATIOS:
#
# tf uses a funky coordinate system.  The bounding boxes use that
# system, but I'm assuming that you will be using a more conventional
# system, with x pointing right, y pointing down, and the origin at
# (0,0).
#
# z points "in front of" the camera (in the direction you're looking,
# that is).  The X:Z aspect ratio gives the slope of the right or
# left hand bounding planes if they were at the edge of the camera;
# a wide-angle lens would have a high value, a telephoto would have
# very small.  1:1 of course means 45 degree angle.  Similarly,
# the Y:Z aspect ratio gives the slope of the top or bottom planes
# at the extremes.
#
# Note that the X:Z and Y:Z aspect ratios will typically *NOT* be the
# same.  In a typical camera, the X:Z should be about 4/3 as much as
# the Y:Z, but your camera may vary.


import roslib
roslib.load_manifest('intersector')
import rospy
import tf
from sensor_msgs.msg import CameraInfo
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import PolygonStamped
from visualization_msgs.msg import Marker
from intersector.msg import Plane
from intersector.msg import Convex_Space
import math
from numpy import * #Needed for cross product.  Can't seem to get it to work without importing all

class Camera2BoundingBox:
    def __init__(self, publish_name, xWid,yHei):
        self.tf_listener = tf.TransformListener()
        self.tf_name = ''

        self.bb_publisher = rospy.Publisher(publish_name, Convex_Space)
        self.marker_pub = rospy.Publisher('/look_for_this/found_it/bounding_pyramids/markers', Marker)
        
        self.xWid = xWid
        self.yHei = yHei

        self.aspectXtoZ = 1.0
        self.aspectYtoZ = 1.0
        
        self.K_dict = {}
        
       	global marker
	marker = Marker()
        marker.header.frame_id = "/map"
        marker.header.stamp = rospy.Time()
        marker.ns = ""
        marker.id = 0
        marker.type = 5 # 5 = LINELIST
        marker.action = 0 # 0 = ADD
        marker.pose.position.x = 0
        marker.pose.position.y = 0
        marker.pose.position.z = 0
        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 0.0
        marker.scale.x = 0.01
        marker.color.a = 1
        marker.color.r = 0
        marker.color.g = 0
        marker.color.b = 0


    def PublishPyramid(self, polygonStamped):
        self.PublishCroppedPyramid(polygonStamped, 0)


    def PublishCroppedPyramid(self, polygonStamped, front):
        # PERFORMANCE WARNING: I'm guessing that calling transformPoint()
        # over and over is going to have to lots of painful lookups, and
        # performance will suck.  But for now, I'm focusing on correctness
        # rather than speed, so I'm calling their API rather than getting
        # the transform for myself with lookupTransform().

    	self.tf_name = polygonStamped.header.frame_id
    	points = polygonStamped.polygon.points

        # abort (with warning) if we don't yet know the position of the
        # camera.
        if not self.tf_listener.canTransform(self.tf_name, "/map", polygonStamped.header.stamp):
            print "WARNING: Could not find the tf for the camera %s" % self.tf_name
            return


        # note that the tf coordinate system is right handed, with:
        #     X forward
        #     Y left
        #     Z up
        # ick.
        #
        # BTW, I'm assuming that the picture coordinate system is:
        #     X right
        #     Y down
        #     (0,0) at upper-left
        # which is also icky, but it's a familiar sort of icky.

        # after normalization, left & right give the Y value of those edges
        # of the bounding box, according to the tf convention
        # after normalization, top & bottom give the Z value of those edges
        # of the bounding box, according to the tf convention

        # to construct each plane, we build three points in the camera's
        # coordinate system, which form a triangle.  If you view the
        # triangle and see the points in a counter-clockwise direction,
        # then you are "in front of" the plane.  You would see them in
        # clockwise direction if you were in back of it.
        #
        # remember that the planes face inwards.

        # this is the object which will collect the 4 (or maybe 5)
        # planes we generate.
        poly = Convex_Space()
        poly.sensor = self.tf_name
        poly.algorithm = self.tf_name
        
        marker.points = []

	# Gonna use the intrinsic camera matrix K to reverse project
	K = self.K_dict[self.tf_name]	

	for i in range(len(points)):
#                print "OLD POINT:   %s" % points[i]
		transformed = K.I*[[points[i].x], [points[i].y], [1]]
#                print "TRANSFORMED: %s" % transformed
		""" Camera thinks it's facing down the x-axis, while tf thinks it's
			facing down the z, so we put in (z, -x, y)
			y also has to be negated here, not sure why??? """
		points[i] = Point(transformed[2,0], -transformed[0,0], -transformed[1,0])

		# Old method
#		points[i].x = ( 1 - (2*points[i].x/self.xWid) * self.aspectXtoZ )
#		points[i].y = ( 1 - (2*points[i].y/self.yHei) * self.aspectYtoZ )
		
	""" This should add the points for each plane in the correct order,
		counter-clockwise if looking from within the pyramid """
        origin = Point(0,0,0)
	for point1,point2 in zip(points, points[1:]+[points[0]]):
	        poly.planes.append( self.GenPlane(origin, point2, point1) )
	 
        # finally, if front is positive, then we want to produce a bounding
        # plane there, facing away from the camera.  So, if we stand at the
        # origin, it should be a clockwise pattern.
        if front > 0:
            point1 = Point(front, 0,0)
            point2 = Point(front, 1,0)
            point2 = Point(front, 1,1)
            poly.planes.append( self.GenPlane(point1, point2, point3) )


        # we are at last ready to speak to the world!
        self.bb_publisher.publish(poly)

	""" This bit is for the purpose of testing the intersector
		It publishes a cube that should intersect with the
		bounding pyramid generated for the blue_block
	"""
	poly.sensor = "testing"
        poly.algorithm = "testing"
        poly.planes = []
        """
	# Front and Back planes
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(2,0,0),Point(2,1,0),Point(2,0,1)) )
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(1,0,0),Point(1,0,1),Point(1,1,0)) )
        # Right and Left planes
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(0,-2,0),Point(1,-2,0),Point(0,-2,1)) )
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(0,-1,0),Point(0,-1,1),Point(1,-1,0)) )
        """
        # Top and Bottom planes
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(0,0,1),Point(1,0,1),Point(0,1,1)) )
        poly.planes.append( self.GenPlaneFromWorldPoints(Point(0,0,0),Point(0,1,0),Point(1,0,0)) )
        self.bb_publisher.publish(poly)
        
	        
        self.marker_pub.publish(marker)


    def GenPlane(self, point1, point2, point3):
        point1_stamped = PointStamped();
        point1_stamped.header.frame_id = self.tf_name
        point1_stamped.point = point1

        point2_stamped = PointStamped();
        point2_stamped.header.frame_id = self.tf_name
        point2_stamped.point = point2

        point3_stamped = PointStamped();
        point3_stamped.header.frame_id = self.tf_name
        point3_stamped.point = point3

        point1_world = self.tf_listener.transformPoint("/map", point1_stamped)
        point2_world = self.tf_listener.transformPoint("/map", point2_stamped)
        point3_world = self.tf_listener.transformPoint("/map", point3_stamped)
        
        scale2 = 1 # -2/ (point2_world.point.z - point1_world.point.z)
        point2_scaled = Point( point1_world.point.x + (point2_world.point.x - point1_world.point.x) * scale2,
                                                 point1_world.point.y + (point2_world.point.y - point1_world.point.y) * scale2,
                                                 point1_world.point.z + (point2_world.point.z - point1_world.point.z) * scale2)

	if( not math.isnan(point2_scaled.x) ):
	        marker.points.append(point1_world.point)
	        marker.points.append(point2_scaled)
               
#        marker.points.append(point1_world.point)
#        marker.points.append(point2_world.point)
#        marker.points.append(point1_world.point)
#        marker.points.append(point3_world.point)
        return self.GenPlaneFromWorldPoints(point1_world.point, point2_world.point, point3_world.point)

    #creates a plane Ax + By + Cz + D, generate normal, then tells you which side of the plane the normal is on
    def GenPlaneFromWorldPoints(self, point1, point2, point3):
        
        AB = [(point2.x-point1.x),(point2.y-point1.y),(point2.z-point1.z)]
        AC = [(point3.x-point1.x),(point3.y-point1.y),(point3.z-point1.z)]
        cProd = cross(AB,AC)
        newPlane = Plane(cProd[0],cProd[1],cProd[2],0)

        # d is the value we expect to get when we multiply the normal
        # times any point on the plane        
        normal = cProd
        newPlane.d = -dot(normal, [point1.x,point1.y,point1.z])

	return newPlane



# DEAD CODE	
        if otherPt > 0:
            return 1
        else:
            otherPt = -1*point1.x*newPlane[0] + -1*point1.y*newPlane[1] + -1*point1.z*newPlane[2] + newPlane.d + normal
            if otherPt > 0:
                return 0 
                


def cameraCallback(cam_data):
    """Stash the camera's 3x3 intrinsic matrix in the module-level K.

    Used as a one-shot CameraInfo subscriber callback: polygonCallback()
    sets K to None, subscribes with this callback, and waits for K to
    become non-None.

    BUG FIX: the old code assigned the flat 1x9 matrix to the global K
    and reshaped it afterwards, so the waiting polygonCallback() could
    observe K in its un-reshaped state.  We now build the reshaped
    matrix locally and expose it with a single assignment.
    """
    global K
    intrinsics = matrix(cam_data.K)  # CameraInfo.K is a flat row-major 3x3
    intrinsics.shape = (3, 3)
    K = intrinsics
    return

def polygonCallback(polygonStamped):
    """Handle an incoming 2D bounding polygon.

    Lazily fetches (and caches) the intrinsic matrix for the polygon's
    camera by subscribing once to its camera_info topic, then hands the
    polygon to the converter for publication.
    """
    cam_name = polygonStamped.header.frame_id
    if cam_name not in C2BB.K_dict:
        global K
        K = None
        sub = rospy.Subscriber(cam_name + "/camera_info", CameraInfo, cameraCallback)
        # Wait for cameraCallback to fill in K.  BUG FIX: this used to be
        # a busy-wait ("while (K == None): None") that pegged a CPU core;
        # sleep briefly between checks instead.
        while K is None:
            rospy.sleep(0.01)
        sub.unregister()
        C2BB.K_dict[cam_name] = K

    C2BB.PublishPyramid(polygonStamped)

def listener():
    """Bring up the node: create the camera-to-bounding-box converter,
    subscribe to incoming 2D polygons, and process messages forever."""
    global C2BB
    rospy.init_node('listener', anonymous=True)
    C2BB = Camera2BoundingBox("/intersector_input", 1024, 1024)
    rospy.Subscriber("/look_for_this/found_it/", PolygonStamped, polygonCallback)
    rospy.spin()

# entry point: run the listener node when this file is executed directly
if __name__ == '__main__':
    listener()
