#!/usr/bin/env python

# this file defines a single class, Camera2BoundingBox, which is designed
# to make it easy to mix camera information (from the tf module) with a
# 2D bounding box (perhaps with front face) and produce a world-coordinate
# bounding box, which can be delivered to the 3D intersector.

# When you create an instance of this class, you must provide the name of
# two key entities: the tf name of the camera (which will give us the
# world-coordinate bounding box), and the topic name where you want us
# to publish bounding boxes.  It will set up the ROS tf listener and
# bounding box publisher, and return to you.

# Later, when you have some data from your analysis, you call the object's
# methods to compose the bounding boxes.  Right now, the code only
# handles simple rectangular bounding boxes, aligned with the camera
# axes, but if you look at the implementation below, you will see that
# it can easily be generalized for other possibilities later.

# Also note that when you create the camera, it requires you to define
# the width and height of the picture (for your convenience, I
# automatically normalize the bounding box coordinates you give me).
# This can be changed by altering the xWid and yHei variables in the
# class.  Also note that, by default, the camera has 1:1 aspect ratios
# X:Z and Y:Z; these are probably not correct, and need to be set
# according to the actual camera properties.

# NOTE ABOUT COORDINATES AND ASPECT RATIOS:
#
# tf uses a funky coordinate system.  The bounding boxes use that
# system, but I'm assuming that you will be using a more conventional
# system, with x pointing right, y pointing down, and the origin at
# (0,0).
#
# z points "in front of" the camera (in the direction you're looking,
# that is).  The X:Z aspect ratio gives the slope of the right or
# left hand bounding planes if they were at the edge of the camera;
# a wide-angle lens would have a high value, a telephoto would have
# very small.  1:1 of course means 45 degree angle.  Similarly,
# the Y:Z aspect ratio gives the slope of the top or bottom planes
# at the extremes.
#
# Note that the X:Z and Y:Z aspect ratios will typically *NOT* be the
# same.  In a typical camera, the X:Z should be about 4/3 as much as
# the Y:Z, but your camera may vary.


import roslib
roslib.load_manifest('intersector')
import rospy
import tf
from sensor_msgs.msg import CameraInfo
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import PolygonStamped
from visualization_msgs.msg import Marker
from intersector.msg import Plane
from intersector.msg import Camera2BoundingBox__setCamera
from intersector.srv import Camera2BoundingBox__Points
from intersector.srv import Camera2BoundingBox__Planes
from intersector.msg import Convex_Space

import math
from numpy import * #Needed for cross product.  Can't seem to get it to work without importing all



class Camera2BoundingBox:
    """Converts 2D picture-space bounding boxes into world-coordinate
    convex spaces (bounding pyramids), using the ROS tf transform for a
    camera.

    Typical use:
      1. Construct the object (sets up the tf listener).
      2. Call setCamera() with the picture size and the camera-frame
         position of a ray through the picture's upper-left corner
         (this defines the aspect ratios).
      3. Call setPublisher() with the topic to publish Convex_Space on.
      4. For each detection, call PublishSpace() with a PolygonStamped
         bounding box; it generates the planes and publishes them.

    Coordinate conventions (see the file header comment for details):
      * tf camera frame: x forward, y left, z up
      * picture frame:   x right, y down, (0,0) at upper-left
    """

    def __init__(self):
        # listener used to transform camera-relative points into "/map"
        self.tf_listener = tf.TransformListener()

        # picture size in pixels; placeholders until setCamera() is called.
        # Incoming bounding box coordinates are normalized against these.
        self.xWid = 1
        self.yHei = 1

        # X:Z and Y:Z aspect ratios (slope of the extreme bounding planes).
        # The 1:1 defaults are almost certainly wrong for a real camera;
        # set the true values via setCamera().
        self.aspectLRtoDEEP = 1.0
        self.aspectTBtoDEEP = 1.0


    def setPublisher(self, publish_name):
        """Creates the Convex_Space publisher used by PublishSpace()."""
        self.publisher = rospy.Publisher(publish_name, Convex_Space)


    def setCamera(self, cam_data):
        """Stores the picture size and derives the camera aspect ratios.

        cam_data must provide xWid, yHei (picture size in pixels) and
        upperLeft_world_{x,y,z}: the camera-frame coordinates of a ray
        through the picture's upper-left corner.
        """
        self.xWid = cam_data.xWid
        self.yHei = cam_data.yHei

        # tf camera coordinates are x forward (depth), y left, z up, so the
        # upper-left corner ray gives us:
        #   left/right slope = y/depth   and   top/bottom slope = z/depth
        self.aspectLRtoDEEP = cam_data.upperLeft_world_y / cam_data.upperLeft_world_x
        self.aspectTBtoDEEP = cam_data.upperLeft_world_z / cam_data.upperLeft_world_x


    def GenPoints(self, bb):
        """Maps a picture-space bounding polygon into world-space points.

        bb is a PolygonStamped whose header names the camera's tf frame.
        Returns a list of geometry_msgs Points in the "/map" frame: the
        camera origin first, then one point (at unit depth in front of the
        camera) per polygon vertex.  Returns [] if the camera's transform
        is not (yet) known.
        """
        # PERFORMANCE WARNING: calling transformPoint() over and over
        # probably does lots of painful lookups, and performance will
        # suck.  For now we favor correctness over speed; a later version
        # could fetch the transform once with lookupTransform() and apply
        # it ourselves.

        # abort (with warning) if we don't yet know the position of the
        # camera.
        if not self.tf_listener.canTransform(bb.header.frame_id, "/map", bb.header.stamp):
            print("WARNING: Could not find the tf for the camera %s" % bb.header.frame_id)
            return []

        # note that the tf coordinate system is right handed, with:
        #     X forward, Y left, Z up
        # while the picture coordinate system is:
        #     X right, Y down, (0,0) at upper-left
        #
        # after normalization, left & right give the Y value of those edges
        # of the bounding box (tf convention); top & bottom give the Z.
        #
        # the planes built from these points face inwards: viewed from "in
        # front of" a plane, its three points appear counter-clockwise.

        points = []

        # first point: the camera origin, expressed in the world frame
        cameraOrigin_stamped = PointStamped()
        cameraOrigin_stamped.header = bb.header
        cameraOrigin_stamped.point  = Point(0, 0, 0)

        absolute = self.tf_listener.transformPoint("/map", cameraOrigin_stamped)
        points.append(absolute.point)

        for pt in bb.polygon.points:
            print("GenPoints(): xWid=%d yHei=%d aspectLRtoDEEP=%f aspectTBtoDEEP=%f" % (self.xWid, self.yHei, self.aspectLRtoDEEP, self.aspectTBtoDEEP))
            print("GenPoints(): pt=%s" % pt)
            print("")

            # Ignoring aspect ratios:
            # pixel coords (  0,  0) map to camera coords (1, 1, 1).
            # pixel coords (  0,max) map to camera coords (1, 1,-1).
            # pixel coords (max,  0) map to camera coords (1,-1, 1).
            # pixel coords (max,max) map to camera coords (1,-1,-1).

            normalized_LR = float(pt.x) / self.xWid    # ranges from 0 to 1
            normalized_TB = float(pt.y) / self.yHei

            print("GenPoints(): normalized: LR=%f TB=%f" % (normalized_LR, normalized_TB))

            # ranges from -aspect to +aspect, positive toward left/top
            shifted_LR = self.aspectLRtoDEEP * (1 - 2*normalized_LR)
            shifted_TB = self.aspectTBtoDEEP * (1 - 2*normalized_TB)

            print("GenPoints(): shifted: LR=%f TB=%f" % (shifted_LR, shifted_TB))

            # unit depth in front of the camera (tf: x forward)
            cameraRelative_point = Point(1, shifted_LR, shifted_TB)
            cameraPoint_stamped = PointStamped()
            cameraPoint_stamped.header = bb.header
            cameraPoint_stamped.point  = cameraRelative_point

            absolute_point = self.tf_listener.transformPoint("/map", cameraPoint_stamped)

            print("GenPoints(): cameraPoint_stamped=%s" % cameraPoint_stamped.point)
            print("GenPoints(): absolute_point     =%s" % absolute_point.point)
            print("")

            points.append(absolute_point.point)

        return points


    def GenPlanes(self, bb):
        """Generates the inward-facing planes of the bounding pyramid.

        Returns one Plane per polygon edge (each through the camera origin
        and two adjacent world points), or [] on any failure.
        """
        points = self.GenPoints(bb)

        # we need the camera origin plus at least one polygon vertex; this
        # also covers the unknown-transform ([]) case.
        # BUGFIX: the old "len(points) == 0" test crashed with an
        # IndexError below when the polygon had no points at all.
        if len(points) < 2:
            return []

        origin   = points[0]
        base_pts = points[1:] + [points[1]]  # base_pts loops around, to make plane generation easy

        planes = []

        for i in range(1, len(base_pts)):
            plane = self.GenPlaneFromWorldPoints(origin, base_pts[i-1], base_pts[i])
            if plane is None:
                # GenPlaneFromWorldPoints() detected degenerate geometry
                return []

            planes.append(plane)

        return planes


    def PublishSpace(self, bb, sensor, algorithm, aux_payload):
        """Builds a Convex_Space from the bounding box and publishes it.

        Returns the published Convex_Space (some callers want to see what
        was just sent), or None if no planes could be generated.
        """
        planes = self.GenPlanes(bb)
        if len(planes) == 0:
            print("WARNING: Camera2BoundingBox:PublishSpace(): Returning None because len(planes) == 0")
            return None

        space = Convex_Space()

        space.planes      = planes
        space.sensor      = sensor
        space.algorithm   = algorithm
        space.aux_payload = aux_payload

        self.publisher.publish(space)

        # some callers might be interested in seeing what I just published
        return space


    def GenPlaneFromWorldPoints(self, point1, point2, point3):
        """Builds the plane Ax + By + Cz + D = 0 through three world points.

        The returned Plane's (a,b,c) is the negated normal of the triangle
        point1->point2->point3, so the plane faces whoever would see the
        points in clockwise order.  Returns None for degenerate input
        (duplicate or collinear points).
        """
        p1 = (point1.x, point1.y, point1.z)
        p2 = (point2.x, point2.y, point2.z)
        p3 = (point3.x, point3.y, point3.z)

        if p1 == p2 or p1 == p3 or p2 == p3:
            print("camera2boundingBox.py:GenPlaneFromWorldPoints(): ERROR: 2 points were identical!!!  point1=%s point2=%s point3=%s" % (point1, point2, point3))
            return None

        AB = [p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2]]
        AC = [p3[0]-p1[0], p3[1]-p1[1], p3[2]-p1[2]]

        normal = cross(AB, AC)

        # BUGFIX: distinct but collinear points give a zero normal, which
        # used to slip through and produce the degenerate plane 0x+0y+0z+0=0.
        if not normal.any():
            print("camera2boundingBox.py:GenPlaneFromWorldPoints(): ERROR: points are collinear!!!  point1=%s point2=%s point3=%s" % (point1, point2, point3))
            return None

        # the a,b,c fields of the plane are the plane normal.  d is the
        # (negation of the) dot product of the normal with any point on
        # the plane
        d = - dot(normal, list(p1))

        print("GenPlaneFromWorldPoints: point1=(%f,%f,%f) point2=(%f,%f,%f) point3=(%f,%f,%f)" % (p1 + p2 + p3))
        print("GenPlaneFromWorldPoints: normal=(%f,%f,%f)" % (normal[0], normal[1], normal[2]))
        print("GenPlaneFromWorldPoints: d=%f" % d)
        print("")

        # BUGFIX: the planes generated were pointing the wrong direction.
        # Multiply all values by -1.
        return Plane(-normal[0], -normal[1], -normal[2], -d)


    def GenPlane(self, point1, point2, point3):
        """Transforms three camera-relative points into the "/map" frame
        and builds the plane through them (see GenPlaneFromWorldPoints).

        NOTE(review): relies on self.tf_name for the camera frame, which
        is never assigned anywhere in this class -- callers must set it
        before use.  TODO confirm whether this method is still used.
        """
        point1_stamped = PointStamped()
        point1_stamped.header.frame_id = self.tf_name
        point1_stamped.point = point1

        point2_stamped = PointStamped()
        point2_stamped.header.frame_id = self.tf_name
        point2_stamped.point = point2

        point3_stamped = PointStamped()
        point3_stamped.header.frame_id = self.tf_name
        point3_stamped.point = point3

        point1_world = self.tf_listener.transformPoint("/map", point1_stamped)
        point2_world = self.tf_listener.transformPoint("/map", point2_stamped)
        point3_world = self.tf_listener.transformPoint("/map", point3_stamped)

        # (the old marker-scaling locals here were unused and could divide
        # by zero, so they have been removed)
        return self.GenPlaneFromWorldPoints(point1_world.point, point2_world.point, point3_world.point)




# this is really designed to work as a Python library, not as a node.
# However, if you have code which wants to access this code but doesn't want
# to import the Python (such as a C++) node, you can run this by itself and
# it will set up a pair of services which you can call.
#
# TODO: Add ROS argument parsing so that we can run multiple copies of this
# node, and have them listen on different interfaces.

if __name__ == '__main__':

    # Three ROS interfaces are exposed:
    #    /camera2boundingBox__setCamera - topic:   sets the camera parameters
    #    /camera2boundingBox__points    - service: bounding box in, the n+1 world points which represent it out
    #    /camera2boundingBox__planes    - service: bounding box in, the n planes which represent it out

    # set up the ROS node and the single shared converter object
    rospy.init_node('listener', anonymous=True)
    converter = Camera2BoundingBox()

    # the bound methods of the shared object serve directly as the ROS
    # callbacks, so no global wrapper functions are required
    rospy.Subscriber("/camera2boundingBox__setCamera", Camera2BoundingBox__setCamera, converter.setCamera)

    rospy.Service("/camera2boundingBox__points", Camera2BoundingBox__Points, converter.GenPoints)
    rospy.Service("/camera2boundingBox__planes", Camera2BoundingBox__Planes, converter.GenPlanes)

    rospy.spin()

