#!/usr/bin/env python

##
##
# required for ROS Python
import rospy
import message_filters
from roslib import message
# Standard messages
from std_msgs.msg import Header
from std_msgs.msg import String
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from geometry_msgs.msg import Vector3
from group6.msg import DetectedObject
from group6.msg import DetectedObjectArray
from group6.srv import * #ConvertCoordinates
from group6.msg import motion_ctrl

# Image messages (for debugging and configuration)
from sensor_msgs.msg import Image
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import PointField
from group6.msg import ImageCloud
# for converting from ROS-Image to OpenCV-Image and vice versa
from cv_bridge import CvBridge, CvBridgeError

# Import Python's math library, numpy and OpenCV
import math
import numpy
import cv2

# Publisher and stuff
# Publishers: debug image, detected objects, motion commands, save-position trigger
pub1 = rospy.Publisher('detected_object_image', Image, queue_size=5)
pub2 = rospy.Publisher('detected_object_array', DetectedObjectArray, queue_size=5)
pub3 = rospy.Publisher('goToPoint', motion_ctrl, queue_size=5)
pub4 = rospy.Publisher('saveRobotPosition', Bool, queue_size=1)

# Mode flags (mutated by the service callbacks / resetDetection below)
detection_mode = 0  # 0 = idle, 1 = object detection, -1 = approach
detection = False  # True while the detection service is enabled
transmission = False  # True while the approach/transmission service is enabled

# CONFIGURATION
minZ = -0.05  # lower bound of accepted z for a detection (m) -- see detect()
maxZ = 0.18  # upper bound of accepted z for a detection (m)
minArea = 920  # minimum bounding-box area (px^2) for a candidate contour
maxArea = 20000  # maximum bounding-box area (px^2) for a candidate contour
# -----------------------------------------------------------------
# ------------- PARAMETERS ----------------------------------------
distanceThreshold = 0.25+0.03  # dead-band (m): below this the movement command becomes 0
angleThreshold = 0.06  # dead-band (radians): below this the turning command becomes 0
angleCorrection = 0.06  # fixed rotation offset (radians) added to every computed angle
findEdges = 230  # Canny edge-detection base threshold (used as x1 / x2 in detect())
# -----------------------------------------------------------------


# Converter for cv->ros and ros->cv
bridge = CvBridge()
# Reusable motion command message, published on goToPoint in approach mode
inst = motion_ctrl()


# Crop parameters, deprecated (no longer referenced by detect())
cropW = 480  # crop width, center is horizontal middle
cropH = 480  # crop height, origin is bottom of image

# Running-average state:
# moving_avg = ((moving_avg * measurements) + new_measurement) / (measurements + 1)
movAvg = 0  # number of frames folded into the averages so far
avgAngle = 0
avgDistance = 0
toyW = 0  # tracked toy bounding-box width in pixels (served by object_ratio)
toyH = 0  # tracked toy bounding-box height in pixels
# consecutive approach-mode frames without a usable detection
noDetections = 0

# THE FOLLOWING VALUES ARE ROUGH APPROXIMATIONS
# Grabber image row, distance to the robot rotation point, pixel-to-meter scale
grabberY = 480  # image row (px) of the grabber
dist2RobotOrigin = 1.3  # meters
p2m = 0.0015625  # 1m/640pixels

# Detections accumulated across frames
obj = []
arr = DetectedObjectArray()

# Containers for the latest depth (PointCloud2) and color (Image) messages;
# NOTE(review): imageData appears unused -- getColorImage writes colorData instead.
depthData = 0
imageData = 0

# Rect class for holding wall information

class Rect:
	"""Axis-aligned rectangle in map coordinates, padded for obstacle checks.

	The stored corners are expanded beyond the given (x, y, w, h): the
	lower edges move out by 5 cm and the upper edges by 10 cm.  The
	asymmetry reproduces the original tuning and is kept as-is.
	"""
	def __init__(self, x, y, w, h):
		pad = 0.05
		self.x1 = x - pad
		self.y1 = y - pad
		self.x2 = x + w + 2 * pad
		self.y2 = y + h + 2 * pad

# Room bounding box in the map frame (meters)
room = Rect(-0.7, 1.82, 4.22, 3.2)

# Known static obstacles and walls as (x, y, w, h) tuples in the map frame.
# Detections whose converted coordinates fall inside any of these are
# discarded as walls/obstacles (see ipr()).
_OBSTACLES = [
	(0.88, 0.68, 0.49, 0.37),    # the blocking box
	(3.08, 0.43, 0.46, 0.46),    # white round thing
	(1.29, -1.46, 0.05, 1.46),   # large blocking wall
	(1.3, -0.9, 0.04, 0.36),     # small blocking wall
	# left wall, in segments
	(-0.72, 1.82, 1.72, 1.0),
	(1.00, 1.86, 1.0, 1.0),
	(2.00, 1.93, 1.0, 1.0),
	(3.00, 2.00, 1.0, 1.0),
	# far wall, in segments
	(3.48, 1, 0.5, 1.5),
	(3.48, -0.4, 0.5, 1.4),
	(3.53, -1.5, 0.5, 1.1),
	# right wall, in segments
	(3.00, -2.00, 1.0, 0.9),
	(2.00, -2.00, 1.0, 0.75),
	(1.00, -2.00, 1.0, 0.7),
	(0, -2.00, 1.0, 0.65),
	(-1.00, -2.00, 1.0, 0.62),
	# close wall, in segments
	(-1.00, -2.00, 0.26, 1.00),
	(-1.00, -1.00, 0.38, 1.00),
	(-1.00, 0, 0.43, 2.5),
	# baskets
	(0, -1.48, 0.40, 0.28),
	(3.27, -0.8, 0.28, 0.40),
]
rects = [Rect(x, y, w, h) for (x, y, w, h) in _OBSTACLES]


# Create black image mask from the given hsv image
# Build a binary mask of the near-black pixels in an HSV image.
def createBlackMask(image):
	"""Return a dilated binary mask of near-black pixels (HSV value <= 50)."""
	work = image.copy()
	lo = numpy.array([0, 0, 0], numpy.uint8)
	hi = numpy.array([179, 255, 50], numpy.uint8)
	mask = cv2.inRange(work, lo, hi)
	# Grow the mask (3x3 kernel, 6 passes) so nearby fragments merge into blobs
	return cv2.dilate(mask, numpy.ones((3, 3), 'uint8'), iterations=6)

# Create yellow image mask from the given hsv image
# Build a binary mask of the yellow pixels in an HSV image.
def createYellowMask(image):
	"""Return a dilated binary mask of yellow pixels (hue 15-40, saturated)."""
	work = image.copy()
	lo = numpy.array([15, 150, 51], numpy.uint8)
	hi = numpy.array([40, 255, 253], numpy.uint8)
	mask = cv2.inRange(work, lo, hi)
	# Grow the mask (3x3 kernel, 6 passes) so nearby fragments merge into blobs
	return cv2.dilate(mask, numpy.ones((3, 3), 'uint8'), iterations=6)
	
# COLOR BASED DETECTION from the hsv image
# Generic hue-band color filter for HSV images.
def filterColor(image, lower, upper):
	"""Return a dilated binary mask of saturated pixels with hue in [lower, upper]."""
	work = image.copy()
	lo = numpy.array([lower, 51, 51], numpy.uint8)
	hi = numpy.array([upper, 255, 255], numpy.uint8)
	mask = cv2.inRange(work, lo, hi)
	# Grow the mask (3x3 kernel, 6 passes) so nearby fragments merge into blobs
	return cv2.dilate(mask, numpy.ones((3, 3), 'uint8'), iterations=6)

# Reset detection and other parameters, used when switching between modes
# Reset detection state, used when switching between modes.
def resetDetection():
	"""Zero the running averages/counters and re-derive detection_mode.

	Mode precedence: approach/transmission (-1) beats detection (1);
	with neither flag set the node idles (0).
	"""
	global movAvg, avgAngle, avgDistance, transmission, detection, detection_mode, noDetections
	movAvg = avgAngle = avgDistance = noDetections = 0
	detection_mode = -1 if transmission else (1 if detection else 0)
# service 1, object detection listener
# Callback for the triggerDetect topic: toggles object-detection mode.
def serviceResponse1(arg):
	"""Enable detection (and disable transmission) when arg.data is truthy;
	otherwise disable detection only.  Re-derives the mode afterwards."""
	global detection, transmission
	if arg.data:
		rospy.loginfo("Detection started!")
		detection, transmission = True, False
	else:
		rospy.loginfo("Detection stopped!")
		detection = False
	resetDetection()
		
# service 2, approach listener		
# Callback for the toggleKinectPublish topic: toggles approach/transmission mode.
def serviceResponse2(arg):
	"""Enable transmission (and disable detection) when arg.data is truthy;
	otherwise disable transmission only.  Re-derives the mode afterwards.

	Fix: the log messages were copy-pasted from serviceResponse1 and
	wrongly reported "Detection" being toggled; they now report the
	transmission state this callback actually changes.
	"""
	global transmission, detection
	if arg.data:
		rospy.loginfo("Transmission started!")
		transmission = True
		detection = False
	else:
		rospy.loginfo("Transmission stopped!")
		transmission = False
	resetDetection()

# is point rect, this checks if a coordinate is inside a rectangle
# "is point in rect": checks whether a coordinate is inside any rectangle.
def ipr(rects, x, y):
	"""Return True if point (x, y) lies strictly inside any rectangle in rects.

	Each rect must expose x1/x2/y1/y2 corners (see Rect).  Boundaries are
	exclusive, matching the original strict comparisons.  The four nested
	single-condition ifs were collapsed into one any() expression.
	"""
	return any(r.x1 < x < r.x2 and r.y1 < y < r.y2 for r in rects)

# this one idenfties the object big or small, not great success rate
# Classify a detection as big or small; historically not a great success rate.
def identObject(wo, ho, ddx, ddy):
	"""Classify a detection as "big", "small" or "unknown".

	wo/ho are the bounding-box width/height in pixels; ddx/ddy its origin
	(ddx is accepted for interface compatibility but unused).  The raw
	pixel area is scaled down the lower the box centre sits in the image.
	"""
	center_row = ddy + ho / 2.0
	adjusted = (wo * ho) * float(1.0 - (center_row / 600.0) * 0.25)
	if adjusted > 4600:
		return "big"
	if adjusted > 750:
		return "small"
	return "unknown"
	
# This is a simple callback about the subscription to the image topic
#def callback(colorData, depthData):
def detect():
	"""Run one detection/approach cycle on the latest cached RGB+depth frames.

	Reads the module-level colorData / depthData set by the subscriber
	callbacks, segments candidate toys by colour and edges, averages the 3D
	points of each candidate, converts them to map coordinates via the
	convertCoordinates service, and then either accumulates/publishes a
	DetectedObjectArray (detection_mode == 1) or publishes a motion_ctrl
	approach command (detection_mode == -1).  A debug image goes out on
	pub1 every cycle.

	Fix: the original declared "global ... toyW, toyW" (toyW twice), so all
	toyH assignments created a function local and the module-level toyH
	read by object_ratio() was never updated; the list now names toyH.
	Dead commented-out code was removed and the mixed tab/space
	indentation normalized; the statement order is otherwise unchanged.
	"""
	global pub1, pub2, pub3, plane, detection, minZ, maxZ, minArea, maxArea, room, rects, arr, pub4, colorData, depthData
	global distanceThreshold, angleThreshold, angleCorrection, noDetections, movAvg, avgAngle, avgDistance, inst, obj, bridge, findEdges, cropH, cropW, grabberY, dist2RobotOrigin, p2m, transmission, toyW, toyH
	if (detection_mode != 0):
		if (depthData and colorData): # only run once both messages arrived
			rospy.loginfo("detecting...")
			time = rospy.get_time()
			i = colorData
			d = depthData
			movAvg = movAvg + 1
			obj = []
			# Convert from sensor_msgs/Image to OpenCV format
			color = bridge.imgmsg_to_cv2(i, "bgr8")
			final = color.copy()
			# image dimensions taken from the point cloud message
			h = d.height
			w = d.width
			# Inpaint over-exposed (near-white) regions so they do not
			# confuse the colour segmentation below.
			gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
			gret, thresh1 = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
			kernel = numpy.ones((3, 3), 'uint8')
			dilated = cv2.dilate(thresh1, kernel, iterations=1)
			color = cv2.inpaint(color, dilated, 3, cv2.INPAINT_NS)
			# Convert to HSV and blur twice to suppress noise
			hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)
			hsv = cv2.medianBlur(hsv, 5)
			hsv = cv2.medianBlur(hsv, 5)

			# Per-colour masks (bounds in HSV colour space)
			black_mask = createBlackMask(hsv)
			blue_mask = filterColor(hsv, 85, 135)
			yellow_mask = createYellowMask(hsv)
			green_mask = filterColor(hsv, 45, 80)
			red_mask1 = filterColor(hsv, 167, 179)
			red_mask2 = filterColor(hsv, 0, 13)
			red_mask = numpy.bitwise_or(red_mask1, red_mask2)
			# Combine them
			total_mask = numpy.bitwise_or(black_mask, numpy.bitwise_or(numpy.bitwise_or(numpy.bitwise_or(blue_mask, yellow_mask), green_mask), red_mask))
			# Edge detection, dilated to close gaps
			edges = cv2.Canny(gray, findEdges * 1, findEdges * 2)
			dilated_edges = cv2.dilate(edges, kernel, iterations=6)
			# Fill closed contours of the edge mask into solid shapes
			dilated_edge_mask = dilated_edges.copy()
			contour1, hier1 = cv2.findContours(dilated_edge_mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
			for cnt in contour1:
				cv2.drawContours(dilated_edge_mask, [cnt], 0, 255, -1)
			# Fill closed contours of the colour mask into solid shapes
			dilated_total_mask = total_mask.copy()
			contour1, hier1 = cv2.findContours(total_mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
			for cnt in contour1:
				cv2.drawContours(dilated_total_mask, [cnt], 0, 255, -1)

			# A candidate must appear in BOTH the colour and the edge mask
			total_mask = numpy.bitwise_and(dilated_total_mask, dilated_edge_mask)
			kernel = numpy.ones((3, 3), 'uint8')
			# iterations=0 makes this erode a plain copy; kept as-is
			dilated_mask = cv2.erode(total_mask, kernel, iterations=0)
			# NOTE: OpenCV 2.x findContours mutates its input image, so this
			# call also alters dilated_mask before it is copied below; the
			# call is kept even though its result is recomputed later.
			contours, hierarchy = cv2.findContours(dilated_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
			filled_mask = dilated_mask.copy()
			# Black out the robot body / grabber region at the image bottom
			ww = 320
			cv2.rectangle(filled_mask, (320 - (ww / 2) + 10, 417), (320 - (ww / 2) + ww + 20, 480), 0, -1)
			cv2.rectangle(filled_mask, (0, 310), (320 - (ww / 2) + 107, 480), 0, -1)
			cv2.rectangle(filled_mask, (320 - (ww / 2) + ww - 73, 310), (640, 480), 0, -1)
			cv2.rectangle(final, (320 - (ww / 2) + 10, 420), (320 - (ww / 2) + ww + 20, 480), (0, 0, 0), -1)
			cv2.rectangle(final, (0, 310), (320 - (ww / 2) + 107, 480), (0, 0, 0), -1)
			cv2.rectangle(final, (320 - (ww / 2) + ww - 73, 310), (640, 480), (0, 0, 0), -1)

			# Find the contours again on the blocked-out mask
			contours, hierarchy = cv2.findContours(filled_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

			# Keep the contours whose bounding box could be an object
			olist = []
			for cnt in contours:
				area1 = cv2.contourArea(cnt)
				rx, ry, wi, he = cv2.boundingRect(cnt)
				area2 = wi * he
				ratiow = wi / he
				ratioh = he / wi
				cv2.rectangle(final, (rx, ry), (rx + wi, ry + he), (255, 255, 0), 2)
				# size, aspect-ratio and fill-ratio criteria
				if (area2 > minArea and area2 < maxArea and ratiow <= 2.0 and ratioh <= 2.0 and (area1 / area2) > 0.30 and wi > 20 and he > 20 and wi < 210 and he < 210):
					olist.append(cnt)
			# analyze and calculate those detections not yet discarded
			objlist = []
			flist = []
			for i in olist:
				m = numpy.zeros(color.shape, numpy.uint8)
				cv2.drawContours(m, [i], 0, 255, -1)
				pixelpoints = numpy.transpose(numpy.nonzero(m))
				x = 0
				y = 0
				z = 0
				cc = 0
				nans = 0
				# Average the 3D cloud points of every pixel inside the contour
				for c in pixelpoints:
					pc = read_depth(c[1], c[0], depthData)
					if (pc != -1):
						if (math.isnan(pc[0]) or math.isnan(pc[1]) or math.isnan(pc[2])):
							nans = nans + 1
						else:
							x = x + pc[0]
							y = y + pc[1]
							z = z + pc[2]
							cc = cc + 1
				# if any valid points were found, average and convert them
				if (cc != 0):
					x = x / cc
					y = y / cc
					z = z / cc
					rospy.loginfo("x: " + str(x) + " y: " + str(y) + " z: " + str(z))
					rospy.wait_for_service('convertCoordinates')
					try:
						coordinates = rospy.ServiceProxy('convertCoordinates', convertCoordinates)
						respl = coordinates(Vector3(float(x), float(y), float(z)))
						x = float(respl.point.x)
						y = float(respl.point.y)
						z = float(respl.point.z)
						rx, ry, wi, he = cv2.boundingRect(i)
						cv2.rectangle(final, (rx, ry), (rx + wi, ry + he), (0, 0, 255), 2)
						rospy.loginfo("area: " + str(cv2.contourArea(i)) + " x: " + str(x) + " y: " + str(y) + " z: " + str(z))
						# keep the object only if it is outside every known
						# obstacle rect and its z lies in the accepted band
						if (ipr(rects, x, y) == False and z > minZ and z < maxZ):
							flist.append(i)
							rx, ry, wi, he = cv2.boundingRect(i)
							cv2.rectangle(final, (rx, ry), (rx + wi, ry + he), (0, 255, 0), 2)
							dobj = DetectedObject()
							dobj.object_id = "small"
							dobj.x = x
							dobj.y = y
							dobj.z = z
							objlist.append(dobj)
						else:
							rospy.loginfo("Detection is a wall/obstacle")
					except rospy.ServiceException as e:
						rospy.loginfo('Service call failed: %s' % e)
				else:
					rospy.loginfo('DEPTH MODE FAILED')
			# 1 means object detection
			if (detection_mode == 1):
				if (movAvg >= 4): # publish after 4 consecutive frames
					movAvg = 0
					# create header
					arr.header = rospy.Header()
					arr.header.stamp = rospy.get_rostime()
					# publish
					pub2.publish(arr)
					# create a new DetectedObjectArray
					arr = DetectedObjectArray()
				else:
					# accumulate this frame's detections into the array
					arr.objects = arr.objects + objlist
					arr.size = len(arr.objects)
			# -1 means approach
			elif (detection_mode == -1):
				if (movAvg >= 4):
					movAvg = 0
					avgAngle = 0
					avgDistance = 0
					noDetections = 0
					if (inst.distance < 0):
						inst.distance = 0
					pub3.publish(inst)
				else:
					toyH = 0
					toyW = 0
					if (len(flist) > 0):
						bcnt = []
						smd = 1000
						sme = 0
						# pick the detection closest to the image centre line
						# and lowest in the image (i.e. nearest the robot)
						for i in flist:
							M = cv2.moments(i)
							objX = (M['m10'] / M['m00'])
							objY = (M['m01'] / M['m00'])
							deltaObjX = (objX - 320)
							deltaObjY = (objY - grabberY)
							if (math.fabs(deltaObjX) < 400 and (objY > 75)):
								if (math.fabs(deltaObjX) < smd):
									if (objY > sme):
										bcnt = i
										smd = math.fabs(deltaObjX)
										sme = objY
						if (len(bcnt) == 0):
							noDetections = noDetections + 1
							toyH = 0
							toyW = 0
						else:
							inst.status = 1
							noDetections = 0
							xa, ya, wa, ha = cv2.boundingRect(bcnt)
							objX = xa + (wa / 2)
							objY = ya
							deltaObjX = (objX - 320)
							deltaObjY = (objY - grabberY)
							deltaAngle = (math.atan2(dist2RobotOrigin + (deltaObjY * p2m), deltaObjX * p2m) - 0.5 * math.pi) + angleCorrection
							# Fold the new measurement into the moving averages
							avgAngle = (avgAngle * movAvg + deltaAngle) / (movAvg + 1)
							avgDistance = (avgDistance * movAvg + deltaObjY) / (movAvg + 1)

							# Compile the message
							inst.angle = avgAngle
							inst.distance = (avgDistance * -p2m)
							# dead-band thresholds: zero out small corrections
							if (math.fabs(deltaObjX) < 17):
								inst.angle = 0
							if (math.fabs(inst.angle) < angleThreshold):
								inst.angle = 0
							if (math.fabs(inst.distance) < distanceThreshold):
								inst.distance = 0

							cv2.rectangle(final, (xa, ya), (xa + ha, ya + ha), (255, 0, 0), 2)
							toyW = wa
							toyH = ha
					else:
						noDetections = noDetections + 1
						toyH = 0
						toyW = 0

					# too many consecutive misses: signal "lost" unless a
					# publish is imminent
					if (noDetections > 3):
						noDetections = 0
						if (movAvg < 4):
							inst.status = 0

			else:
				rospy.loginfo("Detection offline")
			image_message = bridge.cv2_to_imgmsg(final, "bgr8")
			# Publish the debug image
			pub1.publish(image_message)
			rospy.loginfo("detection took " + str((rospy.get_time() - time)) + " seconds")

		
# getColor data
def getColorImage(data):
	global colorData
	colorData = data

# read the depth data
# Look up a single point in the cached point cloud.
def read_depth(width, height, data):
	"""Return the cloud point at pixel (width, height), or -1 when the
	pixel lies outside the cloud bounds.  NaNs are NOT skipped here; the
	caller must check the returned coordinates."""
	if width >= data.width or height >= data.height:
		return -1
	points = pc2.read_points(data, field_names=None, skip_nans=False, uvs=[[width, height]])
	return next(points)
		
# get the depth data
def depth_callback(data):
	global depthData
	depthData = data
	#rospy.loginfo("PointCloud received from /filtered_pointcloud")

# get the object ratio, not used but still implemented
# Service handler: report the tracked toy's bounding-box size in pixels.
def object_ratio(req):
	"""Return (toyW, toyH, 666.6) while in approach mode, (-1, -1, 666.6) otherwise.

	Fix: the original tested `object_detection == -1`, which compares the
	main-function *object* itself against -1 and is always False, so the
	real dimensions were never returned.  The approach mode is indicated
	by the module flag detection_mode == -1.
	"""
	if (detection_mode == -1):
		return objectDimResponse(Vector3(float(toyW), float(toyH), float(666.6)))
	return objectDimResponse(Vector3(float(-1), float(-1), float(666.6)))

	# main code
# Node entry point.
def object_detection():
	"""Initialize the node, wire up topics/services, and loop detect() at 10 Hz."""
	rospy.init_node('object_detection', anonymous=True)
	rospy.loginfo("Starting object detection")
	# Service reporting the tracked toy's pixel dimensions
	rospy.Service('object_dimensions', objectDim, object_ratio)
	# Sensor inputs
	rospy.Subscriber('camera/rgb/image_rect_color', Image, getColorImage)
	rospy.Subscriber('camera/depth_registered/points', PointCloud2, depth_callback)
	# Mode toggles
	rospy.Subscriber('triggerDetect', Bool, serviceResponse1)
	rospy.Subscriber('toggleKinectPublish', Bool, serviceResponse2)
	rate = rospy.Rate(10)  # 10 Hz main loop
	while not rospy.is_shutdown():
		detect()
		rate.sleep()
		
if __name__ == '__main__':
	try:
		object_detection()
	except rospy.ROSInterruptException:
		# Normal shutdown (Ctrl-C / node kill); nothing to clean up.
		pass



