#!/usr/bin/env python

##
##
# required for ROS Python
import rospy
# Standard messages
from std_msgs.msg import Header
from std_msgs.msg import String
from std_msgs.msg import Bool
# Group6 messages and services
from group6.msg import motion_ctrl
#from group6.srv import kinectApproach
# Image messages (for debugging and configuration)
from sensor_msgs.msg import Image
# for converting from ROS-Image to OpenCV-Image and vice versa

from cv_bridge import CvBridge, CvBridgeError
# Import Python's math library, numpy and OpenCV
import math
import numpy
import cv2

# -----------------------------------------------------------------
# ------------- PUBLISHERS ----------------------------------------
# Debug image: masked view of the cropped frame with the detection drawn in.
pub = rospy.Publisher('manipulated_object_position', Image, queue_size=10)
# Motion commands (angle + distance) derived from the detection.
# NOTE: this publisher was previously created twice on the same topic;
# the duplicate registration has been removed.
pub2 = rospy.Publisher('goToPoint', motion_ctrl, queue_size=10)

# Detection runs only while this is True (toggled via 'toggleKinectPublish').
transmission = False

# -----------------------------------------------------------------
# ------------- PARAMETERS ----------------------------------------
distanceThreshold = 0.18  # movement command forced to 0 below this (meters)
angleThreshold = 0.15     # turning command forced to 0 below this (radians)
angleCorrection = 0       # rotation offset added to each computed angle (radians)
findEdges = 600           # Canny edge-detection threshold
# -----------------------------------------------------------------

# Converter between ROS sensor_msgs/Image and OpenCV images.
bridge = CvBridge()
# Pending motion command, accumulated across frames before publishing.
inst = motion_ctrl()

# Crop parameters: width is centered horizontally, height measured from the
# bottom of the (480-row) image.
cropW = 480
cropH = 480

# Moving-average state:
# moving_avg = ((moving_avg * measurements) + new_measurement) / (measurements + 1)
movAvg = 0
avgAngle = 0
avgDistance = 0

# Consecutive frames without a valid detection.
noDetections = 0

# THE FOLLOWING VALUES ARE ROUGH APPROXIMATIONS
grabberY = 480 - (480 - cropH)  # grabber reference row in the cropped image (pixels)
dist2RobotOrigin = 1.3          # camera view to robot rotation point (meters)
p2m = 0.0015625                 # pixel-to-meter scale (1 m / 640 px)

# Contour of the currently detected object.
obj = []

# DETECTION
def filterColor(image, lower, upper):
	imageC = image.copy()
	COLOR_MIN = numpy.array([lower, 120, 30], numpy.uint8) # 50 50
	COLOR_MAX = numpy.array([upper,255,255], numpy.uint8)	# 255
	#Filter color countours
	frame_threshed = cv2.inRange(imageC, COLOR_MIN, COLOR_MAX)
	# Dilation kernel, 3x3
	#kernel = numpy.ones((3,3),'uint8')
	# Dilate ( = make the edge detection lines larger to close caps and form actual shapes
	#frame = cv2.dilate(frame_threshed, kernel, iterations = 1)
	return frame_threshed#frame


# This is a simple callback about the subscription to the image topic
def callback(data):
	global distanceThreshold
	global angleThreshold
	global angleCorrection
	global noDetections
	global movAvg
	global avgAngle
	global avgDistance
	global inst
	global obj
	global bridge
	global pub
	global pub2
	global findEdges
	global cropH
	global cropW
	global grabberY
	global dist2RobotOrigin
	global p2m
	global transmission
	if (transmission):
		# Calculate the average
		movAvg = movAvg + 1
		obj = []
		mes = "sensor_msgs/Image sent /manipulated_object_detection"
		# Convert from sensor_msgs/Image to OpenCV format
		image = bridge.imgmsg_to_cv2(data, "bgr8")
		# Crop the image according to crop parameters
		imgC = image[ (480-cropH):480, (320-cropW/2):(320+cropW/2)]
		
		# -------------------------------------------------------------------
		# --------------- COMMENT THIS AWAY ---------------------------------
		# Convert the image to gray
		gray = cv2.cvtColor(imgC, cv2.COLOR_BGR2GRAY)
		gret,thresh1 = cv2.threshold(gray,230,255,cv2.THRESH_BINARY)
		# Dilation kernel, 3x3
		kernel = numpy.ones((3,3),'uint8')
		# Dilate ( = make the edge detection lines larger to close caps and form actual shapes
		dilated = cv2.dilate(thresh1, kernel, iterations = 2)
		imgC = cv2.inpaint(imgC, dilated, 3, cv2.INPAINT_NS)
		# ---------------- ENDS HERE ----------------------------------------
		# -------------------------------------------------------------------
		
		
		# Convert the image to HSV
		hsv = cv2.cvtColor(imgC, cv2.COLOR_BGR2HSV)
		
		hsv = cv2.medianBlur(hsv, 5)
		hsv = cv2.medianBlur(hsv, 5)

		#blue_mask = numpy.zeros((cropH, cropW), numpy.uint8)

		# Create Masks, blue mask not actually blue LOL its empty
		blue_mask = filterColor(hsv, 110, 130)
		yellow_mask = filterColor(hsv, 20, 35)
		green_mask = filterColor(hsv, 50, 67) #48 - 70
		#big_mask = filterColor(hsv, 48, 70)
		red_mask1 = filterColor(hsv, 170, 180)
		red_mask2 = filterColor(hsv, 0, 8)
		red_mask = numpy.bitwise_or(red_mask1, red_mask2)
		# Combine them
		total_mask =  numpy.bitwise_or(numpy.bitwise_or(numpy.bitwise_or(blue_mask, yellow_mask), green_mask), red_mask)

		# -------------------------------------------------------
		# ----------- COMMENT THIS AWAY -------------------------
		# Canny Edge Detection with the parameter
		edges = cv2.Canny(hsv, findEdges*1, findEdges*1)
		# Combine total_mask and edge mask
		total_mask =  numpy.bitwise_or(total_mask, edges)
		# ---------- ENDS HERE ----------------------------------
		# -------------------------------------------------------
		
		# Find the Contours again
		contours, hierarchy = cv2.findContours(total_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

		# Find the largest contour = HOPEFULLY THE OBJECT lol
		maxArea = -1000
		cc = 0
		ccnt = 0

		# default status
		inst.status = 0
		for cnt in contours:
			area = cv2.contourArea(cnt)
			if area > maxArea:
				#if ( area > 3000 and area < 20000):
				obj = cnt
				maxArea = area
				ccnt = cc
			cc = cc + 1

		
		# NO OBJECT SITUATION, min was 1000
		if  (maxArea > 300 and maxArea < 11000):
			inst.status = 1
			noDetections = 0
		else:
			noDetections = noDetections + 1
			inst.status = 0

		# if found something
		if (inst.status == 1 and len(obj)):
			# Calculate center point and delta Y and delta X lol
			M = cv2.moments(obj)
			objX = (M['m10']/M['m00'])
			objY = (M['m01']/M['m00'])
			deltaObjX = ( objX - (cropW/2) )
			deltaObjY =    ( objY - grabberY )    
			deltaAngle = (math.atan2(dist2RobotOrigin + (deltaObjY * p2m), deltaObjX * p2m) - 0.5*math.pi) + angleCorrection
			
			# Calculate the moving average
			avgAngle = (avgAngle * movAvg + deltaAngle) / (movAvg + 1)
			avgDistance = (avgDistance * movAvg + deltaObjY) / (movAvg + 1)


			# Compile the message
			inst.angle = avgAngle
			inst.distance = (avgDistance * -p2m) #- 0.18 #16
			
			# threshold
			if (math.fabs(deltaObjX)  < 13):#(math.fabs(inst.angle) <= angleThreshold):
				inst.angle = 0
			
			# threshold	
			if (math.fabs(inst.distance) < distanceThreshold):
				inst.distance = 0

			# Reset once movAvg has reached 10 measurements, if transmission is True then publish
			if (movAvg >= 10):
				movAvg = 0
				# Publish
				pub2.publish(inst)		
				rospy.loginfo("OD: Published at goToPoint "+str(maxArea))
				#reset
				inst = motion_ctrl()
				avgAngle = 0
				avgDistance = 0
				
			#Draw two circles on top of the contour
			(cx,cy),radius = cv2.minEnclosingCircle(obj)
			center = (int(cx),int(cy))
			radius = int(radius)
			cv2.circle(imgC,center,radius,(255,0,0),2)
			cv2.circle(imgC,center, 6,(255,0,0), -1)

		else:
			if (noDetections >= 12):
				rospy.loginfo("OD: No detections "+str(maxArea))
				movAvg = 0
				avgAngle = 0
				avgDistance = 0
				inst.distance = 0
				inst.angle = 0
				noDetections = 0
				pub2.publish(inst)
			
			
			
		#TEMPP	
		h,w,d = imgC.shape
		# Draw Rectangle	
		cv2.rectangle(imgC, (cropW,cropH), (0,0), (0,255,0), 2) 

		mask = numpy.zeros((h, w), numpy.uint8)
		cv2.drawContours(mask, contours, ccnt, (255,255,255), -1) 
		res = cv2.bitwise_and(imgC, imgC, mask = mask)	

		# Convert img to Ros and Publish
		image_message = bridge.cv2_to_imgmsg(res, "bgr8")
		pub.publish(image_message)


# what to do when the service is initialized
def serviceResponse(arg):
	global transmission
	global movAvg
	global avgAngle
	global avgDistance
	avgAngle = 0
	avgDistance = 0
	movAvg = 0
	if (arg.data):
		rospy.loginfo("Detection started!")
		transmission = True
	else:
		rospy.loginfo("Detection stopped!")
		transmission = False


# main code
def object_manipulation():
	global inst
	
	# init
	rospy.init_node('object_manipulation', anonymous=True)
	rospy.loginfo("OD: Init")
	
	#Subscribe to /camera/rgb/image_color
	rospy.Subscriber("camera/rgb/image_color", Image, callback)
	rospy.loginfo("OD: Subscribed to kinect");
	
	#Subscribe to /toggleKinectPublish
	rospy.Subscriber('toggleKinectPublish', Bool, serviceResponse)   
	
	#rospy.init_node('object_manipulation', anonymous=True)
	rospy.loginfo("OD: Started")
	r = rospy.Rate(10) #10Hz, added    

	# Start the service
	#rospy.Service('toggleKinectPublish', kinectApproach, serviceResponse)
	
	rospy.loginfo("OD: Service enabled")

	while not rospy.is_shutdown():                        
		#arr = "LOL"
		# LOLOLO		
		# log
		# rospy.loginfo(arr)
		# publish
		# pub.publish(arr)
		r.sleep()
	
	# spin() simply keeps python from exiting until this node is stopped
	#rospy.spin()
		
if __name__ == '__main__':
	try:
		object_manipulation()
	except rospy.ROSInterruptException:
		# Normal shutdown path (Ctrl-C / node kill) -- nothing to clean up.
		pass



