#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32MultiArray
from roborts_msgs.msg import Target3D

from ctypes import *
import random
import os
import cv2
import time
import darknet
import argparse
import sys
import signal
import math
import numpy as np
import pyrealsense2 as rs
from threading import Thread, enumerate
from queue import Queue



def quit(signum, frame):
    """Signal handler for SIGINT/SIGTERM: announce shutdown and exit cleanly."""
    print('stop fusion')
    raise SystemExit(0)  # equivalent to sys.exit(0)

def parser():
    """Build the YOLO detection CLI and return the parsed arguments."""
    ap = argparse.ArgumentParser(description="YOLO Object Detection")
    ap.add_argument("--input", type=str, default=0,
                    help="video source. If empty, uses webcam 0 stream")
    ap.add_argument("--out_filename", type=str, default="None",
                    help="inference video name. Not saved if empty")
    ap.add_argument("--weights", default="./armor20210123/yolov4-tiny_best20210123.weights",
                    help="yolo weights path")
    ap.add_argument("--dont_show", action='store_true',
                    help="windown inference display. For headless systems")
    ap.add_argument("--ext_output", action='store_true',
                    help="display bbox coordinates of detected objects")
    ap.add_argument("--config_file", default="./armor20210123/yolov4-tiny.cfg",
                    help="path to config file")
    ap.add_argument("--data_file", default="./armor20210123/voc.data",
                    help="path to data file")
    ap.add_argument("--thresh", type=float, default=.80,
                    help="remove detections with confidence below this value")
    return ap.parse_args()


def str2int(video_path):
    """Cast webcam indices ("0", "1", ...) to int; leave file paths unchanged.

    argparse hands back strings even when the webcam source is numeric, so
    convert when possible and fall through to the original value otherwise.
    """
    try:
        parsed = int(video_path)
    except ValueError:
        return video_path
    return parsed


def check_arguments_errors(args):
    """Validate parsed CLI arguments.

    Raises AssertionError if the threshold is out of (0, 1) and ValueError
    if any of the config/weights/data/video paths do not exist on disk.
    """
    assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(args.config_file):
        raise ValueError("Invalid config path {}".format(os.path.abspath(args.config_file)))
    if not os.path.exists(args.weights):
        raise ValueError("Invalid weight path {}".format(os.path.abspath(args.weights)))
    if not os.path.exists(args.data_file):
        raise ValueError("Invalid data file path {}".format(os.path.abspath(args.data_file)))
    # Bug fix: the original wrote `str2int(args.input) == str`, comparing a
    # *value* against the *type* `str` (always False), so a bad video path was
    # never detected. A string input here means a file path, not a webcam index.
    if isinstance(str2int(args.input), str) and not os.path.exists(args.input):
        raise ValueError("Invalid video path {}".format(os.path.abspath(args.input)))

def box_depth(center_point, depth_image, kernel=5):
    """Average non-zero depth in a kernel x kernel window around a detection.

    Args:
        center_point: (x, y) bbox center in network-input pixel coordinates.
        depth_image: 2-D depth array indexed [row, col] (RealSense z16, mm).
        kernel: side length of the sampling window (odd; default 5).

    Returns:
        (depth, center_x, center_y): mean depth of the valid (non-zero)
        readings, rounded to an int (0 if no valid reading), plus the
        window center scaled into depth-image coordinates.
    """
    # Scale from network-input coords to the 640x480 depth image
    # (1.5384 ~= 640/416, 1.1538 ~= 480/416 -- presumably a 416x416
    # network input; TODO confirm against the .cfg).
    center_x = round(center_point[0] * 1.5384)
    center_y = round(center_point[1] * 1.1538)
    half = round((kernel - 1) / 2)
    # Depth image is indexed [row (y), col (x)]. (The original computed an
    # [x, y]-ordered slice first and immediately overwrote it -- dead code,
    # removed here along with the debug prints.)
    window = depth_image[center_y - half:center_y + half + 1,
                         center_x - half:center_x + half + 1]
    window = window.reshape(-1,)
    valid = sum(window != 0)
    if valid != 0:
        # A reading of 0 means "no depth data"; average only valid pixels.
        depth = round(sum(window) / valid)
    else:
        depth = 0
    return depth, center_x, center_y


def draw_boxes(detections, image, colors, depth_image):
    """Annotate the single best detection and return its pixel + depth target.

    Args:
        detections: darknet detections, an iterable of (label, confidence, bbox).
        image: BGR/RGB image to draw on (modified in place).
        colors: dict mapping label -> BGR color tuple; must contain every
            label present in `detections`.
        depth_image: aligned depth array passed through to box_depth().

    Returns:
        (image, u, v, z): the annotated image, the detection center in
        depth-image coordinates, and its depth in mm. All three target
        values are 0 when `detections` is empty.
    """
    u = v = z = 0
    if detections:
        # Pick the best detection up front. The original drew a box every
        # time its running confidence maximum was beaten, which could stack
        # several boxes on one frame even though only one target is returned.
        label, confidence, bbox = max(detections, key=lambda det: float(det[1]))
        xmin, ymin, xmax, ymax = darknet.bbox2points(bbox)
        cv2.rectangle(image, (xmin, ymin), (xmax, ymax), colors[label], 1)
        cv2.putText(image, "{}".format(label),
                    (xmin, ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    colors[label], 2)
        center_point = (int((xmin + xmax) / 2), int((ymin + ymax) / 2))
        z, u, v = box_depth(center_point, depth_image)
        cv2.circle(image, center_point, 6, colors[label], 0)
        cv2.putText(image, "depth:{}mm".format(z),
                    (center_point[0], center_point[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    colors[label], 2)
    return image, u, v, z

def talker(fps):
    """Publish a hello-world string containing *fps* on 'chatter' at 10 Hz.

    Blocks until ROS shutdown. NOTE(review): appears unused -- the main
    script builds its own publisher instead.
    """
    publisher = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    loop_rate = rospy.Rate(10)  # 10 Hz
    while not rospy.is_shutdown():
        message = "hello world %f" % fps
        publisher.publish(message)
        loop_rate.sleep()

# piexl location 2 camera coordinate system location
# pixel location -> camera coordinate system location
def uvz2xyz(u, v, z):
    """Convert pixel coordinates (u, v) and depth z into camera-frame (x, y, z).

    The principal point is taken as (320, 240); 0.4993 and 0.3883 rad are
    the horizontal/vertical half field-of-view angles (original note: FOV at
    z=5.5 gives width 6, height 4.5 -- units presumably meters; TODO confirm).
    """
    du = u - 320
    dv = v - 240
    x = z * math.tan(0.4993 * du / 320)
    y = z * math.tan(0.3883 * dv / 240)
    return x, y, z

if __name__ == '__main__':

    # Parse CLI options and verify the model/config/data paths exist.
    args = parser()
    check_arguments_errors(args)
    # Load the YOLO network (config + weights + class metadata) once at startup.
    network, class_names, class_colors = darknet.load_network(
            args.config_file,
            args.data_file,
            args.weights,
            batch_size=1
        )
    # Darknet doesn't accept numpy images.
    # Create one with image we reuse for each detect
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)
    input_path = str2int(args.input)  # NOTE(review): unused -- frames come from the RealSense pipeline

    # ROS publisher for the 3-D target position (camera frame, see uvz2xyz()).
    pub = rospy.Publisher('target_position', Target3D, queue_size=30)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(30) # 30 Hz publish rate

    # Configure depth and color streams (RealSense, 640x480 @ 30 fps each)
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)


    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)


    # tracker = cv2.TrackerKCF_create()
    # detection2track = None
    i = 1  # NOTE(review): unused
    # Install handlers so Ctrl-C / SIGTERM run quit(), which calls sys.exit(0).
    signal.signal(signal.SIGINT, quit)
    signal.signal(signal.SIGTERM, quit)
    # Frame counter: the first 60 frames are skipped -- presumably to let the
    # camera auto-exposure settle; TODO confirm.
    f =0
    while True:
        f=f+1
        if f>60:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 640x360 depth image
            #depth_frame = frames.get_depth_frame()
            #color_frame = frames.get_color_frame()

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
            aligned_color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not aligned_color_frame:
                continue

            aligned_depth_image = np.asanyarray(aligned_depth_frame.get_data())
            aligned_color_image = np.asanyarray(aligned_color_frame.get_data())

            #aligned_depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(aligned_depth_image, alpha=0.05), cv2.COLORMAP_JET)
            #images_aligned = np.hstack((aligned_color_image, aligned_depth_colormap))
            #cv2.imshow('aligned_images', images_aligned)

            # Timestamp used for the FPS estimate of one full detect cycle.
            prev_time = time.time()

            # Resize the color frame down to the network's input resolution
            # and hand it to darknet (which owns darknet_image's buffer).
            frame = aligned_color_image
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame_resized = cv2.resize(frame_rgb, (width, height),
                                    interpolation=cv2.INTER_LINEAR)
            darknet.copy_image_from_bytes(darknet_image, frame_resized.tobytes())

            detections = darknet.detect_image(network, class_names, darknet_image, thresh=args.thresh)

            fps = int(1/(time.time() - prev_time))
            # print(class_colors)
            # Override the colors darknet generated; keys must match the
            # class names in the .data/.names files.
            class_colors = {'armor_red1': (141, 117, 183), 'armor_red2': (255, 255, 255)}

            # if frame_resized is not None:
            # NOTE(review): detect_image appears to always return a list, so
            # this guard also passes for empty detections (u=v=z stay 0).
            if detections is not None :
                image, u, v, z = draw_boxes(detections, frame_resized, class_colors, aligned_depth_image)

                # Convert the pixel target + depth to camera-frame coordinates
                # and publish it on 'target_position'.
                x,y,z=uvz2xyz(u,v,z)
                target_msg = Target3D()
                target_msg.px = x
                target_msg.py = y
                target_msg.pz = z
                pub.publish(target_msg)
                rate.sleep()
                # NOTE(review): "targey3d" typo in the log string; runtime text left as-is.
                rospy.loginfo('targey3d is (%2f, %2f, %2f)', x,y,z)

                # Scale the annotated network-sized frame back up for display.
                image = cv2.resize(image, (640, 480),
                                    interpolation=cv2.INTER_LINEAR)
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                if not args.dont_show:
                    cv2.imshow('Inference', image)
                    cv2.waitKey(1)
                # if cv2.waitKey(fps) == 27:
                #     break


            print("FPS: {}".format(fps))
            darknet.print_detections(detections, args.ext_output)
            # darknet.free_image(darknet_image)

    # NOTE(review): unreachable -- the while True loop only exits via
    # sys.exit(0) in the quit() signal handler.
    pipeline.stop()

