import pyrealsense2 as rs
import numpy as np
import cv2
import time
from color_dist import *
import random

def show_colorizer_depth_img():
    """Display the current depth frame, hole-filled and colorized, in a window."""
    global depth_frame, color_image
    # Fill depth holes first so the colorized image has no black gaps.
    filled = rs.hole_filling_filter().process(depth_frame)
    colorized = rs.colorizer().colorize(filled)
    cv2.imshow('filled depth', np.asanyarray(colorized.get_data()))

def object_color_detect(color):
    """
    Detect the largest blob of the given color in the global ``color_image``.

    Parameters
    ----------
    color : str
        Key into ``color_dist`` providing the HSV "Lower"/"Upper" bounds.

    Returns
    -------
    list
        Contour-style corner list ``[[[x, y]], ...]`` (top-left, top-right,
        bottom-right, bottom-left) of the axis-aligned bounding rectangle,
        or ``[]`` when no blob of that color is found.
    """
    global depth_frame, color_image
    hsvFrame = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
    # Threshold in HSV space using the configured range for this color.
    color_lower = np.array(color_dist[color]["Lower"], np.uint8)
    color_upper = np.array(color_dist[color]["Upper"], np.uint8)
    color_mask = cv2.inRange(hsvFrame, color_lower, color_upper)

    color_mask = cv2.medianBlur(color_mask, 9)  # median filter: remove speckle noise

    # Cross-shaped structuring element (rect/ellipse variants were tried too).
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))

    color_mask = cv2.dilate(color_mask, kernel)  # dilate to merge nearby regions
    kernel = np.ones((10, 10), np.uint8)
    color_mask = cv2.erode(color_mask, kernel)  # erode to shrink back / drop noise

    res = cv2.bitwise_and(color_image, color_image, mask=color_mask)
    cv2.imshow("Color Detection res in Real-Time", res)

    # Creating contour to track red color
    contours, _ = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Explicit emptiness check instead of catching the ValueError that
    # max() raises on an empty sequence (exception-driven control flow).
    if not contours:
        return []

    # Minimum upright (axis-aligned) bounding rectangle of the largest blob.
    c = max(contours, key=cv2.contourArea)
    left_x, left_y, width, height = cv2.boundingRect(c)

    bound_rect = np.array([[[left_x, left_y]], [[left_x + width, left_y]],
                           [[left_x + width, left_y + height]], [[left_x, left_y + height]]])
    cv2.drawContours(color_image, [bound_rect], -1, (255, 255, 255), 2)
    return bound_rect.tolist()

def get_center_depth(center_coordinate):
    """Return the depth in cm (rounded to 2 dp) at the given (x, y) pixel."""
    global depth_frame, color_image
    x, y = center_coordinate[0], center_coordinate[1]
    dis_center = round(depth_frame.get_distance(x, y) * 100, 2)
    print("The camera is facing an object ", dis_center, " cm away.")
    return dis_center

def object_distance_measure(bbox_list):
    """
    Estimate the distance to the object bounded by ``bbox_list`` and draw
    the measurement ROI and center point onto the global ``color_image``.

    Parameters
    ----------
    bbox_list : list
        Corner list ``[[[x, y]], ...]`` as returned by
        ``object_color_detect``; an empty list (no object) is ignored.
    """
    global depth_frame, color_image
    if not bbox_list:
        return
    print(bbox_list)
    left = bbox_list[0][0][0]
    top = bbox_list[1][0][1]
    right = bbox_list[1][0][0]
    bottom = bbox_list[3][0][1]
    width = right - left
    height = bottom - top

    # Measure only the central half of the box to avoid background pixels.
    roi_lx = int(left + width / 4)
    roi_rx = int(right - width / 4)
    roi_ty = int(top + height / 4)
    roi_by = int(bottom - height / 4)
    print(roi_lx, roi_rx, roi_ty, roi_by)
    color_image = cv2.rectangle(color_image, (roi_lx, roi_ty), (roi_rx, roi_by), (255, 255, 0), 3)

    center_x = int(left + width / 2)
    center_y = int(top + height / 2)
    cv2.circle(color_image, (center_x, center_y), 5, (0, 0, 255), 0)

    # Sample random points inside the ROI and average the non-zero readings
    # (a zero distance means "no depth data" for this sensor).
    depth_points = []
    for _ in range(50):
        rand_x = random.randint(roi_lx, roi_rx)
        rand_y = random.randint(roi_ty, roi_by)
        depth_point = round(depth_frame.get_distance(rand_x, rand_y) * 100, 2)
        if depth_point != 0:
            depth_points.append(depth_point)

    if not depth_points:
        # Every sample was zero: no valid depth inside the ROI. The original
        # np.mean([]) returned NaN (with a RuntimeWarning) and then fell
        # through to a misleading "<30 cm" message.
        print("No valid depth samples inside the object ROI.")
        return

    depth_object = np.mean(depth_points)
    if depth_object >= 30:
        print("The camera is facing an object mean ", int(depth_object), " cm away.")
    else:
        print("The camera is facing an object mean <30 cm away.")


if __name__ == "__main__":
    # depth_frame and color_image are bound here at module level; the helper
    # functions above access them via ``global``. (A ``global`` statement at
    # module scope is a no-op, so the original one has been removed.)

    # Configure depth and color streams (640x480 @ 30 fps).
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    # Start streaming
    pipeline.start(config)

    try:
        while True:
            start = time.time()
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()

            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert the color frame to a numpy array for OpenCV.
            color_image = np.asanyarray(color_frame.get_data())

            # task program: detect the red object and measure its distance
            bbox_list = object_color_detect(color="red")
            object_distance_measure(bbox_list)

            # show image
            cv2.imshow("color_image", color_image)

            # Guard against ZeroDivisionError when the iteration is so fast
            # that the measured elapsed time rounds to exactly zero.
            elapsed = time.time() - start
            if elapsed > 0:
                print("FPS:", 1 / elapsed, "/s")

            # Press esc or 'q' to close the image window
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        # Always stop streaming, even on error or exit.
        pipeline.stop()
