
import os
import sys
import time

import cv2 as cv
import numpy as np
import pyrealsense2 as rs

from ..paddleocr.tools.infer.predict_system import main
from ..realsense.utils import \
    convert_depth_pixel_to_metric_coordinate as get_coordinate

# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
# from util.utils_cv import *
# from util.keys_prov import *
# from util.utils_recg import *

# --- RealSense device discovery and stream configuration (runs at import) ---
pipeline = rs.pipeline()
config = rs.config()
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

# The demo needs a color stream in addition to depth; bail out early if the
# attached device has no RGB sensor.
found_rgb = any(
    s.get_info(rs.camera_info.name) == 'RGB Camera' for s in device.sensors)
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    sys.exit(1)  # non-zero status: this is an error, not a normal exit

# Fixed depth resolution: leaving the resolution unset can make close-range
# content unrecognizable.
config.enable_stream(rs.stream.depth, 640, 480,
                     rs.format.z16, 30)

# L500-series devices use a different native color resolution than D400.
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)


def get_boxes_coordinate():
    """Detect box-like foreground blobs in the depth stream.

    Streams aligned depth/color frames from the module-level RealSense
    ``pipeline``, masks out everything farther than a fixed clipping
    distance, and fits rotated rectangles (``cv.minAreaRect``) around the
    remaining large contours.  A debug window shows the mask, the fitted
    rectangles, and cross-hairs through each rectangle center; press 'q'
    or Esc to stop streaming.

    Returns:
        list: ``cv.minAreaRect`` results ``((cx, cy), (w, h), angle)`` for
        the boxes found in the last processed frame (empty if none).
    """
    # TODO: handle boxes that touch each other (their contours merge)
    # TODO: average over several detections to stabilize the result
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()  # meters per raw depth unit
    print("Depth Scale is: ", depth_scale)

    # Pixels farther than this distance are treated as background.
    clipping_distance_in_meters = 0.305  # ~0.3 m working distance
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Align depth frames to the color stream so that depth pixels and color
    # pixels share the same coordinates (and the same intrinsics).
    align = rs.align(rs.stream.color)

    boxes = []  # defined up-front so the final return is always valid
    try:
        while True:
            frames = pipeline.wait_for_frames()
            aligned_frames = align.process(frames)

            aligned_depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()
            # Skip incomplete framesets.
            if not aligned_depth_frame or not color_frame:
                continue

            # After alignment the depth image lives in the color camera's
            # pixel grid, so the color intrinsics are the right ones to use.
            depth_intrinsics = (
                color_frame.profile.as_video_stream_profile().intrinsics)

            depth_image = np.asanyarray(aligned_depth_frame.get_data())

            # Foreground mask: 255 where depth is valid and closer than the
            # clipping distance, 0 elsewhere.  (.astype replaces the
            # np.asfarray/np.int0 helpers removed in NumPy 2.0.)
            bg_removed = np.where(
                (depth_image > clipping_distance) | (depth_image <= 0),
                0, 255).astype(np.uint8)

            # Keep only sufficiently large rotated rectangles as "boxes".
            boxes = []
            contours, _ = cv.findContours(
                bg_removed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                rect = cv.minAreaRect(contour)
                if rect[1][0] > 160 and rect[1][1] > 160:
                    boxes.append(rect)

            # Debug visualization: rectangle outlines plus cross-hairs
            # through each center.  Image extents come from the mask itself
            # so this also works at the L500's 960x540 resolution.
            height, width = bg_removed.shape
            bg_removed_result = np.dstack([bg_removed, bg_removed, bg_removed])
            for rect in boxes:
                corners = cv.boxPoints(rect).astype(np.int32)
                cv.drawContours(bg_removed_result, [corners], 0,
                                [0, 0, 255], 2)
                cx, cy = int(rect[0][0]), int(rect[0][1])
                cv.line(bg_removed_result, (0, cy), (width, cy), 255, 2)
                cv.line(bg_removed_result, (cx, 0), (cx, height), 255, 2)

            # Diagnostic print: deproject the center of the last detected
            # box.  Guarded so an empty frame no longer raises NameError on
            # an unbound `rect`.
            if boxes:
                center = boxes[-1][0]
                px, py = int(center[0]), int(center[1])
                # NOTE(review): the raw z16 depth value is passed here, not
                # meters; multiply by depth_scale for metric output --
                # confirm which units the caller expects.
                cor = rs.rs2_deproject_pixel_to_point(
                    depth_intrinsics, center, depth_image[py, px])
                print(depth_image[py, px])
                print("1x %.3f " % (cor[0])+" y %.3f " %
                      (cor[1])+" z %.3f " % cor[2])

            cv.namedWindow('Align Example', cv.WINDOW_NORMAL)
            cv.imshow('Align Example', bg_removed_result)
            key = cv.waitKey(1)

            # Press Esc or 'q' to close the image window and stop.
            if key & 0xFF == ord('q') or key == 27:
                cv.destroyAllWindows()
                break
    finally:
        # Always release the camera, even if the loop raised.
        pipeline.stop()
    return boxes





if __name__ == '__main__':
    # Script entry point: run the interactive box-detection loop.
    # origin_img = cv.imread("./img/019.jpg")
    # get_address_new(origin_img,True)
    get_boxes_coordinate()
