import numpy as np                     # fundamental package for scientific computing
import pyrealsense2 as rs                 # Intel RealSense cross-platform open-source API
import detect_round
import model as md
import cv2
import random


def get_round_pose():
    """Interactively estimate the 3-D centre of a detected round/elliptical feature.

    Grabs frames from the global RealSense ``pipe``, detects an ellipse with
    ``detect_round.Detect_shapes_2``, samples a pair of centre-symmetric points
    just outside the ellipse edge that both have valid depth, deprojects them
    with ``color_intrinsics`` and averages them to get the 3-D centre.  The
    annotated frame is shown; press 'y'/'Y' to accept the pose, 'n'/'N' to
    retry on a fresh frame.

    Returns:
        np.ndarray: (x, y, z) camera-space coordinates of the centre, in the
        units returned by ``rs2_deproject_pixel_to_point`` (metres).
    """
    while True:
        frameset = pipe.wait_for_frames()
        color_frame = frameset.get_color_frame()
        color = np.asanyarray(color_frame.get_data())

        # Align depth to the color stream so pixel coordinates index both maps.
        align = rs.align(rs.stream.color)
        frameset = align.process(frameset)
        aligned_depth_frame = frameset.get_depth_frame()
        frame_w = aligned_depth_frame.get_width()
        frame_h = aligned_depth_frame.get_height()

        # Denoise, then sharpen.  Kernel sums to 2, so it also brightens.
        gaussian_blur = cv2.GaussianBlur(color, (9, 9), 0)  # (9, 9) kernel size, sigma auto
        sharpening_kernel = np.array([[-1, -1, -1],
                                      [-1, 10, -1],
                                      [-1, -1, -1]])
        img0 = cv2.filter2D(gaussian_blur, -1, sharpening_kernel)

        # `shapes` avoids shadowing the builtin `round`.
        shapes = detect_round.Detect_shapes_2(img0, 0)  # ,draw_center
        print(shapes)
        [ellipse_center, ellipse_axes, ellipse_angle] = shapes[0]

        depth_1 = 0.0
        depth_2 = 0.0
        attempts = 0
        while True:
            # Pick a random direction and walk from the centre to the edge.
            attempts += 1
            angle = np.random.uniform(0, 2 * np.pi)
            vec = (ellipse_axes[0] * np.cos(angle) / 2, ellipse_axes[1] * np.sin(angle) / 2)

            # Extend the edge vector slightly (x1.03) and round to integer pixels.
            extended_point = [int(ellipse_center[0] + vec[0] * 1.03), int(ellipse_center[1] + vec[1] * 1.03)]

            # The point symmetric to it about the ellipse centre.
            symmetric_point = [int(2 * ellipse_center[0] - extended_point[0]),
                               int(2 * ellipse_center[1] - extended_point[1])]

            # Skip samples outside the frame: get_distance raises on
            # out-of-range pixels.
            in_bounds = (0 <= extended_point[0] < frame_w and 0 <= extended_point[1] < frame_h
                         and 0 <= symmetric_point[0] < frame_w and 0 <= symmetric_point[1] < frame_h)
            if in_bounds:
                # Depth (metres) at both sampled pixels; 0 means "no data".
                depth_1 = aligned_depth_frame.get_distance(extended_point[0], extended_point[1])
                depth_2 = aligned_depth_frame.get_distance(symmetric_point[0], symmetric_point[1])
                # Both points have valid depth: done sampling.
                if depth_1 != 0 and depth_2 != 0:
                    break
            if attempts > 100:
                print("it's too close to get distance information")
                break

        if depth_1 == 0 or depth_2 == 0:
            # No valid depth pair found — retry on a fresh frame rather than
            # deprojecting with zero depth, which would yield a bogus pose.
            continue

        # Deproject both pixels to 3-D and average to get the centre pose.
        depth_point_1 = np.array(
            rs.rs2_deproject_pixel_to_point(color_intrinsics, [extended_point[0], extended_point[1]], depth_1))
        depth_point_2 = np.array(
            rs.rs2_deproject_pixel_to_point(color_intrinsics, [symmetric_point[0], symmetric_point[1]], depth_2))
        center_pose = (depth_point_1 + depth_point_2) / 2

        cv2.putText(img0, str(center_pose), [int(ellipse_center[0]), int(ellipse_center[1])], cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
        cv2.imshow("round", img0)
        key = cv2.waitKey(0)
        if key == ord('n') or key == ord('N'):
            continue
        if key == ord('y') or key == ord('Y'):
            cv2.destroyAllWindows()
            break
    return center_pose

def get_bolts_pose():
    """Interactively estimate the 3-D positions of detected bolts.

    Grabs frames from the global RealSense ``pipe``, detects bolt pixel
    locations with ``md.get_bolts_pixel``, reads depth at each bolt (probing
    random in-frame neighbours when the exact pixel has a depth hole), and
    deprojects each pixel to camera space with ``color_intrinsics``.  The
    annotated frame is shown; press 'y'/'Y' to accept, 'n'/'N' to retry on a
    fresh frame.

    Returns:
        list: one ``[x, y, z]`` pose (metres, rounded to 3 decimals) per
        detected bolt.
    """
    while True:
        frameset = pipe.wait_for_frames()
        color_frame = frameset.get_color_frame()
        color = np.asanyarray(color_frame.get_data())

        # Align depth to the color stream so bolt pixels index the depth map.
        align = rs.align(rs.stream.color)
        frameset = align.process(frameset)
        aligned_depth_frame = frameset.get_depth_frame()
        frame_w = aligned_depth_frame.get_width()
        frame_h = aligned_depth_frame.get_height()

        # Denoise, then sharpen before detection (kernel sums to 1).
        gaussian_blur = cv2.GaussianBlur(color, (5, 5), 0)  # (5, 5) kernel size, sigma auto
        sharpening_kernel = np.array([[-1, -1, -1],
                                      [-1, 9, -1],
                                      [-1, -1, -1]])
        imag0 = cv2.filter2D(gaussian_blur, -1, sharpening_kernel)

        ret_img, bolt_point = md.get_bolts_pixel(imag0)
        pose_bolt_list = []
        for point in bolt_point:
            depth_bolt = aligned_depth_frame.get_distance(point[0], point[1])
            if depth_bolt == 0:
                # Depth hole at the exact pixel: probe random neighbours within
                # +/-5 px, clamped to the frame so get_distance cannot raise.
                for _ in range(200):
                    probe_x = min(max(point[0] + random.randint(-5, 5), 0), frame_w - 1)
                    probe_y = min(max(point[1] + random.randint(-5, 5), 0), frame_h - 1)
                    depth_bolt = aligned_depth_frame.get_distance(probe_x, probe_y)
                    if depth_bolt != 0:
                        break
                if depth_bolt == 0:
                    # Pose will come out as the origin; warn instead of
                    # failing silently.
                    print("no valid depth found near bolt pixel", point)
            # Deproject at the original bolt pixel, using the (possibly
            # neighbouring) depth value as an approximation.
            pose_bolt = np.array(rs.rs2_deproject_pixel_to_point(color_intrinsics, [point[0], point[1]], depth_bolt))
            round_pose_bolt = [np.round(num, 3) for num in pose_bolt]
            pose_bolt_list.append(round_pose_bolt)
            cv2.putText(imag0, str(round_pose_bolt), point, cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
        # Show once, after all bolts are annotated.
        cv2.imshow("bolt detect", imag0)
        key = cv2.waitKey(0)
        if key == ord('n') or key == ord('N'):
            continue
        elif key == ord('y') or key == ord('Y'):
            cv2.destroyAllWindows()
            break
    return pose_bolt_list



# Setup:
# pipe = rs.pipeline()
# cfg = rs.config()
# cfg.enable_device_from_file("D:\qishuoyan_shiyan/20240430_184015.bag")
# profile = pipe.start(cfg)
# depth_intrinsics = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
# color_intrinsics = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()


# ---- RealSense camera initialisation ----
# NOTE: these stay at module level because get_round_pose()/get_bolts_pose()
# read `pipe` and `color_intrinsics` as globals.
pipe = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
profile = pipe.start(config)
depth_intrinsics = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
color_intrinsics = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()


# Depth sensor scale (metres per raw depth unit).
depth_sensor = pipe.get_active_profile().get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
try:
    # Skip the first 10 frames to give the auto-exposure time to adjust.
    for _ in range(10):
        pipe.wait_for_frames()

    # Loop invariants hoisted: the align primitive and colorizer do not
    # change between frames.
    align = rs.align(rs.stream.color)
    colorizer = rs.colorizer()

    while True:
        frameset = pipe.wait_for_frames()
        color_frame = frameset.get_color_frame()

        color = np.asanyarray(color_frame.get_data())
        # NOTE(review): color_copy is currently unused — kept in case it is
        # needed for annotation-free processing later; confirm or remove.
        color_copy = color.copy()

        # Align depth to the color stream.
        frameset = align.process(frameset)
        aligned_depth_frame = frameset.get_depth_frame()
        # NOTE(review): colorized_depth is computed but never displayed;
        # confirm it is wanted or remove to save per-frame work.
        colorized_depth = np.asanyarray(colorizer.colorize(aligned_depth_frame).get_data())

        key = cv2.waitKey(1)  # poll the keyboard once per millisecond
        if key == ord('r') or key == ord('R'):
            # Round-feature pose estimation.
            center = get_round_pose()
            print(center)
        if key == ord('b') or key == ord('B'):
            # Bolt pose estimation.
            pose = get_bolts_pose()
            print(pose)
        cv2.imshow('RealSense', color)
        if key == ord('q') or key == ord('Q'):  # quit on Q
            break
finally:
    # Always stop streaming, even on error or quit.
    pipe.stop()




'''
# There values are needed to calculate the mapping
depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
depth_min = 0.11 #meter
depth_max = 1.0 #meter

depth_intrin = profile.get_stream(rs.stream.depth).as_video_stream_profile().get_intrinsics()
color_intrin = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()

depth_to_color_extrin =  profile.get_stream(rs.stream.depth).as_video_stream_profile().get_extrinsics_to( profile.get_stream(rs.stream.color))
color_to_depth_extrin =  profile.get_stream(rs.stream.color).as_video_stream_profile().get_extrinsics_to( profile.get_stream(rs.stream.depth))

color_points = [
    [400.0, 150.0],
    [560.0, 150.0],
    [560.0, 260.0],
    [400.0, 260.0]
]
for color_point in color_points:
   depth_point_ = rs.rs2_project_color_pixel_to_depth_pixel(
                depth_frame.get_data(), depth_scale,
                depth_min, depth_max,
                depth_intrin, color_intrin, depth_to_color_extrin, color_to_depth_extrin, color_point)
'''