import pyrealsense2 as rs
import numpy as np
import cv2

#from Algorithm.Detector2dYolo5 import Detector2dYolo5


class Camera():
    """Wrapper around an Intel RealSense depth camera (pyrealsense2).

    Typical usage::

        cam = Camera()
        cam.Open()
        ok, image = cam.GetImage()
        ...
        cam.Close()
    """

    def __init__(self, width=480, height=270, frame_rate=30):
        """
        Initializes the RealsenseCamera with specified width, height, and frame rate.

        Args:
            width (int, optional): The width of the camera frame. Defaults to 480.
            height (int, optional): The height of the camera frame. Defaults to 270.
            frame_rate (int, optional): The frame rate of the camera. Defaults to 30.
        """
        self.pipeline = rs.pipeline()
        self.width = width
        self.height = height
        self.frame_rate = frame_rate

    def Open(self):
        """Start streaming and build the post-processing filter chain.

        Must be called before GetImage(). Enables depth (z16) and color (bgr8)
        streams at the resolution/rate given to __init__.
        """
        config = rs.config()
        config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.frame_rate)
        config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.frame_rate)
        self.pipeline.start(config)
        # Post-processing filters (applied per frame in GetImage).
        self.decimation = rs.decimation_filter()
        self.spatial = rs.spatial_filter()
        self.temporal = rs.temporal_filter()
        self.hole_filling = rs.hole_filling_filter()
        # Keep only depth values in [0, 0.5] m.
        self.depth_range = rs.threshold_filter()
        self.depth_range.set_option(rs.option.min_distance, 0)
        self.depth_range.set_option(rs.option.max_distance, 0.5)
        # Align depth frames to the color stream so pixels correspond 1:1.
        self.align_to = rs.stream.color
        self.align = rs.align(self.align_to)
        # Colorizer is only needed for the debug preview; create it once here
        # instead of once per frame as the previous GetImage() did.
        self.colorizer = rs.colorizer()

    def Close(self):
        """Stop the streaming pipeline."""
        self.pipeline.stop()

    def GetImage(self, roateangle=0, isShow=False):
        """Grab one aligned color/depth frame pair.

        Args:
            roateangle: Reserved rotation angle; currently unused (rotation is
                expected to be handled by the hand-eye calibration step).
            isShow: When True, display the color image and a colorized,
                filtered depth preview in OpenCV windows.

        Returns:
            tuple: (success flag, color image as an HxWx3 BGR numpy array,
            or None on failure).
        """
        # Wait for a coherent pair of frames and align depth to color.
        frames = self.pipeline.wait_for_frames()
        frames = self.align.process(frames)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()
        self.depthfram = depth_frame  # cached for Get_aligned_depth_frame()
        if not depth_frame or not color_frame:
            return False, None
        # Depth intrinsics are cached for pixel->camera deprojection (Get3DPoint).
        self.depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics

        color_image = np.asanyarray(color_frame.get_data())
        # color_image = cv2.rotate(color_image, cv2.ROTATE_90_CLOCKWISE)
        # Rotation deliberately disabled: it should happen in hand-eye calibration.

        if isShow:
            # The filtered depth chain (threshold -> spatial -> temporal) is
            # only used for the on-screen preview, so it is computed only when
            # displaying; the raw aligned frame is what gets cached above.
            filtered = self.depth_range.process(depth_frame)
            filtered = self.spatial.process(filtered)
            filtered = self.temporal.process(filtered)
            colorized_depth = np.asanyarray(self.colorizer.colorize(filtered).get_data())
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', color_image)
            cv2.namedWindow('RealSensedepth', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSensedepth', colorized_depth)
            cv2.waitKey(1)
        return True, color_image

    def Get_aligned_depth_frame(self):
        """Return the most recent aligned depth frame (set by GetImage)."""
        return self.depthfram

    def Get3DPoint(self, depth_pixel, aligned_depth_frame, depth_intrin=None):
        """Deproject a 2D pixel to a 3D point in the camera coordinate frame.

        Args:
            depth_pixel: (x, y) pixel coordinates in the aligned depth image.
            aligned_depth_frame: depth frame aligned to color, e.g. from
                Get_aligned_depth_frame().
            depth_intrin: optional depth intrinsics; defaults to the
                intrinsics cached by the last successful GetImage() call.

        Returns:
            tuple: (distance in meters at that pixel, [X, Y, Z] camera
            coordinates in meters).
        """
        # 'is None' instead of '== None': equality may be overloaded and is
        # the wrong test for the missing-argument sentinel.
        if depth_intrin is None:
            depth_intrin = self.depth_intrin
        x = int(depth_pixel[0])
        y = int(depth_pixel[1])
        dis = aligned_depth_frame.get_distance(x, y)  # depth in meters
        camera_coordinate = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, dis)
        return dis, camera_coordinate

    def SetProperty(self, propertyname, type, value):
        '''Set a camera property (stub, not implemented yet).

        Args:
            propertyname: name of the property to set (e.g. "Gain").
            type: "Auto" or "Value". NOTE: shadows the builtin `type`; the
                parameter name is kept for backward compatibility with
                keyword callers.
            value: numeric value, used when type == "Value".

        Returns:
            None.
        '''
        # TODO: map property names onto rs.option settings on the sensor.
        return None

    def GetProerty(self, propertyname):
        """Read a camera property (stub; misspelled name kept for callers).

        Always returns 0 until implemented.
        """
        return 0

    def GetProperty(self, propertyname):
        """Correctly-spelled, backward-compatible alias for GetProerty()."""
        return self.GetProerty(propertyname)



# camera= Camera()
# camera.Open()
# camera.SetProperty("Gain","Value",5)
# camera.SetProperty("Exposure","Value",float(1/299))
# v=camera.GetProerty("Gain")
# print(v)
# algorithm=Detector2dYolo5(useSES=False)  # use visual servoing as the real-time control algorithm
#
# while(True):
#
#     _,image=camera.GetImage()
#     result, image = camera.GetImage()
#     size = image.shape
#     w = size[1]  # width
#     h = size[0]  # height
#     result, image, ROI = algorithm.detect(image, w, h)
#     if (result.size != 0):
#         # print(result,image.shape)
#         lx = (int)(result[0][0])
#         ly = (int)(result[0][1])
#         rx = (int)(result[0][2])
#         ry = (int)(result[0][3])
#         cenx=(lx+rx)/2
#         ceny=(ly+ry)/2
#         print(cenx,ceny)
#         # ty=(cenx-w/2)
#         # tx=-(ceny-h/2)
#         # tx=tx+cenx
#         # ty=ceny-ty
#         d1,d2=camera.Get3DPoint(np.array([cenx,ceny]),camera.Get_aligned_depth_frame())
#         print(d2)
#     cv2.namedWindow("result", 0)
#     cv2.resizeWindow("result", 320, 320)  # set the display window size
#     cv2.imshow('result', image)
#     cv2.waitKey(30)