import json
import os
import threading
import time

import pyrealsense2 as rs
import numpy as np
import cv2

#from Algorithm.Detector2dYolo5 import Detector2dYolo5


class Camera():
    """Wrapper around an Intel RealSense camera (pyrealsense2).

    Typical usage: Open() -> GetImage() / GetImageNodepth() -> Close().

    NOTE(review): the background-capture path (collectLoop, GetImageInLoop)
    and the depth helpers (Get_aligned_depth_frame, Get3DPoint with the
    default intrinsics) rely on attributes (self.isStart, self.lock,
    self.color, self.depthfram, self.depth_intrin) that are only assigned
    in code that is currently commented out -- confirm before using them.
    """

    def __init__(self):
        # The pipeline is created here but only started in Open().
        self.pipeline = rs.pipeline()
        self.current_dir = os.path.dirname(__file__)

    def Open(self):
        """Start streaming 640x480 depth (z16) and color (bgr8) at 90 FPS,
        and prepare post-processing filters and the depth->color aligner."""
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 90)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 90)

        self.pipeline.start(config)
        # (Advanced-mode JSON preset loading was here; it is disabled.)

        # Post-processing filters. Only depth_range is configured; the
        # others are created for the (currently disabled) depth pipeline.
        self.decimation = rs.decimation_filter()
        self.spatial = rs.spatial_filter()
        self.temporal = rs.temporal_filter()
        self.hole_filling = rs.hole_filling_filter()
        self.depth_range = rs.threshold_filter()
        self.depth_range.set_option(rs.option.min_distance, 0)
        self.depth_range.set_option(rs.option.max_distance, 0.5)  # metres
        # Align depth frames into the color camera's viewpoint.
        self.align_to = rs.stream.color
        self.align = rs.align(self.align_to)
        # (Background capture thread start-up was here; it is disabled.)

    def collectLoop(self):
        """Background capture loop.

        Expects self.isStart to be set True by the caller; the thread
        start-up code in Open() is currently disabled.
        """
        while self.isStart:
            try:
                # Very short timeout: poll for an aligned color frame.
                frames = self.pipeline.wait_for_frames(timeout_ms=2)
                frames = self.align.process(frames)
                color_frame = frames.get_color_frame()
                color_image = np.asanyarray(color_frame.get_data())
                # NOTE(review): the frame is converted but never published;
                # the self.lock / self.color hand-off code is disabled, so
                # GetImageInLoop() cannot see this image as written.
                time.sleep(0.001)
            except Exception:
                # Fixed: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. A timeout simply means no
                # frame was ready -- back off briefly and retry.
                time.sleep(0.025)
                continue

    def Close(self):
        """Stop the streaming pipeline."""
        self.pipeline.stop()

    def GetImage(self, roateangle=0, isShow=False):
        """Blocking capture of one color frame aligned to the color stream.

        Args:
            roateangle: unused; kept for interface compatibility.
            isShow: when True, show the frame in an OpenCV window.

        Returns:
            (True, color_image) where color_image is an HxWx3 BGR ndarray.
        """
        frames = self.pipeline.wait_for_frames()
        frames = self.align.process(frames)
        color_frame = frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        # (Depth filtering / colorized-depth display code was here; it is
        # disabled, and the unused per-call colorizer objects were removed.)

        if isShow:
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', color_image)
            cv2.namedWindow('RealSensedepth', cv2.WINDOW_AUTOSIZE)
            cv2.waitKey(1)
        return True, color_image

    def GetImageNodepth(self, roateangle=0, isShow=False):
        """Like GetImage(), but returns (False, None) when no color frame
        is available instead of proceeding unconditionally.

        Args:
            roateangle: unused; kept for interface compatibility.
            isShow: when True, show the frame in an OpenCV window.

        Returns:
            (True, color_image) on success, (False, None) otherwise.
        """
        frames = self.pipeline.wait_for_frames()
        frames = self.align.process(frames)
        color_frame = frames.get_color_frame()
        if not color_frame:
            return False, None

        color_image = np.asanyarray(color_frame.get_data())

        if isShow:
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', color_image)
            cv2.namedWindow('RealSensedepth', cv2.WINDOW_AUTOSIZE)
            cv2.waitKey(1)
        return True, color_image

    def GetImageInLoop(self):
        """Return a copy of the latest frame captured by collectLoop().

        NOTE(review): self.lock and self.color are only created by the
        disabled threading code in Open(); as shipped this raises
        AttributeError.
        """
        # Fixed: use the lock as a context manager so it is released even
        # if .copy() raises (the manual acquire/release leaked the lock).
        with self.lock:
            color = self.color.copy()
        return color

    def Get_aligned_depth_frame(self):
        """Return the most recent aligned depth frame.

        NOTE(review): self.depthfram is only assigned in disabled code.
        """
        return self.depthfram

    def Get3DPoint(self, depth_pixel, aligned_depth_frame, depth_intrin=None):
        """Deproject a 2-D pixel into a 3-D point in camera coordinates.

        Args:
            depth_pixel: (x, y) pixel coordinates.
            aligned_depth_frame: depth frame aligned to the color stream.
            depth_intrin: depth intrinsics; defaults to self.depth_intrin
                (only set by disabled code -- pass it explicitly).

        Returns:
            (distance_m, [x, y, z]): depth in metres at the pixel and the
            deprojected camera-space point.
        """
        if depth_intrin is None:  # fixed: was `== None`
            depth_intrin = self.depth_intrin
        x = int(depth_pixel[0])
        y = int(depth_pixel[1])
        # Depth at the pixel; pyrealsense2 reports it in metres.
        dis = aligned_depth_frame.get_distance(x, y)
        camera_coordinate = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, dis)
        return dis, camera_coordinate

    def SetProperty(self, propertyname, type, value):
        """Set a camera property. Not implemented yet.

        Args:
            propertyname: property name string.
            type: "Auto" or "Value".
            value: numeric value, used when type == "Value".
        """
        # Fixed: body was a bare `None` expression statement (a no-op that
        # reads as a mistake); `pass` states the intent explicitly.
        pass

    def GetProerty(self, propertyname):
        """Read a camera property. Not implemented; always returns 0.

        NOTE(review): the misspelled name `GetProerty` is kept on purpose
        for caller compatibility.
        """
        return 0



if __name__ == '__main__':
    # Smoke test: stream color frames until the user presses 'q'.
    camera = Camera()
    camera.Open()
    try:
        while True:
            _, image = camera.GetImage()
            cv2.namedWindow("result", 0)
            cv2.resizeWindow("result", 640, 480)  # set window size
            cv2.imshow('result', image)
            # Fixed: the loop had no exit path; quit cleanly on 'q'.
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break
    finally:
        # Fixed: the pipeline was never stopped and the windows never
        # destroyed, leaking the device handle on exit.
        camera.Close()
        cv2.destroyAllWindows()

# camera= Camera()
# camera.Open()
# camera.SetProperty("Gain","Value",5)
# camera.SetProperty("Exposure","Value",float(1/299))
# v=camera.GetProerty("Gain")
# print(v)
# algorithm=Detector2dYolo5(useSES=False)#实时控制算法设置为视觉伺服
#
# while(True):
#
#     _,image=camera.GetImage()
#     result, image = camera.GetImage()
#     size = image.shape
#     w = size[1]  # 宽度
#     h = size[0]  # 高度
#     result, image, ROI = algorithm.detect(image, w, h)
#     if (result.size != 0):
#         # print(result,image.shape)
#         lx = (int)(result[0][0])
#         ly = (int)(result[0][1])
#         rx = (int)(result[0][2])
#         ry = (int)(result[0][3])
#         cenx=(lx+rx)/2
#         ceny=(ly+ry)/2
#         print(cenx,ceny)
#         # ty=(cenx-w/2)
#         # tx=-(ceny-h/2)
#         # tx=tx+cenx
#         # ty=ceny-ty
#         d1,d2=camera.Get3DPoint(np.array([cenx,ceny]),camera.Get_aligned_depth_frame())
#         print(d2)
#     cv2.namedWindow("result", 0)
#     cv2.resizeWindow("result", 320, 320)  # 设置窗口大小
#     cv2.imshow('result', image)
#     cv2.waitKey(30)