# -*- coding: utf-8 -*-
# Realsense D435使用Python获取相机坐标系下某点三维坐标  https://blog.csdn.net/ZNC1998/article/details/133864874
import pyrealsense2 as rs
import numpy as np
import cv2
import cv2.aruco as aruco
from matrixTools import rotation_matrix_to_vector,rotateX,rotateY,rotateZ

class CameraDriver():
    """Intel RealSense D435 helper.

    Responsibilities:
      * stream aligned color/depth frames (``openCamera`` / ``get_aligned_images``),
      * detect a single ArUco marker and return its 4x4 camera->marker
        homogeneous transform (``aruc_detect``),
      * deproject a pixel to a 3-D point in camera coordinates
        (``get_3d_camera_coordinate``).
    """

    def __init__(self):
        # Only detector/intrinsics setup here; streaming starts explicitly
        # via openCamera() so construction has no hardware side effects.
        self.arcuoInit()

    def openCamera(self):
        """Start depth and color streams (640x480 @ 15 FPS) and create the
        depth-to-color alignment processor."""
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)   # depth stream
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)  # color stream
        # The returned pipeline profile was never used; drop the binding.
        self.pipeline.start(config)
        # Align depth frames onto the color stream so a pixel in the color
        # image indexes the same scene point in the depth image.
        self.align = rs.align(rs.stream.color)

    def arcuoInit(self):
        """Configure the ArUco dictionary, detector parameters, and the
        (pre-calibrated) color-camera intrinsics."""
        # Physical side length of the printed marker, in meters.
        self.markerLength = 0.0835    # previously 0.083
        # Classic original ArUco dictionary (not a 4x4 dictionary).
        self.aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_ARUCO_ORIGINAL)
        self.parameters = aruco.DetectorParameters()
        # Lens distortion assumed negligible: all five coefficients zero.
        self.dist_coeffs = np.array([[0.0, 0.0, 0.0, 0.0, 0.0]])
        # 3x3 pinhole intrinsics [fx 0 cx; 0 fy cy; 0 0 1] of the color
        # camera — values come from an offline calibration of this device.
        self.inter_matrix = np.array([
            [603.8297729492188, 0.0, 326.9015197753906],
            [0.0, 603.3161010742188, 225.9406280517578],
            [0.0, 0.0, 1.0]])

    def aruc_detect(self, img_color):
        """Detect exactly one ArUco marker in a BGR image.

        Draws the marker's coordinate axes onto ``img_color`` in place
        (X red, Y green, Z blue, 2 cm long) and caches the pose in
        ``cur_Rcm`` (3x3 rotation), ``cur_Tcm`` (3x1 translation) and
        ``curVcm`` ([tx, ty, tz, rx, ry, rz]).

        Returns:
            4x4 homogeneous camera->marker transform, or ``None`` when the
            number of detected markers is not exactly one (ambiguous scene).
        """
        corners, ids, _rejected = aruco.detectMarkers(
            img_color, self.aruco_dict, parameters=self.parameters)
        # Exactly one marker is expected; bail out on zero or several.
        if len(corners) != 1:
            return None
        # Pose of the marker expressed in the camera frame.
        rvecs, tvecs, _obj_points = cv2.aruco.estimatePoseSingleMarkers(
            corners[0], markerLength=self.markerLength,
            cameraMatrix=self.inter_matrix, distCoeffs=self.dist_coeffs)
        R, _ = cv2.Rodrigues(rvecs[0])
        cv2.drawFrameAxes(img_color, self.inter_matrix, self.dist_coeffs,
                          rvecs, tvecs, 0.02)
        self.cur_Rcm = R
        self.cur_Tcm = tvecs[0].reshape(3, 1)
        self.curVcm = [tvecs[0][0][0], tvecs[0][0][1], tvecs[0][0][2],
                       rvecs[0][0][0], rvecs[0][0][1], rvecs[0][0][2]]
        Tcm = np.eye(4)
        Tcm[:3, :3] = R
        # tvecs[0] has shape (1, 3); flatten explicitly instead of relying
        # on numpy's implicit leading-singleton squeeze during assignment.
        Tcm[:3, 3] = tvecs[0].reshape(3)
        return Tcm

    def get_aligned_images(self):
        """Grab one frame set and align depth to color.

        Returns:
            Tuple ``(color_intrin, depth_intrin, img_color, img_depth,
            aligned_depth_frame)``: stream intrinsics, the color image
            (BGR uint8), the raw depth image (uint16), and the aligned
            depth frame object (needed later for ``get_distance``).
        """
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        aligned_color_frame = aligned_frames.get_color_frame()
        # Intrinsics are required to deproject pixels into camera space.
        depth_intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
        color_intrin = aligned_color_frame.profile.as_video_stream_profile().intrinsics
        # Convert frames to numpy arrays.
        img_color = np.asanyarray(aligned_color_frame.get_data())
        img_depth = np.asanyarray(aligned_depth_frame.get_data())
        self.depth_intrin = depth_intrin  # cached for later deprojection
        return color_intrin, depth_intrin, img_color, img_depth, aligned_depth_frame

    def get_3d_camera_coordinate(self, depth_pixel, aligned_depth_frame, depth_intrin):
        """Deproject one pixel to a 3-D point in the camera frame.

        Args:
            depth_pixel: ``(u, v)`` pixel coordinates in the aligned depth image.
            aligned_depth_frame: depth frame aligned to the color stream.
            depth_intrin: intrinsics of the depth stream.

        Returns:
            ``(distance_m, [x, y, z])`` — depth in meters at that pixel and
            the corresponding 3-D camera-frame coordinates.
        """
        # get_distance() requires integer pixel indices; cast so float
        # pixel coordinates (e.g. marker centers) are also accepted.
        dis = aligned_depth_frame.get_distance(int(depth_pixel[0]), int(depth_pixel[1]))
        camera_coordinate = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, dis)
        return dis, camera_coordinate
 
 
if __name__ == "__main__":
    # Placeholder entry point: running this module standalone does nothing
    # useful because the driver needs attached RealSense hardware; the class
    # is meant to be imported and driven by other code.
    print("")

