#!/usr/bin/python2
# -*- coding: UTF-8 -*-
import ctypes
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
import pyrealsense2 as rs
import json
import rospy, sys

from std_msgs.msg import Bool
import math
from geometry_msgs.msg import PoseStamped, Pose
import copy


from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import numpy as np
import threading
import time
from geometry_msgs.msg import Twist
import matplotlib.pyplot as plt
#from scipy.interpolate import spline


import ur_kinematics as kmtic

sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import cv2.aruco as aruco







class Image_converter:
    """Capture color frames from an Intel RealSense camera and run ArUco
    marker pose estimation on every frame.

    The constructor blocks: it opens the 1280x720 color stream, then loops
    forever feeding each frame to ``callback`` until 'q' is pressed in the
    OpenCV window (Esc is handled inside ``callback`` and shuts the ROS
    node down instead).
    """

    def __init__(self):
        # Lens distortion coefficients for the color stream.
        # NOTE(review): the original code assigned a real-looking 640x480
        # calibration and immediately overwrote it; only the values kept
        # below were ever used, so the dead assignments were removed.
        self.dist = np.array([[0., 0., 0., 0., 0.]])
        # Camera matrix for the 1280x720 color stream.
        self.mtx = np.array([[926.465533,   0.,         654.1],
                             [  0.,         926.436670, 353.1],
                             [  0.,           0.,         1.]])

        # Hand-eye calibration: rotation only (eye2hand) and the full
        # homogeneous transform (eye2hand_T), camera frame -> hand frame.
        # NOTE(review): presumably produced by an offline calibration —
        # confirm against the calibration procedure before reuse.
        self.eye2hand = [[-0.08330238, -0.31939972,  0.94395155],
                         [-0.99522918, -0.02161323, -0.0951407],
                         [ 0.05078976, -0.94737357, -0.31607549]]
        self.eye2hand_T = [[-0.08330238, -0.31939972,  0.94395155,  0.01117643],
                           [-0.99522918, -0.02161323, -0.0951407,   0.01222332],
                           [ 0.05078976, -0.94737357, -0.31607549, -0.10752432],
                           [ 0.,          0.,          0.,          1.]]

        # Open the RealSense color stream (depth is currently disabled).
        pipeline = rs.pipeline()
        config = rs.config()
        #config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
        pipeline.start(config)

        # Print the factory intrinsics so they can be compared with the
        # hand-entered self.mtx above.
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        color_profile = color_frame.get_profile()
        cvsprofile = rs.video_stream_profile(color_profile)
        color_intrin = cvsprofile.get_intrinsics()
        print(color_intrin)

        # Align frames to the color stream (kept so depth alignment works
        # once the depth stream is re-enabled).
        align = rs.align(rs.stream.color)
        try:
            while True:
                prev_time = time.time()
                frames = pipeline.wait_for_frames()
                aligned_frames = align.process(frames)
                #aligned_depth_frame = aligned_frames.get_depth_frame()
                color_frame = aligned_frames.get_color_frame()
                c = np.asanyarray(color_frame.get_data())
                self.callback(c)
                # BUGFIX: the original computed int(1/elapsed) unguarded —
                # a near-zero elapsed time raised ZeroDivisionError, and an
                # fps that truncated to 0 made waitKey(0) block forever.
                elapsed = time.time() - prev_time
                fps = int(1 / elapsed) if elapsed > 0 else 1
                # press q to quit the program
                if cv2.waitKey(max(1, fps)) == ord('q'):
                    pipeline.stop()
                    break
        finally:
            # Close the preview window even if the loop died on an error.
            cv2.destroyAllWindows()

    def callback(self, data):
        """Detect ArUco markers in one BGR frame, overlay their poses, and
        display the annotated frame.

        Args:
            data: BGR color image as a numpy array (H x W x 3).

        Side effects: shows the frame in the "frame" window; pressing Esc
        there shuts the ROS node down.
        """
        cv_image = data
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
        parameters = aruco.DetectorParameters_create()
        # detectMarkers returns the 4 corner coordinates and the ID of
        # every marker found in the frame.
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)

        if ids is not None:
            # Estimate the pose of each marker (marker side = 5 cm).
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(
                corners, 0.05, self.mtx, self.dist)

            # Draw a 3 cm axis triad on every detected marker.
            for i in range(rvec.shape[0]):
                aruco.drawAxis(cv_image, self.mtx, self.dist,
                               rvec[i, :, :], tvec[i, :, :], 0.03)
                aruco.drawDetectedMarkers(cv_image, corners)
            EulerAngles = self.rotationVectorToEulerAngles(rvec[0])

            # Overlay the pose of the first marker.
            cv2.putText(cv_image, "Attitude_angle:" + str(EulerAngles), (0, 120),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2,
                        cv2.LINE_AA)
            cv2.putText(cv_image, "tvec:" + str(tvec), (0, 160),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2,
                        cv2.LINE_AA)
        else:
            # No marker in view: flag downstream filtering to reset.
            self.kalman_reset_flag = False
            cv2.putText(cv_image, "No Ids", (0, 64),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                        cv2.LINE_AA)

        # Show the annotated frame.
        cv2.imshow("frame", cv_image)
        key = cv2.waitKey(1)
        if key == 27:  # Esc key exits
            print('esc break...')
            rospy.signal_shutdown("c")

    def rotationVectorToEulerAngles(self, rvec):
        """Convert a Rodrigues rotation vector to XYZ Euler angles.

        Args:
            rvec: 3-element rotation vector, as returned per marker by
                aruco.estimatePoseSingleMarkers.

        Returns:
            (x, y, z) angles in radians (roll, pitch, yaw).
        """
        R = np.zeros((3, 3), dtype=np.float64)
        cv2.Rodrigues(rvec, R)
        sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
        singular = sy < 1e-6
        if not singular:
            x = math.atan2(R[2, 1], R[2, 2])
            y = math.atan2(-R[2, 0], sy)
            z = math.atan2(R[1, 0], R[0, 0])
        else:
            # Gimbal lock: pitch is +/-90 degrees, yaw is unrecoverable.
            x = math.atan2(-R[1, 2], R[1, 1])
            y = math.atan2(-R[2, 0], sy)
            z = 0
        return x, y, z
          

     
if __name__ == "__main__":
    # Constructing the converter opens the camera and blocks in its
    # capture loop until the user quits.
    Image_converter()

    
