#-*- coding: utf-8 -*-
###########################################
from __future__ import division
import sys
import rospy

from kortex_driver.srv import *
from kortex_driver.msg import *
import math
import numpy as np
from numpy import *
import threading


from std_msgs.msg import Float64MultiArray, Int16, String
from tf2_msgs.msg import TFMessage
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import Point

from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
from cv_bridge import CvBridge
# aruco
import cv2
# from cv2 import aruco
import cv2.aruco as aruco
import tf
from tf.transformations import quaternion_matrix

# Shared converter between ROS sensor_msgs/Image messages and OpenCV images.
bridge = CvBridge()

def camera_to_base(obj_came):
    """Transform a homogeneous point from the camera frame to the base frame.

    Parameters
    ----------
    obj_came : numpy.matrix, shape (4, 1)
        Homogeneous point [x, y, z, 1]^T expressed in /camera_link.

    Returns
    -------
    numpy.matrix, shape (4, 1)
        The same point expressed in /base_link.
    """
    listener = tf.TransformListener()
    trans = rot = None
    # The listener's buffer is empty right after construction, so the first
    # lookups raise; retry with a short sleep instead of busy-spinning, and
    # bail out if the node is shut down while we wait.
    while not rospy.is_shutdown():
        try:
            (trans, rot) = listener.lookupTransform('/base_link', '/camera_link', rospy.Time(0))
            break
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            rospy.sleep(0.05)

    # Build the full 4x4 homogeneous transform: rotation from the quaternion
    # (quaternion_matrix returns a 4x4 with zero translation), then write the
    # translation into the last column.
    trans_matrix = quaternion_matrix(rot)
    trans_matrix[0:3, 3] = trans

    return np.dot(trans_matrix, obj_came)
def get_common_parameter():
    """Block until one CameraInfo message arrives on each camera topic and
    cache them in the module-level globals ``color_info`` and ``depth_info``.

    NOTE(review): this function is defined a second time further down in this
    file with an identical body; the later definition shadows this one, so
    one of the two copies should be removed.
    """
    global color_info
    global depth_info
    rospy.loginfo("Waiting for message for  camera ......")
    color_info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
    depth_info = rospy.wait_for_message('/camera/depth/camera_info', CameraInfo)

def Intrinsics_camera(cam_info):
    """Extract pinhole intrinsics from a sensor_msgs/CameraInfo-like object.

    Reads the row-major 3x3 intrinsic matrix ``cam_info.K`` and the
    distortion vector ``cam_info.D`` and returns ``(cameraMat, distCoe)``:
    the 3x3 camera matrix and the first five distortion coefficients,
    both as numpy arrays.
    """
    intrinsics = cam_info.K
    distortion = cam_info.D

    # K is row-major: [fx, 0, cx, 0, fy, cy, 0, 0, 1].
    focal_x, center_x = intrinsics[0], intrinsics[2]
    focal_y, center_y = intrinsics[4], intrinsics[5]

    cameraMat = np.array([[focal_x, 0, center_x],
                          [0, focal_y, center_y],
                          [0, 0, 1]])
    distCoe = np.array([distortion[i] for i in range(5)])

    return cameraMat, distCoe

def get_common_parameter():
    """Wait for one CameraInfo message per camera stream and expose the
    results through the module globals ``color_info`` and ``depth_info``.

    NOTE(review): duplicate of the identically-named function earlier in
    this file; this later definition is the one that takes effect.
    """
    global color_info, depth_info
    rospy.loginfo("Waiting for message for  camera ......")
    color_info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
    depth_info = rospy.wait_for_message('/camera/depth/camera_info', CameraInfo)
def aruco_list_detecter(data):
    '''Image callback: detect ArUco markers in the incoming color frame,
    estimate each marker's pose, transform its position into the robot base
    frame, and store it in the module-level ``final_base`` dict keyed by
    marker id (``final_base`` is created in the ``__main__`` block).

    :param data: sensor_msgs/Image from /camera/color/image_raw (bgr8).
    '''
    # Intrinsics of the color camera; color_info is cached by get_common_parameter().
    cameraMatrix, distCoeffs = Intrinsics_camera(color_info)
    # data = rospy.wait_for_message('/camera/color/image_raw', Image)
    # final_base = {}
    # Convert the ROS Image message to an OpenCV BGR image.
    frame = bridge.imgmsg_to_cv2(data, "bgr8")

    # BGR -> RAY
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): despite its name, "gray" is still the BGR frame — the
    # grayscale conversion above is commented out. detectMarkers accepts a
    # color image, so detection still works.
    gray = frame
    # Create a dictionary object from one of the aruco module's predefined
    # marker dictionaries (6x6 bits, 250 ids).
    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)  # cv2.aruco.DICT_ARUCO_ORIGINAL
    parameters = cv2.aruco.DetectorParameters_create()
    # lists of ids and the corners beloning to each id // numpy.ndarray, list
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray,
                                                          aruco_dict,
                                                          parameters=parameters)
    # Draw detected marker outlines and ids on the frame for visualization.
    aruco.drawDetectedMarkers(gray, corners, ids, borderColor=(0, 0, 255))
    gray = cv2.resize(gray,(1280,960))



    if ids is not None:
        for i in range(len(ids)):
            # 0.019 is the physical marker side length; tvec comes back in the
            # same unit (presumably meters — TODO confirm the marker size).
            rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners[i], 0.019, cameraMatrix, distCoeffs)
            (rvec - tvec).any()  # get rid of that nasty numpy value array error
            print("=======================")
            """ Applies perspective transform for given rvec and tvec. """
            # Rotation vector -> 3x3 rotation matrix.
            R, _ = cv2.Rodrigues(rvec)
            print("-------------------------")
            t = tvec[0].T
            # Assemble the 4x4 homogeneous camera->marker transform.
            TT = np.hstack((R, t))
            TT = np.vstack((TT, np.array([0, 0, 0, 1])))
            # NOTE(review): transition_mat is the identity, so this dot is a
            # no-op; it looks like a placeholder for a marker-frame offset.
            transition_mat = np.array([[1, 0, 0, 0],
                                       [0, 1, 0, 0],
                                       [0, 0, 1, 0],
                                       [0, 0, 0, 1]])
            final_mat = np.dot(TT, transition_mat)
            # Last column = marker position (homogeneous) in the camera frame.
            final = final_mat[:, 3]
            # `matrix` comes from the `from numpy import *` at the top of the file.
            final_1 = matrix([[final[0]], [final[1]], [final[2]], [1]])
            # print final_1
            final_trans = camera_to_base(final_1)
            # final_ends_trans = camera_to_end(final_1)
            final_base[ids[i][0]] = final_trans  # store the final transformation in the dictionary
            print final_base
            # final_end[ids[i][0]] = final_ends_trans
    cv2.imshow('result_id', gray)
    # if cv2.waitKey(25) & 0xFF == ord('q'):
    #     cv2.destroyWindow()
    cv2.waitKey(1)

    # cv2.destroyAllWindows()

    # return final_1  # , final_end

if __name__ == '__main__':

    rospy.init_node("cam_aruco")
    # Cache the camera intrinsics once before any image callback fires.
    get_common_parameter()
    # Marker id -> pose in the base frame; filled in by aruco_list_detecter().
    final_base = {}
    # Run detection on every incoming color frame until shutdown.
    image_sub = rospy.Subscriber('/camera/color/image_raw', Image, aruco_list_detecter)
    rospy.spin()
