#-*- coding: utf-8 -*-
###########################################
# 2020 version
# Features: voice, detection, human-hand sensing
# Grasps the solid-powder reagent bucket from the shelf
###########################################


from __future__ import division  
import sys
import rospy

from kortex_driver.srv import *
from kortex_driver.msg import *
import math
import numpy as np
from numpy import *    # star-import of numpy (provides matrix etc.); kept for legacy reasons
import threading


from std_msgs.msg import Float64MultiArray, Int16, String, Float32MultiArray
from tf2_msgs.msg import TFMessage
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import Point

from tf.transformations import euler_from_quaternion, quaternion_from_euler
import random
from cv_bridge import CvBridge
# aruco
import cv2
# from cv2 import aruco
import cv2.aruco as aruco
import tf
from tf.transformations import quaternion_matrix

from pymodbus.client.sync import ModbusTcpClient #pip3 install pymodbus==2.5.3
import time
bridge = CvBridge()


def color_to_depth(depth_img, Co_x, Co_y):
    """Map a color-image pixel (1280x720) onto the depth image (480x270)
    and return the closest non-zero depth (in metres) found in a window
    around the mapped pixel.

    The +40 offsets compensate for the fixed alignment between the color
    and depth streams of this camera setup (empirically tuned) -- TODO
    confirm against the camera calibration.

    Falls back to 1.0 m when every sampled pixel in the window is zero
    (no valid depth reading).
    """
    depth_x = int(Co_x / 1280 * 480) + 40
    depth_y = int(Co_y / 720 * 270) + 40
    print(depth_x, depth_y)

    # 70x70 search window, clamped to the depth-image bounds.
    x_min = max(depth_x - 35, 0)
    y_min = max(depth_y - 35, 0)
    x_max = min(depth_x + 35, 480)
    y_max = min(depth_y + 35, 270)

    # Sample every other pixel; keep the smallest valid (non-zero) reading.
    dep_val = 1000
    for x_i in range(x_min, x_max, 2):
        for y_i in range(y_min, y_max, 2):
            dep_ij = depth_img[y_i, x_i]
            if dep_ij != 0 and dep_ij < dep_val:
                dep_val = dep_ij

    # Millimetres -> metres.
    return dep_val * 0.001

def Cam_trans_fun(camera_info, pixels_cood, z):
    """Back-project a homogeneous pixel coordinate into the camera frame.

    camera_info : CameraInfo-like object whose K field is the flattened
                  row-major 3x3 intrinsic matrix.
    pixels_cood : 3x1 matrix [[u], [v], [1]].
    z           : depth along the optical axis (metres).

    Returns a 4x1 homogeneous point [[X], [Y], [Z], [1]] in the camera frame.
    """
    K = camera_info.K
    # Intrinsic matrix rebuilt from the flattened K field.
    intrinsics = matrix([[K[0], 0, K[2]],
                         [0, K[4], K[5]],
                         [0, 0, 1]])
    # Pinhole model inverse projection: X = z * K^-1 * pixel.
    point_3d = intrinsics.I * pixels_cood * z
    # Append the homogeneous 1.
    return np.vstack((point_3d, [[1]]))

def camera_to_end(obj_cam):
    """Transform a homogeneous camera-frame point (4x1) into the
    end-effector frame.

    The fixed 4x4 matrix is the hand-eye calibration between camera and
    gripper: a 180-degree flip about Z plus measured translation offsets
    in metres.
    """
    cam_to_ee = matrix([[-1, 0, 0, 0],
                        [0, -1, 0, 0.05639],
                        [0, 0, 1, -0.00305 - 0.11995],
                        [0, 0, 0, 1]])
    return cam_to_ee * obj_cam

def camera_to_base(obj_came):
    """Transform a homogeneous camera-frame point (4x1 matrix) into the
    robot base frame via the TF tree.

    Blocks until the '/base_link' <- '/camera_link' transform is
    available.  NOTE(review): a brand-new TransformListener is created on
    every call and its buffer starts empty, so the first lookups always
    fail; the original code retried in a tight loop (100% CPU busy-wait).
    We now sleep briefly between retries.  Consider sharing one listener
    at module/class level instead.
    """
    listener = tf.TransformListener()
    while True:
        try:
            (trans, rot) = listener.lookupTransform('/base_link', '/camera_link', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            # TF buffer not filled yet -- back off instead of busy-spinning.
            rospy.sleep(0.05)
            continue
        break

    # quaternion -> 4x4 homogeneous rotation matrix (zero translation column).
    mat = quaternion_matrix(rot)
    # Add the translation into the last column to complete the transform.
    trans_mat = np.array(((0.0,    0.0,    0.0,    trans[0]),
                          (0.0,    0.0,    0.0,    trans[1]),
                          (0.0,    0.0,    0.0,    trans[2]),
                          (0.0,    0.0,    0.0,         0.0)), dtype=np.float64)
    trans_matrix = trans_mat + mat

    base_frame = trans_matrix * obj_came
    return base_frame

def Intrinsics_camera(cam_info):
    """Extract the 3x3 intrinsic matrix and the 5-element distortion
    vector from a CameraInfo-like message.

    Returns (cameraMatrix, distCoeffs) as numpy arrays in the layout
    expected by OpenCV's aruco pose-estimation routines.
    """
    K, D = cam_info.K, cam_info.D
    fx, cx, fy, cy = K[0], K[2], K[4], K[5]

    dist_coeffs = np.array([D[0], D[1], D[2], D[3], D[4]])
    camera_matrix = np.array([[fx, 0, cx],
                              [0, fy, cy],
                              [0, 0, 1]])

    return camera_matrix, dist_coeffs

def robot_to_camera(obj_camera_frame):
    """Transform a camera-frame point into the robot base frame using a
    pre-computed base->camera calibration matrix loaded from disk.
    """
    # Hand-eye calibration result produced by a separate calibration step.
    # NOTE(review): hard-coded absolute path -- breaks on any other machine.
    data_a = np.load('/home/gang/base_to_kinect.npy')
    
    robot_to_camera = np.asmatrix(data_a)  # convert the ndarray to a matrix
    
    # NOTE(review): the .T means the input is treated as a ROW vector here,
    # while the sibling helpers produce 4x1 COLUMN vectors -- confirm
    # against actual callers before reuse.
    obj_robot_frame = robot_to_camera * np.asmatrix(obj_camera_frame).T

    return obj_robot_frame

class GenArmMovement:
    def __init__(self):
        """Initialise the ROS node and connect to every Kortex driver
        service this demo uses (motion, gripper, action notifications).

        Blocks until each driver service is advertised.
        """
        rospy.init_node('gen_arm_movement')

        self.HOME_ACTION_IDENTIFIER = 2
        # Set by notification_callback when an action reports ACTION_END;
        # every motion helper waits on this event before returning.
        self.pose_done = threading.Event()

        # Get node params
        self.robot_name = rospy.get_param('~robot_name', "my_gen3")
        self.degrees_of_freedom = rospy.get_param("/" + self.robot_name + "/degrees_of_freedom", 7)
        self.is_gripper_present = rospy.get_param("/" + self.robot_name + "/is_gripper_present", False)

        rospy.loginfo("Using robot_name " + self.robot_name + " , robot has " + str(self.degrees_of_freedom) + " degrees of freedom and is_gripper_present is " + str(self.is_gripper_present))

        # Init the services (each wait_for_service blocks until the driver is up)
        clear_faults_full_name = '/' + self.robot_name + '/base/clear_faults'
        rospy.wait_for_service(clear_faults_full_name)
        self.clear_faults = rospy.ServiceProxy(clear_faults_full_name, Base_ClearFaults)

        read_action_full_name = '/' + self.robot_name + '/base/read_action'
        rospy.wait_for_service(read_action_full_name)
        self.read_action = rospy.ServiceProxy(read_action_full_name, ReadAction)

        execute_action_full_name = '/' + self.robot_name + '/base/execute_action'
        rospy.wait_for_service(execute_action_full_name)
        self.execute_action = rospy.ServiceProxy(execute_action_full_name, ExecuteAction)

        set_cartesian_reference_frame_full_name = '/' + self.robot_name + '/control_config/set_cartesian_reference_frame'
        rospy.wait_for_service(set_cartesian_reference_frame_full_name)
        self.set_cartesian_reference_frame = rospy.ServiceProxy(set_cartesian_reference_frame_full_name, SetCartesianReferenceFrame)

        play_cartesian_trajectory_full_name = '/' + self.robot_name + '/base/play_cartesian_trajectory'
        rospy.wait_for_service(play_cartesian_trajectory_full_name)
        self.play_cartesian_trajectory = rospy.ServiceProxy(play_cartesian_trajectory_full_name, PlayCartesianTrajectory)

        play_joint_trajectory_full_name = '/' + self.robot_name + '/base/play_joint_trajectory'
        rospy.wait_for_service(play_joint_trajectory_full_name)
        self.play_joint_trajectory = rospy.ServiceProxy(play_joint_trajectory_full_name, PlayJointTrajectory)

        send_gripper_command_full_name = '/' + self.robot_name + '/base/send_gripper_command'
        rospy.wait_for_service(send_gripper_command_full_name)
        self.send_gripper_command = rospy.ServiceProxy(send_gripper_command_full_name, SendGripperCommand)

        # Lets us subscribe to the driver's action_topic (see
        # example_subscribe_to_a_robot_notification).
        activate_publishing_of_action_notification_full_name = '/' + self.robot_name + '/base/activate_publishing_of_action_topic'
        rospy.wait_for_service(activate_publishing_of_action_notification_full_name)
        self.activate_publishing_of_action_notification = rospy.ServiceProxy(activate_publishing_of_action_notification_full_name, OnNotificationActionTopic)

        # Topics used to start/stop the external hand-detection node.
        self.pub_hand = rospy.Publisher('start_hand_det', Int16, queue_size=10)
        self.stop_hand = rospy.Publisher('stop_hand_det', Int16, queue_size=10)
    
    def example_clear_faults(self):
        """Clear any active faults on the arm, then pause to let it settle."""
        try:
            self.clear_faults()
        except rospy.ServiceException:
            rospy.logerr("Failed to call ClearFaults")
            return
        rospy.loginfo("Cleared the faults successfully")
        rospy.sleep(2.5)

    def example_set_cartesian_reference_frame(self):
        """Switch the controller to the MIXED cartesian reference frame."""
        request = SetCartesianReferenceFrameRequest()
        request.input.reference_frame = CartesianReferenceFrame.CARTESIAN_REFERENCE_FRAME_MIXED

        try:
            self.set_cartesian_reference_frame(request)
        except rospy.ServiceException:
            rospy.logerr("Failed to call SetCartesianReferenceFrame")
        else:
            rospy.loginfo("Set the cartesian reference frame successfully")

        # Give the controller a moment to apply the change.
        rospy.sleep(0.25)

    def notification_callback(self, notif):
        """Action-topic callback: release any waiting motion helper once
        the current action reports ACTION_END."""
        if notif.action_event != ActionEvent.ACTION_END:
            return
        print("The movement has finshed!")
        self.pose_done.set()

    def example_subscribe_to_a_robot_notification(self):
        """Enable ActionNotification publishing on the driver and hook
        notification_callback up to the action topic."""
        rospy.loginfo("Activating the action notifications...")
        try:
            self.activate_publishing_of_action_notification(OnNotificationActionTopicRequest())
        except rospy.ServiceException:
            rospy.logerr("Failed to call OnNotificationActionTopic")
        else:
            rospy.loginfo("Successfully activated the Action Notifications!")

        rospy.Subscriber("/" + self.robot_name + "/action_topic", ActionNotification, self.notification_callback)

        # Give the subscription time to register before any motion starts.
        rospy.sleep(1.0)

    def get_common_parameter(self):
        """Block until both camera-info topics publish once, caching the
        messages in the module-level globals color_info / depth_info."""
        global color_info, depth_info
        rospy.loginfo("Waiting for message for  camera ......")
        color_info = rospy.wait_for_message('/camera/color/camera_info', CameraInfo)
        depth_info = rospy.wait_for_message('/camera/depth/camera_info', CameraInfo)

    def example_send_joint_angles(self, joint_angles):
        """Send an absolute joint-space trajectory (one value per joint,
        degrees) and block until the action-end notification fires.

        Returns False when the service call itself fails.
        """
        self.last_action_notif_type = None

        req = PlayJointTrajectoryRequest()
        for joint_id in range(self.degrees_of_freedom):
            ja = JointAngle()
            ja.joint_identifier = joint_id
            ja.value = joint_angles[joint_id]
            req.input.joint_angles.joint_angles.append(ja)

        rospy.loginfo("Sending the robot vertical...")
        try:
            self.play_joint_trajectory(req)
        except rospy.ServiceException:
            rospy.logerr("Failed to call PlayJointTrajectory")
            return False

        # Wait for notification_callback to signal completion.
        self.pose_done.wait()
        self.pose_done.clear()

    def example_home_the_robot(self, v):
        """Drive the tool to the fixed 'home' cartesian pose at translation
        speed v (m/s) and wait for the motion to finish."""
        req = PlayCartesianTrajectoryRequest()
        pose = req.input.target_pose
        pose.x = 0.55
        pose.y = 0
        pose.z = 0.30
        pose.theta_x = 180
        pose.theta_y = 0
        pose.theta_z = 90

        speed = CartesianSpeed()
        speed.translation = v
        speed.orientation = 30
        req.input.constraint.oneof_type.speed.append(speed)

        rospy.loginfo("Sending the robot to the home1 pose...")
        try:
            self.play_cartesian_trajectory(req)
        except rospy.ServiceException:
            rospy.logerr("Failed to call home")
        self.pose_done.wait()
        self.pose_done.clear()

    def example_send_the_robot(self, list, v):
        """Drive the tool to a cartesian pose given as
        [x, y, z, theta_x, theta_y, theta_z] at translation speed v (m/s).

        (The parameter is named `list` for backward compatibility with
        existing callers; it shadows the builtin.)
        """
        req = PlayCartesianTrajectoryRequest()
        target = req.input.target_pose
        target.x, target.y, target.z = list[0], list[1], list[2]
        target.theta_x, target.theta_y, target.theta_z = list[3], list[4], list[5]

        speed = CartesianSpeed()
        speed.translation = v
        speed.orientation = 30
        req.input.constraint.oneof_type.speed.append(speed)

        rospy.loginfo("Sending the robot to the  pose...")
        try:
            self.play_cartesian_trajectory(req)
        except rospy.ServiceException:
            rospy.logerr("Failed to call home")
        self.pose_done.wait()
        self.pose_done.clear()

    def example_send_gripper_command(self, value):
        """Command the Robotiq 2F-85 gripper to a position.

        value: 0.0 = fully open ... 1.0 = fully closed.
        """
        finger = Finger()
        finger.finger_identifier = 0
        finger.value = value

        req = SendGripperCommandRequest()
        req.input.gripper.finger.append(finger)
        req.input.mode = GripperMode.GRIPPER_POSITION

        rospy.loginfo("Sending the gripper command...")
        try:
            self.send_gripper_command(req)
        except rospy.ServiceException:
            rospy.logerr("Failed to call SendGripperCommand")
    
    def grasp_det_object(self, goal, cls_nn, v):
        """Move down onto the detected object, close the gripper, then lift.

        goal   : base-frame target; goal[0]/goal[1] are x/y, goal[5] the
                 gripper yaw (theta_z).
        cls_nn : detected class id (unused here; kept for interface
                 compatibility).
        v      : translation speed in m/s.
        """
        # Current commanded tool pose -- reuse its wrist orientation.
        feedback = rospy.wait_for_message("/" + self.robot_name + "/base_feedback", BaseCyclic_Feedback)

        # Pose just above the object at grasping height.
        req2 = PlayCartesianTrajectoryRequest()
        req2.input.target_pose.x = goal[0]
        req2.input.target_pose.y = goal[1] + 0.01
        req2.input.target_pose.z = 0.062
        req2.input.target_pose.theta_x = feedback.base.commanded_tool_pose_theta_x
        req2.input.target_pose.theta_y = feedback.base.commanded_tool_pose_theta_y
        req2.input.target_pose.theta_z = goal[5]

        pose_speed = CartesianSpeed()
        pose_speed.translation = v
        pose_speed.orientation = 30
        req2.input.constraint.oneof_type.speed.append(pose_speed)

        rospy.loginfo("Sending the robot to the grasp pose...")
        try:
            self.play_cartesian_trajectory(req2)
        except rospy.ServiceException:
            rospy.logerr("Failed to call PlayCartesianTrajectory")

        self.pose_done.wait()
        self.pose_done.clear()

        # Close the gripper on the object.
        self.example_send_gripper_command(0.9)
        rospy.sleep(1.0)

        # Lift straight up to a safe height.  The command is issued twice
        # on purpose (the original code repeated it verbatim to make sure
        # the lift completes).
        for _ in range(2):
            fb = rospy.wait_for_message("/" + self.robot_name + "/base_feedback", BaseCyclic_Feedback)
            req2.input.target_pose.x = fb.base.commanded_tool_pose_x
            req2.input.target_pose.y = fb.base.commanded_tool_pose_y
            req2.input.target_pose.z = 0.25
            req2.input.target_pose.theta_x = fb.base.commanded_tool_pose_theta_x
            req2.input.target_pose.theta_y = fb.base.commanded_tool_pose_theta_y
            req2.input.target_pose.theta_z = fb.base.commanded_tool_pose_theta_z

            self.play_cartesian_trajectory(req2)
            self.pose_done.wait()
            self.pose_done.clear()

    def Single_Move_callback(self):
        """Wait for a 'bottle' detection on 'multi_task_det', look up its
        depth, and convert the pixel detection to a base-frame coordinate.

        Returns (cls_n, base): the detected class id and the 4x1
        homogeneous point in the robot base frame.

        Side effect: caches the decoded depth image in the module-level
        global depth_value.
        """
        global depth_value
        Pixel_coord = []

        # Class index table of the detector (kept for reference).
        index_class = ['__background__',
                       'bottle']

        # Accepted class ids; 3 is a sentinel so the loop runs at least once.
        Object_num = [0, 1]
        ObjectNow = 3

        # Poll detections until one of the accepted classes shows up.
        while ObjectNow not in Object_num:
            rospy.loginfo("Waiting for message to arrive ......")
            Pixel_coord = rospy.wait_for_message('multi_task_det', Float64MultiArray)  # gen3_talker
            ObjectNow = int(Pixel_coord.data[3])
        print("grasp:bottle")

        print(Pixel_coord)
        depth_val = rospy.wait_for_message('/camera/depth/image_rect_raw', Image)

        # Detection payload: [u, v, theta, class_id, ...].
        Co = Pixel_coord.data
        print(Co)

        depth_value = bridge.imgmsg_to_cv2(depth_val, "16UC1")
        zz = color_to_depth(depth_value, Co[0], Co[1])
        # Clamp implausibly-near readings to the minimum working distance.
        if zz <= 0.37:
            zz = 0.37
        theta = Co[2]
        cls_n = int(Co[3])
        if cls_n == 6:
            theta = 90

        # Pixel -> camera frame (homogeneous).
        pixels_cood_frame = matrix([[Co[0]], [Co[1]], [1]])
        obj_camera_frame = Cam_trans_fun(color_info, pixels_cood_frame, z=zz)
        # Fixed: this was a Python-2-only print statement.
        print("camera coordinates=:")
        print(obj_camera_frame)

        base = camera_to_base(obj_camera_frame)
        print("base", base)

        return cls_n, base

    def aruco_detecter(self, ids_num):
        """Detect one specific aruco marker (id == ids_num) in the color image.

        Returns (ids, final_trans): ids is [[ids_num]] and final_trans the
        marker position as a 4x1 homogeneous point in the base frame, or
        (None, None) when the requested marker is not visible.

        Side effects: overwrites the module-level globals ids/final_trans
        and shows the annotated frame in an OpenCV window.
        """
        global ids
        global final_trans
        cameraMatrix, distCoeffs = Intrinsics_camera(color_info)

        data = rospy.wait_for_message('/camera/color/image_raw', Image)

        # from message to img
        frame = bridge.imgmsg_to_cv2(data, "bgr8")

        # BGR -> RAY
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # NOTE(review): the BGR frame is passed straight to detectMarkers
        # (grayscale conversion above is commented out); OpenCV accepts
        # either, so this looks intentional -- confirm.
        gray = frame
        # cv2.imshow('yuantu', gray)
        # cv2.waitKey(0)
        # cv2.destroyWindow()
        # Create a dictionary object from one of aruco's predefined dictionaries.
        aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)  #cv2.aruco.DICT_ARUCO_ORIGINAL
        parameters = cv2.aruco.DetectorParameters_create()
    
        # lists of ids and the corners beloning to each id // numpy.ndarray, list
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, 
                                                              aruco_dict, 
                                                              parameters=parameters)
        print(ids)
        aruco.drawDetectedMarkers(gray, corners, ids, borderColor=(0,0,255))
        cv2.imshow('result_id', gray)
        # if cv2.waitKey(25) & 0xFF == ord('q'):
        #     cv2.destroyWindow()
        # cv2.waitKey(0)
        # cv2.destroyWindow()

        #-----------
        # Marker rotation/translation relative to the camera frame (debug path).
        # rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.019, cameraMatrix, distCoeffs)
        #
        # (rvec - tvec).any()  # get rid of that nasty numpy value array error
        # for i in range(rvec.shape[0]):
        #     aruco.drawAxis(gray, cameraMatrix, distCoeffs, rvec[i, :, :], tvec[i, :, :], 0.01)

        # cv2.imshow('result_coord',gray)
        # cv2.waitKey(0)
        # cv2.destroyWindow()
        #------------------

        # Keep only the marker whose id matches the one we were asked for.
        detect = False
        k = ids_num
        if ids is not None:
            for i in range(len(ids)):
                if ids[i] == k:
                    ids = [[k]]
                    corners = [(corners[i])]   # list(array)
                    detect = True
                    break

            if detect:
                # Marker rotation/translation relative to the camera frame
                # (marker side length 0.019 m).
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, 0.019, cameraMatrix, distCoeffs)

                (rvec-tvec).any() # get rid of that nasty numpy value array error
                print("=======================")
                """ Applies perspective transform for given rvec and tvec. """
                R, _ = cv2.Rodrigues(rvec)
                print("-------------------------")
                t = tvec[0].T
                # Assemble the 4x4 camera->marker homogeneous transform.
                TT = np.hstack((R, t)) 
                TT = np.vstack((TT, np.array([0, 0, 0, 1]))) 
                #print(TT)
                # Identity here; placeholder for an extra marker-frame offset.
                transition_mat = np.array([[1, 0, 0, 0],
                                          [0, 1, 0, 0],
                                          [0, 0, 1, 0],
                                          [0, 0, 0, 1]])
                final_mat = np.dot(TT, transition_mat)
                # Translation column -> homogeneous point -> base frame.
                final = final_mat[:, 3]
                final_trans = matrix([[final[0]], [final[1]], [final[2]],[1]])
                final_trans = camera_to_base(final_trans)

            else:
                ids = None
                final_trans = None
        else:
            ids = None
            final_trans = None

        return ids, final_trans

    def aruco_list_detecter(self, final_base):
        """Detect every visible aruco marker and record its base-frame pose.

        final_base: dict {marker_id: 4x1 base-frame point}.  Detected
        markers are added/overwritten in place; when nothing is detected
        the dict is REPLACED by an empty one (previously accumulated
        entries are dropped in that case).  Returns the dict.
        """
        cameraMatrix, distCoeffs = Intrinsics_camera(color_info)

        data = rospy.wait_for_message('/camera/color/image_raw', Image)

        # final_base = {}
        # from message to img
        frame = bridge.imgmsg_to_cv2(data, "bgr8")

        # BGR -> RAY
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = frame
        # Create a dictionary object from one of aruco's predefined dictionaries.
        aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)  # cv2.aruco.DICT_ARUCO_ORIGINAL
        parameters = cv2.aruco.DetectorParameters_create()
        # lists of ids and the corners beloning to each id // numpy.ndarray, list
        corners, ids, rejectedImgPoints = aruco.detectMarkers(gray,
                                                              aruco_dict,
                                                              parameters=parameters)
        aruco.drawDetectedMarkers(gray, corners, ids, borderColor=(0, 0, 255))
        # cv2.imshow('result_id', gray)
        # if cv2.waitKey(25) & 0xFF == ord('q'):
        #     cv2.destroyWindow()
        # cv2.waitKey(2000)
        # cv2.destroyAllWindows()

        # print("ids:",ids)
        if ids is not None:
            for i in range(len(ids)):
                # Pose of marker i relative to the camera (side length 0.019 m).
                rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners[i], 0.019, cameraMatrix, distCoeffs)
                (rvec - tvec).any()  # get rid of that nasty numpy value array error
                print("=======================")
                """ Applies perspective transform for given rvec and tvec. """
                R, _ = cv2.Rodrigues(rvec)
                print("-------------------------")
                t = tvec[0].T
                # 4x4 camera->marker homogeneous transform.
                TT = np.hstack((R, t))
                TT = np.vstack((TT, np.array([0, 0, 0, 1])))
                # Identity placeholder for an extra marker-frame offset.
                transition_mat = np.array([[1, 0, 0, 0],
                                           [0, 1, 0, 0],
                                           [0, 0, 1, 0],
                                           [0, 0, 0, 1]])
                final_mat = np.dot(TT, transition_mat)
                final = final_mat[:, 3]
                final_1 = matrix([[final[0]], [final[1]], [final[2]], [1]])
                final_trans = camera_to_base(final_1)
                final_ends_trans = camera_to_end(final_1)
                final_base[ids[i][0]] = final_trans  # store the final transformation in the dictionary
                # final_end[ids[i][0]] = final_ends_trans
        else:
            final_base = {}
            # final_end = {}


        return final_base       #, final_end

    def aruco_place_find(self, id_list, v, final_base):
        """Sweep the camera through a few fixed viewpoints until the first
        three markers in id_list have all been seen.

        id_list    : marker ids to find (only the first three are checked).
        v          : translation speed (m/s) for the search moves.
        final_base : dict {marker_id: base-frame point}, updated by
                     aruco_list_detecter at each viewpoint.

        Returns the (possibly updated) final_base dict.
        NOTE(review): if the markers are never all seen, this loops over
        the viewpoints forever -- there is no give-up path.
        """
        # if thetaa < 90:
        #     pl_theta = 0
        # else:
        #     pl_theta = 180
        flag = True
        GG = False
        while flag:
            # if all(id in final_base for id in id_list):
            # This is the success condition: all three wanted ids seen.
            '''就是这个判断条件'''
            if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
                GG = True
                break
            ## Prepare and send pose 1
            # Go to detection viewpoint 1.  Below we set translation speed,
            # angular speed, x/y/z and the three rotations; change the last
            # theta_ to rotate the gripper.
            '''前往检测位置1，以下分别设置平移速度，角速度，x,y,z,以及三个旋转角度，需要旋转夹爪角度就改变最后一个theta_角度'''
            my_cartesian_speed = CartesianSpeed()
            my_cartesian_speed.translation = v  # m/s    0.1
            my_cartesian_speed.orientation = 90  # deg/s

            my_constrained_pose = ConstrainedPose()
            my_constrained_pose.constraint.oneof_type.speed.append(my_cartesian_speed)

            my_constrained_pose.target_pose.x = 0.49
            my_constrained_pose.target_pose.y = 0.0
            my_constrained_pose.target_pose.z = 0.43
            my_constrained_pose.target_pose.theta_x = 90
            my_constrained_pose.target_pose.theta_y = 0
            my_constrained_pose.target_pose.theta_z = 90  # gripper rotation angle

            req = ExecuteActionRequest()
            req.input.oneof_action_parameters.reach_pose.append(my_constrained_pose)

            req.input.name = "pose1"
            req.input.handle.identifier = 1001

            rospy.loginfo("Sending pose 1...")
            try:
                self.execute_action(req)
            except rospy.ServiceException:
                rospy.logerr("Failed to send pose 1")
            else:
                rospy.loginfo("Waiting for pose 1 to finish...")

            self.pose_done.wait()
            self.pose_done.clear()
            rospy.sleep(3.0)
            # aruco_list_detecter scans from the current viewpoint and stores
            # each detected marker in final_base (key: id, value: pose).
            '''aruco_list_detecter函数是检测当前位置的Aruco码，并保存检测到的位置和id到字典final_base，id为键名，对应的位置为键值'''
            final_base = self.aruco_list_detecter(final_base)

            # Re-check: did this viewpoint complete the set of three ids?
            '''这个即为判断条件，判断字典final_base的键名是否包含了id_list的前三个值，即是否检测到了我们需要的所有目标'''
            if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
                GG = True
                break
            # The same pattern repeats for viewpoints 2 and 3; viewpoints
            # 4-7 below are kept commented out -- uncomment to add more.
            '''后面重复到位置2，3，4，5，6，不需要这么位置就注释，需要再添加位置，两个横杠里面是一个位置检测'''

            # ——————————————————————————————————————————————————————————————
            ## Prepare and send pose 2
            req.input.handle.identifier = 1001
            req.input.name = "pose2"

            my_constrained_pose.target_pose.x = 0.49
            my_constrained_pose.target_pose.y = 0.1
            req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            rospy.loginfo("Sending pose 2...")
            try:
                self.execute_action(req)
            except rospy.ServiceException:
                rospy.logerr("Failed to send pose 2")
            else:
                rospy.loginfo("Waiting for pose 2 to finish...")

            self.pose_done.wait()
            self.pose_done.clear()
            rospy.sleep(2.0)
            final_base = self.aruco_list_detecter(final_base)

            if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
                GG = True
                break
            # ——————————————————————————————————————————————————————————————

            ## Prepare and send pose 3
            req.input.handle.identifier = 1001
            req.input.name = "pose3"

            my_constrained_pose.target_pose.x = 0.49
            my_constrained_pose.target_pose.y = -0.1
            # my_constrained_pose.target_pose.theta_x = 150
            req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            rospy.loginfo("Sending pose 3...")
            try:
                self.execute_action(req)
            except rospy.ServiceException:
                rospy.logerr("Failed to send pose 3")
            else:
                rospy.loginfo("Waiting for pose 3 to finish...")

            self.pose_done.wait()
            self.pose_done.clear()
            rospy.sleep(2.0)
            final_base = self.aruco_list_detecter(final_base)

            if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
                GG = True
                break

            # ## Prepare and send pose 4  center
            # req.input.handle.identifier = 1001
            # req.input.name = "pose4"

            # my_constrained_pose.target_pose.x = 0.50
            # my_constrained_pose.target_pose.y = 0
            # req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            # rospy.loginfo("Sending pose 4...")
            # try:
            #     self.execute_action(req)
            # except rospy.ServiceException:
            #     rospy.logerr("Failed to send pose 4")
            # else:
            #     rospy.loginfo("Waiting for pose 4 to finish...")

            # self.pose_done.wait()
            # self.pose_done.clear()
            # rospy.sleep(2.0)
            # final_base = self.aruco_list_detecter(final_base)

            # if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
            #     GG = True
            #     break

            # # ## Prepare and send pose 5
            # req.input.handle.identifier = 1001
            # req.input.name = "pose5"

            # my_constrained_pose.target_pose.x = 0.58
            # my_constrained_pose.target_pose.y = 0.1
            # req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            # rospy.loginfo("Sending pose 5...")
            # try:
            #     self.execute_action(req)
            # except rospy.ServiceException:
            #     rospy.logerr("Failed to send pose 5")
            # else:
            #     rospy.loginfo("Waiting for pose 5 to finish...")

            # self.pose_done.wait()
            # self.pose_done.clear()
            # rospy.sleep(2.0)
            # final_base = self.aruco_list_detecter(final_base)

            # if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
            #     GG = True
            #     break

            # ## Prepare and send pose 6
            # req.input.handle.identifier = 1001
            # req.input.name = "pose6"

            # my_constrained_pose.target_pose.x = 0.58
            # my_constrained_pose.target_pose.y = -0.1
            # # my_constrained_pose.target_pose.z = 0.25
            # # my_constrained_pose.target_pose.theta_x = 180
            # req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            # rospy.loginfo("Sending pose 6...")
            # try:
            #     self.execute_action(req)
            # except rospy.ServiceException:
            #     rospy.logerr("Failed to send pose 6")
            # else:
            #     rospy.loginfo("Waiting for pose 6 to finish...")

            # self.pose_done.wait()
            # self.pose_done.clear()
            # final_base = self.aruco_list_detecter(final_base)

            # if id_list[0] in final_base and id_list[1] in final_base and id_list[2] in final_base:
            #     GG = True
            #     break

            # ## Prepare and send pose 7
            # req.input.handle.identifier = 1001
            # req.input.name = "pose7"

            # my_constrained_pose.target_pose.x = 0.2
            # my_constrained_pose.target_pose.y = 0.3

            # req.input.oneof_action_parameters.reach_pose[0] = my_constrained_pose

            # rospy.loginfo("Sending pose 7...")
            # try:
            #     self.execute_action(req)
            # except rospy.ServiceException:
            #     rospy.logerr("Failed to send pose 7")
            # else:
            #     rospy.loginfo("Waiting for pose 7 to finish...")

            # self.pose_done.wait()
            # self.pose_done.clear()
            # ids, final_trans  = self.aruco_detecter(id_num)
            # if ids is not None:
            #     GG = True
            #     break
            # else:
            #     pass
            # flag = False
            # rospy.loginfo("\033[1;32mfind process finished, but no goal !!!")

        return final_base

    def final_place_obj(self, final_t):
        """Lower the tool to the drop-off point (basket) and wait for the
        motion to finish; keeps the wrist orientation the arm already has."""
        feedback = rospy.wait_for_message("/" + self.robot_name + "/base_feedback", BaseCyclic_Feedback)

        req = PlayCartesianTrajectoryRequest()
        target = req.input.target_pose
        target.x = final_t[0] + 0.015
        target.y = final_t[1]
        target.z = 0.07
        target.theta_x = feedback.base.commanded_tool_pose_theta_x
        target.theta_y = feedback.base.commanded_tool_pose_theta_y
        target.theta_z = feedback.base.commanded_tool_pose_theta_z

        rospy.loginfo("Sending the robot to the finally place...")
        try:
            self.play_cartesian_trajectory(req)
        except rospy.ServiceException:
            rospy.logerr("Failed to call finally place")
        self.pose_done.wait()
        self.pose_done.clear()

    def place_fix_place(self, cls_nn, goal, v):
        """Drive the tool to a fixed place pose, then set the gripper to 0.32.

        cls_nn: detected class id; only ids 0-9 take the position from `goal`.
        goal:   sequence whose [0..2] are x/y/z and [5] is the yaw (theta_z).
        v:      Cartesian translation speed limit passed to the controller.
        """
        request = PlayCartesianTrajectoryRequest()
        target = request.input.target_pose
        if cls_nn in range(10):
            target.x = goal[0]
            target.y = goal[1]
            target.z = goal[2]
        # Tool pointing straight down (roll 180, pitch 0); yaw comes from goal.
        target.theta_x = 180     # 180
        target.theta_y = 0       #  0
        target.theta_z = goal[5]     # 90

        speed_limit = CartesianSpeed()
        speed_limit.translation = v
        speed_limit.orientation = 30
        request.input.constraint.oneof_type.speed.append(speed_limit)

        # Call the service
        rospy.loginfo("Sending the robot to the home2 pose...")
        try:
            self.play_cartesian_trajectory(request)
        except rospy.ServiceException:
            rospy.logerr("Failed to call home")
        self.pose_done.wait()
        self.pose_done.clear()

        self.example_send_gripper_command(0.32)  # grasp
        rospy.sleep(1.0)

    def move_delta(self, goal):
        """Translate the tool by (dx, dy, dz) = goal relative to the current
        commanded pose, keeping the current orientation, then set the gripper
        to 0.5."""
        # The latest commanded tool pose is the reference for the relative move.
        fb = rospy.wait_for_message("/" + self.robot_name + "/base_feedback", BaseCyclic_Feedback)
        request = PlayCartesianTrajectoryRequest()
        pose = request.input.target_pose
        pose.x = fb.base.commanded_tool_pose_x + goal[0]
        pose.y = fb.base.commanded_tool_pose_y + goal[1]
        pose.z = fb.base.commanded_tool_pose_z + goal[2]
        pose.theta_x = fb.base.commanded_tool_pose_theta_x
        pose.theta_y = fb.base.commanded_tool_pose_theta_y
        pose.theta_z = fb.base.commanded_tool_pose_theta_z

        speed_limit = CartesianSpeed()
        speed_limit.translation = 0.5
        speed_limit.orientation = 30
        request.input.constraint.oneof_type.speed.append(speed_limit)

        # Call the service
        rospy.loginfo("Sending the robot to the home2 pose...")
        try:
            self.play_cartesian_trajectory(request)
        except rospy.ServiceException:
            rospy.logerr("Failed to call home")
        self.pose_done.wait()
        self.pose_done.clear()

        self.example_send_gripper_command(0.5)



    def hand_grasp_bottle(self, goal):
        """Approach and reach a bottle in two moves.

        First levels the tool at x = 0.50 (approach waypoint at the target
        height/orientation; the original note says 0.55 is the nearest
        position — TODO confirm), then advances to the target x.
        goal: (x, y, z) in the base frame.
        """
        x, y, z = goal[0], goal[1], goal[2]
        # Same height and orientation (90, 0, 90) for both waypoints.
        for waypoint_x in (0.50, x):
            self.example_send_the_robot([waypoint_x, y, z, 90, 0, 90], 0.2)



    def hand_place_bottle(self, zz):
        """Place the held bottle at height zz, release it, retract, and return
        to the joint-space home posture."""
        base_x = 0.47
        y = 0
        safe_z = 0.48
        reach_x = base_x + 0.23

        # Move out over the place position, then lower to the drop height.
        for wx, wz in ((base_x, safe_z), (reach_x, safe_z), (reach_x, zz)):
            self.example_send_the_robot([wx, y, wz, 90, 0, 90], 0.2)

        self.example_send_gripper_command(0.0)  # release
        rospy.sleep(1)

        # Lift back to the safe height and retract.
        for wx in (reach_x, 0.55):
            self.example_send_the_robot([wx, y, safe_z, 90, 0, 90], 0.2)

        # Joint-space home posture (degrees).
        home = [0.792, 354.793, 179.659, 219.409, 0.631, 45.359, 89.527]
        self.example_send_joint_angles(home)


    def main(self):
        """Top-level task loop.

        Waits for the car to reach the grasp position, detects the requested
        ArUco marker, grasps the bottle, tells the car to drive on, then
        places the bottle once the car reports arrival.  Runs one full
        grasp-and-place cycle and exits.
        """
        kung = True
        #*******************************************************************************
        # Make sure to clear the robot's faults else it won't move if it's already in fault
        self.example_clear_faults()

        #*******************************************************************************
        # Set the reference frame to "Mixed"
        self.example_set_cartesian_reference_frame()

        #*******************************************************************************
        # Subscribe to ActionNotification's from the robot to know when a cartesian pose is finished
        self.example_subscribe_to_a_robot_notification()

        #*******************************************************************************
        self.get_common_parameter()

        pub = rospy.Publisher("arm_statue", String, queue_size=10)

        # home position (joint angles, degrees)
        home = [0.792, 354.793, 179.659, 219.409, 0.631, 45.359, 89.527]
        self.example_send_joint_angles(home)

        self.example_send_gripper_command(0.0)  # open gripper

        while kung:
            # *******************************************************************************
            # Wait for the car to report it has reached grasp position 1.
            print("等待接收小车位置1的消息")
            msg = rospy.wait_for_message("car_statue", Float32MultiArray)
            re_msg = msg.data
            print("re_msg", re_msg)

            # Message layout: (first_goal, second_goal, grasp_msg_id) as floats.
            first_goal, second_goal, grasp_msg_id = map(int, re_msg)
            print("first_goal", first_goal)

            if grasp_msg_id in [1, 2, 3, 4, 5, 6]:
                print("运行接下来的抓取任务")
                rospy.sleep(2)
                # Go to the detection pose, detect, grasp, and retract.
                # Detection height depends on the requested shelf level.
                z = 0.45 if first_goal == 3 else 0.43

                # Detection pose
                self.example_send_the_robot([0.49, 0, z, 90, 0, 90], 0.2)

                # --aruco detect: the same marker id is requested three times
                # so the detector must confirm it before returning a coordinate.
                final_base = {}
                marker_id = grasp_msg_id
                id_list = [marker_id, marker_id, marker_id]
                print("id_list", id_list)

                id_coord = self.aruco_place_find(id_list, 0.2, final_base)
                goal = None
                if marker_id in id_coord:
                    print("检测完成")
                    x = float(id_coord[marker_id][0])
                    y = float(id_coord[marker_id][1])
                    z = float(id_coord[marker_id][2])
                    # Clamp the grasp height to the shelf's minimum.
                    z_min = 0.42 if first_goal == 1 else 0.37
                    z = max(z, z_min)
                    print(x, y, z)
                    goal = [x, y, z]
                else:
                    print("No goal")

                if goal is None:
                    # BUG FIX: the original fell through with `goal` undefined
                    # and crashed with NameError in hand_grasp_bottle.  Return
                    # home and wait for the next command instead.
                    rospy.logerr("Detection failed, no grasp goal; retrying")
                    self.example_send_joint_angles(home)
                    continue

                self.hand_grasp_bottle(goal)

                # Close the gripper (command sent twice as a best-effort retry).
                self.example_send_gripper_command(0.5)
                self.example_send_gripper_command(0.5)
                print("抓取成功")
                rospy.sleep(1)

                self.move_delta([0, 0, 0])

                # Lift to a safe height, then go back to the home posture.
                self.example_send_the_robot([goal[0], goal[1], 0.48, 90, 0, 90], 0.2)
                self.example_send_joint_angles(home)

            # *******************************************************************************
            # Grasp done: tell the car to drive to the place position.
            print("抓取任务结束，向小车发送消息，小车前往放置点")
            send_data = String()
            send_data.data = "Success grasp the object"
            for _ in range(19):  # publish for ~2 s
                pub.publish(send_data)
                rospy.sleep(0.1)

            # *******************************************************************************
            # Wait for the car to report it has arrived at place position 2.
            print("等待接收小车位置2的消息")
            msg2 = rospy.wait_for_message("car_statue", Float32MultiArray)
            re_msg2 = msg2.data
            print(re_msg2)
            if re_msg2[0] == 1.0:
                print("运行接下来的放置任务")
                rospy.sleep(2)
                # Place height depends on the requested shelf level.
                zz = 0.42 if second_goal == 1 else 0.37
                print("zz", zz)
                self.hand_place_bottle(zz)

            kung = False

if __name__ == '__main__':
    # Script entry point: build the arm controller and run the task loop.
    GenArmMovement().main()




  



