#!/usr/bin/env python
# coding=UTF-8
import pyrealsense2
import rospy
import sys
import message_filters
import tf2_ros
import math
import moveit_commander
import geometry_msgs.msg
from tf.transformations import quaternion_from_euler
from cv_bridge import CvBridge
from std_msgs.msg import String, Float32
from sensor_msgs.msg import Image, CameraInfo
from darknet_ros_msgs.msg import BoundingBoxes, ObjectCount
from handsfree_arm_msgs.msg import arm_joints_position, arm_state
from geometry_msgs.msg import PoseStamped

# Approach:
# 1. Subscribe to /patrol/simple_status_feeback to detect whether the mobile base
#    has reached the target point — callbackStatus()
# 2. Publish /patrol/control_cmd to pause and resume the mobile base — callbackStatus()
# 3. The camera sits on the arm's end effector, so its field of view is limited;
#    give the end effector a motion path so the camera can see as much as possible.
# 4. The new arm code adds a soft stop for the arm: once an object is detected,
#    publish /patrol/control_cmd again to stop the arm.
#    (Problem 1: YOLO may fire while the object is only half inside the frame,
#     which is not accurate enough.)
#        [Idea 1: plan an arm move so the object is centred in the camera image]
#        [Idea 2: delay the arm's pause command (near a boundary the delayed move
#         may instead push the object out of the frame)]
#        [Idea 3: use the image borders to steer the arm]
# 5. Get the depth distance and camera intrinsics from the depth frame
# 6. Publish the object's tf
# 7. Run MoveIt inverse kinematics to decide whether the arm can reach it
# 8. If the solve succeeds, go there; otherwise move on to the next waypoint


def averageList(ls):
    """Return the arithmetic mean of *ls*.

    ``ls`` must be non-empty, otherwise ZeroDivisionError is raised
    (same as the original behaviour).
    """
    # Use the built-in sum() instead of a manual accumulator that shadowed it.
    return sum(ls) / len(ls)


# Collect usable (non-zero) depth samples around the box centre
def takeDepthPoints(w, centre_x, centre_y, depth_img):
    """Sample the horizontal line through (centre_x, centre_y).

    Walks outwards up to ``w - 1`` pixels on each side and keeps every
    non-zero depth value, right-side sample before left-side sample at
    each offset (same ordering as the original implementation).
    """
    row = depth_img[centre_y]
    samples = []
    for offset in range(1, w):
        right = int(row[centre_x + offset])
        left = int(row[centre_x - offset])
        if right != 0:
            samples.append(right)
        if left != 0:
            samples.append(left)
    return samples


# Joint-space arm motion (search path or a single target)
def armMoveFindPtah(joint=None):
    """Publish joint targets to the arm controller.

    With ``joint=None`` the whole predefined search path
    (``arm_move_paths``) is played; otherwise only the given joint
    vector is sent. Each target is published 5 times at ``rate``.
    """
    pub = rospy.Publisher('/handsfree/arm/set_arm_joints_pos', arm_joints_position, queue_size=10)
    targets = arm_move_paths if joint is None else [joint]
    for target in targets:
        for _ in range(5):
            pub.publish(target)
            rate.sleep()


# Depth fallback shared by both measurement branches: take the minimum
# non-zero depth on the horizontal line through the box centre (useful for
# hollow objects such as pull-rings, where the centre pixel has no depth).
def _takeLineMinDepth(obj_w, centre_x, centre_y, depth_img, verbose_fail):
    """Return the minimum line depth in metres, or None when unusable.

    ``verbose_fail`` adds the extra failure print used by the
    "too close" branch of getObjCoord.
    """
    print('以物体框的中心点画一条长度等同物体框宽度的平行线, 取该线上的所有点的最小值, 且不含 0')
    depth_points = takeDepthPoints(obj_w, centre_x, centre_y, depth_img)
    if len(depth_points) > 0:
        print('点形成的列表长度大于零')
        min_depth_distance = min(depth_points)
        if min_depth_distance < 500:
            print('最小点小于 500 mm, 正确')
            return min_depth_distance / 1000.0
        print('最小点大于 500 mm, 错误')
        if verbose_fail:
            print('.......近的过分')
        return None
    print('点形成的列表长度小于零')
    return None


# Get the target object's camera-frame coordinate
def getObjCoord(obj_list, depth_img):
    """Deproject each detected box centre to a 3D camera-frame point.

    Returns the single coordinate when one object was measured, the
    nearest (smallest z) when several were, and None when nothing
    usable could be measured. Only runs when ``odd_obj_flag`` is False
    (oddly-shaped objects are skipped, matching the original logic).
    """
    obj_coords = []
    for obj_ in obj_list:
        # Pixel centre of the detection box
        obj_w, obj_h = (obj_.xmax - obj_.xmin) / 2, (obj_.ymax - obj_.ymin) / 2
        obj_centre_x, obj_centre_y = obj_.xmin + obj_w, obj_.ymin + obj_h
        if not odd_obj_flag:
            depth_distance = (depth_img[obj_centre_y][obj_centre_x]) / 1000.0
            if depth_distance == 0:
                # Centre pixel has no depth (too close / hollow) — fall back.
                print('距离过近.......')
                depth_distance = _takeLineMinDepth(obj_w, obj_centre_x, obj_centre_y,
                                                   depth_img, verbose_fail=True)
            else:
                # Even with a centre reading, use the line minimum
                # (e.g. pull-rings whose centre depth is unreliable).
                depth_distance = _takeLineMinDepth(obj_w, obj_centre_x, obj_centre_y,
                                                   depth_img, verbose_fail=False)
            if depth_distance is None:
                return
            obj_coord = pyrealsense2.rs2_deproject_pixel_to_point(intrin=depth_intrin,
                                                                  pixel=[obj_centre_x, obj_centre_y],
                                                                  depth=depth_distance)
            obj_coords.append(obj_coord)

    if not obj_coords:
        return
    if len(obj_coords) == 1:
        return obj_coords[0]
    # Several candidates: pick the one closest to the camera (min z).
    print(obj_coords)
    depths = [coord[2] for coord in obj_coords]
    return obj_coords[depths.index(min(depths))]


# Inverse kinematics: plan (and, if reachable, execute) a move to the pose
def moveitIk(x, y, z, qx, qy, qz, qw):
    """Plan a MoveIt motion of the end effector to the given pose.

    Position is in metres and orientation a quaternion, both expressed
    in ``reference_frame``. Returns True when planning produced a
    non-empty trajectory (execution is started), False when the pose is
    unreachable.
    """
    # Make sure the arm controller is running before planning.
    arm_control_pub = rospy.Publisher('/patrol/control_cmd', String, queue_size=10)
    for i in range(10):
        arm_control_pub.publish('Arm start')
        rate.sleep()

    target_pose = PoseStamped()
    target_pose.header.frame_id = reference_frame
    target_pose.header.stamp = rospy.Time.now()
    target_pose.pose.position.x = x
    target_pose.pose.position.y = y
    target_pose.pose.position.z = z
    target_pose.pose.orientation.x = qx
    target_pose.pose.orientation.y = qy
    target_pose.pose.orientation.z = qz
    target_pose.pose.orientation.w = qw
    # Re-publish the target a few times so late subscribers still see it.
    for i in range(10):
        target_pose_pub.publish(target_pose)
        rospy.sleep(0.01)

    arm.set_start_state_to_current_state()
    arm.set_pose_target(target_pose, end_effector_link)
    traj = arm.plan()
    # BUG FIX: the original called execute() even when planning failed
    # (empty trajectory). Only execute a trajectory that has waypoints.
    if len(traj.joint_trajectory.points) == 0:
        print('路径规划失败, 机械臂不可达!')
        return False
    arm.execute(traj)
    print('路径规划成功, 机械臂前往中!')
    return True


# Move on to the next patrol waypoint
def nextPoint():
    """Park the arm, resume the patrol chassis, and reset shared state."""
    # Return the arm to its initial viewing pose first.
    armMoveFindPtah(arm_move_path_01)
    rospy.sleep(1)
    resume_pub = rospy.Publisher('/patrol/control_cmd', String, queue_size=10)
    rate.sleep()
    # Repeat the resume command so the chassis is sure to receive it.
    for _ in range(5):
        resume_pub.publish('Patrol_Start')
        rate.sleep()
    init()


# Waypoint arrival: stop the chassis, start the arm (steps 1 and 2) ----------
def callbackStatus(status):
    """Chassis status callback.

    Ignores "Moving to ..." progress messages; on 'Load' (waypoint
    reached) it pauses the patrol, starts the arm, and raises
    ``arrive_flag`` so the vision callbacks begin working.
    """
    global arrive_flag
    global rate
    print('--')
    print('机器人状态：{}'.format(status.data))

    # Still travelling towards the waypoint — nothing to do yet.
    if 'Moving to' in status.data:
        print('机器人正在移动到 {}'.format(status.data[-1]))
        return

    # Only 'Load' means the chassis has arrived.
    if status.data != 'Load':
        return

    control_pub = rospy.Publisher('/patrol/control_cmd', String, queue_size=10)
    print('到达目标点')
    arrive_flag = True
    rate.sleep()
    # Repeat the commands so they are reliably received.
    for _ in range(5):
        control_pub.publish('Patrol_Pause')
        control_pub.publish('Arm start')
        rate.sleep()


# Step 3: has the arm finished its search motion?
def callbackArmHaveMove(arm_posture):
    """Arm state callback.

    Records the first six joint angles; while the search motion is
    active, compares them with the last search-path waypoint and — once
    they stay within ``min_diff_radian`` for ``end_find_fp`` frames and
    no object has been found — marks the search motion as complete.
    """
    global min_diff_radian
    global arm_move_arrive_flag
    global arm_move_flag
    global end_find_fp
    global arm_joints_radian
    arm_joints_radian = list(arm_posture.joints.position)[0:6]
    if not arm_move_flag:
        return

    # Per-joint distance to the final search waypoint.
    diffs = [abs(arm_joints_radian[i] - arm_move_paths[-1][i]) for i in range(6)]
    if max(diffs) < min_diff_radian:
        end_find_fp -= 1
        if not find_obj_flag and end_find_fp == 0:
            arm_move_arrive_flag = True
            print('机械臂完成移动查看')
            arm_move_flag = False


# Steps 3-5: decide whether anything at all was detected
def callbackObjectCount(obj_count):
    """YOLO object-count callback.

    Only active after the chassis has stopped and before the target has
    been found. On repeated empty frames it starts the arm search path;
    once the search finishes with no target, it moves to the next point.
    """
    global find_count_fp
    global arm_move_flag
    global arrive_flag
    if not arrive_flag:  # chassis has not stopped yet
        return
    if find_obj_flag:  # target already found
        return
    if obj_count.count != 0:  # something is in view; the box callback handles it
        return

    if arm_move_arrive_flag:  # the arm search path has finished
        print('机械臂查找路径执行完毕, 没有找到目标物体, 开始前往下一个点')
        arrive_flag = True
        nextPoint()
        return

    print('没有检测到任何物体, 开始移动机械臂查找目标物体.')
    find_count_fp -= 1  # start the arm search after N empty frames
    if find_count_fp == 0 and not arm_move_flag:  # nothing seen in the initial pose
        armMoveFindPtah()  # run the arm search path
        arm_move_flag = True


# Find the target object via YOLO (synchronized depth image + camera info + boxes)
def callbackYoloFind(depth_image, depth_info, yolo_objects):
    """Main detection pipeline, run for every synchronized message triple.

    Phases, driven by module-level flags:
      1. Fill the realsense intrinsics once from the first CameraInfo.
      2. While the chassis is stopped and the target is not yet confirmed,
         filter YOLO boxes by class/confidence and compute the object's
         camera-frame coordinate (single frame, or averaged over frames
         when ``handle_fps_flag`` is set).
      3. Once found, publish the object's TF, look it up in
         ``arm_base_link`` and hand the pose to MoveIt; on success run
         the spray routine, then continue to the next waypoint.
    """
    global arrive_flag
    global find_count_fp
    global arm_move_flag
    global find_obj_flag
    global set_depth_intrin_flag
    global handle_fps_flag
    global handle_count_fps
    global handle_coords
    global handle_coord
    global run_moveit_ik_flag
    global get_camera_pos_flag
    if set_depth_intrin_flag:
        # One-time setup of the pyrealsense2 intrinsics from CameraInfo.
        depth_d = list(depth_info.D)
        depth_k = list(depth_info.K)
        # NOTE(review): width/height are cross-assigned (width <- info.height);
        # this looks swapped — confirm against the camera driver before changing.
        depth_intrin.width = depth_info.height
        depth_intrin.height = depth_info.width
        depth_intrin.coeffs = depth_d
        depth_intrin.fx = depth_k[0]
        depth_intrin.fy = depth_k[4]
        depth_intrin.model = rs_model
        depth_intrin.ppx = depth_k[2]
        depth_intrin.ppy = depth_k[5]
        set_depth_intrin_flag = False

    if not arrive_flag:  # chassis has not stopped at a waypoint yet
        return
    else:
        if not find_obj_flag:  # target object not confirmed yet
            obj_list = []
            for obj in yolo_objects.bounding_boxes:  # every detection in this frame
                if obj.Class == obj_name:  # only the class we are looking for
                    if obj.probability >= obj_min_probability:  # confidence above the configured minimum
                        obj_list.append(obj)  # several instances may be present
                    else:
                        print('识别到 {}, 识别率 {} < {}'.format(obj_name, obj.probability, obj_min_probability))
                else:
                    continue

            len_obj_list = len(obj_list)

            if len_obj_list == 0:  # nothing usable detected in this frame
                print('等待 {} 进入相机视野, 或识别率过低..'.format(obj_name))
                find_count_fp -= 1  # after N empty frames, start the arm search
                if find_count_fp == 0 and not arm_move_flag:  # nothing seen from the initial pose
                    armMoveFindPtah()  # run the arm search path
                    arm_move_flag = True
                else:  # arm already searched and still nothing found
                    if arm_move_arrive_flag:  # the search path has finished
                        print('机械臂查找路径执行完毕, 没有找到目标物体, 开始前往下一个点')
                        nextPoint()
                        return
                return
            else:
                try:
                    # The depth image must already be registered (aligned) to
                    # the colour frame YOLO runs on.
                    depth_img = CvBridge().imgmsg_to_cv2(depth_image, "16UC1")
                except Exception as e:
                    print e
                    return
                else:
                    if arm_move_flag:  # stop the arm search now that we have a detection
                        arm_control_pub = rospy.Publisher('/patrol/control_cmd', String, queue_size=10)
                        for i in range(10):
                            arm_control_pub.publish('Arm stop')
                            rate.sleep()
                        arm_move_flag = False
                    print('检测出 {} 个 {}'.format(len_obj_list, obj_name))

                    if not handle_fps_flag:  # single-frame mode: use this frame's coordinate directly
                        handle_coord = getObjCoord(obj_list, depth_img)
                        find_obj_flag = True
                    else:  # multi-frame mode: accumulate several frames first
                        if handle_count_fps > 0:
                            handle_coords.append(getObjCoord(obj_list, depth_img))
                            handle_count_fps -= 1
                            return

                        if handle_count_fps == 0:  # all frames collected — reduce them
                            coords_x, coords_y, coords_z = [], [], []
                            # NOTE(review): despite its name this counts the
                            # UNUSABLE (None) entries, not the usable ones.
                            usable_coords_len = handle_coords.count(None)

                            if len(handle_coords) - usable_coords_len < 3:
                                print('可能是摄像头或物体抖动, 物体太近或太远, 导致帧内可用数据长度过短.')
                                nextPoint()
                                return

                            else:
                                for i in range(usable_coords_len):  # drop the None entries
                                    handle_coords.remove(None)

                                for coord in handle_coords:
                                    coords_x.append(coord[0])
                                    coords_y.append(coord[1])
                                    coords_z.append(coord[2])
                                # Trim one extreme depth sample at each end,
                                # then average what remains.
                                max_i = coords_z.index(max(coords_z))
                                coords_x.pop(max_i)
                                coords_y.pop(max_i)
                                coords_z.pop(max_i)
                                min_i = coords_z.index(min(coords_z))
                                coords_x.pop(min_i)
                                coords_y.pop(min_i)
                                coords_z.pop(min_i)
                                handle_coord = [averageList(coords_x), averageList(coords_y), averageList(coords_z)]
                                find_obj_flag = True
        else:
            print('找到目标物体, 发布该物体 tf')
            global arm_base_camera
            if not get_camera_pos_flag:
                # Cache the camera pose relative to the arm base once.
                try:
                    arm_base_camera = tfBuffer.lookup_transform('arm_base_link', 'camera_link', rospy.Time())
                    get_camera_pos_flag = True
                except Exception as e:
                    print(e)
                    return
            else:
                obj_tf.header.stamp = rospy.Time.now()
                # Camera-frame [x, y, z] mapped into arm_base_link as
                # (z, -x, -y) plus the camera offset — assumes the standard
                # optical-axis convention; confirm against the camera frames.
                obj_tf.transform.translation.x = handle_coord[2] + arm_base_camera.transform.translation.x
                obj_tf.transform.translation.y = - handle_coord[0] + arm_base_camera.transform.translation.y
                obj_tf.transform.translation.z = - handle_coord[1] + arm_base_camera.transform.translation.z
                if end_parallel_flag:  # keep the end effector parallel to the ground
                    obj_tf.transform.rotation.x = 0
                    obj_tf.transform.rotation.y = 0
                    obj_tf.transform.rotation.z = 0
                    obj_tf.transform.rotation.w = 1
                else:  # only pitch and yaw change, derived from the x/y/z offsets
                    qx, qy, qz, qw = quaternion_from_euler(0, math.atan(handle_coord[1] / handle_coord[2]), math.atan(- handle_coord[0] / handle_coord[2]))
                    obj_tf.transform.rotation.x = qx
                    obj_tf.transform.rotation.y = qy
                    obj_tf.transform.rotation.z = qz
                    obj_tf.transform.rotation.w = qw
                br.sendTransform(obj_tf)

                rospy.sleep(0.33)
                try:
                    coord = tfBuffer.lookup_transform('arm_base_link', obj_name+'_link', rospy.Time())
                except Exception as e:
                    print(e)
                    return
                else:

                    x = coord.transform.translation.x
                    y = coord.transform.translation.y
                    z = coord.transform.translation.z
                    qx = coord.transform.rotation.x
                    qy = coord.transform.rotation.y
                    qz = coord.transform.rotation.z
                    qw = coord.transform.rotation.w

                    global moveit_ik_flag
                    if not run_moveit_ik_flag:  # IK/planning not attempted yet
                        moveit_ik_flag = moveitIk(x, y, z, qx, qy, qz, qw)
                        rospy.sleep(5)  # give MoveIt time to run the plan
                        run_moveit_ik_flag = True
                    else:
                        if moveit_ik_flag is not None and moveit_ik_flag:
                            print('运动规划成功')
                            successMoveit()
                            nextPoint()
                            print('机器人前往下一个点')
                            return
                        else:
                            print('运动规划失败, 机器人前往下一个点')
                            nextPoint()
                            return


# After a successful motion plan
def successMoveit():
    """Open the nozzle, sweep the first two joints through a small square
    around the current pose (four corners, ~3° offsets), then close the
    nozzle."""
    print('打开喷嘴---')
    control_pub = rospy.Publisher('/patrol/control_cmd', String, queue_size=10)
    rate.sleep()
    for i in range(5):
        control_pub.publish('喷嘴打开指令')
        rate.sleep()
    value_ = 3 * 3.14 / 180  # ~3 degrees in radians
    # BUG FIX: the original bound arm_pos_01..04 to the SAME list, so the
    # later assignments overwrote the earlier ones and all four published
    # poses were identical. Copy the current joints for each corner.
    arm_pos_01 = list(arm_joints_radian)
    arm_pos_02 = list(arm_joints_radian)
    arm_pos_03 = list(arm_joints_radian)
    arm_pos_04 = list(arm_joints_radian)
    arm_pos_01[0] = arm_joints_radian[0] - value_
    arm_pos_01[1] = arm_joints_radian[1] - value_
    arm_pos_02[0] = arm_joints_radian[0] - value_
    arm_pos_02[1] = arm_joints_radian[1] + value_
    arm_pos_03[0] = arm_joints_radian[0] + value_
    arm_pos_03[1] = arm_joints_radian[1] + value_
    arm_pos_04[0] = arm_joints_radian[0] + value_
    arm_pos_04[1] = arm_joints_radian[1] - value_
    arm_pos_list = [arm_pos_01, arm_pos_02, arm_pos_03, arm_pos_04]
    pub = rospy.Publisher('/handsfree/arm/set_arm_joints_pos', arm_joints_position, queue_size=10)
    for arm_move_path in arm_pos_list:
        for i in range(5):
            pub.publish(arm_move_path)
            rate.sleep()
        rospy.sleep(2)  # dwell at each corner for two seconds

    for i in range(5):
        control_pub.publish('喷嘴关闭指令')
        rate.sleep()


# Reset shared state (called at startup and after each chassis move)
def init():
    """Reset all per-waypoint flags and counters to their defaults."""
    global arrive_flag, find_obj_flag, find_count_fp, arm_move_flag
    global odd_obj_flag, handle_count_fps, handle_coords, handle_coord
    global arm_move_arrive_flag, end_find_fp, run_moveit_ik_flag
    global get_camera_pos_flag
    # NOTE(review): odd_obj_flag is declared global but (as in the original)
    # intentionally never reset here — it is a launch parameter.

    arrive_flag = False             # chassis has reached the waypoint
    find_obj_flag = False           # target object detected
    find_count_fp = 10              # frames to watch after stopping before searching
    arm_move_flag = False           # arm is running its search path

    handle_count_fps = 5            # frames to accumulate for multi-frame averaging
    handle_coords = []              # per-frame coordinates (multi-frame mode)
    handle_coord = [0.0, 0.0, 0.0]  # final coordinate (single- or multi-frame)
    arm_move_arrive_flag = False    # arm search path finished
    end_find_fp = 20                # settle frames after the arm finishes moving

    run_moveit_ik_flag = False      # motion planning already attempted

    get_camera_pos_flag = False     # current camera pose cached


# Arm search-path waypoints in joint space (six joint angles in radians each).
# Tune these to change what the wrist camera sweeps over at each stop.
arm_move_path_01 = [0.0, -0.734609067440033, -0.3312634825706482, 0.0, -0.4625122547149658, 0.0]
arm_move_path_02 = [0.0, -1.1325441598892212, 0.8100073337554932, 0.0, -1.1936306953430176, 0.0]
arm_move_path_03 = [0.0, -1.1292279958724976, 0.9810495972633362, 0.0, -0.7223917841911316, 0.0]
arm_move_paths = [arm_move_path_01, arm_move_path_02, arm_move_path_03]


if __name__ == '__main__':
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('chassis_arm', anonymous=True)

    init()  # initialise the shared state; it is reset again after each chassis move
    print('开始等待机器人移动底盘反馈状态, 接受到 Load 返回值将停止移动.')

    # ROS parameters
    obj_name = rospy.get_param('~obj_name', 'bottle')  # class name of the object to detect
    obj_min_probability = rospy.get_param('~obj_min_probability', 0.5)  # minimum accepted YOLO confidence
    handle_fps_flag = rospy.get_param('~handle_fps_flag', True)  # average multiple frames before producing a coordinate
    min_diff_radian = rospy.get_param('~min_diff_radian', 1.5 * 3.14 / 180)  # joint tolerance (rad) for "search path finished"
    end_parallel_flag = rospy.get_param('~end_parallel_flag', False)  # keep the end effector parallel to the ground
    odd_obj_flag = rospy.get_param('~odd_obj_flag', False)  # oddly-shaped object (e.g. hollow centre)
    rate = rospy.Rate(10.0)

    # Plain subscribers
    rospy.Subscriber('/patrol/simple_status_feeback', String, callbackStatus)  # chassis status feedback
    rospy.Subscriber('/handsfree/arm/arm_state', arm_state, callbackArmHaveMove)  # arm joint state
    rospy.Subscriber('/darknet_ros/found_object', ObjectCount, callbackObjectCount)   # YOLO publishes no bounding_boxes when nothing is detected

    # These three topics must be processed together in one callback
    depth_image = message_filters.Subscriber('/camera/depth_registered/image_raw', Image)  # registered depth image
    depth_info = message_filters.Subscriber('/camera/depth_registered/camera_info', CameraInfo)  # depth camera intrinsics
    bounding_boxes = message_filters.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes) # YOLO detection boxes
    ts = message_filters.ApproximateTimeSynchronizer([depth_image, depth_info, bounding_boxes], 10, 0.1, allow_headerless=True)
    ts.registerCallback(callbackYoloFind)

    # pyrealsense2 deprojection setup (intrinsics filled on the first CameraInfo)
    set_depth_intrin_flag = True
    rs_model = pyrealsense2.distortion(2)
    depth_intrin = pyrealsense2.intrinsics()

    # TF broadcasting
    br = tf2_ros.TransformBroadcaster()
    obj_tf = geometry_msgs.msg.TransformStamped()
    obj_tf.header.frame_id = "arm_base_link"
    obj_tf.child_frame_id = obj_name + "_link"

    # TF lookup
    tfBuffer = tf2_ros.Buffer()  # transform buffer
    listener = tf2_ros.TransformListener(tfBuffer)  # fills the buffer (default 10 s of history)

    # Publishers
    target_pose_pub = rospy.Publisher('target_pose', PoseStamped, queue_size=10)
    arm_gripper = rospy.Publisher('/handsfree/arm/set_arm_joint_gripper', Float32, queue_size=10)

    # MoveIt motion-planning setup
    arm = moveit_commander.MoveGroupCommander('arm')
    end_effector_link = 'gripper_link'
    reference_frame = 'arm_base_link'
    arm.set_pose_reference_frame(reference_frame)
    arm.allow_replanning(True)  # allow re-planning after a failed attempt
    arm.set_goal_position_tolerance(0.01)
    arm.set_goal_orientation_tolerance(0.05)

    # Move the arm to its initial viewing pose
    armMoveFindPtah(arm_move_path_01)
    rospy.spin()
