import cv2
import time
import socket
import math
import numpy as np
import cv2.aruco as aruco
from PIL import Image, ImageFont, ImageDraw
from handTracking import handDetect

font = cv2.FONT_HERSHEY_SIMPLEX  # font used for OpenCV on-frame text

BUFSIZE = 1024
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip_port = [('192.168.1.11', 1234), ('192.168.1.12', 1234), ('192.168.1.13', 1234)]  # robot IP addresses (UDP command targets)

targetX = [0, 0, 0]  # per-robot target X coordinate (0 doubles as the "no target" sentinel)
targetY = [0, 0, 0]  # per-robot target Y coordinate (0 doubles as the "no target" sentinel)
Point = [[], [], []]  # per-robot list of waypoint (x, y) tuples

g_rectangle = [0, 0, 0, 0]  # selection box: x, y, w, h
g_bDrawingBox = False  # True while the selection box is being dragged
robot_id = [1, 2, 3]  # robot ids, matching the actual ArUco marker ids
robot_selected = [False, False, False]  # whether each robot is currently selected
robot_XY = [(0, 0), (0, 0), (0, 0)]  # per-robot marker center coordinates
checked_color = (255, 0, 0)  # color drawn when a robot is selected
unchecked_color = (0, 255, 0)  # color drawn when a robot is not selected

finger_navigation = False  # whether robots are navigated by finger gesture
finger_middle_point = [0, 0]  # finger navigation point (where thumb and index finger pinch)


def showchinese(img, text, left, top, textColor=(0, 255, 0), textSize=20):
    '''  Render Chinese (or any Unicode) text onto a frame via PIL.

        OpenCV's putText cannot draw CJK glyphs, so the frame is round-tripped
        through PIL: BGR ndarray -> RGB PIL image -> draw -> BGR ndarray.

        :param img: current frame (BGR ndarray, or an already-converted PIL image)
        :param text: string to render
        :param left: x coordinate of the text
        :param top: y coordinate of the text
        :param textColor: text color
        :param textSize: font size
        :return: new BGR ndarray with the text drawn
    '''
    # Convert an OpenCV BGR ndarray to a PIL RGB image when needed.
    if isinstance(img, np.ndarray):
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    pen = ImageDraw.Draw(img)
    face = ImageFont.truetype(
        "font/simsun.ttc", textSize, encoding="utf-8")
    pen.text((left, top), text, textColor, font=face)
    # Back to OpenCV's BGR layout.
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)


def DrawRectangle(img, box):
    '''  Draw the selection box onto the frame in place.

    :param img: current frame (modified in place; nothing is returned)
    :param box: selection box as [x, y, w, h]
    '''
    x, y, w, h = box[0], box[1], box[2], box[3]
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2, 4)


def check_selected(x, y, w, h):
    '''
    Mark robots whose marker center lies strictly inside the selection box.

    Updates the module-level robot_selected flags; robots outside the
    box are deselected.
    :param x: top-left x of the selection box
    :param y: top-left y of the selection box
    :param w: box width
    :param h: box height
    :return:
    '''
    for idx in range(len(robot_id)):
        cx, cy = robot_XY[idx]
        robot_selected[idx] = x < cx < (x + w) and y < cy < (y + h)


def checked_fill(img, topLeft, topRight, bottomRight, bottomLeft):
    '''
     Tint a selected robot's ArUco marker with a translucent highlight.
    :param img: input frame
    :param topLeft: marker top-left corner
    :param topRight: marker top-right corner
    :param bottomRight: marker bottom-right corner
    :param bottomLeft: marker bottom-left corner
    :return: new frame with the marker area highlighted
    '''
    # Paint the marker quad solid yellow on a black overlay of the same shape...
    overlay = np.zeros(img.shape, dtype=np.uint8)
    quad = np.array([topLeft, topRight, bottomRight, bottomLeft])
    cv2.fillConvexPoly(overlay, quad, (0, 255, 255))
    # ...then blend the overlay onto the frame at 70% strength.
    return cv2.addWeighted(img, 1, overlay, 0.7, 0)


def mouseCallback(event, x, y, flags, param):
    '''
    Handle mouse events on the main window.

    - Mouse move: while dragging, resize the selection box.
    - Middle button: append waypoint (x, y) for every selected robot.
    - Right button: clear all robot waypoints.
    - Left button down/up: start/finish dragging the selection box, then
      determine which robots fall inside it.
    '''
    # FIX: `frame` was declared global here but never read or assigned
    # in this function; the unused declaration has been removed.
    global g_rectangle, g_bDrawingBox, Point
    # Mouse move: track the dragged corner (width/height relative to anchor).
    if event == cv2.EVENT_MOUSEMOVE:
        if g_bDrawingBox:
            g_rectangle[2] = x - g_rectangle[0]
            g_rectangle[3] = y - g_rectangle[1]

    # Middle button: add a waypoint for each currently selected robot.
    if event == cv2.EVENT_MBUTTONDOWN:
        for i in range(len(robot_id)):
            if (robot_selected[i]):
                Point[i].append((x, y))

    # Right button: clear every robot's waypoint list.
    if event == cv2.EVENT_RBUTTONDOWN:
        Point = [[], [], []]

    # Left button down: anchor the selection box at the press position.
    if event == cv2.EVENT_LBUTTONDOWN:
        g_bDrawingBox = True
        g_rectangle[0] = x
        g_rectangle[1] = y
        g_rectangle[2] = 0
        g_rectangle[3] = 0
    # Left button up: finish the drag and work out which robots the box encloses.
    elif event == cv2.EVENT_LBUTTONUP:
        g_bDrawingBox = False
        check_selected(g_rectangle[0], g_rectangle[1], g_rectangle[2], g_rectangle[3])


def get_angle_by_cos(p0, p1, p2):
    """
    Return the angle (in radians) at vertex p1 of the triangle p0-p1-p2,
    using the dot-product formula cos(theta) = (v1 . v2) / (|v1| |v2|).

    :param p0: one endpoint (x, y)
    :param p1: the angle's vertex (x, y)
    :param p2: the other endpoint (x, y)
    :return: angle in radians in [0, pi]; 0 if either vector is degenerate
    """
    v1 = (p0[0] - p1[0], p0[1] - p1[1])
    v2 = (p2[0] - p1[0], p2[1] - p1[1])
    m = math.hypot(v1[0], v1[1]) * math.hypot(v2[0], v2[1])
    if m == 0:
        # One of the points coincides with the vertex: the angle is undefined.
        return 0
    cos = (v1[0] * v2[0] + v1[1] * v2[1]) / m
    # BUG FIX: floating-point error can push `cos` marginally outside [-1, 1],
    # making math.acos raise ValueError. The old code caught that with a bare
    # `except` and returned 180 — a *degree* value from a function whose
    # contract is radians (callers pass the result to math.degrees). Clamp the
    # cosine instead so the result is always a valid radian angle.
    return math.acos(max(-1.0, min(1.0, cos)))


def length(p0, p1):
    """
    Return the Euclidean distance between points p0 and p1.

    :param p0: first point (x, y)
    :param p1: second point (x, y)
    :return: straight-line distance as a float
    """
    # math.hypot is the standard-library form of sqrt(dx**2 + dy**2) and is
    # robust against intermediate overflow/underflow.
    return math.hypot(p0[0] - p1[0], p0[1] - p1[1])


def robot_controll(frame, index, robotX, robotY, centerX, centerY):
    '''
    Robot control task: steer robot `index` toward its current target,
    drawing guidance lines on the frame and sending UDP drive commands.

    :param frame: current video frame (drawn on in place)
    :param index: index of the robot to operate on
    :param robotX: x of the robot's heading point (tip of its Y axis, i.e. the marker's top-edge midpoint)
    :param robotY: y of the robot's heading point
    :param centerX: x of the robot's marker center
    :param centerY: y of the robot's marker center
    :return:
    '''
    global Point, finger_middle_point
    # Finger-navigation mode: the pinch point becomes the target.
    if finger_navigation:
        # Drop any waypoints queued in mouse mode first.
        Point = [[], [], []]
        if robot_selected[index]:
            targetX[index] = finger_middle_point[0]
            targetY[index] = finger_middle_point[1]
        else:
            targetX[index] = 0
            targetY[index] = 0
    else:
        # Mouse mode: follow the drawn waypoint path, if any.
        if len(Point[index]) != 0:
            # Draw the segment from the robot's heading point to the first waypoint.
            cv2.line(frame, (robotX, robotY), Point[index][0], (0, 0, 255), 2)
            # With more than one waypoint, draw the remaining head-to-tail segments.
            if (len(Point[index])) > 1:
                for j in range(len(Point[index]) - 1):
                    cv2.line(frame, Point[index][j], Point[index][j + 1], (0, 0, 255), 2)
            # The first waypoint becomes the current target.
            targetX[index] = Point[index][0][0]
            targetY[index] = Point[index][0][1]
        else:
            targetX[index] = 0
            targetY[index] = 0

    # When a target exists (0 doubles as the "no target" sentinel, so a target
    # lying exactly on x == 0 or y == 0 is treated as absent).
    if targetX[index] != 0 and targetY[index] != 0:
        # Draw the segment from the robot's heading point to the target.
        cv2.line(frame, (robotX, robotY), (targetX[index], targetY[index]), (0, 0, 255), 2)
        # Angle (radians) at the heading point between marker center and target.
        a = get_angle_by_cos((centerX, centerY), (robotX, robotY), (targetX[index], targetY[index]))
        # Turn-direction sign via a cross-product-style test.
        # NOTE(review): the second term uses (targetY - centerY) where a
        # standard cross product of (target - robot) x (robot - center) would
        # use (targetY - robotY) — confirm this asymmetry is intentional.
        b = ((targetX[index] - robotX) * (robotY - centerY)) - (
                (targetY[index] - centerY) * (robotX - centerX))
        # Angle between robot heading and target, in degrees.
        angle = round(math.degrees(a), 2)
        print(f"弧度：{a}， 角度：{angle} , 方向: {b}")

        # Remaining distance from the robot to the target.
        N_M = length((robotX, robotY), (targetX[index], targetY[index]))
        # Draw the remaining distance next to the robot.
        cv2.putText(frame, str(int(N_M)), (robotX, robotY), font, 1, (0, 0, 255), 2)

        # Drive the robot: message format is C<distance>,R<angle>,F<direction>.
        if N_M > 25:
            msg = "C" + str(int(N_M)) + ",R" + str(int(angle)) + ",F" + str(b)
            client.sendto(msg.encode('utf-8'), ip_port[index])  # send drive command over UDP
        # Within 25 px the target counts as reached.
        if N_M <= 25:
            # If more waypoints remain...
            if len(Point[index]) != 0:
                # ...drop the reached waypoint so the next one becomes the target.
                Point[index].remove(Point[index][0])
                print(Point[index])
            # Otherwise clear the target to stop moving.
            else:
                targetX[index] = 0
                targetY[index] = 0
    else:
        msg = "C0,R180,F1"
        client.sendto(msg.encode('utf-8'), ip_port[index])  # send stop/idle command over UDP


def handControl(str):
    '''
    Control robot selection and paths from a recognized hand gesture.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    so any keyword caller (handControl(str=...)) is not broken.

    :param str: gesture string produced by the hand detector
    :return: a status message describing the action taken
    '''
    global robot_selected, Point
    # BUG FIX: the previous bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit; only the conversion errors int() can actually raise
    # for a bad gesture string are caught now.
    try:
        number = int(str, 10)
    except (ValueError, TypeError):
        number = -1

    if finger_navigation:
        return "手势导航中"
    # Raising 1-3 fingers selects the robot with that number.
    if 0 < number < 4:
        robot_selected[number - 1] = True
        return "选中{}号机器人".format(number)
    # "Spider-Man" gesture deselects all robots.
    if str == "Spider-Man":
        robot_selected = [False, False, False]
        return "取消选择"
    # "Pink Up" gesture clears all navigation paths.
    if str == "Pink Up":
        Point = [[], [], []]
        return "清空路径"

    return "未知手势"

# Program entry point
if __name__ == '__main__':
    # Open the camera; pass the device id (the first camera is usually 0).
    cap = cv2.VideoCapture(0)
    # Request the frame size.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    cv2.namedWindow('image')
    # Register the mouse event listener.
    cv2.setMouseCallback('image', mouseCallback)
    # Set up the ArUco decoding dictionary and detector parameters.
    # NOTE(review): Dictionary_get/DetectorParameters_create are the legacy
    # (pre-OpenCV-4.7) ArUco API — confirm the installed cv2 version matches.
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters = aruco.DetectorParameters_create()

    # Loop while the camera is open.
    while cap.isOpened():
        # Grab the current frame.
        ret, frame = cap.read()
        if ret:
            # Convert to grayscale for detection.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # aruco.detectMarkers() finds the markers and returns their ids and each marker's 4 corner points.
            corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
            if len(corners) > 0:
                # Flatten the ArUco id list.
                ids = ids.flatten()
                # Index into the robot tables for the current marker id.
                index = 0
                # Iterate over the detected ArUco markers.
                for (markerCorner, markerID) in zip(corners, ids):
                    # Extract the marker corners (always returned in top-left,
                    # top-right, bottom-right, bottom-left order).
                    # NOTE(review): this rebinds the outer `corners` list; safe
                    # only because zip() above already captured the original.
                    corners = markerCorner.reshape((4, 2))
                    (topLeft, topRight, bottomRight, bottomLeft) = corners
                    # Convert each (x, y) pair to integers.
                    topRight = (int(topRight[0]), int(topRight[1]))
                    bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
                    bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
                    topLeft = (int(topLeft[0]), int(topLeft[1]))

                    # Map the marker id to its robot index.
                    for key, value in enumerate(robot_id):
                        if (value == markerID):
                            index = key

                    # Pick the display color (and fill the marker) depending on
                    # whether the robot is selected.
                    if (robot_selected[index]):
                        drawColor = checked_color
                        frame = checked_fill(frame, topLeft, topRight, bottomRight, bottomLeft)
                    else:
                        drawColor = unchecked_color

                    # Draw the detected marker's bounding box.
                    cv2.line(frame, topLeft, topRight, drawColor, 2)
                    cv2.line(frame, topRight, bottomRight, drawColor, 2)
                    cv2.line(frame, bottomRight, bottomLeft, drawColor, 2)
                    cv2.line(frame, bottomLeft, topLeft, drawColor, 2)

                    # Compute and draw the marker's center (x, y).
                    cX = int((topLeft[0] + bottomRight[0]) / 2.0)
                    cY = int((topLeft[1] + bottomRight[1]) / 2.0)
                    cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)
                    robot_XY[index] = cX, cY  # update the robot's center coordinates

                    # Draw the marker id on the frame.
                    cv2.putText(frame, str(markerID), (topLeft[0], topLeft[1] - 15),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, drawColor, 2)

                    # Draw the Y axis; the robot's heading is judged against this line later.
                    sX = int((topRight[0] - topLeft[0]) / 2) + topLeft[0]  # top-edge midpoint x
                    sY = int((topRight[1] - topLeft[1]) / 2) + topLeft[1]  # top-edge midpoint y
                    cv2.line(frame, (cX, cY), (sX, sY), (0, 0, 255), 2)

                    # Drive the robot toward its current target.
                    robot_controll(frame, index, sX, sY, cX, cY)

                # cv2.putText(frame, "{} Robots".format(len(ids)), (0, 35), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
                frame = showchinese(frame, "发现{}个机器人".format(len(ids)), 0, 15, (255, 0, 0), 25)

            else:
                ##### DRAW "NO IDS" #####
                # cv2.putText(frame, "No Robots", (0, 35), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
                frame = showchinese(frame, "未发现机器人", 0, 15, (255, 0, 0), 25)

            # While a drag is in progress, draw the selection box.
            if g_bDrawingBox:
                DrawRectangle(frame, g_rectangle)

            # Hand gesture detection and control.
            frame, gesture_str, finger_navigation, finger_middle_point = handDetect(frame)
            result_str = handControl(gesture_str)

            # cv2.putText(frame, gesture_str, (0, 65), font, 1, (0, 255, 255), 2, cv2.LINE_AA)
            frame = showchinese(frame, result_str, 0, 50, (255, 255, 0), 25)
            cv2.imshow("image", frame)
        else:
            break

        # Press 'q' to quit.
        k = cv2.waitKey(10) & 0xFF
        if k == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()
