import json
import socket
import time

import cv2
import numpy as np
from ultralytics import YOLO

# GStreamer pipeline: receive an RTP-wrapped H.264 stream over UDP on
# port 5000, depay/parse/decode it, and deliver 1280x720 BGR frames to
# OpenCV through appsink (drop=1 keeps only the latest frame).
# NOTE: the trailing space after "port=5000 " must NOT be removed — the
# string fragments are concatenated, and without it the pipeline string
# is malformed and the stream cannot be pulled.
url = "udpsrc address=0.0.0.0 port=5000 " \
      "! application/x-rtp, media=video, encoding-name=H264" \
      "! rtph264depay" \
      "! h264parse" \
      "! avdec_h264" \
      "! videoconvert" \
      "! video/x-raw,width=1280,height=720,format=BGR" \
      "! appsink drop=1"
cap = cv2.VideoCapture(url, cv2.CAP_GSTREAMER)

# Detection model — YOLOv5su weights loaded via the Ultralytics API.
model = YOLO('yolov5su.pt')

# TCP connection to the server that receives detection reports.
# NOTE(review): address/port are hard-coded — presumably the alarm/report
# backend on the local network; confirm before deployment.
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
skt.connect(('192.168.171.11', 8090))

# Main loop: grab frames, run YOLO detection, report the first detected
# person to the TCP server, and render an annotated live preview.
# Wrapped in try/finally so the capture, windows, and socket are always
# released even if a read/send/recv raises mid-loop (the original leaked
# them on any exception).
try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            print("exit")
            break

        res = model(frame)

        # cls: one class id per detected object (e.g. person, cup,
        # keyboard -> three entries). xyxy: matching bounding boxes as
        # [x1, y1, x2, y2] rows.
        cls = res[0].boxes.cls
        xyxy = res[0].boxes.xyxy

        # Find the first detected person (COCO class id 0) and report it.
        person_box = None
        for i, t in enumerate(cls):
            if t == 0:  # class id 0 == "person"
                p1x = int(xyxy[i][0].item())
                p1y = int(xyxy[i][1].item())
                p2x = int(xyxy[i][2].item())
                p2y = int(xyxy[i][3].item())
                person_box = (p1x, p1y, p2x, p2y)

                # BUGFIX: report the actual detection time instead of the
                # hard-coded "2024-10-08 12:00:00" placeholder.
                data = {
                    "type": 0,
                    "time": time.strftime("%Y-%m-%d %H:%M:%S"),
                    "position": [p1x, p1y, p2x, p2y],
                }
                message = json.dumps(data)

                skt.sendall(message.encode('utf-8'))
                print("send:", message)
                response = skt.recv(1024)
                print(f"recv: {response.decode('utf-8')}")
                break

        # If a person was found, overlay its bounding box and a filled
        # dot at the bottom-center of the box (the "feet" position).
        if person_box is not None:
            p1x, p1y, p2x, p2y = person_box
            frame = cv2.rectangle(frame, (p1x, p1y), (p2x, p2y),
                                  (0, 255, 255), 2)
            center_x = (p1x + p2x) // 2
            cv2.circle(frame, (center_x, p2y), 10, (0, 0, 255), -1)

        # Overlay the detection threshold region: two vertical lines at
        # 42% / 58% of the width and one horizontal line at 90% height.
        color = (128, 128, 0)
        thickness = 5
        cv2.line(frame, (int(1280 * 0.42), 0),
                 (int(1280 * 0.42), 719), color, thickness)
        cv2.line(frame, (int(1280 * 0.58), 0),
                 (int(1280 * 0.58), 719), color, thickness)
        cv2.line(frame, (0, int(720 * 0.9)),
                 (1279, int(720 * 0.9)), color, thickness)

        # Show the annotated frame; 'q' quits.
        # (Window title typo fixed: 'camere' -> 'camera'.)
        cv2.imshow('camera', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cap.release()
    cv2.destroyAllWindows()
    skt.close()

