import json
import queue
import time

import cv2
from ultralytics import YOLO

from app.config.protocol import CMD_TRACK_SHIP, CMD_UNTRACK_SHIP
from app.socket_client import SocketClient


# # 打开摄像头
# cap = cv2.VideoCapture(0)
#
# # 视频文件路径
# video_path = 'video/2.mp4'
#
# # 创建VideoCapture对象
# # cap = cv2.VideoCapture(video_path)
#
# model = YOLO('/home/uds/code/yolov8/runs/detect/train6/weights/best.pt') # 这里用你训练完以后保存的模型文件
# # 开始采集和推流
# while True:
#     # 采集一帧图像
#     ret, frame = cap.read()
#
#     if ret:
#         results = model(frame,device="cuda")
#         # 标注后的图片
#         res = results[0]
#         annotated_image = res.plot()
#         # 通过FFmpeg编码和推流
#         cv2.imshow('YoloV8 Inference', annotated_image)
#
#         names = res.names
#         indexes = res.boxes.cls
#         posPoint = res.boxes.xyxy.cpu().numpy().astype('uint32')
#         x = -1
#         y = -1
#         if len(posPoint)>0:
#             x = posPoint[0][0]
#             y = posPoint[0][1]
#
#         print("X=",x,"Y=",y)
#     key = cv2.waitKey(1)
#
#
#     if key == ord('q') or key == 27:  # 'q'或者ESC都会触发退出条件
#         break
#
#
# cap.release()
# cv2.waitKey(0)
# cv2.destroyAllWindows


class Detect:
    """Camera-based ship detector and tracker.

    Runs YOLO detection on webcam frames until a "track" command arrives
    (from the socket command queue or the keyboard), then locks an OpenCV
    CSRT tracker onto the last detected ship.  The latest position is
    streamed to the server as JSON through the supplied SocketClient.
    """

    def __init__(self, yolo_pt_best_path, client: SocketClient):
        """
        :param yolo_pt_best_path: path to the trained YOLO weights (best.pt).
        :param client: socket client used to push position updates;
            ``client.connect()`` is called here as a side effect.
        """
        self.model = YOLO(yolo_pt_best_path)

        self.client = client
        self.client.connect()

        # Open the default camera; keep the driver buffer small so frames
        # stay close to real time.
        self.cap = cv2.VideoCapture(0)
        # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 2)

        self.isRunning = True

        # OpenCV tracker instance and whether it is currently locked on.
        self.tracker = None
        self.tracking = False

        # Last detected ship box as [x1, y1, x2, y2] in pixels.
        self.fp_position_record = []

    def _poll_command(self, queue_socket_to_detect):
        """Non-blocking read of the next command from the socket queue.

        :returns: the ``'cmd'`` value of the queued message, or None when
            the queue is empty.
        """
        try:
            socket_data = queue_socket_to_detect.get(block=False)
            # Only queue.Empty is expected here; anything else is a real
            # error and must propagate instead of being silently swallowed.
            return socket_data['cmd']
        except queue.Empty:
            return None

    def _scan_detections(self, results):
        """Scan YOLO results for a ship (class 0) and record its box.

        :param results: YOLO inference results for one frame.
        :returns: ``(found, x_p, y_p)`` where x_p/y_p are the box top-left
            corner as percentages of an assumed 640x480 frame (TODO: confirm
            the capture resolution), or ``(False, -1, -1)`` when no ship
            clears the confidence threshold.
        """
        detect_ship = False
        x_p = -1
        y_p = -1
        for result in results:
            for box in result.boxes:
                category = box.cls.int().item()
                believe = box.conf.item()
                position = box.xyxy.tolist()[0]

                # Class 0 = ship; require modest confidence.
                if category == 0 and believe > 0.3:
                    x_p = int(position[0] / 640 * 100)
                    y_p = int(position[1] / 480 * 100)
                    detect_ship = True
                    self.fp_position_record = position
        return detect_ship, x_p, y_p

    def _start_tracking(self, frame):
        """Initialise a CSRT tracker on the last detected ship box."""
        self.tracker = cv2.TrackerCSRT_create()
        # self.tracker = cv2.TrackerKCF_create()
        x1, y1, x2, y2 = self.fp_position_record
        # Tracker ROI is (x, y, width, height), not (x1, y1, x2, y2).
        roi = (int(x1), int(y1), int(x2 - x1), int(y2 - y1))
        self.tracker.init(frame, roi)
        self.tracking = True

    def start(self, queue_socket_to_detect):
        """Main capture / detect / track loop; blocks until quit.

        :param queue_socket_to_detect: queue delivering
            ``{'cmd': ..., 'data': ...}`` dicts from the socket side,
            polled non-blocking once per frame.
        """
        while self.isRunning:
            start_time = time.time()
            ret, frame = self.cap.read()
            if not ret:
                # Camera hiccup; back off briefly instead of busy-looping.
                time.sleep(1)
                continue

            try:
                cmd = self._poll_command(queue_socket_to_detect)

                detect_ship = False
                x_p = -1
                y_p = -1
                if not self.tracking:
                    results = self.model(frame, device="cuda", verbose=False)
                    detect_ship, x_p, y_p = self._scan_detections(results)
                    annotated_image = results[0].plot()
                else:
                    annotated_image = frame

                key = cv2.waitKey(1)

                # Commands arrive either over the socket or from the keyboard.
                if cmd == CMD_TRACK_SHIP or key == ord('a'):
                    if detect_ship:
                        self._start_tracking(frame)
                elif cmd == CMD_UNTRACK_SHIP or key == ord('k'):
                    self.tracking = False

                if self.tracking:
                    # Update on the raw frame: on the frame where tracking
                    # was just enabled, annotated_image is the YOLO-plotted
                    # copy, which would corrupt the tracker's appearance model.
                    success, roi = self.tracker.update(frame)
                    if success:
                        x, y, w, h = map(int, roi)
                        cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
                        # Same percentage convention as _scan_detections.
                        x_p = int(x / 640 * 100)
                        y_p = int(y / 480 * 100)
                    else:
                        cv2.putText(annotated_image, "Tracking failed!", (50, 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

                # Push the latest position; (-1, -1) means nothing detected
                # or tracking failed this frame.
                send_data = {
                    "x": x_p,
                    "y": y_p
                }
                self.client.send_message(json.dumps(send_data))

                cv2.imshow("px4_yolov8", annotated_image)

                print("RunTime:", time.time() - start_time)

                if key == ord('q') or key == 27:  # 'q' or ESC quits the loop
                    break
            except Exception as e:
                # Keep the loop alive on per-frame failures, but surface the cause.
                print('read cap frame error:', e)

        self.cap.release()
        cv2.destroyAllWindows()
        self.isRunning = False