from ultralytics import YOLO
import os
import cv2
import numpy as np
from collections import defaultdict
import subprocess

class Animal:
    """YOLO-based animal detector/tracker backed by an ONNX model.

    All configuration is supplied via keyword arguments; anything not
    given falls back to a sensible default.
    """

    def __init__(self, **kwargs):
        self.image = kwargs.get("image")  # input image (path/array); required for detect_image
        self.device = kwargs.get("device", "cpu")  # inference device, default CPU
        self.dynamic = kwargs.get("dynamic", True)  # dynamic-shape inference, default on
        self.imgsz = kwargs.get("imgsz", 640)  # inference image size
        self.project = kwargs.get("project", "/yy/static/processimg")  # output project directory
        self.name = kwargs.get("name", "animaldetect")  # run name (subdirectory under project)
        self.conf = kwargs.get("conf", 0.5)  # confidence threshold
        self.iou = kwargs.get("iou", 0.6)  # NMS IoU threshold
        self.save = kwargs.get("save", True)  # save annotated results
        self.onnxmodel = kwargs.get("model", "yolo11s")  # ONNX model base name
        # Max trail points kept per tracked object. New optional kwarg; the
        # default matches the previously hard-coded 75, so callers are unaffected.
        self.track_len = kwargs.get("track_len", 75)

    def load_onnxmodel(self):
        """Load and return the YOLO model from onnx/<model>_animal.onnx."""
        model_path = os.path.join("onnx", self.onnxmodel + "_animal.onnx")
        return YOLO(model_path)

    def detect_image(self):
        """Run one detection pass over self.image and return the results list."""
        model = self.load_onnxmodel()
        # predict() handles preprocessing, inference and (optionally) saving
        # the annotated output under project/name.
        results = model.predict(source=self.image, device=self.device, dynamic=self.dynamic,
                                imgsz=self.imgsz, conf=self.conf, iou=self.iou, save=self.save,
                                project=self.project, name=self.name)
        return results

    def track_animal(self, video_path, save_path):
        """Track animals through a video, drawing a per-id motion trail.

        Writes the annotated video to save_path, then transcodes it with
        ffmpeg to H.264 at a sibling path whose filename is prefixed "yy".

        Returns the transcoded video path.
        Raises IOError if the input video cannot be opened, and
        subprocess.CalledProcessError if the ffmpeg step fails.
        """
        model = self.load_onnxmodel()

        # track_id -> list of (x, y) center points for the trail polyline.
        track_history = defaultdict(list)

        capture = cv2.VideoCapture(video_path)
        if not capture.isOpened():
            # Bug fix: raise instead of exit() so a bad file does not kill
            # the whole process (e.g. a serving web application).
            raise IOError("Error opening Video file.")

        # Video parameters (OpenCV returns them as floats).
        fps = capture.get(cv2.CAP_PROP_FPS)
        frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Bug fix: create the writer once, up front. The original created it
        # lazily inside the per-detection loop, so every frame before the
        # first detection (and every detection-free frame) was dropped.
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        videoWriter = cv2.VideoWriter(save_path, fourcc, fps, (frame_width, frame_height))

        try:
            while True:
                successCode, frame = capture.read()
                if not successCode:
                    print("视频读取结束")
                    break

                # Track on this single frame; persist=True keeps ids across frames.
                results = model.track(frame, persist=True)
                a_frame = np.copy(results[0].plot())

                # Box centers and the tracker ids for every detected object.
                boxes = results[0].boxes.xywh.cpu()
                ids = results[0].boxes.id
                track_ids = [] if ids is None else ids.int().cpu().tolist()

                for box, track_id in zip(boxes, track_ids):
                    x, y, w, h = box
                    track = track_history[track_id]
                    track.append((float(x), float(y)))
                    # Keep only the most recent trail points.
                    if len(track) > self.track_len:
                        track.pop(0)

                    # Draw the trail on top of the annotated frame.
                    points = np.hstack(track).astype(np.int32).reshape(-1, 1, 2)
                    cv2.polylines(a_frame, [points], isClosed=False, color=(0, 0, 255), thickness=3)

                # Bug fix: write each frame exactly once. The original write sat
                # inside the detection loop, duplicating frames with several
                # objects and skipping frames with none.
                videoWriter.write(a_frame)
        finally:
            # Release resources even if tracking raises mid-video.
            capture.release()
            videoWriter.release()

        # Re-encode to H.264; output filename is the input's prefixed with "yy".
        save_array = save_path.split("/")
        save_array[-1] = "yy" + save_array[-1]
        savepath2 = "/".join(save_array)
        # "-y" overwrites a stale output so re-processing the same video works.
        ffmpeg_command = ["ffmpeg", "-y", "-i", save_path,
                          "-vcodec", "libx264", "-crf", "23", savepath2]
        subprocess.run(ffmpeg_command, check=True)

        return savepath2
