from termcolor import colored
from ultralytics import YOLO#type: ignore
import cv2
import os
import time
import json
# Path to the detection weights (a PyTorch .pt checkpoint).
modelDetectPath = "best13.pt"
print("正在加载模型文件")  # "Loading model file"
# Loading happens at import time so the camera loop starts with a warm model.
modelDetect = YOLO(modelDetectPath)
print("模型加载成功！")  # "Model loaded successfully!"

# Open the camera and run the real-time detection loop.
def main():
    """Capture frames from the default camera, run YOLO detection on each,
    write the detections to ``detected_objects.json``, and show/save the
    annotated frame.  Press 'q' in the display window to quit.
    """
    cap = cv2.VideoCapture(0)  # 0 = default camera; try 1 for an external one

    # Request a 640x480 capture size (the driver may silently ignore this).
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    try:
        # Real-time detection loop.
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                print("Failed to read frame from camera")
                break

            # Run inference; verbose=False suppresses per-frame logging.
            # NOTE(review): half=True and device='0' assume a CUDA GPU is
            # available — confirm before running on a CPU-only machine.
            resultsDetect = modelDetect.predict(
                source=frame,
                imgsz=[640, 480],  # inference size; tune as needed
                half=True,
                # iou=0.5,
                conf=0.1,
                device='0',  # '0' selects the GPU
                verbose=False,
            )
            yolo_obj = resultsDetect[0]

            detected_objects = _extract_detections(yolo_obj)

            # Overwrite the JSON snapshot atomically each frame so a
            # concurrent reader never observes a half-written file.
            _write_detections(detected_objects, "detected_objects.json")

            annotated_frame = yolo_obj.plot()  # draw boxes/labels on the frame
            cv2.imshow('YOLOv8', annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # Also save the latest annotated frame (useful on headless setups).
            cv2.imwrite("yolo.png", annotated_frame)
    finally:
        # Always release the camera and close windows, even on error.
        cap.release()
        cv2.destroyAllWindows()


def _extract_detections(yolo_obj):
    """Convert one ultralytics Results object into a list of dicts, each with
    the box centre ('pos' = [center_x, center_y]), class id ('label_id') and
    class name ('label')."""
    detections = []
    for box in yolo_obj.boxes:
        x1, y1, x2, y2 = box.xyxy[0].tolist()  # corner coordinates
        class_id = int(box.cls[0])
        detections.append({
            'pos': [(x1 + x2) / 2, (y1 + y2) / 2],  # [center_x, center_y]
            'label_id': class_id,
            'label': yolo_obj.names[class_id],
        })
    return detections


def _write_detections(detections, path):
    """Atomically write *detections* plus a timestamp to *path* as JSON."""
    tmp_path = path + ".tmp"
    with open(tmp_path, "w") as f:
        json.dump({"objs": detections, "time": time.time()}, f, indent=4)
    os.replace(tmp_path, path)  # atomic rename on POSIX and Windows
# Script entry point: run the capture/detect loop when executed directly.
if __name__ == "__main__":
    main()
