from ultralytics import YOLO
import cv2
import time
import os
from xgoedu import XGOEDU 

XGO_edu = XGOEDU()  # speech-synthesis / edu helper for the XGO robot

# Load the trained YOLO detection weights
model = YOLO("best_detect.pt")

# Create the output directory for annotated frames (no-op if it exists)
output_dir = "output_images"
os.makedirs(output_dir, exist_ok=True)
def Toc(s):
    """Translate a detector class name into its Chinese announcement label.

    Any unrecognized name (including "fire") falls through to "火灾",
    mirroring the original catch-all branch.
    """
    labels = {
        "water": "洪水",
        "explore": "爆炸",
        "landslide": "塌方",
        "roof": "冒顶",
    }
    return labels.get(s, "火灾")

def capture_fresh_frame():
    """Re-initialize the camera, let it stabilize, and grab one fresh frame.

    Opening the device per call avoids serving a stale buffered frame.

    Returns:
        (success, frame): ``success`` is truthy and ``frame`` is the captured
        image on success; ``(None, None)`` if the camera cannot be opened or
        a warm-up read fails.
    """
    cap = cv2.VideoCapture(0)
    # Guard against a missing/busy device — original code skipped this check
    if not cap.isOpened():
        cap.release()
        return None, None
    try:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        # Give auto-exposure/white-balance time to settle
        time.sleep(0.5)

        # Discard a few warm-up frames so the final capture is stable
        for _ in range(5):
            ret, _ = cap.read()
            if not ret:
                return None, None

        # The frame we actually return
        success, frame = cap.read()
        return success, frame
    finally:
        # Always release the device (even on an exception) so the next
        # call can reopen it — original leaked the handle on error paths
        cap.release()

frame_id = 0
max_frames = 5  # maximum number of recognition rounds

while frame_id < max_frames:
    XGO_edu.SpeechSynthesis(f"开始第{frame_id + 1}次识别")
    input(f"👉 放好图后按 Enter 拍摄第{frame_id + 1}张...")

    print("准备拍摄...")
    time.sleep(1)  # give the user time to position the picture

    # Re-initialize the camera and capture a fresh frame
    success, frame = capture_fresh_frame()

    if not success or frame is None:
        print("无法读取摄像头")
        break

    print("已拍摄，正在识别...")

    # Run detection on the captured frame
    results = model(frame)

    # Draw the detection results onto the frame
    annotated_frame = results[0].plot()

    # Save the annotated image under a timestamped name
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    filename = f"{output_dir}/frame_{timestamp}_{frame_id + 1}.jpg"
    cv2.imwrite(filename, annotated_frame)

    # BUG FIX: originally printed the literal text "(unknown)" instead of
    # the saved file path
    print(f"[{frame_id + 1}/{max_frames}] 已保存: {filename}")

    # Announce detections (high-confidence only)
    if len(results[0].boxes) > 0:
        print(f"检测到 {len(results[0].boxes)} 个对象")

        # Collect class names that pass the confidence threshold
        detected_objects = []

        for i, box in enumerate(results[0].boxes):
            if hasattr(box, 'cls') and hasattr(results[0].names, '__getitem__'):
                class_id = int(box.cls)
                # NOTE(review): the [5:-2] slice assumes names[class_id]
                # carries wrapper text (e.g. a repr like "{2: 'roof'}")
                # rather than a plain class-name string — confirm against
                # this model's `names` mapping before changing.
                class_name = results[0].names[class_id][5:-2]
                confidence = float(box.conf)
                print(f"  对象 {i+1}: {class_name} (置信度: {confidence:.2f})")

                # NOTE(review): earlier comments mention filtering out
                # "explore", but only the confidence threshold is applied
                # here — behavior kept as-is.
                if confidence > 0.9:
                    detected_objects.append(class_name)

        # Announce the result
        if detected_objects:
            # Deduplicate, then announce the first class found.
            # (The original if/else branches were identical — collapsed.)
            unique_objects = list(set(detected_objects))
            speech_text = unique_objects[0]

            print(f"播报内容: {speech_text}")
            print(f"返回的结果:{Toc(str(speech_text))}")
            XGO_edu.SpeechSynthesis(Toc(str(speech_text)))
        else:
            # Nothing passed the confidence filter
            print("播报内容: 识别失败")
            XGO_edu.SpeechSynthesis("识别失败")
    else:
        print("未检测到对象")
        XGO_edu.SpeechSynthesis("识别失败")

    print("-" * 50)  # separator line
    frame_id += 1

# Announce completion
XGO_edu.SpeechSynthesis("识别完成")