from ultralytics import YOLO
import cv2
import time
import os
import datetime
import inspect
from xgoedu import XGOEDU
class Detector:
    """Hazard-sign and shape detector driven by two YOLO models.

    Wraps a "shape" model (colored-shape recognition) and a "danger" model
    (hazard-sign recognition). Each public ``detect_once_*`` method captures a
    fresh camera frame, runs the corresponding model, optionally saves the
    annotated frame to disk, optionally speaks the result through the XGO-EDU
    TTS, and returns a result dict.
    """

    def __init__(self, xgo_edu=None, confidence_threshold=0.88):
        """Initialize the detector.

        Args:
            xgo_edu: Optional XGOEDU instance used for speech synthesis.
                When None, speech requests are silently skipped.
            confidence_threshold (float): Minimum confidence for a detection
                to be reported.
        """
        self.shape_detect = YOLO("shape_detect.pt")
        self.dange_detect = YOLO("best.pt")
        self.output_dir_dange = "./dange_images"
        self.output_dir_shape = "./shape_images"
        self.confidence_threshold = confidence_threshold
        self.xgo_edu = xgo_edu

        # Make sure both output directories exist before any save.
        os.makedirs(self.output_dir_dange, exist_ok=True)
        os.makedirs(self.output_dir_shape, exist_ok=True)

    def loginfo(self, content: str):
        """Print a log line tagged with timestamp, caller file and line number.

        Format: ``[YYYY-MM-DD HH:MM:SS]-[filename]-[lineno]: content``
        """
        current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # One frame up the stack is the caller of loginfo.
        frame = inspect.currentframe().f_back
        filename = os.path.basename(frame.f_code.co_filename)
        line_no = frame.f_lineno
        # Bug fix: the original printed a dead literal instead of `filename`,
        # leaving the computed variable unused.
        print(f"[{current_time}]-[{filename}]-[{line_no}]: {content}")

    def extract_color(self, class_name):
        """Return the color keyword contained in *class_name*.

        Checks (in order) for "blue", "green", "red" as a case-insensitive
        substring; falls back to the original class name when none matches.
        """
        lowered = class_name.lower()
        for color in ("blue", "green", "red"):
            if color in lowered:
                return color
        return class_name

    def Toc(self, s):
        """Translate an English hazard class name to Chinese.

        Unknown names deliberately map to "火灾" (fire), matching the original
        fallthrough behavior.
        """
        translations = {
            "water": "洪水",
            "explode": "爆炸",
            "landslide": "塌方",
            "maoding": "冒顶",
        }
        return translations.get(s, "火灾")

    def capture_fresh_frame(self):
        """Re-open the camera and capture a fresh, stabilized frame.

        Returns:
            tuple: (success flag, frame) — (None, None) if the camera fails
            during warm-up reads.
        """
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

        # Let the sensor settle after opening.
        time.sleep(0.5)

        # Discard a few warm-up frames (auto-exposure/white-balance settle).
        for _ in range(5):
            ret, frame = cap.read()
            if not ret:
                cap.release()
                return None, None

        # The frame actually returned to the caller.
        success, frame = cap.read()
        cap.release()
        return success, frame

    def _speak(self, text, speak_result):
        """Speak *text* via TTS when enabled and a speech backend exists.

        Guarding on ``self.xgo_edu`` fixes an AttributeError when the class is
        constructed with the default ``xgo_edu=None``.
        """
        if speak_result and self.xgo_edu is not None:
            self.xgo_edu.SpeechSynthesis(text)

    def _camera_error(self, speak_result):
        """Log/announce a camera failure and build the error result dict."""
        error_msg = "无法读取摄像头"
        self.loginfo(error_msg)
        self._speak("摄像头错误", speak_result)
        return {
            'success': False,
            'message': error_msg,
            'detected_objects': [],
            'speech_text': '摄像头错误',
            'saved_file': None
        }

    def _save_annotated(self, annotated_frame, output_dir, frame_id, save_image):
        """Save the annotated frame (if requested) and return its path or None."""
        if not save_image:
            return None
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        if frame_id is not None:
            filename = f"{output_dir}/frame_{timestamp}_{frame_id}.jpg"
        else:
            filename = f"{output_dir}/frame_{timestamp}.jpg"
        cv2.imwrite(filename, annotated_frame)
        self.loginfo(f"已保存到:{filename}")
        return filename

    def _detect_shape_core(self, frame_id, save_image, speak_result, with_position):
        """Shared capture → detect → save → report pipeline for the shape model.

        Args:
            frame_id: Optional id appended to the saved filename.
            save_image (bool): Whether to save the annotated frame.
            speak_result (bool): Whether to speak the result.
            with_position (bool): When True (the ``*_position`` variants), only
                the first detection is considered, bounding-box geometry is
                added to each object, and the result dict carries no
                ``'message'`` key — mirroring the original per-method behavior.

        Returns:
            dict: Detection result.
        """
        success, frame = self.capture_fresh_frame()
        if not success or frame is None:
            return self._camera_error(speak_result)

        self.loginfo("已拍摄，正在识别...")

        results = self.shape_detect(frame)
        annotated_frame = results[0].plot()
        saved_file = self._save_annotated(
            annotated_frame, self.output_dir_shape, frame_id, save_image)

        detected_colors = []
        detected_objects = []
        speech_text = "识别失败"
        boxes = results[0].boxes

        if len(boxes) > 0:
            self.loginfo(f"检测到 {len(boxes)} 个对象")
            for i, box in enumerate(boxes):
                if hasattr(box, 'cls') and hasattr(results[0].names, '__getitem__'):
                    class_id = int(box.cls)
                    class_name = results[0].names[class_id]
                    confidence = float(box.conf)
                    if not with_position:
                        # Per-object logging only in the non-position variant,
                        # as in the original.
                        self.loginfo(
                            f"  对象 {i+1}: {class_name} (置信度: {confidence:.2f})")
                    # Confidence filter.
                    if confidence > self.confidence_threshold:
                        color = self.extract_color(class_name)
                        detected_colors.append(color)
                        entry = {
                            'class_name': class_name,
                            'confidence': confidence,
                            'color': color
                        }
                        if with_position:
                            x1, y1, x2, y2 = map(int, box.xyxy[0])
                            entry['center_x'] = (x1 + x2) // 2
                            entry['center_y'] = (y1 + y2) // 2
                            entry['width'] = x2 - x1
                        detected_objects.append(entry)
                if with_position:
                    break  # position variants only use the first detection

            # Announce the result.
            if detected_colors:
                # Deduplicate and take the first detected color.
                unique_colors = list(set(detected_colors))
                speech_text = unique_colors[0]
                self.loginfo(f"播报内容: {speech_text}")
                self._speak(speech_text, speak_result)
            else:
                # No detection passed the confidence filter.
                self.loginfo("播报内容: 识别失败")
                self._speak("识别失败", speak_result)
        else:
            self.loginfo("未检测到对象")
            self._speak("识别失败", speak_result)

        result = {
            'success': True,
            'detected_objects': detected_objects,
            'speech_text': speech_text,
            'saved_file': saved_file,
            'object_count': len(boxes)
        }
        if not with_position:
            # Only the plain shape detection reports a 'message' key.
            result['message'] = '检测完成'
        return result

    def detect_once_shape(self, frame_id=None, save_image=True, speak_result=True):
        """Run one shape detection (all boxes, no position info).

        Blocks on Enter before capturing so the user can place the target.

        Args:
            frame_id (int): Frame id used in the saved filename.
            save_image (bool): Whether to save the annotated image.
            speak_result (bool): Whether to speak the result.

        Returns:
            dict: Detection result.
        """
        input("👉 放好图后按 Enter 拍摄")
        self.loginfo("准备拍摄...")
        time.sleep(1)  # give the user time to position the target
        return self._detect_shape_core(
            frame_id, save_image, speak_result, with_position=False)

    def detect_once_shape_position(self, frame_id=None, save_image=True, speak_result=True):
        """Run one shape detection returning the first box with its position.

        Blocks on Enter before capturing so the user can place the target.

        Args:
            frame_id (int): Frame id used in the saved filename.
            save_image (bool): Whether to save the annotated image.
            speak_result (bool): Whether to speak the result.

        Returns:
            dict: Detection result (objects include center_x/center_y/width).
        """
        input("👉 放好图后按 Enter 拍摄")
        self.loginfo("准备拍摄...")
        time.sleep(1)  # give the user time to position the target
        return self._detect_shape_core(
            frame_id, save_image, speak_result, with_position=True)

    def auto_detect_once_shape_position(self, frame_id=None, save_image=True, speak_result=True):
        """Like detect_once_shape_position but without the interactive prompt.

        Args:
            frame_id (int): Frame id used in the saved filename.
            save_image (bool): Whether to save the annotated image.
            speak_result (bool): Whether to speak the result.

        Returns:
            dict: Detection result (objects include center_x/center_y/width).
        """
        self.loginfo("准备拍摄...")
        time.sleep(1)
        return self._detect_shape_core(
            frame_id, save_image, speak_result, with_position=True)

    def detect_once_dange(self, frame_id=None, save_image=True, speak_result=True):
        """Run one hazard-sign detection with the danger model.

        Args:
            frame_id (int): Frame id used in the saved filename.
            save_image (bool): Whether to save the annotated image.
            speak_result (bool): Whether to speak the (Chinese) result.

        Returns:
            dict: Detection result; ``speech_text`` holds the Chinese hazard
            name when a detection passed the confidence filter.
        """
        self.loginfo("准备拍摄...")
        time.sleep(1)  # give the user time to position the sign

        success, frame = self.capture_fresh_frame()
        if not success or frame is None:
            return self._camera_error(speak_result)

        self.loginfo("已拍摄，正在识别...")

        results = self.dange_detect(frame)
        annotated_frame = results[0].plot()
        saved_file = self._save_annotated(
            annotated_frame, self.output_dir_dange, frame_id, save_image)

        detected_objects = []
        speech_text = "识别失败"
        boxes = results[0].boxes

        if len(boxes) > 0:
            self.loginfo(f"检测到 {len(boxes)} 个对象")
            for i, box in enumerate(boxes):
                if hasattr(box, 'cls') and hasattr(results[0].names, '__getitem__'):
                    class_id = int(box.cls)
                    class_name = results[0].names[class_id]
                    self.loginfo(f"class_name:{class_name}")
                    confidence = float(box.conf)
                    self.loginfo(
                        f"  对象 {i+1}: {class_name} (置信度: {confidence:.2f})")
                    # Keep only high-confidence detections.
                    if confidence > self.confidence_threshold:
                        detected_objects.append({
                            'class_name': class_name,
                            'confidence': confidence,
                            'chinese_name': self.Toc(class_name)
                        })

            if detected_objects:
                # Deduplicate class names; announce the first.
                unique_objects = list(set(
                    obj['class_name'] for obj in detected_objects))
                speech_text = unique_objects[0]
                chinese_result = self.Toc(str(speech_text))

                self.loginfo(f"播报内容: {speech_text}")
                self.loginfo(f"返回的结果: {chinese_result}")
                self._speak(chinese_result, speak_result)
                speech_text = chinese_result
            else:
                # Nothing passed the confidence filter.
                self.loginfo("播报内容: 识别失败")
                self._speak("识别失败", speak_result)
        else:
            self.loginfo("未检测到对象")
            self._speak("识别失败", speak_result)

        return {
            'success': True,
            'message': '检测完成',
            'detected_objects': detected_objects,
            'speech_text': speech_text,
            'saved_file': saved_file,
            'object_count': len(boxes)
        }