#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
视频检测模块
提供YOLOv8视频对象检测相关功能，包括摄像头、视频文件和屏幕检测
"""

from ultralytics import YOLO
import cv2
import os
import time
import numpy as np
import tkinter as tk
from tkinter import filedialog
import threading
import sys
from pathlib import Path

def generate_output_name(base_name="video", ext=".mp4"):
    """Build a unique output filename of the form result_<base>_<timestamp><ext>."""
    stamp = time.strftime("%Y%m%d_%H%M%S")
    return "".join(["result_", base_name, "_", stamp, ext])

def select_video_file():
    """Ask the user to pick a video file via a file dialog.

    Falls back to console input if the dialog cannot be opened.

    Returns:
        The selected file path as a string, or None when the user cancels
        or the chosen file does not exist.
    """
    try:
        # Dedicated Tk instance; destroyed in a finally so a failing dialog
        # call cannot leak the hidden root window.
        root = tk.Tk()
        root.withdraw()  # hide the empty main window
        root.attributes('-topmost', True)  # keep the dialog on top

        print("正在打开文件选择对话框...")
        try:
            file_path = filedialog.askopenfilename(
                title="选择视频文件",
                filetypes=[
                    ("视频文件", "*.mp4 *.avi *.mov *.mkv *.flv *.wmv"),
                    ("所有文件", "*.*")
                ]
            )
        finally:
            # Flush pending events and always tear the Tk root down.
            root.update()
            root.destroy()

        if file_path:
            # Validate that the selection actually exists on disk.
            if not os.path.isfile(file_path):
                print(f"错误: 所选文件不存在: {file_path}")
                return None

            print(f"已选择视频文件: {file_path}")
            return file_path
        else:
            print("未选择任何文件")
            return None

    except Exception as e:
        print(f"选择文件时出错: {e}")
        # Dialog unavailable (e.g. headless session): fall back to manual input.
        try:
            print("\n无法打开文件选择对话框，请手动输入路径")
            path = input("请输入视频文件完整路径 (或按Enter取消): ")
            if path and os.path.isfile(path):
                return path
            elif path:
                print(f"错误: 指定的文件不存在: {path}")
            return None
        except Exception as input_err:  # distinct name: don't shadow the outer exception
            print(f"输入路径时出错: {input_err}")
            return None

def run_camera_detection(model, output_dir, conf_threshold=0.25):
    """Run live YOLOv8 detection on the default camera.

    Annotated frames are shown in a window and recorded to an .mp4 file
    inside output_dir. Press 'q' to stop.

    Args:
        model: loaded YOLO model (callable on a frame).
        output_dir: directory to place the output video in.
        conf_threshold: minimum detection confidence.
    """
    # Timestamped output path.
    output_path = os.path.join(output_dir, generate_output_name("camera", ".mp4"))

    # Open the default camera (index 0).
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("错误: 无法打开摄像头")
        return

    # Query stream properties; some drivers report 0 fps.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        fps = 30  # fall back to a sane default

    # Create the writer and verify it opened (consistent with
    # run_video_detection, which validates its writer).
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    if not out.isOpened():
        print("错误: 无法创建输出视频文件")
        cap.release()
        return

    print(f"摄像头检测已启动")
    print(f"分辨率: {width}x{height}, 帧率: {fps}")
    print(f"输出文件: {output_path}")
    print(f"按'q'键停止检测")

    # For live FPS measurement.
    start_time = time.time()
    frame_count = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("错误: 无法读取摄像头画面")
                break

            # Running average FPS since start.
            frame_count += 1
            elapsed_time = time.time() - start_time
            if elapsed_time > 0:
                current_fps = frame_count / elapsed_time
            else:
                current_fps = 0

            # YOLOv8 inference on this frame.
            results = model(frame, conf=conf_threshold)

            # Render detections onto a copy of the frame.
            annotated_frame = results[0].plot()

            # Overlay the FPS readout.
            cv2.putText(
                annotated_frame,
                f"FPS: {current_fps:.1f}",
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (0, 255, 0),
                2
            )

            out.write(annotated_frame)

            cv2.imshow("YOLOv8摄像头检测", annotated_frame)

            # 'q' stops the loop.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    except KeyboardInterrupt:
        print("\n检测已被用户中断")
    finally:
        # Always release camera, writer and windows.
        cap.release()
        out.release()
        cv2.destroyAllWindows()

        if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
            print(f"检测结果已保存至: {output_path}")
        else:
            print("警告: 未生成输出视频或输出视频为空")
            if os.path.exists(output_path):
                os.remove(output_path)

def run_video_detection(video_path, model, output_dir, conf_threshold=0.25):
    """Run YOLOv8 detection over a video file and save an annotated copy.

    Shows a throttled preview window; 'q' aborts, 'p' toggles pause.

    Args:
        video_path: path to the input video file.
        model: loaded YOLO model (callable on a frame).
        output_dir: directory to place the output video in.
        conf_threshold: minimum detection confidence.
    """
    if not os.path.isfile(video_path):
        print(f"错误: 视频文件 '{video_path}' 不存在")
        return

    try:
        # Base name without directory or extension, reused in the output name.
        video_name = os.path.splitext(os.path.basename(video_path))[0]

        output_path = os.path.join(output_dir, generate_output_name(video_name, ".mp4"))

        print(f"正在打开视频文件: {video_path}")
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            print(f"错误: 无法打开视频文件 '{video_path}'")
            return

        # Stream properties; guard against bogus fps values from the container.
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        if fps <= 0 or fps > 120:  # treat as invalid
            print(f"警告: 检测到无效的帧率 ({fps})，使用默认值 30fps")
            fps = 30

        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= 0:
            print("警告: 无法获取视频总帧数，将使用估计值")
            # Rough estimate from file size vs. per-frame byte count; the
            # max(1, ...) guards avoid ZeroDivisionError when the container
            # reports 0x0 frames or a tiny file.
            file_size = os.path.getsize(video_path)
            bytes_per_frame = max(1, width * height * 3 // 100)
            total_frames = max(1, min(10000, file_size // bytes_per_frame))
            print(f"估计总帧数: {total_frames}")

        # Create the writer; fall back to XVID if mp4v is unavailable.
        try:
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            if not out.isOpened():
                raise Exception("无法创建输出视频文件")
        except Exception as e:
            print(f"创建输出视频时出错: {e}")
            print("尝试使用替代编码器...")
            try:
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
                # Validate the fallback writer too (original skipped this).
                if not out.isOpened():
                    raise Exception("无法创建输出视频文件")
            except Exception as e2:
                print(f"使用替代编码器时也出错: {e2}")
                print("无法继续处理视频")
                cap.release()  # don't leak the capture on this exit path
                return

        print(f"视频处理已启动: {video_path}")
        print(f"分辨率: {width}x{height}, 帧率: {fps}, 总帧数: {total_frames}")
        print(f"输出文件: {output_path}")
        print(f"按'q'键停止处理，按'p'键暂停/继续")

        # Two independent timers: the console progress line refreshes every
        # 2 s and the preview window every 1 s.  (A single shared timer would
        # let the 1 s preview refresh reset the clock and starve the console
        # update entirely.)
        start_time = time.time()
        frame_count = 0
        last_progress_time = start_time
        last_preview_time = start_time
        update_interval = 1.0  # preview refresh interval (seconds)

        show_preview = True
        paused = False
        annotated_frame = None  # last rendered frame, re-shown while paused

        try:
            while True:
                if paused:
                    # While paused, keep showing the last annotated frame with
                    # a pause banner and poll the keyboard.
                    if show_preview and annotated_frame is not None:
                        cv2.putText(
                            annotated_frame,
                            "已暂停 (按'p'继续)",
                            (10, 70),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.8,
                            (0, 0, 255),
                            2
                        )
                        cv2.imshow("YOLOv8视频检测", annotated_frame)

                    key = cv2.waitKey(100) & 0xFF
                    if key == ord('q'):
                        print("处理已中止")
                        break
                    elif key == ord('p'):
                        paused = False
                        print("处理已继续")
                    continue

                ret, frame = cap.read()
                if not ret:
                    print("\n视频处理完成")
                    break

                frame_count += 1
                current_time = time.time()
                elapsed_time = current_time - start_time
                progress = frame_count / total_frames * 100

                # Throughput and remaining time once we have more than one frame.
                if frame_count > 1 and elapsed_time > 0:
                    frames_per_second = frame_count / elapsed_time
                    seconds_per_frame = elapsed_time / frame_count
                    remaining_frames = total_frames - frame_count
                    eta = remaining_frames * seconds_per_frame

                    progress_info = f"进度: {progress:.1f}% | 速度: {frames_per_second:.1f}帧/秒 | ETA: {eta:.1f}秒"
                else:
                    progress_info = f"进度: {progress:.1f}%"

                # Refresh the single-line console progress (uses \r).
                if current_time - last_progress_time >= 2.0 or frame_count == 1:
                    sys.stdout.write(f"\r{progress_info}")
                    sys.stdout.flush()
                    last_progress_time = current_time

                # YOLOv8 inference on this frame.
                results = model(frame, conf=conf_threshold)

                # Render detections onto a copy of the frame.
                annotated_frame = results[0].plot()

                # Overlay the progress readout.
                cv2.putText(
                    annotated_frame,
                    progress_info,
                    (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    (0, 255, 0),
                    2
                )

                out.write(annotated_frame)

                # Throttle preview refreshes so UI work doesn't slow processing.
                if current_time - last_preview_time >= update_interval or frame_count == 1:
                    if show_preview:
                        cv2.imshow("YOLOv8视频检测", annotated_frame)
                    last_preview_time = current_time

                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    print("\n处理已中止")
                    break
                elif key == ord('p'):
                    paused = True
                    print("\n处理已暂停 (按'p'继续)")

        except KeyboardInterrupt:
            print("\n处理已被用户中断")
        except Exception as e:
            print(f"\n处理视频时出错: {e}")
        finally:
            # Newline after the \r-refreshed progress line.
            print()

            cap.release()
            out.release()
            cv2.destroyAllWindows()

            if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
                print(f"处理结果已保存至: {output_path}")
            else:
                print("警告: 未生成输出视频或输出视频为空")
                if os.path.exists(output_path):
                    os.remove(output_path)

    except Exception as e:
        print(f"视频处理过程中发生异常: {e}")
        cv2.destroyAllWindows()  # make sure any window is closed

def select_screen_region():
    """Let the user drag-select a rectangular screen region.

    Returns:
        A (x, y, width, height) tuple in absolute screen coordinates, or
        None when the selection was cancelled, failed, or is too small
        (callers treat None as "use the full screen").
    """
    try:
        print("请按住鼠标左键拖动选择屏幕区域，释放鼠标完成选择")
        print("(选择好区域后将自动开始检测)")

        # Small always-on-top helper window that captures the drag events.
        root = tk.Tk()
        root.title("选择屏幕区域")
        root.attributes("-topmost", True)

        label = tk.Label(root, text="请按住鼠标左键拖动选择屏幕区域，\n释放鼠标完成选择")
        label.pack(padx=20, pady=20)

        # Drag state shared with the event handlers.
        region = {"start_x": 0, "start_y": 0, "end_x": 0, "end_y": 0, "selected": False}

        def on_mouse_down(event):
            # Record the drag origin in absolute screen coordinates.
            region["start_x"] = event.x_root
            region["start_y"] = event.y_root
            root.withdraw()  # hide the helper window during the drag

        def on_mouse_up(event):
            region["end_x"] = event.x_root
            region["end_y"] = event.y_root
            region["selected"] = True
            root.quit()  # leave the main loop

        root.bind("<ButtonPress-1>", on_mouse_down)
        root.bind("<ButtonRelease-1>", on_mouse_up)

        # Center the helper window on screen.
        screen_width = root.winfo_screenwidth()
        screen_height = root.winfo_screenheight()
        window_width = 300
        window_height = 100
        position_x = int((screen_width - window_width) / 2)
        position_y = int((screen_height - window_height) / 2)
        root.geometry(f"{window_width}x{window_height}+{position_x}+{position_y}")

        root.mainloop()
        # quit() only stops the event loop; destroy the window as well so
        # the (withdrawn) Tk root is not leaked.
        root.destroy()

        if region["selected"]:
            # Normalize so a drag in any direction yields a valid rectangle.
            x1 = min(region["start_x"], region["end_x"])
            y1 = min(region["start_y"], region["end_y"])
            x2 = max(region["start_x"], region["end_x"])
            y2 = max(region["start_y"], region["end_y"])

            width = x2 - x1
            height = y2 - y1

            # Reject degenerate selections.
            if width < 10 or height < 10:
                print("选择的区域过小，将使用全屏")
                return None

            return (x1, y1, width, height)

        return None
    except Exception as e:
        print(f"选择区域失败: {e}")
        return None

def run_screen_detection(model, output_dir, conf_threshold=0.25, select_region=True):
    """Run YOLOv8 detection on live screen captures and record the result.

    Args:
        model: loaded YOLO model (callable on a frame).
        output_dir: directory to place the recorded video in.
        conf_threshold: minimum detection confidence.
        select_region: if True, let the user drag-select a capture region
            first; on cancel/failure the whole screen is captured.
    """
    try:
        import pyautogui

        output_path = os.path.join(output_dir, generate_output_name("screen", ".mp4"))

        # Optionally let the user pick a capture region.
        region = None
        if select_region:
            region = select_screen_region()

        # Resolve the capture size once (original duplicated this for the
        # writer); a None region means full screen.
        if region is None:
            frame_width, frame_height = pyautogui.size()
            print(f"使用全屏模式: {frame_width}x{frame_height}")
        else:
            x, y, frame_width, frame_height = region
            print(f"使用选定区域: 位置({x}, {y}), 大小{frame_width}x{frame_height}")

        # Screen capture is relatively slow, so record at a modest fps.
        fps = 15
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
        if not out.isOpened():
            print("错误: 无法创建输出视频文件")
            return

        print(f"屏幕检测已启动")
        print(f"输出文件: {output_path}")
        print(f"按'q'键停止检测")

        print("按'p'键暂停/继续检测")

        paused = False

        # For live FPS measurement.
        start_time = time.time()
        frame_count = 0

        try:
            while True:
                # Grab the screen (or the selected sub-region).
                if region is None:
                    screenshot = pyautogui.screenshot()
                else:
                    rx, ry, rw, rh = region
                    screenshot = pyautogui.screenshot(region=(rx, ry, rw, rh))
                frame = np.array(screenshot)

                # PIL screenshots are RGB; OpenCV expects BGR.
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

                # Running average FPS since start.
                frame_count += 1
                elapsed_time = time.time() - start_time
                current_fps = frame_count / elapsed_time if elapsed_time > 0 else 0

                if not paused:
                    # YOLOv8 inference on this frame.
                    results = model(frame, conf=conf_threshold)

                    # Render detections onto a copy of the frame.
                    annotated_frame = results[0].plot()

                    cv2.putText(
                        annotated_frame,
                        f"FPS: {current_fps:.1f}",
                        (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,
                        (0, 255, 0),
                        2
                    )
                else:
                    # While paused, record/show the raw frame with a banner.
                    annotated_frame = frame.copy()
                    cv2.putText(
                        annotated_frame,
                        "已暂停 (按'p'继续)",
                        (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,
                        (0, 0, 255),
                        2
                    )

                # NOTE(review): on HiDPI displays the screenshot can be larger
                # than pyautogui.size(); such frames mismatch the writer size
                # and may be dropped — confirm on the target platforms.
                out.write(annotated_frame)

                cv2.imshow("YOLOv8屏幕检测", annotated_frame)

                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                elif key == ord('p'):
                    paused = not paused
                    status = "暂停" if paused else "继续"
                    print(f"检测已{status}")

        except KeyboardInterrupt:
            print("\n检测已被用户中断")
        finally:
            # Always release the writer and close windows.
            out.release()
            cv2.destroyAllWindows()

            if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
                print(f"检测结果已保存至: {output_path}")
            else:
                print("警告: 未生成输出视频或输出视频为空")
                if os.path.exists(output_path):
                    os.remove(output_path)

    except ImportError:
        print("错误: 缺少必要的依赖包。请安装pyautogui: pip install pyautogui")
    except Exception as e:
        print(f"屏幕检测出错: {e}")

def run_screen_overlay_detection(model, output_dir, conf_threshold=0.25):
    """Run screen detection drawn on a transparent overlay (no extra window).

    A daemon thread captures the screen, runs YOLOv8, records an annotated
    video, and pushes detections to a TransparentOverlay; the overlay's own
    event loop runs on the calling thread until the user quits.

    Args:
        model: loaded YOLO model (callable on a frame; provides `.names`).
        output_dir: directory to place the recorded video in.
        conf_threshold: minimum detection confidence.
    """
    try:
        # Required third-party dependencies.
        try:
            import pyautogui
            from PIL import Image, ImageTk, ImageDraw, ImageFont
            import tkinter as tk
            import platform
        except ImportError as e:
            print(f"错误: 缺少必要的依赖包: {e}")
            print("请运行以下命令安装所需依赖:")
            print("pip install pyautogui Pillow")
            return

        system = platform.system()
        print(f"当前操作系统: {system}")

        if system != "Windows":
            print(f"注意: 在{system}上运行屏幕覆盖层可能需要特定权限")
            print("如果出现问题，请尝试以管理员/root权限运行程序")

        # Project-local overlay implementation; imported once for every
        # platform (the original duplicated this block per OS).
        try:
            from utils.screen_overlay import TransparentOverlay
            print("✓ 成功导入屏幕覆盖层模块")
        except ImportError as e:
            print(f"错误: 无法导入屏幕覆盖层模块: {e}")
            print("请确保utils/screen_overlay.py文件存在且路径正确")
            return
        except Exception as e:
            print(f"错误: 导入屏幕覆盖层模块时出现未知错误: {e}")
            return

        output_path = os.path.join(output_dir, generate_output_name("screen_overlay", ".mp4"))
        print(f"检测结果将保存至: {output_path}")

        try:
            print("正在创建屏幕覆盖层...")
            overlay = TransparentOverlay(alpha=0.7)
            print("✓ 覆盖层创建成功")
        except Exception as e:
            print(f"错误: 创建覆盖层失败: {e}")
            print("这可能是由操作系统限制或缺少必要组件导致")
            return

        def detection_thread():
            """Capture/detect/record loop; exits when overlay.running is False."""
            try:
                start_time = time.time()
                frame_count = 0
                last_fps_update = 0
                current_fps = 0

                # Record the full screen at a modest fps.
                screen_width, screen_height = pyautogui.size()
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                out = cv2.VideoWriter(output_path, fourcc, 15, (screen_width, screen_height))

                print("✓ 检测线程已启动，开始屏幕检测")

                while overlay.running:
                    try:
                        screenshot = pyautogui.screenshot()
                        frame = np.array(screenshot)
                        # PIL screenshots are RGB; OpenCV expects BGR.
                        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

                        frame_count += 1
                        current_time = time.time()
                        elapsed_time = current_time - start_time

                        # Refresh the FPS figure once per second.
                        if current_time - last_fps_update >= 1.0:
                            current_fps = frame_count / elapsed_time
                            last_fps_update = current_time

                        # YOLOv8 inference on this frame.
                        results = model(frame, conf=conf_threshold)

                        # Convert boxes into the overlay's dict format.
                        detections = []
                        for r in results:
                            for box in r.boxes:
                                cls = int(box.cls[0])
                                conf = float(box.conf[0])
                                x1, y1, x2, y2 = box.xyxy[0].tolist()
                                detections.append({
                                    'class': model.names[cls],
                                    'confidence': conf,
                                    'box': (x1, y1, x2, y2)
                                })

                        overlay.update_frame(detections, current_fps)

                        # Record the annotated frame with an FPS readout.
                        annotated_frame = results[0].plot()
                        cv2.putText(
                            annotated_frame,
                            f"FPS: {current_fps:.1f}",
                            (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1,
                            (0, 255, 0),
                            2
                        )
                        out.write(annotated_frame)

                        # Brief sleep to limit CPU usage.
                        time.sleep(0.01)
                    except Exception as e:
                        print(f"检测线程处理帧时出错: {e}")
                        # Keep the loop alive on per-frame errors.
                        time.sleep(0.5)

                # Finalize the recording.
                out.release()

                if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
                    print(f"检测结果已保存至: {output_path}")
                else:
                    print("警告: 未生成输出视频或输出视频为空")
                    if os.path.exists(output_path):
                        os.remove(output_path)

            except Exception as e:
                print(f"检测线程出错: {e}")

        print(f"屏幕覆盖层检测已启动")
        print(f"输出文件: {output_path}")
        print(f"按'ESC'或'q'键停止检测")
        print(f"按'+'/'-'键调整覆盖层透明度")

        try:
            detection_thread_obj = threading.Thread(target=detection_thread, daemon=True)
            detection_thread_obj.start()
            print("✓ 检测线程已成功启动")
        except Exception as e:
            print(f"错误: 无法启动检测线程: {e}")
            return

        # Blocks until the overlay window is closed by the user.
        try:
            overlay.start()
        except Exception as e:
            print(f"错误: 启动覆盖层窗口失败: {e}")

        # Wait for the daemon thread to finish writing and releasing the
        # video; without the join it could be killed mid out.release() when
        # the interpreter exits, corrupting the recording.
        detection_thread_obj.join(timeout=5.0)

        print("检测已结束")

    except ImportError as e:
        print(f"错误: 缺少必要的依赖包: {e}")
        print("请安装以下依赖包:")
        print("pip install pyautogui Pillow opencv-python numpy")
    except PermissionError as e:
        print(f"错误: 权限不足: {e}")
        print("屏幕截图可能需要特殊权限，请尝试以管理员/root权限运行程序")
    except Exception as e:
        print(f"屏幕覆盖层检测出错: {e}")