import os
import cv2
from moviepy.editor import VideoFileClip, clips_array, concatenate_videoclips
from moviepy.editor import ColorClip, ImageClip
from paddleocr import PaddleOCR
import paddle
import tkinter as tk
from tkinter import filedialog, messagebox, ttk
import threading
import sys
import shutil
from pathlib import Path
import numpy as np


def copy_models():
    """Ensure the OCR model files exist under the user's ~/.paddlex directory.

    Does nothing when the destination already holds files. Otherwise walks
    upward from this file's directory looking for the packaged model folder
    and copies it into place.

    Raises:
        FileNotFoundError: no packaged model directory could be located.
    """
    destination = Path.home() / ".paddlex" / "official_models"

    # Skip the copy entirely when the destination already has content.
    if destination.exists() and any(destination.iterdir()):
        print("模型已存在，跳过复制")
        return

    # Search this directory and every ancestor for the bundled models
    # produced by the packaging step.
    here = Path(__file__).parent
    for candidate_root in [here, *here.parents]:
        bundled = candidate_root / "dist" / "ocrapp" / "_internal" / ".paddlex" / "official_models"
        if bundled.exists():
            # Copy the whole tree into the destination (merging is fine).
            shutil.copytree(bundled, destination, dirs_exist_ok=True)
            print(f"模型已复制到: {destination}")
            return

    # No ancestor contained the packaged models.
    raise FileNotFoundError("找不到模型目录，请确保项目结构完整")


# Keyword sets used to recognize loot-container names in OCR output.
# A container is matched when EVERY keyword in its list appears somewhere
# in the recognized text (see TargetDetector.detect_target); using short
# keyword fragments tolerates partial or garbled OCR reads of a full name.
TARGET_CONFIG = {
    '电脑机箱': ['机', '机箱'],
    '快递箱': ['递', '快'],
    '军用医疗包': ['军', '用'],
    '航空储物箱': ['航', '空'],
    '旅行袋': ['旅', '行', '袋'],
    '野外物资箱': ['野', '外'],
    '工具柜': ['工具柜', '柜'],
    # '工具盒': ['工具盒', '盒'],
    '手提箱': ['手', '提'],
    '电脑包': ['电脑', ],
    '抽屉柜': ['屉', '抽'],
    '小保险箱': ['小'],
    '大武器箱': ['大', '武', '器'],
    '医疗物资堆': ['医', '疗', '堆'],
    '保险箱': ['保', '险'],
    '一件衣服': ['一', '件', '衣'],
    '弹药箱': ['弹', '药'],
    '服务器': ['服', '务'],
    '高级旅行箱': ['高', '级'],
    '鸟窝': ['鸟', '窝'],
    '垃圾桶': ['垃', '圾'],
    '收纳盒': ['收', '纳'],
    # '战利品': ['战', '利'],
    '藏匿处': ['藏', '匿', '处'],
    '返回舱': ['返', '回', '舱'],
}

# Layout options: UI label -> number of sub-clips tiled into one output video.
LAYOUT_OPTIONS = {
    "单画面": 1,
    "2x2 (4画面)": 4,
    "3x2 (6画面)": 6,
    "4x2 (8画面)": 8
}


def resource_path(relative_path):
    """Return the absolute path of a bundled resource.

    Inside a PyInstaller bundle, resources live under the temporary
    extraction directory ``sys._MEIPASS``; otherwise they are resolved
    relative to the current working directory.

    Args:
        relative_path: Path fragment relative to the resource root.

    Returns:
        Absolute filesystem path to the resource.
    """
    # Check `frozen` in addition to _MEIPASS, matching the duplicate of this
    # helper inside VideoProcessorApp.init_ocr so both resolve identically.
    if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
        return os.path.join(sys._MEIPASS, relative_path)
    return os.path.join(os.path.abspath("."), relative_path)


class TargetDetector:
    """State holder for one video scan.

    Tracks the timing bookkeeping used by the frame-scanning loop and
    matches OCR text against the known container keyword sets.
    """

    def __init__(self, start_offset=0, end_delay=1.0):
        # Timing parameters supplied by the UI.
        self.start_offset = start_offset  # seconds to rewind before a detection
        self.end_delay = end_delay        # seconds of freeze-frame appended at the end
        # Per-scan mutable state.
        self.current_target = None
        self.in_search = False
        self.search_start_time = 0
        self.search_end_time = 0
        self.post_search_frames = 0
        self.target_clips = []    # finished clip descriptors (dicts)
        self.zoomed_in = False
        self.zoomed_frames = []   # timestamps of frames that should be magnified

    def detect_target(self, text):
        """Return the first container name whose keywords all occur in *text*, else None."""
        matches = (
            name
            for name, keywords in TARGET_CONFIG.items()
            if all(keyword in text for keyword in keywords)
        )
        return next(matches, None)


class MultiClipProcessor:
    """Tk tab that tiles groups of videos into single grid-layout output videos."""

    def __init__(self, master, app):
        """
        Args:
            master: Parent widget (the Notebook hosting this tab).
            app: Owning VideoProcessorApp instance.
        """
        self.master = master
        self.app = app
        self.frame = tk.Frame(master)
        self.setup_ui()

    def setup_ui(self):
        """Build the widgets for the multi-clip composition tab."""
        # Let the log area (row 4) and the entry column (col 1) absorb resizes.
        self.frame.grid_rowconfigure(4, weight=1)
        self.frame.grid_columnconfigure(1, weight=1)

        # Input folder row
        tk.Label(self.frame, text="视频文件夹:").grid(row=0, column=0, padx=5, pady=5, sticky='e')
        self.input_folder_entry = tk.Entry(self.frame)
        self.input_folder_entry.grid(row=0, column=1, padx=5, pady=5, sticky='we')
        tk.Button(self.frame, text="浏览", command=self.browse_input_folder).grid(row=0, column=2, padx=5, pady=5)

        # Output directory row
        tk.Label(self.frame, text="输出目录:").grid(row=1, column=0, padx=5, pady=5, sticky='e')
        self.output_entry = tk.Entry(self.frame)
        self.output_entry.grid(row=1, column=1, padx=5, pady=5, sticky='we')
        tk.Button(self.frame, text="浏览", command=self.browse_output).grid(row=1, column=2, padx=5, pady=5)

        # Layout selector
        tk.Label(self.frame, text="画面布局:").grid(row=2, column=0, padx=5, pady=5, sticky='e')
        self.layout_var = tk.StringVar(value="2x2 (4画面)")
        self.layout_menu = ttk.Combobox(self.frame, textvariable=self.layout_var,
                                        values=list(LAYOUT_OPTIONS.keys()), state="readonly")
        self.layout_menu.grid(row=2, column=1, padx=5, pady=5, sticky='w')

        # Progress bar
        self.progress = ttk.Progressbar(self.frame, orient="horizontal", mode="determinate")
        self.progress.grid(row=3, column=0, columnspan=3, padx=5, pady=5, sticky='we')

        # Log area container frame
        log_frame = tk.Frame(self.frame)
        log_frame.grid(row=4, column=0, columnspan=3, padx=5, pady=5, sticky='nsew')
        log_frame.grid_rowconfigure(0, weight=1)
        log_frame.grid_columnconfigure(0, weight=1)

        # Log text widget
        self.log_text = tk.Text(log_frame, wrap=tk.WORD, font=('Consolas', 10))
        self.log_text.grid(row=0, column=0, sticky='nsew')

        # Vertical scrollbar
        scroll_y = tk.Scrollbar(log_frame, orient=tk.VERTICAL, command=self.log_text.yview)
        scroll_y.grid(row=0, column=1, sticky='ns')
        self.log_text['yscrollcommand'] = scroll_y.set

        # Horizontal scrollbar
        scroll_x = tk.Scrollbar(log_frame, orient=tk.HORIZONTAL, command=self.log_text.xview)
        scroll_x.grid(row=1, column=0, sticky='ew')
        self.log_text['xscrollcommand'] = scroll_x.set

        # Button row
        button_frame = tk.Frame(self.frame)
        button_frame.grid(row=5, column=0, columnspan=3, pady=5)

        self.start_button = tk.Button(button_frame, text="开始组合", command=self.start_processing)
        self.start_button.pack(side=tk.LEFT, padx=5)

        tk.Button(button_frame, text="清空日志", command=self.clear_log).pack(side=tk.LEFT, padx=5)

    def browse_input_folder(self):
        """Pick the folder containing the input videos."""
        dirpath = filedialog.askdirectory()
        if dirpath:
            self.input_folder_entry.delete(0, tk.END)
            self.input_folder_entry.insert(0, dirpath)
            self.log_message(f"已选择输入文件夹: {dirpath}")

    def browse_output(self):
        """Pick the output directory."""
        dirpath = filedialog.askdirectory()
        if dirpath:
            self.output_entry.delete(0, tk.END)
            self.output_entry.insert(0, dirpath)
            self.log_message(f"已选择输出目录: {dirpath}")

    def log_message(self, message, error=False):
        """Append a line to the log box; errors are rendered in red."""
        self.log_text.config(state="normal")
        tag = "error" if error else "info"
        self.log_text.insert(tk.END, message + "\n", tag)
        self.log_text.see(tk.END)
        self.log_text.config(state="disabled")

        # tag_config applies retroactively to already-inserted tagged text.
        if error:
            self.log_text.tag_config("error", foreground="red")
        else:
            self.log_text.tag_config("info", foreground="black")

    def clear_log(self):
        """Erase all log content."""
        self.log_text.config(state="normal")
        self.log_text.delete(1.0, tk.END)
        self.log_text.config(state="disabled")

    def update_progress(self, value):
        """Set the progress bar to *value* (0-100) and refresh the UI."""
        self.progress["value"] = value
        self.master.update_idletasks()

    def start_processing(self):
        """Validate inputs, collect video files, and launch the worker thread."""
        input_folder = self.input_folder_entry.get()
        output_dir = self.output_entry.get()

        if not input_folder:
            messagebox.showerror("错误", "请选择视频文件夹!")
            return

        if not output_dir:
            messagebox.showerror("错误", "请选择输出目录!")
            return

        # Recursively gather all video files under the input folder.
        video_files = []
        for root, dirs, files in os.walk(input_folder):
            for file in files:
                if file.lower().endswith(('.mp4', '.avi', '.mov')):
                    video_files.append(os.path.join(root, file))

        if not video_files:
            messagebox.showerror("错误", "指定文件夹中没有找到视频文件!")
            return

        # Resolve the selected layout into a clips-per-output count.
        selected_layout = self.layout_var.get()
        clips_per_output = LAYOUT_OPTIONS[selected_layout]

        # Disable the start button while the worker runs.
        self.start_button.config(state="disabled")
        self.log_message(f"开始处理 {len(video_files)} 个视频...")
        self.log_message(f"使用布局: {selected_layout} (每组{clips_per_output}个视频)")

        # Run the heavy work off the UI thread.
        thread = threading.Thread(
            target=self.process_videos,
            args=(video_files, output_dir, clips_per_output),
            daemon=True
        )
        thread.start()

    def process_videos(self, video_files, output_dir, clips_per_output):
        """Worker: combine videos in groups of *clips_per_output* into grid layouts.

        NOTE(review): this runs on a worker thread but calls tkinter widgets
        (log_message, progress, button state) directly; tkinter is not
        thread-safe, so consider routing UI updates through master.after().
        """
        try:
            # Map clip count to grid rows/cols; anything unknown gets 1x1.
            if clips_per_output == 1:
                rows, cols = 1, 1
            elif clips_per_output == 4:
                rows, cols = 2, 2
            elif clips_per_output == 6:
                rows, cols = 2, 3
            elif clips_per_output == 8:
                rows, cols = 2, 4
            else:
                rows, cols = 1, 1

            # Each tile's size inside the fixed 1920x1080 output canvas.
            subclip_width = 1920 // cols
            subclip_height = 1080 // rows

            for i in range(0, len(video_files), clips_per_output):
                group_videos = video_files[i:i + clips_per_output]
                clip_group = []
                durations = []

                # 1. Load every video in the group and record its duration.
                for video_path in group_videos:
                    try:
                        video_clip = VideoFileClip(video_path)
                        resized_clip = video_clip.resize((subclip_width, subclip_height))
                        # Append to both lists only after the resize succeeds so
                        # clip_group and durations stay index-aligned (previously
                        # a resize failure left an orphaned duration entry and
                        # later clips paired with the wrong duration in zip()).
                        clip_group.append(resized_clip)
                        durations.append(video_clip.duration)
                    except Exception as e:
                        self.log_message(f"处理视频{os.path.basename(video_path)}时出错:{str(e)}", error=True)
                        continue

                # Skip the group when no video loaded; otherwise max_duration
                # would be 0 and we would emit an all-blank zero-length output.
                if not clip_group:
                    self.log_message("该组没有可用视频，跳过", error=True)
                    continue

                # 2. Determine the longest duration in the group.
                max_duration = max(durations)

                # 3. Extend shorter videos by freezing their last frame.
                final_clips = []
                for clip, duration in zip(clip_group, durations):
                    if duration < max_duration:
                        try:
                            # Sample just before the end to safely grab the last frame.
                            last_frame = clip.get_frame(clip.duration - 0.1)
                            # Build a still clip covering the missing time.
                            frozen_clip = ImageClip(last_frame, duration=max_duration - duration)
                            frozen_clip = frozen_clip.resize((subclip_width, subclip_height))
                            # Concatenate the original video with the freeze-frame.
                            extended_clip = concatenate_videoclips([clip, frozen_clip])
                            final_clips.append(extended_clip)
                        except Exception as e:
                            self.log_message(f"延长视频时出错: {str(e)}", error=True)
                            # Fall back to the unextended clip.
                            final_clips.append(clip)
                    else:
                        final_clips.append(clip)

                # 4. Pad the grid with blank tiles if the group is short.
                while len(final_clips) < clips_per_output:
                    try:
                        # Plain black tile for the whole duration.
                        blank_clip = ColorClip((subclip_width, subclip_height), color=(0, 0, 0), duration=max_duration)
                        blank_clip = blank_clip.set_fps(24)  # explicit fps for the still clip
                        final_clips.append(blank_clip)
                    except Exception as e:
                        self.log_message(f"创建空白剪辑时出错: {str(e)}", error=True)
                        # Fallback: blacken frames taken from the first video.
                        blank_clip = VideoFileClip(video_files[0]).subclip(0, 0.1).set_duration(max_duration)
                        blank_clip = blank_clip.resize((subclip_width, subclip_height))
                        blank_clip = blank_clip.fl_image(lambda frame: np.zeros_like(frame))
                        final_clips.append(blank_clip)

                # 5. Assemble the grid and encode the output file.
                try:
                    if clips_per_output == 1:
                        final_clip = final_clips[0]
                    else:
                        clip_rows = []
                        for row in range(rows):
                            row_clips = final_clips[row * cols: (row + 1) * cols]
                            clip_rows.append(row_clips)
                        final_clip = clips_array(clip_rows).resize((1920, 1080))

                    output_path = os.path.join(output_dir, f"组合_{i // clips_per_output + 1}.mp4")

                    # Normalize fps/duration across the composite.
                    final_clip = final_clip.set_fps(24).set_duration(max_duration)

                    final_clip.write_videofile(
                        output_path,
                        codec="libx264",
                        audio_codec="aac",
                        threads=4,
                        preset="slow",
                        bitrate=f"{4000 * clips_per_output}k",
                        ffmpeg_params=[
                            "-crf", "18",
                            "-pix_fmt", "yuv420p"
                        ],
                        logger=None
                    )
                    self.log_message(f"已保存组合视频: {output_path}")

                    # Explicitly close all clips to release file handles/memory.
                    for clip in final_clips:
                        if hasattr(clip, 'close'):
                            clip.close()
                    if hasattr(final_clip, 'close'):
                        final_clip.close()

                except Exception as e:
                    self.log_message(f"合成视频时出错: {str(e)}", error=True)
                    # Close clips on failure too.
                    for clip in final_clips:
                        if hasattr(clip, 'close'):
                            clip.close()

        except Exception as e:
            self.log_message(f"处理过程中发生错误: {str(e)}", error=True)
        finally:
            # Re-enable the start button regardless of outcome.
            self.start_button.config(state="normal")


class VideoProcessorApp:
    """Main application window: OCR-driven detection and clipping of loot-container
    scenes from gameplay videos, plus a second tab for multi-clip composition."""

    def __init__(self, root):
        self.root = root
        self.root.title("三角洲自动剪辑工具")
        self.ocr = None                        # PaddleOCR engine, set by init_ocr
        self.video_queue = []                  # batch-upload queue of file paths
        self.current_processing_video = None
        self.start_offset = 0                  # seconds to rewind before a detection
        self.end_delay = 1.0                   # seconds of freeze-frame appended at clip end
        self.use_gpu = True                    # prefer GPU when CUDA is available
        self.processing_thread = None          # worker thread handle
        self.stop_processing = False           # cooperative stop flag for the worker

        # Keep the window from being shrunk into uselessness.
        self.root.minsize(400, 400)

        # Build UI first so init_ocr can log into it.
        self.setup_main_ui()
        self.init_ocr()

    def init_ocr(self):
        """Initialize (or re-initialize) the PaddleOCR engine on CPU or GPU."""
        try:
            # Local duplicate of the module-level helper: resolves bundled
            # model paths when running from a PyInstaller bundle.
            def resource_path(relative_path):
                if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
                    return os.path.join(sys._MEIPASS, relative_path)
                return os.path.join(os.path.abspath("."), relative_path)

            # Fall back to CPU when CUDA is unavailable even if GPU is requested.
            device = 'gpu' if self.use_gpu and paddle.is_compiled_with_cuda() else 'cpu'

            self.ocr = PaddleOCR(
                det_model_dir=resource_path(os.path.join("model", "PP-OCRv5_server_det")),
                rec_model_dir=resource_path(os.path.join("model", "PP-OCRv5_server_rec")),
                use_textline_orientation=True,
                device=device
            )
            self.log_message("OCR引擎初始化成功")
            self.log_message(f"使用设备: {device.upper()}")
            self.log_message(f"CUDA可用: {paddle.is_compiled_with_cuda()}")
            self.log_message(f"检测设备: {paddle.device.get_device()}")
        except Exception as e:
            self.log_message(f"OCR初始化失败: {str(e)}", error=True)

    def toggle_device(self):
        """Flip between CPU and GPU and rebuild the OCR engine."""
        self.use_gpu = not self.use_gpu
        device_status = "GPU" if self.use_gpu else "CPU"
        self.device_button.config(text=f"切换设备: 使用{device_status}")
        self.log_message(f"已切换到: {device_status}")

        # The engine must be rebuilt to pick up the new device.
        self.init_ocr()

    def setup_main_ui(self):
        """Build the top-level notebook with both tabs."""
        # Main container frame
        main_frame = tk.Frame(self.root)
        main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # Tab container
        self.notebook = ttk.Notebook(main_frame)
        self.notebook.pack(fill=tk.BOTH, expand=True)

        # Tab 1: video processing
        self.video_frame = tk.Frame(self.notebook)
        self.notebook.add(self.video_frame, text="视频处理")
        self.setup_video_ui()

        # Tab 2: multi-clip composition
        self.multi_clip_processor = MultiClipProcessor(self.notebook, self)
        self.notebook.add(self.multi_clip_processor.frame, text="多画面组合")

    def setup_video_ui(self):
        """Build the widgets of the video-processing tab."""
        self.video_frame.grid_rowconfigure(5, weight=1)
        self.video_frame.grid_columnconfigure(1, weight=1)
        # Detection-window toggle; the checkbox widget is currently disabled,
        # so this stays False and the OCR preview window is never shown.
        self.show_detection_var = tk.BooleanVar(value=False)
        # self.show_detection_check = tk.Checkbutton(self.video_frame, text="显示检测窗口", variable=self.show_detection_var)
        # self.show_detection_check.grid(row=5, column=2, columnspan=2, padx=5, pady=5, sticky='w')

        # CPU/GPU toggle button
        self.device_button = tk.Button(self.video_frame, text="切换设备: 使用GPU", command=self.toggle_device)
        self.device_button.grid(row=1, column=3, padx=5, pady=5, sticky='w')

        # Input video row
        tk.Label(self.video_frame, text="输入视频:").grid(row=0, column=0, padx=5, pady=5, sticky='e')
        self.input_entry = tk.Entry(self.video_frame)
        self.input_entry.grid(row=0, column=1, padx=5, pady=5, sticky='we')
        tk.Button(self.video_frame, text="浏览", command=self.browse_input).grid(row=0, column=2, padx=5, pady=5)

        # Batch upload button
        tk.Button(self.video_frame, text="批量上传", command=self.batch_upload).grid(row=0, column=3, padx=5, pady=5)

        # Output directory row
        tk.Label(self.video_frame, text="输出目录:").grid(row=1, column=0, padx=5, pady=5, sticky='e')
        self.output_entry = tk.Entry(self.video_frame)
        self.output_entry.grid(row=1, column=1, padx=5, pady=5, sticky='we')
        tk.Button(self.video_frame, text="浏览", command=self.browse_output).grid(row=1, column=2, padx=5, pady=5)

        # Start-offset setting (seconds of lead-in before a detection)
        tk.Label(self.video_frame, text="开始时间偏移(秒):").grid(row=2, column=0, padx=5, pady=5, sticky='e')
        self.offset_var = tk.StringVar(value="0")

        self.offset_entry = tk.Entry(self.video_frame, textvariable=self.offset_var, width=10)
        self.offset_entry.grid(row=2, column=1, padx=5, pady=5, sticky='w')
        tk.Label(self.video_frame, text="(检测到容器后提前多久剪辑)").grid(row=2, column=2, columnspan=2, padx=5, pady=5, sticky='w')

        # End-delay setting (freeze-frame tail length)
        tk.Label(self.video_frame, text="结束延长时间(秒):").grid(row=3, column=0, padx=5, pady=5, sticky='e')
        self.end_delay_var = tk.StringVar(value="1.0")
        self.end_delay_entry = tk.Entry(self.video_frame, textvariable=self.end_delay_var, width=10)
        self.end_delay_entry.grid(row=3, column=1, padx=5, pady=5, sticky='w')
        tk.Label(self.video_frame, text="(延长最后一帧画面的时间)").grid(row=3, column=2, columnspan=2, padx=5, pady=5, sticky='w')

        # Progress bar
        self.progress = ttk.Progressbar(self.video_frame, orient="horizontal", mode="determinate")
        self.progress.grid(row=4, column=0, columnspan=4, padx=5, pady=5, sticky='we')

        # Log area container frame
        log_frame = tk.Frame(self.video_frame)
        log_frame.grid(row=5, column=0, columnspan=4, padx=5, pady=5, sticky='nsew')

        # Let the log text expand with the window.
        log_frame.grid_rowconfigure(0, weight=1)
        log_frame.grid_columnconfigure(0, weight=1)

        # Log text widget
        self.log_text = tk.Text(log_frame, wrap=tk.WORD, font=('Consolas', 10))
        self.log_text.grid(row=0, column=0, sticky='nsew')

        # Vertical scrollbar
        scroll_y = tk.Scrollbar(log_frame, orient=tk.VERTICAL, command=self.log_text.yview)
        scroll_y.grid(row=0, column=1, sticky='ns')
        self.log_text['yscrollcommand'] = scroll_y.set

        # Horizontal scrollbar
        scroll_x = tk.Scrollbar(log_frame, orient=tk.HORIZONTAL, command=self.log_text.xview)
        scroll_x.grid(row=1, column=0, sticky='ew')
        self.log_text['xscrollcommand'] = scroll_x.set

        # Button row
        button_frame = tk.Frame(self.video_frame)
        button_frame.grid(row=6, column=0, columnspan=4, pady=5)

        self.start_button = tk.Button(button_frame, text="开始", command=self.toggle_processing)
        self.start_button.pack(side=tk.LEFT, padx=5)

        tk.Button(button_frame, text="退出", command=self.root.quit).pack(side=tk.LEFT, padx=5)

    def toggle_processing(self):
        """Start processing, or request a stop if already running."""
        if self.start_button.cget("text") == "开始":
            self.start_processing()
        else:
            # Cooperative stop: the worker thread polls this flag.
            self.stop_processing = True
            self.log_message("正在停止处理，请稍候...")

    def browse_input(self):
        """Pick a single input video file."""
        filepath = filedialog.askopenfilename(filetypes=[("视频文件", "*.mp4 *.avi *.mov")])
        if filepath:
            self.input_entry.delete(0, tk.END)
            self.input_entry.insert(0, filepath)

    def batch_upload(self):
        """Pick multiple input videos and queue them for processing."""
        filepaths = filedialog.askopenfilenames(
            title="选择多个视频文件",
            filetypes=[("视频文件", "*.mp4 *.avi *.mov")]
        )

        if filepaths:
            # Replace (not extend) the queue with the new selection.
            self.video_queue = []
            for filepath in filepaths:
                self.video_queue.append(filepath)

            self.log_message(f"已添加 {len(filepaths)} 个视频到处理队列")
            self.input_entry.delete(0, tk.END)
            self.input_entry.insert(0, f"待处理视频: {len(filepaths)} 个")

    def browse_output(self):
        """Pick the output directory."""
        dirpath = filedialog.askdirectory()
        if dirpath:
            self.output_entry.delete(0, tk.END)
            self.output_entry.insert(0, dirpath)

    def log_message(self, message, error=False):
        """Append a line to the log box; errors are rendered in red."""
        self.log_text.config(state="normal")
        tag = "error" if error else "info"
        self.log_text.insert(tk.END, message + "\n", tag)
        self.log_text.see(tk.END)
        self.log_text.config(state="disabled")

        # tag_config applies retroactively to already-inserted tagged text.
        if error:
            self.log_text.tag_config("error", foreground="red")
        else:
            self.log_text.tag_config("info", foreground="black")

    def update_progress(self, value):
        """Set the progress bar to *value* (0-100) and refresh the UI."""
        self.progress["value"] = value
        self.root.update_idletasks()

    def start_processing(self):
        """Validate inputs and launch the video-processing worker thread."""
        output_path = self.output_entry.get()

        if not output_path:
            messagebox.showerror("错误", "请选择输出目录!")
            return

        if not self.ocr:
            messagebox.showerror("错误", "OCR引擎未正确初始化!")
            return

        # Parse the start-offset field.
        try:
            self.start_offset = float(self.offset_var.get())
            self.log_message(f"使用开始时间偏移量: {self.start_offset}秒")
        except ValueError:
            messagebox.showerror("错误", "开始时间偏移量必须是有效的数字!")
            return

        # Parse the end-delay field.
        try:
            self.end_delay = float(self.end_delay_var.get())
            self.log_message(f"使用结束延迟时间: {self.end_delay}秒")
        except ValueError:
            messagebox.showerror("错误", "结束延迟时间必须是有效的数字!")
            return

        # Determine the list of videos to process: batch queue first,
        # then the single-file entry.
        videos_to_process = []

        if self.video_queue:
            videos_to_process = self.video_queue.copy()
        elif self.input_entry.get():
            videos_to_process = [self.input_entry.get()]
        else:
            messagebox.showerror("错误", "请选择至少一个视频文件!")
            return

        # Reset the cooperative stop flag.
        self.stop_processing = False

        # Flip the button into its "stop" role.
        self.start_button.config(text="停止检测")
        self.log_message(f"开始处理 {len(videos_to_process)} 个视频...")

        # Run the heavy work off the UI thread.
        self.processing_thread = threading.Thread(
            target=self.video_processor_app,
            args=(videos_to_process, output_path),
            daemon=True
        )
        self.processing_thread.start()

    def video_processor_app(self, video_paths, output_base_dir):
        """Worker: scan each video for targets and save the detected clips.

        NOTE(review): runs on a worker thread but touches tkinter widgets
        directly (log/progress/button); consider routing UI updates through
        root.after() for strict thread safety.
        """
        total_videos = len(video_paths)
        processed = 0  # count of videos actually completed

        for i, video_path in enumerate(video_paths):
            # Honor a stop request between videos.
            if self.stop_processing:
                self.log_message("处理已停止")
                break

            try:
                self.current_processing_video = video_path
                self.log_message(f"\n=== 正在处理视频 {i + 1}/{total_videos}: {os.path.basename(video_path)} ===")

                # Scan for container-search segments.
                detector = self.scan_video_for_targets(video_path)

                # Honor a stop request raised during the scan.
                if self.stop_processing:
                    self.log_message("处理已停止")
                    break

                if detector and detector.target_clips:
                    self.log_message(f"发现 {len(detector.target_clips)} 个目标片段")

                    # Cut and save each detected segment.
                    self.save_individual_clips(video_path, output_base_dir, detector)

                # Update the overall progress across all videos.
                overall_progress = int((i + 1) / total_videos * 100)
                self.update_progress(overall_progress)
                processed += 1

            except Exception as e:
                self.log_message(f"处理视频 {video_path} 时发生错误: {str(e)}", error=True)
                continue
            finally:
                self.current_processing_video = None

        # Report the number of completed videos. (Previously this used the
        # loop index `i`, which under-counted by one and raised NameError
        # when the video list was empty.)
        self.log_message(f"视频处理{'完成' if not self.stop_processing else '已停止'}！共处理了 {processed} 个视频")
        self.update_progress(0)
        self.start_button.config(text="开始")
        self.stop_processing = False

    def scan_video_for_targets(self, video_path):
        """Scan *video_path* with OCR and return a TargetDetector holding the
        detected clip segments, or None if the file cannot be opened."""
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            self.log_message(f"无法打开视频文件: {video_path}", error=True)
            return None

        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # Sample roughly every 0.2s. Clamp to at least 1 so low/invalid fps
        # (< 5) cannot produce a zero interval and a modulo-by-zero below.
        frame_interval = max(1, int(fps * 0.2))
        detector = TargetDetector(self.start_offset, self.end_delay)

        frame_count = 0
        current_time = 0

        self.log_message("正在扫描视频中的目标...")

        # Only create the preview window when the user asked for it.
        if self.show_detection_var.get():
            cv2.namedWindow('OCR Detection', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('OCR Detection', 800, 600)

        while cap.isOpened():
            # Honor a stop request.
            if self.stop_processing:
                break

            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            current_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000

            # Skip frames between sampling points.
            if frame_count % frame_interval != 0:
                continue

            # Run OCR on this frame; optionally show the preview window.
            self.detect_text(frame, detector, current_time, show_window=self.show_detection_var.get())

            # Update the scan progress every 10 sampled frames.
            if frame_count % (frame_interval * 10) == 0:
                progress = int((frame_count / total_frames) * 100)
                self.update_progress(progress)
                self.log_message(f"扫描进度: {progress}%")

            # 'q' aborts the scan (only meaningful with the preview window).
            if self.show_detection_var.get() and cv2.waitKey(1) & 0xFF == ord('q'):
                self.log_message("用户中断扫描")
                break

        cap.release()

        # Tear down the preview window if it was shown.
        if self.show_detection_var.get():
            cv2.destroyAllWindows()

        return detector

    def save_individual_clips(self, video_path, output_base_dir, detector):
        """Cut each detected segment from *video_path*, apply the zoom effect,
        append a freeze-frame tail, and write it under output_base_dir/<target>/."""
        video_clip = VideoFileClip(video_path)
        video_name = os.path.splitext(os.path.basename(video_path))[0]

        for i, clip_info in enumerate(detector.target_clips):
            # Honor a stop request between clips.
            if self.stop_processing:
                break

            target_name = clip_info['target']
            start_time = clip_info['start']
            end_time = clip_info['end']  # already clamped when recorded in detect_text

            # Time at which the zoom-in starts (defaults to the clip start).
            zoom_start_time = clip_info.get('zoom_start', start_time + 0)

            output_dir = os.path.join(output_base_dir, target_name)
            os.makedirs(output_dir, exist_ok=True)

            timestamp = int(start_time * 1000)
            output_path = os.path.join(output_dir, f"{video_name}_{target_name}_{timestamp}.mp4")

            # Cut the raw segment (without the freeze-frame tail).
            original_subclip = video_clip.subclip(start_time, end_time)

            # Sample slightly before the end: get_frame() exactly at
            # `duration` can read past the last frame in moviepy.
            last_frame = original_subclip.get_frame(max(0, original_subclip.duration - 0.05))

            # Still clip holding the last frame for the configured delay.
            frozen_clip = ImageClip(last_frame, duration=detector.end_delay)

            # Append the freeze-frame tail to the raw segment.
            final_clip = concatenate_videoclips([original_subclip, frozen_clip])

            # Per-frame filter: after zoom_start_time, crop the loot-panel
            # region (top-right of the frame) and scale it to full size.
            def make_frame(get_frame, t):
                frame = get_frame(t)
                current_video_time = start_time + t

                if current_video_time >= zoom_start_time:
                    h, w = frame.shape[:2]

                    # Hand-tuned crop rectangles for common resolutions,
                    # proportional fallback for anything else.
                    if w == 2560:
                        crop_width = 750
                        crop_height = 500
                        crop_x = w - crop_width - 200
                        crop_y = 80
                    elif w == 1920:
                        crop_width = 560
                        crop_height = 375
                        crop_x = w - crop_width - 150
                        crop_y = 60
                    else:
                        crop_width = int(w * 0.3)    # 30% of width
                        crop_height = int(h * 0.35)  # 35% of height
                        crop_x = w - crop_width - int(w * 0.08)  # 8% in from the right
                        crop_y = int(h * 0.05)       # 5% down from the top
                    # Clamp the crop rectangle to the frame bounds.
                    crop_x = max(0, min(crop_x, w - 1))
                    crop_y = max(0, min(crop_y, h - 1))
                    crop_width = max(1, min(crop_width, w - crop_x))
                    crop_height = max(1, min(crop_height, h - crop_y))

                    # Crop...
                    cropped = frame[crop_y:crop_y + crop_height, crop_x:crop_x + crop_width]

                    # ...and scale back up to the original frame size.
                    zoomed = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_LINEAR)
                    return zoomed

                return frame

            # Apply the filter and encode the clip.
            processed_clip = final_clip.fl(make_frame, apply_to=['mask', 'video'])
            processed_clip.write_videofile(output_path, codec="libx264", audio_codec="aac", logger=None)

            # Per-clip progress within this video.
            progress = int((i + 1) / len(detector.target_clips) * 100)
            self.update_progress(progress)
            self.log_message(f"已保存: {output_path} (时长: {end_time - start_time + detector.end_delay:.2f}秒)")

        video_clip.close()

    def detect_text(self, frame, detector, current_time, show_window=True):
        """Run OCR on the top-right region of *frame* and advance the
        detector's search state machine; optionally render a preview window."""
        try:
            h, w = frame.shape[:2]
            # ROI: right 40% x top half of the frame, with a 10% inner margin.
            roi_width = int(w / 2.5)
            roi_height = h // 2
            roi_x_start = w - roi_width
            inner_margin_top = int(roi_height * 0.1)
            inner_margin_right = int(roi_width * 0.1)

            roi = frame[
                  inner_margin_top:roi_height,
                  roi_x_start:(w - inner_margin_right)
                  ].copy()

            # Light denoise before OCR.
            roi_processed = cv2.medianBlur(roi, 3)
            result = self.ocr.predict(roi_processed)
            all_texts = []
            combined_text = ""

            # Only pay for the frame copy when the preview is shown.
            if show_window:
                display_frame = frame.copy()

            # PaddleOCR result shape varies by version: dict, list of dicts,
            # or nested lists of (box, (text, score)) blocks.
            if isinstance(result, dict):
                all_texts = result.get('rec_texts', [])
                # Draw detection boxes only in preview mode.
                if 'boxes' in result and show_window:
                    for box in result['boxes']:
                        # Translate ROI coordinates back to the full frame.
                        box[:, 0] += roi_x_start
                        box[:, 1] += inner_margin_top
                        cv2.polylines(display_frame, [box.astype(np.int32)], True, (0, 255, 0), 2)
            elif isinstance(result, list):
                for item in result:
                    if isinstance(item, dict):
                        all_texts.extend(item.get('rec_texts', []))
                        # Draw detection boxes only in preview mode.
                        if 'boxes' in item and show_window:
                            for box in item['boxes']:
                                # Translate ROI coordinates back to the full frame.
                                box[:, 0] += roi_x_start
                                box[:, 1] += inner_margin_top
                                cv2.polylines(display_frame, [box.astype(np.int32)], True, (0, 255, 0), 2)
                    elif isinstance(item, list):
                        for block in item:
                            if len(block) >= 2:
                                text = block[1][0] if isinstance(block[1], (list, tuple)) else str(block[1])
                                all_texts.append(text)

            combined_text = "".join(all_texts)

            # Preview rendering (ROI outline, recognized text, state overlay).
            if show_window:
                # ROI boundary
                cv2.rectangle(display_frame,
                              (roi_x_start, inner_margin_top),
                              (w - inner_margin_right, roi_height),
                              (255, 0, 0), 2)

                # Recognized text
                if combined_text:
                    cv2.putText(display_frame, f"识别文字: {combined_text}", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                # Current timestamp
                cv2.putText(display_frame, f"时间: {current_time:.2f}s", (10, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

                # Search state
                status = "检测中" if detector.in_search else "等待"
                status_color = (0, 255, 255) if detector.in_search else (200, 200, 200)
                cv2.putText(display_frame, f"状态: {status}", (10, 90),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)

                # Matched target, if any (reuse the detector's matcher rather
                # than duplicating the keyword loop here).
                detected_target = detector.detect_target(combined_text)

                if detected_target:
                    cv2.putText(display_frame, f"目标: {detected_target}", (10, 120),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

                # Show the preview window.
                cv2.imshow('OCR Detection', display_frame)
                cv2.waitKey(1)  # brief wait so the window refreshes

            # "正在" ("searching...") on screen marks an active container search.
            has_searching = "正在" in combined_text

            if has_searching and not detector.in_search:
                # Search just started: open a segment, rewound by the offset.
                detector.in_search = True
                detector.search_start_time = current_time - detector.start_offset
                detector.post_search_frames = 0
                detector.current_target = None
                self.log_message(f"检测到容器，开始记录片段")

            elif not has_searching and detector.in_search:
                detector.post_search_frames += 1

                # For a few frames after the search text disappears, the loot
                # panel shows the container name — try to identify it.
                if detector.post_search_frames <= 5:
                    target = detector.detect_target(combined_text)
                    if target:
                        detector.current_target = target
                        self.log_message(f"确认目标: {target}")

                if detector.post_search_frames > 5:
                    # Search window closed: finalize the segment. The tail
                    # freeze-frame is added later in save_individual_clips.
                    detector.in_search = False
                    detector.search_end_time = current_time - 1

                    if detector.current_target:
                        detector.target_clips.append({
                            'target': detector.current_target,
                            'start': max(0, detector.search_start_time),
                            'end': detector.search_end_time
                        })
                        self.log_message(
                            f"保存片段: {detector.current_target} ({detector.search_start_time:.2f}s - {detector.search_end_time:.2f}s)")

                    detector.current_target = None

        except Exception as e:
            self.log_message(f"OCR处理异常: {e}", error=True)


if __name__ == "__main__":
    # Entry point: report the Paddle version, ensure the OCR models are
    # available in the user's home directory, then start the Tk main loop.
    print(paddle.__version__)
    copy_models()
    root = tk.Tk()
    app = VideoProcessorApp(root)
    root.mainloop()