import os
import numpy as np
import cv2
import easyocr
from PIL import Image
import pytesseract
from moviepy.editor import VideoFileClip
from PyQt5.QtCore import QObject, pyqtSignal, QThread


class SubtitleExtractor(QObject):
    """Extract hard-coded (burned-in) subtitles from video frames via OCR.

    Frames are sampled from the video, the subtitle region is cropped
    (auto-detected if not supplied), preprocessed, and run through EasyOCR.
    Adjacent identical captions are merged and optionally written as SRT.
    """

    # Qt signals used to report progress back to the GUI thread.
    progress_updated = pyqtSignal(int)                  # percentage, 0-100
    status_updated = pyqtSignal(str)                    # human-readable status text
    extraction_finished = pyqtSignal(bool, str, list)   # (success, message, subtitles)

    def __init__(self):
        super().__init__()
        # NOTE(review): not referenced inside this class; presumably consumed
        # by the UI for file-dialog filtering — confirm against callers.
        self.supported_formats = ['.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv']
        self.reader = None
        self._init_ocr()

    def _init_ocr(self):
        """Initialize the EasyOCR engine (simplified Chinese + English, CPU only).

        On failure ``self.reader`` stays None and extraction refuses to run.
        """
        try:
            self.reader = easyocr.Reader(['ch_sim', 'en'], gpu=False)
            self.status_updated.emit("OCR引擎初始化完成")
        except Exception as e:
            self.status_updated.emit(f"OCR引擎初始化失败: {str(e)}")
            self.reader = None

    def extract_subtitles_from_video(self, video_path, output_path=None,
                                     sample_interval=1.0, confidence_threshold=60,
                                     subtitle_region=None):
        """Extract hard-coded subtitles from a video file.

        Args:
            video_path: path of the input video.
            output_path: optional path for the generated SRT file.
            sample_interval: sampling interval in seconds.
            confidence_threshold: OCR confidence threshold (0-100).
            subtitle_region: ``(x, y, width, height)`` of the caption area,
                or None to auto-detect.

        Returns:
            Tuple ``(success, message, subtitles)`` where ``subtitles`` is a
            list of dicts with ``start_time``/``end_time``/``text``/``confidence``.
        """
        video = None  # initialized up front so the finally clause is safe
        try:
            if not self.reader:
                return False, "OCR引擎未初始化", []

            self.status_updated.emit("正在加载视频...")
            self.progress_updated.emit(10)

            video = VideoFileClip(video_path)
            duration = video.duration

            self.status_updated.emit("正在分析视频帧...")
            self.progress_updated.emit(20)

            # OCR every sampled frame.
            subtitles = self._extract_subtitles_from_frames(
                video, duration, sample_interval, confidence_threshold, subtitle_region
            )

            self.status_updated.emit("正在处理字幕数据...")
            self.progress_updated.emit(80)

            # Collapse consecutive identical captions into single entries.
            merged_subtitles = self._merge_adjacent_subtitles(subtitles)

            # Write an SRT file if requested; still return the data on failure.
            if output_path:
                success, message = self._save_subtitles(merged_subtitles, output_path)
                if not success:
                    return False, f"保存字幕文件失败: {message}", merged_subtitles

            self.progress_updated.emit(100)
            self.status_updated.emit("字幕提取完成!")

            return True, "字幕提取成功", merged_subtitles

        except Exception as e:
            return False, f"字幕提取失败: {str(e)}", []
        finally:
            # Always release the clip's reader processes, even on error.
            if video is not None:
                video.close()

    def _extract_subtitles_from_frames(self, video, duration, sample_interval,
                                       confidence_threshold, subtitle_region):
        """Sample frames and OCR the subtitle region of each one.

        Returns a chronologically ordered list of raw subtitle dicts.
        """
        subtitles = []
        # Guard against videos shorter than the sample interval: without the
        # max() the progress computation below divides by zero.
        total_frames = max(1, int(duration / sample_interval))

        # Auto-detect the caption area when the caller did not pin one down.
        if subtitle_region is None:
            subtitle_region = self._detect_subtitle_region(video)

        # Sample at most every second so short-lived captions are not skipped.
        dense_interval = min(sample_interval, 1.0)
        dense_times = np.arange(0, duration, dense_interval)
        # Number of dense samples per original sampling step (always >= 1).
        step = max(1, int(sample_interval / dense_interval))

        for i, time in enumerate(dense_times):
            try:
                frame = video.get_frame(time)

                # moviepy yields RGB; OpenCV expects BGR.
                frame_cv = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

                # Try the caller's threshold first, then progressively looser
                # fallbacks so faint captions are not missed entirely.
                text = ""
                best_confidence = 0
                for conf in (confidence_threshold, 30, 20, 10):
                    temp_text = self._extract_text_from_frame(frame_cv, subtitle_region, conf)
                    if temp_text and temp_text.strip():
                        text = temp_text.strip()
                        # NOTE: this records the threshold level that succeeded,
                        # not the actual OCR score of the recognized text.
                        best_confidence = conf
                        break

                if text:
                    if subtitles and subtitles[-1]["text"] == text:
                        # Same caption still on screen: extend its end time.
                        subtitles[-1]["end_time"] = time + dense_interval
                    else:
                        subtitles.append({
                            "start_time": time,
                            "end_time": time + dense_interval,
                            "text": text,
                            "confidence": best_confidence,
                        })

                # Report progress once per original sampling step, mapped onto
                # the 20-80% band of the overall pipeline (clamped).
                if i % step == 0:
                    progress = 20 + min(60, int(60 * (i // step) / total_frames))
                    self.progress_updated.emit(progress)
                    self.status_updated.emit(f"正在处理第 {i+1}/{len(dense_times)} 帧...")

            except Exception as e:
                # Best effort: a single bad frame must not abort the whole run.
                print(f"处理帧 {time}s 时出错: {str(e)}")
                continue

        return subtitles

    def _default_subtitle_region(self, video):
        """Fallback caption region: middle half width, bottom 15% of height."""
        height, width = video.get_frame(0).shape[:2]
        return (width // 4, int(height * 0.8), width // 2, int(height * 0.15))

    def _detect_subtitle_region(self, video):
        """Heuristically locate the caption area from a few sampled frames.

        Returns ``(x, y, width, height)``; falls back to a bottom-center
        default region when nothing plausible is found or detection fails.
        """
        try:
            # Probe three frames around the middle of the video.
            duration = video.duration
            sample_times = [duration * 0.3, duration * 0.5, duration * 0.7]

            subtitle_regions = []

            for time in sample_times:
                frame = video.get_frame(time)
                frame_cv = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

                gray = cv2.cvtColor(frame_cv, cv2.COLOR_BGR2GRAY)

                # Edges -> contours as a cheap proxy for text clusters.
                edges = cv2.Canny(gray, 50, 150)
                contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                height, width = gray.shape
                for contour in contours:
                    x, y, w, h = cv2.boundingRect(contour)

                    # Captions normally sit in the bottom third, span at least a
                    # quarter of the width, and have a plausible text height.
                    if (y > height * 0.6 and w > width * 0.25 and h > 20 and h < 100):
                        subtitle_regions.append((x, y, w, h))

            if subtitle_regions:
                # Merge all candidates into their bounding union.
                x = min(r[0] for r in subtitle_regions)
                y = min(r[1] for r in subtitle_regions)
                w = max(r[0] + r[2] for r in subtitle_regions) - x
                h = max(r[1] + r[3] for r in subtitle_regions) - y
                return (x, y, w, h)

            return self._default_subtitle_region(video)

        except Exception as e:
            print(f"字幕区域检测失败: {str(e)}")
            return self._default_subtitle_region(video)

    def _extract_text_from_frame(self, frame, subtitle_region, confidence_threshold):
        """OCR the subtitle region of one frame.

        Returns the recognized text fragments joined by spaces, or "" when
        nothing clears the threshold or an error occurs.
        """
        try:
            x, y, w, h = subtitle_region

            # Crop to the caption area (numpy clamps out-of-range slices).
            subtitle_area = frame[y:y+h, x:x+w]

            if subtitle_area.size == 0:
                return ""

            processed_image = self._preprocess_image(subtitle_area)

            results = self.reader.readtext(processed_image)

            # EasyOCR confidences are 0-1; the threshold parameter is 0-100.
            texts = []
            for (bbox, text, confidence) in results:
                if confidence >= confidence_threshold / 100.0:
                    texts.append(text)

            return " ".join(texts)

        except Exception as e:
            print(f"文本提取失败: {str(e)}")
            return ""

    def _preprocess_image(self, image):
        """Binarize and denoise a BGR crop to improve OCR accuracy.

        Returns the cleaned single-channel image, or the input unchanged if
        preprocessing fails.
        """
        try:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Light blur suppresses sensor noise before thresholding.
            blurred = cv2.GaussianBlur(gray, (3, 3), 0)

            # Adaptive threshold copes with uneven backgrounds behind text.
            thresh = cv2.adaptiveThreshold(
                blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
            )

            # Closing removes small speckles inside the glyphs.
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
            cleaned = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

            return cleaned

        except Exception as e:
            print(f"图像预处理失败: {str(e)}")
            return image

    def _merge_adjacent_subtitles(self, subtitles):
        """Merge consecutive entries with identical text and a gap < 0.5 s."""
        if not subtitles:
            return []

        merged = []
        current = subtitles[0].copy()

        for subtitle in subtitles[1:]:
            if (subtitle["text"] == current["text"] and
                subtitle["start_time"] - current["end_time"] < 0.5):
                current["end_time"] = subtitle["end_time"]
            else:
                merged.append(current)
                current = subtitle.copy()

        merged.append(current)
        return merged

    def _save_subtitles(self, subtitles, output_path):
        """Write the subtitle list to ``output_path`` in SRT format.

        Returns ``(success, message)``.
        """
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                for i, subtitle in enumerate(subtitles, 1):
                    # Sequence number
                    f.write(f"{i}\n")

                    # Time range
                    start_str = self._seconds_to_srt_time(subtitle["start_time"])
                    end_str = self._seconds_to_srt_time(subtitle["end_time"])
                    f.write(f"{start_str} --> {end_str}\n")

                    # Caption text followed by the blank separator line
                    f.write(f"{subtitle['text']}\n\n")

            return True, "字幕文件保存成功"
        except Exception as e:
            return False, f"保存失败: {str(e)}"

    def _seconds_to_srt_time(self, seconds):
        """Convert seconds to the SRT timestamp format ``HH:MM:SS,mmm``.

        Rounds to the nearest millisecond instead of truncating each field,
        so float artifacts (e.g. 1.9999 s) do not lose a millisecond.
        """
        total_ms = max(0, int(round(seconds * 1000)))
        hours, rem = divmod(total_ms, 3600000)
        minutes, rem = divmod(rem, 60000)
        secs, millisecs = divmod(rem, 1000)

        return f"{hours:02d}:{minutes:02d}:{secs:02d},{millisecs:03d}"

    def get_video_info(self, video_path):
        """Return basic metadata for a video file.

        Returns ``(True, info_dict)`` on success or ``(False, message)`` on
        failure; the clip is always closed, even when reading raises.
        """
        video = None
        try:
            video = VideoFileClip(video_path)
            info = {
                'duration': video.duration,
                'fps': video.fps,
                'size': video.size,
                'width': video.w,
                'height': video.h
            }
            return True, info
        except Exception as e:
            return False, f"获取视频信息失败: {str(e)}"
        finally:
            if video is not None:
                video.close()


class SubtitleExtractionThread(QThread):
    """Worker thread that runs subtitle extraction off the GUI thread.

    Wraps a :class:`SubtitleExtractor` call so the Qt event loop stays
    responsive, re-exposing the extractor's progress signals and delivering
    the final result via ``extraction_finished``.
    """

    progress_updated = pyqtSignal(int)                  # percentage, 0-100
    status_updated = pyqtSignal(str)                    # status text
    extraction_finished = pyqtSignal(bool, str, list)   # (success, message, subtitles)

    def __init__(self, extractor, video_path, output_path, sample_interval, confidence_threshold, subtitle_region=None):
        super().__init__()
        # Capture every parameter; run() forwards them unchanged.
        self.extractor = extractor
        self.video_path = video_path
        self.output_path = output_path
        self.sample_interval = sample_interval
        self.confidence_threshold = confidence_threshold
        self.subtitle_region = subtitle_region

    def run(self):
        """Perform the extraction and emit the outcome as a single signal."""
        outcome = self.extractor.extract_subtitles_from_video(
            self.video_path,
            self.output_path,
            self.sample_interval,
            self.confidence_threshold,
            self.subtitle_region,
        )
        # outcome is the (success, message, subtitles) triple.
        self.extraction_finished.emit(*outcome)