"""
文本后处理模块
实现文本清洗、噪声过滤、时间戳合并和语义分段功能
"""

import difflib
import re
import time
import uuid
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple

import jieba
import jieba.posseg as pseg

from ..core import config, app_logger
from ..ocr import OCRResult


class SourceType(Enum):
    """Origin category of a piece of recognized text."""
    SLIDE = "slide"          # presentation slide content
    WHITEBOARD = "whiteboard"  # whiteboard / handwritten board content
    DISCUSSION = "discussion"  # discussion record
    UNKNOWN = "unknown"      # could not be classified


@dataclass
class ProcessedText:
    """A merged, cleaned piece of text built from one time-window group of OCR results."""
    text_id: str              # identifier assigned at creation time
    content: str              # merged text of the group (deduplicated, space-joined)
    source_type: SourceType   # classified origin (slide / whiteboard / discussion / unknown)
    confidence: float         # average confidence of the positive-confidence OCR results
    timestamp: float          # timestamp of the first OCR result in the group
    region_ids: List[str]     # region ids of every OCR result that contributed
    keywords: List[str]       # keywords extracted from the merged content
    language: str             # detected language: "zh" / "en" / "mixed" / "unknown"
    cleaned_content: str      # cleaned text; currently identical to `content`


@dataclass
class TextSegment:
    """A length-bounded segment assembled from several ProcessedText entries of one source type."""
    segment_id: str           # identifier assigned at creation time
    content: str              # space-joined content of the member texts
    source_type: SourceType   # common source type of all member texts
    start_time: float         # earliest member timestamp
    end_time: float           # latest member timestamp
    confidence: float         # mean confidence over member texts
    keywords: List[str]       # deduplicated union of member keywords
    related_segments: List[str]  # ids of related segments; currently always empty


class TextCleaner:
    """Cleans OCR text: strips noise characters, normalizes whitespace,
    removes near-duplicate strings, extracts keywords and detects language.
    """

    # Precompiled patterns, built once at class definition time.
    # The noise pattern keeps word chars, whitespace, CJK ranges and
    # common punctuation; everything else becomes a space.
    _NOISE_RE = re.compile(r'[^\w\s\u4e00-\u9fff\u3000-\u303f\uff00-\uffef.,!?;:()（）]')
    _WHITESPACE_RE = re.compile(r'\s+')
    _CHINESE_RE = re.compile(r'[\u4e00-\u9fff]')
    _ENGLISH_RE = re.compile(r'[a-zA-Z]')

    def __init__(self):
        # Noise regex strings. NOTE(review): clean_text uses the class-level
        # precompiled patterns instead; this attribute is kept only for
        # backward compatibility with any external reader.
        self.noise_patterns = [
            r'[^\w\s\u4e00-\u9fff\u3000-\u303f\uff00-\uffef]',  # non-CJK/Latin characters
            r'\s+',  # runs of whitespace
            r'^\s+|\s+$',  # leading/trailing whitespace
        ]

        # Stop words excluded from keyword extraction.
        self.noise_words = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人',
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to'
        }

        # Texts shorter than this (before or after cleaning) are dropped.
        self.min_text_length = 3

        app_logger.info("文本清洗器初始化完成")

    def clean_text(self, text: str) -> str:
        """Return a cleaned copy of *text*.

        Noise characters are replaced by spaces, whitespace runs are
        collapsed, and the result is stripped. Returns "" when the input
        (or the cleaned result) is shorter than ``min_text_length``;
        returns the original text unchanged if cleaning raises.
        """
        if not text or len(text.strip()) < self.min_text_length:
            return ""

        try:
            cleaned = self._NOISE_RE.sub(' ', text)
            cleaned = self._WHITESPACE_RE.sub(' ', cleaned).strip()
            return cleaned if len(cleaned) >= self.min_text_length else ""
        except Exception as e:
            app_logger.error(f"文本清洗失败: {e}")
            return text

    def remove_duplicates(self, texts: List[str], similarity_threshold: float = 0.8) -> List[str]:
        """Drop texts whose similarity (difflib ratio) to an already-kept
        text reaches ``similarity_threshold``.

        Keeps first occurrences; O(n^2) pairwise comparison, acceptable
        for the small per-group batches this module handles.
        """
        unique_texts: List[str] = []
        for text in texts:
            if not any(difflib.SequenceMatcher(None, text, kept).ratio() >= similarity_threshold
                       for kept in unique_texts):
                unique_texts.append(text)
        return unique_texts

    def extract_keywords(self, text: str, max_keywords: int = 10) -> List[str]:
        """Extract up to *max_keywords* keywords via jieba POS tagging.

        Keeps nouns/verbs/adjectives of length >= 2 that are not stop
        words, in deterministic first-occurrence order. (The previous
        ``list(set(...))[:max_keywords]`` made the surviving keywords
        depend on hash randomization.)
        """
        if not text:
            return []

        try:
            # Sub-tags 'nr', 'ns', 'nt', 'nz', 'vn', 'an', ... all start
            # with 'n'/'v'/'a', so matching on the first letter covers them.
            valid_pos = {'n', 'v', 'a'}
            keywords: List[str] = []
            seen = set()

            for word, pos in pseg.cut(text):
                if (len(word) >= 2 and
                        pos[0] in valid_pos and
                        word not in self.noise_words and
                        word not in seen):
                    seen.add(word)
                    keywords.append(word)
                    if len(keywords) >= max_keywords:
                        break

            return keywords

        except Exception as e:
            app_logger.error(f"关键词提取失败: {e}")
            return []

    def detect_language(self, text: str) -> str:
        """Heuristic language detection: "zh", "en", "mixed" or "unknown".

        Ratios are computed over the raw character count, including
        whitespace and punctuation.
        """
        if not text:
            return "unknown"

        chinese_chars = len(self._CHINESE_RE.findall(text))
        english_chars = len(self._ENGLISH_RE.findall(text))
        total_chars = len(text)  # non-zero: guarded above

        if chinese_chars / total_chars > 0.3:
            return "zh"
        if english_chars / total_chars > 0.5:
            return "en"
        return "mixed"


class SourceTypeClassifier:
    """Classifies where a piece of recognized text came from
    (slide, whiteboard or discussion) using keyword cues."""

    def __init__(self):
        # Cue words typical of presentation slides.
        self.slide_keywords = {
            '标题', '目录', '概述', '总结', '结论', '第一章', '第二章', '第三章',
            'title', 'agenda', 'overview', 'summary', 'conclusion', 'chapter'
        }

        # Cue words typical of whiteboard writing.
        self.whiteboard_keywords = {
            '公式', '计算', '推导', '证明', '解答', '步骤',
            'formula', 'calculation', 'proof', 'solution', 'step'
        }

        # Cue words typical of spoken discussion records.
        self.discussion_keywords = {
            '问题', '回答', '讨论', '意见', '建议', '想法',
            'question', 'answer', 'discussion', 'opinion', 'suggestion', 'idea'
        }

        app_logger.info("内容来源分类器初始化完成")

    def classify(self, text: str, confidence: float, region_info: Dict) -> SourceType:
        """Return the most likely SourceType for *text*.

        ``confidence`` is accepted for interface compatibility but is not
        used in the decision. ``region_info`` flags boost specific types.
        On tied scores the priority is slide > whiteboard > discussion.
        """
        if not text:
            return SourceType.UNKNOWN

        lowered = text.lower()
        scored = [
            (self._calculate_score(lowered, self.slide_keywords), SourceType.SLIDE),
            (self._calculate_score(lowered, self.whiteboard_keywords), SourceType.WHITEBOARD),
            (self._calculate_score(lowered, self.discussion_keywords), SourceType.DISCUSSION),
        ]

        # Region hints boost their associated category.
        if region_info.get('is_large_region', False):
            scored[0] = (scored[0][0] * 1.5, SourceType.SLIDE)
        if region_info.get('has_formulas', False):
            scored[1] = (scored[1][0] * 2.0, SourceType.WHITEBOARD)

        # max() keeps the first maximal entry, preserving the tie-break order.
        best_score, best_type = max(scored, key=lambda pair: pair[0])
        return best_type if best_score > 0 else SourceType.UNKNOWN

    def _calculate_score(self, text: str, keywords: Set[str]) -> float:
        """Fraction of *keywords* occurring as substrings of *text* (0 if empty)."""
        if not keywords:
            return 0
        return sum(1 for keyword in keywords if keyword in text) / len(keywords)


class TextMerger:
    """Merges OCR results that fall within the same time window into
    single ProcessedText entries."""

    def __init__(self):
        # Maximum gap (seconds) between consecutive results in one group.
        self.merge_time_window = config.post_processing.merge_time_window
        # Similarity threshold used when deduplicating texts in a group.
        self.similarity_threshold = config.post_processing.text_similarity_threshold

        app_logger.info("文本合并器初始化完成")

    def merge_by_timestamp(self, ocr_results: List[OCRResult]) -> List[ProcessedText]:
        """Group *ocr_results* by timestamp proximity and merge each group.

        Results are sorted by timestamp; consecutive results whose gap is
        within ``merge_time_window`` form one group. Returns one
        ProcessedText per group that yields usable text.
        """
        if not ocr_results:
            return []

        # Build the helpers once per call instead of once per group
        # (they were previously re-constructed, and re-logged, per group).
        cleaner = TextCleaner()
        classifier = SourceTypeClassifier()

        sorted_results = sorted(ocr_results, key=lambda r: r.timestamp)
        merged_texts: List[ProcessedText] = []
        current_group: List[OCRResult] = [sorted_results[0]]

        for result in sorted_results[1:]:
            if result.timestamp - current_group[-1].timestamp <= self.merge_time_window:
                current_group.append(result)
            else:
                merged = self._merge_group(current_group, cleaner, classifier)
                if merged:
                    merged_texts.append(merged)
                current_group = [result]

        # Flush the final group.
        merged = self._merge_group(current_group, cleaner, classifier)
        if merged:
            merged_texts.append(merged)

        return merged_texts

    def _merge_group(self, group: List[OCRResult],
                     cleaner: Optional[TextCleaner] = None,
                     classifier: Optional[SourceTypeClassifier] = None) -> Optional[ProcessedText]:
        """Merge one time-window group into a ProcessedText.

        Returns None for an empty group or when cleaning leaves no text.
        ``cleaner``/``classifier`` may be supplied to avoid rebuilding
        them per group; they are created on demand otherwise.
        """
        if not group:
            return None

        cleaner = cleaner or TextCleaner()
        classifier = classifier or SourceTypeClassifier()

        # Clean every result's text and drop the ones that cleaned to "".
        texts = [cleaner.clean_text(result.text) for result in group]
        texts = [t for t in texts if t]
        if not texts:
            return None

        unique_texts = cleaner.remove_duplicates(texts, self.similarity_threshold)
        combined_text = ' '.join(unique_texts)

        # Average only the positive confidences; 0.0 when none are positive.
        confidences = [r.confidence for r in group if r.confidence > 0]
        avg_confidence = sum(confidences) / len(confidences) if confidences else 0.0

        return ProcessedText(
            # uuid4 keeps IDs unique; the previous time.time()-millisecond
            # ID collided for every group merged within the same millisecond.
            text_id=f"text_{uuid.uuid4().hex}",
            content=combined_text,
            source_type=classifier.classify(combined_text, avg_confidence, {}),
            confidence=avg_confidence,
            timestamp=group[0].timestamp,
            region_ids=[r.region_id for r in group],
            keywords=cleaner.extract_keywords(combined_text),
            language=cleaner.detect_language(combined_text),
            cleaned_content=combined_text
        )


class SemanticSegmenter:
    """Groups processed texts by source type and packs them into
    length-bounded segments."""

    def __init__(self):
        # Segments shorter than this many characters are discarded;
        # accumulation starts a new segment past the maximum.
        self.min_segment_length = 50
        self.max_segment_length = 1000

        app_logger.info("语义分段器初始化完成")

    def segment_texts(self, processed_texts: List[ProcessedText]) -> List[TextSegment]:
        """Build TextSegments from *processed_texts*.

        Texts are grouped by source type, ordered by timestamp within
        each group, and accumulated into segments whose combined length
        stays under ``max_segment_length``. Segments shorter than
        ``min_segment_length`` are dropped.
        """
        if not processed_texts:
            return []

        segments: List[TextSegment] = []

        # Group by source type so slide/whiteboard/discussion content
        # never mixes inside one segment.
        grouped_texts = defaultdict(list)
        for text in processed_texts:
            grouped_texts[text.source_type].append(text)

        for source_type, texts in grouped_texts.items():
            texts.sort(key=lambda t: t.timestamp)

            current_batch: List[ProcessedText] = []
            current_length = 0

            for text in texts:
                text_length = len(text.content)

                # Close the current segment when adding this text would
                # exceed the maximum (unless the segment is still empty).
                if current_batch and current_length + text_length > self.max_segment_length:
                    segment = self._create_segment(current_batch, source_type)
                    if segment:
                        segments.append(segment)
                    current_batch = [text]
                    current_length = text_length
                else:
                    current_batch.append(text)
                    current_length += text_length

            # Flush the trailing batch.
            if current_batch:
                segment = self._create_segment(current_batch, source_type)
                if segment:
                    segments.append(segment)

        return segments

    def _create_segment(self, texts: List[ProcessedText],
                       source_type: SourceType) -> Optional[TextSegment]:
        """Merge *texts* into one TextSegment.

        Returns None for an empty input or when the combined content is
        shorter than ``min_segment_length``.
        """
        if not texts:
            return None

        combined_content = ' '.join(text.content for text in texts)
        if len(combined_content) < self.min_segment_length:
            return None

        # Deterministic first-occurrence dedup of the member keywords
        # (list(set(...)) previously gave hash-randomized order).
        unique_keywords = list(dict.fromkeys(
            kw for text in texts for kw in text.keywords
        ))

        return TextSegment(
            # uuid4 guarantees uniqueness; the previous millisecond
            # timestamp ID was identical for segments created in the same
            # millisecond, which this tight loop makes near-certain.
            segment_id=f"segment_{uuid.uuid4().hex}",
            content=combined_content,
            source_type=source_type,
            start_time=min(text.timestamp for text in texts),
            end_time=max(text.timestamp for text in texts),
            confidence=sum(text.confidence for text in texts) / len(texts),
            keywords=unique_keywords,
            related_segments=[]
        )


class TextPostProcessor:
    """Top-level post-processing pipeline: filter low-confidence OCR
    results, merge by timestamp, then segment semantically."""

    def __init__(self):
        # Pipeline stages, built once and reused across calls.
        self.cleaner = TextCleaner()
        self.merger = TextMerger()
        self.segmenter = SemanticSegmenter()

        app_logger.info("文本后处理器初始化完成")

    def process(self, ocr_results: List[OCRResult]) -> Tuple[List[ProcessedText], List[TextSegment]]:
        """Run the full pipeline over *ocr_results*.

        Returns the merged texts and the segments built from them.
        """
        app_logger.info(f"开始后处理 {len(ocr_results)} 个OCR结果")

        # Drop results below the configured confidence floor.
        min_conf = config.post_processing.min_confidence
        filtered_results = [r for r in ocr_results if r.confidence >= min_conf]
        app_logger.info(f"过滤后剩余 {len(filtered_results)} 个结果")

        # Merge time-adjacent results into ProcessedText entries.
        processed_texts = self.merger.merge_by_timestamp(filtered_results)
        app_logger.info(f"合并后得到 {len(processed_texts)} 个文本")

        # Pack the merged texts into semantic segments.
        segments = self.segmenter.segment_texts(processed_texts)
        app_logger.info(f"分段后得到 {len(segments)} 个段落")

        return processed_texts, segments
