import logging
import re
import unicodedata
from typing import Any, Dict, List, Set

class TextCleaner:
    """Text cleaner for raw extracted document text.

    Runs a configurable pipeline over the input: Unicode normalisation,
    special-character removal, line-break repair, header/footer stripping,
    de-duplication and blank-line cleanup. Each step can be toggled via the
    ``config`` dict passed to ``__init__``. On any internal error the
    original text is returned unchanged (deliberate best-effort behaviour).
    """

    # Keywords that mark a line as likely boilerplate (header/footer).
    # Matched case-insensitively as plain substrings.
    _FOOTER_KEYWORDS = [
        '版权所有', 'copyright', '保留所有权利', 'all rights reserved',
        '机密', 'confidential', '内部资料', 'internal use',
    ]

    # Full-width -> half-width punctuation map, built once for str.translate
    # (single C-level pass instead of one .replace() call per character).
    _FULLWIDTH_MAP = str.maketrans({
        '　': ' ',  # full-width space
        '，': ',', '。': '.', '；': ';', '：': ':',
        '？': '?', '！': '!', '（': '(', '）': ')',
        '【': '[', '】': ']', '「': '"', '」': '"',
        '『': "'", '』': "'",
    })

    # Logger replaces the previous print() diagnostics so callers can
    # control verbosity through standard logging configuration.
    _logger = logging.getLogger(__name__)

    def __init__(self, config: Dict[str, Any]):
        """Initialise the cleaner.

        Args:
            config: Cleaning configuration. Recognised boolean flags (with
                their defaults): ``normalize_text`` (True),
                ``remove_special_chars`` (True), ``fix_line_breaks`` (True),
                ``remove_headers_footers`` (False), ``remove_duplicates``
                (True), ``remove_empty_lines`` (True).
        """
        self.config = config
        # Pre-compile every regular expression once for performance.
        self._compile_patterns()

    def _compile_patterns(self) -> None:
        """Pre-compile every regex used by the cleaning pipeline."""

        # A line consisting solely of whitespace.
        self.empty_line_pattern = re.compile(r'^\s*$', re.MULTILINE)

        # Three or more consecutive (possibly blank) lines.
        self.multiple_newlines_pattern = re.compile(r'\n\s*\n\s*\n+', re.MULTILINE)

        # Whole lines that look like page headers/footers.
        self.header_footer_patterns = [
            re.compile(r'^\s*第\s*\d+\s*页\s*$', re.MULTILINE | re.IGNORECASE),
            re.compile(r'^\s*Page\s+\d+\s*$', re.MULTILINE | re.IGNORECASE),
            re.compile(r'^\s*\d+\s*/\s*\d+\s*$', re.MULTILINE),
            re.compile(r'^\s*-\s*\d+\s*-\s*$', re.MULTILINE),
            re.compile(r'^\s*\[\s*\d+\s*\]\s*$', re.MULTILINE),
            re.compile(r'^\s*\d{4}-\d{2}-\d{2}\s*$', re.MULTILINE),  # bare date
            re.compile(r'^\s*\d{2}:\d{2}:\d{2}\s*$', re.MULTILINE),  # bare time
        ]

        # Substring patterns flagging a line as probable header/footer text.
        # BUGFIX: these were stored as raw strings (despite the comment
        # claiming they were compiled) and re-compiled on every call to
        # _is_likely_header_footer(); compile them exactly once here.
        self.header_footer_text_patterns = [
            re.compile(p, re.IGNORECASE) for p in (
                r'第.*页', r'page \d+', r'共.*页', r'of \d+ pages',
                r'\d+/\d+', r'页码：\d+', r'Page \d+',
                r'版权所有', r'Copyright', r'All rights reserved',
                r'机密', r'Confidential', r'内部资料',
            )
        ]

        # Characters that are deleted outright.
        self.special_chars_patterns = [
            re.compile(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x9F]'),  # control chars
            re.compile(r'[\uFEFF\u200B-\u200D\u2060]'),  # zero-width chars / BOM
            re.compile(r'[\uE000-\uF8FF]'),  # private-use area
        ]

        # Exotic Unicode space variants. BUGFIX: these used to be deleted,
        # which fused neighbouring words together; they are now replaced by
        # an ASCII space in _remove_special_chars().
        self.unicode_space_pattern = re.compile(
            r'[\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]')

        # A line containing only digits, whitespace and simple punctuation
        # (typical of page numbers such as "3 / 12" or "- 4 -").
        self.numeric_line_pattern = re.compile(r'^[\d\s\-_.,/()\[\]]+$')

        # Line-break repair patterns (PDF/OCR extraction artefacts).
        self.line_break_patterns = [
            re.compile(r'([a-z])\n([a-z])', re.IGNORECASE),  # break inside a word
            re.compile(r'([^.!?])\n([A-Z])', re.MULTILINE),  # break inside a sentence
            re.compile(r'([,;:])\n'),  # break right after punctuation
        ]

        # Wrapped bullets / enumerators that should rejoin the previous line.
        self.list_item_patterns = [
            re.compile(r'\n([•·▪▫◦‣⁃])'),
            re.compile(r'\n(\d+[.)、])'),
            re.compile(r'\n([a-zA-Z][.)、])'),
        ]

        # NOTE(review): compiled but currently unused by the pipeline
        # (_remove_duplicates works line/paragraph-wise instead); kept for
        # backward compatibility with code that may inspect this attribute.
        self.duplicate_patterns = [
            re.compile(r'(\S.{10,})\n\1', re.MULTILINE),  # duplicated line
            re.compile(r'(\S.{20,})\s+\1'),  # duplicated paragraph
        ]

    def clean_text(self, text: str) -> str:
        """Run the full cleaning pipeline over *text*.

        Args:
            text: Raw input text.

        Returns:
            str: The cleaned text, or the original text unchanged if any
            step raises (best-effort: cleaning must never lose the input).
        """
        if not text:
            self._logger.debug("输入文本为空")
            return ""

        self._logger.debug("开始文本清洗，原始长度: %d 字符", len(text))
        cleaned_text = text

        try:
            # Step order matters: normalisation first so later regexes see
            # canonical characters, structural fixes before de-duplication.
            steps = [
                ('normalize_text', True, self._normalize_unicode),
                ('remove_special_chars', True, self._remove_special_chars),
                ('fix_line_breaks', True, self._fix_line_breaks),
                ('remove_headers_footers', False, self._remove_headers_footers),
                ('remove_duplicates', True, self._remove_duplicates),
                ('remove_empty_lines', True, self._remove_empty_lines),
            ]
            for flag, default, step in steps:
                if self.config.get(flag, default):
                    cleaned_text = step(cleaned_text)
                    self._logger.debug("%s 完成，长度: %d", flag, len(cleaned_text))

            cleaned_text = self._final_cleanup(cleaned_text)
            self._logger.debug("文本清洗完成，最终长度: %d", len(cleaned_text))
            return cleaned_text

        except Exception:
            # Deliberate broad catch: cleaning is best-effort, so log the
            # full traceback and fall back to the original text.
            self._logger.exception("文本清洗过程中出错，返回原始文本")
            return text

    def _normalize_unicode(self, text: str) -> str:
        """Apply NFC normalisation and map full-width punctuation to ASCII."""
        # NFC recomposes combining sequences into canonical composed form.
        text = unicodedata.normalize('NFC', text)
        return text.translate(self._FULLWIDTH_MAP)

    def _remove_special_chars(self, text: str) -> str:
        """Strip control/zero-width/private-use characters and tidy spaces."""
        for pattern in self.special_chars_patterns:
            text = pattern.sub('', text)

        # BUGFIX: map Unicode space variants to a plain space (previously
        # deleted, which fused neighbouring words together).
        text = self.unicode_space_pattern.sub(' ', text)

        # Tabs become spaces first so the space-collapse pass below also
        # merges any runs they produce (the old order could leave "a  b").
        text = re.sub(r'\t+', ' ', text)
        text = re.sub(r' +', ' ', text)
        return text

    def _fix_line_breaks(self, text: str) -> str:
        """Repair line breaks introduced by PDF/OCR extraction."""
        # Break in the middle of a word: join without a space.
        text = self.line_break_patterns[0].sub(r'\1\2', text)
        # Break in the middle of a sentence: join with a space (paragraph
        # breaks survive because sentence-final punctuation is excluded).
        text = self.line_break_patterns[1].sub(r'\1 \2', text)
        # Unnecessary break right after a comma/semicolon/colon.
        text = self.line_break_patterns[2].sub(r'\1 ', text)

        # Re-attach wrapped list bullets/enumerators to the previous line.
        for pattern in self.list_item_patterns:
            text = pattern.sub(r' \1', text)
        return text

    def _remove_headers_footers(self, text: str) -> str:
        """Remove page headers/footers (page numbers, boilerplate lines)."""
        # First pass: whole-line regex patterns.
        for pattern in self.header_footer_patterns:
            text = pattern.sub('', text)

        # Second pass: heuristic per-line filter. Surviving lines are
        # stripped as a side effect (matching the original behaviour).
        kept = [
            line.strip()
            for line in text.split('\n')
            if not self._is_likely_header_footer(line.strip())
        ]
        return '\n'.join(kept)

    def _is_likely_header_footer(self, line: str) -> bool:
        """Heuristically decide whether *line* is a page header/footer."""
        line = line.strip()

        if not line:
            return False

        # Very short lines are usually bare page numbers.
        if len(line) < 3:
            return True

        # Lines made only of digits/whitespace/simple punctuation.
        if self.numeric_line_pattern.match(line):
            return True

        # Boilerplate keywords, case-insensitive substring match.
        lowered = line.casefold()
        if any(kw.casefold() in lowered for kw in self._FOOTER_KEYWORDS):
            return True

        # Pre-compiled header/footer text patterns.
        return any(p.search(line) for p in self.header_footer_text_patterns)

    def _remove_duplicates(self, text: str) -> str:
        """Drop exact duplicate lines and near-duplicate paragraphs.

        Lines shorter than 10 characters and paragraphs shorter than 20 are
        always kept (too short to be meaningful duplicates). Paragraphs are
        fingerprinted by their first 50 characters.
        """
        seen_lines: Set[str] = set()
        unique_lines: List[str] = []
        for line in text.split('\n'):
            stripped = line.strip()
            if not stripped or len(stripped) < 10:
                unique_lines.append(line)
            elif stripped not in seen_lines:
                seen_lines.add(stripped)
                unique_lines.append(line)
        text = '\n'.join(unique_lines)

        seen_fingerprints: Set[str] = set()
        unique_paragraphs: List[str] = []
        for para in text.split('\n\n'):
            stripped = para.strip()
            if not stripped or len(stripped) < 20:
                unique_paragraphs.append(para)
                continue
            # Prefix fingerprint: cheap near-duplicate detection.
            fingerprint = stripped[:50]
            if fingerprint not in seen_fingerprints:
                seen_fingerprints.add(fingerprint)
                unique_paragraphs.append(para)
        return '\n\n'.join(unique_paragraphs)

    def _remove_empty_lines(self, text: str) -> str:
        """Blank out whitespace-only lines and collapse runs of newlines."""
        # Erase the whitespace content of blank lines (newlines remain).
        text = self.empty_line_pattern.sub('', text)
        # Collapse three-or-more newlines into a single paragraph break.
        return self.multiple_newlines_pattern.sub('\n\n', text)

    def _final_cleanup(self, text: str) -> str:
        """Final tidy-up: strip edges, normalise blank-line spacing."""
        text = text.strip()

        # At most one blank line between paragraphs.
        text = re.sub(r'\n{3,}', '\n\n', text)

        # Strip leading/trailing spaces on every line.
        text = '\n'.join(line.strip() for line in text.split('\n'))

        # Stripping lines may have created new whitespace-only runs.
        return re.sub(r'\n\s*\n\s*\n+', '\n\n', text)

    def get_cleaning_stats(self, original_text: str, cleaned_text: str) -> Dict[str, Any]:
        """Return before/after statistics for a cleaning run.

        Args:
            original_text: Text before cleaning.
            cleaned_text: Text after cleaning.

        Returns:
            Dict with character/line counts, the compression ratio
            (0 when the original is empty) and a copy of the config used.
        """
        original_lines = original_text.split('\n')
        cleaned_lines = cleaned_text.split('\n')
        original_chars = len(original_text)
        cleaned_chars = len(cleaned_text)

        return {
            'original_length': original_chars,
            'cleaned_length': cleaned_chars,
            'removed_chars': original_chars - cleaned_chars,
            'compression_ratio': cleaned_chars / original_chars if original_chars > 0 else 0,
            'original_lines': len(original_lines),
            'cleaned_lines': len(cleaned_lines),
            'removed_lines': len(original_lines) - len(cleaned_lines),
            'empty_lines_removed': len([line for line in original_lines if not line.strip()]),
            'config_used': self.config.copy(),
        }

    def preview_cleaning(self, text: str, max_length: int = 1000) -> Dict[str, str]:
        """Clean only a prefix of *text* to preview the effect.

        Args:
            text: Input text.
            max_length: Maximum number of characters to preview.

        Returns:
            Dict with the truncated 'original', its 'cleaned' form and a
            'changes_detected' flag.
        """
        if len(text) > max_length:
            preview_text = text[:max_length] + "..."
        else:
            preview_text = text

        cleaned_preview = self.clean_text(preview_text)

        return {
            'original': preview_text,
            'cleaned': cleaned_preview,
            'changes_detected': preview_text != cleaned_preview,
        }