"""法律文档分块器

基于BaseChunker实现的法律文档专用分块器。
"""

import os
import re
import time
import threading
from typing import List, Tuple, Optional

import docx2txt
from docx import Document

from core.base_chunker import BaseChunker, ChunkingStrategy
from utils.logger import LoggerManager, ErrorHandler, log_execution_time, handle_exceptions
from config.settings import ConfigManager


class LegalChunkingStrategy(ChunkingStrategy):
    """Chunking strategy specialised for Chinese legal documents.

    Recognises the structural markers of Chinese statutes and notices --
    chapters (第X章), articles (第X条), sub-items (（一）/ "1."), decree
    numbers, addressee lines and dates -- and cuts the text along them.
    Any unexpected failure degrades to plain paragraph chunking.
    """

    def __init__(self, min_content_length: int = 100):
        # Minimum character count a paragraph must have to be kept by the
        # fallback (paragraph-based) chunker.
        self.min_content_length = min_content_length
        self.logger = LoggerManager.get_logger(self.__class__.__name__)

    def chunk(self, text: str) -> List[str]:
        """Split *text* into chunks, falling back to paragraphs on error."""
        try:
            return self._smart_chunk_legal_text(text)
        except Exception as e:
            self.logger.error(f"法律文档分块失败: {e}")
            # Degrade gracefully rather than propagate the failure.
            return self._fallback_chunk(text)

    def _smart_chunk_legal_text(self, text: str) -> List[str]:
        """Structure-aware chunking driven by heading levels and special lines."""
        lines = [line.strip() for line in text.split('\n') if line.strip()]

        chunks: List[str] = []
        current_chunk: List[str] = []
        current_level = 0
        i = 0

        # Lines that should always form a chunk of their own.
        doc_title_patterns = [
            r'^.*通知.*[\[（［].*[）］\]]号$',   # notice title with document number
            r'^各省.*：$',                      # addressee line of a notice
            r'^[^年]*\d{4}年\d{1,2}月\d{1,2}日$', # date line
            r'^附\s*则',                       # supplementary provisions
            r'^中华人民共和国.*令第\d+号$'       # State Council decree
        ]

        # Chapter heading, e.g. "第一章 总则".
        chapter_pattern = r'^第[一二三四五六七八九十]+章\s+.+'

        # Markers that open a table-of-contents section.
        toc_start_patterns = [
            r'^目\s*录$',
            r'^目\s*次$',
            r'^contents$',
            r'^index$'
        ]

        # Emit the table of contents (if any) as its own chunk(s) first.
        chunks.extend(self._extract_table_of_contents(lines, toc_start_patterns, chapter_pattern))

        # Main pass over the body of the document.
        while i < len(lines):
            line = lines[i]
            level, _ = self._get_heading_level(line)

            # The TOC was already emitted above -- skip it here.
            if self._is_toc_start(line, toc_start_patterns):
                i = self._skip_toc_section(lines, i, chapter_pattern)
                continue

            is_chapter_title = bool(re.match(chapter_pattern, line))

            is_special_line = any(re.match(pattern, line) for pattern in doc_title_patterns)

            # Special lines become standalone chunks.
            if is_special_line:
                if current_chunk:
                    chunks.append('\n'.join(current_chunk))
                    current_chunk = []
                chunks.append(line)
                i += 1
                continue

            # A chapter heading flushes the running chunk and starts a new one.
            if is_chapter_title:
                if current_chunk:
                    chunks.append('\n'.join(current_chunk))
                current_chunk = [line]
                current_level = 0
                i += 1
                continue

            if level == 1:  # top-level heading (一、二、...)
                if current_chunk:
                    chunks.append('\n'.join(current_chunk))
                    current_chunk = []
                current_level = level
                current_chunk.append(line)
                i += 1

            elif level == 2:  # article heading (第X条)
                i = self._process_article_section(lines, i, chunks, current_chunk,
                                                  chapter_pattern, doc_title_patterns)
                current_chunk = []
                continue

            elif level == 3 and current_level != 2:  # standalone level-3 heading
                i = self._process_subsection(lines, i, chunks, current_chunk,
                                             chapter_pattern, doc_title_patterns)
                # BUG FIX: _process_subsection has already flushed
                # current_chunk into `chunks`; without resetting it here the
                # same text was emitted a second time with the next chunk
                # (the level-2 branch above already did this reset).
                current_chunk = []
                continue

            else:  # plain body text
                current_chunk.append(line)
                i += 1

        # Flush whatever is left.
        if current_chunk:
            chunks.append('\n'.join(current_chunk))

        return [chunk for chunk in chunks if chunk.strip()]

    def _get_heading_level(self, line: str) -> Tuple[int, Optional[str]]:
        """Return ``(level, line)`` for a heading, or ``(0, None)`` for body text.

        Level 1: 一、 ...  Level 2: 第X条  Level 3: （一） or "1."
        """
        # Level-1 heading: 一、二、三、...
        if re.match(r'^[一二三四五六七八九十]+、', line):
            return (1, line)
        # Level-2 heading (article): 第一条、第二条...
        elif re.match(r'第[一二三四五六七八九十]+条', line):
            return (2, line)
        # Level-3 heading: （一）、（二）...
        elif re.match(r'（[一二三四五六七八九十]+）', line):
            return (3, line)
        # Level-3 heading: "1." "2." ...
        elif re.match(r'^\d+\.', line):
            return (3, line)
        else:
            return (0, None)

    def _extract_table_of_contents(self, lines: List[str], toc_patterns: List[str], chapter_pattern: str) -> List[str]:
        """Collect TOC sections (from a TOC marker up to the first chapter/article)."""
        chunks = []
        in_toc = False
        toc_content = []

        for line in lines:
            is_toc_start = any(re.match(pattern, line, re.IGNORECASE) for pattern in toc_patterns)

            if is_toc_start and not in_toc:
                in_toc = True
                toc_content.append(line)
            elif in_toc:
                # The first real chapter/article heading ends the TOC.
                if re.match(chapter_pattern, line) or re.match(r'^第[一二三四五六七八九十]+条', line):
                    in_toc = False
                    if toc_content:
                        chunks.append('\n'.join(toc_content))
                        toc_content = []
                else:
                    toc_content.append(line)

        # Document ended while still inside the TOC.
        if in_toc and toc_content:
            chunks.append('\n'.join(toc_content))

        return chunks

    def _is_toc_start(self, line: str, toc_patterns: List[str]) -> bool:
        """Return True if *line* opens a table-of-contents section."""
        return any(re.match(pattern, line, re.IGNORECASE) for pattern in toc_patterns)

    def _skip_toc_section(self, lines: List[str], start_idx: int, chapter_pattern: str) -> int:
        """Return the index of the first non-TOC line at or after *start_idx*."""
        i = start_idx
        while i < len(lines):
            line = lines[i]
            if re.match(chapter_pattern, line) or re.match(r'^第[一二三四五六七八九十]+条', line):
                break
            i += 1
        return i

    def _process_article_section(self, lines: List[str], start_idx: int, chunks: List[str],
                               current_chunk: List[str], chapter_pattern: str,
                               doc_title_patterns: List[str]) -> int:
        """Emit one article (第X条) plus its body as a chunk; return the next index.

        Flushes *current_chunk* into *chunks* first. The article chunk runs
        until the next level-1/level-2 heading, chapter or special line.
        """
        if current_chunk:
            chunks.append('\n'.join(current_chunk))

        article_chunk = [lines[start_idx]]
        i = start_idx + 1

        while i < len(lines):
            line = lines[i]
            level, _ = self._get_heading_level(line)

            # Level-3 sub-items stay inside the article; anything higher ends it.
            if level in [1, 2] or any(re.match(p, line) for p in doc_title_patterns) or re.match(chapter_pattern, line):
                break

            article_chunk.append(line)
            i += 1

        if article_chunk:
            chunks.append('\n'.join(article_chunk))

        return i

    def _process_subsection(self, lines: List[str], start_idx: int, chunks: List[str],
                          current_chunk: List[str], chapter_pattern: str,
                          doc_title_patterns: List[str]) -> int:
        """Emit a standalone sub-item and its body as a chunk; return next index.

        Flushes *current_chunk* into *chunks* first. The sub-item chunk runs
        until any heading, chapter or special line.
        """
        if current_chunk:
            chunks.append('\n'.join(current_chunk))

        subsection_chunk = [lines[start_idx]]
        i = start_idx + 1

        while i < len(lines):
            line = lines[i]
            level, _ = self._get_heading_level(line)

            if level > 0 or any(re.match(p, line) for p in doc_title_patterns) or re.match(chapter_pattern, line):
                break

            subsection_chunk.append(line)
            i += 1

        if subsection_chunk:
            chunks.append('\n'.join(subsection_chunk))

        return i

    def _fallback_chunk(self, text: str) -> List[str]:
        """Simple fallback: split on blank lines, drop too-short paragraphs."""
        paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
        return [p for p in paragraphs if len(p) >= self.min_content_length]


class LawTextChunker(BaseChunker):
    """GUI chunker (BaseChunker subclass) specialised for legal DOCX files."""

    def __init__(self):
        super().__init__()
        self.config_manager = ConfigManager()
        self.logger = LoggerManager.get_logger(self.__class__.__name__)
        self.error_handler = ErrorHandler()

        # Window title; self.master is assumed to be created by
        # BaseChunker -- TODO confirm against core.base_chunker.
        self.master.title("法律文档智能分块工具 v2.0")

        # Default to the legal-document-aware strategy.
        self.chunking_strategy = LegalChunkingStrategy()

        self._load_config()

    def _load_config(self):
        """Load persisted settings into the UI; keep defaults on any error."""
        try:
            config = self.config_manager.load_config()
            # Tk variables presumably created by BaseChunker -- TODO confirm.
            self.min_content_length_var.set(config.chunking.min_content_length)
            self.timeout_var.set(config.processing.timeout)
        except Exception as e:
            self.logger.warning(f"加载配置失败，使用默认配置: {e}")

    @handle_exceptions
    @log_execution_time
    def extract_text_from_docx(self, file_path: str) -> str:
        """Extract plain text from a DOCX file.

        Tries docx2txt first, then python-docx paragraph extraction.
        Raises RuntimeError with the collected per-method errors when
        both strategies fail or yield empty text.
        """
        text = ""
        extraction_errors = []

        # Strategy 1: docx2txt.
        try:
            self.log("尝试使用docx2txt提取文本...")
            text = docx2txt.process(file_path)
            if text and text.strip():
                self.log("docx2txt提取成功")
                return text
        except Exception as e:
            extraction_errors.append(f"docx2txt失败: {e}")
            self.logger.warning(f"docx2txt提取失败: {e}")

        # Strategy 2: python-docx, paragraph by paragraph.
        try:
            self.log("尝试使用python-docx提取文本...")
            doc = Document(file_path)
            paragraphs = [p.text.strip() for p in doc.paragraphs if p.text.strip()]

            text = '\n'.join(paragraphs)
            if text and text.strip():
                self.log("python-docx提取成功")
                return text
        except Exception as e:
            extraction_errors.append(f"python-docx失败: {e}")
            self.logger.warning(f"python-docx提取失败: {e}")

        # Both strategies failed: raise with the accumulated diagnostics.
        # RuntimeError (was bare Exception) is still caught by callers
        # that catch Exception, but no longer invites over-broad handlers.
        error_msg = "所有文本提取方法都失败:\n" + "\n".join(extraction_errors)
        raise RuntimeError(error_msg)

    @handle_exceptions
    def process_file(self, file_path: str, output_dir: str, chunk_mode: str,
                    separator: str, blank_line_count: int) -> int:
        """Process one DOCX file: extract text, chunk it, write the chunks.

        Returns the number of chunks written. Any failure is reported
        through the error handler and then re-raised.
        """
        try:
            os.makedirs(output_dir, exist_ok=True)

            # BUG FIX: check existence BEFORE os.path.getsize -- getsize
            # raises OSError for a missing file, so the original
            # existence check further down was unreachable.
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"文件不存在: {file_path}")

            file_name = os.path.splitext(os.path.basename(file_path))[0]
            file_size = os.path.getsize(file_path) / 1024  # KB

            self.log(f"处理文件: {file_name}")
            self.log(f"文件大小: {file_size:.2f} KB")

            if file_size == 0:
                raise ValueError("文件内容为空")

            text = self.extract_text_from_docx(file_path)

            if not text or not text.strip():
                raise ValueError("提取的文本内容为空")

            self.log(f"提取文本长度: {len(text)} 字符")

            # Chunk with the smart strategy or a plain separator strategy.
            if chunk_mode == "智能分块":
                chunks = self.chunking_strategy.chunk(text)
            else:
                # Imported lazily to avoid a hard dependency when the
                # separator mode is never used.
                from core.base_chunker import SeparatorChunkingStrategy
                sep_strategy = SeparatorChunkingStrategy(separator)
                chunks = sep_strategy.chunk(text)

            self.log(f"分块完成，共 {len(chunks)} 个块")

            return self._save_chunks(chunks, file_name, output_dir, blank_line_count)

        except Exception as e:
            # Report with file context, then let the caller/decorator see it.
            self.error_handler.handle_processing_error(e, {"file_path": file_path})
            raise

    def _save_chunks(self, chunks: List[str], file_name: str, output_dir: str,
                    blank_line_count: int) -> int:
        """Write chunks either into one joined file or one file per chunk.

        Returns the number of chunks written.
        """
        # Tk variable presumably created by BaseChunker -- TODO confirm.
        output_format = self.output_format_var.get()
        blank_lines = '\n' * blank_line_count

        if output_format == "单个文件":
            # Single output file: chunks joined by the blank-line separator.
            output_file = os.path.join(output_dir, f"{file_name}_chunked.txt")
            with open(output_file, 'w', encoding='utf-8') as f:
                f.write(blank_lines.join(chunks))
            self.log(f"保存到: {output_file}")
        else:
            # One numbered file per chunk.
            for i, chunk in enumerate(chunks, 1):
                output_file = os.path.join(output_dir, f"{file_name}_chunk_{i:03d}.txt")
                with open(output_file, 'w', encoding='utf-8') as f:
                    f.write(chunk)
            self.log(f"保存到 {len(chunks)} 个文件")

        return len(chunks)


def main():
    """Application entry point: configure logging, then launch the GUI."""
    import tkinter as tk
    
    # Configure application-wide logging before any component logs.
    LoggerManager.setup_logging()
    
    # Create the application.
    # NOTE(review): LawTextChunker appears to manage its own Tk window
    # (it calls self.master.title(...) in __init__), so this separately
    # created `root` may be a second, empty top-level window whose
    # mainloop is run instead of the app's -- confirm against
    # BaseChunker before changing.
    root = tk.Tk()
    app = LawTextChunker()
    
    # Enter the Tk event loop; blocks until the window is closed.
    root.mainloop()


if __name__ == "__main__":
    main()