"""
文本分割器

提供智能文本分割功能，支持多种分割策略
"""

import logging
from typing import List, Dict, Any, Optional, Callable
from langchain_text_splitters import (
    RecursiveCharacterTextSplitter,
    CharacterTextSplitter,
    MarkdownHeaderTextSplitter,
    HTMLHeaderTextSplitter,
    TokenTextSplitter
)
from langchain_core.documents import Document

logger = logging.getLogger(__name__)

class SmartTextSplitter:
    """Smart text splitter.

    Automatically selects the splitting strategy best suited to a
    document's type and content: Markdown/HTML header splitting for
    structured markup, recursive character splitting for paragraph-rich
    text, and plain character splitting as the fallback.
    """

    def __init__(self,
                 chunk_size: int = 1000,
                 chunk_overlap: int = 200,
                 length_function: Optional[Callable] = None):
        """Initialize the text splitter.

        Args:
            chunk_size (int): Target size of each chunk.
            chunk_overlap (int): Overlap between consecutive chunks.
            length_function (Optional[Callable]): Function used to measure
                text length; defaults to ``len``.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.length_function = length_function or len

        # Build one instance of every supported splitter up front.
        self._init_splitters()

    def _init_splitters(self):
        """Create the concrete splitter instances used by this object."""

        # Recursive character splitter (default strategy): tries the
        # separators in order, from paragraph breaks down to characters.
        self.recursive_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            length_function=self.length_function,
            separators=["\n\n", "\n", " ", ""]
        )

        # Plain character splitter: splits on newlines only.
        self.character_splitter = CharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            length_function=self.length_function,
            separator="\n"
        )

        # Markdown splitter: splits on ATX headers, keeping header text.
        self.markdown_splitter = MarkdownHeaderTextSplitter(
            headers_to_split_on=[
                ("#", "Header 1"),
                ("##", "Header 2"),
                ("###", "Header 3"),
                ("####", "Header 4"),
            ],
            strip_headers=False
        )

        # HTML splitter: splits on <h1>..<h4> elements.
        self.html_splitter = HTMLHeaderTextSplitter(
            headers_to_split_on=[
                ("h1", "Header 1"),
                ("h2", "Header 2"),
                ("h3", "Header 3"),
                ("h4", "Header 4"),
            ]
        )

        # Token-based splitter (sizes measured in tokens, not characters).
        self.token_splitter = TokenTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap
        )

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split a list of documents into chunks.

        A document that fails to split is kept whole rather than lost.

        Args:
            documents (List[Document]): Input documents.

        Returns:
            List[Document]: The resulting chunks.
        """
        all_chunks = []

        for doc in documents:
            try:
                all_chunks.extend(self.split_document(doc))
            except Exception as e:
                logger.warning(f"分割文档失败: {str(e)}")
                # Fall back to the unsplit document so no content is lost.
                all_chunks.append(doc)

        logger.info(f"文档分割完成: {len(documents)} -> {len(all_chunks)} 个块")
        return all_chunks

    def split_document(self, document: Document) -> List[Document]:
        """Split a single document.

        Args:
            document (Document): Input document.

        Returns:
            List[Document]: Chunks, each tagged with ``chunk_index`` and
            ``chunk_type`` metadata.
        """
        splitter = self._choose_splitter(document)

        # Header-based splitters need a text→Document round trip plus a
        # second pass for oversize sections; handle them separately.
        if splitter is self.markdown_splitter or splitter is self.html_splitter:
            return self._split_structured(document, splitter)

        chunks = splitter.split_documents([document])
        for i, chunk in enumerate(chunks):
            chunk.metadata.update({
                'chunk_index': i,
                'chunk_type': splitter.__class__.__name__
            })
        return chunks

    def _split_structured(self, document: Document, splitter) -> List[Document]:
        """Split with a header-based (Markdown/HTML) splitter, then re-split
        any section still larger than ``chunk_size``."""
        chunks = splitter.split_text(document.page_content)

        result_docs = []
        for i, chunk in enumerate(chunks):
            # Header splitters may return Document objects or raw strings
            # depending on the langchain version installed.
            content = chunk.page_content if hasattr(chunk, 'page_content') else chunk
            result_docs.append(Document(
                page_content=content,
                metadata={
                    **document.metadata,
                    'chunk_index': i,
                    'chunk_type': splitter.__class__.__name__
                }
            ))

        # Header sections can be arbitrarily long; re-split oversize ones.
        # Uses self.length_function (not bare len) so the size check agrees
        # with how the splitters themselves measure length.
        final_docs = []
        for doc in result_docs:
            if self.length_function(doc.page_content) > self.chunk_size:
                final_docs.extend(self.recursive_splitter.split_documents([doc]))
            else:
                final_docs.append(doc)

        return final_docs

    def _choose_splitter(self, document: Document):
        """Pick the most suitable splitter for the document.

        Selection order: Markdown (file type or content sniffing), HTML
        (web source or content sniffing), recursive splitter for text with
        clear paragraph structure, plain character splitter otherwise.

        Args:
            document (Document): Input document.

        Returns:
            The chosen splitter instance.
        """
        content = document.page_content
        file_type = document.metadata.get('file_type', '').lower()
        source_type = document.metadata.get('source_type', '').lower()

        if file_type == '.md' or self._is_markdown(content):
            logger.debug("使用 Markdown 分割器")
            return self.markdown_splitter

        if source_type == 'web' or self._is_html(content):
            logger.debug("使用 HTML 分割器")
            return self.html_splitter

        if self._is_structured_text(content):
            logger.debug("使用递归字符分割器")
            return self.recursive_splitter

        logger.debug("使用默认分割器")
        return self.character_splitter

    def _is_markdown(self, text: str) -> bool:
        """Heuristically detect Markdown content.

        Fix: the previous check matched single characters such as '(',
        ')', '*' and '[', which classified almost any prose as Markdown
        and routed it to the header splitter. Look for line-anchored
        Markdown constructs and unambiguous inline syntax instead.
        """
        block_prefixes = (
            '# ', '## ', '### ', '#### ', '##### ', '###### ',  # ATX headers
            '```',                                              # fenced code
            '- ', '* ', '+ ',                                   # bullet lists
            '> ',                                               # block quotes
        )
        for raw_line in text.splitlines():
            if raw_line.lstrip().startswith(block_prefixes):
                return True
        # Inline constructs: bold emphasis or a [text](url) link.
        return '**' in text or '](' in text

    def _is_html(self, text: str) -> bool:
        """Return True if the text looks like HTML."""
        html_indicators = ['<html', '<head', '<body', '<div', '<p>', '<h1', '<h2', '<h3']
        # Lowercase once instead of once per indicator.
        lowered = text.lower()
        return any(indicator in lowered for indicator in html_indicators)

    def _is_structured_text(self, text: str) -> bool:
        """Return True if the text has clear paragraph structure."""
        paragraph_count = text.count('\n\n')
        line_count = text.count('\n')

        # Many blank-line separators, or a reasonable ratio of paragraph
        # breaks to lines (the `line_count > 10` guard also prevents a
        # division by zero).
        return paragraph_count > 2 or (line_count > 10 and paragraph_count / line_count > 0.1)

    def get_chunk_stats(self, chunks: List[Document]) -> Dict[str, Any]:
        """Compute summary statistics for a list of chunks.

        Args:
            chunks (List[Document]): Chunks to summarise.

        Returns:
            Dict[str, Any]: Totals, average/min/max sizes and a count per
            ``chunk_type``. min/max keys are omitted for empty input to
            preserve the original return shape.
        """
        if not chunks:
            return {
                'total_chunks': 0,
                'total_characters': 0,
                'average_chunk_size': 0,
                'chunk_types': {}
            }

        chunk_sizes = [len(chunk.page_content) for chunk in chunks]
        total_chars = sum(chunk_sizes)

        chunk_types = {}
        for chunk in chunks:
            chunk_type = chunk.metadata.get('chunk_type', 'unknown')
            chunk_types[chunk_type] = chunk_types.get(chunk_type, 0) + 1

        return {
            'total_chunks': len(chunks),
            'total_characters': total_chars,
            'average_chunk_size': total_chars // len(chunks),
            'min_chunk_size': min(chunk_sizes),
            'max_chunk_size': max(chunk_sizes),
            'chunk_types': chunk_types
        }

    def optimize_chunks(self, chunks: List[Document],
                       min_size: int = 100,
                       max_size: Optional[int] = None) -> List[Document]:
        """Optimize chunk sizes by merging small chunks and re-splitting
        large ones.

        Args:
            chunks (List[Document]): Input chunks.
            min_size (int): Chunks below this size are merged forward.
            max_size (Optional[int]): Chunks above this size are re-split;
                defaults to ``2 * chunk_size``.

        Returns:
            List[Document]: Optimized chunks.
        """
        if max_size is None:
            max_size = self.chunk_size * 2

        optimized_chunks = []

        i = 0
        while i < len(chunks):
            current_chunk = chunks[i]
            current_size = len(current_chunk.page_content)

            if current_size < min_size and i + 1 < len(chunks):
                # Too small: try merging with the next chunk.
                next_chunk = chunks[i + 1]
                combined_size = current_size + len(next_chunk.page_content)

                if combined_size <= max_size:
                    combined_content = current_chunk.page_content + "\n\n" + next_chunk.page_content
                    combined_metadata = {**current_chunk.metadata}
                    combined_metadata['combined_from'] = [
                        current_chunk.metadata.get('chunk_index', i),
                        next_chunk.metadata.get('chunk_index', i + 1)
                    ]

                    optimized_chunks.append(Document(
                        page_content=combined_content,
                        metadata=combined_metadata
                    ))
                    i += 2  # Consumed the next chunk as well.
                    continue

                # BUG FIX: previously this case fell through without
                # appending, silently DROPPING the small chunk whenever
                # the merge would exceed max_size. Keep it as-is instead.
                optimized_chunks.append(current_chunk)
            elif current_size > max_size:
                # Too large: re-split with the recursive splitter.
                optimized_chunks.extend(
                    self.recursive_splitter.split_documents([current_chunk])
                )
            else:
                optimized_chunks.append(current_chunk)

            i += 1

        logger.info(f"块优化完成: {len(chunks)} -> {len(optimized_chunks)} 个块")
        return optimized_chunks

# Module-level shared instance built with the default settings
# (chunk_size=1000, chunk_overlap=200, length_function=len).
smart_text_splitter = SmartTextSplitter() 