"""
高级文档分块处理器
支持多种分块策略和优化方法
"""
import re
from typing import List, Dict, Any
from enum import Enum


class ChunkingStrategy(Enum):
    """Enumeration of the supported text-chunking strategies."""

    FIXED_SIZE = "fixed_size"  # fixed-width windows with character overlap
    SEMANTIC = "semantic"      # paragraph merging along semantic boundaries
    PARAGRAPH = "paragraph"    # split on paragraphs, merging small ones
    SENTENCE = "sentence"      # split on sentences, merged up to the limit


class AdvancedChunker:
    """Advanced text chunker supporting multiple chunking strategies.

    Strategies:
      - FIXED_SIZE: fixed-width windows with overlap, preferring to cut at
        a Chinese full stop ('。') or newline inside the window.
      - SEMANTIC: merge paragraphs up to the size limit, carrying an
        overlap tail into the next chunk.
      - PARAGRAPH: split on blank lines; merge small paragraphs, split
        oversized ones on sentence boundaries.
      - SENTENCE: split on sentence terminators and merge sentences up to
        the size limit.
    """

    def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200,
                 strategy: ChunkingStrategy = ChunkingStrategy.FIXED_SIZE):
        """Configure the chunker.

        Args:
            chunk_size: Target maximum number of characters per chunk.
            chunk_overlap: Characters of context shared between consecutive
                chunks (used by FIXED_SIZE and SEMANTIC strategies).
            strategy: Which chunking algorithm `chunk_text` dispatches to.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.strategy = strategy

    def chunk_text(self, text: str) -> List[str]:
        """Split ``text`` into chunks according to the configured strategy.

        Returns an empty list for empty input. Unknown strategy values fall
        back to fixed-size chunking.
        """
        if not text:
            return []

        if self.strategy == ChunkingStrategy.SEMANTIC:
            return self._semantic_chunking(text)
        if self.strategy == ChunkingStrategy.PARAGRAPH:
            return self._paragraph_chunking(text)
        if self.strategy == ChunkingStrategy.SENTENCE:
            return self._sentence_chunking(text)
        # FIXED_SIZE and any unrecognized strategy use the default.
        return self._fixed_size_chunking(text)

    def _fixed_size_chunking(self, text: str) -> List[str]:
        """Fixed-size chunking with overlap.

        Prefers to cut at the nearest '。' or newline inside the window so
        chunks end on natural boundaries.

        Fixes over the naive implementation:
          - Guarantees forward progress: previously, a boundary cut close to
            ``start`` combined with ``chunk_overlap >= end - start`` made
            ``start = end - chunk_overlap`` repeat the same window forever.
          - Terminates once the window reaches the end of the text instead
            of emitting an extra chunk that is a suffix of the previous one.
        """
        if len(text) <= self.chunk_size:
            return [text] if text else []

        chunks: List[str] = []
        start = 0
        n = len(text)

        while start < n:
            end = start + self.chunk_size

            # If this is not the final window, try to cut on a sentence or
            # line boundary inside [start, end).
            if end < n:
                period_pos = text.rfind('。', start, end)
                newline_pos = text.rfind('\n', start, end)

                if period_pos > start and period_pos > newline_pos:
                    end = period_pos + 1
                elif newline_pos > start:
                    end = newline_pos + 1

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # The final window consumed the rest of the text; another
            # (overlapped) pass would only duplicate its suffix.
            if end >= n:
                break

            # Step back by the overlap, but never to (or before) the current
            # start — that is what caused the infinite loop.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def _semantic_chunking(self, text: str) -> List[str]:
        """Semantic chunking: merge paragraphs, carrying an overlap tail.

        Paragraphs (blank-line separated) are accumulated until adding the
        next one would exceed ``chunk_size``; the accumulated chunk is then
        flushed and its last ``chunk_overlap`` characters are kept as the
        start of the next chunk to preserve context.
        """
        # Split on blank lines (one or more whitespace-only lines).
        paragraphs = re.split(r'\n\s*\n', text)
        chunks: List[str] = []
        current_chunk = ""

        for paragraph in paragraphs:
            paragraph = paragraph.strip()
            if not paragraph:
                continue

            # Would appending this paragraph overflow the current chunk?
            if len(current_chunk) + len(paragraph) > self.chunk_size and current_chunk:
                chunks.append(current_chunk.strip())
                # Keep the overlap tail of the flushed chunk as context.
                overlap_start = max(0, len(current_chunk) - self.chunk_overlap)
                current_chunk = current_chunk[overlap_start:] + "\n\n" + paragraph
            else:
                current_chunk += "\n\n" + paragraph if current_chunk else paragraph

        if current_chunk.strip():
            chunks.append(current_chunk.strip())

        return chunks

    def _paragraph_chunking(self, text: str) -> List[str]:
        """Paragraph chunking: merge small paragraphs, split oversized ones.

        A paragraph longer than ``chunk_size`` is further split on sentence
        terminators ('。', '！', '？', newline) and re-joined with '。'.
        """
        paragraphs = re.split(r'\n\s*\n', text)
        chunks: List[str] = []
        current_chunk = ""

        for paragraph in paragraphs:
            paragraph = paragraph.strip()
            if not paragraph:
                continue

            # Oversized paragraph: flush what we have, then split it by
            # sentence and repack the sentences into chunk-sized pieces.
            if len(paragraph) > self.chunk_size:
                if current_chunk:
                    chunks.append(current_chunk.strip())
                    current_chunk = ""

                sentences = re.split(r'[。！？\n]', paragraph)
                temp_chunk = ""
                for sentence in sentences:
                    sentence = sentence.strip()
                    if not sentence:
                        continue

                    if len(temp_chunk) + len(sentence) > self.chunk_size and temp_chunk:
                        chunks.append(temp_chunk.strip())
                        temp_chunk = sentence
                    else:
                        # NOTE: the original terminator is not preserved;
                        # every sentence is re-joined with '。'.
                        temp_chunk += sentence + "。"

                # Leftover sentences seed the next merged chunk.
                if temp_chunk.strip():
                    current_chunk = temp_chunk.strip()
            else:
                # Normal paragraph: merge it into the current chunk or
                # flush first if it would overflow.
                if len(current_chunk) + len(paragraph) > self.chunk_size and current_chunk:
                    chunks.append(current_chunk.strip())
                    current_chunk = paragraph
                else:
                    current_chunk += "\n\n" + paragraph if current_chunk else paragraph

        if current_chunk.strip():
            chunks.append(current_chunk.strip())

        return chunks

    def _sentence_chunking(self, text: str) -> List[str]:
        """Sentence chunking: split on terminators and merge up to the limit.

        Every sentence is re-terminated with '。' regardless of its original
        terminator ('！', '？', or newline).
        """
        sentences = re.split(r'[。！？\n]', text)
        chunks: List[str] = []
        current_chunk = ""

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            sentence += "。"  # normalize the terminator

            # Flush the current chunk if adding this sentence overflows it.
            if len(current_chunk) + len(sentence) > self.chunk_size and current_chunk:
                chunks.append(current_chunk.strip())
                current_chunk = sentence
            else:
                current_chunk += sentence

        if current_chunk.strip():
            chunks.append(current_chunk.strip())

        return chunks

    def get_chunk_info(self, text: str) -> Dict[str, Any]:
        """Chunk ``text`` and return summary statistics.

        Returns a dict with chunk count, average/min/max chunk size, the
        strategy name, and the chunk list itself (empty list for empty input,
        so the schema is consistent across both paths).
        """
        chunks = self.chunk_text(text)

        if not chunks:
            return {
                'chunk_count': 0,
                'avg_chunk_size': 0,
                'min_chunk_size': 0,
                'max_chunk_size': 0,
                'strategy': self.strategy.value,
                'chunks': []
            }

        chunk_sizes = [len(chunk) for chunk in chunks]

        return {
            'chunk_count': len(chunks),
            'avg_chunk_size': sum(chunk_sizes) / len(chunk_sizes),
            'min_chunk_size': min(chunk_sizes),
            'max_chunk_size': max(chunk_sizes),
            'strategy': self.strategy.value,
            'chunks': chunks
        }


# Usage example: exercise every chunking strategy on a small sample.
if __name__ == "__main__":
    # Sample multi-paragraph text.
    test_text = """
    这是第一段内容。这里包含了一些重要的信息。
    
    这是第二段内容。它描述了另一个主题。
    
    这是第三段内容。它提供了更多的细节和说明。
    """
    
    # Enum iteration follows definition order, so every strategy is
    # exercised in the order it is declared.
    for strategy in ChunkingStrategy:
        processor = AdvancedChunker(chunk_size=100, chunk_overlap=20, strategy=strategy)
        stats = processor.get_chunk_info(test_text)
        
        print(f"\n{strategy.value} 策略:")
        print(f"  块数: {stats['chunk_count']}")
        print(f"  平均大小: {stats['avg_chunk_size']:.1f}")
        print(f"  大小范围: {stats['min_chunk_size']} - {stats['max_chunk_size']}") 