"""
基础分块方法演示
演示各种基础的文本分块方法
"""
import re
import string
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
import json


@dataclass
class Chunk:
    """A single chunk of text produced by a chunker.

    Attributes:
        text: The chunk content.
        start_index: Start offset of the chunk in the original text.
        end_index: End offset (exclusive) of the chunk in the original text.
        chunk_type: Label of the strategy that produced this chunk.
        metadata: Arbitrary extra information; ``None`` is normalized to an
            empty dict so callers can always mutate it safely.
    """
    text: str
    start_index: int
    end_index: int
    chunk_type: str
    # Optional because a dataclass field cannot take a mutable default ({});
    # the fixed annotation now matches the None default.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Normalize both the default and an explicitly passed None.
        if self.metadata is None:
            self.metadata = {}

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the chunk to a plain JSON-compatible dict."""
        return {
            'text': self.text,
            'start_index': self.start_index,
            'end_index': self.end_index,
            'chunk_type': self.chunk_type,
            'metadata': self.metadata
        }


class BasicChunker:
    """Base class for all chunkers: stores results and handles persistence."""

    def __init__(self):
        # Chunk objects produced by the most recent chunk() call.
        self.chunks = []

    def clear(self):
        """Discard any previously computed chunks."""
        self.chunks = []

    def get_chunks(self) -> List[Chunk]:
        """Return the current list of chunks."""
        return self.chunks

    def print_chunks(self, max_chunks: int = 5):
        """Pretty-print up to *max_chunks* chunks to stdout."""
        print(f"总共分块数: {len(self.chunks)}")
        print("=" * 50)

        for idx, ck in enumerate(self.chunks[:max_chunks], start=1):
            print(f"Chunk {idx}:")
            print(f"类型: {ck.chunk_type}")
            print(f"位置: {ck.start_index}-{ck.end_index}")
            print(f"长度: {len(ck.text)}")
            print(f"内容: {ck.text[:100]}...")
            print("-" * 50)

        hidden = len(self.chunks) - max_chunks
        if hidden > 0:
            print(f"... 还有 {hidden} 个块未显示")

    def save_chunks(self, filepath: str):
        """Dump the chunks to *filepath* as pretty-printed UTF-8 JSON."""
        payload = [ck.to_dict() for ck in self.chunks]
        with open(filepath, 'w', encoding='utf-8') as fh:
            json.dump(payload, fh, ensure_ascii=False, indent=2)
        print(f"分块结果已保存到: {filepath}")

    def load_chunks(self, filepath: str):
        """Replace the current chunks with those stored in *filepath*."""
        with open(filepath, 'r', encoding='utf-8') as fh:
            payload = json.load(fh)

        self.chunks = [
            Chunk(
                text=item['text'],
                start_index=item['start_index'],
                end_index=item['end_index'],
                chunk_type=item['chunk_type'],
                metadata=item.get('metadata', {}),
            )
            for item in payload
        ]

        print(f"已加载 {len(self.chunks)} 个分块")


class FixedLengthChunker(BasicChunker):
    """Chunker that slices text into fixed-size windows with overlap."""

    def __init__(self, chunk_size: int = 1000, overlap: int = 200):
        """Configure window size and overlap.

        Raises:
            ValueError: If *overlap* is not strictly smaller than *chunk_size*.
        """
        super().__init__()
        if overlap >= chunk_size:
            raise ValueError(f"Overlap ({overlap}) must be less than chunk_size ({chunk_size})")
        self.chunk_size = chunk_size
        self.overlap = overlap

    def chunk(self, text: str) -> List[Chunk]:
        """Split *text* into overlapping fixed-length chunks."""
        self.clear()

        if not text:
            return self.chunks

        # Distance between consecutive window starts; clamped to >= 1 so the
        # walk always makes progress.
        stride = max(1, self.chunk_size - self.overlap)

        result = []
        for begin in range(0, len(text), stride):
            finish = min(begin + self.chunk_size, len(text))
            piece = text[begin:finish]
            result.append(Chunk(
                text=piece,
                start_index=begin,
                end_index=finish,
                chunk_type='fixed_length',
                metadata={
                    'chunk_size': self.chunk_size,
                    'overlap': self.overlap,
                    'actual_length': len(piece),
                },
            ))

        self.chunks = result
        return self.chunks


class SentenceChunker(BasicChunker):
    """Chunker that groups whole sentences into chunks."""

    def __init__(self, max_sentences: int = 5, min_length: int = 50):
        """
        Args:
            max_sentences: Maximum number of sentences per chunk.
            min_length: Sentence fragments shorter than this are discarded.
        """
        super().__init__()
        self.max_sentences = max_sentences
        self.min_length = min_length

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* into sentence fragments on CJK/Latin terminators."""
        # Terminators for both English and Chinese punctuation.
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        # Keep only non-empty, sufficiently long fragments.
        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) >= self.min_length:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def chunk(self, text: str) -> List[Chunk]:
        """Group up to max_sentences sentences per chunk.

        Fixes over the previous version:
        - start_index is anchored at the chunk's first sentence instead of
          at the end of the previous chunk (or 0).
        - the search position advances after every matched sentence, so a
          duplicated sentence cannot be re-matched at an earlier offset.
        - the final chunk's end_index is the end of its last sentence, not
          len(text).
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        chunks = []
        current_chunk = []
        chunk_start = 0  # offset of the first sentence of the current group
        last_end = 0     # offset just past the most recently matched sentence
        search_pos = 0   # where to resume searching in the original text

        for sentence in sentences:
            # Locate the (stripped) sentence in the original text.
            sentence_start = text.find(sentence, search_pos)
            if sentence_start == -1:
                continue

            if not current_chunk:
                # Anchor the chunk at its first sentence.
                chunk_start = sentence_start

            current_chunk.append(sentence)
            last_end = sentence_start + len(sentence)
            search_pos = last_end

            if len(current_chunk) >= self.max_sentences:
                chunks.append(self._make_chunk(current_chunk, chunk_start, last_end))
                current_chunk = []

        # Flush any remaining sentences.
        if current_chunk:
            chunks.append(self._make_chunk(current_chunk, chunk_start, last_end))

        self.chunks = chunks
        return self.chunks

    def _make_chunk(self, sentences: List[str], start: int, end: int) -> Chunk:
        """Build a sentence-type Chunk from a group of sentence fragments."""
        return Chunk(
            text=' '.join(sentences) + '.',
            start_index=start,
            end_index=end,
            chunk_type='sentence',
            metadata={
                'num_sentences': len(sentences),
                'sentence_lengths': [len(s) for s in sentences],
            },
        )


class ParagraphChunker(BasicChunker):
    """Chunker that treats paragraphs as natural chunk boundaries."""

    def __init__(self, min_paragraph_length: int = 100, max_paragraph_length: int = 2000):
        """
        Args:
            min_paragraph_length: Paragraphs shorter than this are dropped.
            max_paragraph_length: Longer paragraphs are split further.
        """
        super().__init__()
        self.min_paragraph_length = min_paragraph_length
        self.max_paragraph_length = max_paragraph_length

    def split_paragraphs(self, text: str) -> List[str]:
        """Split *text* into paragraphs on blank lines."""
        # Bug fix: split on blank lines even when they contain whitespace
        # (e.g. indented text); the old literal '\n\n' split missed those.
        paragraphs = re.split(r'\n\s*\n', text)

        # Keep only non-empty, sufficiently long paragraphs.
        cleaned_paragraphs = []
        for para in paragraphs:
            para = para.strip()
            if para and len(para) >= self.min_paragraph_length:
                cleaned_paragraphs.append(para)

        return cleaned_paragraphs

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* paragraph by paragraph."""
        self.clear()

        if not text:
            return self.chunks

        paragraphs = self.split_paragraphs(text)
        chunks = []
        current_start = 0

        for para in paragraphs:
            # Locate the paragraph in the original text for exact offsets.
            para_start = text.find(para, current_start)
            if para_start == -1:
                continue

            if len(para) > self.max_paragraph_length:
                # Over-long paragraphs are split into word-based sub-chunks.
                chunks.extend(self._split_long_paragraph(para, para_start))
            else:
                chunks.append(Chunk(
                    text=para,
                    start_index=para_start,
                    end_index=para_start + len(para),
                    chunk_type='paragraph',
                    metadata={
                        'paragraph_length': len(para),
                        'is_long_paragraph': False
                    }
                ))

            current_start = para_start + len(para)

        self.chunks = chunks
        return self.chunks

    def _split_long_paragraph(self, paragraph: str, start_pos: int) -> List[Chunk]:
        """Split an over-long paragraph into word-based sub-chunks.

        NOTE(review): sub-chunk offsets assume single spaces between words;
        they may drift when the paragraph contains other whitespace.
        """
        chunks = []
        words = paragraph.split()
        current_chunk = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # Flush the current sub-chunk before it would exceed the limit.
            if current_length + len(word) + 1 > self.max_paragraph_length and current_chunk:
                chunk_text = ' '.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='paragraph',
                    metadata={
                        'paragraph_length': len(chunk_text),
                        'is_long_paragraph': True,
                        'is_sub_chunk': True
                    }
                ))
                current_chunk = []
                current_start += len(chunk_text) + 1  # +1 for the joining space
                current_length = 0

            current_chunk.append(word)
            current_length += len(word) + 1

        # Flush the remainder.
        if current_chunk:
            chunk_text = ' '.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='paragraph',
                metadata={
                    'paragraph_length': len(chunk_text),
                    'is_long_paragraph': True,
                    'is_sub_chunk': True
                }
            ))

        return chunks


class WordChunker(BasicChunker):
    """Chunker that packs words into chunks of roughly chunk_size characters."""

    def __init__(self, chunk_size: int = 200, overlap: int = 50):
        """
        Args:
            chunk_size: Target chunk size in characters.
            overlap: Overlap size in characters (converted to a word count).
        """
        super().__init__()
        self.chunk_size = chunk_size
        self.overlap = overlap

    def chunk(self, text: str) -> List[Chunk]:
        """Split *text* on whitespace and pack words into chunks.

        Fixes over the previous version:
        - a word longer than chunk_size is no longer duplicated (it used to
          be emitted as its own chunk AND appended to the next chunk);
        - text.find() returning -1 no longer corrupts the running offset;
        - overlap_words == 0 no longer keeps the entire chunk as overlap
          (current_chunk[-0:] returns the whole list).
        """
        self.clear()

        if not text:
            return self.chunks

        words = text.split()
        chunks = []
        current_chunk = []
        current_length = 0
        current_start = 0

        for word in words:
            if current_length + len(word) + 1 > self.chunk_size:
                if current_chunk:
                    chunk_text = ' '.join(current_chunk)
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='word',
                        metadata={
                            'word_count': len(current_chunk),
                            'character_count': len(chunk_text)
                        }
                    ))

                    # Keep the tail of the previous chunk as overlap.
                    # Approximation: assume an average word length of 10 chars.
                    overlap_words = self.overlap // 10
                    if 0 < overlap_words < len(current_chunk):
                        current_chunk = current_chunk[-overlap_words:]
                    else:
                        current_chunk = []
                    current_length = sum(len(w) + 1 for w in current_chunk)
                    # Only advance the offset when the overlap text is found.
                    found = text.find(' '.join(current_chunk), current_start)
                    if found != -1:
                        current_start = found
                else:
                    # A single word longer than chunk_size becomes its own chunk.
                    chunks.append(Chunk(
                        text=word,
                        start_index=current_start,
                        end_index=current_start + len(word),
                        chunk_type='word',
                        metadata={
                            'word_count': 1,
                            'character_count': len(word),
                            'is_long_word': True
                        }
                    ))
                    current_chunk = []
                    current_length = 0
                    current_start += len(word) + 1
                    # The long word is fully consumed; do not append it again.
                    continue

            current_chunk.append(word)
            current_length += len(word) + 1

        # Flush the remainder.
        if current_chunk:
            chunk_text = ' '.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='word',
                metadata={
                    'word_count': len(current_chunk),
                    'character_count': len(chunk_text)
                }
            ))

        self.chunks = chunks
        return self.chunks


class LineChunker(BasicChunker):
    """Chunker that groups text lines into fixed-size batches."""

    def __init__(self, max_lines: int = 50, min_line_length: int = 10):
        """
        Args:
            max_lines: Maximum number of lines per chunk.
            min_line_length: Lines shorter than this (after stripping) are skipped.
        """
        super().__init__()
        self.max_lines = max_lines
        self.min_line_length = min_line_length

    def chunk(self, text: str) -> List[Chunk]:
        """Split *text* into chunks of at most max_lines qualifying lines.

        NOTE(review): offsets are running totals over the joined chunk
        texts, not positions in the original text — they drift when lines
        are stripped or skipped; confirm whether exact offsets are needed.
        """
        self.clear()

        if not text:
            return self.chunks

        result = []
        batch = []
        offset = 0

        for raw_line in text.split('\n'):
            stripped = raw_line.strip()
            # Skip blank and too-short lines entirely.
            if len(stripped) < self.min_line_length:
                continue

            batch.append(stripped)

            if len(batch) >= self.max_lines:
                body = '\n'.join(batch)
                result.append(Chunk(
                    text=body,
                    start_index=offset,
                    end_index=offset + len(body),
                    chunk_type='line',
                    metadata={
                        'line_count': len(batch),
                        'line_lengths': [len(ln) for ln in batch],
                    },
                ))
                batch = []
                offset += len(body) + 1

        # Flush the final partial batch.
        if batch:
            body = '\n'.join(batch)
            result.append(Chunk(
                text=body,
                start_index=offset,
                end_index=offset + len(body),
                chunk_type='line',
                metadata={
                    'line_count': len(batch),
                    'line_lengths': [len(ln) for ln in batch],
                },
            ))

        self.chunks = result
        return self.chunks


class RecursiveChunker(BasicChunker):
    """Recursively split text using a prioritized list of separators.

    Each separator is tried in order (paragraphs, lines, sentences,
    phrases, words, characters); pieces that still exceed chunk_size are
    re-split with the next, finer separator.

    NOTE(review): start/end offsets are approximations — parts are
    strip()-ed and offsets advanced by joined lengths, so they may drift
    from the true positions in the original text.
    """

    def __init__(self, chunk_size: int = 1000, separators: Optional[List[str]] = None):
        """
        Args:
            chunk_size: Maximum chunk length in characters.
            separators: Separators tried from coarsest to finest; defaults
                to paragraph/line/sentence/phrase/word/character splits.
        """
        super().__init__()
        self.chunk_size = chunk_size
        self.separators = separators or [
            "\n\n",  # paragraph separator
            "\n",    # line separator
            ".", "!", "?", "。", "！", "？",  # sentence separators
            ",", "，", ";", "；",  # phrase separators
            " ",     # word separator
            ""       # character separator (last resort)
        ]

    def chunk(self, text: str) -> List[Chunk]:
        """Recursively split *text* into chunks of at most chunk_size chars."""
        self.clear()

        if not text:
            return self.chunks

        chunks = self._recursive_chunk(text, 0, self.separators)
        self.chunks = chunks
        return self.chunks

    def _recursive_chunk(self, text: str, start_pos: int, separators: List[str]) -> List[Chunk]:
        """Recursive worker: split on separators[0], recursing into
        separators[1:] for pieces that are still too large."""
        # Base case: the text already fits in one chunk.
        if len(text) <= self.chunk_size:
            return [Chunk(
                text=text,
                start_index=start_pos,
                end_index=start_pos + len(text),
                chunk_type='recursive',
                metadata={
                    'final_separator': separators[0] if separators else 'none',
                    # Recursion depth: how many separators were consumed.
                    'chunk_level': len(self.separators) - len(separators)
                }
            )]

        if not separators:
            # No separators left: fall back to hard character splits.
            return self._force_chunk(text, start_pos)

        separator = separators[0]

        # Bug fix: str.split('') raises ValueError, and '' is always "in"
        # any string — treat the empty separator as a character-level split.
        if separator == "":
            return self._force_chunk(text, start_pos)

        if separator not in text:
            # Current separator absent from the text; try the next one.
            return self._recursive_chunk(text, start_pos, separators[1:])

        # Split on the current separator and re-pack the parts.
        parts = text.split(separator)
        chunks = []
        current_chunk = []
        current_start = start_pos

        for part in parts:
            part = part.strip()
            if not part:
                continue

            # Would adding this part overflow the chunk? If so, flush first.
            if current_chunk and len(' '.join(current_chunk) + separator + part) > self.chunk_size:
                chunk_text = separator.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='recursive',
                    metadata={
                        'separator': separator,
                        'chunk_level': len(self.separators) - len(separators)
                    }
                ))
                current_chunk = []
                current_start += len(chunk_text) + len(separator)

            current_chunk.append(part)

            # A single part larger than chunk_size is re-split recursively
            # with the remaining (finer) separators.
            if len(part) > self.chunk_size:
                sub_chunks = self._recursive_chunk(part, current_start, separators[1:])
                chunks.extend(sub_chunks)
                current_chunk = []
                current_start += len(part) + len(separator)

        # Flush whatever remains after the last part.
        if current_chunk:
            chunk_text = separator.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='recursive',
                metadata={
                    'separator': separator,
                    'chunk_level': len(self.separators) - len(separators)
                }
            ))

        return chunks

    def _force_chunk(self, text: str, start_pos: int) -> List[Chunk]:
        """Hard-split *text* into chunk_size slices (no separators left).

        Bug fix: the old version used start_pos as an index into *text*,
        skipping the first start_pos characters; offsets are now local to
        *text* and shifted by start_pos only for reporting.
        """
        chunks = []
        offset = 0

        while offset < len(text):
            end = min(offset + self.chunk_size, len(text))
            chunk_text = text[offset:end]

            chunks.append(Chunk(
                text=chunk_text,
                start_index=start_pos + offset,
                end_index=start_pos + end,
                chunk_type='recursive',
                metadata={
                    'force_chunk': True,
                    'chunk_level': len(self.separators)
                }
            ))
            offset = end

        return chunks


def main():
    """Entry point: demonstrate each basic chunking strategy on a sample text."""

    def banner(title: str) -> None:
        # Section header in the same layout as every demo section.
        print("\n" + "=" * 40)
        print(title)
        print("=" * 40)

    print("=" * 60)
    print("基础分块方法演示")
    print("=" * 60)

    # Sample text shared by all demos.
    sample_text = """
    人工智能（AI）是计算机科学的一个分支，致力于创建能够执行通常需要人类智能的任务的系统。
    这些任务包括学习、推理、问题解决、感知和语言理解。AI系统可以通过各种方式实现，
    包括机器学习、深度学习、自然语言处理等技术。
    
    机器学习是AI的核心技术之一，它使计算机能够从数据中学习而无需明确编程。
    深度学习是机器学习的一个子集，使用神经网络来模拟人脑的工作方式。
    自然语言处理则专注于使计算机能够理解和生成人类语言。
    
    AI的应用领域非常广泛，包括医疗保健、金融、教育、交通等。
    在医疗保健领域，AI可以帮助医生诊断疾病、预测患者风险、个性化治疗方案。
    在金融领域，AI可以用于风险评估、欺诈检测、算法交易等。
    在教育领域，AI可以提供个性化学习体验、自动化评分系统等。
    """

    # (section title, configured chunker) pairs, run in order.
    demos = [
        ("1. 固定长度分块", FixedLengthChunker(chunk_size=200, overlap=50)),
        ("2. 句子分块", SentenceChunker(max_sentences=3)),
        ("3. 段落分块", ParagraphChunker()),
        ("4. 词分块", WordChunker(chunk_size=100, overlap=20)),
        ("5. 递归分块", RecursiveChunker(chunk_size=300)),
    ]
    for title, chunker in demos:
        banner(title)
        chunker.chunk(sample_text)
        chunker.print_chunks(max_chunks=3)

    # Persist two of the results as JSON files.
    banner("保存分块结果")
    demos[0][1].save_chunks("fixed_length_chunks.json")
    demos[1][1].save_chunks("sentence_chunks.json")

    print("\n" + "=" * 60)
    print("基础分块演示完成！")
    print("=" * 60)


# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()