"""
自适应分块方法演示
演示智能自适应的文本分块方法，根据文本内容特征动态调整分块策略
"""
import re
import numpy as np
from typing import List, Dict, Any, Optional, Tuple, Callable
from dataclasses import dataclass
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import json

# 使用通用导入工具
try:
    from .import_utils import get_basic_chunker_classes
    BasicChunker, Chunk = get_basic_chunker_classes()
except ImportError:
    # 如果导入工具不可用，使用备用方法
    try:
        import sys
        import os
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        from chunk_demo01_basic_chunking import BasicChunker, Chunk
    except ImportError:
        # 最后的直接导入尝试
        from basic_chunking import BasicChunker, Chunk


@dataclass
class TextFeatures:
    """Text characteristics that drive the adaptive chunking strategies."""
    text_density: float  # words per character of the raw text
    sentence_complexity: float  # 0-1 score from sentence length / vocabulary diversity
    topic_coherence: float  # mean similarity between adjacent sentences (0 if not computed)
    structural_markers: List[str]  # detected markers: 'headers', 'lists', 'code_blocks', 'tables'
    readability_score: float  # 0-1 score; higher means easier to read
    semantic_similarity: float  # mean pairwise TF-IDF cosine similarity (0 if not computed)

class AdaptiveChunker(BasicChunker):
    """Base class for adaptive chunkers.

    Subclasses provide the two hooks below; ``chunk`` wires them together:
    analyze the text's features, then chunk according to those features.
    """

    def __init__(self, base_chunk_size: int = 1000,
                 min_chunk_size: int = 100,
                 max_chunk_size: int = 3000,
                 adaptiveness_level: str = 'medium'):
        super().__init__()
        self.base_chunk_size = base_chunk_size
        self.min_chunk_size = min_chunk_size
        self.max_chunk_size = max_chunk_size
        self.adaptiveness_level = adaptiveness_level
        # Populated by chunk(); exposes the last analysis to callers.
        self.text_features = None

    def analyze_text_features(self, text: str) -> TextFeatures:
        """Analyze the text and return its features (subclass hook)."""
        raise NotImplementedError("子类必须实现此方法")

    def adaptive_chunk(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Chunk the text according to its features (subclass hook)."""
        raise NotImplementedError("子类必须实现此方法")

    def chunk(self, text: str) -> List[Chunk]:
        """Run feature analysis followed by the adaptive chunking strategy."""
        self.clear()

        # Empty input: nothing to analyze, return the (empty) chunk list.
        if not text:
            return self.chunks

        self.text_features = self.analyze_text_features(text)
        self.chunks = self.adaptive_chunk(text, self.text_features)
        return self.chunks


class DensityBasedChunker(AdaptiveChunker):
    """Adaptive chunker driven by text density (words per character).

    Low-density text is packed into larger chunks, high-density text into
    smaller ones; detected structural markers (headers, code blocks) switch
    to a structure-aware splitting strategy.
    """

    def __init__(self, base_chunk_size: int = 1000,
                 density_threshold_low: float = 0.3,
                 density_threshold_high: float = 0.7):
        super().__init__(base_chunk_size)
        self.density_threshold_low = density_threshold_low
        self.density_threshold_high = density_threshold_high

    def analyze_text_features(self, text: str) -> TextFeatures:
        """Compute density, complexity, structure and readability features."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        # Text density: words per character over the whole text.
        if sentences:
            word_count = sum(len(s.split()) for s in sentences)
            density = word_count / len(text) if text else 0
        else:
            density = 0

        return TextFeatures(
            text_density=density,
            sentence_complexity=self._calculate_complexity(text),
            topic_coherence=0.0,  # semantic features are not computed by this chunker
            structural_markers=self._detect_structural_markers(text),
            readability_score=self._calculate_readability(text),
            semantic_similarity=0.0
        )

    def _calculate_complexity(self, text: str) -> float:
        """Sentence complexity in [0, 1] from length and vocabulary diversity."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if not sentences:
            return 0.0

        avg_length = np.mean([len(s) for s in sentences])
        unique_words = len(set(word.lower() for s in sentences for word in s.split()))
        total_words = sum(len(s.split()) for s in sentences)

        vocabulary_diversity = unique_words / total_words if total_words > 0 else 0
        complexity = (avg_length / 100) * vocabulary_diversity

        return min(complexity, 1.0)

    def _detect_structural_markers(self, text: str) -> List[str]:
        """Detect markdown-style structure markers present in the text."""
        markers = []

        # Headers: lines starting with 1-6 '#' characters.
        if re.search(r'^#{1,6}\s+', text, re.MULTILINE):
            markers.append('headers')

        # Bulleted list items.
        if re.search(r'^\s*[-*+]\s+', text, re.MULTILINE):
            markers.append('lists')

        # Fenced code blocks.
        if re.search(r'```', text):
            markers.append('code_blocks')

        # Pipe-delimited table rows.
        if re.search(r'\|.*\|', text):
            markers.append('tables')

        return markers

    def _calculate_readability(self, text: str) -> float:
        """Simple readability score in [0, 1]; higher means easier to read."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if not sentences:
            return 0.0

        avg_sentence_length = np.mean([len(s.split()) for s in sentences])
        avg_word_length = np.mean([len(word) for s in sentences for word in s.split()])

        # Longer sentences and longer words lower the score.
        return 1.0 - min((avg_sentence_length / 20 + avg_word_length / 8) / 2, 1.0)

    def adaptive_chunk(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Pick a chunk size from the density, then a strategy from structure."""
        # Adjust chunk size by density; cast to int so the value matches the
        # declared chunk_size type (1.5x / 0.7x of an int is a float).
        if features.text_density < self.density_threshold_low:
            # Sparse text: larger chunks, capped at max_chunk_size.
            chunk_size = int(min(self.base_chunk_size * 1.5, self.max_chunk_size))
        elif features.text_density > self.density_threshold_high:
            # Dense text: smaller chunks, floored at min_chunk_size.
            chunk_size = int(max(self.base_chunk_size * 0.7, self.min_chunk_size))
        else:
            chunk_size = self.base_chunk_size

        # Structure markers select the splitting strategy.
        if 'headers' in features.structural_markers:
            return self._chunk_by_paragraphs(text, chunk_size)
        if 'code_blocks' in features.structural_markers:
            return self._chunk_with_code_blocks(text, chunk_size)
        return self._chunk_by_length(text, chunk_size)

    def _chunk_by_paragraphs(self, text: str, chunk_size: int) -> List[Chunk]:
        """Accumulate paragraphs into chunks of roughly chunk_size characters.

        Fix over the previous version: chunk text is stripped consistently
        (before, only the final chunk was stripped) and start/end offsets are
        located in the original text via ``str.find`` with a running-position
        fallback, instead of an incremental estimate that drifted.
        """
        chunks = []
        current_chunk = ""
        search_pos = 0

        def flush():
            # Emit the accumulated paragraphs as one chunk, if non-empty.
            nonlocal current_chunk, search_pos
            chunk_text = current_chunk.strip()
            current_chunk = ""
            if not chunk_text:
                return
            start = text.find(chunk_text, search_pos)
            if start == -1:
                # Stripped paragraphs may not match the raw text verbatim;
                # fall back to the running position estimate.
                start = search_pos
            chunks.append(Chunk(
                text=chunk_text,
                start_index=start,
                end_index=start + len(chunk_text),
                chunk_type='density_adaptive_paragraph',
                metadata={
                    'paragraph_count': chunk_text.count('\n\n') + 1,
                    'adaptation_reason': 'paragraph_based'
                }
            ))
            search_pos = start + len(chunk_text)

        for para in text.split('\n\n'):
            para = para.strip()
            if not para:
                continue
            # Flush before the chunk would overflow; a single oversized
            # paragraph still becomes one (oversized) chunk.
            if current_chunk and len(current_chunk) + len(para) > chunk_size:
                flush()
            current_chunk += para + "\n\n"

        flush()
        return chunks

    def _chunk_with_code_blocks(self, text: str, chunk_size: int) -> List[Chunk]:
        """Split text containing fenced code blocks; each fence is its own chunk."""
        code_pattern = r'```.*?```'
        parts = re.split(code_pattern, text, flags=re.DOTALL)
        code_blocks = re.findall(code_pattern, text, flags=re.DOTALL)

        chunks = []
        current_pos = 0

        for i, part in enumerate(parts):
            if part.strip():
                # Plain-text segment between (or around) code fences.
                if len(part) > chunk_size:
                    for sub_chunk in self._chunk_by_length(part, chunk_size):
                        chunks.append(Chunk(
                            text=sub_chunk.text,
                            start_index=current_pos,
                            end_index=current_pos + len(sub_chunk.text),
                            chunk_type='density_adaptive_text',
                            metadata={
                                'adaptation_reason': 'with_code_blocks',
                                'is_text_part': True
                            }
                        ))
                        current_pos += len(sub_chunk.text)
                else:
                    chunks.append(Chunk(
                        text=part,
                        start_index=current_pos,
                        end_index=current_pos + len(part),
                        chunk_type='density_adaptive_text',
                        metadata={
                            'adaptation_reason': 'with_code_blocks',
                            'is_text_part': True
                        }
                    ))
                    current_pos += len(part)

            # Interleave the code block that followed this text part.
            if i < len(code_blocks):
                code_block = code_blocks[i]
                chunks.append(Chunk(
                    text=code_block,
                    start_index=current_pos,
                    end_index=current_pos + len(code_block),
                    chunk_type='density_adaptive_code',
                    metadata={
                        'adaptation_reason': 'code_block',
                        'is_code_part': True
                    }
                ))
                current_pos += len(code_block)

        return chunks

    def _chunk_by_length(self, text: str, chunk_size: int) -> List[Chunk]:
        """Fixed-length chunking fallback."""
        chunks = []
        start = 0

        while start < len(text):
            end = min(start + chunk_size, len(text))
            chunks.append(Chunk(
                text=text[start:end],
                start_index=start,
                end_index=end,
                chunk_type='density_adaptive_length',
                metadata={
                    'adaptation_reason': 'fixed_length',
                    'chunk_size': chunk_size
                }
            ))
            start = end

        return chunks


class SemanticFlowChunker(AdaptiveChunker):
    """Adaptive chunker that splits where the semantic flow breaks.

    Sentences are embedded with TF-IDF; a chunk boundary is placed when the
    running chunk loses coherence or the next sentence changes topic.
    """

    def __init__(self, base_chunk_size: int = 1000,
                 semantic_threshold: float = 0.6,
                 coherence_window: int = 3):
        super().__init__(base_chunk_size)
        self.semantic_threshold = semantic_threshold
        self.coherence_window = coherence_window
        self.vectorizer = TfidfVectorizer(max_features=1000)

    def analyze_text_features(self, text: str) -> TextFeatures:
        """Compute coherence/similarity features from TF-IDF sentence vectors."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if len(sentences) < 2:
            return TextFeatures(0.0, 0.0, 0.0, [], 0.0, 0.0)

        try:
            tfidf_matrix = self.vectorizer.fit_transform(sentences)
            similarity_matrix = cosine_similarity(tfidf_matrix)

            # Coherence: mean similarity of adjacent sentence pairs.
            coherence_scores = [similarity_matrix[i][i + 1]
                                for i in range(len(sentences) - 1)]
            avg_coherence = np.mean(coherence_scores)

            # Global similarity: mean over the whole similarity matrix.
            semantic_similarity = np.mean(similarity_matrix)
        except Exception:
            # Vectorization can fail (e.g. empty vocabulary); degrade gracefully.
            avg_coherence = 0.0
            semantic_similarity = 0.0

        word_count = sum(len(s.split()) for s in sentences)
        density = word_count / len(text) if text else 0

        return TextFeatures(
            text_density=density,
            sentence_complexity=self._calculate_complexity(text),
            topic_coherence=avg_coherence,
            structural_markers=self._detect_structural_markers(text),
            readability_score=self._calculate_readability(text),
            semantic_similarity=semantic_similarity
        )

    def _calculate_complexity(self, text: str) -> float:
        """Complexity in [0, 1] from average sentence length."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if not sentences:
            return 0.0

        avg_length = np.mean([len(s) for s in sentences])
        return min(avg_length / 200, 1.0)

    def _detect_structural_markers(self, text: str) -> List[str]:
        """Detect markdown headers and lists (subset used by this chunker)."""
        markers = []

        if re.search(r'^#{1,6}\s+', text, re.MULTILINE):
            markers.append('headers')

        if re.search(r'^\s*[-*+]\s+', text, re.MULTILINE):
            markers.append('lists')

        return markers

    def _calculate_readability(self, text: str) -> float:
        """Readability in [0, 1]; higher means easier to read."""
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if not sentences:
            return 0.0

        avg_length = np.mean([len(s.split()) for s in sentences])
        return 1.0 - min(avg_length / 25, 1.0)

    def adaptive_chunk(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Group sentences into chunks, splitting at semantic boundaries.

        Fix over the previous version: chunks whose re-joined text cannot be
        located in the original (e.g. text split on Chinese punctuation but
        re-joined with '. ') are no longer silently dropped — a running
        position estimate is used as a fallback, so no content is lost.
        """
        sentences = re.split(r'[.!?。！？]', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if len(sentences) < 2:
            return [Chunk(text, 0, len(text), 'semantic_flow')]

        try:
            tfidf_matrix = self.vectorizer.fit_transform(sentences)
            similarity_matrix = cosine_similarity(tfidf_matrix)
        except Exception:
            # Fallback: treat everything as maximally similar (no topic splits).
            similarity_matrix = np.ones((len(sentences), len(sentences)))

        chunks = []
        current_chunk = []
        chunk_first_idx = 0  # global index of the first sentence of current_chunk
        current_start = 0

        def emit(split_reason):
            # Emit current_chunk as one Chunk and advance the position cursor.
            nonlocal current_start
            chunk_text = '. '.join(current_chunk) + '.'
            chunk_start = text.find(chunk_text, current_start)
            if chunk_start == -1:
                chunk_start = current_start  # re-joined text not found verbatim
            chunks.append(Chunk(
                text=chunk_text,
                start_index=chunk_start,
                end_index=chunk_start + len(chunk_text),
                chunk_type='semantic_flow',
                metadata={
                    'sentence_count': len(current_chunk),
                    'coherence_score': self._calculate_chunk_coherence(
                        current_chunk, similarity_matrix, chunk_first_idx),
                    'split_reason': split_reason
                }
            ))
            current_start = chunk_start + len(chunk_text)

        for i, sentence in enumerate(sentences):
            current_chunk.append(sentence)

            if self._should_split(current_chunk, i, sentences, similarity_matrix, features):
                emit(self._get_split_reason(current_chunk, i, sentences,
                                            similarity_matrix, features))
                current_chunk = []
                chunk_first_idx = i + 1

        # Trailing sentences that never triggered a split.
        if current_chunk:
            emit('final_chunk')

        return chunks

    def _should_split(self, current_chunk: List[str], current_index: int,
                     all_sentences: List[str], similarity_matrix: np.ndarray,
                     features: TextFeatures) -> bool:
        """Decide whether the accumulated chunk should be closed here.

        ``current_index`` is the global index (into ``all_sentences``) of the
        last sentence in ``current_chunk``; the chunk is always a contiguous
        run ending there, so no list search is needed.
        """
        # Hard length cap always wins.
        chunk_text = '. '.join(current_chunk) + '.'
        if len(chunk_text) > self.max_chunk_size:
            return True

        if len(current_chunk) < 2:
            return False

        # Internal coherence of the chunk against the global matrix.
        first_idx = current_index - len(current_chunk) + 1
        coherence = self._calculate_chunk_coherence(current_chunk, similarity_matrix, first_idx)
        if coherence < self.semantic_threshold:
            return True

        # Topic change: similarity between the last sentence and the next one.
        if current_index < len(all_sentences) - 1:
            if similarity_matrix[current_index][current_index + 1] < self.semantic_threshold:
                return True

        return False

    def _calculate_chunk_coherence(self, chunk_sentences: List[str],
                                 similarity_matrix: np.ndarray,
                                 start_offset: int = 0) -> float:
        """Mean similarity between consecutive sentences of the chunk.

        ``start_offset`` maps the chunk's local sentence positions onto the
        rows of ``similarity_matrix``, which covers the whole document.
        (Previously local indices were used against the global matrix, so
        the wrong entries were read; default 0 keeps the old call signature
        working.)
        """
        if len(chunk_sentences) < 2:
            return 1.0

        n = len(similarity_matrix)
        similarities = [
            similarity_matrix[start_offset + i][start_offset + i + 1]
            for i in range(len(chunk_sentences) - 1)
            if start_offset + i + 1 < n
        ]

        return np.mean(similarities) if similarities else 1.0

    def _get_split_reason(self, current_chunk: List[str], current_index: int,
                         all_sentences: List[str], similarity_matrix: np.ndarray,
                         features: TextFeatures) -> str:
        """Explain why _should_split fired, mirroring its checks in order."""
        chunk_text = '. '.join(current_chunk) + '.'

        if len(chunk_text) > self.max_chunk_size:
            return "max_length_exceeded"

        first_idx = current_index - len(current_chunk) + 1
        coherence = self._calculate_chunk_coherence(current_chunk, similarity_matrix, first_idx)
        if coherence < self.semantic_threshold:
            return "low_coherence"

        if current_index < len(all_sentences) - 1:
            if similarity_matrix[current_index][current_index + 1] < self.semantic_threshold:
                return "topic_change"

        return "natural_break"


class HybridAdaptiveChunker(AdaptiveChunker):
    """Chunker that scores density/semantic/structure strategies and delegates."""

    def __init__(self, base_chunk_size: int = 1000,
                 strategy_weights: Optional[Dict[str, float]] = None):
        super().__init__(base_chunk_size)
        # Default weights favour the semantic strategy slightly.
        self.strategy_weights = strategy_weights or {
            'density': 0.3,
            'semantic': 0.4,
            'structure': 0.3
        }

        # Sub-chunkers reused for both feature analysis and delegation.
        self.density_chunker = DensityBasedChunker(base_chunk_size)
        self.semantic_chunker = SemanticFlowChunker(base_chunk_size)

    def analyze_text_features(self, text: str) -> TextFeatures:
        """Merge features from the density and semantic analyzers."""
        density_features = self.density_chunker.analyze_text_features(text)
        semantic_features = self.semantic_chunker.analyze_text_features(text)

        # Density-side features come from the density analyzer, semantic-side
        # features from the semantic analyzer.
        return TextFeatures(
            text_density=density_features.text_density,
            sentence_complexity=density_features.sentence_complexity,
            topic_coherence=semantic_features.topic_coherence,
            structural_markers=density_features.structural_markers,
            readability_score=density_features.readability_score,
            semantic_similarity=semantic_features.semantic_similarity
        )

    def adaptive_chunk(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Select the best-scoring strategy and run it."""
        strategy = self._select_strategy(features)

        if strategy == 'density':
            chunks = self.density_chunker.adaptive_chunk(text, features)
        elif strategy == 'semantic':
            chunks = self.semantic_chunker.adaptive_chunk(text, features)
        elif strategy == 'structure':
            chunks = self._chunk_by_structure(text, features)
        else:
            chunks = self._chunk_hybrid(text, features)

        # Tag each chunk with the chosen strategy. The weights are copied so
        # a caller mutating chunk metadata cannot corrupt this chunker's
        # configuration (previously the same dict was shared by reference).
        for chunk in chunks:
            chunk.metadata['hybrid_strategy'] = strategy
            chunk.metadata['strategy_weights'] = dict(self.strategy_weights)

        return chunks

    def _select_strategy(self, features: TextFeatures) -> str:
        """Score each strategy from the features and return the best one."""
        scores = {}

        scores['density'] = (features.text_density + features.sentence_complexity) / 2
        scores['semantic'] = (features.topic_coherence + features.semantic_similarity) / 2
        scores['structure'] = len(features.structural_markers) / 4  # up to 4 marker kinds

        # Weight the raw scores by the configured strategy weights.
        weighted_scores = {}
        for strategy, score in scores.items():
            weighted_scores[strategy] = score * self.strategy_weights.get(strategy, 0.3)

        return max(weighted_scores, key=weighted_scores.get)

    def _chunk_by_structure(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Dispatch on the strongest structural marker present."""
        if 'headers' in features.structural_markers:
            return self._chunk_by_headers(text)
        elif 'lists' in features.structural_markers:
            return self._chunk_by_lists(text)
        else:
            return self._chunk_by_paragraphs(text)

    def _chunk_by_headers(self, text: str) -> List[Chunk]:
        """Split on markdown headers; each chunk is a header plus its body.

        Fix over the previous version: text appearing before the first header
        (``sections[0]`` from re.split with a capturing group) is emitted as a
        preamble chunk instead of being silently dropped.
        """
        header_pattern = r'(^#{1,6}\s+.+$)'
        sections = re.split(header_pattern, text, flags=re.MULTILINE)

        chunks = []
        current_pos = 0

        # Preamble before the first header.
        preamble = sections[0] if sections else ""
        if preamble.strip():
            chunks.append(Chunk(
                text=preamble,
                start_index=current_pos,
                end_index=current_pos + len(preamble),
                chunk_type='hybrid_structure',
                metadata={'structure_type': 'preamble', 'header_level': 0}
            ))
        current_pos += len(preamble)

        # sections alternates [preamble, header, content, header, content, ...]
        for i in range(1, len(sections), 2):
            header = sections[i]
            content = sections[i + 1] if i + 1 < len(sections) else ""

            section_text = header + "\n" + content
            chunks.append(Chunk(
                text=section_text,
                start_index=current_pos,
                end_index=current_pos + len(section_text),
                chunk_type='hybrid_structure',
                metadata={
                    'structure_type': 'header',
                    'header_level': len(header) - len(header.lstrip('#'))
                }
            ))
            current_pos += len(section_text)

        return chunks

    def _chunk_by_lists(self, text: str) -> List[Chunk]:
        """Group bulleted list items into chunks of roughly base_chunk_size.

        NOTE(review): only list items are chunked — surrounding prose is
        dropped, and offsets are relative to the joined items, not the
        original text. Kept as-is; confirm whether that is intended.
        """
        list_pattern = r'(^\s*[-*+]\s+.+$)'
        items = re.findall(list_pattern, text, flags=re.MULTILINE)

        chunks = []
        current_chunk = []
        current_start = 0

        for item in items:
            current_chunk.append(item)

            if len('\n'.join(current_chunk)) > self.base_chunk_size:
                chunk_text = '\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='hybrid_structure',
                    metadata={'structure_type': 'list'}
                ))
                current_start += len(chunk_text)
                current_chunk = []

        if current_chunk:
            chunk_text = '\n'.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='hybrid_structure',
                metadata={'structure_type': 'paragraph'} if False else {'structure_type': 'list'}
            ))

        return chunks

    def _chunk_by_paragraphs(self, text: str) -> List[Chunk]:
        """Group paragraphs into chunks of roughly base_chunk_size characters."""
        paragraphs = text.split('\n\n')
        chunks = []
        current_chunk = []
        current_start = 0

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            current_chunk.append(para)

            if len('\n\n'.join(current_chunk)) > self.base_chunk_size:
                chunk_text = '\n\n'.join(current_chunk)
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='hybrid_structure',
                    metadata={'structure_type': 'paragraph'}
                ))
                current_start += len(chunk_text)
                current_chunk = []

        if current_chunk:
            chunk_text = '\n\n'.join(current_chunk)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='hybrid_structure',
                metadata={'structure_type': 'paragraph'}
            ))

        return chunks

    def _chunk_hybrid(self, text: str, features: TextFeatures) -> List[Chunk]:
        """Run both sub-strategies and keep whichever evaluates better."""
        density_chunks = self.density_chunker.adaptive_chunk(text, features)
        semantic_chunks = self.semantic_chunker.adaptive_chunk(text, features)

        if self._evaluate_chunks(density_chunks) > self._evaluate_chunks(semantic_chunks):
            return density_chunks
        else:
            return semantic_chunks

    def _evaluate_chunks(self, chunks: List[Chunk]) -> float:
        """Heuristic chunk-quality score: closeness to base size + uniformity."""
        if not chunks:
            return 0.0

        # How close the average chunk length is to the target size.
        avg_length = np.mean([len(chunk.text) for chunk in chunks])
        length_score = 1.0 - abs(avg_length - self.base_chunk_size) / self.base_chunk_size

        # How uniform the chunk lengths are (lower relative std is better).
        lengths = [len(chunk.text) for chunk in chunks]
        variability_score = 1.0 - (np.std(lengths) / np.mean(lengths) if np.mean(lengths) > 0 else 0)

        return (length_score + variability_score) / 2


def main():
    """Run the adaptive-chunking demo against a sample markdown document.

    Fix over the previous version: the strategy-report loop used
    ``chunks.index(chunk)`` (O(n) per iteration and wrong when two chunks
    compare equal); it now uses ``enumerate``.
    """
    print("=" * 60)
    print("自适应分块方法演示")
    print("=" * 60)

    # Sample markdown text with headers, paragraphs and a numbered list.
    sample_text = """
# 人工智能技术概述

人工智能（AI）是计算机科学的一个分支，致力于创建能够执行通常需要人类智能的任务的系统。
这些任务包括学习、推理、问题解决、感知和语言理解。AI系统可以通过各种方式实现，
包括机器学习、深度学习、自然语言处理等技术。

## 机器学习基础

机器学习是AI的核心技术之一，它使计算机能够从数据中学习而无需明确编程。
深度学习是机器学习的一个子集，使用神经网络来模拟人脑的工作方式。
自然语言处理则专注于使计算机能够理解和生成人类语言。

### 监督学习

监督学习使用标记数据进行训练。常见的算法包括线性回归、决策树、支持向量机。
这些算法在分类、回归、排名等任务中表现出色。

### 无监督学习

无监督学习处理未标记的数据，主要用于聚类分析、降维、异常检测等。
K-means、层次聚类、主成分分析是常用的无监督学习方法。

## 应用领域

AI的应用领域非常广泛，包括医疗保健、金融、教育、交通等。

在医疗保健领域，AI可以帮助医生诊断疾病、预测患者风险、个性化治疗方案。
在金融领域，AI可以用于风险评估、欺诈检测、算法交易等。
在教育领域，AI可以提供个性化学习体验、自动化评分系统等。

## 深度学习模型

深度学习模型，如卷积神经网络（CNN）和循环神经网络（RNN），在图像识别和语音识别方面表现出色。
这些模型通过大量的数据训练，能够自动学习特征和模式。

转换器模型，如BERT和GPT，在自然语言处理任务中取得了突破性进展。
这些模型基于注意力机制，能够处理长距离依赖关系。

## 未来发展趋势

人工智能的未来发展趋势包括：

1. **多模态学习**：结合视觉、语言、声音等多种模态
2. **自监督学习**：减少对标记数据的依赖
3. **可解释AI**：提高模型的透明度和可解释性
4. **边缘计算**：在设备端进行AI推理
5. **AI伦理**：确保AI系统的公平性和安全性

这些趋势将推动AI技术在各个领域的深入应用。
    """

    # 1. Density-based adaptive chunking.
    print("\n" + "=" * 40)
    print("1. 基于文本密度的自适应分块")
    print("=" * 40)
    density_chunker = DensityBasedChunker(base_chunk_size=400)
    chunks = density_chunker.chunk(sample_text)
    density_chunker.print_chunks(max_chunks=3)

    # Report the analyzed text features.
    print("\n文本特征分析:")
    print(f"文本密度: {density_chunker.text_features.text_density:.3f}")
    print(f"句子复杂度: {density_chunker.text_features.sentence_complexity:.3f}")
    print(f"结构标记: {density_chunker.text_features.structural_markers}")
    print(f"可读性分数: {density_chunker.text_features.readability_score:.3f}")

    # 2. Semantic-flow adaptive chunking.
    print("\n" + "=" * 40)
    print("2. 基于语义流的自适应分块")
    print("=" * 40)
    semantic_chunker = SemanticFlowChunker(base_chunk_size=400)
    chunks = semantic_chunker.chunk(sample_text)
    semantic_chunker.print_chunks(max_chunks=3)

    # Report the semantic features.
    print("\n语义特征分析:")
    print(f"主题连贯性: {semantic_chunker.text_features.topic_coherence:.3f}")
    print(f"语义相似度: {semantic_chunker.text_features.semantic_similarity:.3f}")

    # 3. Hybrid adaptive chunking.
    print("\n" + "=" * 40)
    print("3. 混合自适应分块")
    print("=" * 40)
    hybrid_chunker = HybridAdaptiveChunker(base_chunk_size=400)
    chunks = hybrid_chunker.chunk(sample_text)
    hybrid_chunker.print_chunks(max_chunks=3)

    # Report the strategy chosen for the first few chunks.
    print("\n分块策略分析:")
    for chunk_no, chunk in enumerate(chunks[:3], start=1):
        print(f"块 {chunk_no}: {chunk.metadata.get('hybrid_strategy', 'unknown')}")
        print(f"  分块原因: {chunk.metadata.get('split_reason', 'N/A')}")

    # Persist all three result sets.
    print("\n" + "=" * 40)
    print("保存分块结果")
    print("=" * 40)
    density_chunker.save_chunks("density_adaptive_chunks.json")
    semantic_chunker.save_chunks("semantic_flow_chunks.json")
    hybrid_chunker.save_chunks("hybrid_adaptive_chunks.json")

    print("\n" + "=" * 60)
    print("自适应分块演示完成！")
    print("=" * 60)


# =============================================================================
# Advanced adaptive chunking methods
# =============================================================================

class ReinforcementLearningChunker(AdaptiveChunker):
    """Q-learning based adaptive chunker.

    Every call to :meth:`chunk` is one learning episode: the text is
    mapped to a discrete state, an action (a chunking strategy) is chosen
    with an epsilon-greedy policy, executed, scored by a multi-objective
    reward, and the Q-table is updated with the standard Q-learning rule.
    """

    def __init__(self, base_chunk_size: int = 1000, learning_rate: float = 0.01,
                 discount_factor: float = 0.9, exploration_rate: float = 0.1):
        super().__init__(base_chunk_size)
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate

        # Action space: the chunking strategies the agent can choose from.
        self.actions = [
            'fixed_length',
            'sentence_based',
            'paragraph_based',
            'semantic_based',
            'density_based'
        ]

        # Q-table mapping state tuple -> {action: Q-value}.
        self.q_table = {}

        # Informational list of state feature names; get_state() actually
        # encodes a compact 5-tuple derived from a subset of these.
        self.state_features = [
            'text_length',
            'sentence_count',
            'paragraph_count',
            'avg_sentence_length',
            'text_density',
            'semantic_coherence'
        ]

        # Reward obtained on each episode, in call order.
        self.reward_history = []

        # Optional dependency probe; the Q-learning here does not use gym.
        try:
            import gym
            self.gym_available = True
        except ImportError:
            self.gym_available = False

    def analyze_text_features(self, text: str) -> TextFeatures:
        """Lightweight heuristic feature analysis.

        The base class leaves analyze_text_features() abstract; without an
        override, chunk() would raise NotImplementedError before any
        learning could happen, so a simple default is provided here.
        Subclasses may still override it with something richer.
        """
        sentences = [s.strip() for s in text.split('. ') if s.strip()]
        avg_sentence_len = len(text) / max(len(sentences), 1)
        # Crude proxies, squashed into [0, 1]; thresholds are heuristic.
        density = min(avg_sentence_len / 100.0, 1.0)
        complexity = min(avg_sentence_len / 50.0, 1.0)
        markers = re.findall(r'^(?:#+|[\*\-\+]|\d+\.|>)\s', text, re.MULTILINE)
        coherence = self._adjacent_similarity(sentences)
        return TextFeatures(
            text_density=density,
            sentence_complexity=complexity,
            topic_coherence=coherence,
            structural_markers=[m.strip() for m in markers],
            readability_score=max(0.0, 1.0 - complexity),
            semantic_similarity=coherence,
        )

    def _adjacent_similarity(self, sentences: List[str]) -> float:
        """Mean TF-IDF cosine similarity of neighbouring sentences.

        Returns a neutral 0.5 when there are fewer than two sentences or
        vectorisation fails (e.g. degenerate vocabulary).
        """
        if len(sentences) < 2:
            return 0.5
        try:
            tfidf_matrix = TfidfVectorizer(max_features=100).fit_transform(sentences)
            similarities = cosine_similarity(tfidf_matrix)
            scores = [similarities[i][i + 1] for i in range(len(sentences) - 1)]
            return float(np.mean(scores)) if scores else 0.5
        except Exception:
            return 0.5

    def get_state(self, text: str, features: TextFeatures) -> Tuple:
        """Discretise text features into a hashable state tuple."""
        state_values = [
            min(len(text) // 100, 10),                    # text-length bucket
            min(features.sentence_complexity * 10, 10),   # sentence complexity
            min(len(features.structural_markers), 5),     # structural marker count
            min(features.text_density * 10, 10),          # text density
            1 if features.topic_coherence > 0.7 else 0,   # coherent-topic flag
        ]
        return tuple(state_values)

    def choose_action(self, state: Tuple) -> str:
        """Epsilon-greedy action selection, initialising unseen states."""
        if state not in self.q_table:
            self.q_table[state] = {action: 0.0 for action in self.actions}

        if np.random.random() < self.exploration_rate:
            # Explore: uniformly random strategy.
            return np.random.choice(self.actions)
        # Exploit: highest-valued strategy for this state.
        return max(self.q_table[state].items(), key=lambda item: item[1])[0]

    def calculate_reward(self, chunks: List[Chunk], text: str) -> float:
        """Multi-objective reward for a chunking result.

        Averages four signals: chunk-size fitness, chunk-count fitness,
        semantic coherence between adjacent chunks, and a penalty for
        chunks carrying structural markers. An empty result is punished
        with -100.
        """
        if not chunks:
            return -100  # hard penalty: the action produced nothing

        rewards = []

        # 1. How close the average chunk size is to the target size.
        avg_chunk_size = np.mean([len(chunk.text) for chunk in chunks])
        rewards.append(-abs(avg_chunk_size - self.base_chunk_size) / self.base_chunk_size)

        # 2. How close the number of chunks is to the expected count.
        optimal_chunks = max(1, len(text) // self.base_chunk_size)
        rewards.append(-abs(len(chunks) - optimal_chunks) / optimal_chunks)

        # 3. TF-IDF cosine similarity of adjacent chunks (module-level
        #    sklearn imports are used; the locals here were redundant).
        if len(chunks) > 1:
            try:
                chunk_texts = [chunk.text for chunk in chunks]
                tfidf_matrix = TfidfVectorizer(max_features=100).fit_transform(chunk_texts)
                similarities = cosine_similarity(tfidf_matrix)
                coherence_scores = [similarities[i][i + 1]
                                    for i in range(len(similarities) - 1)]
                rewards.append(np.mean(coherence_scores) if coherence_scores else 0)
            except Exception:  # vectorisation can fail on degenerate input
                rewards.append(0)
        else:
            rewards.append(0)

        # 4. Penalise chunks that cut across structural markers.
        structure_penalty = sum(
            len(chunk.metadata['structural_markers'])
            for chunk in chunks
            if 'structural_markers' in chunk.metadata
        )
        rewards.append(-structure_penalty / (len(chunks) * 10))

        return float(np.mean(rewards))

    def _density_chunk(self, text: str) -> List[Chunk]:
        """Greedy sentence packing: fill chunks up to base_chunk_size."""
        sentences = text.split('. ')
        chunks = []
        current_sentences = []
        current_length = 0

        for sentence in sentences:
            if current_length + len(sentence) > self.base_chunk_size and current_sentences:
                chunk_text = '. '.join(current_sentences) + '.'
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=0,  # source offsets are not tracked here
                    end_index=len(chunk_text),
                    chunk_type='density_rl'
                ))
                current_sentences = []
                current_length = 0
            current_sentences.append(sentence)
            current_length += len(sentence) + 2  # +2 for the '. ' separator

        if current_sentences:
            chunk_text = '. '.join(current_sentences) + '.'
            chunks.append(Chunk(
                text=chunk_text,
                start_index=0,
                end_index=len(chunk_text),
                chunk_type='density_rl'
            ))

        return chunks

    def execute_action(self, action: str, text: str) -> List[Chunk]:
        """Run the chunking strategy selected by the policy.

        Strategy implementations are imported lazily so optional modules
        are only required when their action is actually chosen.
        """
        if action == 'fixed_length':
            from basic_chunking import FixedLengthChunker
            return FixedLengthChunker(chunk_size=self.base_chunk_size, overlap=100).chunk(text)

        if action == 'sentence_based':
            from basic_chunking import SentenceChunker
            return SentenceChunker(max_sentences=5).chunk(text)

        if action == 'paragraph_based':
            from basic_chunking import ParagraphChunker
            return ParagraphChunker().chunk(text)

        if action == 'semantic_based':
            from semantic_chunking import TfidfSemanticChunker
            return TfidfSemanticChunker(max_chunk_size=self.base_chunk_size).chunk(text)

        if action == 'density_based':
            return self._density_chunk(text)

        # Unknown action: fall back to plain fixed-length chunking.
        from basic_chunking import FixedLengthChunker
        return FixedLengthChunker(chunk_size=self.base_chunk_size).chunk(text)

    def update_q_table(self, state: Tuple, action: str, reward: float,
                      next_state: Tuple) -> None:
        """Q-learning update: Q(s,a) += lr * (r + gamma * max Q(s',·) - Q(s,a))."""
        if state not in self.q_table:
            self.q_table[state] = {a: 0.0 for a in self.actions}
        if next_state not in self.q_table:
            self.q_table[next_state] = {a: 0.0 for a in self.actions}

        current_q = self.q_table[state][action]
        max_next_q = max(self.q_table[next_state].values())

        self.q_table[state][action] = current_q + self.learning_rate * (
            reward + self.discount_factor * max_next_q - current_q
        )

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk text while performing one Q-learning episode."""
        self.clear()

        if not text:
            return self.chunks

        # Observe: derive features and the discrete state.
        features = self.analyze_text_features(text)
        current_state = self.get_state(text, features)

        # Act: epsilon-greedy strategy selection, then execute it.
        action = self.choose_action(current_state)
        chunks = self.execute_action(action, text)

        # Learn: score the result and update the Q-table.
        reward = self.calculate_reward(chunks, text)
        self.reward_history.append(reward)

        # Simplified episode model: the next state equals the current one.
        # A real deployment would observe the post-action state instead.
        next_state = current_state
        self.update_q_table(current_state, action, reward, next_state)

        # Record the learning context on every produced chunk.
        for chunk in chunks:
            chunk.metadata.update({
                'rl_action': action,
                'rl_reward': reward,
                'rl_state': str(current_state),
                'learning_rate': self.learning_rate,
                'exploration_rate': self.exploration_rate
            })
            chunk.chunk_type = 'reinforcement_learning'

        self.chunks = chunks
        return self.chunks

    def get_learning_stats(self) -> Dict:
        """Summarise the reward history and Q-table size."""
        if not self.reward_history:
            return {}

        # mean(history[-10:]) equals mean(history) when fewer than 10
        # episodes exist, so no length check is needed.
        return {
            'total_episodes': len(self.reward_history),
            'average_reward': np.mean(self.reward_history),
            'max_reward': max(self.reward_history),
            'min_reward': min(self.reward_history),
            'recent_reward': np.mean(self.reward_history[-10:]),
            'q_table_size': len(self.q_table),
            'exploration_rate': self.exploration_rate
        }


class NeuralNetworkChunker(AdaptiveChunker):
    """Adaptive chunker that predicts a strategy with a small neural net.

    When TensorFlow is unavailable the chunker degrades to a rule-based
    strategy predictor, so the public interface works either way.
    """

    def __init__(self, base_chunk_size: int = 1000, hidden_layers: List[int] = None,
                 learning_rate: float = 0.001, epochs: int = 100):
        super().__init__(base_chunk_size)
        self.hidden_layers = hidden_layers or [64, 32, 16]
        self.learning_rate = learning_rate
        self.epochs = epochs

        # Per-feature normalisation scale learned in train_model(); reused
        # by predict_chunking_strategy() so inference normalisation matches
        # training (the previous code normalised differently in each place).
        self._feature_scale = None

        # TensorFlow is optional; fall back to rule-based prediction.
        try:
            import tensorflow as tf
            from tensorflow import keras
            self.tf_available = True
            self.model = self._build_model()
        except ImportError:
            print("警告: 未安装TensorFlow，神经网络分块器将使用简单模型")
            self.tf_available = False
            self.model = None

    def _build_model(self):
        """Build the strategy-classification network (7 features -> 5 classes)."""
        if not self.tf_available:
            return None

        import tensorflow as tf
        from tensorflow import keras
        from tensorflow.keras import layers

        # Input features, in the order produced by extract_features().
        input_features = [
            'text_length',
            'sentence_count',
            'avg_sentence_length',
            'paragraph_count',
            'text_density',
            'semantic_coherence',
            'structural_markers_count'
        ]

        model = keras.Sequential([
            layers.Input(shape=(len(input_features),)),
            layers.Dense(self.hidden_layers[0], activation='relu'),
            layers.Dropout(0.2),
            layers.Dense(self.hidden_layers[1], activation='relu'),
            layers.Dropout(0.2),
            layers.Dense(self.hidden_layers[2], activation='relu'),
            layers.Dense(5, activation='softmax')  # 5 chunking strategies
        ])

        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy']
        )

        return model

    def extract_features(self, text: str) -> np.ndarray:
        """Extract the 7-element feature vector used by the model."""
        sentences = text.split('. ')
        paragraphs = text.split('\n\n')

        features = [
            len(text),                                                # text length
            len(sentences),                                           # sentence count
            np.mean([len(s) for s in sentences]) if sentences else 0, # avg sentence length
            len(paragraphs),                                          # paragraph count
            len(text) / max(len(sentences), 1),                       # text density
            self._calculate_semantic_coherence(text),                 # semantic coherence
            self._count_structural_markers(text)                      # structural markers
        ]

        return np.array(features)

    def _calculate_semantic_coherence(self, text: str) -> float:
        """Mean TF-IDF cosine similarity of adjacent sentences (0.5 fallback).

        Uses the module-level sklearn imports; failures (e.g. degenerate
        vocabulary) yield the neutral value 0.5.
        """
        try:
            sentences = [s.strip() for s in text.split('. ') if s.strip()]
            if len(sentences) < 2:
                return 0.5

            vectorizer = TfidfVectorizer(max_features=100)
            tfidf_matrix = vectorizer.fit_transform(sentences)
            similarities = cosine_similarity(tfidf_matrix)

            coherence_scores = [similarities[i][i + 1]
                                for i in range(len(similarities) - 1)]

            return np.mean(coherence_scores) if coherence_scores else 0.5
        except Exception:
            return 0.5

    def _count_structural_markers(self, text: str) -> int:
        """Count Markdown-style structure markers at line starts."""
        markers = 0
        markers += len(re.findall(r'^#+\s', text, re.MULTILINE))        # headings
        markers += len(re.findall(r'^[\*\-\+]\s', text, re.MULTILINE))  # bullet lists
        markers += len(re.findall(r'^\d+\.\s', text, re.MULTILINE))     # numbered lists
        markers += len(re.findall(r'^>\s', text, re.MULTILINE))         # blockquotes
        return markers

    def predict_chunking_strategy(self, features: np.ndarray) -> int:
        """Predict the strategy id (0-4) for the given feature vector."""
        if not self.tf_available or self.model is None:
            # Rule-based fallback (thresholds are heuristic).
            if features[0] > 5000:    # long text
                return 0              # fixed length
            elif features[3] > 10:    # many paragraphs
                return 2              # paragraph based
            elif features[5] > 0.7:   # high semantic coherence
                return 3              # semantic based
            else:
                return 1              # sentence based

        # Normalise consistently with training when a scale is available.
        # `features` is 1-D, so np.max(features, axis=0) would be a single
        # scalar — the old per-call "normalisation" did not match training
        # and could divide by zero.
        if self._feature_scale is not None:
            features_scaled = features / self._feature_scale
        else:
            max_val = np.max(np.abs(features))
            features_scaled = features / max_val if max_val > 0 else features

        prediction = self.model.predict(features_scaled.reshape(1, -1), verbose=0)
        # Cast to a plain int so metadata stays JSON-serialisable.
        return int(np.argmax(prediction[0]))

    def execute_strategy(self, strategy_id: int, text: str) -> List[Chunk]:
        """Run the chunking strategy identified by strategy_id."""
        strategies = [
            'fixed_length',
            'sentence_based',
            'paragraph_based',
            'semantic_based',
            'density_based'
        ]

        strategy = strategies[strategy_id]

        if strategy == 'fixed_length':
            from basic_chunking import FixedLengthChunker
            return FixedLengthChunker(chunk_size=self.base_chunk_size, overlap=100).chunk(text)

        if strategy == 'sentence_based':
            from basic_chunking import SentenceChunker
            return SentenceChunker(max_sentences=5).chunk(text)

        if strategy == 'paragraph_based':
            from basic_chunking import ParagraphChunker
            return ParagraphChunker().chunk(text)

        if strategy == 'semantic_based':
            from semantic_chunking import TfidfSemanticChunker
            return TfidfSemanticChunker(max_chunk_size=self.base_chunk_size).chunk(text)

        if strategy == 'density_based':
            # Greedy sentence packing up to base_chunk_size per chunk.
            sentences = text.split('. ')
            chunks = []
            current_chunk = []
            current_length = 0

            for sentence in sentences:
                if current_length + len(sentence) > self.base_chunk_size and current_chunk:
                    chunk_text = '. '.join(current_chunk) + '.'
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=0,  # source offsets are not tracked here
                        end_index=len(chunk_text),
                        chunk_type='neural_density'
                    ))
                    current_chunk = []
                    current_length = 0
                current_chunk.append(sentence)
                current_length += len(sentence) + 2  # +2 for '. ' separator

            if current_chunk:
                chunk_text = '. '.join(current_chunk) + '.'
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=0,
                    end_index=len(chunk_text),
                    chunk_type='neural_density'
                ))

            return chunks

        return []

    def chunk(self, text: str) -> List[Chunk]:
        """Predict a strategy from text features and chunk accordingly."""
        self.clear()

        if not text:
            return self.chunks

        features = self.extract_features(text)
        strategy_id = self.predict_chunking_strategy(features)
        chunks = self.execute_strategy(strategy_id, text)

        strategy_names = ['fixed_length', 'sentence_based', 'paragraph_based',
                         'semantic_based', 'density_based']

        for chunk in chunks:
            chunk.metadata.update({
                'nn_strategy': strategy_names[strategy_id],
                'nn_strategy_id': strategy_id,
                'features': features.tolist(),
                'tf_available': self.tf_available
            })
            chunk.chunk_type = 'neural_network'

        self.chunks = chunks
        return self.chunks

    def train_model(self, training_data: List[Tuple[str, int]]) -> None:
        """Train the model on (text, strategy_id) pairs.

        Stores the per-feature normalisation scale so that prediction can
        normalise inputs the same way.
        """
        if not self.tf_available or self.model is None:
            print("TensorFlow不可用，无法训练模型")
            return

        X_train = []
        y_train = []

        for text, strategy_id in training_data:
            X_train.append(self.extract_features(text))
            y_train.append(strategy_id)

        X_train = np.array(X_train)
        y_train = np.array(y_train)

        # Per-feature max-abs normalisation; zero columns map to 1 so the
        # division is always defined.
        col_max = np.max(np.abs(X_train), axis=0)
        col_max = np.where(col_max > 0, col_max, 1.0)
        self._feature_scale = col_max
        X_train_scaled = X_train / col_max

        print("开始训练神经网络模型...")
        self.model.fit(X_train_scaled, y_train, epochs=self.epochs, verbose=0)
        print("模型训练完成！")


class StreamingChunker(AdaptiveChunker):
    """Streaming chunker for large documents.

    Text is fed incrementally via feed_text(); once the internal buffer
    reaches buffer_size it is chunked with a strategy chosen from the
    buffer's features, keeping a small overlap tail between rounds.
    """

    def __init__(self, base_chunk_size: int = 1000, buffer_size: int = 5000,
                 overlap_ratio: float = 0.1):
        super().__init__(base_chunk_size)
        self.buffer_size = buffer_size          # size that triggers processing
        self.overlap_ratio = overlap_ratio      # fraction of base_chunk_size kept as overlap
        self.buffer = ""
        self.processed_chunks = []
        self.current_position = 0               # running character offset for chunk indices

        # Streaming session state.
        self.is_streaming = False
        self.stream_callback = None

    def start_stream(self, callback=None) -> None:
        """Reset state and begin a streaming session.

        callback, if given, is invoked with each chunk as it is produced.
        """
        self.is_streaming = True
        self.buffer = ""
        self.processed_chunks = []
        self.current_position = 0
        self.stream_callback = callback

    def end_stream(self) -> List[Chunk]:
        """Finish the session, flush the remaining buffer, return ALL chunks.

        Note: the returned list contains every chunk produced during the
        whole session (feed_text records its output in processed_chunks),
        not just the final flush.
        """
        self.is_streaming = False

        if self.buffer:
            chunks = self._process_buffer(final_chunk=True)
            self.processed_chunks.extend(chunks)

        self.buffer = ""

        return self.processed_chunks

    def feed_text(self, text: str) -> List[Chunk]:
        """Append text to the buffer; chunk and return new chunks when full."""
        if not self.is_streaming:
            self.start_stream()

        self.buffer += text

        # Process once the buffer reaches its capacity.
        if len(self.buffer) >= self.buffer_size:
            chunks = self._process_buffer()
            self.processed_chunks.extend(chunks)

            if self.stream_callback:
                for chunk in chunks:
                    self.stream_callback(chunk)

            return chunks

        return []

    def _process_buffer(self, final_chunk: bool = False) -> List[Chunk]:
        """Chunk the buffer content (if large enough) and trim the buffer."""
        if not self.buffer:
            return []

        chunks = []

        if final_chunk or len(self.buffer) >= self.base_chunk_size:
            # Choose a strategy from the buffer's features. The base-class
            # analyze_text_features() is abstract, so fall back to fixed
            # chunking instead of crashing when no analyzer is provided.
            try:
                features = self.analyze_text_features(self.buffer)
                strategy = self._select_streaming_strategy(features, final_chunk)
            except NotImplementedError:
                strategy = 'fixed'

            if strategy == 'semantic':
                stream_chunks = self._chunk_stream_semantic()
            elif strategy == 'structure':
                stream_chunks = self._chunk_stream_structure()
            else:
                # On the final round, flush the sub-chunk-size remainder so
                # no trailing text is silently dropped.
                stream_chunks = self._chunk_stream_fixed(flush=final_chunk)

            chunks.extend(stream_chunks)

            # Keep an overlap tail for the next round.
            # NOTE(review): any unconsumed text beyond the overlap tail is
            # discarded here; verify upstream that buffer_size is a multiple
            # of base_chunk_size if exact coverage matters.
            if not final_chunk and stream_chunks:
                overlap_size = int(self.base_chunk_size * self.overlap_ratio)
                if len(self.buffer) > overlap_size:
                    self.buffer = self.buffer[-overlap_size:]
                else:
                    self.buffer = ""
            else:
                self.buffer = ""

        return chunks

    def _select_streaming_strategy(self, features: TextFeatures, final_chunk: bool) -> str:
        """Pick 'semantic', 'structure' or 'fixed' for the current buffer."""
        if final_chunk:
            return 'fixed'  # final flush always uses fixed-length chunking

        # TextFeatures has no `semantic_coherence` field (the original
        # attribute access raised AttributeError); topic_coherence carries
        # the coherence signal.
        if features.topic_coherence > 0.8:
            return 'semantic'
        elif len(features.structural_markers) > 3:
            return 'structure'
        else:
            return 'fixed'

    def _chunk_stream_fixed(self, flush: bool = False) -> List[Chunk]:
        """Fixed-length chunking of the buffer.

        With flush=True the trailing remainder (shorter than
        base_chunk_size) is emitted as a final chunk instead of being lost.
        """
        chunks = []
        remaining_text = self.buffer
        start_pos = self.current_position

        while len(remaining_text) >= self.base_chunk_size:
            chunk_text = remaining_text[:self.base_chunk_size]
            chunks.append(Chunk(
                text=chunk_text,
                start_index=start_pos,
                end_index=start_pos + len(chunk_text),
                chunk_type='streaming_fixed',
                metadata={
                    'streaming': True,
                    'buffer_size': self.buffer_size,
                    'overlap_ratio': self.overlap_ratio
                }
            ))

            remaining_text = remaining_text[self.base_chunk_size:]
            start_pos += self.base_chunk_size

        if flush and remaining_text:
            chunks.append(Chunk(
                text=remaining_text,
                start_index=start_pos,
                end_index=start_pos + len(remaining_text),
                chunk_type='streaming_fixed',
                metadata={
                    'streaming': True,
                    'buffer_size': self.buffer_size,
                    'overlap_ratio': self.overlap_ratio,
                    'final_flush': True
                }
            ))
            start_pos += len(remaining_text)

        self.current_position = start_pos
        return chunks

    def _chunk_stream_semantic(self) -> List[Chunk]:
        """Sentence-level chunking that splits where adjacent similarity drops."""
        try:
            sentences = [s.strip() for s in self.buffer.split('. ') if s.strip()]
            if len(sentences) < 2:
                return self._chunk_stream_fixed()

            # Pairwise sentence similarities (module-level sklearn imports).
            vectorizer = TfidfVectorizer(max_features=100)
            tfidf_matrix = vectorizer.fit_transform(sentences)
            similarities = cosine_similarity(tfidf_matrix)

            chunks = []
            current_chunk_sentences = []
            current_start = self.current_position

            for i, sentence in enumerate(sentences):
                current_chunk_sentences.append(sentence)

                if len(current_chunk_sentences) > 1:
                    # Similarity between the current sentence (the last one
                    # added, global index i) and the next sentence. The old
                    # code indexed by the chunk-local position, comparing
                    # unrelated sentence pairs.
                    if i < len(sentences) - 1:
                        similarity = similarities[i][i + 1]
                    else:
                        similarity = 0

                    # Split when the chunk is full or the topic drifts.
                    chunk_text = '. '.join(current_chunk_sentences) + '.'
                    if (len(chunk_text) > self.base_chunk_size or
                        (similarity < 0.3 and len(current_chunk_sentences) > 2)):

                        chunks.append(Chunk(
                            text=chunk_text,
                            start_index=current_start,
                            end_index=current_start + len(chunk_text),
                            chunk_type='streaming_semantic',
                            metadata={
                                'streaming': True,
                                'semantic_threshold': 0.3,
                                'num_sentences': len(current_chunk_sentences)
                            }
                        ))

                        current_start += len(chunk_text)
                        current_chunk_sentences = []

            # Flush the trailing sentences.
            if current_chunk_sentences:
                chunk_text = '. '.join(current_chunk_sentences) + '.'
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='streaming_semantic',
                    metadata={
                        'streaming': True,
                        'num_sentences': len(current_chunk_sentences)
                    }
                ))
                current_start += len(chunk_text)

            self.current_position = current_start
            return chunks

        except Exception as e:
            print(f"语义流式分块错误: {e}")
            return self._chunk_stream_fixed()

    def _chunk_stream_structure(self) -> List[Chunk]:
        """Paragraph-level chunking that keeps paragraphs intact when possible."""
        paragraphs = [p.strip() for p in self.buffer.split('\n\n') if p.strip()]

        chunks = []
        current_chunk_paragraphs = []
        current_start = self.current_position

        for paragraph in paragraphs:
            current_chunk_paragraphs.append(paragraph)

            chunk_text = '\n\n'.join(current_chunk_paragraphs)
            if len(chunk_text) > self.base_chunk_size:
                if len(current_chunk_paragraphs) > 1:
                    # Emit everything except the paragraph that overflowed.
                    chunk_text = '\n\n'.join(current_chunk_paragraphs[:-1])
                    chunks.append(Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='streaming_structure',
                        metadata={
                            'streaming': True,
                            'num_paragraphs': len(current_chunk_paragraphs) - 1
                        }
                    ))

                    current_start += len(chunk_text) + 2  # +2 for '\n\n'
                    current_chunk_paragraphs = [current_chunk_paragraphs[-1]]
                else:
                    # A single paragraph exceeds the budget: split just this
                    # paragraph into fixed-size pieces. (The old code re-ran
                    # fixed chunking over the ENTIRE buffer here, duplicating
                    # text already emitted.)
                    for offset in range(0, len(chunk_text), self.base_chunk_size):
                        piece = chunk_text[offset:offset + self.base_chunk_size]
                        chunks.append(Chunk(
                            text=piece,
                            start_index=current_start,
                            end_index=current_start + len(piece),
                            chunk_type='streaming_structure',
                            metadata={
                                'streaming': True,
                                'oversized_paragraph': True
                            }
                        ))
                        current_start += len(piece)
                    current_chunk_paragraphs = []

        # Flush the trailing paragraphs.
        if current_chunk_paragraphs:
            chunk_text = '\n\n'.join(current_chunk_paragraphs)
            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='streaming_structure',
                metadata={
                    'streaming': True,
                    'num_paragraphs': len(current_chunk_paragraphs)
                }
            ))
            current_start += len(chunk_text)

        self.current_position = current_start
        return chunks

    def chunk(self, text: str) -> List[Chunk]:
        """One-shot interface: stream the whole text and collect all chunks.

        end_stream() already returns every chunk produced during the
        session (feed_text records its output in processed_chunks), so the
        chunks returned by feed_text must NOT be concatenated on top of it
        — doing so duplicated chunks in the previous implementation.
        """
        self.start_stream()
        self.feed_text(text)
        self.chunks = self.end_stream()
        return self.chunks

    def get_stream_stats(self) -> Dict:
        """Report the current streaming-session state."""
        return {
            'buffer_size': len(self.buffer),
            'processed_chunks': len(self.processed_chunks),
            'current_position': self.current_position,
            'is_streaming': self.is_streaming,
            'base_chunk_size': self.base_chunk_size,
            'buffer_capacity': self.buffer_size
        }


if __name__ == "__main__":
    # Run the adaptive-chunking demo only when executed as a script,
    # not when this module is imported.
    main()