"""
语义分块方法演示
演示基于语义相似性的文本分块方法
"""
import re
import numpy as np
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans, AgglomerativeClustering
import json
import torch

# 尝试导入sentence-transformers，如果不可用则使用sklearn
try:
    from sentence_transformers import SentenceTransformer
    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    SENTENCE_TRANSFORMERS_AVAILABLE = False

# 使用通用导入工具
try:
    from .import_utils import get_basic_chunker_classes
    BasicChunker, Chunk = get_basic_chunker_classes()
except ImportError:
    # 如果导入工具不可用，使用备用方法
    try:
        import sys
        import os
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        from chunk_demo01_basic_chunking import BasicChunker, Chunk
    except ImportError:
        # 最后的直接导入尝试
        from basic_chunking import BasicChunker, Chunk


class SemanticChunker(BasicChunker):
    """Base class for semantic chunkers.

    Subclasses must implement ``compute_similarity_matrix``; the greedy
    similarity-grouping helper below is shared by all of them.
    """

    def __init__(self):
        super().__init__()
        # Per-run artifacts cached by subclasses for inspection.
        self.embeddings = None
        self.similarity_matrix = None

    def compute_similarity_matrix(self, sentences: List[str]) -> np.ndarray:
        """Subclass hook: return a pairwise sentence-similarity matrix."""
        raise NotImplementedError("子类必须实现此方法")

    def group_similar_sentences(self, sentences: List[str],
                              similarity_matrix: np.ndarray,
                              threshold: float = 0.7) -> List[List[int]]:
        """Greedily partition sentence indices into groups.

        Each unassigned sentence seeds a new group and absorbs every later
        unassigned sentence whose similarity to the seed is >= *threshold*.
        Returns a list of index lists covering every sentence exactly once.
        """
        total = len(sentences)
        assigned = [False] * total
        groups: List[List[int]] = []

        for seed in range(total):
            if assigned[seed]:
                continue
            members = [seed]
            assigned[seed] = True
            # Scan only forward: earlier sentences are already assigned.
            for candidate in range(seed + 1, total):
                if not assigned[candidate] and similarity_matrix[seed][candidate] >= threshold:
                    members.append(candidate)
                    assigned[candidate] = True
            groups.append(members)

        return groups


class TfidfSemanticChunker(SemanticChunker):
    """TF-IDF based semantic chunker.

    Splits text into sentences, groups them by TF-IDF cosine similarity,
    and emits one chunk per group; oversized groups are re-split by
    whitespace-separated words to respect ``max_chunk_size``.
    """

    def __init__(self, max_chunk_size: int = 1000,
                 similarity_threshold: float = 0.3,
                 language: str = 'chinese'):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.similarity_threshold = similarity_threshold
        self.language = language  # recorded only; tokenization itself is language-agnostic
        self.vectorizer = TfidfVectorizer(
            max_features=1000,
            stop_words=None,  # supply a stop-word list here if desired
            ngram_range=(1, 2)
        )

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese/English sentence terminators, dropping
        fragments of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:  # filter very short fragments
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def compute_similarity_matrix(self, sentences: List[str]) -> np.ndarray:
        """Return the pairwise cosine-similarity matrix of the sentences'
        TF-IDF vectors; also cached on ``self.similarity_matrix``."""
        if not sentences:
            return np.array([])

        tfidf_matrix = self.vectorizer.fit_transform(sentences)
        similarity_matrix = cosine_similarity(tfidf_matrix)

        self.similarity_matrix = similarity_matrix
        return similarity_matrix

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* by TF-IDF sentence similarity.

        Fixes vs. the previous version:
        * a group whose reconstructed text ('. '-joined) could not be found
          verbatim in *text* — the usual case, since ``split_sentences``
          consumed the original (often Chinese) punctuation — was silently
          dropped; we now fall back to the running position instead;
        * ``current_pos`` advances past oversized sub-chunks, so subsequent
          chunk positions no longer overlap them.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text, 0, len(text), 'tfidf_semantic')]

        similarity_matrix = self.compute_similarity_matrix(sentences)
        groups = self.group_similar_sentences(sentences, similarity_matrix,
                                            self.similarity_threshold)

        chunks = []
        current_pos = 0

        for group in groups:
            group_sentences = [sentences[i] for i in group]
            chunk_text = '. '.join(group_sentences) + '.'

            if len(chunk_text) > self.max_chunk_size:
                # Oversized group: re-split by words and advance the cursor.
                sub_chunks = self._split_large_chunk(chunk_text, current_pos)
                chunks.extend(sub_chunks)
                if sub_chunks:
                    current_pos = sub_chunks[-1].end_index
            else:
                # Locate the chunk in the source; fall back to the running
                # position when the joined form does not occur verbatim.
                chunk_start = text.find(chunk_text, current_pos)
                if chunk_start == -1:
                    chunk_start = current_pos
                chunk = Chunk(
                    text=chunk_text,
                    start_index=chunk_start,
                    end_index=chunk_start + len(chunk_text),
                    chunk_type='tfidf_semantic',
                    metadata={
                        'sentence_count': len(group),
                        'sentence_indices': group,
                        # Mean off-diagonal similarity within the group.
                        'avg_similarity': np.mean([similarity_matrix[i][j]
                                                  for i in group for j in group if i != j]) if len(group) > 1 else 0
                    }
                )
                chunks.append(chunk)
                current_pos = chunk_start + len(chunk_text)

        self.chunks = chunks
        return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Greedy whitespace-word split of an oversized chunk into sub-chunks
        of at most ``max_chunk_size`` characters (positions are approximate).

        NOTE(review): unspaced CJK text yields one "word" per sentence, so a
        sub-chunk may still exceed ``max_chunk_size`` — confirm acceptable.
        """
        words = chunk_text.split()
        sub_chunks = []
        current_words: List[str] = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # Flush the buffer before it would exceed the size limit.
            if current_length + len(word) + 1 > self.max_chunk_size and current_words:
                sub_text = ' '.join(current_words)
                sub_chunks.append(Chunk(
                    text=sub_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_text),
                    chunk_type='tfidf_semantic',
                    metadata={'is_sub_chunk': True}
                ))
                current_start += len(sub_text) + 1
                current_words = []
                current_length = 0

            current_words.append(word)
            current_length += len(word) + 1

        if current_words:
            sub_text = ' '.join(current_words)
            sub_chunks.append(Chunk(
                text=sub_text,
                start_index=current_start,
                end_index=current_start + len(sub_text),
                chunk_type='tfidf_semantic',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


class SentenceTransformerChunker(SemanticChunker):
    """SentenceTransformer-based semantic chunker.

    Uses dense sentence embeddings (instead of TF-IDF) for similarity
    grouping; otherwise follows the same group-then-emit strategy.
    """

    def __init__(self, model_name: str = 'all-MiniLM-L6-v2',
                 max_chunk_size: int = 1000,
                 similarity_threshold: float = 0.7,
                 device: str = 'cpu'):
        super().__init__()
        if not SENTENCE_TRANSFORMERS_AVAILABLE:
            raise ImportError("sentence-transformers not available. Install with: pip install sentence-transformers")

        self.model_name = model_name
        self.max_chunk_size = max_chunk_size
        self.similarity_threshold = similarity_threshold
        self.device = device

        print(f"Loading model: {model_name}")
        self.model = SentenceTransformer(model_name, device=device)
        print("Model loaded successfully")

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese/English sentence terminators, dropping
        fragments of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def compute_similarity_matrix(self, sentences: List[str]) -> np.ndarray:
        """Encode sentences with the transformer model and return their
        pairwise cosine-similarity matrix; embeddings are cached on self."""
        if not sentences:
            return np.array([])

        embeddings = self.model.encode(sentences)
        self.embeddings = embeddings

        similarity_matrix = cosine_similarity(embeddings)

        self.similarity_matrix = similarity_matrix
        return similarity_matrix

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* by embedding-based sentence similarity.

        Fixes vs. the previous version:
        * a group whose '. '-joined text was not found verbatim in *text*
          (the usual case after punctuation-consuming sentence splitting)
          was silently dropped — we now fall back to the running position;
        * ``current_pos`` advances past oversized sub-chunks so later chunk
          positions no longer overlap them.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text, 0, len(text), 'sentence_transformer')]

        similarity_matrix = self.compute_similarity_matrix(sentences)
        groups = self.group_similar_sentences(sentences, similarity_matrix,
                                            self.similarity_threshold)

        chunks = []
        current_pos = 0

        for group in groups:
            group_sentences = [sentences[i] for i in group]
            chunk_text = '. '.join(group_sentences) + '.'

            if len(chunk_text) > self.max_chunk_size:
                # Oversized group: re-split by words and advance the cursor.
                sub_chunks = self._split_large_chunk(chunk_text, current_pos)
                chunks.extend(sub_chunks)
                if sub_chunks:
                    current_pos = sub_chunks[-1].end_index
            else:
                # Locate the chunk in the source; fall back to the running
                # position when the joined form does not occur verbatim.
                chunk_start = text.find(chunk_text, current_pos)
                if chunk_start == -1:
                    chunk_start = current_pos
                chunk = Chunk(
                    text=chunk_text,
                    start_index=chunk_start,
                    end_index=chunk_start + len(chunk_text),
                    chunk_type='sentence_transformer',
                    metadata={
                        'sentence_count': len(group),
                        'sentence_indices': group,
                        # Mean off-diagonal similarity within the group.
                        'avg_similarity': np.mean([similarity_matrix[i][j]
                                                  for i in group for j in group if i != j]) if len(group) > 1 else 0,
                        'model_name': self.model_name
                    }
                )
                chunks.append(chunk)
                current_pos = chunk_start + len(chunk_text)

        self.chunks = chunks
        return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Greedy whitespace-word split of an oversized chunk into sub-chunks
        of at most ``max_chunk_size`` characters (positions are approximate)."""
        words = chunk_text.split()
        sub_chunks = []
        current_words: List[str] = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # Flush the buffer before it would exceed the size limit.
            if current_length + len(word) + 1 > self.max_chunk_size and current_words:
                sub_text = ' '.join(current_words)
                sub_chunks.append(Chunk(
                    text=sub_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_text),
                    chunk_type='sentence_transformer',
                    metadata={'is_sub_chunk': True}
                ))
                current_start += len(sub_text) + 1
                current_words = []
                current_length = 0

            current_words.append(word)
            current_length += len(word) + 1

        if current_words:
            sub_text = ' '.join(current_words)
            sub_chunks.append(Chunk(
                text=sub_text,
                start_index=current_start,
                end_index=current_start + len(sub_text),
                chunk_type='sentence_transformer',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


class ClusteringChunker(SemanticChunker):
    """Clustering-based semantic chunker.

    Clusters sentence TF-IDF vectors (k-means or agglomerative) and emits
    one chunk per cluster, in order of first appearance.
    """

    def __init__(self, n_clusters: int = 5,
                 max_chunk_size: int = 1000,
                 clustering_method: str = 'kmeans'):
        super().__init__()
        self.n_clusters = n_clusters
        self.max_chunk_size = max_chunk_size
        self.clustering_method = clustering_method
        self.vectorizer = TfidfVectorizer(max_features=1000)

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese/English sentence terminators, dropping
        fragments of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def compute_clusters(self, sentences: List[str]) -> np.ndarray:
        """Cluster sentences by their TF-IDF vectors and return cluster labels.

        Fixes vs. the previous version:
        * ``n_clusters`` is clamped to the number of sentences — both
          estimators raise ``ValueError`` when n_clusters > n_samples;
        * the sparse TF-IDF matrix is densified for agglomerative
          clustering, which does not accept sparse input.

        Raises:
            ValueError: if ``clustering_method`` is not 'kmeans' or
                'hierarchical'.
        """
        if not sentences:
            return np.array([])

        tfidf_matrix = self.vectorizer.fit_transform(sentences)

        # Never request more clusters than there are samples.
        effective_clusters = max(1, min(self.n_clusters, len(sentences)))

        if self.clustering_method == 'kmeans':
            clustering = KMeans(n_clusters=effective_clusters, random_state=42)
            return clustering.fit_predict(tfidf_matrix)
        if self.clustering_method == 'hierarchical':
            clustering = AgglomerativeClustering(n_clusters=effective_clusters)
            # AgglomerativeClustering requires a dense array.
            return clustering.fit_predict(tfidf_matrix.toarray())
        raise ValueError(f"Unsupported clustering method: {self.clustering_method}")

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* by sentence clustering.

        Fixes vs. the previous version:
        * a cluster whose '. '-joined text was not found verbatim in *text*
          (the usual case after punctuation-consuming sentence splitting)
          was silently dropped — we now fall back to the running position;
        * ``current_pos`` advances past oversized sub-chunks so later chunk
          positions no longer overlap them.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text, 0, len(text), 'clustering')]

        cluster_labels = self.compute_clusters(sentences)

        # Group sentence indices by cluster label (first-appearance order).
        clusters: Dict[Any, List[int]] = {}
        for i, label in enumerate(cluster_labels):
            clusters.setdefault(label, []).append(i)

        chunks = []
        current_pos = 0

        for cluster_id, sentence_indices in clusters.items():
            cluster_sentences = [sentences[i] for i in sentence_indices]
            chunk_text = '. '.join(cluster_sentences) + '.'

            if len(chunk_text) > self.max_chunk_size:
                # Oversized cluster: re-split by words and advance the cursor.
                sub_chunks = self._split_large_chunk(chunk_text, current_pos)
                chunks.extend(sub_chunks)
                if sub_chunks:
                    current_pos = sub_chunks[-1].end_index
            else:
                # Locate the chunk in the source; fall back to the running
                # position when the joined form does not occur verbatim.
                chunk_start = text.find(chunk_text, current_pos)
                if chunk_start == -1:
                    chunk_start = current_pos
                chunk = Chunk(
                    text=chunk_text,
                    start_index=chunk_start,
                    end_index=chunk_start + len(chunk_text),
                    chunk_type='clustering',
                    metadata={
                        'cluster_id': cluster_id,
                        'sentence_count': len(sentence_indices),
                        'sentence_indices': sentence_indices,
                        'clustering_method': self.clustering_method
                    }
                )
                chunks.append(chunk)
                current_pos = chunk_start + len(chunk_text)

        self.chunks = chunks
        return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Greedy whitespace-word split of an oversized chunk into sub-chunks
        of at most ``max_chunk_size`` characters (positions are approximate)."""
        words = chunk_text.split()
        sub_chunks = []
        current_words: List[str] = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # Flush the buffer before it would exceed the size limit.
            if current_length + len(word) + 1 > self.max_chunk_size and current_words:
                sub_text = ' '.join(current_words)
                sub_chunks.append(Chunk(
                    text=sub_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_text),
                    chunk_type='clustering',
                    metadata={'is_sub_chunk': True}
                ))
                current_start += len(sub_text) + 1
                current_words = []
                current_length = 0

            current_words.append(word)
            current_length += len(word) + 1

        if current_words:
            sub_text = ' '.join(current_words)
            sub_chunks.append(Chunk(
                text=sub_text,
                start_index=current_start,
                end_index=current_start + len(sub_text),
                chunk_type='clustering',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


class TopicBasedChunker(SemanticChunker):
    """Topic-based semantic chunker.

    Extracts frequency-based keywords per sentence and groups sentences
    whose keyword sets overlap (Jaccard similarity) above a threshold.
    """

    def __init__(self, max_chunk_size: int = 1000,
                 topic_similarity_threshold: float = 0.5):
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.topic_similarity_threshold = topic_similarity_threshold
        self.vectorizer = TfidfVectorizer(max_features=1000)  # currently unused; kept for interface parity

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese/English sentence terminators, dropping
        fragments of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def extract_keywords(self, sentence: str, top_k: int = 5) -> List[str]:
        """Return the *top_k* most frequent words (>2 chars) of *sentence*.

        NOTE(review): ``\\b\\w+\\b`` relies on word boundaries, so unspaced
        CJK text yields one long token per run of characters — confirm
        whether a CJK tokenizer is needed for Chinese input.
        """
        words = re.findall(r'\b\w+\b', sentence.lower())
        word_freq: Dict[str, int] = {}
        for word in words:
            if len(word) > 2:  # skip very short words
                word_freq[word] = word_freq.get(word, 0) + 1

        # Most frequent first.
        sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
        return [word for word, freq in sorted_words[:top_k]]

    def compute_topic_similarity(self, keywords1: List[str],
                                keywords2: List[str]) -> float:
        """Jaccard similarity of two keyword lists (0.0 when either is empty)."""
        if not keywords1 or not keywords2:
            return 0.0

        common_keywords = set(keywords1) & set(keywords2)
        total_keywords = set(keywords1) | set(keywords2)

        if not total_keywords:
            return 0.0

        return len(common_keywords) / len(total_keywords)

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* by topic (keyword-overlap) similarity.

        Fixes vs. the previous version:
        * a group whose '. '-joined text was not found verbatim in *text*
          (the usual case after punctuation-consuming sentence splitting)
          was silently dropped — we now fall back to the running position;
        * ``current_pos`` advances past oversized sub-chunks so later chunk
          positions no longer overlap them.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text, 0, len(text), 'topic_based')]

        # Per-sentence keyword lists drive the similarity matrix.
        sentence_keywords = [self.extract_keywords(sentence) for sentence in sentences]

        n = len(sentences)
        similarity_matrix = np.zeros((n, n))

        for i in range(n):
            for j in range(n):
                if i != j:
                    similarity_matrix[i][j] = self.compute_topic_similarity(
                        sentence_keywords[i], sentence_keywords[j]
                    )

        self.similarity_matrix = similarity_matrix

        groups = self.group_similar_sentences(sentences, similarity_matrix,
                                            self.topic_similarity_threshold)

        chunks = []
        current_pos = 0

        for group in groups:
            group_sentences = [sentences[i] for i in group]
            chunk_text = '. '.join(group_sentences) + '.'

            if len(chunk_text) > self.max_chunk_size:
                # Oversized group: re-split by words and advance the cursor.
                sub_chunks = self._split_large_chunk(chunk_text, current_pos)
                chunks.extend(sub_chunks)
                if sub_chunks:
                    current_pos = sub_chunks[-1].end_index
            else:
                # Locate the chunk in the source; fall back to the running
                # position when the joined form does not occur verbatim.
                chunk_start = text.find(chunk_text, current_pos)
                if chunk_start == -1:
                    chunk_start = current_pos
                chunk = Chunk(
                    text=chunk_text,
                    start_index=chunk_start,
                    end_index=chunk_start + len(chunk_text),
                    chunk_type='topic_based',
                    metadata={
                        'sentence_count': len(group),
                        'sentence_indices': group,
                        # Keywords of the group's seed sentence.
                        'keywords': sentence_keywords[group[0]] if group else [],
                        'avg_topic_similarity': np.mean([similarity_matrix[i][j]
                                                       for i in group for j in group if i != j]) if len(group) > 1 else 0
                    }
                )
                chunks.append(chunk)
                current_pos = chunk_start + len(chunk_text)

        self.chunks = chunks
        return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Greedy whitespace-word split of an oversized chunk into sub-chunks
        of at most ``max_chunk_size`` characters (positions are approximate)."""
        words = chunk_text.split()
        sub_chunks = []
        current_words: List[str] = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # Flush the buffer before it would exceed the size limit.
            if current_length + len(word) + 1 > self.max_chunk_size and current_words:
                sub_text = ' '.join(current_words)
                sub_chunks.append(Chunk(
                    text=sub_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_text),
                    chunk_type='topic_based',
                    metadata={'is_sub_chunk': True}
                ))
                current_start += len(sub_text) + 1
                current_words = []
                current_length = 0

            current_words.append(word)
            current_length += len(word) + 1

        if current_words:
            sub_text = ' '.join(current_words)
            sub_chunks.append(Chunk(
                text=sub_text,
                start_index=current_start,
                end_index=current_start + len(sub_text),
                chunk_type='topic_based',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


def main():
    """Entry point: run each semantic-chunking strategy on a sample text,
    print previews, and save results to JSON files."""

    def banner(title: str) -> None:
        # Section header: blank line, 40 '=' chars, title, 40 '=' chars.
        print("\n" + "=" * 40)
        print(title)
        print("=" * 40)

    print("=" * 60)
    print("语义分块方法演示")
    print("=" * 60)

    # Sample document used by every chunker below.
    sample_text = """
    人工智能（AI）是计算机科学的一个分支，致力于创建能够执行通常需要人类智能的任务的系统。
    这些任务包括学习、推理、问题解决、感知和语言理解。AI系统可以通过各种方式实现，
    包括机器学习、深度学习、自然语言处理等技术。
    
    机器学习是AI的核心技术之一，它使计算机能够从数据中学习而无需明确编程。
    深度学习是机器学习的一个子集，使用神经网络来模拟人脑的工作方式。
    自然语言处理则专注于使计算机能够理解和生成人类语言。
    
    AI的应用领域非常广泛，包括医疗保健、金融、教育、交通等。
    在医疗保健领域，AI可以帮助医生诊断疾病、预测患者风险、个性化治疗方案。
    在金融领域，AI可以用于风险评估、欺诈检测、算法交易等。
    在教育领域，AI可以提供个性化学习体验、自动化评分系统等。
    
    深度学习模型，如卷积神经网络（CNN）和循环神经网络（RNN），在图像识别和语音识别方面表现出色。
    这些模型通过大量的数据训练，能够自动学习特征和模式。
    转换器模型，如BERT和GPT，在自然语言处理任务中取得了突破性进展。
    """

    # 1. TF-IDF similarity grouping.
    banner("1. TF-IDF语义分块")
    tfidf_chunker = TfidfSemanticChunker(max_chunk_size=300, similarity_threshold=0.2)
    tfidf_chunker.chunk(sample_text)
    tfidf_chunker.print_chunks(max_chunks=3)

    # 2. SentenceTransformer embeddings (optional dependency).
    if SENTENCE_TRANSFORMERS_AVAILABLE:
        banner("2. SentenceTransformer语义分块")
        st_chunker = SentenceTransformerChunker(
            model_name='all-MiniLM-L6-v2',
            max_chunk_size=300,
            similarity_threshold=0.7
        )
        st_chunker.chunk(sample_text)
        st_chunker.print_chunks(max_chunks=3)
    else:
        banner("2. SentenceTransformer不可用")
        print("跳过SentenceTransformer分块演示")

    # 3. Cluster-based grouping.
    banner("3. 聚类语义分块")
    clustering_chunker = ClusteringChunker(n_clusters=3, max_chunk_size=300)
    clustering_chunker.chunk(sample_text)
    clustering_chunker.print_chunks(max_chunks=3)

    # 4. Keyword-overlap (topic) grouping.
    banner("4. 主题语义分块")
    topic_chunker = TopicBasedChunker(max_chunk_size=300, topic_similarity_threshold=0.3)
    topic_chunker.chunk(sample_text)
    topic_chunker.print_chunks(max_chunks=3)

    # Persist results for offline inspection.
    banner("保存分块结果")
    tfidf_chunker.save_chunks("tfidf_semantic_chunks.json")
    clustering_chunker.save_chunks("clustering_chunks.json")
    topic_chunker.save_chunks("topic_chunks.json")

    print("\n" + "=" * 60)
    print("语义分块演示完成！")
    print("=" * 60)


# =============================================================================
# 高级语义分块方法
# =============================================================================

class SlidingWindowSemanticChunker(SemanticChunker):
    """Sliding-window semantic chunker.

    Accumulates sentences and cuts a chunk whenever the coherence of the
    trailing ``window_size`` sentences drops below ``similarity_threshold``
    or the accumulated text exceeds ``max_chunk_size``.
    """

    def __init__(self, window_size: int = 3, step_size: int = 1,
                 max_chunk_size: int = 1000, similarity_threshold: float = 0.5):
        super().__init__()
        self.window_size = window_size
        # step_size is recorded in chunk metadata only; accumulation
        # always advances one sentence at a time.
        self.step_size = step_size
        self.max_chunk_size = max_chunk_size
        self.similarity_threshold = similarity_threshold
        self.vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))

    def split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese/English sentence terminators, dropping
        fragments of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def compute_similarity_matrix(self, sentences: List[str]) -> np.ndarray:
        """Return the pairwise cosine-similarity matrix of the sentences'
        TF-IDF vectors; also cached on ``self.similarity_matrix``."""
        if not sentences:
            return np.array([])

        tfidf_matrix = self.vectorizer.fit_transform(sentences)
        similarity_matrix = cosine_similarity(tfidf_matrix)

        self.similarity_matrix = similarity_matrix
        return similarity_matrix

    def chunk(self, text: str) -> List[Chunk]:
        """Chunk *text* with a sliding coherence window.

        Bug fix: coherence is now computed over the *global* indices of the
        trailing window. The previous version passed the window's sentence
        strings and indexed the similarity matrix with window-local
        positions (0..window_size-2), so it always measured the first
        sentences of the document instead of the current window.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text=text, start_index=0, end_index=len(text),
                         chunk_type='sliding_window')]

        self.compute_similarity_matrix(sentences)
        chunks = []
        current_chunk_sentences = []
        current_start = 0

        for i in range(len(sentences)):
            current_chunk_sentences.append(sentences[i])

            # Only evaluate once a full window has accumulated.
            if len(current_chunk_sentences) >= self.window_size:
                # The trailing window always ends at global sentence i.
                window_similarity = self._calculate_window_coherence(i)

                # Cut when coherence drops or the chunk would grow too large.
                if (window_similarity < self.similarity_threshold or
                    len(' '.join(current_chunk_sentences)) > self.max_chunk_size):

                    # Emit everything except the last sentence, which seeds
                    # the next chunk.
                    if len(current_chunk_sentences) > 1:
                        chunk_text = ' '.join(current_chunk_sentences[:-1])
                        chunk = Chunk(
                            text=chunk_text,
                            start_index=current_start,
                            end_index=current_start + len(chunk_text),
                            chunk_type='sliding_window',
                            metadata={
                                'window_size': self.window_size,
                                'step_size': self.step_size,
                                'coherence_score': window_similarity,
                                'num_sentences': len(current_chunk_sentences) - 1
                            }
                        )
                        chunks.append(chunk)

                        # Positions are approximate: based on joined length.
                        current_start += len(chunk_text) + 1
                        current_chunk_sentences = [current_chunk_sentences[-1]]

        # Flush any remaining sentences as a final chunk.
        if current_chunk_sentences:
            chunk_text = ' '.join(current_chunk_sentences)
            chunk = Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='sliding_window',
                metadata={
                    'window_size': self.window_size,
                    'step_size': self.step_size,
                    'num_sentences': len(current_chunk_sentences)
                }
            )
            chunks.append(chunk)

        self.chunks = chunks
        return self.chunks

    def _calculate_window_coherence(self, last_index: int) -> float:
        """Mean similarity of adjacent sentence pairs in the window of up to
        ``window_size`` sentences ending at global index *last_index*.

        Returns 1.0 for a window with fewer than two sentences.
        """
        start = max(0, last_index - self.window_size + 1)
        if last_index <= start:
            return 1.0

        pair_similarities = [self.similarity_matrix[j][j + 1]
                             for j in range(start, last_index)]
        return float(np.mean(pair_similarities)) if pair_similarities else 0.0


class BERTSemanticChunker(SemanticChunker):
    """BERT-based semantic chunker.

    Embeds every sentence (BERT [CLS] vector, or TF-IDF when transformers
    is unavailable), then walks the sentences in order and starts a new
    chunk whenever the similarity between a sentence and its predecessor
    falls below ``similarity_threshold``, or adding the sentence would push
    the chunk past ``max_chunk_size`` characters.
    """

    def __init__(self, model_name: str = 'bert-base-uncased',
                 max_chunk_size: int = 1000, similarity_threshold: float = 0.7,
                 device: str = 'auto'):
        """
        Args:
            model_name: HuggingFace model id used for sentence embeddings.
            max_chunk_size: maximum chunk length in characters.
            similarity_threshold: adjacent-sentence similarity below which
                a new chunk is started.
            device: 'cpu', 'cuda', or 'auto' to pick cuda when available.
        """
        super().__init__()
        self.model_name = model_name
        self.max_chunk_size = max_chunk_size
        self.similarity_threshold = similarity_threshold
        self.device = device

        # Load BERT if transformers is installed; otherwise fall back to
        # TF-IDF vectors so the chunker still works without it.
        try:
            import torch
            from transformers import AutoTokenizer, AutoModel
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)

            if device == 'auto':
                self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.model.to(self.device)
            self.model.eval()  # inference only: disable dropout etc.

            self.bert_available = True
        except ImportError:
            print("警告: 未安装transformers库，BERT分块器将使用TF-IDF作为备选")
            self.bert_available = False
            self.vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))

    def split_sentences(self, text: str) -> List[str]:
        """Split text on sentence-ending punctuation, dropping fragments
        of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def get_bert_embeddings(self, sentences: List[str]) -> np.ndarray:
        """Return one embedding vector per sentence.

        Uses the BERT [CLS] token representation when available,
        otherwise dense TF-IDF vectors.
        """
        if not self.bert_available:
            # TF-IDF fallback: one row per sentence.
            tfidf_matrix = self.vectorizer.fit_transform(sentences)
            return tfidf_matrix.toarray()

        embeddings = []

        with torch.no_grad():
            for sentence in sentences:
                # Tokenize, truncating to BERT's 512-token limit.
                inputs = self.tokenizer(sentence, return_tensors='pt',
                                      truncation=True, max_length=512,
                                      padding=True)
                inputs = {k: v.to(self.device) for k, v in inputs.items()}

                outputs = self.model(**inputs)

                # Use the [CLS] token embedding as the sentence representation.
                sentence_embedding = outputs.last_hidden_state[:, 0, :].cpu().numpy()
                embeddings.append(sentence_embedding[0])

        return np.array(embeddings)

    def compute_similarity_matrix(self, sentences: List[str]) -> np.ndarray:
        """Compute and cache the pairwise cosine-similarity matrix."""
        if not sentences:
            return np.array([])

        embeddings = self.get_bert_embeddings(sentences)
        similarity_matrix = cosine_similarity(embeddings)

        self.similarity_matrix = similarity_matrix
        return similarity_matrix

    def chunk(self, text: str) -> List[Chunk]:
        """Split ``text`` into semantically coherent chunks.

        Returns:
            List of Chunk objects; a single chunk covering the whole text
            when fewer than two sentences are found.
        """
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            # Keep self.chunks consistent with the normal path.
            self.chunks = [Chunk(text=text, start_index=0, end_index=len(text),
                                 chunk_type='bert')]
            return self.chunks

        similarity_matrix = self.compute_similarity_matrix(sentences)
        chunks = []
        current_chunk_sentences = []
        current_start = 0

        for i, sentence in enumerate(sentences):
            if current_chunk_sentences:
                # BUGFIX: compare this sentence with its predecessor using
                # GLOBAL sentence indices (i-1, i). The previous code indexed
                # the matrix with positions local to the current chunk, which
                # diverge from the document indices after the first split.
                similarity = similarity_matrix[i - 1][i]

                # Split when cohesion drops or the chunk would get too long.
                should_split = (
                    similarity < self.similarity_threshold or
                    len(' '.join(current_chunk_sentences + [sentence])) > self.max_chunk_size
                )

                if should_split:
                    chunk_text = ' '.join(current_chunk_sentences)
                    chunk = Chunk(
                        text=chunk_text,
                        start_index=current_start,
                        end_index=current_start + len(chunk_text),
                        chunk_type='bert',
                        metadata={
                            'model_name': self.model_name,
                            'similarity_threshold': self.similarity_threshold,
                            'num_sentences': len(current_chunk_sentences),
                            'bert_available': self.bert_available
                        }
                    )
                    chunks.append(chunk)

                    # Advance past the emitted text plus the joining space.
                    current_start += len(chunk_text) + 1
                    current_chunk_sentences = []

            current_chunk_sentences.append(sentence)

        # Flush the trailing chunk.
        if current_chunk_sentences:
            chunk_text = ' '.join(current_chunk_sentences)
            chunk = Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='bert',
                metadata={
                    'model_name': self.model_name,
                    'similarity_threshold': self.similarity_threshold,
                    'num_sentences': len(current_chunk_sentences),
                    'bert_available': self.bert_available
                }
            )
            chunks.append(chunk)

        self.chunks = chunks
        return self.chunks


class CrossEncoderChunker(SemanticChunker):
    """Semantic chunker driven by a cross-encoder similarity model.

    Scores each adjacent sentence pair with a cross-encoder (TF-IDF cosine
    similarity as a fallback) and opens a new chunk when the score falls
    below ``similarity_threshold`` or the chunk would grow past
    ``max_chunk_size`` characters.
    """

    def __init__(self, model_name: str = 'cross-encoder/stsb-roberta-base',
                 max_chunk_size: int = 1000, similarity_threshold: float = 0.5,
                 device: str = 'auto'):
        """
        Args:
            model_name: sentence-transformers cross-encoder model id.
            max_chunk_size: maximum chunk length in characters.
            similarity_threshold: pair score below which a new chunk starts.
            device: kept for interface symmetry with the other chunkers.
        """
        super().__init__()
        self.model_name = model_name
        self.max_chunk_size = max_chunk_size
        self.similarity_threshold = similarity_threshold
        self.device = device

        # Prefer a real cross-encoder; degrade gracefully to TF-IDF.
        try:
            from sentence_transformers import CrossEncoder
            self.model = CrossEncoder(model_name)
            self.cross_encoder_available = True
        except ImportError:
            print("警告: 未安装sentence-transformers库，交叉编码器分块器将使用TF-IDF作为备选")
            self.cross_encoder_available = False
            self.vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))

    def split_sentences(self, text: str) -> List[str]:
        """Split text on sentence-ending punctuation; keep only stripped
        fragments longer than 10 characters."""
        fragments = (part.strip() for part in re.split(r'[.!?。！？；;]', text))
        return [fragment for fragment in fragments if len(fragment) > 10]

    def compute_cross_encoder_similarity(self, sentence_pairs: List[Tuple[str, str]]) -> List[float]:
        """Score each (left, right) sentence pair; higher means more related."""
        if self.cross_encoder_available:
            # Cross-encoder returns a numpy array of scores.
            return self.model.predict(sentence_pairs).tolist()

        # Fallback: TF-IDF cosine similarity, fitted fresh for each pair.
        scores = []
        for left, right in sentence_pairs:
            pair_matrix = self.vectorizer.fit_transform([left, right])
            scores.append(cosine_similarity(pair_matrix)[0][1])
        return scores

    def chunk(self, text: str) -> List[Chunk]:
        """Split ``text`` into chunks at points where the adjacent-sentence
        cross-encoder score drops below the threshold."""
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            return [Chunk(text=text, start_index=0, end_index=len(text), 
                         chunk_type='cross_encoder')]

        chunks = []
        pending = []
        offset = 0

        for sentence in sentences:
            if pending:
                # Score this sentence against the last one in the open chunk.
                score = self.compute_cross_encoder_similarity(
                    [(pending[-1], sentence)])[0]
                too_long = len(' '.join(pending + [sentence])) > self.max_chunk_size

                if score < self.similarity_threshold or too_long:
                    # Close out the current chunk.
                    body = ' '.join(pending)
                    chunks.append(Chunk(
                        text=body,
                        start_index=offset,
                        end_index=offset + len(body),
                        chunk_type='cross_encoder',
                        metadata={
                            'model_name': self.model_name,
                            'similarity_threshold': self.similarity_threshold,
                            'num_sentences': len(pending),
                            'cross_encoder_available': self.cross_encoder_available,
                            'last_similarity': score
                        }
                    ))

                    # Advance past the emitted text plus a joining space.
                    offset += len(body) + 1
                    pending = []

            pending.append(sentence)

        # Emit whatever is left over.
        if pending:
            body = ' '.join(pending)
            chunks.append(Chunk(
                text=body,
                start_index=offset,
                end_index=offset + len(body),
                chunk_type='cross_encoder',
                metadata={
                    'model_name': self.model_name,
                    'similarity_threshold': self.similarity_threshold,
                    'num_sentences': len(pending),
                    'cross_encoder_available': self.cross_encoder_available
                }
            ))

        self.chunks = chunks
        return self.chunks


class GraphBasedChunker(SemanticChunker):
    """Graph-based semantic chunker.

    Builds a sentence-similarity graph (TF-IDF cosine similarity, with
    edges above ``community_threshold``), groups sentences via community
    detection (networkx Louvain, or agglomerative clustering as a
    fallback), and emits one chunk per community. Oversized chunks are
    split further on word boundaries.
    """

    def __init__(self, max_chunk_size: int = 1000, community_threshold: float = 0.3,
                 min_community_size: int = 2):
        """
        Args:
            max_chunk_size: maximum chunk length in characters.
            community_threshold: minimum similarity for a graph edge.
            min_community_size: communities smaller than this are dropped;
                their sentences end up in the trailing 'unassigned' chunk.
        """
        super().__init__()
        self.max_chunk_size = max_chunk_size
        self.community_threshold = community_threshold
        self.min_community_size = min_community_size
        self.vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))

        # Detect networkx; without it, fall back to plain clustering.
        try:
            import networkx as nx
            from networkx.algorithms import community
            self.networkx_available = True
        except ImportError:
            print("警告: 未安装networkx库，图分块器将使用简单聚类作为备选")
            self.networkx_available = False

    def split_sentences(self, text: str) -> List[str]:
        """Split text on sentence-ending punctuation, dropping fragments
        of 10 characters or fewer."""
        sentence_endings = r'[.!?。！？；;]'
        sentences = re.split(sentence_endings, text)

        cleaned_sentences = []
        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and len(sentence) > 10:
                cleaned_sentences.append(sentence)

        return cleaned_sentences

    def build_similarity_graph(self, sentences: List[str]) -> Any:
        """Build the similarity structure for community detection.

        Returns:
            A networkx Graph when networkx is available, the raw similarity
            matrix otherwise (consumed by the clustering fallback), or
            ``None`` for empty input.
        """
        if not sentences:
            return None

        # TF-IDF cosine similarity between every sentence pair.
        tfidf_matrix = self.vectorizer.fit_transform(sentences)
        similarity_matrix = cosine_similarity(tfidf_matrix)

        if not self.networkx_available:
            return similarity_matrix

        import networkx as nx
        G = nx.Graph()

        for i, sentence in enumerate(sentences):
            G.add_node(i, text=sentence)

        # Connect sentence pairs whose similarity clears the threshold.
        for i in range(len(sentences)):
            for j in range(i + 1, len(sentences)):
                similarity = similarity_matrix[i][j]
                if similarity > self.community_threshold:
                    G.add_edge(i, j, weight=similarity)

        return G

    def detect_communities(self, graph: Any) -> List[List[int]]:
        """Detect sentence communities, dropping those smaller than
        ``min_community_size``."""
        if not self.networkx_available:
            from sklearn.cluster import AgglomerativeClustering

            # BUGFIX: 1 - similarity is a precomputed DISTANCE matrix and
            # must be declared as such; otherwise AgglomerativeClustering
            # treats the matrix rows as feature vectors and clusters on
            # euclidean distances between those rows.
            distance_matrix = 1 - graph
            params = dict(
                n_clusters=None,
                distance_threshold=1 - self.community_threshold,
                linkage='average',
            )
            try:
                clustering = AgglomerativeClustering(metric='precomputed', **params)
            except TypeError:
                # scikit-learn < 1.2 spells this argument 'affinity'.
                clustering = AgglomerativeClustering(affinity='precomputed', **params)
            labels = clustering.fit_predict(distance_matrix)

            # Group sentence indices by cluster label.
            communities = {}
            for i, label in enumerate(labels):
                communities.setdefault(label, []).append(i)

            return [comm for comm in communities.values()
                    if len(comm) >= self.min_community_size]

        import networkx as nx
        from networkx.algorithms import community

        # Louvain community detection on the weighted similarity graph.
        communities = community.louvain_communities(graph)

        # Filter out communities that are too small to keep.
        return [list(comm) for comm in communities
                if len(comm) >= self.min_community_size]

    def chunk(self, text: str) -> List[Chunk]:
        """Split ``text`` into chunks, one per detected sentence community,
        plus a final chunk for sentences not assigned to any community."""
        self.clear()

        if not text:
            return self.chunks

        sentences = self.split_sentences(text)
        if len(sentences) < 2:
            # Keep self.chunks consistent with the normal path.
            self.chunks = [Chunk(text=text, start_index=0, end_index=len(text),
                                 chunk_type='graph_based')]
            return self.chunks

        graph = self.build_similarity_graph(sentences)
        communities = self.detect_communities(graph)

        chunks = []
        current_start = 0

        for comm in communities:
            # Restore original sentence order within the community.
            community_sentences = [sentences[i] for i in sorted(comm)]
            chunk_text = ' '.join(community_sentences)

            if len(chunk_text) > self.max_chunk_size:
                # Too big for a single chunk: split on word boundaries.
                chunks.extend(self._split_large_chunk(chunk_text, current_start))
            else:
                chunks.append(Chunk(
                    text=chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(chunk_text),
                    chunk_type='graph_based',
                    metadata={
                        'community_size': len(comm),
                        'community_threshold': self.community_threshold,
                        'num_sentences': len(community_sentences),
                        'networkx_available': self.networkx_available
                    }
                ))

            current_start += len(chunk_text) + 1

        # Sentences that landed in no kept community form one final chunk.
        assigned_indices = set()
        for comm in communities:
            assigned_indices.update(comm)

        unassigned_indices = [i for i in range(len(sentences)) if i not in assigned_indices]
        if unassigned_indices:
            unassigned_sentences = [sentences[i] for i in unassigned_indices]
            chunk_text = ' '.join(unassigned_sentences)

            chunks.append(Chunk(
                text=chunk_text,
                start_index=current_start,
                end_index=current_start + len(chunk_text),
                chunk_type='graph_based',
                metadata={
                    'unassigned_sentences': True,
                    'num_sentences': len(unassigned_sentences)
                }
            ))

        self.chunks = chunks
        return self.chunks

    def _split_large_chunk(self, chunk_text: str, start_pos: int) -> List[Chunk]:
        """Split an oversized chunk into word-boundary sub-chunks of at
        most ``max_chunk_size`` characters each."""
        words = chunk_text.split()
        sub_chunks = []
        current_words = []
        current_length = 0
        current_start = start_pos

        for word in words:
            # +1 accounts for the joining space.
            if current_length + len(word) + 1 > self.max_chunk_size and current_words:
                sub_chunk_text = ' '.join(current_words)
                sub_chunks.append(Chunk(
                    text=sub_chunk_text,
                    start_index=current_start,
                    end_index=current_start + len(sub_chunk_text),
                    chunk_type='graph_based',
                    metadata={'is_sub_chunk': True}
                ))
                current_words = []
                current_start += len(sub_chunk_text) + 1
                current_length = 0

            current_words.append(word)
            current_length += len(word) + 1

        if current_words:
            sub_chunk_text = ' '.join(current_words)
            sub_chunks.append(Chunk(
                text=sub_chunk_text,
                start_index=current_start,
                end_index=current_start + len(sub_chunk_text),
                chunk_type='graph_based',
                metadata={'is_sub_chunk': True}
            ))

        return sub_chunks


# Script entry point.
# NOTE(review): `main` is not defined anywhere in this section of the file —
# presumably it is defined earlier in the module. TODO confirm it exists,
# otherwise running this file as a script raises NameError.
if __name__ == "__main__":
    main()