"""
分块评估和对比演示
演示如何评估和对比不同分块方法的效果
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import time
import json
from typing import List, Dict, Any, Optional, Tuple, Callable
from dataclasses import dataclass, asdict
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import re
from collections import Counter

# 导入所有分块器
try:
    from basic_chunking import (
        FixedLengthChunker, SentenceChunker, ParagraphChunker, 
        WordChunker, LineChunker, RecursiveChunker
    )
except (ImportError, SyntaxError):
    from import_utils import safe_import
    (FixedLengthChunker, SentenceChunker, ParagraphChunker, 
     WordChunker, LineChunker, RecursiveChunker) = safe_import(
        ".01_basic_chunking", package="chunk_demo", 
        from_list=["FixedLengthChunker", "SentenceChunker", "ParagraphChunker", 
                  "WordChunker", "LineChunker", "RecursiveChunker"]
    )

try:
    from semantic_chunking import (
        TfidfSemanticChunker, SentenceTransformerChunker, 
        ClusteringChunker, TopicBasedChunker, SlidingWindowSemanticChunker,
        BERTSemanticChunker, CrossEncoderChunker, GraphBasedChunker
    )
except (ImportError, SyntaxError):
    from import_utils import safe_import
    (TfidfSemanticChunker, SentenceTransformerChunker, 
     ClusteringChunker, TopicBasedChunker, SlidingWindowSemanticChunker,
     BERTSemanticChunker, CrossEncoderChunker, GraphBasedChunker) = safe_import(
        ".02_semantic_chunking", package="chunk_demo",
        from_list=["TfidfSemanticChunker", "SentenceTransformerChunker", 
                  "ClusteringChunker", "TopicBasedChunker", "SlidingWindowSemanticChunker",
                  "BERTSemanticChunker", "CrossEncoderChunker", "GraphBasedChunker"]
    )

try:
    from document_structure_chunking import (
        MarkdownChunker, HTMLChunker, CodeChunker, StructuredTextChunker
    )
except (ImportError, SyntaxError):
    from import_utils import safe_import
    (MarkdownChunker, HTMLChunker, CodeChunker, StructuredTextChunker) = safe_import(
        ".03_document_structure_chunking", package="chunk_demo",
        from_list=["MarkdownChunker", "HTMLChunker", "CodeChunker", "StructuredTextChunker"]
    )

try:
    from adaptive_chunking import (
        DensityBasedChunker, SemanticFlowChunker, HybridAdaptiveChunker,
        ReinforcementLearningChunker, NeuralNetworkChunker, StreamingChunker
    )
except (ImportError, SyntaxError):
    from import_utils import safe_import
    (DensityBasedChunker, SemanticFlowChunker, HybridAdaptiveChunker,
     ReinforcementLearningChunker, NeuralNetworkChunker, StreamingChunker) = safe_import(
        ".04_adaptive_chunking", package="chunk_demo",
        from_list=["DensityBasedChunker", "SemanticFlowChunker", "HybridAdaptiveChunker",
                  "ReinforcementLearningChunker", "NeuralNetworkChunker", "StreamingChunker"]
    )

try:
    from special_format_chunking import (
        TableChunker, JSONChunker, XMLChunker, LogChunker, CodeDocumentationChunker,
        YAMLChunker, LaTeXChunker, PDFChunker, DOCXChunker, EmailChunker
    )
except (ImportError, SyntaxError):
    from import_utils import safe_import
    (TableChunker, JSONChunker, XMLChunker, LogChunker, CodeDocumentationChunker,
     YAMLChunker, LaTeXChunker, PDFChunker, DOCXChunker, EmailChunker) = safe_import(
        ".05_special_format_chunking", package="chunk_demo",
        from_list=["TableChunker", "JSONChunker", "XMLChunker", "LogChunker", "CodeDocumentationChunker",
                  "YAMLChunker", "LaTeXChunker", "PDFChunker", "DOCXChunker", "EmailChunker"]
    )

try:
    from basic_chunking import Chunk
except (ImportError, SyntaxError):
    from import_utils import safe_import
    Chunk = safe_import(".01_basic_chunking", package="chunk_demo", from_list=["Chunk"])[0]


@dataclass
class ChunkMetrics:
    """Metrics collected for one chunker evaluation run (see ChunkEvaluator)."""
    chunker_name: str            # label identifying the evaluated chunker
    total_chunks: int            # number of chunks produced
    avg_chunk_length: float      # mean chunk length, in characters
    std_chunk_length: float      # standard deviation of chunk lengths
    min_chunk_length: int        # shortest chunk length
    max_chunk_length: int        # longest chunk length
    chunking_time: float         # wall-clock duration of the chunk() call, seconds
    memory_usage_mb: float       # RSS delta during chunking, MB (0.0 when unknown)
    semantic_coherence: float    # mean TF-IDF cosine similarity of adjacent chunks
    boundary_quality: float      # 0-1 score for sentence-aligned chunk boundaries
    content_completeness: float  # blend of character coverage and structure preservation
    overlap_ratio: float         # overlapping chars between neighbors / total chunk chars
    metadata_score: float        # 0-1 richness score of per-chunk metadata
    
    def to_dict(self) -> Dict[str, Any]:
        """Return all metric fields as a plain dict (via dataclasses.asdict)."""
        return asdict(self)


class ChunkEvaluator:
    """Runs chunkers over a text and scores the resulting chunks.

    Each call to :meth:`evaluate_chunker` appends a ``ChunkMetrics`` record to
    ``self.evaluation_results``; reporting/plotting methods consume that list.
    """
    
    def __init__(self, reference_text: Optional[str] = None):
        # Optional reference text (kept for callers; not used by the metrics).
        self.reference_text = reference_text
        # One ChunkMetrics entry per evaluate_chunker() call.
        self.evaluation_results = []
        
    def evaluate_chunker(self, chunker, text: str, chunker_name: str) -> "ChunkMetrics":
        """Chunk `text` with `chunker` and record timing, memory and quality metrics.

        Args:
            chunker: object exposing ``chunk(text)`` returning Chunk objects.
            text: the document to split.
            chunker_name: label stored alongside the metrics.

        Returns:
            The populated ChunkMetrics (also appended to ``evaluation_results``).
        """
        print(f"评估分块器: {chunker_name}")
        
        # Sample time/memory immediately around the chunking call.
        start_time = time.time()
        start_memory = self._get_memory_usage()
        
        chunks = chunker.chunk(text)
        
        end_time = time.time()
        end_memory = self._get_memory_usage()
        
        # Basic size statistics over the produced chunks.
        chunk_lengths = [len(chunk.text) for chunk in chunks]
        total_chunks = len(chunks)
        avg_chunk_length = np.mean(chunk_lengths) if chunk_lengths else 0
        std_chunk_length = np.std(chunk_lengths) if chunk_lengths else 0
        min_chunk_length = min(chunk_lengths) if chunk_lengths else 0
        max_chunk_length = max(chunk_lengths) if chunk_lengths else 0
        chunking_time = end_time - start_time
        memory_usage = end_memory - start_memory
        
        # Higher-level quality scores (each nominally in [0, 1]).
        semantic_coherence = self._calculate_semantic_coherence(chunks)
        boundary_quality = self._calculate_boundary_quality(chunks, text)
        content_completeness = self._calculate_content_completeness(chunks, text)
        overlap_ratio = self._calculate_overlap_ratio(chunks)
        metadata_score = self._calculate_metadata_score(chunks)
        
        metrics = ChunkMetrics(
            chunker_name=chunker_name,
            total_chunks=total_chunks,
            avg_chunk_length=avg_chunk_length,
            std_chunk_length=std_chunk_length,
            min_chunk_length=min_chunk_length,
            max_chunk_length=max_chunk_length,
            chunking_time=chunking_time,
            memory_usage_mb=memory_usage,
            semantic_coherence=semantic_coherence,
            boundary_quality=boundary_quality,
            content_completeness=content_completeness,
            overlap_ratio=overlap_ratio,
            metadata_score=metadata_score
        )
        
        self.evaluation_results.append(metrics)
        return metrics
    
    def _get_memory_usage(self) -> float:
        """Return current process RSS in MB, or 0.0 if psutil is unavailable."""
        try:
            import psutil
            process = psutil.Process()
            return process.memory_info().rss / (1024 * 1024)
        except Exception:
            # psutil not installed (or the query failed): report "unknown".
            return 0.0
    
    def _calculate_semantic_coherence(self, chunks: "List[Chunk]") -> float:
        """Mean TF-IDF cosine similarity between consecutive chunks."""
        if len(chunks) < 2:
            # A single chunk is trivially coherent.
            return 1.0
        
        try:
            texts = [chunk.text for chunk in chunks]
            vectorizer = TfidfVectorizer(max_features=100, stop_words='english')
            tfidf_matrix = vectorizer.fit_transform(texts)
            
            # Similarity of each chunk to its immediate successor.
            similarities = []
            for i in range(len(texts) - 1):
                sim = cosine_similarity(tfidf_matrix[i:i+1], tfidf_matrix[i+1:i+2])[0][0]
                similarities.append(sim)
            
            return np.mean(similarities) if similarities else 0.0
        except Exception:
            # Vectorization can legitimately fail (e.g. empty vocabulary
            # after stop-word removal); treat that as zero coherence.
            return 0.0
    
    def _calculate_boundary_quality(self, chunks: "List[Chunk]", original_text: str) -> float:
        """Average how well chunk start/end offsets fall on sentence breaks."""
        if not chunks:
            return 0.0
        
        boundary_scores = []
        
        for chunk in chunks:
            start_pos = chunk.start_index
            end_pos = chunk.end_index
            
            # Score both edges of the chunk independently, then average.
            start_score = self._is_sentence_boundary(original_text, start_pos)
            end_score = self._is_sentence_boundary(original_text, end_pos)
            
            boundary_scores.append((start_score + end_score) / 2)
        
        return np.mean(boundary_scores) if boundary_scores else 0.0
    
    def _is_sentence_boundary(self, text: str, position: int) -> float:
        """Score `position` as a sentence boundary: 1.0 (strong), 0.8 (weak) or 0.0."""
        if position <= 0 or position >= len(text):
            # Document edges are not scored as natural boundaries.
            return 0.0
        
        before_char = text[position - 1]
        after_char = text[position] if position < len(text) else ''
        
        # Sentence-ending punctuation (ASCII and CJK full-width).
        sentence_endings = '.!?。！？'
        
        # Strong signal: ending punctuation followed by whitespace.
        if before_char in sentence_endings and after_char in ' \n\t':
            return 1.0
        
        # Weaker signal: whitespace followed by an uppercase letter.
        if before_char in ' \n\t' and after_char.isupper():
            return 0.8
        
        return 0.0
    
    def _calculate_content_completeness(self, chunks: "List[Chunk]", original_text: str) -> float:
        """Blend of character coverage and structure-marker preservation."""
        if not chunks:
            return 0.0
        
        # Fraction of the original characters reproduced across all chunks.
        covered_text = ''.join(chunk.text for chunk in chunks)
        original_length = len(original_text)
        covered_length = len(covered_text)
        
        # NOTE(review): overlapping chunkers can push this ratio above 1.0.
        coverage_ratio = covered_length / original_length if original_length > 0 else 0
        
        structure_preservation = self._check_structure_preservation(chunks, original_text)
        
        return (coverage_ratio + structure_preservation) / 2
    
    def _check_structure_preservation(self, chunks: "List[Chunk]", original_text: str) -> float:
        """Ratio of structural markers (headers/lists/code/tables) kept in chunks."""
        structure_markers = [
            r'^#{1,6}\s+',  # Markdown headers
            r'^\s*[-*+]\s+',  # list items
            r'```',  # code fences
            r'\|.*\|',  # table rows
        ]
        
        original_structures = 0
        preserved_structures = 0
        
        for pattern in structure_markers:
            original_matches = len(re.findall(pattern, original_text, re.MULTILINE))
            original_structures += original_matches
            
            # Count the same markers inside every chunk.
            for chunk in chunks:
                chunk_matches = len(re.findall(pattern, chunk.text, re.MULTILINE))
                preserved_structures += chunk_matches
        
        # Can exceed 1.0 when overlapping chunks duplicate a marker.
        return preserved_structures / original_structures if original_structures > 0 else 1.0
    
    def _calculate_overlap_ratio(self, chunks: "List[Chunk]") -> float:
        """Fraction of total chunk characters shared between adjacent chunks."""
        if len(chunks) < 2:
            return 0.0
        
        total_overlap = 0
        total_length = 0
        
        for i in range(len(chunks) - 1):
            current_chunk = chunks[i]
            next_chunk = chunks[i + 1]
            
            # Intersection of the two [start_index, end_index) ranges.
            overlap_start = max(current_chunk.start_index, next_chunk.start_index)
            overlap_end = min(current_chunk.end_index, next_chunk.end_index)
            
            if overlap_end > overlap_start:
                overlap_length = overlap_end - overlap_start
                total_overlap += overlap_length
            
            total_length += len(current_chunk.text)
        
        # The loop above only counts lengths of chunks [0, n-2]; add the last.
        total_length += len(chunks[-1].text)
        
        return total_overlap / total_length if total_length > 0 else 0.0
    
    def _calculate_metadata_score(self, chunks: "List[Chunk]") -> float:
        """Average 0-1 richness score of each chunk's metadata dict."""
        if not chunks:
            return 0.0
        
        # Fields considered informative when present in chunk metadata.
        useful_fields = ['chunk_type', 'sentence_count', 'paragraph_count', 
                         'header_level', 'function_name', 'section_title']
        
        metadata_scores = []
        
        for chunk in chunks:
            # Guard: metadata may be None rather than an empty dict;
            # the previous code raised TypeError on `field in None`.
            metadata = chunk.metadata or {}
            
            # Base credit for having any metadata at all.
            score = 0.3 if metadata else 0.0
            
            # Extra credit per informative field present.
            for field in useful_fields:
                if field in metadata:
                    score += 0.1
            
            metadata_scores.append(min(score, 1.0))
        
        return np.mean(metadata_scores) if metadata_scores else 0.0
    
    def print_evaluation_results(self):
        """Print a formatted table of all collected metrics plus per-metric winners."""
        if not self.evaluation_results:
            print("没有评估结果")
            return
        
        print("\n" + "=" * 80)
        print("分块器评估结果")
        print("=" * 80)
        
        df = pd.DataFrame([result.to_dict() for result in self.evaluation_results])
        
        # Show only the most informative columns.
        important_cols = ['chunker_name', 'total_chunks', 'avg_chunk_length', 
                         'chunking_time', 'semantic_coherence', 'boundary_quality']
        
        display_df = df[important_cols].copy()
        
        # Render numeric columns as fixed-precision strings for the table.
        display_df['avg_chunk_length'] = display_df['avg_chunk_length'].apply(lambda x: f"{x:.1f}")
        display_df['chunking_time'] = display_df['chunking_time'].apply(lambda x: f"{x:.4f}s")
        display_df['semantic_coherence'] = display_df['semantic_coherence'].apply(lambda x: f"{x:.3f}")
        display_df['boundary_quality'] = display_df['boundary_quality'].apply(lambda x: f"{x:.3f}")
        
        print(display_df.to_string(index=False))
        
        print("\n" + "=" * 60)
        print("最佳分块器分析")
        print("=" * 60)
        
        # Per-metric winners, computed on the unformatted numeric frame.
        best_speed = df.loc[df['chunking_time'].idxmin()]
        best_coherence = df.loc[df['semantic_coherence'].idxmax()]
        best_boundary = df.loc[df['boundary_quality'].idxmax()]
        best_completeness = df.loc[df['content_completeness'].idxmax()]
        
        print(f"⚡ 最快分块: {best_speed['chunker_name']} ({best_speed['chunking_time']:.4f}s)")
        print(f"🔗 语义连贯性最佳: {best_coherence['chunker_name']} ({best_coherence['semantic_coherence']:.3f})")
        print(f"📐 边界质量最佳: {best_boundary['chunker_name']} ({best_boundary['boundary_quality']:.3f})")
        print(f"✅ 内容完整性最佳: {best_completeness['chunker_name']} ({best_completeness['content_completeness']:.3f})")
    
    def save_results(self, filepath: str):
        """Dump all collected metrics to `filepath` as pretty-printed JSON."""
        results_data = [result.to_dict() for result in self.evaluation_results]
        
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(results_data, f, ensure_ascii=False, indent=2)
        
        print(f"评估结果已保存到: {filepath}")
    
    def create_visualization(self, output_dir: str = "evaluation_plots"):
        """Write comparison bar charts, a radar chart and a correlation heatmap.

        PNGs are saved under `output_dir` (created if missing) and each figure
        is also shown interactively.
        """
        import os
        os.makedirs(output_dir, exist_ok=True)
        
        if not self.evaluation_results:
            print("没有评估结果，无法创建可视化")
            return
        
        df = pd.DataFrame([result.to_dict() for result in self.evaluation_results])
        
        # --- Figure 1: six-panel bar-chart comparison ---
        plt.figure(figsize=(15, 10))
        
        # Panel 1: chunking time
        plt.subplot(2, 3, 1)
        plt.bar(df['chunker_name'], df['chunking_time'])
        plt.title('分块时间对比')
        plt.ylabel('时间 (秒)')
        plt.xticks(rotation=45, ha='right')
        
        # Panel 2: average chunk length
        plt.subplot(2, 3, 2)
        plt.bar(df['chunker_name'], df['avg_chunk_length'])
        plt.title('平均块长度对比')
        plt.ylabel('字符数')
        plt.xticks(rotation=45, ha='right')
        
        # Panel 3: semantic coherence
        plt.subplot(2, 3, 3)
        plt.bar(df['chunker_name'], df['semantic_coherence'])
        plt.title('语义连贯性对比')
        plt.ylabel('连贯性分数')
        plt.xticks(rotation=45, ha='right')
        
        # Panel 4: boundary quality
        plt.subplot(2, 3, 4)
        plt.bar(df['chunker_name'], df['boundary_quality'])
        plt.title('边界质量对比')
        plt.ylabel('质量分数')
        plt.xticks(rotation=45, ha='right')
        
        # Panel 5: memory usage
        plt.subplot(2, 3, 5)
        plt.bar(df['chunker_name'], df['memory_usage_mb'])
        plt.title('内存使用对比')
        plt.ylabel('内存 (MB)')
        plt.xticks(rotation=45, ha='right')
        
        # Panel 6: weighted overall score (quality metrics + speed bonus)
        plt.subplot(2, 3, 6)
        df['overall_score'] = (
            df['semantic_coherence'] * 0.3 +
            df['boundary_quality'] * 0.25 +
            df['content_completeness'] * 0.25 +
            df['metadata_score'] * 0.1 +
            (1 - df['chunking_time'] / df['chunking_time'].max()) * 0.1
        )
        plt.bar(df['chunker_name'], df['overall_score'])
        plt.title('综合评分对比')
        plt.ylabel('综合分数')
        plt.xticks(rotation=45, ha='right')
        
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'chunker_comparison.png'), dpi=300, bbox_inches='tight')
        plt.show()
        
        # --- Figure 2: radar chart of normalized key metrics ---
        plt.figure(figsize=(12, 8))
        
        metrics = ['semantic_coherence', 'boundary_quality', 'content_completeness', 
                  'metadata_score', 'overall_score']
        
        # Normalize each metric to [0, 1] by its column maximum.
        normalized_data = df[metrics].copy()
        for col in metrics:
            normalized_data[col] = normalized_data[col] / normalized_data[col].max()
        
        angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
        angles += angles[:1]  # close the polygon
        
        fig, ax = plt.subplots(figsize=(10, 8), subplot_kw=dict(projection='polar'))
        
        for i, (_, row) in enumerate(normalized_data.iterrows()):
            values = row[metrics].tolist()
            values += values[:1]  # close the polygon
            
            # Positional lookup (robust even if df has a non-default index).
            ax.plot(angles, values, 'o-', linewidth=2, label=df['chunker_name'].iloc[i])
            ax.fill(angles, values, alpha=0.25)
        
        ax.set_xticks(angles[:-1])
        ax.set_xticklabels(['语义连贯性', '边界质量', '内容完整性', '元数据质量', '综合评分'])
        ax.set_ylim(0, 1)
        ax.set_title('分块器性能雷达图', size=16, weight='bold', pad=20)
        ax.legend(loc='upper right', bbox_to_anchor=(1.2, 1.0))
        
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'chunker_radar.png'), dpi=300, bbox_inches='tight')
        plt.show()
        
        # --- Figure 3: correlation heatmap over all numeric metrics ---
        plt.figure(figsize=(12, 10))
        
        correlation_data = df[[
            'total_chunks', 'avg_chunk_length', 'chunking_time', 'memory_usage_mb',
            'semantic_coherence', 'boundary_quality', 'content_completeness',
            'overlap_ratio', 'metadata_score'
        ]]
        
        correlation_matrix = correlation_data.corr()
        
        sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0,
                   square=True, linewidths=0.5)
        plt.title('分块指标相关性热力图', size=16, weight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'correlation_heatmap.png'), dpi=300, bbox_inches='tight')
        plt.show()
        
        print(f"可视化图表已保存到: {output_dir}/")


class ComprehensiveComparison:
    """Drives a head-to-head evaluation of all configured chunkers."""
    
    def __init__(self, sample_text: str):
        self.sample_text = sample_text
        self.evaluator = ChunkEvaluator()
        self.chunkers = self._create_chunkers()
    
    def _create_chunkers(self) -> List[Tuple[Any, str]]:
        """Build the (chunker instance, display name) pairs under comparison."""
        # Basic, position-driven chunkers.
        basic = [
            (FixedLengthChunker(chunk_size=500, overlap=100), "FixedLength(500,100)"),
            (SentenceChunker(max_sentences=3), "Sentence(3)"),
            (ParagraphChunker(), "Paragraph"),
            (WordChunker(chunk_size=100, overlap=20), "Word(100,20)"),
            (RecursiveChunker(chunk_size=500), "Recursive(500)"),
        ]
        # Semantic-similarity-driven chunkers.
        semantic = [
            (TfidfSemanticChunker(max_chunk_size=500), "TFIDF_Semantic"),
            (ClusteringChunker(n_clusters=3, max_chunk_size=500), "Clustering(3)"),
            (TopicBasedChunker(max_chunk_size=500), "Topic_Based"),
        ]
        # Document-structure-aware chunkers.
        structural = [
            (MarkdownChunker(max_chunk_size=500), "Markdown"),
            (StructuredTextChunker(max_chunk_size=500), "Structured_Text"),
        ]
        # Adaptive chunkers.
        adaptive = [
            (DensityBasedChunker(base_chunk_size=500), "Density_Based"),
            (SemanticFlowChunker(base_chunk_size=500), "Semantic_Flow"),
            (HybridAdaptiveChunker(base_chunk_size=500), "Hybrid_Adaptive"),
        ]
        return basic + semantic + structural + adaptive
    
    def run_comparison(self):
        """Evaluate every chunker, then report, plot and persist the results."""
        print("=" * 80)
        print("分块器综合对比测试")
        print("=" * 80)
        
        for chunker, name in self.chunkers:
            try:
                metrics = self.evaluator.evaluate_chunker(chunker, self.sample_text, name)
            except Exception as e:
                # A failing chunker is reported but does not abort the run.
                print(f"✗ {name}: 错误 - {str(e)}")
            else:
                print(f"✓ {name}: {metrics.total_chunks} 块, 平均长度 {metrics.avg_chunk_length:.1f}")
        
        self.evaluator.print_evaluation_results()
        self.evaluator.create_visualization()
        self.evaluator.save_results("chunker_evaluation_results.json")
    
    def get_recommendations(self) -> Dict[str, str]:
        """Recommend the best chunker name for several usage profiles."""
        results = self.evaluator.evaluation_results
        if not results:
            return {}
        
        df = pd.DataFrame([r.to_dict() for r in results])
        
        # Per-profile winners keyed by the metric that defines the profile.
        winners = {
            'speed': df['chunking_time'].idxmin(),
            'quality': df['semantic_coherence'].idxmax(),
            'consistency': df['std_chunk_length'].idxmin(),
            'structure': df['boundary_quality'].idxmax(),
        }
        recommendations = {profile: df.loc[idx, 'chunker_name']
                           for profile, idx in winners.items()}
        
        # Weighted blend of quality metrics plus a normalized speed bonus.
        df['overall_score'] = (
            df['semantic_coherence'] * 0.3 +
            df['boundary_quality'] * 0.25 +
            df['content_completeness'] * 0.25 +
            df['metadata_score'] * 0.1 +
            (1 - df['chunking_time'] / df['chunking_time'].max()) * 0.1
        )
        recommendations['overall'] = df.loc[df['overall_score'].idxmax(), 'chunker_name']
        
        return recommendations


class GroundTruthEvaluator:
    """Scores predicted chunks against a manually labelled ground truth."""
    
    def __init__(self, ground_truth_chunks: List[Dict[str, Any]]):
        """
        Args:
            ground_truth_chunks: reference chunking; each dict carries
                - text: chunk text
                - start_index: start offset in the source document
                - end_index: end offset in the source document
                - metadata: optional metadata dict
        """
        self.ground_truth_chunks = ground_truth_chunks
        # One metrics dict appended per evaluate_chunker() call.
        self.evaluation_history = []
    
    def evaluate_chunker(self, predicted_chunks: "List[Chunk]", chunker_name: str) -> Dict[str, float]:
        """Compare predicted chunks with the stored ground truth.

        Args:
            predicted_chunks: chunks produced by the chunker under test.
            chunker_name: label stored with the resulting metrics.
            
        Returns:
            Dict of per-aspect scores plus a weighted overall score.
        """
        print(f"评估分块器 (真实标签): {chunker_name}")
        
        # Normalize ground truth into the {'text', 'start', 'end', 'metadata'}
        # shape the private metric helpers expect.
        truth_chunks = [
            {
                'text': gt['text'],
                'start': gt['start_index'],
                'end': gt['end_index'],
                'metadata': gt.get('metadata', {})
            }
            for gt in self.ground_truth_chunks
        ]
        
        boundary_accuracy = self._calculate_boundary_accuracy(predicted_chunks, truth_chunks)
        content_match = self._calculate_content_match(predicted_chunks, truth_chunks)
        f1_score = self._calculate_f1_score(predicted_chunks, truth_chunks)
        edit_distance = self._calculate_edit_distance(predicted_chunks, truth_chunks)
        structure_similarity = self._calculate_structure_similarity(predicted_chunks, truth_chunks)
        metadata_match = self._calculate_metadata_match(predicted_chunks, truth_chunks)
        
        # Weighted overall score; edit_distance is reported but not weighted.
        overall_score = (
            boundary_accuracy * 0.25 +
            content_match * 0.25 +
            f1_score * 0.20 +
            structure_similarity * 0.15 +
            metadata_match * 0.15
        )
        
        metrics = {
            'chunker_name': chunker_name,
            'boundary_accuracy': boundary_accuracy,
            'content_match': content_match,
            'f1_score': f1_score,
            'edit_distance': edit_distance,
            'structure_similarity': structure_similarity,
            'metadata_match': metadata_match,
            'overall_score': overall_score
        }
        
        self.evaluation_history.append(metrics)
        return metrics
    
    def _calculate_boundary_accuracy(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """Fraction of boundaries matching within a tolerance window."""
        if not predicted or not truth:
            return 0.0
        
        predicted_boundaries = set()
        truth_boundaries = set()
        
        for chunk in predicted:
            predicted_boundaries.add(chunk.start_index)
            predicted_boundaries.add(chunk.end_index)
        
        # BUG FIX: these boundaries were previously added to
        # predicted_boundaries, leaving truth_boundaries empty and forcing
        # this metric to 0 for every chunker.
        for chunk in truth:
            truth_boundaries.add(chunk['start'])
            truth_boundaries.add(chunk['end'])
        
        # Tolerance scales with document size (>= 10 chars).
        tolerance = max(10, len(''.join(c.text for c in predicted)) // 1000)
        matched_boundaries = 0
        
        for p_boundary in predicted_boundaries:
            for t_boundary in truth_boundaries:
                if abs(p_boundary - t_boundary) <= tolerance:
                    matched_boundaries += 1
                    break
        
        return matched_boundaries / max(len(predicted_boundaries), len(truth_boundaries))
    
    def _calculate_content_match(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """LCS-based similarity between concatenated predicted and truth text."""
        if not predicted or not truth:
            return 0.0
        
        predicted_text = ''.join(chunk.text for chunk in predicted)
        truth_text = ''.join(chunk['text'] for chunk in truth)
        
        # Longest common subsequence normalized by the longer text.
        lcs_length = self._longest_common_subsequence(predicted_text, truth_text)
        max_length = max(len(predicted_text), len(truth_text))
        
        return lcs_length / max_length if max_length > 0 else 0.0
    
    def _longest_common_subsequence(self, text1: str, text2: str) -> int:
        """Standard O(m*n) dynamic-programming LCS length."""
        m, n = len(text1), len(text2)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if text1[i-1] == text2[j-1]:
                    dp[i][j] = dp[i-1][j-1] + 1
                else:
                    dp[i][j] = max(dp[i-1][j], dp[i][j-1])
        
        return dp[m][n]
    
    def _calculate_f1_score(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """F1 over exact boundary positions (no tolerance)."""
        if not predicted or not truth:
            return 0.0
        
        predicted_boundaries = set()
        truth_boundaries = set()
        
        for chunk in predicted:
            predicted_boundaries.add(chunk.start_index)
            predicted_boundaries.add(chunk.end_index)
        
        for chunk in truth:
            truth_boundaries.add(chunk['start'])
            truth_boundaries.add(chunk['end'])
        
        true_positives = len(predicted_boundaries & truth_boundaries)
        false_positives = len(predicted_boundaries - truth_boundaries)
        false_negatives = len(truth_boundaries - predicted_boundaries)
        
        precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0
        recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0
        
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
        
        return f1
    
    def _calculate_edit_distance(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """Similarity (1 - normalized Levenshtein) of the chunk-length sequences.

        Despite the name, higher is better: 1.0 means identical sequences.
        """
        if not predicted or not truth:
            return 0.0
        
        # Encode each chunking as a "|"-separated sequence of chunk lengths.
        pred_sequence = '|'.join([f"[{len(chunk.text)}]" for chunk in predicted])
        truth_sequence = '|'.join([f"[{len(chunk['text'])}]" for chunk in truth])
        
        distance = self._levenshtein_distance(pred_sequence, truth_sequence)
        max_length = max(len(pred_sequence), len(truth_sequence))
        
        return 1.0 - (distance / max_length) if max_length > 0 else 0.0
    
    def _levenshtein_distance(self, s1: str, s2: str) -> int:
        """Classic two-row Levenshtein edit distance."""
        if len(s1) < len(s2):
            # Keep s2 as the shorter string so the row stays small.
            return self._levenshtein_distance(s2, s1)
        
        if len(s2) == 0:
            return len(s1)
        
        previous_row = range(len(s2) + 1)
        for i, c1 in enumerate(s1):
            current_row = [i + 1]
            for j, c2 in enumerate(s2):
                insertions = previous_row[j + 1] + 1
                deletions = current_row[j] + 1
                substitutions = previous_row[j] + (c1 != c2)
                current_row.append(min(insertions, deletions, substitutions))
            previous_row = current_row
        
        return previous_row[-1]
    
    def _calculate_structure_similarity(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """Blend of chunk-count similarity and length-distribution similarity."""
        if not predicted or not truth:
            return 0.0
        
        # How close the two chunk counts are, relative to the larger count.
        count_similarity = 1.0 - abs(len(predicted) - len(truth)) / max(len(predicted), len(truth))
        
        pred_lengths = [len(chunk.text) for chunk in predicted]
        truth_lengths = [len(chunk['text']) for chunk in truth]
        
        length_similarity = self._calculate_distribution_similarity(pred_lengths, truth_lengths)
        
        return (count_similarity + length_similarity) / 2
    
    def _calculate_distribution_similarity(self, list1: List[int], list2: List[int]) -> float:
        """1 minus the (partial) KL divergence of the two length histograms."""
        if not list1 or not list2:
            return 0.0
        
        max_val = max(max(list1), max(list2))
        if max_val == 0:
            # All lengths are zero: identical degenerate distributions.
            # (Also avoids np.histogram rejecting non-increasing bins.)
            return 1.0
        
        bins = np.linspace(0, max_val, 20)
        hist1, _ = np.histogram(list1, bins=bins)
        hist2, _ = np.histogram(list2, bins=bins)
        
        # Normalize counts to probabilities.
        hist1 = hist1 / hist1.sum()
        hist2 = hist2 / hist2.sum()
        
        # KL divergence summed only over bins where both are non-zero.
        kl_divergence = 0
        for i in range(len(hist1)):
            if hist1[i] > 0 and hist2[i] > 0:
                kl_divergence += hist1[i] * np.log(hist1[i] / hist2[i])
        
        return max(0, 1 - kl_divergence)
    
    def _calculate_metadata_match(self, predicted: "List[Chunk]", truth: List[Dict]) -> float:
        """Average fraction of metadata fields with identical values."""
        if not predicted or not truth:
            return 0.0
        
        metadata_scores = []
        
        # Compare pairwise by position up to the shorter list.
        min_length = min(len(predicted), len(truth))
        
        for i in range(min_length):
            pred_metadata = predicted[i].metadata or {}
            truth_metadata = truth[i].get('metadata', {})
            
            all_fields = set(pred_metadata.keys()) | set(truth_metadata.keys())
            if not all_fields:
                # Both sides empty: vacuously a perfect match.
                metadata_scores.append(1.0)
                continue
            
            matched_fields = 0
            for field in all_fields:
                if field in pred_metadata and field in truth_metadata:
                    if pred_metadata[field] == truth_metadata[field]:
                        matched_fields += 1
            
            metadata_scores.append(matched_fields / len(all_fields))
        
        return np.mean(metadata_scores) if metadata_scores else 0.0
    
    def print_evaluation_results(self):
        """Print a formatted table of the ground-truth evaluations and the winner."""
        if not self.evaluation_history:
            print("没有评估结果")
            return
        
        print("\n" + "=" * 80)
        print("真实标签评估结果")
        print("=" * 80)
        
        df = pd.DataFrame(self.evaluation_history)
        
        # Render scores as fixed-precision strings for display.
        display_df = df.copy()
        display_df['boundary_accuracy'] = display_df['boundary_accuracy'].apply(lambda x: f"{x:.3f}")
        display_df['content_match'] = display_df['content_match'].apply(lambda x: f"{x:.3f}")
        display_df['f1_score'] = display_df['f1_score'].apply(lambda x: f"{x:.3f}")
        display_df['edit_distance'] = display_df['edit_distance'].apply(lambda x: f"{x:.3f}")
        display_df['overall_score'] = display_df['overall_score'].apply(lambda x: f"{x:.3f}")
        
        print(display_df.to_string(index=False))
        
        # Winner by the weighted overall score (on the numeric frame).
        best_overall = df.loc[df['overall_score'].idxmax()]
        print(f"\n🏆 最佳分块器: {best_overall['chunker_name']} (综合评分: {best_overall['overall_score']:.3f})")


class TaskBasedEvaluator:
    """Task-oriented chunking evaluator.

    Scores a chunker by how well its chunks support a downstream task
    (RAG retrieval, text classification, summarization, or QA) rather
    than by intrinsic chunk statistics alone.  When no real task data is
    supplied, each task falls back to a lightweight simulation (some of
    the simulated scores are random and therefore non-deterministic).
    """

    def __init__(self, task_type: str = "rag"):
        """
        Initialize the task evaluator.

        Args:
            task_type: Task type; one of "rag", "classification",
                "summarization", "qa".
        """
        self.task_type = task_type
        self.evaluation_results: List[Dict[str, Any]] = []
        # Dispatch table: task type -> evaluation routine.
        self.task_functions: Dict[str, Callable] = {
            "rag": self._evaluate_rag_task,
            "classification": self._evaluate_classification_task,
            "summarization": self._evaluate_summarization_task,
            "qa": self._evaluate_qa_task
        }

    @staticmethod
    def _safe_mean(values: List[float], default: float = 0.0) -> float:
        """Mean of ``values`` as a plain float, or ``default`` when empty.

        Guards against ``np.mean([])``, which returns ``nan`` and emits a
        RuntimeWarning (possible here when queries/questions lists are empty).
        """
        return float(np.mean(values)) if values else default

    def evaluate_chunker(self, chunker, text: str, chunker_name: str,
                        task_data: Optional[Dict[str, Any]] = None) -> Dict[str, float]:
        """
        Evaluate a chunker's performance on the configured task.

        Args:
            chunker: Chunker object exposing a ``chunk(text)`` method.
            text: Input text to split.
            chunker_name: Display name recorded with the metrics.
            task_data: Optional task-specific data (queries, categories,
                questions, true labels); simulation is used when absent.

        Returns:
            Task metric dict (empty when the task type is unsupported).
        """
        print(f"评估分块器 (任务: {self.task_type}): {chunker_name}")

        # Run the chunker once; all task metrics are derived from this output.
        chunks = chunker.chunk(text)

        # Unsupported task types are reported, not raised, so a batch of
        # evaluations can continue past a misconfigured entry.
        if self.task_type not in self.task_functions:
            print(f"不支持的任务类型: {self.task_type}")
            return {}

        metrics = self.task_functions[self.task_type](chunks, text, task_data)
        metrics['chunker_name'] = chunker_name
        metrics['task_type'] = self.task_type
        metrics['total_chunks'] = len(chunks)

        self.evaluation_results.append(metrics)
        return metrics

    def _evaluate_rag_task(self, chunks: List["Chunk"], text: str,
                           task_data: Optional[Dict[str, Any]]) -> Dict[str, float]:
        """Score retrieval quality for a RAG workload.

        Uses the caller-supplied queries when present; otherwise falls
        back to simulated keyword queries.
        """
        if not task_data or 'queries' not in task_data:
            return self._simulate_rag_evaluation(chunks, text)

        queries = task_data['queries']
        retrieval_scores = [
            self._calculate_retrieval_score(
                self._find_relevant_chunks(chunks, query), query)
            for query in queries
        ]

        return {
            'retrieval_accuracy': self._safe_mean(retrieval_scores),
            'chunk_coverage': self._calculate_chunk_coverage(chunks, text),
            'query_response_time': self._simulate_response_time(chunks),
            'context_relevance': self._calculate_context_relevance(chunks, queries)
        }

    def _simulate_rag_evaluation(self, chunks: List["Chunk"], text: str) -> Dict[str, float]:
        """Simulate a RAG evaluation using auto-generated keyword queries."""
        simulated_queries = self._generate_simulated_queries(text)

        retrieval_scores = []
        for query in simulated_queries:
            relevant_chunks = self._find_relevant_chunks(chunks, query)
            # Fraction of all chunks that matched the query at all.
            retrieval_scores.append(len(relevant_chunks) / len(chunks) if chunks else 0)

        return {
            'retrieval_accuracy': self._safe_mean(retrieval_scores),
            'chunk_coverage': self._calculate_chunk_coverage(chunks, text),
            'query_response_time': self._simulate_response_time(chunks),
            # No real queries available, so relevance reuses the retrieval signal.
            'context_relevance': self._safe_mean(retrieval_scores)
        }

    def _evaluate_classification_task(self, chunks: List["Chunk"], text: str,
                                      task_data: Optional[Dict[str, Any]]) -> Dict[str, float]:
        """Score chunk quality for a text-classification workload."""
        if not task_data or 'categories' not in task_data:
            return self._simulate_classification_evaluation(chunks, text)

        categories = task_data['categories']
        classification_scores = []

        for chunk in chunks:
            predicted_category = self._classify_chunk(chunk.text, categories)
            # Ground-truth labels are keyed by the chunk's start offset
            # (assumes task_data['true_labels'] maps start_index -> label).
            true_category = task_data.get('true_labels', {}).get(chunk.start_index, None)
            if true_category:
                classification_scores.append(1.0 if predicted_category == true_category else 0.0)

        return {
            'classification_accuracy': self._safe_mean(classification_scores),
            'category_coverage': self._calculate_category_coverage(chunks, categories),
            'chunk_purity': self._calculate_chunk_purity(chunks),
            'inter_chunk_consistency': self._calculate_inter_chunk_consistency(chunks)
        }

    def _simulate_classification_evaluation(self, chunks: List["Chunk"], text: str) -> Dict[str, float]:
        """Simulate a classification evaluation (random, non-deterministic scores)."""
        simulated_accuracy = np.random.uniform(0.6, 0.9)

        return {
            'classification_accuracy': simulated_accuracy,
            'category_coverage': len(chunks) / 10.0,  # assumes 10 categories
            'chunk_purity': np.random.uniform(0.7, 0.95),
            'inter_chunk_consistency': np.random.uniform(0.6, 0.9)
        }

    def _evaluate_summarization_task(self, chunks: List["Chunk"], text: str,
                                     task_data: Optional[Dict[str, Any]]) -> Dict[str, float]:
        """Score chunk quality for a summarization workload (task_data unused)."""
        chunk_quality_scores = [self._evaluate_chunk_quality(chunk.text) for chunk in chunks]

        return {
            'summary_coherence': self._safe_mean(chunk_quality_scores),
            'information_coverage': self._calculate_information_coverage(chunks, text),
            'compression_ratio': self._calculate_compression_ratio(chunks, text),
            'readability_score': self._calculate_readability_score(chunks)
        }

    def _evaluate_qa_task(self, chunks: List["Chunk"], text: str,
                          task_data: Optional[Dict[str, Any]]) -> Dict[str, float]:
        """Score chunk quality for a question-answering workload."""
        if not task_data or 'questions' not in task_data:
            return self._simulate_qa_evaluation(chunks, text)

        questions = task_data['questions']
        answer_scores = [
            self._calculate_answer_score(
                self._find_relevant_chunks(chunks, question), question)
            for question in questions
        ]

        return {
            'answer_accuracy': self._safe_mean(answer_scores),
            'context_sufficiency': self._calculate_context_sufficiency(chunks, questions),
            'response_time': self._simulate_response_time(chunks),
            'answer_completeness': self._safe_mean(answer_scores)
        }

    def _simulate_qa_evaluation(self, chunks: List["Chunk"], text: str) -> Dict[str, float]:
        """Simulate a QA evaluation (random, non-deterministic scores)."""
        return {
            'answer_accuracy': np.random.uniform(0.7, 0.9),
            'context_sufficiency': np.random.uniform(0.6, 0.9),
            'response_time': len(chunks) * 0.1,  # simulated latency
            'answer_completeness': np.random.uniform(0.6, 0.9)
        }

    def _generate_simulated_queries(self, text: str) -> List[str]:
        """Build simulated queries from the most frequent words longer than 3 chars."""
        words = re.findall(r'\b\w+\b', text.lower())
        word_freq = Counter(words)

        # Top-5 frequent words, keeping only ones long enough to be meaningful.
        common_words = [word for word, freq in word_freq.most_common(5) if len(word) > 3]
        queries = [f"关于{word}的信息" for word in common_words]

        return queries if queries else ["默认查询"]

    def _find_relevant_chunks(self, chunks: List["Chunk"], query: str) -> List["Chunk"]:
        """Return chunks sharing at least one whitespace-split word with the query."""
        query_words = set(query.lower().split())
        return [
            chunk for chunk in chunks
            if query_words & set(chunk.text.lower().split())
        ]

    def _calculate_retrieval_score(self, relevant_chunks: List["Chunk"], query: str) -> float:
        """Average word-overlap relevance of the retrieved chunks to the query."""
        if not relevant_chunks:
            return 0.0

        # Hoisted out of the loop: the query's word set is invariant.
        query_words = set(query.lower().split())

        total_relevance = 0.0
        for chunk in relevant_chunks:
            chunk_words = set(chunk.text.lower().split())
            overlap = len(query_words & chunk_words)
            total_relevance += overlap / len(query_words) if query_words else 0

        return total_relevance / len(relevant_chunks)

    def _calculate_chunk_coverage(self, chunks: List["Chunk"], text: str) -> float:
        """Ratio of total chunk characters to source characters.

        May exceed 1.0 when chunks overlap.
        """
        if not text:
            return 0.0
        # Sum lengths directly instead of materializing the joined string.
        return sum(len(chunk.text) for chunk in chunks) / len(text)

    def _simulate_response_time(self, chunks: List["Chunk"]) -> float:
        """Simulated retrieval latency: 0.05s per chunk."""
        return len(chunks) * 0.05

    def _calculate_context_relevance(self, chunks: List["Chunk"], queries: List[str]) -> float:
        """Mean fraction of chunks deemed relevant, averaged over the queries."""
        if not queries or not chunks:
            return 0.0

        relevance_scores = [
            len(self._find_relevant_chunks(chunks, query)) / len(chunks)
            for query in queries
        ]
        return self._safe_mean(relevance_scores)

    def _classify_chunk(self, text: str, categories: List[str]) -> str:
        """Classify a chunk by naive substring match against category names.

        Falls back to the FIRST category when nothing matches, and to
        "unknown" when no categories are given.
        """
        text_lower = text.lower()
        for category in categories:
            if category.lower() in text_lower:
                return category
        return categories[0] if categories else "unknown"

    def _calculate_category_coverage(self, chunks: List["Chunk"], categories: List[str]) -> float:
        """Fraction of categories that at least one chunk was classified into."""
        if not categories:
            return 0.0

        covered_categories = {
            self._classify_chunk(chunk.text, categories) for chunk in chunks
        }
        return len(covered_categories) / len(categories)

    def _calculate_chunk_purity(self, chunks: List["Chunk"]) -> float:
        """Average intra-chunk sentence similarity (proxy for topical purity)."""
        if not chunks:
            return 0.0

        purity_scores = []
        for chunk in chunks:
            sentences = re.split(r'[.!?。！？]', chunk.text)
            if len(sentences) <= 1:
                # A single-sentence chunk is trivially pure.
                purity_scores.append(1.0)
                continue

            # All-pairs similarity over non-empty sentences (O(n^2) per chunk).
            similarities = []
            for i in range(len(sentences) - 1):
                for j in range(i + 1, len(sentences)):
                    if sentences[i].strip() and sentences[j].strip():
                        similarities.append(
                            self._calculate_text_similarity(sentences[i], sentences[j]))

            purity_scores.append(self._safe_mean(similarities))

        return self._safe_mean(purity_scores)

    def _calculate_inter_chunk_consistency(self, chunks: List["Chunk"]) -> float:
        """Average similarity between adjacent chunks (1.0 for fewer than two)."""
        if len(chunks) < 2:
            return 1.0

        consistency_scores = [
            self._calculate_text_similarity(chunks[i].text, chunks[i + 1].text)
            for i in range(len(chunks) - 1)
        ]
        return self._safe_mean(consistency_scores)

    def _calculate_text_similarity(self, text1: str, text2: str) -> float:
        """Cosine similarity of the two texts' TF-IDF vectors.

        Falls back to Jaccard word overlap when vectorization fails
        (e.g. both texts reduce to stop words only).
        """
        if not text1 or not text2:
            return 0.0

        try:
            vectorizer = TfidfVectorizer(max_features=100, stop_words='english')
            tfidf_matrix = vectorizer.fit_transform([text1, text2])
            return float(cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0])
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. Fallback: Jaccard word overlap.
            words1 = set(text1.lower().split())
            words2 = set(text2.lower().split())
            overlap = len(words1 & words2)
            return overlap / len(words1 | words2) if words1 or words2 else 0.0

    def _evaluate_chunk_quality(self, text: str) -> float:
        """Heuristic chunk quality in [0, 1]: length, sentence completeness, density."""
        # Length appropriateness: full credit for 50-500 chars, half otherwise.
        length_score = 1.0 if 50 <= len(text) <= 500 else 0.5

        # Sentence completeness: share of sentences longer than 10 chars.
        sentences = re.split(r'[.!?。！？]', text)
        complete_sentences = sum(1 for s in sentences if len(s.strip()) > 10)
        sentence_score = complete_sentences / len(sentences) if sentences else 0.0

        # Information density: ratio of unique words to total words.
        words = text.split()
        density_score = len(set(words)) / len(words) if words else 0.0

        return float(np.mean([length_score, sentence_score, density_score]))

    def _calculate_information_coverage(self, chunks: List["Chunk"], text: str) -> float:
        """Ratio of total chunk characters to source characters."""
        if not text:
            return 0.0
        return sum(len(chunk.text) for chunk in chunks) / len(text)

    def _calculate_compression_ratio(self, chunks: List["Chunk"], text: str) -> float:
        """Ratio of total chunk length to original length (1.0 = lossless split)."""
        original_length = len(text)
        compressed_length = sum(len(chunk.text) for chunk in chunks)
        return compressed_length / original_length if original_length > 0 else 0.0

    def _calculate_readability_score(self, chunks: List["Chunk"]) -> float:
        """Crude readability score penalizing long sentences and long words."""
        if not chunks:
            return 0.0

        readability_scores = []
        for chunk in chunks:
            sentences = re.split(r'[.!?。！？]', chunk.text)
            words = chunk.text.split()

            if not sentences or not words:
                readability_scores.append(0.0)
                continue

            avg_words_per_sentence = len(words) / len(sentences)
            avg_chars_per_word = sum(len(word) for word in words) / len(words)

            # Linear penalty clamped at 0: 20 words/sentence or 10 chars/word
            # each exhaust the score on their own.
            readability = max(0, 1 - (avg_words_per_sentence / 20 + avg_chars_per_word / 10))
            readability_scores.append(readability)

        return self._safe_mean(readability_scores)

    def _calculate_context_sufficiency(self, chunks: List["Chunk"], questions: List[str]) -> float:
        """Whether retrieved context is long enough to answer each question."""
        if not questions or not chunks:
            return 0.0

        sufficiency_scores = []
        for question in questions:
            relevant_chunks = self._find_relevant_chunks(chunks, question)
            total_relevant_length = sum(len(chunk.text) for chunk in relevant_chunks)
            # Heuristic: 200 characters of relevant context counts as sufficient.
            sufficiency_scores.append(min(1.0, total_relevant_length / 200))

        return self._safe_mean(sufficiency_scores)

    def _calculate_answer_score(self, relevant_chunks: List["Chunk"], question: str) -> float:
        """Quality-weighted relevance of the retrieved chunks for one question."""
        if not relevant_chunks:
            return 0.0

        quality_scores = [
            self._evaluate_chunk_quality(chunk.text)
            * self._calculate_retrieval_score([chunk], question)
            for chunk in relevant_chunks
        ]
        return self._safe_mean(quality_scores)

    def print_evaluation_results(self):
        """Print a per-task summary table and highlight the best chunker."""
        if not self.evaluation_results:
            print("没有评估结果")
            return

        print("\n" + "=" * 80)
        print(f"基于任务的评估结果 ({self.task_type})")
        print("=" * 80)

        df = pd.DataFrame(self.evaluation_results)

        # Columns worth displaying for each task type.
        task_metrics = {
            "rag": ['chunker_name', 'retrieval_accuracy', 'chunk_coverage', 'context_relevance'],
            "classification": ['chunker_name', 'classification_accuracy', 'category_coverage', 'chunk_purity'],
            "summarization": ['chunker_name', 'summary_coherence', 'information_coverage', 'readability_score'],
            "qa": ['chunker_name', 'answer_accuracy', 'context_sufficiency', 'answer_completeness']
        }

        if self.task_type not in task_metrics:
            return

        display_cols = task_metrics[self.task_type]
        display_df = df[display_cols].copy()

        # Format metric columns to 3 decimals (skip the name column).
        for col in display_cols[1:]:
            if col in display_df.columns:
                display_df[col] = display_df[col].apply(lambda x: f"{x:.3f}")

        print(display_df.to_string(index=False))

        # Headline metric used to pick the winner for each task type.
        best_metric = {
            "rag": 'retrieval_accuracy',
            "classification": 'classification_accuracy',
            "summarization": 'summary_coherence',
        }.get(self.task_type, 'answer_accuracy')  # default covers "qa"

        if best_metric in df.columns:
            best_chunker = df.loc[df[best_metric].idxmax()]
            print(f"\n🏆 最佳分块器: {best_chunker['chunker_name']} ({best_metric}: {best_chunker[best_metric]:.3f})")

    def get_task_recommendations(self) -> Dict[str, str]:
        """Return, per quality dimension, the name of the best-scoring chunker.

        Empty dict when nothing has been evaluated or the task type is unknown.
        """
        if not self.evaluation_results:
            return {}

        df = pd.DataFrame(self.evaluation_results)

        # Dimension label -> metric column, per task type.
        dimension_metrics = {
            "rag": {'accuracy': 'retrieval_accuracy',
                    'coverage': 'chunk_coverage',
                    'relevance': 'context_relevance'},
            "classification": {'accuracy': 'classification_accuracy',
                               'coverage': 'category_coverage',
                               'purity': 'chunk_purity'},
            "summarization": {'coherence': 'summary_coherence',
                              'coverage': 'information_coverage',
                              'readability': 'readability_score'},
            "qa": {'accuracy': 'answer_accuracy',
                   'sufficiency': 'context_sufficiency',
                   'completeness': 'answer_completeness'}
        }

        recommendations = {}
        for label, metric in dimension_metrics.get(self.task_type, {}).items():
            recommendations[label] = df.loc[df[metric].idxmax(), 'chunker_name']

        return recommendations


def main():
    """Entry point: run the chunk-evaluation and comparison demo end to end."""
    print("=" * 80)
    print("分块评估和对比演示")
    print("=" * 80)
    
    # Sample document: a Chinese Markdown report on AI, used as the shared
    # input for every chunker under comparison.
    sample_text = """
# 人工智能技术发展报告

## 1. 引言

人工智能（AI）作为21世纪最重要的技术之一，正在深刻改变着我们的生活方式和工作方式。从智能手机到自动驾驶汽车，从医疗诊断到金融分析，AI技术的应用范围不断扩大，影响力日益增强。

本报告旨在全面分析AI技术的发展现状、应用领域以及未来趋势，为相关从业者和决策者提供参考。

## 2. 技术发展历程

### 2.1 早期发展（1950-1980）

人工智能的概念最早可以追溯到1950年，当时艾伦·图灵提出了著名的"图灵测试"。这一时期，研究者们主要专注于符号推理和专家系统的开发。

1956年的达特茅斯会议被认为是AI作为一个独立学科的诞生标志。会议期间，约翰·麦卡锡首次提出了"人工智能"这一术语。

### 2.2 机器学习时代（1980-2010）

随着计算机性能的提升和数据的积累，机器学习开始成为AI研究的核心。这一时期出现了许多重要的算法，如决策树、支持向量机、神经网络等。

1997年，IBM的深蓝计算机战胜国际象棋世界冠军卡斯帕罗夫，标志着AI在特定领域达到了人类专家水平。

### 2.3 深度学习革命（2010至今）

2012年，AlexNet在ImageNet竞赛中的突破性表现，开启了深度学习的新时代。深度神经网络、卷积神经网络、循环神经网络等技术的快速发展，使得AI在图像识别、语音识别、自然语言处理等领域取得了前所未有的成功。

## 3. 核心技术分析

### 3.1 机器学习算法

机器学习是AI的核心技术之一，主要包括以下几类：

**监督学习**：使用标记数据进行训练，包括分类、回归等任务。常用算法有线性回归、逻辑回归、决策树、随机森林、支持向量机等。

**无监督学习**：处理未标记数据，主要用于聚类、降维等任务。K-means、层次聚类、主成分分析等是典型算法。

**强化学习**：通过与环境交互学习最优策略，在游戏、机器人控制等领域表现出色。

### 3.2 深度学习架构

**卷积神经网络（CNN）**：主要用于图像处理，通过卷积层提取图像特征，在图像分类、目标检测等任务中表现优异。

**循环神经网络（RNN）**：适用于序列数据处理，如自然语言处理、时间序列分析等。LSTM和GRU是RNN的重要变体。

**Transformer**：基于注意力机制的架构，在自然语言处理领域取得了革命性进展。BERT、GPT等模型都是基于Transformer架构。

## 4. 应用领域分析

### 4.1 医疗健康

AI在医疗领域的应用日益广泛：

- **医学影像分析**：AI系统可以分析X光片、CT扫描、MRI等医学影像，辅助医生进行疾病诊断。
- **药物研发**：加速新药发现过程，降低研发成本。
- **个性化医疗**：根据患者基因组和病史提供个性化治疗方案。
- **健康管理**：通过可穿戴设备监测健康状况，提供健康建议。

### 4.2 金融服务

金融行业是AI应用的重要领域：

- **风险评估**：AI模型可以更准确地评估信用风险和欺诈风险。
- **算法交易**：高频交易和量化投资策略的优化。
- **客户服务**：智能客服系统提供24/7的客户支持。
- **反欺诈**：实时监测异常交易，防范金融欺诈。

### 4.3 教育培训

AI正在改变传统教育模式：

- **个性化学习**：根据学生学习进度和能力提供个性化教学内容。
- **智能评分**：自动评分系统减轻教师负担。
- **虚拟助教**：AI助教回答学生问题，提供学习指导。
- **教育分析**：分析学习数据，优化教学策略。

## 5. 挑战与机遇

### 5.1 技术挑战

尽管AI取得了巨大进步，但仍面临诸多挑战：

**数据质量**：AI系统的性能高度依赖训练数据的质量和数量。数据偏差、噪声和不完整性都会影响模型性能。

**可解释性**：深度学习模型通常被视为"黑盒"，缺乏可解释性，这在医疗、金融等关键领域是一个重大问题。

**鲁棒性**：AI系统在面对对抗性攻击或分布外数据时表现不佳，需要提高鲁棒性。

**计算效率**：大型AI模型需要大量计算资源，能源消耗和环境影响成为关注焦点。

### 5.2 伦理与社会挑战

AI的发展也带来了重要的伦理和社会问题：

**隐私保护**：AI系统处理大量个人数据，如何保护用户隐私成为一个重要议题。

**算法偏见**：训练数据中的偏见可能导致AI系统产生歧视性结果。

**就业影响**：AI自动化可能取代某些工作岗位，需要考虑社会影响。

**监管框架**：需要建立适当的监管框架，确保AI的负责任发展。

### 5.3 发展机遇

尽管存在挑战，AI的发展前景依然光明：

**跨学科融合**：AI与生物学、物理学、社会科学等学科的融合将产生新的突破。

**边缘计算**：将AI能力部署到终端设备，减少延迟，提高隐私保护。

**多模态AI**：结合视觉、语言、声音等多种模态的AI系统将更接近人类智能。

**AI民主化**：工具和平台的普及使更多组织和个人能够利用AI技术。

## 6. 未来展望

### 6.1 技术趋势

未来几年，AI技术可能呈现以下趋势：

**通用人工智能（AGI）**：虽然真正的AGI还有很长的路要走，但研究正在向更通用的智能系统发展。

**自监督学习**：减少对标记数据的依赖，从未标记数据中学习。

**神经符号AI**：结合神经网络和符号推理的优势，创建更强大的AI系统。

**量子AI**：量子计算与AI的结合可能带来算法效率的突破性提升。

### 6.2 应用前景

AI的应用将继续扩展到更多领域：

**科学发现**：AI加速材料科学、药物发现、气候变化研究等领域的科学突破。

**创意产业**：AI在艺术创作、音乐生成、内容创作等创意领域的应用将更加深入。

**智能制造**：AI驱动的智能制造将提高生产效率，降低成本。

**智慧城市**：AI技术将助力城市管理、交通优化、环境监测等。

## 7. 结论

人工智能技术正处于快速发展的关键时期。虽然面临技术、伦理和社会等多重挑战，但AI的潜力是巨大的。通过负责任的创新和跨学科合作，AI有望解决人类面临的许多重大挑战，创造更美好的未来。

成功的关键在于平衡技术创新与伦理考量，确保AI的发展造福全人类。我们需要建立有效的监管框架，促进国际合作，培养AI人才，共同推动AI技术的健康发展。

随着技术的不断进步，AI将继续重塑我们的世界。拥抱这一变革，同时保持谨慎和智慧，将是我们在AI时代取得成功的关键。
    """
    
    # Run the full cross-chunker comparison.
    # NOTE(review): ComprehensiveComparison is defined elsewhere in this
    # file (outside this excerpt); it appears to evaluate every registered
    # chunker against the sample text — confirm against its definition.
    comparison = ComprehensiveComparison(sample_text)
    comparison.run_comparison()
    
    # Ask the comparison object for per-criterion chunker recommendations.
    recommendations = comparison.get_recommendations()
    
    print("\n" + "=" * 80)
    print("分块器推荐")
    print("=" * 80)
    
    # Each recommendation key may be absent, hence the 'N/A' fallbacks.
    print(f"🚀 速度优先: {recommendations.get('speed', 'N/A')}")
    print(f"🎯 质量优先: {recommendations.get('quality', 'N/A')}")
    print(f"📊 一致性优先: {recommendations.get('consistency', 'N/A')}")
    print(f"🏗️ 结构优先: {recommendations.get('structure', 'N/A')}")
    print(f"🏆 综合最佳: {recommendations.get('overall', 'N/A')}")
    
    print("\n" + "=" * 80)
    print("评估和对比演示完成！")
    print("=" * 80)


# Run the demo only when this file is executed as a script, not on import.
if __name__ == "__main__":
    main()