"""
性能基准测试
测试chunk_demo包中各个分块器的性能表现
"""
import time
import memory_profiler
import psutil
import os
import json
import pandas as pd
import numpy as np
from typing import List, Dict, Any, Tuple
import matplotlib.pyplot as plt
import seaborn as sns
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import queue

# 导入被测试的模块
from chunk_demo import (
    FixedLengthChunker, SentenceChunker, ParagraphChunker,
    TfidfSemanticChunker, ClusteringChunker, MarkdownChunker,
    HTMLChunker, JSONChunker, XMLChunker, ChunkPipeline,
    ReinforcementLearningChunker, NeuralNetworkChunker,
    StreamingChunker, ChunkEvaluator, ChunkOptimizer
)

# Test data
class TestDataGenerator:
    """Generators for benchmark input texts of various sizes and formats.

    All methods are static and deterministic; each call returns a fresh
    string, so callers may mutate or re-chunk the result freely.
    """

    @staticmethod
    def generate_small_text() -> str:
        """Generate a small text sample (~1KB of characters)."""
        return "这是一个测试文本。它包含多个句子和段落。" * 50

    @staticmethod
    def generate_medium_text() -> str:
        """Generate a medium text sample (~100KB) by repeating a base paragraph."""
        base_text = """
        人工智能（AI）是计算机科学的一个分支，它致力于创建能够执行通常需要人类智能的任务的机器。
        这些任务包括学习、推理、问题解决、感知和语言理解。AI技术在过去几年中取得了巨大进展，
        特别是在机器学习、深度学习和自然语言处理等领域。
        
        机器学习是AI的核心技术之一，它使计算机能够从数据中学习，而无需明确编程。
        深度学习是机器学习的一个子集，它使用神经网络来模拟人脑的工作方式。
        自然语言处理则专注于使计算机能够理解和生成人类语言。
        
        AI的应用范围非常广泛，从自动驾驶汽车到医疗诊断，从金融分析到创意艺术。
        随着技术的不断发展，AI将在更多领域发挥重要作用，改变我们的生活方式和工作方式。
        """
        return base_text * 100

    @staticmethod
    def generate_large_text() -> str:
        """Generate a large text sample (~1MB): ten copies of the medium text."""
        return TestDataGenerator.generate_medium_text() * 10

    @staticmethod
    def generate_structured_text() -> str:
        """Generate a Markdown-structured document (headings, lists, emphasis)."""
        return """
        # 人工智能技术发展报告
        
        ## 1. 引言
        
        人工智能（AI）作为21世纪最重要的技术之一，正在深刻改变着我们的生活方式和工作方式。
        
        ### 1.1 背景
        
        从智能手机到自动驾驶汽车，从医疗诊断到金融分析，AI技术的应用范围不断扩大。
        
        ### 1.2 目的
        
        本报告旨在全面分析AI技术的发展现状、应用领域以及未来趋势。
        
        ## 2. 技术发展历程
        
        ### 2.1 早期发展（1950-1980）
        
        人工智能的概念最早可以追溯到1950年，当时艾伦·图灵提出了著名的"图灵测试"。
        
        #### 2.1.1 重要里程碑
        
        1956年的达特茅斯会议被认为是AI作为一个独立学科的诞生标志。
        
        ### 2.2 机器学习时代（1980-2010）
        
        随着计算机性能的提升和数据的积累，机器学习开始成为AI研究的核心。
        
        ## 3. 核心技术分析
        
        ### 3.1 机器学习算法
        
        **监督学习**：使用标记数据进行训练，包括分类、回归等任务。
        
        **无监督学习**：处理未标记数据，主要用于聚类、降维等任务。
        
        **强化学习**：通过与环境交互学习最优策略。
        
        ## 4. 应用领域分析
        
        ### 4.1 医疗健康
        
        - **医学影像分析**：AI系统可以分析X光片、CT扫描、MRI等医学影像
        - **药物研发**：加速新药发现过程，降低研发成本
        - **个性化医疗**：根据患者基因组和病史提供个性化治疗方案
        
        ### 4.2 金融服务
        
        - **风险评估**：AI模型可以更准确地评估信用风险和欺诈风险
        - **算法交易**：高频交易和量化投资策略的优化
        - **客户服务**：智能客服系统提供24/7的客户支持
        
        ## 5. 挑战与机遇
        
        ### 5.1 技术挑战
        
        尽管AI取得了巨大进步，但仍面临诸多挑战：
        
        **数据质量**：AI系统的性能高度依赖训练数据的质量和数量。
        
        **可解释性**：深度学习模型通常被视为"黑盒"，缺乏可解释性。
        
        ### 5.2 发展机遇
        
        尽管存在挑战，AI的发展前景依然光明：
        
        **跨学科融合**：AI与生物学、物理学、社会科学等学科的融合将产生新的突破。
        
        **边缘计算**：将AI能力部署到终端设备，减少延迟，提高隐私保护。
        
        ## 6. 结论
        
        人工智能技术正处于快速发展的关键时期。虽然面临技术、伦理和社会等多重挑战，
        但AI的潜力是巨大的。通过负责任的创新和跨学科合作，AI有望解决人类面临的许多重大挑战。
        """

    @staticmethod
    def generate_json_data() -> str:
        """Generate a nested JSON document (pretty-printed, non-ASCII preserved).

        Uses the module-level ``json`` import; the original carried a redundant
        function-local ``import json``.
        """
        data = {
            "report": {
                "title": "AI技术发展报告",
                "sections": [
                    {
                        "id": 1,
                        "title": "引言",
                        "content": "人工智能是计算机科学的重要分支...",
                        "subsections": [
                            {"id": "1.1", "title": "背景", "content": "AI技术应用范围不断扩大..."},
                            {"id": "1.2", "title": "目的", "content": "分析技术发展现状..."}
                        ]
                    },
                    {
                        "id": 2,
                        "title": "技术发展",
                        "content": "AI技术经历了多个发展阶段...",
                        "subsections": [
                            {"id": "2.1", "title": "早期发展", "content": "从1950年开始..."},
                            {"id": "2.2", "title": "机器学习时代", "content": "1980-2010年..."}
                        ]
                    }
                ],
                "metadata": {
                    "author": "AI研究团队",
                    "date": "2024-01-01",
                    "version": "1.0"
                }
            }
        }
        return json.dumps(data, ensure_ascii=False, indent=2)


class PerformanceBenchmark:
    """Benchmark harness measuring time, memory and CPU usage of chunkers.

    Per-chunker results are accumulated in ``self.results`` and later
    summarized by :meth:`generate_report` (JSON dump, plots, text report).
    """

    def __init__(self):
        # Accumulated result dicts from benchmark_chunker runs.
        self.results: List[Dict[str, Any]] = []
        # Handle to the current process, used for CPU utilization sampling.
        self.process = psutil.Process()

    def measure_memory_usage(self, func, *args, **kwargs) -> Tuple[Any, float]:
        """Run *func* and return ``(result, memory_delta_mb)``.

        The delta is a single before/after RSS sample via memory_profiler,
        so it is a coarse estimate (GC activity can even make it negative).
        """
        mem_before = memory_profiler.memory_usage()[0]
        result = func(*args, **kwargs)
        mem_after = memory_profiler.memory_usage()[0]

        return result, mem_after - mem_before

    def measure_cpu_usage(self, func, *args, **kwargs) -> Tuple[Any, float]:
        """Run *func* and return ``(result, avg_cpu_percent_during_call)``.

        ``psutil``'s ``cpu_percent(None)`` reports utilization *since the
        previous call*: the first call below only resets the measurement
        window, and the second returns the average utilization while *func*
        ran.  (The original subtracted two consecutive readings, which is
        not a meaningful quantity.)
        """
        self.process.cpu_percent(None)  # reset the measurement window
        result = func(*args, **kwargs)
        cpu_usage = self.process.cpu_percent(None)

        return result, cpu_usage

    def benchmark_chunker(self, chunker, text: str, chunker_name: str, 
                         iterations: int = 5) -> Dict[str, Any]:
        """Benchmark a single chunker over *iterations* runs.

        Each iteration runs ``chunker.chunk`` exactly once and measures wall
        time, memory delta and CPU utilization for that single run.  (The
        original ran the chunker twice per iteration — once for memory and
        once for CPU — and timed with ``time.time()``, which has lower
        resolution and can jump; ``perf_counter`` is monotonic.)

        The result dict is appended to ``self.results`` and also returned.
        """
        print(f"基准测试: {chunker_name}")

        times = []
        memory_usages = []
        cpu_usages = []
        chunk_counts = []

        for _ in range(iterations):
            self.process.cpu_percent(None)  # reset CPU measurement window
            start_time = time.perf_counter()

            # Single measured run: memory delta sampled around the call.
            chunks, memory_usage = self.measure_memory_usage(chunker.chunk, text)

            execution_time = time.perf_counter() - start_time
            cpu_usage = self.process.cpu_percent(None)

            times.append(execution_time)
            memory_usages.append(memory_usage)
            cpu_usages.append(cpu_usage)
            chunk_counts.append(len(chunks))

        # Aggregate statistics over the iterations.
        avg_time = np.mean(times)
        std_time = np.std(times)
        avg_memory = np.mean(memory_usages)
        avg_cpu = np.mean(cpu_usages)
        avg_chunks = np.mean(chunk_counts)

        # Throughput in MB/s based on the UTF-8 byte size of the input.
        text_size_mb = len(text.encode('utf-8')) / (1024 * 1024)
        throughput_mb_s = text_size_mb / avg_time if avg_time > 0 else 0

        result = {
            "chunker_name": chunker_name,
            "text_size_mb": text_size_mb,
            "iterations": iterations,
            "avg_time_s": avg_time,
            "std_time_s": std_time,
            "avg_memory_mb": avg_memory,
            "avg_cpu_percent": avg_cpu,
            "avg_chunk_count": avg_chunks,
            "throughput_mb_s": throughput_mb_s,
            "all_times": times,
            "all_memory_usages": memory_usages,
            "all_cpu_usages": cpu_usages
        }

        self.results.append(result)
        return result

    def benchmark_all_chunkers(self, text: str, text_name: str = "unknown") -> Dict[str, Any]:
        """Benchmark every chunker type on *text*; failures are recorded, not raised."""
        print(f"\n开始基准测试 - 文本: {text_name}")
        print("=" * 60)

        chunkers = [
            (FixedLengthChunker(chunk_size=500), "FixedLengthChunker"),
            (SentenceChunker(max_sentences=3), "SentenceChunker"),
            (ParagraphChunker(), "ParagraphChunker"),
            (TfidfSemanticChunker(max_chunk_size=500), "TfidfSemanticChunker"),
            (ClusteringChunker(n_clusters=3, max_chunk_size=500), "ClusteringChunker"),
            (MarkdownChunker(max_chunk_size=500), "MarkdownChunker"),
            (HTMLChunker(max_chunk_size=500), "HTMLChunker"),
            (JSONChunker(max_chunk_size=500), "JSONChunker"),
            (XMLChunker(max_chunk_size=500), "XMLChunker"),
        ]

        results = {}

        for chunker, name in chunkers:
            try:
                result = self.benchmark_chunker(chunker, text, name, iterations=3)
                results[name] = result
                print(f"✓ {name}: {result['avg_time_s']:.3f}s, {result['throughput_mb_s']:.2f}MB/s")
            except Exception as e:
                # A failing chunker should not abort the whole suite.
                print(f"✗ {name}: 错误 - {str(e)}")
                results[name] = {"error": str(e)}

        return results

    def benchmark_pipelines(self, text: str) -> Dict[str, Any]:
        """Benchmark the three predefined pipelines on *text*."""
        print("\n开始管道基准测试")
        print("=" * 60)

        # Pipelines of increasing complexity.
        pipelines = {
            "Simple Pipeline": self._create_simple_pipeline(),
            "Hybrid Pipeline": self._create_hybrid_pipeline(),
            "Multi-stage Pipeline": self._create_multi_stage_pipeline()
        }

        results = {}

        for name, pipeline in pipelines.items():
            try:
                result = self.benchmark_pipeline(pipeline, text, name)
                results[name] = result
                print(f"✓ {name}: {result['avg_time_s']:.3f}s, {result['throughput_mb_s']:.2f}MB/s")
            except Exception as e:
                print(f"✗ {name}: 错误 - {str(e)}")
                results[name] = {"error": str(e)}

        return results

    def benchmark_pipeline(self, pipeline, text: str, pipeline_name: str, 
                         iterations: int = 3) -> Dict[str, Any]:
        """Benchmark a single pipeline's ``process`` call over *iterations* runs.

        Unlike :meth:`benchmark_chunker`, pipeline results are returned but
        NOT appended to ``self.results`` (the report only covers chunkers).
        """
        print(f"基准测试管道: {pipeline_name}")

        times = []
        memory_usages = []
        chunk_counts = []

        for _ in range(iterations):
            start_time = time.perf_counter()

            # Run the pipeline once, sampling memory around the call.
            pipeline_result, memory_usage = self.measure_memory_usage(pipeline.process, text)

            execution_time = time.perf_counter() - start_time

            times.append(execution_time)
            memory_usages.append(memory_usage)
            chunk_counts.append(len(pipeline_result.final_chunks))

        # Aggregate statistics.
        avg_time = np.mean(times)
        std_time = np.std(times)
        avg_memory = np.mean(memory_usages)
        avg_chunks = np.mean(chunk_counts)

        # Throughput in MB/s based on UTF-8 byte size.
        text_size_mb = len(text.encode('utf-8')) / (1024 * 1024)
        throughput_mb_s = text_size_mb / avg_time if avg_time > 0 else 0

        result = {
            "pipeline_name": pipeline_name,
            "text_size_mb": text_size_mb,
            "iterations": iterations,
            "avg_time_s": avg_time,
            "std_time_s": std_time,
            "avg_memory_mb": avg_memory,
            "avg_chunk_count": avg_chunks,
            "throughput_mb_s": throughput_mb_s,
            "all_times": times,
            "all_memory_usages": memory_usages
        }

        return result

    def benchmark_scalability(self) -> Dict[str, Any]:
        """Benchmark one chunker against small/medium/large inputs."""
        print("\n开始可扩展性测试")
        print("=" * 60)

        # Inputs of increasing size from the shared generator.
        text_sizes = {
            "Small (1KB)": TestDataGenerator.generate_small_text(),
            "Medium (100KB)": TestDataGenerator.generate_medium_text(),
            "Large (1MB)": TestDataGenerator.generate_large_text()
        }

        chunker = FixedLengthChunker(chunk_size=500)
        scalability_results = {}

        for size_name, text in text_sizes.items():
            try:
                result = self.benchmark_chunker(chunker, text, f"FixedLengthChunker-{size_name}", iterations=3)
                scalability_results[size_name] = result
                print(f"✓ {size_name}: {result['avg_time_s']:.3f}s")
            except Exception as e:
                print(f"✗ {size_name}: 错误 - {str(e)}")
                scalability_results[size_name] = {"error": str(e)}

        return scalability_results

    def benchmark_concurrent_processing(self, text: str) -> Dict[str, Any]:
        """Benchmark chunking of *text* at several thread-pool concurrency levels."""
        print("\n开始并发处理测试")
        print("=" * 60)

        chunker = FixedLengthChunker(chunk_size=500)

        # Thread counts to sweep.
        concurrency_levels = [1, 2, 4, 8]
        concurrent_results = {}

        for level in concurrency_levels:
            try:
                result = self.benchmark_concurrent(chunker, text, level)
                concurrent_results[f"Concurrency-{level}"] = result
                print(f"✓ Concurrency-{level}: {result['avg_time_s']:.3f}s")
            except Exception as e:
                print(f"✗ Concurrency-{level}: 错误 - {str(e)}")
                concurrent_results[f"Concurrency-{level}"] = {"error": str(e)}

        return concurrent_results

    def benchmark_concurrent(self, chunker, text: str, concurrency_level: int, 
                           iterations: int = 3) -> Dict[str, Any]:
        """Time *chunker* over *text* split across ``concurrency_level`` threads.

        The text is sliced into roughly equal character ranges, one per
        worker; only the wall time of the fan-out/fan-in is measured.  Note
        that slicing by character can split chunks at part boundaries, so the
        combined chunking may differ from a single-threaded run.
        """
        print(f"基准测试并发级别: {concurrency_level}")

        times = []

        for _ in range(iterations):
            start_time = time.perf_counter()

            # Fan out one slice of the text per worker.
            with ThreadPoolExecutor(max_workers=concurrency_level) as executor:
                part_size = len(text) // concurrency_level
                futures = []

                for j in range(concurrency_level):
                    start_idx = j * part_size
                    # Last worker absorbs the remainder of the division.
                    end_idx = (j + 1) * part_size if j < concurrency_level - 1 else len(text)
                    part_text = text[start_idx:end_idx]

                    futures.append(executor.submit(chunker.chunk, part_text))

                # Fan in: collect all chunks as workers finish.
                all_chunks = []
                for future in as_completed(futures):
                    all_chunks.extend(future.result())

            times.append(time.perf_counter() - start_time)

        # Aggregate statistics.
        avg_time = np.mean(times)
        std_time = np.std(times)

        # Throughput in MB/s based on UTF-8 byte size.
        text_size_mb = len(text.encode('utf-8')) / (1024 * 1024)
        throughput_mb_s = text_size_mb / avg_time if avg_time > 0 else 0

        result = {
            "concurrency_level": concurrency_level,
            "text_size_mb": text_size_mb,
            "iterations": iterations,
            "avg_time_s": avg_time,
            "std_time_s": std_time,
            "throughput_mb_s": throughput_mb_s,
            "all_times": times
        }

        return result

    def _create_simple_pipeline(self) -> 'ChunkPipeline':
        """Create a one-step pipeline (fixed-length chunking only)."""
        # ChunkPipeline is imported at module level; the original re-imported it here.
        pipeline = ChunkPipeline("Simple")
        pipeline.add_step("fixed", FixedLengthChunker(), {"chunk_size": 500})
        return pipeline

    def _create_hybrid_pipeline(self) -> 'ChunkPipeline':
        """Create a two-step pipeline: sentence split, then semantic merge."""
        pipeline = ChunkPipeline("Hybrid")
        pipeline.add_step("sentence", SentenceChunker(), {"max_sentences": 3})
        pipeline.add_step("semantic", TfidfSemanticChunker(), {"max_chunk_size": 500})
        return pipeline

    def _create_multi_stage_pipeline(self) -> 'ChunkPipeline':
        """Create a three-step pipeline: coarse split, refine, semantic optimize."""
        pipeline = ChunkPipeline("MultiStage")
        pipeline.add_step("initial", FixedLengthChunker(), {"chunk_size": 1000})
        pipeline.add_step("refine", SentenceChunker(), {"max_sentences": 5})
        pipeline.add_step("optimize", TfidfSemanticChunker(), {"max_chunk_size": 600})
        return pipeline

    def generate_report(self, output_dir: str = "benchmark_results"):
        """Write the accumulated results as JSON, plots, and a text report."""
        # os is imported at module level; the original re-imported it here.
        os.makedirs(output_dir, exist_ok=True)

        # Raw data dump; default=str covers numpy scalars in the results.
        with open(os.path.join(output_dir, "benchmark_results.json"), "w", encoding="utf-8") as f:
            json.dump(self.results, f, ensure_ascii=False, indent=2, default=str)

        # Charts (PNG files in output_dir).
        self._generate_visualizations(output_dir)

        # Human-readable summary.
        self._generate_text_report(output_dir)

        print(f"\n基准测试报告已生成到: {output_dir}/")

    def _generate_visualizations(self, output_dir: str):
        """Render comparison bar charts, distribution boxplots and a heatmap."""
        if not self.results:
            return  # nothing benchmarked yet

        df = pd.DataFrame(self.results)

        # 1. Performance comparison grid (2x3 subplots).
        plt.figure(figsize=(15, 10))

        # Execution time comparison.
        plt.subplot(2, 3, 1)
        plt.bar(df['chunker_name'], df['avg_time_s'])
        plt.title('平均执行时间对比')
        plt.ylabel('时间 (秒)')
        plt.xticks(rotation=45, ha='right')

        # Memory usage comparison.
        plt.subplot(2, 3, 2)
        plt.bar(df['chunker_name'], df['avg_memory_mb'])
        plt.title('平均内存使用对比')
        plt.ylabel('内存 (MB)')
        plt.xticks(rotation=45, ha='right')

        # Throughput comparison.
        plt.subplot(2, 3, 3)
        plt.bar(df['chunker_name'], df['throughput_mb_s'])
        plt.title('吞吐量对比')
        plt.ylabel('吞吐量 (MB/s)')
        plt.xticks(rotation=45, ha='right')

        # Chunk count comparison.
        plt.subplot(2, 3, 4)
        plt.bar(df['chunker_name'], df['avg_chunk_count'])
        plt.title('平均块数量对比')
        plt.ylabel('块数量')
        plt.xticks(rotation=45, ha='right')

        # Execution time distribution boxplot.
        # NOTE(review): the `labels=` kwarg is deprecated in matplotlib >= 3.9
        # (renamed `tick_labels`); kept for compatibility with older versions.
        plt.subplot(2, 3, 5)
        plt.boxplot(list(df['all_times']), labels=df['chunker_name'])
        plt.title('执行时间分布')
        plt.ylabel('时间 (秒)')
        plt.xticks(rotation=45, ha='right')

        # Memory usage distribution boxplot.
        plt.subplot(2, 3, 6)
        plt.boxplot(list(df['all_memory_usages']), labels=df['chunker_name'])
        plt.title('内存使用分布')
        plt.ylabel('内存 (MB)')
        plt.xticks(rotation=45, ha='right')

        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'performance_comparison.png'), dpi=300, bbox_inches='tight')
        plt.show()

        # 2. Correlation heatmap of the numeric metrics.
        plt.figure(figsize=(12, 8))

        numeric_cols = ['avg_time_s', 'avg_memory_mb', 'avg_chunk_count', 'throughput_mb_s']
        correlation_matrix = df[numeric_cols].corr()

        sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', center=0,
                   square=True, linewidths=0.5)
        plt.title('性能指标相关性热力图', size=16, weight='bold')
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'correlation_heatmap.png'), dpi=300, bbox_inches='tight')
        plt.show()

    def _generate_text_report(self, output_dir: str):
        """Write a plain-text summary of the accumulated chunker results."""
        if not self.results:
            return  # nothing benchmarked yet

        report_path = os.path.join(output_dir, "benchmark_report.txt")

        with open(report_path, "w", encoding="utf-8") as f:
            f.write("分块器性能基准测试报告\n")
            f.write("=" * 60 + "\n\n")

            # Overall statistics.
            f.write("总体统计\n")
            f.write("-" * 30 + "\n")

            df = pd.DataFrame(self.results)

            f.write(f"测试的分块器数量: {len(df)}\n")
            f.write(f"平均执行时间: {df['avg_time_s'].mean():.3f}s\n")
            f.write(f"平均内存使用: {df['avg_memory_mb'].mean():.2f}MB\n")
            f.write(f"平均吞吐量: {df['throughput_mb_s'].mean():.2f}MB/s\n")

            # Best and worst performers.
            f.write(f"\n最快分块器: {df.loc[df['avg_time_s'].idxmin(), 'chunker_name']} ({df['avg_time_s'].min():.3f}s)\n")
            f.write(f"最慢分块器: {df.loc[df['avg_time_s'].idxmax(), 'chunker_name']} ({df['avg_time_s'].max():.3f}s)\n")
            f.write(f"最高吞吐量: {df.loc[df['throughput_mb_s'].idxmax(), 'chunker_name']} ({df['throughput_mb_s'].max():.2f}MB/s)\n")
            f.write(f"最低内存使用: {df.loc[df['avg_memory_mb'].idxmin(), 'chunker_name']} ({df['avg_memory_mb'].min():.2f}MB)\n")

            # Per-chunker details.
            f.write("\n详细结果\n")
            f.write("-" * 30 + "\n")

            for _, row in df.iterrows():
                f.write(f"\n{row['chunker_name']}:\n")
                f.write(f"  平均时间: {row['avg_time_s']:.3f}s (±{row['std_time_s']:.3f}s)\n")
                f.write(f"  内存使用: {row['avg_memory_mb']:.2f}MB\n")
                f.write(f"  吞吐量: {row['throughput_mb_s']:.2f}MB/s\n")
                f.write(f"  块数量: {row['avg_chunk_count']:.1f}\n")

            # Heuristic recommendations based on the observed spread.
            f.write("\n性能建议\n")
            f.write("-" * 30 + "\n")

            if df['avg_time_s'].max() / df['avg_time_s'].min() > 10:
                f.write("- 性能差异较大，建议根据使用场景选择合适的分块器\n")

            if df['avg_memory_mb'].max() > 100:
                f.write("- 某些分块器内存使用较高，建议在大规模处理时注意内存管理\n")

            if df['throughput_mb_s'].max() > 10:
                f.write("- 部分分块器具有很高的吞吐量，适合大规模文本处理\n")

            f.write("- 建议在实际应用中进行小规模测试，选择最适合的分块策略\n")

def main():
    """Entry point: run the full performance benchmark suite and write a report.

    Runs per-text chunker and pipeline benchmarks, a scalability sweep and a
    concurrency sweep, then persists everything via ``generate_report``.
    """
    print("分块器性能基准测试")
    print("=" * 60)

    benchmark = PerformanceBenchmark()

    # Benchmark inputs of different sizes and formats.
    test_texts = {
        "Small Text": TestDataGenerator.generate_small_text(),
        "Medium Text": TestDataGenerator.generate_medium_text(),
        "Structured Text": TestDataGenerator.generate_structured_text(),
        "JSON Data": TestDataGenerator.generate_json_data()
    }

    # Per-text benchmarks: individual chunkers, then pipelines.  Results are
    # accumulated on the benchmark instance, so the locals the original bound
    # here (results, pipeline_results, ...) were unused and have been dropped.
    for text_name, text in test_texts.items():
        print(f"\n测试文本: {text_name}")
        print(f"文本大小: {len(text.encode('utf-8')) / 1024:.2f}KB")

        benchmark.benchmark_all_chunkers(text, text_name)
        benchmark.benchmark_pipelines(text)

    # Scalability across input sizes.
    benchmark.benchmark_scalability()

    # Concurrency scaling on a medium-sized text.
    medium_text = TestDataGenerator.generate_medium_text()
    benchmark.benchmark_concurrent_processing(medium_text)

    # Persist JSON data, plots and the text report.
    benchmark.generate_report()

    print("\n" + "=" * 60)
    print("性能基准测试完成！")
    print("=" * 60)

# Run the benchmark suite only when executed as a script (not on import).
if __name__ == "__main__":
    main()