#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
性能分析工具 - 用于测量 SemanticRetriever 的各个操作耗时
"""

import time
import json
import numpy as np
from semantic_retriever import SemanticRetriever

def measure_time(func, *args, **kwargs):
    """
    Measure the execution time of a single function call.

    Args:
        func: The callable to execute.
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        tuple: (return value of ``func``, elapsed time in milliseconds)
    """
    # perf_counter() is monotonic and high-resolution, so the measurement
    # cannot be skewed by system clock adjustments (unlike time.time()).
    start_time = time.perf_counter()
    result = func(*args, **kwargs)
    end_time = time.perf_counter()
    elapsed_time = (end_time - start_time) * 1000  # convert to milliseconds
    return result, elapsed_time

def run_performance_analysis():
    """
    Run the complete performance analysis suite for SemanticRetriever.

    Measures initialization, resource loading, tokenization, embedding
    generation, retrieval latency (over several top_k values), and cache
    efficiency; prints a consolidated report and saves it to a timestamped
    JSON file in the current directory.
    """
    print("=" * 80)
    print("SemanticRetriever 性能分析工具")
    print("此工具用于测量各个关键操作的耗时，帮助优化性能")
    print("=" * 80)
    
    # Test configuration: knowledge-base path, benchmark questions,
    # top_k sweep values, and repetitions per measurement.
    config = {
        'kb_path': 'rust_knowledge_base/rust_docs_sample.json',
        'test_questions': [
            "什么是所有权？",
            "如何处理借用检查器错误？",
            "Rust和C++的主要区别是什么？",
            "生命周期参数的作用是什么？",
            "如何声明可变变量？"
        ],
        'top_k_values': [1, 3, 5],
        'iterations': 3  # repetitions per operation; results are averaged
    }
    
    print(f"测试配置: {json.dumps(config, ensure_ascii=False, indent=2)}")
    print("\n开始性能测试...\n")
    
    # 1. Initialization time
    print("[测试1] 初始化时间测量")
    retriever, init_time = measure_time(SemanticRetriever, config['kb_path'])
    print(f"初始化完成，耗时: {init_time:.2f} ms")
    
    # 2. Resource loading time (forces all lazily-loaded resources in)
    print("\n[测试2] 资源加载时间测量")
    _, resource_load_time = measure_time(retriever._ensure_resources_loaded)
    print(f"资源加载完成，耗时: {resource_load_time:.2f} ms")
    
    # 3. Knowledge-base size
    print(f"\n知识库信息: {len(retriever.knowledge_base)} 条记录")
    if hasattr(retriever, 'index') and retriever.index is not None:
        print(f"FAISS索引维度: {retriever.index.d}")
    
    # 4. Tokenization performance
    print("\n[测试3] 分词性能测试")
    test_texts = [
        "Rust是一种系统编程语言，注重安全性、并发和内存效率。",
        "The borrow checker ensures memory safety without garbage collection.",
        "可变变量可以通过mut关键字声明。mutable variables can be declared with mut keyword."
    ]
    
    tokenize_times = []
    for text in test_texts:
        # Reuse the measured call's return value instead of tokenizing a
        # second time (the duplicate call would also hit the cache and
        # distort any later cache statistics).
        tokens, tokenize_time = measure_time(retriever._tokenize_text, text)
        tokenize_times.append(tokenize_time)
        print(f"文本: {text[:30]}... | 分词数: {len(tokens)} | 耗时: {tokenize_time:.2f} ms")
    print(f"平均分词时间: {np.mean(tokenize_times):.2f} ms")
    
    # 5. Embedding generation performance
    print("\n[测试4] 嵌入向量生成性能")
    embedding_times = []
    for text in test_texts:
        _, embed_time = measure_time(retriever._get_embedding, text)
        embedding_times.append(embed_time)
        print(f"文本: {text[:30]}... | 耗时: {embed_time:.2f} ms")
    print(f"平均嵌入时间: {np.mean(embedding_times):.2f} ms")
    
    # 6. Retrieval performance
    print("\n[测试5] 检索性能测试")
    retrieval_results = {}
    
    for question in config['test_questions']:
        question_results = {}
        
        for top_k in config['top_k_values']:
            iteration_times = []
            iteration_results = []
            
            for i in range(config['iterations']):
                results, retrieve_time = measure_time(
                    retriever.retrieve, 
                    question=question, 
                    top_k=top_k,
                    debug=False
                )
                iteration_times.append(retrieve_time)
                iteration_results.append(len(results))
            
            avg_time = np.mean(iteration_times)
            # Cast numpy scalars to plain floats; np.float64 values would
            # make json.dumps/json.dump raise TypeError when the report
            # is serialized below.
            question_results[top_k] = {
                'avg_time': float(avg_time),
                'min_time': float(np.min(iteration_times)),
                'max_time': float(np.max(iteration_times)),
                'avg_results': float(np.mean(iteration_results))
            }
            
            # Note: a real newline here — the original "\\n" printed a
            # literal backslash-n instead of breaking the line.
            print(f"问题: '{question}' | top_k={top_k} | 平均时间: {avg_time:.2f} ms | \n" +
                  f"  最小值: {np.min(iteration_times):.2f} ms | 最大值: {np.max(iteration_times):.2f} ms | 平均结果数: {np.mean(iteration_results):.1f}")
        
        retrieval_results[question] = question_results
    
    # 7. Cache efficiency test
    print("\n[测试6] 缓存效率测试")
    # Clear caches so the first pass measures cold performance
    if hasattr(retriever, 'token_cache'):
        retriever.token_cache.clear()
    if hasattr(retriever, 'embedding_cache'):
        retriever.embedding_cache.clear()
    
    # First pass (cold cache)
    first_time = []
    for question in config['test_questions'][:2]:  # only the first two questions
        _, time1 = measure_time(retriever.retrieve, question=question, top_k=3)
        first_time.append(time1)
    avg_first_time = np.mean(first_time)
    
    # Second pass (should be served from cache)
    second_time = []
    for question in config['test_questions'][:2]:
        _, time2 = measure_time(retriever.retrieve, question=question, top_k=3)
        second_time.append(time2)
    avg_second_time = np.mean(second_time)
    
    # float() keeps the value JSON-serializable (np.mean yields np.float64)
    cache_improvement = float((1 - avg_second_time / avg_first_time) * 100) if avg_first_time > 0 else 0
    print(f"第一次检索平均时间: {avg_first_time:.2f} ms")
    print(f"第二次检索平均时间: {avg_second_time:.2f} ms")
    print(f"缓存带来的性能提升: {cache_improvement:.1f}%")
    
    # 8. Cache size statistics
    if hasattr(retriever, 'token_cache'):
        print(f"分词缓存大小: {len(retriever.token_cache)} 条")
    if hasattr(retriever, 'embedding_cache'):
        print(f"嵌入向量缓存大小: {len(retriever.embedding_cache)} 条")
    
    # Consolidated report
    print("\n" + "=" * 80)
    print("综合性能报告")
    print("=" * 80)
    
    # All values are plain Python floats/dicts so json serialization succeeds
    report = {
        'init_time_ms': init_time,
        'resource_load_time_ms': resource_load_time,
        'avg_tokenize_time_ms': float(np.mean(tokenize_times)),
        'avg_embedding_time_ms': float(np.mean(embedding_times)),
        'cache_improvement_percent': cache_improvement,
        'retrieval_performance': retrieval_results
    }
    
    print(json.dumps(report, ensure_ascii=False, indent=2))
    
    # Save the report to a timestamped file; failure to save is non-fatal
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    report_file = f"performance_report_{timestamp}.json"
    try:
        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)
        print(f"\n报告已保存到: {report_file}")
    except Exception as e:
        print(f"\n警告: 保存报告失败: {str(e)}")
    
    print("\n性能分析完成！")
    print("建议在代码修改前后各运行一次，比较性能差异。")

# Run the full analysis only when executed as a script (not on import).
if __name__ == "__main__":
    run_performance_analysis()