#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
性能测试脚本 - 用于测量semantic_retriever.py的性能指标
可以在代码修改前后运行，对比性能变化
"""

import time
import json
import os
from typing import Dict, List, Tuple
from semantic_retriever import SemanticRetriever


def test_retrieval_performance(retriever: SemanticRetriever, test_questions: List[str], iterations: int = 5) -> Dict:
    """
    测试检索性能
    
    Args:
        retriever: 语义检索实例
        test_questions: 测试问题列表
        iterations: 每个问题重复测试的次数
    
    Returns:
        Dict: 包含性能指标的字典
    """
    print(f"\n开始测试检索性能（每个问题重复{iterations}次）...")
    
    results = {
        "question_times": [],
        "total_time": 0,
        "average_time_per_question": 0
    }
    
    total_start_time = time.time()
    
    for i, question in enumerate(test_questions):
        question_total_time = 0
        
        print(f"测试问题 {i+1}/{len(test_questions)}: '{question}'")
        
        for j in range(iterations):
            start_time = time.time()
            retrieved_docs = retriever.retrieve(question, top_k=3)
            end_time = time.time()
            
            elapsed = (end_time - start_time) * 1000  # 转换为毫秒
            question_total_time += elapsed
            
            print(f"  迭代 {j+1}/{iterations}: {elapsed:.2f}ms, 返回文档数: {len(retrieved_docs)}")
        
        avg_time = question_total_time / iterations
        results["question_times"].append({
            "question": question,
            "average_time_ms": avg_time,
            "total_time_ms": question_total_time
        })
        print(f"  平均时间: {avg_time:.2f}ms")
    
    total_end_time = time.time()
    total_time_ms = (total_end_time - total_start_time) * 1000
    avg_time_per_question = total_time_ms / (len(test_questions) * iterations)
    
    results["total_time"] = total_time_ms
    results["average_time_per_question"] = avg_time_per_question
    
    print(f"\n总测试时间: {total_time_ms:.2f}ms")
    print(f"平均每个问题检索时间: {avg_time_per_question:.2f}ms")
    
    return results


def test_initialization_performance(kb_path: str = 'rust_knowledge_base/rust_docs_sample.json', iterations: int = 3) -> Dict:
    """
    Measure how long a SemanticRetriever takes to initialize.

    Args:
        kb_path: Path to the knowledge-base JSON file.
        iterations: Number of times to construct a fresh retriever.

    Returns:
        Dict: per-iteration init times plus totals/averages, in milliseconds.
    """
    print(f"\n开始测试初始化性能（重复{iterations}次）...")

    results = {
        "init_times": [],
        "total_time": 0,
        "average_time": 0
    }

    # perf_counter() is monotonic and high-resolution; time.time() can jump
    # when the system clock is adjusted, which corrupts benchmark numbers.
    total_start_time = time.perf_counter()

    for i in range(iterations):
        print(f"初始化迭代 {i+1}/{iterations}...")
        start_time = time.perf_counter()

        # Build a brand-new retriever so every iteration pays the full cost.
        retriever = SemanticRetriever(kb_path)

        elapsed = (time.perf_counter() - start_time) * 1000  # to milliseconds

        results["init_times"].append(elapsed)
        print(f"  初始化时间: {elapsed:.2f}ms, 知识库文档数: {len(retriever.knowledge_base)}")

    total_time_ms = (time.perf_counter() - total_start_time) * 1000
    # Guard against iterations=0 so we report 0 instead of crashing.
    avg_time = total_time_ms / iterations if iterations else 0

    results["total_time"] = total_time_ms
    results["average_time"] = avg_time

    print(f"\n总初始化时间: {total_time_ms:.2f}ms")
    print(f"平均初始化时间: {avg_time:.2f}ms")

    return results


def test_embedding_generation(retriever: SemanticRetriever, texts: List[str]) -> Dict:
    """
    测试嵌入向量生成性能
    
    Args:
        retriever: 语义检索实例
        texts: 测试文本列表
    
    Returns:
        Dict: 包含性能指标的字典
    """
    if not retriever.model:
        print("模型未初始化，跳过嵌入向量生成测试")
        return {"error": "Model not initialized"}
    
    print(f"\n开始测试嵌入向量生成性能...")
    
    # 测试单个短文本
    short_text = "这是一个简短的测试文本"
    start_time = time.time()
    short_embedding = retriever.model.encode([short_text])
    short_time = (time.time() - start_time) * 1000
    
    # 测试多个文本
    start_time = time.time()
    embeddings = retriever.model.encode(texts)
    batch_time = (time.time() - start_time) * 1000
    avg_time_per_text = batch_time / len(texts)
    
    # 正确处理numpy数组的维度获取
    embedding_dimension = embeddings.shape[1] if embeddings.size > 0 else 0
    
    results = {
        "short_text_time_ms": short_time,
        "batch_test": {
            "total_texts": len(texts),
            "total_time_ms": batch_time,
            "average_time_per_text_ms": avg_time_per_text,
            "embedding_dimension": embedding_dimension
        }
    }
    
    print(f"单个短文本嵌入时间: {short_time:.2f}ms")
    print(f"批量文本嵌入 ({len(texts)}个文本): 总时间 {batch_time:.2f}ms, 平均每文本 {avg_time_per_text:.2f}ms")
    print(f"嵌入向量维度: {embedding_dimension}")
    
    return results


def run_comprehensive_benchmark(kb_path: str = 'rust_knowledge_base/rust_docs_sample.json', iterations: int = 3) -> Dict:
    """
    Run the full benchmark suite and persist the results to a JSON file.

    Stages: retriever initialization, embedding generation, and question
    retrieval. The combined results are written to
    ``performance_benchmark_<ts>.json`` in the current directory and a
    summary of key metrics is printed.

    Args:
        kb_path: Path to the knowledge-base JSON file.
        iterations: Repetitions used by each benchmark stage.

    Returns:
        Dict: the combined benchmark results (same content as the file).
    """
    print("=" * 60)
    print("开始综合性能基准测试")
    print(f"测试配置: 知识库='{kb_path}', 迭代次数={iterations}")
    print("=" * 60)

    # Monotonic clock for duration measurement; time.time() may jump if the
    # wall clock is adjusted while the benchmark runs.
    overall_start_time = time.perf_counter()

    # Stage 1: construction cost of the retriever.
    init_results = test_initialization_performance(kb_path, iterations)

    # One shared retriever instance for the remaining stages.
    print("\n创建检索器实例用于后续测试...")
    retriever = SemanticRetriever(kb_path)

    # Fixed question set so runs stay comparable across code changes.
    test_questions = [
        "什么是Rust的所有权？",
        "如何在Rust中声明可变变量？",
        "解释一下Rust的生命周期概念",
        "Rust中的借用规则是什么？",
        "如何处理Rust中的错误？"
    ]

    # Sample up to 5 documents (first 200 chars each) for the embedding test.
    test_texts = [doc.get('content', '')[:200] for doc in retriever.knowledge_base[:5]]

    # Stage 2: embedding generation.
    embedding_results = test_embedding_generation(retriever, test_texts)

    # Stage 3: retrieval latency.
    retrieval_results = test_retrieval_performance(retriever, test_questions, iterations)

    overall_time_ms = (time.perf_counter() - overall_start_time) * 1000

    # time.strftime()/time.time() below are intentionally wall-clock: they
    # label the run (timestamp, filename), they do not measure a duration.
    results = {
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
        "overall_test_time_ms": overall_time_ms,
        "initialization": init_results,
        "embedding_generation": embedding_results,
        "retrieval": retrieval_results
    }

    output_file = f"performance_benchmark_{int(time.time())}.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    print("\n" + "=" * 60)
    print(f"性能测试完成，结果已保存到 {output_file}")
    print(f"总测试时间: {overall_time_ms:.2f}ms")
    print("=" * 60)

    # Key-metric summary; embedding metrics are skipped if the model failed
    # to load (test_embedding_generation returns an error dict in that case).
    print("\n性能指标摘要:")
    print(f"1. 初始化时间: {init_results['average_time']:.2f}ms")
    if 'batch_test' in embedding_results:
        print(f"2. 文本嵌入时间: {embedding_results['batch_test']['average_time_per_text_ms']:.2f}ms/文本")
    print(f"3. 问题检索时间: {retrieval_results['average_time_per_question']:.2f}ms/问题")

    return results


def compare_benchmark_files(file1: str, file2: str) -> None:
    """
    Print a side-by-side comparison of two benchmark result files.

    Args:
        file1: Path to the baseline benchmark JSON file.
        file2: Path to the benchmark JSON file to compare against it.
    """
    if not (os.path.exists(file1) and os.path.exists(file2)):
        print(f"错误: 找不到文件 {file1} 或 {file2}")
        return

    def load(path):
        # Benchmark files are written as UTF-8 JSON by run_comprehensive_benchmark.
        with open(path, 'r', encoding='utf-8') as fh:
            return json.load(fh)

    results1 = load(file1)
    results2 = load(file2)

    def pct_change(old, new):
        # Relative change of *new* versus *old*, as a percentage.
        return ((new - old) / old) * 100

    header = "=" * 70
    print("\n" + header)
    print(f"性能对比: {file1} vs {file2}")
    print(header)

    # Initialization time.
    init1 = results1['initialization']['average_time']
    init2 = results2['initialization']['average_time']
    print(f"初始化时间: {init1:.2f}ms → {init2:.2f}ms ({pct_change(init1, init2):+.2f}%)")

    # Embedding time (only when both runs actually produced batch metrics).
    if 'batch_test' in results1['embedding_generation'] and 'batch_test' in results2['embedding_generation']:
        emb1 = results1['embedding_generation']['batch_test']['average_time_per_text_ms']
        emb2 = results2['embedding_generation']['batch_test']['average_time_per_text_ms']
        print(f"文本嵌入时间: {emb1:.2f}ms → {emb2:.2f}ms ({pct_change(emb1, emb2):+.2f}%)")

    # Retrieval time — this is also what the verdict below is based on.
    ret1 = results1['retrieval']['average_time_per_question']
    ret2 = results2['retrieval']['average_time_per_question']
    ret_diff = pct_change(ret1, ret2)
    print(f"问题检索时间: {ret1:.2f}ms → {ret2:.2f}ms ({ret_diff:+.2f}%)")

    # Overall wall time of each benchmark run.
    total1 = results1['overall_test_time_ms']
    total2 = results2['overall_test_time_ms']
    print(f"总测试时间: {total1:.2f}ms → {total2:.2f}ms ({pct_change(total1, total2):+.2f}%)")

    # Verdict: negative retrieval diff means the second run was faster.
    print("\n" + header)
    if ret_diff < 0:
        print(f"性能提升: 检索速度提高了 {-ret_diff:.2f}%")
    else:
        print(f"性能下降: 检索速度降低了 {ret_diff:.2f}%")
    print(header)


if __name__ == "__main__":
    import argparse

    # CLI: either compare two previously saved result files, or run a
    # fresh comprehensive benchmark against the given knowledge base.
    cli = argparse.ArgumentParser(description='Semantic Retriever 性能测试工具')
    cli.add_argument('--compare', nargs=2, metavar=('FILE1', 'FILE2'),
                     help='比较两个性能测试结果文件')
    cli.add_argument('--iterations', type=int, default=3,
                     help='测试迭代次数（默认：3）')
    cli.add_argument('--kb-path', default='rust_knowledge_base/rust_docs_sample.json',
                     help='知识库文件路径')
    opts = cli.parse_args()

    if opts.compare:
        baseline, candidate = opts.compare
        compare_benchmark_files(baseline, candidate)
    else:
        run_comprehensive_benchmark(opts.kb_path, opts.iterations)