#!/usr/bin/env python3
"""
速度对比测试脚本
比较优化前后的模型响应速度
"""

import time
from qwen_model_client import QwenModelClient, ModelConfig

def test_standard_mode(client, prompt):
    """测试标准模式"""
    print(f"测试标准模式: {prompt}")
    start_time = time.time()
    
    response = client.simple_chat(
        prompt, 
        fast_mode=False,
        max_tokens=200,
        temperature=0.7
    )
    
    end_time = time.time()
    duration = end_time - start_time
    print(f"响应: {response[:100]}...")
    print(f"耗时: {duration:.2f}秒\n")
    return duration, response

def test_fast_mode(client, prompt):
    """测试快速模式"""
    print(f"测试快速模式: {prompt}")
    start_time = time.time()
    
    response = client.simple_chat(
        prompt, 
        fast_mode=True,
        max_tokens=200
    )
    
    end_time = time.time()
    duration = end_time - start_time
    print(f"响应: {response[:100]}...")
    print(f"耗时: {duration:.2f}秒\n")
    return duration, response

def test_ultra_fast_mode(client, prompt):
    """测试极速模式"""
    print(f"测试极速模式: {prompt}")
    start_time = time.time()
    
    response = client.fast_chat(
        prompt, 
        max_tokens=100
    )
    
    end_time = time.time()
    duration = end_time - start_time
    content = response.get("choices", [{}])[0].get("message", {}).get("content", "")
    print(f"响应: {content[:100]}...")
    print(f"耗时: {duration:.2f}秒\n")
    return duration, content

def run_comparison():
    """Benchmark all three chat modes over a fixed prompt set and print a summary.

    Builds a client, verifies the service is up, picks the first available
    model (best effort), then runs every prompt through standard, fast and
    ultra-fast mode in that order, collecting per-mode timings. Finally
    prints average latencies, relative speedups, and usage recommendations.
    """
    # Client setup with default configuration.
    client = QwenModelClient(ModelConfig())

    # Bail out early when the backend is unreachable.
    if not client.health_check():
        print("❌ 服务不可用，无法进行测试")
        return

    # Best-effort model selection: failures are reported, not fatal.
    try:
        available = client.get_available_models()
        if available:
            client.config.model_name = available[0]
            print(f"使用模型: {client.config.model_name}\n")
    except Exception as e:
        print(f"获取模型列表失败: {e}\n")

    test_prompts = [
        "请简单介绍一下人工智能",
        "什么是机器学习？",
        "请解释一下深度学习的概念",
        "Python有哪些优点？"
    ]

    # Mode name -> runner function, in the order each prompt is exercised.
    mode_runners = [
        ("standard", test_standard_mode),
        ("fast", test_fast_mode),
        ("ultra_fast", test_ultra_fast_mode),
    ]
    results = {mode: {"times": [], "responses": []} for mode, _ in mode_runners}

    for i, prompt in enumerate(test_prompts):
        print(f"===== 测试 {i+1}/{len(test_prompts)} =====")
        for mode, runner in mode_runners:
            duration, response = runner(client, prompt)
            results[mode]["times"].append(duration)
            results[mode]["responses"].append(response)

    # Per-mode average latency in seconds.
    averages = {
        mode: sum(data["times"]) / len(data["times"])
        for mode, data in results.items()
    }
    standard_avg = averages["standard"]
    fast_avg = averages["fast"]
    ultra_fast_avg = averages["ultra_fast"]

    print("===== 速度对比结果 =====")
    print(f"标准模式平均耗时: {standard_avg:.2f}秒")
    print(f"快速模式平均耗时: {fast_avg:.2f}秒")
    print(f"极速模式平均耗时: {ultra_fast_avg:.2f}秒")
    print()
    print(f"快速模式比标准模式快: {((standard_avg - fast_avg) / standard_avg * 100):.1f}%")
    print(f"极速模式比标准模式快: {((standard_avg - ultra_fast_avg) / standard_avg * 100):.1f}%")
    print(f"极速模式比快速模式快: {((fast_avg - ultra_fast_avg) / fast_avg * 100):.1f}%")

    print("\n===== 使用建议 =====")
    print("1. 标准模式: 适合需要高质量、长文本回复的场景")
    print("2. 快速模式: 适合一般对话场景，平衡速度和质量")
    print("3. 极速模式: 适合需要快速响应的简单问答场景")

if __name__ == "__main__":
    run_comparison()