#!/usr/bin/env python3
"""
速度优化配置指南
根据不同场景提供最佳配置建议
"""

from qwen_model_client import QwenModelClient, ModelConfig
import time

def test_configuration(client, prompt, config_name):
    """Time one chat request issued with a named parameter profile.

    Args:
        client: QwenModelClient used to send the request.
        prompt: User prompt sent to the model.
        config_name: Profile name understood by get_config_params().

    Returns:
        Elapsed wall-clock time of the chat call, in seconds.
    """
    print(f"\n测试配置: {config_name}")
    print(f"提示: {prompt}")

    # perf_counter is the correct monotonic clock for measuring durations;
    # time.time() can jump if the system clock is adjusted.
    start_time = time.perf_counter()
    response = client.simple_chat(prompt, fast_mode=False, **get_config_params(config_name))
    duration = time.perf_counter() - start_time

    # Only append an ellipsis when the preview actually truncated the response.
    preview = response[:100] + ("..." if len(response) > 100 else "")
    print(f"响应: {preview}")
    print(f"耗时: {duration:.2f}秒")
    return duration

# Preset generation-parameter profiles, built once at import time instead of
# being reconstructed on every call. Keys are the user-facing profile names.
_CONFIGS = {
    "默认配置": {
        "max_tokens": 200,
        "temperature": 0.7,
        "top_p": 0.9,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0
    },
    "快速回复": {
        "max_tokens": 100,
        "temperature": 0.1,
        "top_p": 0.8,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0,
        "use_cache": True,
        "skip_special_tokens": True
    },
    "平衡模式": {
        "max_tokens": 150,
        "temperature": 0.3,
        "top_p": 0.85,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0,
        "use_cache": True,
        "skip_special_tokens": True
    },
    "高质量模式": {
        "max_tokens": 300,
        "temperature": 0.8,
        "top_p": 0.95,
        "frequency_penalty": 0.1,
        "presence_penalty": 0.1,
        "use_cache": False,
        "skip_special_tokens": False
    },
    "极速模式": {
        "max_tokens": 50,
        "temperature": 0.0,
        "top_p": 0.7,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0,
        "use_cache": True,
        "skip_special_tokens": True
    }
}

def get_config_params(config_name):
    """Return the generation parameters for a named profile.

    Args:
        config_name: Profile name; unknown names fall back to "默认配置".

    Returns:
        A fresh dict of keyword arguments for client.simple_chat(). A copy
        is returned so callers may mutate it without corrupting the shared
        preset table.
    """
    return dict(_CONFIGS.get(config_name, _CONFIGS["默认配置"]))

def run_optimization_guide():
    """Run the speed-optimization guide end to end.

    Performs a health check, selects a model, times every preset profile
    against the same prompt, then prints a speed comparison, per-scenario
    recommendations, and a custom-configuration example.
    """
    _print_header("Qwen模型速度优化配置指南")

    # Initialize the client with default settings.
    config = ModelConfig()
    client = QwenModelClient(config)

    # Bail out early if the inference service is unreachable.
    if not client.health_check():
        print("❌ 服务不可用，无法进行测试")
        return

    _select_model(client)

    # Time every preset profile against the same prompt.
    test_prompt = "请简单介绍一下机器学习的基本概念"
    configs_to_test = ["默认配置", "快速回复", "平衡模式", "高质量模式", "极速模式"]
    results = {name: test_configuration(client, test_prompt, name)
               for name in configs_to_test}

    _report_results(results)
    _print_recommendations()
    _print_custom_example()

def _print_header(title, *, leading=False):
    """Print a banner: 60 '=' chars, the title, 60 '=' chars.

    Args:
        title: Section title line.
        leading: When True, prefix the banner with a blank line (used for
            every section after the first).
    """
    print(("\n" if leading else "") + "=" * 60)
    print(title)
    print("=" * 60)

def _select_model(client):
    """Switch the client to the first model the server reports, if any.

    Failures are reported but non-fatal: the guide proceeds with the
    client's current model.
    """
    try:
        models = client.get_available_models()
        if models:
            client.config.model_name = models[0]
            print(f"使用模型: {client.config.model_name}")
    except Exception as e:
        print(f"获取模型列表失败: {e}")

def _report_results(results):
    """Print timings sorted fastest-first and the speed-up vs. the default.

    Args:
        results: Mapping of profile name -> duration in seconds; must
            contain the "默认配置" baseline.
    """
    _print_header("配置对比结果", leading=True)

    ranked = sorted(results.items(), key=lambda item: item[1])
    for rank, (name, duration) in enumerate(ranked, start=1):
        print(f"{rank}. {name}: {duration:.2f}秒")

    fastest_name, fastest_time = ranked[0]
    default_time = results["默认配置"]
    # Guard against a zero baseline (e.g. instant mocked responses), which
    # would otherwise raise ZeroDivisionError.
    improvement = ((default_time - fastest_time) / default_time * 100
                   if default_time else 0.0)
    print(f"\n最快配置是 '{fastest_name}'，比默认配置快 {improvement:.1f}%")

def _print_recommendations():
    """Print which preset profile fits which usage scenario."""
    _print_header("使用场景建议", leading=True)

    scenarios = [
        ("简单问答", "极速模式", "如天气查询、简单定义等"),
        ("日常对话", "快速回复", "如聊天机器人、客服等"),
        ("技术问题", "平衡模式", "如代码解释、技术文档等"),
        ("创意写作", "高质量模式", "如文章创作、故事生成等"),
        ("默认场景", "默认配置", "一般用途，平衡各种需求"),
    ]
    for scenario, config_name, description in scenarios:
        print(f"{scenario}: {config_name} - {description}")

def _print_custom_example():
    """Print a copy-pasteable custom-configuration code snippet."""
    _print_header("自定义配置示例", leading=True)

    print("""
# 创建自定义配置
custom_config = {
    "max_tokens": 120,  # 根据需要调整
    "temperature": 0.2,  # 降低温度提高一致性
    "top_p": 0.8,  # 限制选择范围
    "use_cache": True,  # 启用缓存
    "skip_special_tokens": True  # 跳过特殊token
}

# 使用自定义配置
response = client.simple_chat("你的问题", fast_mode=False, **custom_config)
""")

# Run the guide only when executed as a script, not when imported.
if __name__ == "__main__":
    run_optimization_guide()