#!/usr/bin/env python3
"""
Qwen3-4B-AWQ模型速度优化测试

此脚本测试不同参数设置对响应速度的影响，以找到最佳的配置。
"""

import sys
import os
import time
import statistics

# Ensure sibling modules (qwen_model_client) are importable when this
# script is run directly from another working directory.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from qwen_model_client import QwenModelClient, ModelConfig


def test_speed_with_different_configs():
    """Benchmark response latency for several generation-parameter presets.

    Sends a fixed prompt to the model service once per trial (3 trials per
    preset), prints per-trial and aggregate timings, ranks the presets by
    average latency, and finally persists the fastest preset via
    create_optimized_config().

    Returns None. Aborts early (still returning None) when the service
    health check fails.
    """
    print("="*60)
    print("Qwen3-4B-AWQ模型速度优化测试")
    print("="*60)

    # Fixed prompt reused for every trial so timings stay comparable.
    test_prompt = "请简单回答：什么是人工智能？"

    # Parameter presets to compare.
    configs = [
        {"name": "默认配置", "max_tokens": 500, "temperature": 0.7},
        {"name": "减少最大token数", "max_tokens": 200, "temperature": 0.7},
        {"name": "减少最大token数+低温度", "max_tokens": 200, "temperature": 0.1},
        {"name": "极低温度", "max_tokens": 500, "temperature": 0.01},
        {"name": "最小token数", "max_tokens": 100, "temperature": 0.7},
        {"name": "最小token数+低温度", "max_tokens": 100, "temperature": 0.1},
    ]

    # Client pointed at the model server; 60 s timeout covers slow presets.
    config = ModelConfig(
        host="192.168.1.236",
        port=8000,
        timeout=60
    )
    client = QwenModelClient(config)

    # Abort when the service is unreachable — every trial would fail anyway.
    print("正在进行健康检查...")
    if not client.health_check():
        print("❌ 健康检查失败，请确保模型服务正在运行")
        return

    print("✅ 健康检查通过")

    # Model listing is informational only; failure here is non-fatal.
    try:
        models = client.get_available_models()
        print(f"可用模型: {models}")
    except Exception as e:
        print(f"获取模型列表失败: {str(e)}")

    print(f"\n测试提示: {test_prompt}")
    print("="*60)

    results = []

    for i, cfg in enumerate(configs):
        print(f"\n测试配置 {i+1}: {cfg['name']}")
        print(f"参数: max_tokens={cfg['max_tokens']}, temperature={cfg['temperature']}")

        result = _benchmark_config(client, test_prompt, cfg)
        if result is None:
            # Every trial for this preset failed; nothing to aggregate.
            continue

        results.append(result)

        print(f"  平均响应时间: {result['avg_time']:.2f}秒")
        print(f"  最快响应时间: {result['min_time']:.2f}秒")
        print(f"  最慢响应时间: {result['max_time']:.2f}秒")
        print(f"  平均响应长度: {result['avg_length']:.0f}字符")
        print(f"  生成速度: {result['tokens_per_second']:.2f}字符/秒")

    # Fastest preset (lowest average latency) first.
    results.sort(key=lambda x: x['avg_time'])

    print("\n" + "="*60)
    print("速度测试结果汇总")
    print("="*60)
    print(f"{'配置':<20} {'平均时间(秒)':<12} {'最快时间(秒)':<12} {'平均长度(字符)':<15} {'速度(字符/秒)':<15}")
    print("-" * 80)

    for result in results:
        print(f"{result['name']:<20} {result['avg_time']:<12.2f} {result['min_time']:<12.2f} "
              f"{result['avg_length']:<15.0f} {result['tokens_per_second']:<15.2f}")

    # Recommend — and persist — the fastest preset.
    if results:
        best = results[0]
        print(f"\n推荐配置: {best['name']}")
        print(f"参数: max_tokens={best['max_tokens']}, temperature={best['temperature']}")
        print(f"预期响应时间: {best['avg_time']:.2f}秒")
        print(f"预期生成速度: {best['tokens_per_second']:.2f}字符/秒")

        create_optimized_config(best)


def _benchmark_config(client, prompt, cfg, trials=3):
    """Run *trials* chat requests with one preset; return a stats dict.

    Returns None when every trial raised (so no statistics can be computed).
    The dict carries the preset fields plus avg/min/max latency, average
    response length, and throughput. NOTE: 'tokens_per_second' is really
    characters per second — it divides len(response) by elapsed seconds.
    """
    response_times = []
    response_lengths = []

    for trial in range(trials):
        try:
            start = time.time()
            response = client.simple_chat(
                prompt,
                max_tokens=cfg['max_tokens'],
                temperature=cfg['temperature']
            )
            elapsed = time.time() - start

            response_times.append(elapsed)
            response_lengths.append(len(response))

            print(f"  测试 {trial+1}: {elapsed:.2f}秒, {len(response)}字符")

        except Exception as e:
            print(f"  测试 {trial+1} 失败: {str(e)}")

    if not response_times:
        return None

    avg_time = statistics.mean(response_times)
    avg_length = statistics.mean(response_lengths)

    return {
        "name": cfg['name'],
        "max_tokens": cfg['max_tokens'],
        "temperature": cfg['temperature'],
        "avg_time": avg_time,
        "min_time": min(response_times),
        "max_time": max(response_times),
        "avg_length": avg_length,
        # Guard against a (theoretical) zero elapsed time.
        "tokens_per_second": avg_length / avg_time if avg_time > 0 else 0,
    }


def create_optimized_config(best_result, output_path="optimized_config.py"):
    """Write a Python config file capturing the best benchmarked parameters.

    Parameters:
        best_result: mapping with at least 'max_tokens' and 'temperature'
            keys (e.g. a result dict produced by the speed test).
        output_path: file to write. Defaults to "optimized_config.py" in the
            current working directory — the original, backward-compatible
            behavior.

    The generated file hard-codes the server address/timeout and fills in
    the benchmarked generation parameters.
    """
    config_content = f"""
# Qwen3-4B-AWQ模型优化配置

# 服务器配置
host = "192.168.1.236"
port = 8000
timeout = 30

# 生成参数（基于速度测试优化）
max_tokens = {best_result['max_tokens']}
temperature = {best_result['temperature']}

# 其他参数
top_p = 0.9
frequency_penalty = 0.0
presence_penalty = 0.0
"""

    with open(output_path, "w", encoding="utf-8") as f:
        f.write(config_content)

    print(f"\n已创建优化配置文件: {output_path}")


def test_with_optimized_config():
    """Load optimized_config.py from the working directory and run a small
    benchmark suite using its server settings and generation parameters.

    Prints per-prompt timings plus an aggregate summary. Missing config
    file and any runtime failure are reported, never raised.
    """
    print("\n" + "="*60)
    print("使用优化配置进行测试")
    print("="*60)

    try:
        # Load the generated config file as a throwaway module.
        import importlib.util
        spec = importlib.util.spec_from_file_location("optimized_config", "optimized_config.py")
        optimized_config = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(optimized_config)

        # Build the client from the persisted server settings.
        client = QwenModelClient(ModelConfig(
            host=optimized_config.host,
            port=optimized_config.port,
            timeout=optimized_config.timeout
        ))

        prompts = [
            "你好，请介绍一下你自己。",
            "请解释什么是机器学习。",
            "写一个Python函数计算斐波那契数列的第n项。",
            "请列出5种常见的编程语言及其特点。"
        ]

        elapsed_sum = 0
        chars_sum = 0

        for idx, prompt in enumerate(prompts, start=1):
            print(f"\n测试 {idx}: {prompt}")
            try:
                started = time.time()
                answer = client.simple_chat(
                    prompt,
                    max_tokens=optimized_config.max_tokens,
                    temperature=optimized_config.temperature
                )
                took = time.time() - started
                size = len(answer)

                elapsed_sum += took
                chars_sum += size

                print(f"响应时间: {took:.2f}秒")
                print(f"响应长度: {size}字符")
                print(f"生成速度: {size/took:.2f}字符/秒")
                # Truncate long answers for readability.
                if size > 100:
                    print(f"响应内容: {answer[:100]}...")
                else:
                    print(f"响应内容: {answer}")

            except Exception as e:
                print(f"错误: {str(e)}")

        if elapsed_sum > 0:
            print("\n" + "="*60)
            print("优化配置测试结果")
            print("="*60)
            print(f"总测试数: {len(prompts)}")
            print(f"总耗时: {elapsed_sum:.2f}秒")
            print(f"平均响应时间: {elapsed_sum / len(prompts):.2f}秒")
            print(f"平均生成速度: {chars_sum / elapsed_sum:.2f}字符/秒")

    except FileNotFoundError:
        print("未找到优化配置文件，请先运行速度测试。")
    except Exception as e:
        print(f"使用优化配置测试时发生错误: {str(e)}")


def main():
    """Interactive entry point: let the user pick which test mode to run."""
    print("Qwen3-4B-AWQ模型速度优化工具")
    print("1. 运行速度测试")
    print("2. 使用优化配置测试")

    # Map menu choices to their handlers instead of an if/elif chain.
    actions = {
        "1": test_speed_with_different_configs,
        "2": test_with_optimized_config,
    }

    choice = input("\n请选择 (1/2): ")
    action = actions.get(choice)
    if action is not None:
        action()
    else:
        print("无效选项。")


# Run the interactive menu only when executed as a script, not on import.
if __name__ == "__main__":
    main()