'''
Date: 2025-04-16 11:29:15
LastEditTime: 2025-04-17 17:24:39
'''
import asyncio
import os
import random
import statistics
import time
from datetime import datetime
from typing import Dict, List

import pandas as pd
# from model_config import MODEL_CONFIGS
from openai import AsyncOpenAI


# async def setup_client(model_name: str):
#     """初始化OpenAI客户端"""
#     try:
#         model_config = MODEL_CONFIGS[model_name]
#         return AsyncOpenAI(
#             base_url=model_config["base_url"],
#             api_key=model_config["api_key"]), model_config["model_name"]
#     except KeyError:
#         raise ValueError(f"未找到模型配置: {model_name}")
    

async def setup_client_auth(model_name):
    """Build an authenticated Qwen3Client for the internal proxy gateway.

    Args:
        model_name: accepted for interface symmetry with the (commented-out)
            setup_client(); currently unused — the proxy model is hard-coded.

    Returns:
        A ready-to-use Qwen3Client instance.
    """
    # PanZhi (磐智) authentication parameters.
    # SECURITY NOTE(review): credentials are hard-coded in source; move them
    # to a config file or environment variables.
    appid = "dsjyybai"
    appKey = "bb9c5100caf0f84e22d88451e94ce2bb"
    capabilityname = "llmm"

    # Bug fix: Qwen3Client was used below but never imported (NameError).
    from qwen3_client import Qwen3Client, gen_proxy_header

    # Generate the signed proxy request headers.
    headers = gen_proxy_header(appid, appKey, capabilityname)

    # URL: office network / call pool uses 10.217.247.48; other resource
    # pools need their corresponding IP.
    base_url = "http://10.217.247.48:9050/llmm-prod/v1/chat/completions"
    api_key = "cs-baac3b82" # "chatbi"
    model = "qwen3-32b-hc"
    messages = [{"role": "user", "content": "初始化消息"}]

    client = Qwen3Client(headers, base_url, api_key, model, messages)
    return client



async def chat_completion(client: "AsyncOpenAI", model_name: str, prompt: str,
                          request_id: int):
    """Run one streaming chat-completion request and collect timing stats.

    Args:
        client: async OpenAI-compatible client (anything exposing
            ``chat.completions.create``).
        model_name: model identifier passed to the API.
        prompt: user prompt, sent as a single user message.
        request_id: identifier used only for log output.

    Returns:
        dict with response text, first-token latency, total wall time and
        token usage, or ``None`` if the request raised.
    """
    messages = [{"role": "user", "content": prompt}]
    start_time = time.time()
    response_text = ""
    first_response_time = None
    usage = None  # usage may arrive on any chunk (often only the final one)

    try:
        print(f"\n请求 {request_id} 使用模型: {model_name}")
        print("-" * 50)
        stream = await client.chat.completions.create(
            model=model_name,
            messages=messages,
            stream=True,
        )

        async for chunk in stream:
            # Bug fix: the original read `chunk.usage` AFTER the loop, which
            # fails on an empty stream (unbound local) and whenever usage is
            # absent from the last chunk. Capture it as soon as it appears.
            if getattr(chunk, "usage", None) is not None:
                usage = chunk.usage
            # Some servers emit a final usage-only chunk with empty choices.
            if chunk.choices and chunk.choices[0].delta.content is not None:
                delta_text = chunk.choices[0].delta.content

                if first_response_time is None:
                    first_response_time = time.time() - start_time

                response_text += delta_text
        print(f"[请求 {request_id}] 已完成", flush=True)
        print("\n" + "-" * 50)

        return {
            "request_id": request_id,
            "response_text": response_text,
            "first_response_time": first_response_time,
            "total_time": time.time() - start_time,
            # Usage is only reported when the server includes it (OpenAI
            # streams need stream_options={"include_usage": True}); report 0
            # instead of crashing with AttributeError on None.
            "total_tokens": usage.total_tokens if usage else 0,
            "completion_tokens": usage.completion_tokens if usage else 0,
        }

    except Exception as e:
        print(f"请求 {request_id} 发生错误: {str(e)}")
        return None


def print_batch_statistics(all_stats: List[Dict]):
    """Print aggregate latency, token and throughput statistics for a batch.

    Args:
        all_stats: per-request result dicts as produced by chat_completion();
            each must contain 'first_response_time', 'total_time',
            'total_tokens' and 'completion_tokens'.
    """
    if not all_stats:
        print("没有有效的统计数据")
        return

    # A request that streamed no content leaves first_response_time == None;
    # filter those out so statistics.mean()/min()/max() do not raise.
    first_response_times = [
        s['first_response_time'] for s in all_stats
        if s['first_response_time'] is not None
    ]
    total_times = [s['total_time'] for s in all_stats]
    total_tokens = [s['total_tokens'] for s in all_stats]
    # Per-request generation speed (completion tokens per second).
    qps_list = [s['completion_tokens'] / s['total_time'] for s in all_stats]

    print("\n=== 批量请求性能统计 ===")
    print(f"总请求数: {len(all_stats)}")
    print("\n响应时间统计:")
    if first_response_times:
        print(f"首次响应时间 - 平均: {statistics.mean(first_response_times):.2f}秒, "
              f"最小: {min(first_response_times):.2f}秒, "
              f"最大: {max(first_response_times):.2f}秒")
    print(f"总耗时 - 平均: {statistics.mean(total_times):.2f}秒, "
          f"最小: {min(total_times):.2f}秒, "
          f"最大: {max(total_times):.2f}秒")

    print("\nToken统计:")
    print(f"总Token数 - 平均: {statistics.mean(total_tokens):.2f}, "
          f"最小: {min(total_tokens)}, "
          f"最大: {max(total_tokens)}")
    print(f"生成速度 - 平均: {statistics.mean(qps_list):.2f} token/s, "
          f"最小: {min(qps_list):.2f} token/s, "
          f"最大: {max(qps_list):.2f} token/s")

    # Overall throughput: all tokens divided by the slowest request's wall
    # time (the batch is concurrent, so the max approximates elapsed time).
    total_tokens_sum = sum(total_tokens)
    total_time_max = max(total_times)
    print(f"\n整体吞吐量: {total_tokens_sum / total_time_max:.2f} token/s")


async def batch_completion(model_name: str, num_requests: int = 3):
    """Fire `num_requests` concurrent chat requests and print their stats.

    Args:
        model_name: key identifying the model/client configuration.
        num_requests: number of concurrent requests to launch (default 3).

    Returns:
        List of per-request stat dicts (failed requests filtered out), or
        None when setup/gathering raised.
    """

    # Fixed pool of Chinese benchmark prompts; one is picked at random
    # for each request.
    prompts = [
        "介绍中国的四大发明", "解释人工智能的基本概念", "描述太阳系的构成", "讲解光合作用的过程", "介绍丝绸之路的历史"
    ]

    try:
        # NOTE(review): setup_client is commented out at the top of this
        # file, so this call raises NameError and the function currently
        # always falls into the except branch. Restore setup_client or adapt
        # to setup_client_auth (which returns a single client, not a tuple).
        client, model = await setup_client(model_name)
        tasks = []
        for i in range(num_requests):
            prompt = prompts[random.randint(0, len(prompts) - 1)]
            task = chat_completion(client, model, prompt, i + 1)
            tasks.append(task)

        # Run all requests concurrently; chat_completion returns None on
        # error, so drop those before computing statistics.
        all_stats = await asyncio.gather(*tasks)
        all_stats = [stat for stat in all_stats if stat is not None]
        print_batch_statistics(all_stats)
        return all_stats

    except Exception as e:
        print(f"批量请求执行出错: {str(e)}")


def save_to_excel(model_name: str, all_results: List[Dict]):
    """Save all benchmark summaries to a timestamped Excel file.

    Args:
        model_name: model identifier embedded in the output filename.
        all_results: summary dicts as returned by collect_statistics().
    """
    # One row per concurrency level, with Chinese column headers.
    df_data = [{
        '并发数': result['num_requests'],
        '平均首次响应时间(秒)': result['mean_first_response'],
        '最小首次响应时间(秒)': result['min_first_response'],
        '最大首次响应时间(秒)': result['max_first_response'],
        '平均总耗时(秒)': result['mean_total_time'],
        '最小总耗时(秒)': result['min_total_time'],
        '最大总耗时(秒)': result['max_total_time'],
        '平均Token数': result['mean_tokens'],
        '平均生成速度(token/s)': result['mean_qps'],
        '整体吞吐量(token/s)': result['total_throughput']
    } for result in all_results]

    df = pd.DataFrame(df_data)

    # Timestamped filename to avoid clobbering earlier runs.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'benchmark_results_{model_name}_{timestamp}.xlsx'
    df.to_excel(filename, index=False)
    # Bug fix: the message previously contained no placeholder, so the
    # actual output path was never shown.
    print(f"\n测试结果已保存到: {filename}")


def collect_statistics(model_name: str, all_stats: List[Dict]) -> Dict:
    """Aggregate per-request stats into a summary and snapshot it to Excel.

    Args:
        model_name: model identifier embedded in the snapshot filename.
        all_stats: per-request result dicts from chat_completion().

    Returns:
        Summary dict of means/min/max/throughput, or None when all_stats
        is empty.
    """
    if not all_stats:
        return None

    # Requests that streamed no content have first_response_time == None;
    # filter them, falling back to [0.0] so statistics.mean() cannot raise.
    first_response_times = [
        s['first_response_time'] for s in all_stats
        if s['first_response_time'] is not None
    ] or [0.0]
    total_times = [s['total_time'] for s in all_stats]
    total_tokens = [s['total_tokens'] for s in all_stats]
    qps_list = [
        s['completion_tokens'] / s['total_time'] for s in all_stats
    ]

    total_tokens_sum = sum(total_tokens)
    total_time_max = max(total_times)

    result = {
        'num_requests': len(all_stats),
        'mean_first_response': statistics.mean(first_response_times),
        'min_first_response': min(first_response_times),
        'max_first_response': max(first_response_times),
        'mean_total_time': statistics.mean(total_times),
        'min_total_time': min(total_times),
        'max_total_time': max(total_times),
        'mean_tokens': statistics.mean(total_tokens),
        'mean_qps': statistics.mean(qps_list),
        'total_throughput': total_tokens_sum / total_time_max
    }

    # One-row DataFrame: per-concurrency snapshot written immediately so a
    # crash later in the run does not lose this level's results.
    df = pd.DataFrame([{
        '并发数': result['num_requests'],
        '平均首次响应时间(秒)': result['mean_first_response'],
        '最小首次响应时间(秒)': result['min_first_response'],
        '最大首次响应时间(秒)': result['max_first_response'],
        '平均总耗时(秒)': result['mean_total_time'],
        '最小总耗时(秒)': result['min_total_time'],
        '最大总耗时(秒)': result['max_total_time'],
        '平均Token数': result['mean_tokens'],
        '平均生成速度(token/s)': result['mean_qps'],
        '整体吞吐量(token/s)': result['total_throughput']
    }])

    # Bug fix: to_excel does not create parent directories — ensure tmp/
    # exists before writing.
    os.makedirs('tmp', exist_ok=True)
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'tmp/benchmark_results_{model_name}_{result["num_requests"]}_{timestamp}.xlsx'
    df.to_excel(filename, index=False)
    # Bug fix: the message previously contained no placeholder, so the
    # actual output path was never shown.
    print(f"\n测试结果已保存到: {filename}")

    return result


async def run_benchmark(model_name: str,
                        concurrent_numbers: List[int]) -> List[Dict]:
    """Run the benchmark once per concurrency level and collect summaries.

    Args:
        model_name: model identifier forwarded to batch_completion().
        concurrent_numbers: concurrency levels to test, in order.

    Returns:
        One summary dict per level that produced valid statistics.
    """
    summaries: List[Dict] = []

    for concurrency in concurrent_numbers:
        print(f"\n开始测试 {concurrency} 并发请求...")
        batch_stats = await batch_completion(model_name, concurrency)
        if not batch_stats:
            continue  # batch failed or produced no valid requests
        summary = collect_statistics(model_name, batch_stats)
        if summary:
            summaries.append(summary)

    return summaries


def main():
    """Entry point: benchmark a fixed model across several concurrency levels."""
    concurrent_numbers = [1, 3, 5, 8, 10, 15, 20, 25]
    model_name = "qwen3-32b-hc"

    print(f"开始性能测试，模型: {model_name}")
    print(f"测试并发数: {concurrent_numbers}")

    results = asyncio.run(run_benchmark(model_name, concurrent_numbers))
    save_to_excel(model_name, results)


if __name__ == "__main__":
    main()
