'''
Date: 2025-04-16 11:29:15
LastEditTime: 2025-04-17 17:24:39
'''
import asyncio
import random
import statistics
import time
from datetime import datetime
from typing import Dict, List
from concurrent.futures import ThreadPoolExecutor  # added for the synchronous (non-async) implementation

import pandas as pd
# from model_config import MODEL_CONFIGS
from openai import AsyncOpenAI, OpenAI  # synchronous OpenAI client added for the non-async implementation
import requests
import json
from datetime import datetime
import time
import hashlib
import base64
from uuid import uuid4


# Build the gateway authentication headers.
def gen_proxy_header(appid, appKey, capabilityname):
    """Construct proxy-auth HTTP headers for the gateway.

    Args:
        appid: Application id issued by the gateway.
        appKey: Secret key used to sign the request.
        capabilityname: Capability id; zero-padded into the csid.

    Returns:
        dict: Headers carrying the base64 server param, the current
        timestamp and the md5 checksum the gateway validates.
    """
    # csid = appid + capability name zero-padded to 24 chars + 32-char uuid hex.
    nonce = str(uuid4()).replace("-", "")
    padding = "0" * (24 - len(capabilityname))
    csid = f"{appid}{capabilityname}{padding}{nonce}"

    # The gateway expects appid/csid as urlsafe-base64-encoded JSON.
    server_param = base64.urlsafe_b64encode(
        json.dumps({
            "appid": appid,
            "csid": csid
        }).encode())

    now = int(time.time())
    # Signature: md5(appKey + timestamp + server param), hex-encoded.
    signature = hashlib.md5(
        f"{appKey}{now}{server_param.decode()}".encode('utf-8')).hexdigest()

    return {
        "Content-Type": "application/json;charset=UTF-8",
        "X-Server-Param": server_param.decode(),
        "X-CurTime": f"{now}",
        "X-CheckSum": signature,
    }


def openai_api_streaming(headers,
                         base_url,
                         api_key,
                         model,
                         messages,
                         stream=True,
                         enable_thinking=True):
    """Call an OpenAI-compatible chat-completions endpoint over HTTP.

    Args:
        headers: Base HTTP headers (mutated: Authorization is added).
        base_url: Full URL of the /chat/completions endpoint.
        api_key: Bearer token appended to `headers`.
        model: Model name to request.
        messages: Chat messages in OpenAI format.
        stream: When True, consume the SSE stream and print tokens as they
            arrive; when False, do a single blocking request.
        enable_thinking: Soft switch for the model's reasoning mode,
            forwarded via chat_template_kwargs.

    Returns:
        str: The generated text — joined streamed deltas in streaming mode,
        or the message content in non-streaming mode.

    Raises:
        ValueError: If the endpoint responds with a non-200 status code.
    """
    temperature = 0.5
    max_tokens = 50
    headers["Authorization"] = "Bearer " + api_key
    data = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "stream": stream,
        # NOTE: chat_template_kwargs.enable_thinking is the reasoning soft switch.
        "chat_template_kwargs": {
            "enable_thinking": enable_thinking
        }
    }
    st_time = time.time()
    response = requests.post(base_url,
                             headers=headers,
                             json=data,
                             stream=stream,
                             timeout=100)
    if response.status_code != 200:
        raise ValueError(f"Failed to generate response: {response.text}")

    if not stream:
        result = json.loads(
            response.content)['choices'][0]['message']['content']
        print(result)
        return result

    # Streaming: each SSE line looks like "data: {...}" or "data: [DONE]".
    result = []
    print(datetime.now())
    first_chunk = True
    for chunk in response.iter_lines():
        # chunk[5:] strips the "data:" prefix; skip empty keep-alive lines
        # and the terminal "[DONE]" sentinel.
        if chunk and chunk[5:] != b" [DONE]":
            chunk = json.loads(chunk[5:])
            if "choices" in chunk and len(chunk["choices"]) > 0:
                if first_chunk:
                    # Time to first token.
                    print(time.time() - st_time)
                    first_chunk = False
                choice = chunk["choices"][0]
                if choice["delta"]:
                    result.append(choice["delta"].get("content", ""))
                    print(result[-1], end="", flush=True)
            if "usage" in chunk:
                print()
                print(chunk["usage"])
                print(time.time() - st_time)
    print()
    print(datetime.now())
    # Bug fix: the streaming branch previously returned None; return the
    # accumulated text so both modes yield the generated string.
    return "".join(result)


class Qwen3Client:
    """Minimal synchronous client for a Qwen3 chat-completions endpoint."""

    def __init__(self, headers, base_url, api_key, model, messages=None):
        """Store connection settings.

        Args:
            headers: Pre-built gateway auth headers (see gen_proxy_header).
            base_url: Chat-completions endpoint URL.
            api_key: Bearer token for the Authorization header.
            model: Model name sent with every request.
            messages: Optional default message list. Made optional (bug fix):
                chat() always takes explicit messages, and callers in this
                file construct the client without this argument.
        """
        self.headers = headers
        self.base_url = base_url
        self.api_key = api_key
        self.model = model
        self.messages = [] if messages is None else messages

    def chat(self, messages):
        """Send a non-streaming, non-thinking chat request; return the text."""
        return openai_api_streaming(self.headers,
                                    base_url=self.base_url,
                                    api_key=self.api_key,
                                    model=self.model,
                                    messages=messages,
                                    stream=False,
                                    enable_thinking=False)


# if __name__ == "__main__":
#     # 磐智认证参数
#     appid = "dsjyybai"
#     appKey = "bb9c5100caf0f84e22d88451e94ce2bb"
#     capabilityname = "llmm"
#     headers = gen_proxy_header(appid, appKey, capabilityname)

#     # URL，办公网或呼池使用10.217.247.48，其它资源池请更换对应IP
#     base_url = "http://10.217.247.48:9050/llmm-prod/v1/chat/completions"
#     api_key = "cs-baac3b82" # "chatbi"
#     model = "qwen3-32b-hc"

#     messages=[{"role": "user","content": "hello, 介绍以下你自己"}]
#     client = Qwen3Client(headers, base_url, api_key, model, messages)
#     resp = client.chat(messages)

# async def setup_client(model_name: str):
#     """初始化OpenAI客户端"""
#     try:
#         model_config = MODEL_CONFIGS[model_name]
#         return AsyncOpenAI(
#             base_url=model_config["base_url"],
#             api_key=model_config["api_key"]), model_config["model_name"]
#     except KeyError:
#         raise ValueError(f"未找到模型配置: {model_name}")

# async def setup_client_auth(model_name):
#     # 磐智认证参数
#     appid = "dsjyybai"
#     appKey = "bb9c5100caf0f84e22d88451e94ce2bb"
#     capabilityname = "llmm"
#     # 生成请求头
#     from qwen3_client import gen_proxy_header
#     headers = gen_proxy_header(appid, appKey, capabilityname)

#     # URL，办公网或呼池使用10.217.247.48，其它资源池请更换对应IP
#     base_url = "http://10.217.247.48:9050/llmm-prod/v1/chat/completions"
#     api_key = "cs-baac3b82" # "chatbi"
#     model = "qwen3-32b-hc"
#     messages = [{"role": "user", "content": "初始化消息"}]

#     client = Qwen3Client(headers, base_url, api_key, model, messages)
#     return client


# Synchronous client initialization (non-async variant).
def setup_client_auth(model_name):
    """Build an authenticated synchronous Qwen3Client for the given model.

    Args:
        model_name: Model identifier the client will send with requests.

    Returns:
        Qwen3Client: Client configured with gateway auth headers.
    """
    # Gateway auth parameters (shared with the async variant).
    # NOTE(review): credentials are hard-coded; move to config/env for production.
    app_info = {
        "appid": "dsjyybai",
        "appKey": "bb9c5100caf0f84e22d88451e94ce2bb",
        "capabilityname": "llmm"
    }
    headers = gen_proxy_header(**app_info)
    model_info = {
        "base_url": "http://10.217.247.48:9050/llmm-prod/v1/chat/completions",
        "api_key": "cs-baac3b82",
        "model": model_name
    }
    # Bug fix: Qwen3Client.__init__ requires a `messages` argument, so the
    # previous call without it raised TypeError. Pass an empty default.
    client = Qwen3Client(headers, messages=[], **model_info)
    return client


# Synchronous chat-completion worker (non-async variant).
def chat_completion_sync(client, model_name: str, prompt: str,
                         request_id: int):
    """Synchronously run one chat completion and collect timing statistics.

    Args:
        client: Unused placeholder kept for interface compatibility; the
            request is issued directly via `requests`.
        model_name: Model identifier to query.
        prompt: User prompt for a single-turn conversation.
        request_id: Label used in progress logging.

    Returns:
        dict with request_id, response_text, first_response_time,
        total_time, total_tokens and completion_tokens — or None on error.
    """
    messages = [{"role": "user", "content": prompt}]
    start_time = time.time()
    response_text = ""
    first_response_time = None
    # Bug fix: initialize token counters so a stream without a usage chunk
    # cannot raise NameError when building the result dict below.
    total_tokens = 0
    completion_tokens = 0

    try:
        print(f"\n请求 {request_id} 使用模型（同步）: {model_name}")
        print("-" * 50)
        # Gateway auth parameters (same values as setup_client_auth).
        app_info = {
            "appid": "dsjyybai",
            "appKey": "bb9c5100caf0f84e22d88451e94ce2bb",
            "capabilityname": "llmm"
        }
        headers = gen_proxy_header(**app_info)
        base_url = "http://10.217.247.48:9050/llmm-prod/v1/chat/completions"
        api_key = "cs-baac3b82"
        stream = False
        headers["Authorization"] = "Bearer " + api_key
        result = []
        flag = True
        enable_thinking = False
        data = {
            "model": model_name,
            "messages": messages,
            "max_tokens": 1000,
            "temperature": 0.5,
            "stream": stream,
            # chat_template_kwargs.enable_thinking is the reasoning soft switch.
            "chat_template_kwargs": {
                "enable_thinking": enable_thinking
            }
        }

        st_time = time.time()
        response = requests.post(base_url,
                                 headers=headers,
                                 json=data,
                                 stream=stream,
                                 timeout=100)
        # Bug fix: surface HTTP failures explicitly instead of failing later
        # on JSON parsing (caught below; the request is reported as None).
        if response.status_code != 200:
            raise ValueError(f"Failed to generate response: {response.text}")
        if stream:
            # SSE lines look like "data: {...}"; chunk[5:] strips "data:".
            for chunk in response.iter_lines():
                if chunk and chunk[5:] != b" [DONE]":
                    chunk = json.loads(chunk[5:])
                    if "choices" in chunk and len(chunk["choices"]) > 0:
                        if flag:
                            first_response_time = time.time() - st_time
                            print("first response time: ", first_response_time)
                            flag = False
                        choice = chunk["choices"][0]
                        if choice["delta"]:
                            result.append(choice["delta"].get("content", ""))
                            response_text += result[-1]
                    if "usage" in chunk:
                        print()
                        print(chunk["usage"])
                        total_tokens = chunk["usage"]["total_tokens"]
                        completion_tokens = chunk["usage"]["completion_tokens"]
                        print("cost time: ", time.time() - st_time)
        else:
            # Parse the body once instead of three times.
            payload = json.loads(response.content)
            response_text = payload['choices'][0]['message']['content']
            first_response_time = time.time() - st_time
            total_tokens = payload['usage']['total_tokens']
            completion_tokens = payload['usage']['completion_tokens']

        print(f"[请求 {request_id}] 已完成（同步）", flush=True)
        print("\n" + "-" * 50)

        return {
            "request_id": request_id,
            "response_text": response_text,
            "first_response_time": first_response_time,
            "total_time": time.time() - start_time,
            "total_tokens": total_tokens,
            "completion_tokens": completion_tokens
        }

    except Exception as e:
        # Broad catch is deliberate for the benchmark: a failed request is
        # logged and reported as None rather than aborting the whole batch.
        print(f"请求 {request_id} 发生错误（同步）: {str(e)}")
        return None


def print_batch_statistics(all_stats: List[Dict]):
    """Print aggregate latency/throughput statistics for a batch of requests.

    Args:
        all_stats: Per-request stat dicts as produced by chat_completion_sync;
            an empty list only prints a notice.
    """
    if not all_stats:
        print("没有有效的统计数据")
        return

    # Collect each metric as its own list.
    firsts = [s['first_response_time'] for s in all_stats]
    totals = [s['total_time'] for s in all_stats]
    tokens = [s['total_tokens'] for s in all_stats]
    # Per-request generation speed: completion tokens over wall time.
    speeds = [s['completion_tokens'] / s['total_time'] for s in all_stats]

    print("\n=== 批量请求性能统计 ===")
    print(f"总请求数: {len(all_stats)}")
    print("\n响应时间统计:")
    print(f"首次响应时间 - 平均: {statistics.mean(firsts):.2f}秒, "
          f"最小: {min(firsts):.2f}秒, "
          f"最大: {max(firsts):.2f}秒")
    print(f"总耗时 - 平均: {statistics.mean(totals):.2f}秒, "
          f"最小: {min(totals):.2f}秒, "
          f"最大: {max(totals):.2f}秒")

    print("\nToken统计:")
    print(f"总Token数 - 平均: {statistics.mean(tokens):.2f}, "
          f"最小: {min(tokens)}, "
          f"最大: {max(tokens)}")
    print(f"生成速度 - 平均: {statistics.mean(speeds):.2f} token/s, "
          f"最小: {min(speeds):.2f} token/s, "
          f"最大: {max(speeds):.2f} token/s")

    # Overall throughput: all tokens divided by the slowest request's time.
    print(f"\n整体吞吐量: {sum(tokens) / max(totals):.2f} token/s")


# Synchronous batch requests (non-async variant).
def batch_completion_sync(model_name, num_requests: int = 3):
    """Run `num_requests` chat completions concurrently via a thread pool.

    Args:
        model_name: Model identifier passed to each worker.
        num_requests: Number of concurrent requests (one thread each).

    Returns:
        List of per-request stat dicts with failed requests dropped,
        or None if the batch itself raised.
    """
    prompts = [
        "简单介绍中国的四大发明", "简单解释人工智能的基本概念", "简单描述太阳系的构成", "简单讲解光合作用的过程",
        "简单介绍丝绸之路的历史"
    ]

    try:
        # One worker per request; each gets a randomly chosen prompt.
        with ThreadPoolExecutor(max_workers=num_requests) as pool:
            pending = [
                pool.submit(chat_completion_sync, None, model_name,
                            random.choice(prompts), idx + 1)
                for idx in range(num_requests)
            ]

            # Gather results, dropping failed requests (reported as None).
            stats = [f.result() for f in pending]
            stats = [s for s in stats if s is not None]
            print_batch_statistics(stats)
            return stats

    except Exception as e:
        print(f"同步批量请求执行出错: {str(e)}")


def save_to_excel(model_name: str, all_results: List[Dict]):
    """Save benchmark results to a timestamped Excel file.

    Args:
        model_name: Used in the output filename.
        all_results: One summary dict per concurrency level, as returned by
            collect_statistics (11 fields, in insertion order).
    """
    print("""将测试结果保存到Excel""")
    # Bug fix: an empty result set would crash the df.columns assignment
    # below (11 names vs 0 columns); bail out early instead.
    if not all_results:
        print("没有可保存的测试结果")
        return

    df = pd.DataFrame(all_results)
    # Rename the English stat keys to Chinese report headers; relies on
    # collect_statistics producing exactly these 11 fields in order.
    df.columns = [
        "并发数", "成功数", "平均首次响应时间(秒)", "最小首次响应时间(秒)", "最大首次响应时间(秒)", "平均总耗时(秒)",
        "最小总耗时(秒)", "最大总耗时(秒)", "平均Token数", "平均生成速度(token/s)", "整体吞吐量(token/s)"
    ]

    # Timestamped filename so repeated runs never overwrite each other.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'benchmark_results_{model_name}_{timestamp}.xlsx'
    df.to_excel(filename, index=False)
    # Bug fix: previously printed a literal placeholder instead of the path.
    print(f"\n测试结果已保存到: {filename}")


def collect_statistics(model_name, num_requests,
                       all_stats: List[Dict]) -> Dict:
    """Aggregate per-request stats into one summary row and save it to Excel.

    Args:
        model_name: Used in the per-concurrency-level output filename.
        num_requests: Concurrency level that produced `all_stats`.
        all_stats: Per-request stat dicts; may be empty.

    Returns:
        Summary dict with all values rounded to 2 decimals, or None when
        there are no valid stats.
    """
    if not all_stats:
        return None

    first_response_times = [stat['first_response_time'] for stat in all_stats]
    total_times = [stat['total_time'] for stat in all_stats]
    total_tokens = [stat['total_tokens'] for stat in all_stats]
    qps_list = [
        stat['completion_tokens'] / stat['total_time'] for stat in all_stats
    ]

    result = {
        'num_requests': num_requests,
        'success_requests': len(all_stats),
        'mean_first_response': statistics.mean(first_response_times),
        'min_first_response': min(first_response_times),
        'max_first_response': max(first_response_times),
        'mean_total_time': statistics.mean(total_times),
        'min_total_time': min(total_times),
        'max_total_time': max(total_times),
        'mean_tokens': statistics.mean(total_tokens),
        'mean_qps': statistics.mean(qps_list),
        # Overall throughput: total tokens over the slowest request's time.
        'total_throughput': sum(total_tokens) / max(total_times)
    }
    result = {k: round(v, 2) for k, v in result.items()}

    # Write a one-row Excel report for this concurrency level.
    df = pd.DataFrame([{
        '并发数': result['num_requests'],
        '成功数': result['success_requests'],
        "成功率": result['success_requests'] / result['num_requests'],
        '平均首次响应时间(秒)': result['mean_first_response'],
        '最小首次响应时间(秒)': result['min_first_response'],
        '最大首次响应时间(秒)': result['max_first_response'],
        '平均总耗时(秒)': result['mean_total_time'],
        '最小总耗时(秒)': result['min_total_time'],
        '最大总耗时(秒)': result['max_total_time'],
        '平均Token数': result['mean_tokens'],
        '平均生成速度(token/s)': result['mean_qps'],
        '整体吞吐量(token/s)': result['total_throughput']
    }])
    # Timestamped filename so repeated runs never overwrite each other.
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'benchmark_results_{model_name}_{result["num_requests"]}_{timestamp}.xlsx'
    df.to_excel(filename, index=False)
    # Bug fix: previously printed a literal placeholder instead of the path.
    print(f"\n测试结果已保存到: {filename}")

    return result


# Synchronous benchmark entry point (non-async variant).
def run_benchmark_sync(model_name: str,
                       concurrent_numbers: List[int]) -> List[Dict]:
    """Benchmark the model at each requested concurrency level.

    Args:
        model_name: Model to benchmark.
        concurrent_numbers: Concurrency levels to test, in order.

    Returns:
        One summary dict per level that produced valid statistics.
    """
    summaries: List[Dict] = []

    for level in concurrent_numbers:
        print(f"\n开始同步测试 {level} 并发请求...")
        batch_stats = batch_completion_sync(model_name, level)
        if not batch_stats:
            continue
        summary = collect_statistics(model_name, level, batch_stats)
        if summary:
            summaries.append(summary)

    return summaries


def main():
    """Run the synchronous benchmark over a fixed set of concurrency levels."""
    # Earlier runs also tried: [1, 2, 4, 8, 10, 12, 14, 16, 25, 32, 64]
    concurrent_numbers = [1, 2, 4, 8, 10, 12, 16]
    model_name = "qwen3-30b-hc"

    print(f"开始性能测试，模型: {model_name}")
    print(f"测试并发数: {concurrent_numbers}")

    results = run_benchmark_sync(model_name, concurrent_numbers)
    save_to_excel(model_name, results)


if __name__ == "__main__":
    main()
