import base64
import json
import statistics
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field
from decimal import Decimal, ROUND_HALF_UP
from typing import Dict, List, Optional

import numpy as np
import requests
from urllib3.exceptions import InsecureRequestWarning

# Ignore SSL warnings (for self-signed certificates)
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)

API_URL = "https://c-1933154884820365314.dzai.scnet.cn:58043/v1/chat/completions"
API_KEY = "EMPTY"  # if API-key auth is not enabled, set to EMPTY or any value
CONCURRENCY_LEVELS = [1, 5, 10]  # concurrency levels
NUM_REQUESTS_PER_LEVEL = 3  # number of requests per concurrency level
stream=True  # global toggle: streaming vs non-streaming responses (also embedded in payloads)

@dataclass
class StressTestMetrics:
    """Accumulator for stress-test performance metrics.

    Collects request counts, per-request latency samples, and throughput
    samples while a benchmark run is in progress.
    """
    total_requests: int = 0   # total number of completed requests
    success_count: int = 0    # requests that finished successfully
    failure_count: int = 0    # requests that raised an error
    # BUG FIX: the list fields previously defaulted to ``None``, which
    # violates the declared ``List[float]`` type and crashes any code that
    # appends to a default-constructed instance. ``default_factory`` gives
    # each instance its own fresh list (a plain ``= []`` would be shared).
    latency_list: List[float] = field(default_factory=list)     # total latency (request start to end)
    ttft_list: List[float] = field(default_factory=list)        # time-to-first-token samples
    tpot_list: List[float] = field(default_factory=list)        # time-per-output-token samples
    total_tokens: int = 0                                       # total generated tokens
    throughput_list: List[float] = field(default_factory=list)  # sampled real-time throughput (TPS)

def safe_mean(data: List[float]) -> Optional[float]:
    """Return the arithmetic mean of *data*, or None for an empty list.

    Args:
        data: samples to average; may be empty.

    Returns:
        The mean as a float, or None when there is nothing to average.
    """
    if not data:
        return None
    return statistics.mean(data)


# Encode an image file as a base64 string.
def image_to_base64(file_path):
    """Read the file at *file_path* and return its bytes base64-encoded as UTF-8 text."""
    with open(file_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")


# Build a chat payload whose user message carries both text and an image.
def build_img_message(prompt, IMAGE_PATH):
    """Return a chat-completions payload pairing *prompt* with the image at IMAGE_PATH."""
    encoded = image_to_base64(IMAGE_PATH)
    text_part = {"type": "text", "text": prompt}
    image_part = {
        "type": "image_url",
        "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
    }
    return {
        "model": "Qwen2___5-VL-32B-Instruct",
        "messages": [{"role": "user", "content": [text_part, image_part]}],
        "max_tokens": 2048,
        "presence_penalty": 1.03,
        "frequency_penalty": 1.0,
        "seed": None,
        "temperature": 0.5,
        "top_p": 0.95,
        "stream": stream,  # streaming toggled by the module-level flag
    }

def build_message(prompt):
    """Return a text-only chat-completions payload for *prompt*."""
    user_turn = {
        "role": "user",
        "content": [{"type": "text", "text": prompt}],
    }
    return {
        "model": "Qwen2___5-VL-32B-Instruct",
        "messages": [user_turn],
        "max_tokens": 2048,
        "presence_penalty": 1.03,
        "frequency_penalty": 1.0,
        "seed": None,
        "temperature": 0.5,
        "top_p": 0.95,
        "stream": stream,  # streaming toggled by the module-level flag
    }

# Single-request worker used by the benchmark thread pool.
def send_request(payload) -> Dict:
    """Execute one API request and collect per-request performance metrics.

    Supports streaming and non-streaming responses; the module-level
    ``stream`` flag must match ``payload["stream"]``.

    Returns:
        Dict with keys:
            'ttft'         (float|None) time to first content token (stream mode)
            'latency'      (float|None) total request latency in seconds
            'tpot'         (float|None) mean per-token generation time
            'total_tokens' (int)        content chunks received (1 chunk ≈ 1 token)
            'success'      (bool)       whether the request completed
            'error'        (str|None)   error message on failure
    """
    metrics = {
        'ttft': None,
        'latency': None,
        'tpot': None,
        'total_tokens': 0,
        'success': False,
        'error': None
    }

    try:
        start_time = time.perf_counter()
        response = requests.post(
            API_URL,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {API_KEY}",
                "Accept": "application/json",
            },
            json=payload,
            stream=stream,
            timeout=3000
        )
        response.raise_for_status()

        if stream:
            # Streaming mode: walk the SSE lines and time each content chunk.
            first_token_time = None
            last_token_time = None
            token_counter = 0

            for line in response.iter_lines():
                if not line:
                    continue
                decoded_line = line.decode('utf-8')
                if not decoded_line.startswith('data: '):
                    continue
                event_data = decoded_line[6:]
                if event_data == "[DONE]":
                    break
                try:
                    json_data = json.loads(event_data)
                except json.JSONDecodeError:
                    continue  # skip malformed chunks, as before
                if not json_data.get('choices'):
                    continue
                delta = json_data['choices'][0].get('delta', {})
                content = delta.get('content', '')
                # BUG FIX: the first SSE chunk usually carries only the role
                # (empty content). It was previously counted as a token and
                # used as the TTFT sample, inflating token counts and
                # measuring TTFT against a content-free chunk.
                if not content:
                    continue
                current_time = time.perf_counter()
                if first_token_time is None:
                    first_token_time = current_time
                    metrics['ttft'] = first_token_time - start_time
                last_token_time = current_time
                token_counter += 1

            metrics['latency'] = time.perf_counter() - start_time
            metrics['total_tokens'] = token_counter

            # TPOT = generation window / chunk count (approximation:
            # one SSE content chunk is treated as one token).
            if first_token_time is not None and token_counter > 0:
                metrics['tpot'] = (last_token_time - first_token_time) / token_counter
            metrics['success'] = True

        else:
            # Non-streaming mode: one JSON body with server-side usage stats.
            json_data = response.json()
            metrics['latency'] = time.perf_counter() - start_time
            metrics['total_tokens'] = json_data['usage']['completion_tokens']
            metrics['success'] = True

    except Exception as e:
        # Any failure (HTTP, network, parsing) is reported via the metrics dict.
        metrics['error'] = str(e)

    return metrics

def run_benchmark(concurrency, total_requests, payload):
    """Fire *total_requests* copies of *payload* at the API with *concurrency*
    worker threads, printing live progress and a final summary report.

    Args:
        concurrency: number of worker threads.
        total_requests: total number of requests to send.
        payload: request body passed unchanged to every ``send_request`` call.
    """
    print(f"\n🚀 启动压力测试 | 并发数: {concurrency} | 总请求量: {total_requests}")
    test_metrics = StressTestMetrics(
        latency_list=[],
        ttft_list=[],
        tpot_list=[],
        throughput_list=[],
        total_tokens=0
    )
    start_time = time.perf_counter()
    last_report_time = start_time

    with ThreadPoolExecutor(max_workers=concurrency) as executor:
        futures = [executor.submit(send_request, payload) for _ in range(total_requests)]

        for future in as_completed(futures):
            test_metrics.total_requests += 1
            result = future.result()

            if result['success']:
                test_metrics.success_count += 1
                test_metrics.latency_list.append(result['latency'])
                test_metrics.total_tokens += result['total_tokens']

                if result['ttft'] is not None:
                    test_metrics.ttft_list.append(result['ttft'])

                if result['tpot'] is not None:
                    test_metrics.tpot_list.append(result['tpot'])
            else:
                test_metrics.failure_count += 1

            # Live progress report every 10 completed requests.
            if test_metrics.total_requests % 10 == 0:
                current_time = time.perf_counter()
                time_elapsed = current_time - last_report_time
                # Guard against a (theoretical) zero-length interval.
                current_tps = 10 / time_elapsed if time_elapsed > 0 else float('inf')
                test_metrics.throughput_list.append(current_tps)

                # BUG FIX: a truthiness test treated a legitimate 0.0 TTFT
                # average as "no data"; compare against None instead.
                ttft_avg = safe_mean(test_metrics.ttft_list)
                ttft_display = f"{ttft_avg*1000:.1f}ms" if ttft_avg is not None else "N/A"

                print(f"⏳ 已完成: {test_metrics.total_requests}/{total_requests} | "
                      f"成功率: {test_metrics.success_count/test_metrics.total_requests:.1%} | "
                      f"实时TPS: {current_tps:.1f} | "
                      f"平均TTFT: {ttft_display}")
                last_report_time = current_time

    # ========== Final stress-test report ==========
    total_duration = time.perf_counter() - start_time
    print("\n🔍 压力测试报告")
    print(f"总请求数: {test_metrics.total_requests}")
    # Guard: with zero requests there is nothing to report (and the
    # success-rate division below would crash).
    if test_metrics.total_requests == 0:
        return
    print(f"成功请求: {test_metrics.success_count} ({test_metrics.success_count/test_metrics.total_requests:.1%})")
    print(f"失败请求: {test_metrics.failure_count}")

    if test_metrics.success_count > 0:
        print("\n⏱️ 延迟统计:")
        print(f"平均延迟: {safe_mean(test_metrics.latency_list) or 0:.2f}s")
        print(f"P95延迟: {np.percentile(test_metrics.latency_list, 95) if test_metrics.latency_list else 0:.2f}s")
        print(f"最大延迟: {max(test_metrics.latency_list) if test_metrics.latency_list else 0:.2f}s")

        # TTFT statistics (only when samples exist)
        if test_metrics.ttft_list:
            print(f"\n⏳ 首Token延迟(TTFT):")
            print(f"平均值: {safe_mean(test_metrics.ttft_list)*1000:.2f}ms")
            print(f"P95值: {np.percentile(test_metrics.ttft_list, 95)*1000:.2f}ms")
        else:
            print("\n⚠️ 无有效TTFT数据")

        # TPOT statistics (only when samples exist)
        if test_metrics.tpot_list:
            print(f"\n⚡ Token生成速率(TPOT):")
            print(f"平均值: {safe_mean(test_metrics.tpot_list)*1000:.2f}ms/token")
            print(f"P95值: {np.percentile(test_metrics.tpot_list, 95)*1000:.2f}ms/token")
        else:
            print("\n⚠️ 无有效TPOT数据")

        # Throughput statistics.
        # BUG FIX: token throughput was previously divided by the slowest
        # single request's latency (wrapped in an unnecessary Decimal),
        # not by the total wall-clock duration of the whole run.
        print(f"\n🚦 系统吞吐量:")
        print(f"请求吞吐量(RPS): {test_metrics.success_count/total_duration:.1f} req/s")
        print(f"Token吞吐量: {test_metrics.total_tokens/total_duration:.1f} tokens/s")

def single_request(payload):
    """Send one chat request, print the response, and print a performance report.

    Handles streaming and non-streaming modes (module-level ``stream`` flag,
    which must match ``payload["stream"]``).

    Returns:
        str | None: the full generated text, or None when the request fails.
    """
    try:
        # Performance-metric container.
        metrics = {
            'ttft': None,      # Time To First Token
            'tpot_avg': None,  # mean Time Per Output Token
            'latency': None,   # total inference latency (input -> last token)
            'tps': None,       # throughput, tokens per second
            'total_tokens': 0  # total generated tokens
        }

        start_time = time.perf_counter()  # high-resolution timer
        response = requests.post(
            API_URL,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {API_KEY}",
                "Accept": "application/json",
            },
            json=payload,
            stream=stream,
            timeout=3000
        )
        response.raise_for_status()

        full_content = ""
        first_token_received = False

        if stream:
            for line in response.iter_lines():
                if line:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        event_data = decoded_line[6:]

                        if event_data == "[DONE]":
                            break

                        try:
                            json_data = json.loads(event_data)
                            # BUG FIX: removed a leftover debug print that
                            # dumped every raw SSE chunk to stdout.
                            if json_data.get('choices'):
                                delta = json_data['choices'][0].get('delta', {})
                                content = delta.get('content', '')
                                full_content += content

                                # One SSE chunk is counted as one token.
                                metrics['total_tokens'] += 1

                                # Record the arrival of the first token.
                                if not first_token_received:
                                    metrics['ttft'] = time.perf_counter() - start_time
                                    first_token_received = True
                        except json.JSONDecodeError:
                            print(f"\nJSON解析错误: {event_data}")

            # ========== Streaming-mode metric computation ==========
            # BUG FIX: latency was only recorded when at least one token
            # arrived, leaving None and crashing the report formatting
            # below with an uncaught TypeError on an empty stream.
            metrics['latency'] = time.perf_counter() - start_time
            if metrics['total_tokens'] > 0:
                # TPOT excludes the wait for the first token.
                if metrics['total_tokens'] > 1:
                    generation = metrics['latency'] - metrics['ttft']
                    metrics['tpot_avg'] = generation / (metrics['total_tokens'] - 1)
                if metrics['latency'] > 0:
                    metrics['tps'] = metrics['total_tokens'] / metrics['latency']
        else:
            # ========== Non-streaming-mode metric computation ==========
            json_data = response.json()
            full_content = json_data['choices'][0]['message']['content']
            metrics['total_tokens'] = json_data['usage']['completion_tokens']

            # Non-streaming mode cannot measure TTFT/TPOT; only totals.
            metrics['latency'] = time.perf_counter() - start_time
            metrics['tps'] = metrics['total_tokens'] / metrics['latency'] if metrics['latency'] > 0 else 0
            print(full_content)

        # ========== Performance report ==========
        print("\n\n[性能指标报告]\n")
        print(f"总生成Token数: {metrics['total_tokens']}")
        if metrics['ttft'] is not None:
            print(f"首Token延迟(TTFT): {metrics['ttft']*1000:.2f}ms")
        if metrics['tpot_avg'] is not None:
            print(f"平均Token生成时间(TPOT): {metrics['tpot_avg']*1000:.2f}ms")
        print(f"总推理延迟: {metrics['latency']:.2f}s")
        # BUG FIX: tps can legitimately be None (empty stream); fall back to 0.
        tps_value = metrics['tps'] if metrics['tps'] is not None else 0.0
        print(f"吞吐量(TPS): {tps_value:.2f} tokens/s\n")

        return full_content

    except requests.exceptions.RequestException as e:
        print(f"请求失败: {str(e)}")
        return None


def single_chat_test(text):
    """Send one text-only chat request using *text* as the prompt."""
    single_request(build_message(text))

def stress_chat_test(text):
    """Stress-test the API with a text prompt (5 workers, 15 requests)."""
    run_benchmark(
        concurrency=5,
        total_requests=15,
        payload=build_message(text),
    )

def single_img_test(text, img_path):
    """Send one image+text chat request."""
    single_request(build_img_message(text, img_path))

def stress_img_test(text, img_path):
    """Stress-test the API with an image+text payload (10 workers, 30 requests)."""
    payload = build_img_message(text, img_path)
    # One request before the benchmark loop.
    # NOTE(review): stress_chat_test has no such warm-up call — confirm this
    # extra single_request is an intentional warm-up and not a leftover.
    single_request(payload)
    run_benchmark(
        concurrency=10,
        total_requests=30,
        payload=payload,
    )

# Program entry point
if __name__ == "__main__":
    # NOTE(review): rebinds the module-level flag already set to True at
    # import time — redundant as written, but kept as the single switch point.
    stream = True
    single_chat_test("我下周想去厦门玩，请帮我整理一个三天的旅游攻略。")

    # stress_chat_test("我下周想去厦门玩，请帮我整理一个三天的旅游攻略。")