#!/usr/bin/env python3
import os
import re
import json
import time
import base64
import argparse
import numpy as np
import requests
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed
import matplotlib.pyplot as plt
from tabulate import tabulate
import signal
import sys
import glob
import shutil
from datetime import datetime
import logging

# Global matplotlib font configuration: prefer CJK-capable sans-serif fonts
# so the Chinese axis labels/titles in plot_metrics render correctly.
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'WenQuanYi Micro Hei', 'sans-serif']
plt.rcParams['axes.unicode_minus'] = False  # render "-" correctly with CJK fonts

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Constants
MAX_CONCURRENCY = 100  # NOTE(review): not referenced anywhere in this script — confirm before removing
BENCHMARK_DIR = "vl_benchmark"
RESULTS_FILE = os.path.join(BENCHMARK_DIR, "results.json")

# API configuration — populated at startup by parse_service_config()
API_URL = ""
MODEL_NAME = ""
API_HEADERS = {"Content-Type": "application/json"}

# Fixed path of the MindIE service configuration file
SERVICE_CONF_PATH = "/usr/local/Ascend/mindie/latest/mindie-service/conf/config.json"

# Default prompt used when --prompt is not supplied
DEFAULT_PROMPT = "详细解释图片里面的内容，尝试解释局部特征和特点"

def signal_handler(sig, frame):
    """SIGINT handler: announce the interruption and terminate with exit code 0."""
    logger.info("\n🛑 用户中断 (Ctrl+C), 清理中...")
    raise SystemExit(0)

def parse_service_config():
    """Read the MindIE service config file and populate API_URL / MODEL_NAME.

    Returns:
        True when both values were extracted, False when the file is
        missing or does not have the expected structure.
    """
    global API_URL, MODEL_NAME

    try:
        with open(SERVICE_CONF_PATH, 'r') as fh:
            conf = json.load(fh)

        server = conf['ServerConfig']
        API_URL = f"http://{server['ipAddress']}:{server['port']}/v1/chat/completions"

        # Only the first deployed model is benchmarked.
        MODEL_NAME = conf['BackendConfig']['ModelDeployConfig']['ModelConfig'][0]['modelName']

        logger.info(f"✅ 服务配置解析成功: API_URL={API_URL}, MODEL_NAME={MODEL_NAME}")
        return True
    except Exception as e:
        logger.error(f"❌ 解析服务配置失败: {str(e)}")
        logger.error(f"请确认配置文件存在: {SERVICE_CONF_PATH}")
        return False

def prepare_benchmark_env():
    """Ensure the benchmark output directory exists (idempotent); always returns True."""
    target = os.path.abspath(BENCHMARK_DIR)
    os.makedirs(BENCHMARK_DIR, exist_ok=True)
    logger.info(f"✅ 基准测试目录已创建: {target}")
    return True

def encode_image(image_path):
    """Return the file at *image_path* as a base64 string, or None on read failure."""
    try:
        with open(image_path, "rb") as img:
            raw = img.read()
    except Exception as e:
        logger.error(f"❌ 图像编码失败: {image_path}, 错误: {str(e)}")
        return None
    return base64.b64encode(raw).decode('utf-8')

def load_dataset(dataset_dir, max_samples=None):
    """Recursively collect image files under *dataset_dir*.

    Extension matching is case-insensitive (so `.JPG` is found too) and the
    result is sorted, making repeated runs — and the subset picked by
    *max_samples* — deterministic; the original glob order was OS-dependent,
    which made benchmark runs non-reproducible.

    Args:
        dataset_dir: root directory to scan.
        max_samples: optional cap on the number of images returned.

    Returns:
        Sorted list of image paths, or [] when none are found.
    """
    image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.gif'}
    image_files = sorted(
        path
        for path in glob.glob(os.path.join(dataset_dir, '**', '*'), recursive=True)
        if os.path.isfile(path) and os.path.splitext(path)[1].lower() in image_extensions
    )

    if not image_files:
        logger.error(f"❌ 在 {dataset_dir} 中未找到图像文件")
        return []

    if max_samples and len(image_files) > max_samples:
        image_files = image_files[:max_samples]

    logger.info(f"📊 找到 {len(image_files)} 张图片")
    return image_files

def create_payload(image_b64, prompt, model=None):
    """Build an OpenAI-style streaming chat-completions payload.

    Args:
        image_b64: base64-encoded image data (embedded as a data URL;
            always labelled image/jpeg regardless of the real format —
            NOTE(review): presumably the server ignores the MIME subtype).
        prompt: the text instruction sent alongside the image.
        model: optional model name override; defaults to the module-level
            MODEL_NAME resolved from the service config (backward compatible).

    Returns:
        dict ready to be posted as the JSON request body.
    """
    return {
        "model": model if model is not None else MODEL_NAME,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
                    {"type": "text", "text": prompt}
                ]
            }
        ],
        "stream": True
    }

def send_request(payload, timeout=60):
    """POST *payload* to the streaming chat-completions endpoint and measure latency.

    Fixes over the previous version:
    - the HTTP response is now closed deterministically (``with`` block);
      before, the connection leaked on both success and failure paths;
    - the dead bare ``except:`` around the GBK fallback is gone —
      ``decode('gbk', errors='replace')`` can never raise, so the extra
      fallback was unreachable.

    Args:
        payload: JSON-serializable request body (see create_payload).
        timeout: per-request timeout in seconds.

    Returns:
        On success: dict with "success", "start_time" (perf_counter at request
        start), "ttft"/"tpot"/"total_time" in ms, "token_count" and
        "generated_text". On any failure: {"success": False, "error": ...}.
    """
    start_time = time.perf_counter()  # actual moment this worker issued the request
    first_token_time = None
    token_times = []
    generated_text = ""
    token_count = 0

    try:
        with requests.post(
            API_URL,
            headers=API_HEADERS,
            json=payload,
            stream=True,
            timeout=timeout
        ) as response:
            response.raise_for_status()

            # Consume the SSE stream line by line; each "data:" line carries
            # one delta chunk, terminated by a literal [DONE] marker.
            for chunk in response.iter_lines():
                if not chunk:
                    continue
                chunk_time = time.perf_counter()
                try:
                    chunk_str = chunk.decode('utf-8')
                except UnicodeDecodeError:
                    # errors='replace' cannot raise, so no further fallback needed.
                    chunk_str = chunk.decode('gbk', errors='replace')

                if not chunk_str.startswith('data:'):
                    continue
                chunk_data = chunk_str[5:].strip()
                if chunk_data == '[DONE]':
                    break

                try:
                    data = json.loads(chunk_data)
                except json.JSONDecodeError:
                    continue  # tolerate malformed / keep-alive lines
                if 'choices' in data and data['choices']:
                    choice = data['choices'][0]
                    if 'delta' in choice and 'content' in choice['delta']:
                        generated_text += choice['delta']['content']
                        token_count += 1
                        if first_token_time is None:
                            first_token_time = chunk_time
                        token_times.append(chunk_time)

            # Measured before the connection teardown so close() cost is excluded.
            end_time = time.perf_counter()

        ttft = (first_token_time - start_time) * 1000 if first_token_time else 0
        total_time = (end_time - start_time) * 1000

        # TPOT: mean inter-token gap after the first token.
        if token_count > 1 and first_token_time:
            tpot = (end_time - first_token_time) * 1000 / (token_count - 1)
        else:
            tpot = 0

        return {
            "success": True,
            "start_time": start_time,  # lets the caller account for queueing delay
            "ttft": ttft,
            "tpot": tpot,
            "token_count": token_count,
            "total_time": total_time,
            "generated_text": generated_text
        }
    except Exception as e:
        logger.error(f"❌ 请求失败: {str(e)}")
        return {
            "success": False,
            "error": str(e)
        }

def run_concurrency_test(image_files, prompt, concurrency, output_dir):
    """Run one benchmark pass at a single concurrency level.

    Encodes all images up front (so encoding cost is excluded from the
    measured wall time), fans the requests out over a thread pool, then
    aggregates TTFT/TPOT/throughput statistics and persists them to
    <output_dir>/concurrency_<n>/results.json.

    Fixes over the previous version:
    - requests are submitted directly instead of via
      ``lambda x: (submit_time, send_request(x))`` — that lambda captured
      the loop variable late, so every task saw the *last* submit_time
      (and the wrapped value was discarded anyway);
    - the queue-aware TTFT had its sign inverted: it added
      (submit_time - start_time), which is <= 0 and therefore *subtracted*
      the queueing delay. The correct first-token latency relative to
      submission is ttft + (start_time - submit_time).

    Returns:
        metrics dict, or None when no image could be encoded.
    """
    logger.info(f"🚀 开始并发测试: 并发数={concurrency}")

    # Pre-build all payloads.
    test_data = []
    for image_path in image_files:
        image_b64 = encode_image(image_path)
        if image_b64:
            test_data.append({
                "image_path": image_path,
                "payload": create_payload(image_b64, prompt)
            })

    if not test_data:
        logger.error("❌ 无有效测试数据")
        return None

    start_time = time.time()
    results = []
    with ThreadPoolExecutor(max_workers=concurrency) as executor:
        # Map each future to its submission timestamp so TTFT can include
        # the time a request spent queued behind busy workers.
        futures = {}
        for item in test_data:
            future = executor.submit(send_request, item["payload"])
            futures[future] = time.perf_counter()

        for future in tqdm(as_completed(futures), total=len(futures), desc=f"并发 {concurrency}"):
            result = future.result()
            result.setdefault("submit_time", futures[future])
            results.append(result)

    total_wall_time = time.time() - start_time

    # Aggregate per-request metrics.
    ttfts = []
    tpots = []
    token_counts = []
    successful_requests = 0

    for result in results:
        if not result["success"]:
            continue
        if "submit_time" in result and "start_time" in result:
            # Queue-inclusive TTFT: first token measured from submission,
            # i.e. in-request TTFT plus the worker-queue wait.
            actual_ttft = result["ttft"] + (result["start_time"] - result["submit_time"]) * 1000
            ttfts.append(actual_ttft)
        else:
            # Fallback when timing info is incomplete.
            ttfts.append(result["ttft"])
        tpots.append(result["tpot"])
        token_counts.append(result["token_count"])
        successful_requests += 1

    # Summary statistics for this concurrency level.
    metrics = {
        "concurrency": concurrency,
        "total_requests": len(results),
        "successful_requests": successful_requests,
        "failed_requests": len(results) - successful_requests,
        "wall_time": total_wall_time,
        "ttft_avg": np.mean(ttfts) if ttfts else 0,
        "ttft_min": min(ttfts) if ttfts else 0,
        "ttft_max": max(ttfts) if ttfts else 0,
        "ttft_p75": np.percentile(ttfts, 75) if ttfts else 0,
        "ttft_p90": np.percentile(ttfts, 90) if ttfts else 0,
        "ttft_p99": np.percentile(ttfts, 99) if ttfts else 0,
        "tpot_avg": np.mean(tpots) if tpots else 0,
        "tpot_min": min(tpots) if tpots else 0,
        "tpot_max": max(tpots) if tpots else 0,
        "tpot_p75": np.percentile(tpots, 75) if tpots else 0,
        "tpot_p90": np.percentile(tpots, 90) if tpots else 0,
        "tpot_p99": np.percentile(tpots, 99) if tpots else 0,
        "total_tokens": sum(token_counts),
        "throughput": successful_requests / total_wall_time if total_wall_time > 0 else 0,
        "generate_speed": sum(token_counts) / total_wall_time if total_wall_time > 0 else 0,
        "results": results
    }

    # Persist the detailed results for this level.
    case_dir = os.path.join(output_dir, f"concurrency_{concurrency}")
    os.makedirs(case_dir, exist_ok=True)
    result_file = os.path.join(case_dir, "results.json")

    with open(result_file, 'w', encoding='utf-8') as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    logger.info(f"📝 结果已保存至: {result_file}")
    return metrics

def run_free_test(image_files, prompt, concurrencies, output_dir):
    """Free-form mode: benchmark each requested concurrency level in turn.

    Returns the list of metrics dicts for the levels that produced results.
    """
    logger.info(f"🔧 自由测试模式: 并发数={concurrencies}")

    collected = []
    for level in concurrencies:
        outcome = run_concurrency_test(image_files, prompt, level, output_dir)
        if outcome:
            collected.append(outcome)

    return collected

def plot_metrics(results, output_dir):
    """Plot TTFT / TPOT / throughput / generation speed against concurrency.

    Saves a 2x2 panel figure as performance_metrics.png under *output_dir*
    and returns its path, or None when there is nothing to plot.
    """
    if not results:
        return None

    x_vals = [r["concurrency"] for r in results]

    # (row, col, series, y-label, title) for each of the four panels.
    panels = [
        (0, 0, [r["ttft_avg"] for r in results], 'TTFT (ms)', '首Token时间 (平均)'),
        (0, 1, [r["tpot_avg"] for r in results], 'TPOT (ms)', '每个Token时间 (平均)'),
        (1, 0, [r["throughput"] for r in results], '请求数/秒', '吞吐量'),
        (1, 1, [r["generate_speed"] for r in results], 'Token/秒', '生成速度'),
    ]

    fig, axs = plt.subplots(2, 2, figsize=(15, 10))
    fig.suptitle(f'视觉语言模型性能指标', fontsize=16)

    for row, col, series, ylabel, title in panels:
        ax = axs[row, col]
        ax.plot(x_vals, series, 'o-')
        ax.set_xlabel('并发数')
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.grid(True)

    plt.tight_layout(rect=[0, 0, 1, 0.96])
    plot_path = os.path.join(output_dir, "performance_metrics.png")
    plt.savefig(plot_path)
    plt.close()

    logger.info(f"📊 性能图表已保存至: {plot_path}")
    return plot_path

def generate_report(results, output_dir):
    """Write a Markdown report and a JSON dump of *results*.

    Produces <output_dir>/report.md plus the module-level RESULTS_FILE and
    returns the report path; returns None (implicitly) when results is empty.
    """
    if not results:
        return

    # One summary-table row per concurrency level.
    summary = [
        {
            "并发数": m["concurrency"],
            "请求总数": m["total_requests"],
            "成功请求": m["successful_requests"],
            "失败请求": m["failed_requests"],
            "总Token数": m["total_tokens"],
            "TTFT平均(ms)": f"{m['ttft_avg']:.2f}",
            "TPOT平均(ms)": f"{m['tpot_avg']:.2f}",
            "吞吐量(req/s)": f"{m['throughput']:.2f}",
            "生成速度(token/s)": f"{m['generate_speed']:.2f}"
        }
        for m in results
    ]

    # Assemble the whole document in memory, then write it in one go.
    parts = [
        "# 视觉语言模型基准测试报告\n\n",
        f"**测试时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n",
        f"**API地址**: {API_URL}\n",
        f"**模型名称**: {MODEL_NAME}\n",
        f"**测试模式**: 自由模式\n",
        "## 性能摘要\n\n",
        tabulate(summary, headers="keys", tablefmt="github"),
        "\n\n",
        "## 详细指标\n",
    ]

    for m in results:
        parts.append(f"\n### 并发数: {m['concurrency']}\n")
        parts.append(f"- **请求总数**: {m['total_requests']}\n")
        parts.append(f"- **成功请求**: {m['successful_requests']}\n")
        parts.append(f"- **失败请求**: {m['failed_requests']}\n")
        parts.append(f"- **总Token数**: {m['total_tokens']}\n")
        parts.append(f"- **TTFT平均**: {m['ttft_avg']:.2f} ms\n")
        parts.append(f"- **TTFT P90**: {m['ttft_p90']:.2f} ms\n")
        parts.append(f"- **TTFT P99**: {m['ttft_p99']:.2f} ms\n")
        parts.append(f"- **TPOT平均**: {m['tpot_avg']:.2f} ms\n")
        parts.append(f"- **TPOT P90**: {m['tpot_p90']:.2f} ms\n")
        parts.append(f"- **TPOT P99**: {m['tpot_p99']:.2f} ms\n")
        parts.append(f"- **吞吐量**: {m['throughput']:.2f} req/s\n")
        parts.append(f"- **生成速度**: {m['generate_speed']:.2f} token/s\n")

        # Include one successful sample response, truncated to 500 characters.
        if "results" in m and m["results"]:
            parts.append("\n**示例响应**:\n```\n")
            sample = next((r for r in m["results"] if r.get("success") and r.get("generated_text")), None)
            if sample:
                text = sample["generated_text"]
                parts.append(text[:500] + ("..." if len(text) > 500 else ""))
            parts.append("\n```\n")

    report_path = os.path.join(output_dir, "report.md")
    with open(report_path, 'w', encoding='utf-8') as f:
        f.write("".join(parts))

    logger.info(f"📝 测试报告已保存至: {report_path}")

    # Machine-readable copy of the raw metrics.
    with open(RESULTS_FILE, 'w', encoding='utf-8') as f:
        json.dump({
            "results": results
        }, f, indent=2, ensure_ascii=False)

    return report_path

def main():
    """CLI entry point: parse arguments, probe the service, run the benchmark, report."""
    # Make Ctrl+C exit cleanly instead of dumping a traceback.
    signal.signal(signal.SIGINT, signal_handler)

    parser = argparse.ArgumentParser(description="视觉语言模型基准测试工具")
    parser.add_argument("--dataset_dir", type=str, required=True, help="数据集目录")
    parser.add_argument("--prompt", type=str, default=DEFAULT_PROMPT,
                        help=f"图片描述提示语 (默认: '{DEFAULT_PROMPT}')")
    parser.add_argument("--max_samples", type=int, default=None,
                        help="最大测试样本数 (默认: 全部图片)")
    parser.add_argument("--output_dir", type=str, default=BENCHMARK_DIR,
                        help="输出目录 (默认: 'vl_benchmark')")
    parser.add_argument("--concurrencies", type=str, default="1,5,10,20,50,100",
                        help="并发数列表(逗号分隔) (默认: '1,5,10,20,50,100')")
    args = parser.parse_args()

    # Abort early if the service config cannot be read or the env prepared.
    if not parse_service_config():
        sys.exit(1)
    if not prepare_benchmark_env():
        sys.exit(1)

    images = load_dataset(args.dataset_dir, args.max_samples)
    if not images:
        sys.exit(1)

    # Timestamped run directory keeps successive runs separate.
    run_stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    run_dir = os.path.join(args.output_dir, f"test_{run_stamp}")
    os.makedirs(run_dir, exist_ok=True)
    logger.info(f"📁 测试结果将保存至: {os.path.abspath(run_dir)}")

    levels = [int(c) for c in args.concurrencies.split(",")]

    logger.info(f"🚀 启动测试: 使用 {len(images)} 张图片, 并发数={levels}")
    results = run_free_test(images, args.prompt, levels, run_dir)

    plot_metrics(results, run_dir)
    generate_report(results, run_dir)

    logger.info("✅ 测试完成!")

# Script entry point.
if __name__ == "__main__":
    main()