#!/usr/bin/env python3
"""
性能基准测试脚本
测试不同配置下的性能
"""

import os
import json
import argparse
import subprocess
from pathlib import Path
from typing import List, Dict
import time


class BenchmarkConfig:
    """A single benchmark scenario: batch size, sequence length, GPU count and model type.

    str() of an instance yields a unique, filesystem-safe identifier that the
    rest of the script uses to name result directories and config files.
    """

    def __init__(self,
                 batch_size: int,
                 seq_length: int,
                 num_gpus: int = 1,
                 model_type: str = "pytorch"):
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.num_gpus = num_gpus
        self.model_type = model_type

    def __str__(self):
        # Path-friendly ID, e.g. "bs4_seq256_gpu1_pytorch".
        parts = (f"bs{self.batch_size}",
                 f"seq{self.seq_length}",
                 f"gpu{self.num_gpus}",
                 self.model_type)
        return "_".join(parts)


def create_benchmark_config(base_config_path: str,
                            benchmark_config: "BenchmarkConfig",
                            output_path: str,
                            results_root: str = "../results/benchmark") -> str:
    """Derive a per-benchmark JSON config from a base config and write it to disk.

    Args:
        base_config_path: Path to the base JSON config (must contain the
            'data', 'training' and 'performance' sections this function edits).
        benchmark_config: The scenario to encode; only batch_size, seq_length
            and str() are used here.
        output_path: Where to write the derived config; parent directories
            are created as needed.
        results_root: Root directory written into the config's output paths.
            Defaults to the previously hard-coded "../results/benchmark",
            so existing callers are unaffected.

    Returns:
        output_path, for convenient chaining by the caller.
    """
    with open(base_config_path, 'r', encoding='utf-8') as f:
        config = json.load(f)

    # Override the knobs that vary per benchmark; everything else passes through.
    config['data']['block_size'] = benchmark_config.seq_length
    config['training']['per_device_train_batch_size'] = benchmark_config.batch_size
    config['training']['output_dir'] = f"{results_root}/{benchmark_config}"
    config['performance']['metrics_file'] = f"{results_root}/{benchmark_config}/metrics.json"

    # Bug fix: os.makedirs("") raises FileNotFoundError when output_path has
    # no directory component, so only create the parent when one exists.
    parent_dir = os.path.dirname(output_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=2)

    return output_path


def run_benchmark(config_path: str,
                 num_gpus: int,
                 model_type: str,
                 max_steps: int = 100):
    """Run a single benchmark as a child process and report success.

    Launches ``train_<model_type>.py`` directly with ``python`` for a single
    GPU, or via ``torchrun`` for multi-GPU runs.

    Args:
        config_path: Path to the JSON config produced by create_benchmark_config.
        num_gpus: Number of GPUs; values > 1 switch the launcher to torchrun.
        model_type: Suffix selecting the training script, e.g. "pytorch" or "triton".
        max_steps: Step cap for the run, exported to the child process.

    Returns:
        True if the child exited with status 0, False on a non-zero exit.
    """
    script_name = f"train_{model_type}.py"

    env = os.environ.copy()
    env['NUM_GPUS'] = str(num_gpus)
    # Bug fix: max_steps was accepted (and passed by main) but never used.
    # Forward it the same way NUM_GPUS is forwarded.
    # NOTE(review): confirm the train_*.py scripts actually read MAX_STEPS
    # from the environment; if they take a CLI flag instead, append it to cmd.
    env['MAX_STEPS'] = str(max_steps)

    if num_gpus > 1:
        # Multi-GPU runs need torchrun to set up the distributed environment.
        cmd = [
            "torchrun",
            f"--nproc_per_node={num_gpus}",
            script_name,
            "--config", config_path,
        ]
    else:
        cmd = [
            "python",
            script_name,
            "--config", config_path,
        ]

    print(f"\nRunning: {' '.join(cmd)}")

    try:
        result = subprocess.run(
            cmd,
            env=env,
            check=True,  # raise CalledProcessError on non-zero exit
            capture_output=True,
            text=True
        )
        print(result.stdout)
        return True
    except subprocess.CalledProcessError as e:
        # Surface the child's captured output so failures can be diagnosed
        # from this script's log alone.
        print(f"Error running benchmark: {e}")
        print(e.stdout)
        print(e.stderr)
        return False


def main():
    """CLI entry point: enumerate benchmark combinations, run each, save a summary.

    Builds the cartesian product of model types x GPU counts x batch sizes x
    sequence lengths, generates a config and launches a training run for each
    combination, then writes a JSON summary of success/duration per run.
    """
    parser = argparse.ArgumentParser(description="Run performance benchmarks")
    parser.add_argument("--batch-sizes", type=int, nargs="+",
                       default=[1, 2, 4, 8],
                       help="Batch sizes to test")
    parser.add_argument("--seq-lengths", type=int, nargs="+",
                       default=[128, 256, 512, 1024],
                       help="Sequence lengths to test")
    parser.add_argument("--num-gpus", type=int, nargs="+",
                       default=[1],
                       help="Number of GPUs to test")
    parser.add_argument("--model-types", type=str, nargs="+",
                       default=["pytorch", "triton"],
                       help="Model types to test")
    parser.add_argument("--max-steps", type=int, default=100,
                       help="Maximum training steps per benchmark")
    parser.add_argument("--output-dir", type=str,
                       default="../results/benchmark",
                       help="Output directory for benchmark results")

    args = parser.parse_args()

    # Full cartesian product of the requested settings.
    benchmarks = [
        BenchmarkConfig(batch_size=batch_size,
                        seq_length=seq_length,
                        num_gpus=num_gpus,
                        model_type=model_type)
        for model_type in args.model_types
        for num_gpus in args.num_gpus
        for batch_size in args.batch_sizes
        for seq_length in args.seq_lengths
    ]

    print(f"\nTotal benchmarks to run: {len(benchmarks)}")
    print("="*80)

    # Bug fix: the per-benchmark config path previously hard-coded
    # "../results/benchmark", silently ignoring --output-dir.
    output_dir = Path(args.output_dir)

    results = []
    for i, benchmark in enumerate(benchmarks, 1):
        print(f"\n[{i}/{len(benchmarks)}] Running benchmark: {benchmark}")
        print("-"*80)

        # Derive this run's config from the base config for its model type.
        base_config = f"config_{benchmark.model_type}.json"
        config_path = str(output_dir / "configs" / f"config_{benchmark}.json")
        create_benchmark_config(base_config, benchmark, config_path)

        # Time the whole subprocess, launch overhead included.
        start_time = time.time()
        success = run_benchmark(config_path, benchmark.num_gpus,
                              benchmark.model_type, args.max_steps)
        duration = time.time() - start_time

        results.append({
            "config": str(benchmark),
            "success": success,
            "duration": duration
        })

        print(f"Duration: {duration:.2f}s")

    # Bug fix: create the output directory before writing the summary —
    # previously the write failed whenever --output-dir was not the default
    # path already created as a side effect of config generation.
    output_dir.mkdir(parents=True, exist_ok=True)
    summary_path = output_dir / "benchmark_summary.json"
    with open(summary_path, 'w') as f:
        json.dump(results, f, indent=2)

    print("\n" + "="*80)
    print("Benchmark completed!")
    print(f"Summary saved to {summary_path}")
    print("="*80)

    # Print the overall success rate (argparse's nargs="+" guarantees at
    # least one benchmark, so len(results) is never zero here).
    success_count = sum(1 for r in results if r["success"])
    print(f"\nSuccess rate: {success_count}/{len(results)} ({100*success_count/len(results):.1f}%)")


# Run only when executed as a script, so the module can be imported
# (e.g. for testing) without launching benchmarks as a side effect.
if __name__ == "__main__":
    main()

