import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import os
import time

def setup(rank, world_size):
    """Join the NCCL process group and bind this process to its GPU.

    Uses a fixed localhost rendezvous address/port for all ranks; each
    rank then claims the CUDA device whose index matches its rank.
    """
    os.environ.update({"MASTER_ADDR": "localhost", "MASTER_PORT": "12355"})
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

def cleanup():
    """Tear down the default distributed process group for this rank."""
    dist.destroy_process_group()

def benchmark_gemm(rank, world_size, M=8192, N=8192, K=8192,
                   total_time_sec=30.0, min_iters=20, warmup_time=2.0,
                   peak_tflops=330.0):
    """Run a sustained FP16 GEMM benchmark on this rank's GPU and report TFLOPS.

    Repeatedly computes C = A @ B with half-precision operands for at least
    ``total_time_sec`` seconds (and at least ``min_iters`` iterations) after a
    ``warmup_time``-second warmup, then prints per-rank timing statistics.
    Rank 0 additionally prints a summary table.

    Args:
        rank: Process rank; also selects device ``cuda:{rank}``.
        world_size: Total number of participating processes/GPUs.
        M, N, K: GEMM dimensions — A is (M, K), B is (K, N).
        total_time_sec: Target wall-clock duration of the timed phase (seconds).
        min_iters: Minimum number of timed iterations (prevents early exit).
        warmup_time: Warmup duration in seconds (excluded from timing).
        peak_tflops: Theoretical FP16 peak used for the efficiency line.
            Default 330.0 ≈ RTX 4090 Tensor Core peak; adjust per GPU model.
    """
    setup(rank, world_size)
    try:
        device = torch.device(f'cuda:{rank}')

        # Allocate the operands ONCE and reuse them. The previous version
        # regenerated A and B inside the timed region, so the measured
        # per-iteration time (and hence TFLOPS) included randn() overhead.
        a = torch.randn(M, K, device=device, dtype=torch.float16)
        b = torch.randn(K, N, device=device, dtype=torch.float16)

        # -----------------------------
        # Warmup phase: run for a fixed wall-clock duration
        # -----------------------------
        print(f"[Rank {rank}] Starting warmup for {warmup_time} seconds...")
        start_warmup = time.time()
        while time.time() - start_warmup < warmup_time:
            torch.matmul(a, b)
            torch.cuda.synchronize()

        # -----------------------------
        # Timed phase: run at least total_time_sec seconds AND min_iters iterations
        # -----------------------------
        timing_times = []  # per-iteration durations, kept for jitter analysis
        iter_count = 0
        start_run = time.time()

        print(f"[Rank {rank}] Starting timed benchmark for ~{total_time_sec}s...")

        while time.time() - start_run < total_time_sec or iter_count < min_iters:
            start_iter = time.time()
            torch.matmul(a, b)
            # Synchronize so the clock stops only after the kernel finishes.
            torch.cuda.synchronize()
            timing_times.append(time.time() - start_iter)
            iter_count += 1

        total_elapsed = time.time() - start_run

        # -----------------------------
        # Statistics
        # -----------------------------
        avg_time_per_iter = total_elapsed / iter_count
        std_time = torch.tensor(timing_times).std().item()
        min_time = min(timing_times)
        max_time = max(timing_times)

        # FLOPs per GEMM: one multiply + one add per inner-product element.
        flops_per_iter = 2 * M * K * N
        tflops = flops_per_iter / (avg_time_per_iter * 1e12)  # TFLOPS per GPU

        # Per-rank result line.
        print(f"[Rank {rank}] Done. {iter_count} iters in {total_elapsed:.3f}s | "
              f"Avg: {avg_time_per_iter*1000:.2f}ms | "
              f"TFLOPS: {tflops:.2f}")

        # -----------------------------
        # Rank-0 summary
        # NOTE(review): only rank 0's own numbers are shown; a complete
        # report would gather per-rank stats via dist.gather/all_gather.
        # -----------------------------
        if rank == 0:
            print("\n" + "="*60)
            print("🔷 FINAL MULTI-GPU GEMM BENCHMARK RESULTS")
            print("="*60)
            print(f"Matrix Size      : {M} × {K} @ {K} × {N}")
            # Fixed: previous label said "CUDA Autocast", but the code uses
            # explicit torch.float16 tensors, not autocast.
            print(f"Data Type        : FP16 (torch.float16)")
            print(f"Benchmark Time   : ~{total_time_sec} sec per GPU")
            print(f"Min Iterations   : {min_iters}")
            print(f"World Size       : {world_size}")
            print(f"GPUs Used        : {list(range(world_size))}")
            print(f"Device Name      : {torch.cuda.get_device_name(0)}")
            print("-"*60)
            print(f"Average Time     : {avg_time_per_iter*1000:.2f} ms/iter")
            print(f"Std Deviation    : {std_time*1000:.2f} ms")
            print(f"Min / Max Time   : {min_time*1000:.2f} / {max_time*1000:.2f} ms")
            print(f"Iterations Count : {iter_count}")
            print(f"Single GPU TFLOPS: {tflops:.2f}")
            print(f"Aggregate TFLOPS : {tflops * world_size:.2f}")
            # Efficiency against the (configurable) theoretical peak; allow a
            # 5% margin before flagging the measurement as suspicious.
            if tflops <= peak_tflops * 1.05:
                utilization = tflops / peak_tflops * 100
                print(f"Efficiency       : {utilization:.1f}% of theoretical peak ({peak_tflops:.0f} TFLOPS)")
            else:
                print(f"Efficiency       : Over {peak_tflops:.0f} TFLOPS (possible spiking)")
    finally:
        # Always release the process group, even if the benchmark raises.
        cleanup()


def run_benchmark(M=8192, N=8192, K=8192, total_time_sec=30.0):
    """Spawn one benchmark worker per visible CUDA device.

    Args:
        M, N, K: GEMM dimensions forwarded to each worker.
        total_time_sec: Timed-phase duration per GPU, forwarded to workers.
            Previously the banner hard-coded "~30 seconds" regardless of the
            workers' actual setting; it now reflects this parameter.

    Raises:
        RuntimeError: If no CUDA device is available.
    """
    world_size = torch.cuda.device_count()
    if world_size < 1:
        raise RuntimeError("No CUDA devices available.")

    print(f"🚀 Starting GEMM Benchmark on {world_size} GPU(s)")
    print(f"   Total runtime target: ~{total_time_sec:.0f} seconds per GPU")

    # mp.spawn invokes benchmark_gemm(rank, *args) in each child process:
    # the rank is supplied automatically as the first positional argument.
    mp.spawn(
        benchmark_gemm,
        args=(world_size, M, N, K, total_time_sec),
        nprocs=world_size,
        join=True
    )


# Script entry point: launch the multi-process benchmark only when executed
# directly (mp.spawn requires this guard so child processes can re-import
# the module without re-running the launcher).
if __name__ == "__main__":
    run_benchmark()