import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import os
import time

def setup(rank, world_size):
    """Initialize the NCCL process group and bind this process to its GPU.

    Args:
        rank: This process's rank; also used as the local CUDA device index.
        world_size: Total number of participating processes (one per GPU).
    """
    # setdefault lets an externally supplied rendezvous address/port
    # (e.g. set by a cluster launcher) take precedence over the
    # single-node localhost defaults instead of being clobbered.
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "12355")
    # Select the device BEFORE init_process_group so NCCL communicators
    # are created against the correct GPU for this rank.
    torch.cuda.set_device(rank)
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def cleanup():
    """Tear down this process's distributed process group (pairs with setup)."""
    dist.destroy_process_group()

def benchmark_gemm(rank, world_size, M=8192, N=8192, K=8192, iterations=50, warmup=10):
    """Benchmark FP16 GEMM throughput on this process's GPU.

    Spawned once per GPU by mp.spawn. Rank 0 prints per-GPU and aggregate
    TFLOPS (the process group is initialized but no collectives are issued;
    each rank benchmarks independently).

    Args:
        rank: Process/GPU index assigned by mp.spawn.
        world_size: Total number of spawned processes.
        M, N, K: GEMM dimensions for (M x K) @ (K x N).
        iterations: Number of timed matmul iterations (must be > 0).
        warmup: Untimed iterations so cuBLAS can select kernels and
            GPU clocks can reach steady state.
    """
    setup(rank, world_size)
    device = torch.device(f'cuda:{rank}')

    # Allocate operands ONCE, outside the timed region. The previous version
    # re-created `a` and `b` inside the loop, so the measurement included
    # torch.randn allocation/RNG cost and understated true GEMM TFLOPS.
    a = torch.randn(M, K, device=device, dtype=torch.float16)
    b = torch.randn(K, N, device=device, dtype=torch.float16)
    # Reuse an output buffer so the timed loop does no allocations at all.
    out = torch.empty(M, N, device=device, dtype=torch.float16)

    # Warmup (untimed).
    for _ in range(warmup):
        torch.matmul(a, b, out=out)
    torch.cuda.synchronize()

    # Timed run. Kernels are queued asynchronously and a single synchronize
    # after the loop captures end-to-end time, avoiding the per-iteration
    # host/device round-trip the old per-iteration synchronize added.
    start_time = time.perf_counter()  # monotonic; preferred over time.time()
    for _ in range(iterations):
        torch.matmul(a, b, out=out)
    torch.cuda.synchronize()
    total_time = time.perf_counter() - start_time
    avg_time = total_time / iterations

    # One GEMM performs 2*M*K*N FLOPs (a multiply and an add per
    # inner-product term).
    flops_per_iter = 2 * M * K * N
    tflops = flops_per_iter / (avg_time * 1e12)

    if rank == 0:
        print(f"Matrix Multiplication Benchmark ({M}x{K} @ {K}x{N})")
        print(f"  Data Type: FP16")
        print(f"  Average Time per GEMM: {avg_time*1000:.2f} ms")
        print(f"  Achieved Performance: {tflops:.2f} TFLOPS/GPU")
        print(f"  Aggregate (World Size {world_size}): {tflops * world_size:.2f} TFLOPS")

    cleanup()

def run_benchmark():
    """Spawn one benchmark process per visible GPU and run the GEMM test.

    Raises:
        RuntimeError: If no CUDA devices are visible — fails fast with a
            clear message instead of letting mp.spawn crash with nprocs=0.
    """
    world_size = torch.cuda.device_count()
    if world_size == 0:
        raise RuntimeError(
            "No CUDA devices available; this benchmark requires at least one GPU."
        )
    print(f"Running GEMM benchmark on {world_size} GPU(s)")
    mp.spawn(benchmark_gemm, args=(world_size,), nprocs=world_size, join=True)

# Entry point: launch one benchmark process per visible GPU.
if __name__ == "__main__":
    run_benchmark()