import torch
import torch.distributed as dist
import os
import time

def setup_distributed():
    """
    Initialize the PyTorch distributed environment for a 2-process NCCL run.

    Reads RANK and WORLD_SIZE from the environment (as set by torchrun),
    binds this process to its GPU, and creates the NCCL process group.

    Returns:
        tuple[int, int]: (rank, world_size) of the current process.

    Raises:
        RuntimeError: if torch.distributed is not available.
        ValueError: if WORLD_SIZE is not exactly 2.
        KeyError: if RANK or WORLD_SIZE is missing from the environment.
    """
    if not dist.is_available():
        raise RuntimeError("Distributed package is not available.")

    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    # Fail fast BEFORE creating the process group. (Previously this check
    # ran after init_process_group, leaving an initialized group behind
    # whenever the world size was wrong.)
    if world_size != 2:
        raise ValueError("This benchmark is designed for exactly 2 GPUs/processes.")

    # Bind this process to its GPU before creating the NCCL group — the
    # order recommended by the PyTorch distributed docs, so ranks do not
    # default to GPU 0 during group initialization.
    torch.cuda.set_device(rank)

    dist.init_process_group(backend="nccl", world_size=world_size, rank=rank)

    # Print device info once, from rank 0 only.
    if rank == 0:
        print(f"Initialized on {world_size} GPUs. Rank 0 on {torch.cuda.get_device_name(0)}, Rank 1 on {torch.cuda.get_device_name(1)}")

    return rank, world_size

def format_size(size_in_bytes):
    """Format a byte count as a human-readable string (B, KB, MB, GB)."""
    # Check the largest unit first; fall through to raw bytes.
    for exponent, unit in ((3, "GB"), (2, "MB"), (1, "KB")):
        threshold = 1024 ** exponent
        if size_in_bytes >= threshold:
            return f"{size_in_bytes / threshold:.2f} {unit}"
    return f"{size_in_bytes} B"

def format_numel(num_elements):
    """Format an element count as a human-readable string (plain, K, M, G)."""
    # Check the largest unit first; fall through to the raw count.
    for exponent, suffix in ((3, "G"), (2, "M"), (1, "K")):
        scale = 1000 ** exponent
        if num_elements >= scale:
            return f"{num_elements / scale:.0f}{suffix}"
    return f"{num_elements}"

def run_benchmark(num_elements, rank, dtype=torch.float16):
    """
    Run one point-to-point send/recv of `num_elements` elements from rank 0
    (sender) to the other rank (receiver) and return the two timed phases.

    Must be called collectively by both ranks of an initialized 2-process
    group (see setup_distributed); it contains a dist.barrier().

    Args:
        num_elements: number of tensor elements to transfer.
        rank: this process's rank; 0 sends, any other rank receives.
        dtype: element dtype of the transferred tensor.

    Returns:
        (init_time_ms, wait_time_ms): elapsed milliseconds, measured with
        CUDA events, for the isend/irecv call phase and the req.wait() phase.

    NOTE(review): CUDA events measure GPU-stream activity between the two
    record() calls, not host-side call latency — so "init time" reflects
    whatever work isend/irecv enqueued on the stream rather than the Python
    call overhead. If wall-clock call time was intended, time.perf_counter
    (the `time` module is imported at file level but unused) would be the
    tool — confirm the intended semantics.
    """
    # CUDA events used to time the two phases.
    start_init = torch.cuda.Event(enable_timing=True)
    end_init = torch.cuda.Event(enable_timing=True)
    start_wait = torch.cuda.Event(enable_timing=True)
    end_wait = torch.cuda.Event(enable_timing=True)
    
    # Allocate the payload on this rank's own GPU.
    device = f'cuda:{rank}'
    if rank == 0:
        # Sender creates a random payload.
        tensor_to_send = torch.randn(num_elements, dtype=dtype, device=device)
    else:
        # Receiver creates an uninitialized buffer of the same size.
        tensor_to_recv = torch.empty(num_elements, dtype=dtype, device=device)

    # Synchronize so both sides are ready before timing begins.
    dist.barrier()

    req = None
    
    # --- 1. Time the isend/irecv call phase ---
    start_init.record()
    if rank == 0:
        req = dist.isend(tensor=tensor_to_send, dst=1)
    else:
        req = dist.irecv(tensor=tensor_to_recv, src=0)
    end_init.record()

    # --- 2. Time the wait() phase (communication completion) ---
    start_wait.record()
    if req is not None:
        req.wait()
    end_wait.record()

    # --- 3. Synchronize the device, then read elapsed times ---
    # elapsed_time() requires both events to have completed on the GPU.
    torch.cuda.synchronize()

    init_time_ms = start_init.elapsed_time(end_init)
    wait_time_ms = start_wait.elapsed_time(end_wait)
    
    return init_time_ms, wait_time_ms

def main():
    """
    Entry point: initialize the 2-GPU process group, warm up, then sweep
    increasing tensor sizes through run_benchmark and print a results table
    (init time, transfer time, bandwidth) from rank 0.

    Both ranks execute the same sweep; the barriers keep them in lockstep so
    every point-to-point call is matched across ranks.
    """
    rank, world_size = setup_distributed()
    
    # Data type under test and its per-element size in bytes.
    dtype = torch.float16
    # torch.finfo(dtype).bits gives the bit width; divide by 8 for bytes.
    bytes_per_element = torch.finfo(dtype).bits // 8

    # Element counts (numel) to benchmark.
    K = 1024
    M = 1024 * 1024
    numels_to_test = [
        1,          # 1 element
        1 * K,      # 1K elements
        1 * M,      # 1M elements
        10 * M,     # 10M elements
        100 * M,    # 100M elements
    ]
    
    # --- Warm-up ---
    WARMUP_ITERATIONS = 5
    if rank == 0:
        print("\n" + "="*90)
        print(f"Running {WARMUP_ITERATIONS} warm-up rounds to stabilize the system (using 1 element)...")
    
    # Warm up with the smallest payload.
    for i in range(WARMUP_ITERATIONS):
        run_benchmark(num_elements=1, rank=rank, dtype=dtype)
        # Synchronize after each warm-up round so both ranks finish together.
        dist.barrier()
    
    if rank == 0:
        print("Warm-up complete.")
        print("="*90 + "\n")
        # Print the table header.
        print(f"{'Num Elements':>15s} | {'Data Size':>12s} | {'Init Time (ms)':>18s} | {'Transfer Time (ms)':>20s} | {'Bandwidth (GB/s)':>20s}")
        print("-" * 90)

    # --- Measurement sweep ---
    for numel in numels_to_test:
        # Run one transfer and collect the two timed phases.
        init_time, wait_time = run_benchmark(num_elements=numel, rank=rank, dtype=dtype)
        
        # Only rank 0 prints, to avoid duplicated output.
        if rank == 0:
            # Payload size in bytes for this element count and dtype.
            size_in_bytes = numel * bytes_per_element

            # Bandwidth (GB/s) = (bytes / 1e9) / (transfer time in seconds);
            # guard against a zero-measured transfer time.
            if wait_time > 0:
                bandwidth_gb_s = (size_in_bytes / 1e9) / (wait_time / 1000)
            else:
                bandwidth_gb_s = float('inf')

            print(f"{format_numel(numel):>15s} | {format_size(size_in_bytes):>12s} | {init_time:18.6f} | {wait_time:20.6f} | {bandwidth_gb_s:20.4f}")
            
        # Keep both ranks in lockstep between test sizes.
        dist.barrier()

    dist.destroy_process_group()
    if rank == 0:
        print("\nBenchmark finished.")

# Script entry point: launch with torchrun (or equivalent) so that the
# RANK and WORLD_SIZE environment variables are set for both processes.
if __name__ == "__main__":
    main()