# p2p_serial_per_rank_benchmark.py

import torch
import torch.distributed as dist
import os
import time
import numpy as np

def setup_distributed():
    """Initialize the NCCL process group and pin this process to its GPU.

    Reads LOCAL_RANK from the environment (set by torchrun) to select the
    CUDA device for this process.

    Returns:
        tuple[int, int]: (global rank, world size) of the calling process.
    """
    device_index = int(os.environ['LOCAL_RANK'])
    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(device_index)
    return dist.get_rank(), dist.get_world_size()

def main():
    """Serially benchmark NCCL point-to-point sends from rank 0.

    Rank 0 sends one large fp16 tensor to each other rank in turn and times
    every blocking send on the CPU, bracketing it with CUDA synchronizes so
    the measured interval covers only that transfer. Ranks 1..world_size-1
    each post a single matching recv.

    Requires at least 2 processes launched via torchrun (NCCL backend).
    """
    # 1. Initialize the distributed environment.
    rank, world_size = setup_distributed()

    # P2P needs at least one sender and one receiver; beyond that the loops
    # below already scale with world_size, so don't hard-code a GPU count.
    if world_size < 2:
        if rank == 0:
            print(f"错误：此脚本需要至少 2 个GPU进程，但检测到 {world_size} 个。")
        return

    if rank == 0:
        print(f"总共 {world_size} 个GPU进程已启动。")
        print(f"测试模式: Rank 0 串行发送, 逐个测量到 Rank 1, 2, ..., {world_size - 1} 的时间。")

    # 2. Prepare payload parameters.
    num_elements = 2 * 10**8
    data_type = torch.float16
    num_warmup = 5
    # Derive the element size from the dtype instead of hard-coding 2 bytes,
    # so changing `data_type` keeps the reported sizes correct.
    tensor_size_bytes = num_elements * torch.finfo(data_type).bits // 8
    tensor_size_mb = tensor_size_bytes / (1024 * 1024)

    if rank == 0:
        print("-" * 50)
        print(f"数据类型: {data_type}")
        print(f"数据规模: {num_elements:.0e} 个元素")
        print(f"每个张量大小: {tensor_size_mb:.2f} MB")
        print(f"将进行 {num_warmup} 次 Warm-up。")
        print("-" * 50)

    # Rank 0 allocates the tensor to send; every other rank allocates a
    # same-shaped receive buffer on its own GPU.
    if rank == 0:
        tensor_to_send = torch.randn(num_elements, device=torch.cuda.current_device(), dtype=data_type)
        recv_buffer = None  # Rank 0 never receives.
    else:
        tensor_to_send = None
        recv_buffer = torch.zeros(num_elements, device=torch.cuda.current_device(), dtype=data_type)

    dist.barrier()

    # 3. Warm-up: establish NCCL channels up front so the timed runs below
    #    exclude one-time connection/setup costs.
    if rank == 0:
        print("正在进行 Warm-up...")
    for _ in range(num_warmup):
        if rank == 0:
            for dst_rank in range(1, world_size):
                dist.send(tensor_to_send, dst=dst_rank)
        else:
            dist.recv(recv_buffer, src=0)
    dist.barrier()
    if rank == 0:
        print("Warm-up 完成。")

    # 4. Timed serial P2P sends — one measurement per destination rank.
    if rank == 0:
        times_ms = []
        print("\n--- 开始精确计时测量 ---")

        # Pairs with the receivers' first barrier: everyone starts together.
        dist.barrier()

        for dst_rank in range(1, world_size):
            # Drain any outstanding GPU work so the clock starts from an
            # idle device and the interval contains only this send.
            torch.cuda.synchronize()

            # perf_counter is monotonic and higher-resolution than time.time,
            # which is the right choice for interval measurement.
            start_time = time.perf_counter()

            # Blocking point-to-point send to this destination.
            dist.send(tensor_to_send, dst=dst_rank)

            # Wait for all GPU work tied to the send before stopping the clock.
            torch.cuda.synchronize()

            end_time = time.perf_counter()

            duration_ms = (end_time - start_time) * 1000
            times_ms.append(duration_ms)
            print(f"    Rank 0 -> Rank {dst_rank} 发送耗时: {duration_ms:.2f} 毫秒")

        # Pairs with the receivers' second barrier: wait for all receivers
        # to finish before printing the summary.
        dist.barrier()

        # Summary statistics over the per-destination timings.
        print("\n--- 测量总结 ---")
        avg_time = np.mean(times_ms)
        min_time = np.min(times_ms)
        max_time = np.max(times_ms)
        total_time = np.sum(times_ms)
        print(f"总耗时 (所有发送操作相加): {total_time:.2f} 毫秒 ({total_time/1000:.4f} 秒)")
        print(f"平均单次发送耗时: {avg_time:.2f} 毫秒")
        print(f"最快单次发送耗时: {min_time:.2f} 毫秒")
        print(f"最慢单次发送耗时: {max_time:.2f} 毫秒")

    else:  # Ranks 1..world_size-1: receive exactly once.
        # Pairs with rank 0's pre-loop barrier.
        dist.barrier()

        # Single recv matching rank 0's timed send to this rank.
        dist.recv(recv_buffer, src=0)

        # Pairs with rank 0's post-loop barrier.
        dist.barrier()

    # 5. Tear down the process group.
    dist.destroy_process_group()

# Entry point: launch with torchrun so RANK/WORLD_SIZE/LOCAL_RANK are set.
if __name__ == "__main__":
    main()
