import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.multiprocessing as mp
import os
import time

def setup(rank, world_size):
    """Initialize the default distributed process group (NCCL backend).

    Args:
        rank: This process's rank within the group.
        world_size: Total number of participating processes.
    """
    # Rendezvous endpoint for all ranks on this single node.
    os.environ.update({'MASTER_ADDR': 'localhost', 'MASTER_PORT': '12355'})
    dist.init_process_group(backend="nccl", world_size=world_size, rank=rank)

def cleanup():
    """Tear down the default distributed process group."""
    dist.destroy_process_group()

def train_step(model, data, target, optimizer, scaler):
    """Run one mixed-precision (FP16) optimization step.

    Args:
        model: Network to train (plain module or DDP-wrapped).
        data: Input batch tensor.
        target: Integer class labels for cross-entropy.
        optimizer: Optimizer over ``model``'s parameters.
        scaler: ``GradScaler`` handling FP16 loss scaling.

    Returns:
        The detached loss tensor for this step (callers may ignore it).
    """
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():  # forward pass in FP16 where safe
        output = model(data)
        # Functional form avoids building a new loss module on every step.
        loss = nn.functional.cross_entropy(output, target)
    # Scale the loss so small FP16 gradients don't underflow; step() unscales
    # and skips the update if any gradient overflowed.
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    return loss.detach()

def demo_basic(rank, world_size):
    """Benchmark one DDP worker: warm up, then time FP16 training steps
    on synthetic data and report per-rank throughput from rank 0.

    Args:
        rank: This process's rank (also used as the local CUDA device index).
        world_size: Total number of participating processes/GPUs.
    """
    print(f"Running DDP on rank {rank}.")
    setup(rank, world_size)

    # Bind this process to its GPU so collectives and new CUDA tensors
    # default to the right device.
    torch.cuda.set_device(rank)
    device = torch.device(f'cuda:{rank}')

    # A compute-heavy MLP (stand-in for a large model such as ResNet50).
    model = nn.Sequential(
        nn.Linear(4096, 4096),
        nn.ReLU(),
        nn.Linear(4096, 4096),
        nn.ReLU(),
        nn.Linear(4096, 1000)
    ).to(device)

    # Wrap with DDP so gradients are all-reduced across ranks each backward.
    ddp_model = DDP(model, device_ids=[rank])

    # Optimizer and loss scaler for mixed-precision (FP16) training.
    optimizer = torch.optim.Adam(ddp_model.parameters())
    scaler = torch.cuda.amp.GradScaler()

    # One synthetic batch, reused every iteration (pure compute benchmark).
    batch_size = 64
    data = torch.randn(batch_size, 4096).to(device)
    target = torch.randint(0, 1000, (batch_size,)).to(device)

    # Warm-up: let allocator/autotuning settle before the timed run.
    for _ in range(10):
        train_step(ddp_model, data, target, optimizer, scaler)

    # CUDA kernels launch asynchronously — synchronize before reading the
    # clock on both ends, otherwise we time kernel launches, not GPU work.
    torch.cuda.synchronize(device)
    start_time = time.time()
    iterations = 100
    for _ in range(iterations):
        train_step(ddp_model, data, target, optimizer, scaler)
    torch.cuda.synchronize(device)
    end_time = time.time()

    total_time = end_time - start_time
    avg_time_per_iter = total_time / iterations
    # Per-rank samples/sec: each rank processes its own batch every step.
    throughput = batch_size / avg_time_per_iter

    if rank == 0:  # only the main process prints results
        print(f"Multi-GPU FP16 Test Results ({world_size} GPUs):")
        print(f"Total Time for {iterations} iterations: {total_time:.4f}s")
        print(f"Average Time per Iteration: {avg_time_per_iter:.4f}s")
        print(f"Throughput: {throughput:.2f} samples/sec")

    cleanup()

def run_demo(world_size=None):
    """Spawn one DDP benchmark worker per GPU.

    Args:
        world_size: Number of processes/GPUs to launch. Defaults to all
            visible CUDA devices (the original script hard-coded 4, which
            crashes on machines with fewer GPUs).

    Raises:
        RuntimeError: If no CUDA devices are available.
    """
    if world_size is None:
        world_size = torch.cuda.device_count()
    if world_size < 1:
        raise RuntimeError("No CUDA devices available for the DDP benchmark.")
    mp.spawn(demo_basic, args=(world_size,), nprocs=world_size, join=True)

# Entry point: launch the multi-process DDP benchmark when run as a script.
if __name__ == "__main__":
    run_demo()