import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP

def setup(rank, world_size):
    """Initialize the NCCL process group for one worker process.

    Args:
        rank: Index of this process; also used as its CUDA device index.
        world_size: Total number of participating processes.
    """
    # Bug fix: init_process_group with no init_method defaults to the env://
    # rendezvous, which requires MASTER_ADDR/MASTER_PORT in the environment.
    # Provide single-node defaults so the example runs out of the box.
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    # Bind this process to its GPU before creating NCCL communicators.
    torch.cuda.set_device(rank)
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def cleanup():
    """Tear down the process group created by ``setup``."""
    dist.destroy_process_group()

# SimpleModel can be replaced here with e.g. the Transformer model from the earlier example.
class SimpleModel(nn.Module):
    """Minimal stand-in network: a single fully connected layer."""

    def __init__(self):
        super().__init__()
        # One simple fully connected layer as an example: 10 features -> 2 logits.
        self.fc = nn.Linear(10, 2)

    def forward(self, x):
        """Apply the linear layer; (N, 10) input yields (N, 2) output."""
        return self.fc(x)

def train(rank, world_size, model):
    """Per-process training entry point, launched via ``mp.spawn``.

    Args:
        rank: Process index (supplied by ``mp.spawn``); used as the CUDA
            device id.
        world_size: Total number of spawned processes.
        model: The ``nn.Module`` to wrap in DDP and train.
    """
    # Bug fix: the process group was never initialized before constructing
    # DDP, which would raise at runtime. Create it here and always tear it
    # down on exit, even if training fails.
    setup(rank, world_size)
    try:
        # Distinct seed per rank so each worker draws different random data.
        torch.manual_seed(1234 + rank)
        device = torch.device(f"cuda:{rank}")
        model.to(device)
        model = DDP(model, device_ids=[rank])
        optimizer = optim.SGD(model.parameters(), lr=0.01)

        # Hypothetical data loading and training loop (random tensors stand
        # in for a real dataset).
        for epoch in range(2):  # simplified training: only two epochs
            for _ in range(10):  # assume 10 batches per epoch
                inputs = torch.randn(32, 10, device=device)  # fake batch
                labels = torch.randint(0, 2, (32,), device=device)  # fake labels

                optimizer.zero_grad()
                outputs = model(inputs)
                loss = nn.functional.cross_entropy(outputs, labels)
                loss.backward()
                optimizer.step()

            # Reports only the loss of the epoch's final batch.
            print(f"Rank {rank}, Epoch {epoch+1}, Loss: {loss.item()}")
    finally:
        cleanup()

def main():
    """Launch one training process per available GPU.

    Raises:
        RuntimeError: If no CUDA devices are visible (NCCL-backed DDP
            requires at least one GPU).
    """
    world_size = torch.cuda.device_count()
    # Robustness: nprocs=0 would make mp.spawn fail obscurely; fail loudly
    # with an actionable message instead.
    if world_size < 1:
        raise RuntimeError(
            "No CUDA devices available; NCCL-based DDP requires at least one GPU."
        )
    # mp.spawn prepends the process rank to args when calling train.
    mp.spawn(train,
             args=(world_size, SimpleModel()),
             nprocs=world_size,
             join=True)

# Entry-point guard: required when using torch.multiprocessing spawn so that
# child processes re-importing this module do not recursively launch main().
if __name__ == "__main__":
    main()
