import os

import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Dataset

from transformers import BertTokenizer, BertModel, BertConfig

# A simple Dataset class (for demonstration purposes only)
class SimpleDataset(Dataset):
    """Minimal text dataset that tokenizes each example on the fly.

    Each item is a ``(input_ids, attention_mask)`` pair of 1-D int tensors of
    length 128, so the default DataLoader collation stacks them into
    ``(batch, 128)`` tensors that BERT accepts directly.
    """

    def __init__(self, texts):
        # texts: list of raw strings to classify.
        self.texts = texts
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        inputs = self.tokenizer(text, return_tensors="pt", max_length=128, padding='max_length', truncation=True)
        # return_tensors="pt" adds a leading batch dim of size 1; drop it so
        # the DataLoader collates to (B, 128) instead of (B, 1, 128), which
        # BertModel.forward cannot consume.
        return inputs['input_ids'].squeeze(0), inputs['attention_mask'].squeeze(0)

# A simple classification model built on a pretrained BERT encoder
class BertForSequenceClassification(nn.Module):
    """Pretrained BERT encoder with a dropout + linear classification head.

    Args:
        num_labels: number of output classes for the linear classifier.
    """

    def __init__(self, num_labels):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.dropout = nn.Dropout(0.3)
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)

    def forward(self, input_ids, attention_mask):
        """Return unnormalized class logits of shape ``(batch, num_labels)``.

        Args:
            input_ids: LongTensor of token ids, shape (batch, seq_len).
            attention_mask: mask tensor of the same shape (1 = real token).
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # Use the named field instead of positional indexing (outputs[1]);
        # clearer, and correct under transformers' default return_dict=True.
        pooled_output = self.dropout(outputs.pooler_output)
        return self.classifier(pooled_output)

def setup(rank, world_size):
    """Initialize the NCCL process group and bind this process to its GPU.

    ``init_process_group``'s default "env://" rendezvous requires the
    MASTER_ADDR and MASTER_PORT environment variables; processes launched via
    ``mp.spawn`` do not inherit them from a launcher, so provide single-node
    defaults when they are unset (the original code omitted this and workers
    crashed at init).

    Args:
        rank: this process's index; also used as its CUDA device id.
        world_size: total number of participating processes.
    """
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "12355")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)

def cleanup():
    """Tear down the distributed process group created by ``setup``."""
    dist.destroy_process_group()

def train(rank, world_size, texts, labels, model, tokenizer, epochs=1, learning_rate=2e-5):
    """Run the DDP training loop in one spawned process (one GPU per rank).

    Args:
        rank: process index, also used as the CUDA device id.
        world_size: total number of processes.
        texts: full list of raw input strings (shared by all ranks; the
            DistributedSampler shards it).
        labels: list of int class labels aligned one-to-one with ``texts``.
        model: CPU-resident model to train; it is moved to ``rank``'s GPU here.
        tokenizer: unused (SimpleDataset builds its own); kept so the
            ``mp.spawn`` call signature is unchanged.
        epochs: number of passes over the data.
        learning_rate: AdamW learning rate.
    """
    setup(rank, world_size)
    torch.manual_seed(0)

    # Pair each text with its own label. The original code fed the *entire*
    # label list to the loss for every batch, which both mismatched the
    # batch-sized logits and mislabeled the sharded examples.
    class _LabeledDataset(Dataset):
        def __init__(self, base, label_list):
            self.base = base
            self.label_list = label_list

        def __len__(self):
            return len(self.base)

        def __getitem__(self, idx):
            input_ids, attention_mask = self.base[idx]
            return input_ids, attention_mask, torch.tensor(self.label_list[idx])

    # Class count must come from the raw python list, computed once —
    # the original recomputed len(set(...)) on a tensor inside the loop.
    num_labels = len(set(labels))

    dataset = _LabeledDataset(SimpleDataset(texts), labels)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    dataloader = DataLoader(dataset, batch_size=2, sampler=sampler, shuffle=False)

    # DDP requires the module's parameters to already live on device `rank`.
    model = model.to(rank)
    model = DDP(model, device_ids=[rank])
    model.train()

    optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
    loss_fct = nn.CrossEntropyLoss()  # hoisted: no need to rebuild per batch

    for epoch in range(epochs):
        sampler.set_epoch(epoch)  # makes the sampler reshuffle each epoch
        for input_ids, attention_mask, batch_labels in dataloader:
            # Collapse a possible singleton dim left by the tokenizer's
            # return_tensors="pt" ((B, 1, L) -> (B, L)); no-op on 2-D input.
            input_ids = input_ids.squeeze(1).cuda(rank)
            attention_mask = attention_mask.squeeze(1).cuda(rank)
            batch_labels = batch_labels.cuda(rank)

            logits = model(input_ids, attention_mask)
            loss = loss_fct(logits.view(-1, num_labels), batch_labels.view(-1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if rank == 0:
            print(f'Epoch {epoch+1}/{epochs}, Loss: {loss.item()}')

    cleanup()

def main():
    """Spawn one DDP training process per visible CUDA device.

    Raises:
        RuntimeError: if no CUDA device is visible — ``mp.spawn`` with
            ``nprocs=0`` would otherwise silently do nothing, and the NCCL
            backend requires GPUs anyway.
    """
    world_size = torch.cuda.device_count()
    if world_size == 0:
        raise RuntimeError("No CUDA devices available; NCCL DDP training requires at least one GPU.")

    texts = ["Hello world!", "This is a test sentence."] * world_size
    labels = [0, 1] * world_size
    model = BertForSequenceClassification(num_labels=len(set(labels)))
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    # mp.spawn prepends `rank` to args automatically for each worker.
    mp.spawn(train,
             args=(world_size, texts, labels, model, tokenizer),
             nprocs=world_size,
             join=True)

if __name__ == "__main__":
    main()

