import os

import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizer, AdamW

from MyData import MyDataset
from net import Model

# Training device: first CUDA GPU when available, otherwise CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EPOCHS = 1000 # number of training epochs

# Tokenizer for the Chinese BERT checkpoint, loaded from a local model cache.
token = BertTokenizer.from_pretrained(r"D:\AI\HuggingFace\my-model-cache\bert-base-chinese")
def collate_fn(batch):
    """Collate a batch of (text, label) pairs into fixed-length tensors.

    Each batch item is expected to be a (sentence, label) tuple as yielded
    by MyDataset.  Sentences are tokenized, truncated/padded to exactly
    500 tokens so every batch shares one sequence length, and all tensors
    are moved to DEVICE.

    Returns:
        (input_ids, attention_mask, token_type_ids, labels) on DEVICE.
    """
    texts = [item[0] for item in batch]
    labels = [item[1] for item in batch]
    # Encode the whole batch in one call; fixed-length padding keeps tensor
    # shapes constant across batches.  (Dropped the unused return_length=True
    # of the original -- the 'length' field was never read.)
    data = token.batch_encode_plus(batch_text_or_text_pairs=texts,
                                   truncation=True,
                                   padding='max_length',
                                   max_length=500,
                                   return_tensors='pt')
    # input_ids: token ids; attention_mask: 1 for real tokens, 0 for padding.
    # NOTE(review): moving tensors to DEVICE inside collate_fn works with
    # num_workers=0, but CUDA tensors cannot be produced in DataLoader worker
    # subprocesses -- revisit if workers are ever enabled.
    input_ids = data['input_ids'].to(DEVICE)
    attention_mask = data['attention_mask'].to(DEVICE)
    token_type_ids = data['token_type_ids'].to(DEVICE)
    labels = torch.tensor(labels, dtype=torch.long).to(DEVICE)
    return input_ids, attention_mask, token_type_ids, labels

# Build the training dataset (MyDataset loads the "train" split).
train_dataset = MyDataset("train")

# DataLoader: shuffled mini-batches of 32.  drop_last discards the final
# incomplete batch so every batch is exactly 32 samples; collate_fn
# tokenizes each batch and moves it to DEVICE.
train_loader = DataLoader(dataset=train_dataset, 
                          batch_size=32, 
                          shuffle=True,
                          drop_last=True,
                          collate_fn=collate_fn
                          )

if __name__ == "__main__":
    print(DEVICE)
    # Load the model and move it to the training device.
    model = Model().to(DEVICE)
    # torch.optim.AdamW replaces transformers.AdamW, which is deprecated and
    # removed in recent transformers releases.
    # NOTE(review): lr=5e-4 is unusually high for BERT fine-tuning (the
    # common range is 2e-5..5e-5) -- kept as-is to preserve behavior.
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
    loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy loss

    # Ensure the checkpoint directory exists before the first torch.save,
    # which would otherwise raise FileNotFoundError.
    os.makedirs("params1k", exist_ok=True)

    model.train()  # switch to training mode
    for epoch in range(EPOCHS):
        for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(train_loader):
            # Forward pass.
            out = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)

            # Compute loss and take one optimizer step.
            loss = loss_func(out, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 5 == 0:  # report every 5 batches
                preds = out.argmax(dim=1)  # predicted class per sample
                acc = (preds == labels).sum().item() / len(labels)
                print(epoch, i, loss.item(), acc)

        # Save a checkpoint after every epoch.
        torch.save(model.state_dict(), f"params1k/{epoch}bert.pt")
        print(epoch,"参数保存完成")