# Training loop with three fixes over the original:
#   1) labels are moved to `device`, so the loss (CrossEntropyLoss) and the
#      accuracy comparison no longer mix CPU and GPU tensors (the original
#      crashed when device was "cuda");
#   2) the per-epoch accuracy print is inside the epoch loop (it had been
#      dedented and therefore ran only once, after training finished);
#   3) best_acc / best_model are actually maintained, so the checkpoint
#      written at the end is the best-accuracy epoch, not just the last one.
optimizer = optim.Adam(model.parameters(), lr=lr)  # optimizer
criterion = nn.CrossEntropyLoss()  # multi-class classification loss
model.to(device)
best_acc = 0.0     # best accuracy observed so far
best_model = None  # state_dict snapshot corresponding to best_acc

for epoch in range(epochs):
    model.train()  # enable training mode (dropout / batchnorm behavior)
    epoch_acc_count = 0  # number of correct predictions this epoch
    train_count = 0      # number of samples seen this epoch
    loss_list = []       # per-batch losses, for the epoch-average printout
    train_bar = tqdm(train_loader)  # progress bar over the loader
    for data in train_bar:
        x_train, y_train = data  # unpack (X, Y) from the loader batch
        # Model expects a contiguous (seq_len, batch) LongTensor —
        # assumes x_train arrives as (batch, seq_len); TODO confirm loader shape.
        x_input = x_train.long().transpose(1, 0).contiguous().to(device)
        # Flattened labels on the same device as the model output.
        y_target = y_train.long().view(-1).to(device)

        optimizer.zero_grad()
        # Forward pass
        output_ = model(x_input)
        # Compute loss
        loss = criterion(output_, y_target)
        loss.backward()
        optimizer.step()
        loss_list.append(loss.item())

        # Count correct predictions (both tensors are on `device` now).
        epoch_acc_count += (output_.argmax(axis=1) == y_target).sum()
        train_count += len(x_train)

    # Epoch accuracy = correct / total (0-d tensor; .item() below).
    epoch_acc = epoch_acc_count / train_count

    # Per-epoch report
    print("【EPOCH: 】%s" % str(epoch + 1))
    print("训练损失为%s" % (str(sum(loss_list) / len(loss_list))))
    print("训练精度为%s" % (str(epoch_acc.item() * 100)[:5]) + '%')

    # Snapshot the weights whenever accuracy improves; clone to CPU so the
    # snapshot is decoupled from subsequent in-place parameter updates.
    if epoch_acc.item() > best_acc:
        best_acc = epoch_acc.item()
        best_model = {k: v.detach().cpu().clone()
                      for k, v in model.state_dict().items()}

# Persist the best-epoch weights (fall back to the final weights if no
# epoch ever ran, i.e. epochs == 0).
if best_model is None:
    best_model = model.state_dict()
torch.save(best_model, './best_model.pt')
