import os

import torch


def train_epoch(features, labels, model, criterion, optimizer):
    """Run one optimization step on a single batch.

    Args:
        features: input batch; cast to float32 before the forward pass.
        labels: target batch; cast to int64 for the criterion.
        model: the network to optimize.
        criterion: loss function taking (predictions, targets).
        optimizer: optimizer whose parameters are updated in place.

    Returns:
        The batch loss as a plain Python float.
    """
    optimizer.zero_grad()
    outputs = model(features.float())
    batch_loss = criterion(outputs, labels.long())
    batch_loss.backward()
    optimizer.step()
    return batch_loss.item()


def valid_epoch(features, labels, model):
    """Evaluate one batch and return its accuracy.

    Args:
        features: input batch; cast to float32 before the forward pass.
        labels: integer class targets aligned with `features`.
        model: the network to evaluate (caller is expected to have set eval mode).

    Returns:
        Accuracy in [0, 1] as a plain Python float.
    """
    # no_grad: inference only — avoids building an autograd graph.
    with torch.no_grad():
        logits = model(features.float())
        predictions = torch.argmax(logits, dim=-1)
        # Tensor-level mean instead of Python sum(): one vectorized op,
        # and .item() yields a float rather than a 0-dim tensor.
        return (predictions == labels).float().mean().item()


def train(train_loader, valid_loader, model, criterion, optimizer, epochs=1000):
    """Train `model` for `epochs` epochs, checkpointing the best validation accuracy.

    Each epoch runs one pass over `train_loader` (optimizing via `train_epoch`)
    and one pass over `valid_loader` (scoring via `valid_epoch`). Whenever the
    epoch's average validation accuracy meets or beats the best seen so far,
    the model's state_dict is saved to ./save/best.pt.

    Args:
        train_loader: iterable of (features, labels) training batches.
        valid_loader: iterable of (features, labels) validation batches.
        model: network to train; assumes it exposes a `.device` attribute — TODO confirm.
        criterion: loss function for training.
        optimizer: optimizer updating `model`'s parameters.
        epochs: number of epochs to run (default 1000).
    """
    # Make sure the checkpoint directory exists; torch.save does not create it.
    os.makedirs("./save", exist_ok=True)
    best_acc = -1
    for epoch in range(epochs):
        loss_list = []
        acc_list = []
        model.train()
        print(f"============ epoch:{epoch + 1}/{epochs} =================")
        for train_features, train_labels in train_loader:
            # Move the batch to the device the model lives on.
            train_features = train_features.to(model.device)
            train_labels = train_labels.to(model.device)
            loss = train_epoch(train_features, train_labels, model, criterion, optimizer)
            print(f"batch_loss:{loss:.4f}")
            loss_list.append(loss)

        model.eval()
        # No gradients needed for validation; saves memory and time.
        with torch.no_grad():
            for valid_features, valid_labels in valid_loader:
                # Move the batch to the device the model lives on.
                valid_features = valid_features.to(model.device)
                valid_labels = valid_labels.to(model.device)
                acc = valid_epoch(valid_features, valid_labels, model)
                print(f"batch_acc:{acc * 100:.2f}%")
                # float() guards against 0-dim tensors so avg_acc is a plain float.
                acc_list.append(float(acc))

        avg_loss = sum(loss_list) / len(loss_list)
        avg_acc = sum(acc_list) / len(acc_list)
        print(f"===== avg_loss:{avg_loss:.4f} -- avg_acc:{avg_acc * 100:.2f}%  ============")

        # ">=" keeps the most recent checkpoint when accuracy ties.
        if avg_acc >= best_acc:
            torch.save(model.state_dict(), "./save/best.pt")
            best_acc = avg_acc
