import torch


def dbtrain(train_data, model, Loss_fn, db_Yh_fn, n_eporch):
    """Run one training epoch over *train_data*.

    Args:
        train_data: iterable of (x, y) mini-batches (e.g. a DataLoader).
        model: the network; must be callable and expose ``set_val(epoch, batch)``.
        Loss_fn: loss criterion, called as ``Loss_fn(output, y.long())``.
        db_Yh_fn: the optimizer (``zero_grad()`` / ``step()`` are called on it).
        n_eporch: current epoch index, forwarded to ``model.set_val``.

    Returns:
        (avg_accuracy, avg_loss) averaged over the batches seen; (0.0, 0.0)
        when *train_data* is empty.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): consider calling model.train() here if the model uses
    # dropout/batch-norm — not added because the model type is not visible.
    total_loss, total_acc, n = 0.0, 0.0, 0
    for batch, (x, y) in enumerate(train_data):
        x, y = x.to(device), y.to(device)
        # Let the model know where we are in training (project-specific hook).
        model.set_val(n_eporch, batch)
        output = model(x)
        loss = Loss_fn(output, y.long())

        # Predicted class = argmax over the class dimension (dim=1).
        _, pred_idx = torch.max(output, dim=1)
        cur_acc = torch.sum(y == pred_idx) / output.shape[0]

        db_Yh_fn.zero_grad()  # clear accumulated gradients
        loss.backward()       # backprop through the loss
        db_Yh_fn.step()       # update all weights and biases

        total_loss += loss.item()
        total_acc += cur_acc.item()
        n += 1

    # Guard against an empty loader (would otherwise divide by zero).
    if n == 0:
        return 0.0, 0.0
    # Bug fix: the original returned (total_loss / n) - 0.3, which silently
    # under-reported the true average loss by a magic constant.
    return total_acc / n, total_loss / n
