import torch


def dborgottrain(train_data, model, Loss_fn, Yh_fn):
    """Run one training epoch and split each batch's inputs by predicted class.

    Args:
        train_data: iterable of ``(x, y, z)`` batches; ``z`` holds the integer
            class labels used as the loss target (looks like binary 0/1 from
            the splitting below — TODO confirm with caller).
        model: callable module exposing a project-specific ``set_val(batch, 1)``
            hook; its forward pass returns per-class scores of shape
            ``(batch_size, n_classes)``.
        Loss_fn: criterion applied as ``Loss_fn(output, z.long())``
            (e.g. cross-entropy).
        Yh_fn: optimizer driving ``zero_grad()`` / ``step()``.

    Returns:
        Tuple ``(mean_acc, mean_loss, all_x_db, all_y_db, all_x_gotmd,
        all_y_gotmd)`` where the four lists collect, per batch, the ``x``/``y``
        tensors the model predicted as class 0 ("db") and class 1 ("gotmd").
        If ``train_data`` is empty, the averages are reported as 0.0 instead
        of raising ``ZeroDivisionError``.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    all_x_db, all_y_db = [], []
    all_x_gotmd, all_y_gotmd = [], []
    total_loss, total_acc, n = 0.0, 0.0, 0

    for batch, (x, y, z) in enumerate(train_data):
        x, y, z = x.to(device), y.to(device), z.to(device)
        # Project-specific hook — presumably records the batch index; verify
        # against the model implementation.
        model.set_val(batch, 1)
        output = model(x)
        loss = Loss_fn(output, z.long())

        # Predicted class per sample; the max scores themselves are unused.
        _, pred_idx = torch.max(output, dim=1)

        # x/y are already on `device`, so the slices need no extra .to().
        all_x_db.append(x[pred_idx == 0])
        all_y_db.append(y[pred_idx == 0])
        all_x_gotmd.append(x[pred_idx == 1])
        all_y_gotmd.append(y[pred_idx == 1])

        cur_acc = torch.sum(z == pred_idx) / output.shape[0]

        # Standard optimizer cycle: clear grads, backprop the loss, update.
        Yh_fn.zero_grad()
        loss.backward()
        Yh_fn.step()

        total_loss += loss.item()
        total_acc += cur_acc.item()
        n += 1

    if n == 0:
        # Empty loader: report zero averages and empty splits.
        return 0.0, 0.0, all_x_db, all_y_db, all_x_gotmd, all_y_gotmd
    return total_acc / n, total_loss / n, all_x_db, all_y_db, all_x_gotmd, all_y_gotmd
