import torch
from tqdm import tqdm
import model.config as conf


def train(model, train_dataloader, valid_dataloader, device, loss_func, optimizer):
    """Train `model` with gradient accumulation, validating after every epoch.

    Binary-classification path: the model is expected to emit one logit per
    sample; predictions are thresholded via sigmoid + round during validation.

    Args:
        model: torch module called as ``model(inputs, atten_mask)``.
        train_dataloader / valid_dataloader: yield ``(inputs, atten_mask, label)``.
        device: device the batches and model live on.
        loss_func: loss taking ``(pred, label)`` — presumably BCEWithLogitsLoss
            given the sigmoid/round used at validation time (TODO confirm).
        optimizer: torch optimizer over ``model``'s parameters.

    Returns:
        (train_loss_list, valid_loss_list, acc_pred_list) — per-epoch summed
        train loss, summed validation loss, and validation accuracy.

    Side effects: saves checkpoints to ``conf.checkpoint`` whenever validation
    accuracy improves, plus unconditionally after the final epoch.
    """
    train_loss_list = []
    valid_loss_list = []
    best_acc = 0
    acc_pred_list = []
    for epoch in range(conf.epochs):
        train_loss_sum = 0.0
        valid_loss_sum = 0.0
        correct_count = 0
        valid_sample_count = 0

        model.train()
        optimizer.zero_grad()  # start every epoch with clean gradients
        for index, (inputs, atten_mask, label) in tqdm(
                enumerate(train_dataloader), total=len(train_dataloader),
                desc=f'train process: epoch {epoch}'):
            inputs = inputs.to(device)
            label = label.to(device)
            atten_mask = atten_mask.to(device)

            pred = model(inputs, atten_mask)
            pred = pred.view(-1)      # one logit per sample for binary classification
            label = label.float()
            loss = loss_func(pred, label)
            train_loss_sum += loss.sum().item()

            # Scale the loss so the accumulated gradient matches what a single
            # step over accum_grid batches would produce.
            (loss / conf.accum_grid).backward()
            if (index + 1) % conf.accum_grid == 0:
                optimizer.step()
                # lr_scheduler.step()
                optimizer.zero_grad()

        # Flush leftover gradients when the batch count is not a multiple of
        # accum_grid; otherwise they would silently leak into the next epoch.
        if len(train_dataloader) % conf.accum_grid != 0:
            optimizer.step()
            optimizer.zero_grad()

        train_loss_list.append(train_loss_sum)

        model.eval()
        with torch.no_grad():
            for index, (inputs, atten_mask, label) in tqdm(
                    enumerate(valid_dataloader), total=len(valid_dataloader),
                    desc=f'valid process:'):
                inputs = inputs.to(device)
                label = label.to(device)
                atten_mask = atten_mask.to(device)

                pred = model(inputs, atten_mask)
                pred = pred.view(-1)
                label = label.float()

                # Binary decision: probability via sigmoid, thresholded at 0.5.
                preds = torch.round(torch.sigmoid(pred))
                correct_count += (preds == label).type(torch.float).sum().item()
                valid_sample_count += label.size(0)

                loss = loss_func(pred, label)
                valid_loss_sum += loss.item()

            valid_loss_list.append(valid_loss_sum)
            # Divide by the real number of samples seen so a partial final
            # batch does not deflate the accuracy.
            acc_pred = correct_count / max(valid_sample_count, 1)
            acc_pred_list.append(acc_pred)
            print(f'epoch: {epoch}, train_loss: {train_loss_sum} , valid_loss: {valid_loss_sum} , valid_acc: {acc_pred}')

            # Always keep the last-epoch weights, even if accuracy regressed.
            if epoch == conf.epochs - 1:
                torch.save(model.state_dict(), f'{conf.checkpoint}/{epoch}_{acc_pred}.pt')
                continue

            # Checkpoint whenever validation accuracy improves.
            if acc_pred > best_acc:
                best_acc = acc_pred
                torch.save(model.state_dict(), f'{conf.checkpoint}/{epoch}_{best_acc}.pt')

    return train_loss_list, valid_loss_list, acc_pred_list


def evaluate(model, test_dataloader, device):
    """Report `model`'s accuracy on the test set.

    Args:
        model: torch module called as ``model(inputs, atten_mask)``.
        test_dataloader: yields ``(inputs, atten_mask, label)`` batches.
        device: device the batches and model live on.

    Side effects: prints the test accuracy; returns nothing.
    """
    model.eval()
    correct_count = 0
    sample_count = 0
    with torch.no_grad():
        for index, (inputs, atten_mask, label) in tqdm(enumerate(test_dataloader), total=len(test_dataloader),
                                                       desc=f'test process:'):
            inputs = inputs.to(device)
            label = label.to(device)
            atten_mask = atten_mask.to(device)
            pred = model(inputs, atten_mask)
            # NOTE(review): this takes argmax over the class dimension (multi-class
            # path) while train() validates with the sigmoid/round binary path —
            # confirm the two are intentionally different.
            correct_count += (pred.argmax(1) == label).type(torch.float).sum().item()
            sample_count += label.size(0)

        # Divide by the real number of samples so a partial final batch does
        # not deflate the accuracy (len(dataloader) * batch_size over-counts).
        acc_pred = correct_count / max(sample_count, 1)
        print(f'test_acc: {acc_pred}')
