import os

import imgaug.augmenters as iaa
import torch
import tqdm
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import save_image

from loss.dice_bce_loss import dice_bce_loss
from net.DLinkNet import DLinkNet34

import Dataset


def check_test_loss(loader, model):
    """Compute the mean per-batch loss of ``model`` over ``loader``.

    Relies on the module-level names ``device`` and ``loss_fn`` defined in
    the ``__main__`` section of this script.

    Args:
        loader: DataLoader yielding ``(image, mask, name)`` triples.
        model: segmentation network to evaluate.

    Returns:
        A scalar CPU tensor: the loss averaged over all batches
        (0.0 if the loader is empty).
    """
    if len(loader) == 0:
        return torch.tensor(0.0)  # avoid ZeroDivisionError on an empty loader

    was_training = model.training
    model.eval()  # fix: use running BatchNorm stats / disable dropout during eval

    total_loss = 0.0
    with torch.no_grad():  # no gradients needed for evaluation
        for x, y, _name in loader:
            x = x.to(device, dtype=torch.float32)
            y = y.to(device, dtype=torch.float32)

            y_pred = model(x)

            # loss_fn takes (target, prediction) — same order as the training loop.
            total_loss += loss_fn(y, y_pred).detach().cpu()

    if was_training:
        model.train()  # restore training mode for the caller

    return total_loss / len(loader)


if __name__ == '__main__':
    # Device setup
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # Hyper-parameters
    batch_size = 32
    epoch_num = 100
    num_workers = 0  # DataLoader worker processes (0 = load in the main process)
    best_test_loss = 10000  # large sentinel so the first epoch always improves on it
    save_path = './train_image'

    # Fix: make sure output directories exist, otherwise save_image / torch.save fail
    os.makedirs(save_path, exist_ok=True)
    os.makedirs('./model', exist_ok=True)

    # Data augmentation pipeline
    seq = iaa.Sequential([
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
            translate_percent=({"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
            rotate=(0, 360)
        ),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Sometimes(0.3, iaa.Affine(rotate=90)),
        iaa.Sometimes(0.3, iaa.Affine(rotate=180)),
        iaa.Sometimes(0.3, iaa.Affine(rotate=270))
    ])

    # Data loaders
    train_dataset = Dataset.SegmentationDataset(where='train', seq=seq)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers)
    # NOTE(review): the test set receives the same augmentation pipeline as
    # training — evaluation is normally done on unaugmented data. Confirm that
    # SegmentationDataset ignores `seq` for where='test'; otherwise pass None.
    test_dataset = Dataset.SegmentationDataset(where='test', seq=seq)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers)

    # Model, loss and optimizer
    model = DLinkNet34().to(device)
    loss_fn = dice_bce_loss()
    optimizer = torch.optim.Adam(model.parameters(), betas=(0.9, 0.999), lr=1e-4)

    # Reduce the learning rate when the test loss plateaus
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', verbose=True,
                                                           patience=10, min_lr=1e-7)
    # TensorBoard writer for the loss curves
    writer = SummaryWriter(log_dir="./log/class12")

    # Training loop
    for epoch in range(epoch_num):
        model.train()  # (re)assert training mode in case evaluation switched it off
        loss = 0  # running sum of per-batch training losses, averaged below

        for x, y, _name in tqdm.tqdm(train_loader):
            x = x.to(device, dtype=torch.float32)
            y = y.to(device, dtype=torch.float32)

            y_pred = model(x)  # forward pass

            loss_batch = loss_fn(y, y_pred)  # loss_fn signature: (target, prediction)

            # Fix: a single zero_grad suffices — the original also called
            # model.zero_grad() at the top of the loop, which was redundant.
            optimizer.zero_grad()
            loss_batch.backward()  # backward pass
            optimizer.step()  # parameter update

            loss += loss_batch.detach().cpu()

        # Save the predictions of the last batch as a visual progress check
        save_image(y_pred, f'{save_path}/{epoch}.png')

        # Mean training loss over the epoch
        loss = loss / len(train_loader)

        # Evaluate the current weights on the test set
        test_loss = check_test_loss(test_loader, model)

        # Adjust the learning rate based on the test loss
        scheduler.step(test_loss)

        # Log both losses
        writer.add_scalar("Loss/train", loss, epoch)
        writer.add_scalar("Loss/test", test_loss, epoch)

        # Keep the checkpoint with the best (lowest) test loss
        if best_test_loss > test_loss:
            best_test_loss = test_loss
            torch.save(model.state_dict(), './model/class12.pt')

        print(f'第{epoch}个epoch train loss 为{loss}, test loss为{test_loss}')
