import os

import torch
import torch.nn as nn
import torch.optim as optim
from skimage import io
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from Unet_Upsample import UNet


class CustomDataset(Dataset):
    """Paired image/mask dataset for binary segmentation.

    Each file in ``input_dir`` is matched with a same-named mask file in
    ``mask_dir``. Images and masks are resized to 256x256; masks are given a
    trailing channel axis so they come out as (1, 256, 256) tensors.

    Args:
        input_dir: directory of input images.
        mask_dir: directory of grayscale masks (same filenames as inputs).
        transform: optional transform applied to the *image tensor only*
            (e.g. normalization); masks are left untouched.
    """

    def __init__(self, input_dir, mask_dir, transform=None):
        self.input_dir = input_dir
        self.mask_dir = mask_dir
        # NOTE(review): assumes every file listed in input_dir has a
        # same-named mask in mask_dir — verify the directories match.
        self.input_name = os.listdir(input_dir)
        self.transform = transform

    def __len__(self):
        return len(self.input_name)

    def __getitem__(self, idx):
        img_path = os.path.join(self.input_dir, self.input_name[idx])
        mask_path = os.path.join(self.mask_dir, self.input_name[idx])

        image = io.imread(img_path)
        mask = io.imread(mask_path, as_gray=True)
        # Bug fix: the original `mask.squeeze(0)` raises ValueError whenever
        # axis 0 is not size 1 — which is the normal case, since
        # io.imread(..., as_gray=True) yields a 2-D (H, W) array. An
        # axis-free squeeze() drops all size-1 dims and is a no-op on 2-D.
        mask = mask.squeeze()
        # Add a trailing channel axis so ToTensor yields a (1, H, W) tensor.
        mask = mask.reshape(image.shape[0], image.shape[1], 1)

        # Resize both to the fixed network input size.
        image = transforms.Resize((256, 256))(transforms.ToTensor()(image))
        mask = transforms.Resize((256, 256))(transforms.ToTensor()(mask))

        # Optional normalization / augmentation on the image only.
        if self.transform:
            image = self.transform(image)

        return image, mask


def train(model, dataloader, criterion, optimizer, device, num_epochs=31,
          save_dir='../pt_file'):
    """Run the training loop and checkpoint the model every 3 epochs.

    Args:
        model: network to train; may return a single tensor of logits or a
            list of tensors (deep supervision).
        dataloader: iterable yielding (images, masks) batches.
        criterion: loss applied to raw model output vs. mask.
        optimizer: optimizer stepping ``model``'s parameters.
        device: torch.device to move model and batches onto.
        num_epochs: number of full passes over ``dataloader``.
        save_dir: directory for periodic checkpoints (created on demand);
            default preserves the original '../pt_file' location.
    """
    model.to(device)  # move model to the target device
    model.train()  # enable training-mode behavior (dropout, batchnorm)

    for epoch in range(num_epochs):
        print(f'第{epoch + 1}轮训练:')

        epoch_loss = 0.0  # accumulated loss over this epoch
        for batch_idx, (images, masks) in enumerate(dataloader):
            images = images.float().to(device)
            masks = masks.float().to(device)

            optimizer.zero_grad()
            outputs = model(images)  # forward pass

            # Deep-supervision models return one output per decoder stage;
            # average the per-stage losses. Outputs are logits — do not
            # apply sigmoid here (the criterion must accept logits).
            if isinstance(outputs, list):
                loss = sum(criterion(output, masks) for output in outputs) / len(outputs)
            else:
                loss = criterion(outputs, masks)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            # Progress report every 10 batches.
            if (batch_idx + 1) % 10 == 0:
                print(f'Epoch {epoch + 1}, Batch {batch_idx + 1}, Loss: {loss.item():.4f}')

        average_loss = epoch_loss / len(dataloader)
        print(f'Epoch {epoch + 1}, Average Loss: {average_loss:.4f}')

        # Checkpoint every 3 epochs.
        if (epoch + 1) % 3 == 0:
            # Bug fix: the original crashed at the first checkpoint if the
            # hard-coded output directory did not exist.
            os.makedirs(save_dir, exist_ok=True)
            save_path = os.path.join(save_dir, f'Unet_Upsample-{epoch + 1}.pt')
            torch.save(model.state_dict(), save_path)
            print(f'Model saved to {save_path}')


if __name__ == '__main__':
    # Per-channel ImageNet normalization, applied to the input image only
    # (masks must stay in their raw value range for the loss).
    image_transform = transforms.Compose([
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # Dataset locations.
    # NOTE(review): these point at the *val* split although this script
    # trains — confirm the intended split with the data layout.
    input_dir = '../unet_img/imgs/val'
    mask_dir = '../unet_img/masks/val'
    dataset = CustomDataset(input_dir, mask_dir, transform=image_transform)
    # Bug fix: training batches must be shuffled each epoch; the original
    # used shuffle=False, feeding samples in a fixed order.
    dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

    # Model, loss, and optimizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet(in_channels=3, num_classes=1).to(device)

    # Bug fix: train() documents that the model emits raw logits (no
    # sigmoid), and nn.BCELoss rejects inputs outside [0, 1].
    # BCEWithLogitsLoss applies the sigmoid internally and is numerically
    # more stable.
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Start training.
    train(model, dataloader, criterion, optimizer, device)
