import os

import torch
import torch.nn as nn
import torch.optim as optim
from skimage import io
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

from Unet_ConvTranspose2d_better import UNet


# Dataset definition
class CustomDataset(Dataset):
    """Segmentation dataset that pairs images with same-named masks.

    Assumes every file name in ``input_dir`` also exists in ``mask_dir``
    (mask lookup reuses the image file name) -- TODO confirm with the
    data layout.  Images and masks are resized to 256x256; the optional
    ``transform`` (e.g. normalization) is applied to the image only.
    """

    def __init__(self, input_dir, mask_dir, transform=None):
        self.input_dir = input_dir
        self.mask_dir = mask_dir
        self.input_name = os.listdir(input_dir)
        self.transform = transform

    def __len__(self):
        return len(self.input_name)

    def __getitem__(self, idx):
        img_path = os.path.join(self.input_dir, self.input_name[idx])
        mask_path = os.path.join(self.mask_dir, self.input_name[idx])

        image = io.imread(img_path)
        mask = io.imread(mask_path, as_gray=True)
        # BUGFIX: the original `mask.squeeze(0)` raises ValueError on the
        # 2-D (H, W) array that `as_gray=True` returns, because numpy's
        # axis-specific squeeze requires that axis to have size 1.
        # `reshape` alone handles both a plain 2-D mask and one carrying a
        # stray singleton dimension.  Note it still requires the mask to
        # have exactly image.shape[0] * image.shape[1] elements.
        mask = mask.reshape(image.shape[0], image.shape[1], 1)  # (H, W, 1) so ToTensor yields (1, H, W)

        # Resize both to the fixed network input size.
        image = transforms.Resize((256, 256))(transforms.ToTensor()(image))
        mask = transforms.Resize((256, 256))(transforms.ToTensor()(mask))

        # Apply the optional extra transform to the image only; the mask
        # must keep its raw label values for the loss.
        if self.transform:
            image = self.transform(image)

        return image, mask


class DoubleConv(nn.Module):
    """Two stacked Conv(3x3, pad 1) -> BatchNorm -> ReLU stages.

    The standard U-Net building block: the first convolution maps
    ``in_channels`` to ``out_channels``; the second keeps ``out_channels``.
    Spatial size is preserved by the padding.
    """

    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()
        # Assemble the six layers in a loop; only the first conv's input
        # channel count differs between the two stages.
        stages = []
        for channels_in in (in_channels, out_channels):
            stages.append(nn.Conv2d(channels_in, out_channels, kernel_size=3, padding=1))
            stages.append(nn.BatchNorm2d(out_channels))
            stages.append(nn.ReLU(inplace=True))
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Run both conv stages on ``x`` and return the result."""
        return self.double_conv(x)


# Training loop
def train(model, dataloader, criterion, optimizer, device, num_epochs=31):
    """Train ``model`` in place for ``num_epochs`` epochs.

    Args:
        model: the network to optimize (updated in place).
        dataloader: yields ``(images, masks)`` batches.
        criterion: loss taking ``(outputs, masks)``.
        optimizer: optimizer over ``model.parameters()``.
        device: torch device the batches are moved to.
        num_epochs: number of passes over ``dataloader``.

    Prints the mean loss per epoch and saves a checkpoint every third
    epoch.  Returns None.
    """
    model.train()
    for epoch in range(num_epochs):
        print(f'第{epoch + 1}轮训练:')
        epoch_loss = 0.0
        num_batches = 0
        for images, masks in dataloader:
            images = images.float().to(device)
            masks = masks.float().to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            num_batches += 1

        # BUGFIX: the original printed the 0-based `epoch` here while the
        # banner above is 1-based, reported only the *last* batch's loss,
        # and raised NameError on `loss` when the dataloader was empty.
        # Report the 1-based epoch and the mean loss over the epoch.
        mean_loss = epoch_loss / max(num_batches, 1)
        print(f'Epoch {epoch + 1}, Loss: {mean_loss}')
        if (epoch + 1) % 3 == 0:
            # Filename keeps the original 0-based `epoch` so existing
            # checkpoint consumers are unaffected.
            torch.save(model.state_dict(), f'../pt_file/Unet_ConvTranspose2d_better-{epoch}.pt')


if __name__ == '__main__':
    # Image-side preprocessing applied on top of the fixed resize done
    # inside CustomDataset; extend this Compose for data augmentation.
    normalize = transforms.Compose([
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Dataset and loader.
    train_set = CustomDataset('../unet_img/imgs/val', '../unet_img/masks/val',
                              transform=normalize)
    loader = DataLoader(train_set, batch_size=8, shuffle=True)

    # Model on GPU when available, otherwise CPU.
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = UNet().to(run_device)

    # Kick off training with BCE loss and Adam.
    train(net, loader, nn.BCELoss(),
          optim.Adam(net.parameters(), lr=0.001), run_device)
