import os

import cv2
import numpy as np
import torch
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


class MedicalDataset(Dataset):
    """Lung X-ray segmentation dataset.

    Expects the following layout under ``root_dir``:

        train_images/<case>/images/  -- input image(s)
        train_images/<case>/masks/   -- binary mask(s)
        test_images/<case>/          -- input image(s) only

    Only the first (sorted) file of each folder is used, i.e. one
    sample per case folder.
    """

    def __init__(self, root_dir, is_train=True, image_size=256):
        """
        Args:
            root_dir (str): root directory containing the ``train_images``
                and ``test_images`` folders.
            is_train (bool): load the training split (images + masks) when
                True, otherwise the test split (images only).
            image_size (int): side length images and masks are resized to.
        """
        self.root_dir = root_dir
        self.is_train = is_train
        self.image_size = image_size
        self.image_paths = []
        self.mask_paths = []

        # ImageNet normalization constants; matches common pretrained backbones.
        self.image_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        # Collect sample paths for the requested split.  Directory listings
        # are sorted because os.listdir order is platform-dependent; sorting
        # makes the dataset deterministic across runs and machines.
        if self.is_train:
            train_dir = os.path.join(root_dir, 'train_images')
            for folder in sorted(os.listdir(train_dir)):
                folder_path = os.path.join(train_dir, folder)
                if not os.path.isdir(folder_path):
                    continue
                img_folder = os.path.join(folder_path, 'images')
                mask_folder = os.path.join(folder_path, 'masks')

                # Skip cases missing either sub-folder.
                if not os.path.exists(img_folder) or not os.path.exists(mask_folder):
                    continue

                img_files = sorted(os.listdir(img_folder))
                mask_files = sorted(os.listdir(mask_folder))

                if img_files and mask_files:  # make sure neither folder is empty
                    self.image_paths.append(os.path.join(img_folder, img_files[0]))
                    self.mask_paths.append(os.path.join(mask_folder, mask_files[0]))
        else:
            test_dir = os.path.join(root_dir, 'test_images')
            for folder in sorted(os.listdir(test_dir)):
                folder_path = os.path.join(test_dir, folder)
                if os.path.isdir(folder_path):
                    test_files = sorted(os.listdir(folder_path))
                    if test_files:  # make sure the folder is not empty
                        self.image_paths.append(os.path.join(folder_path, test_files[0]))
                        self.mask_paths.append(None)  # no ground truth at test time

    def __len__(self):
        return len(self.image_paths)

    def preprocess_image(self, image):
        """Resize an RGB image and convert it to a normalized float tensor."""
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        image = image.resize((self.image_size, self.image_size), Image.BILINEAR)
        return self.image_transform(image)

    def preprocess_mask(self, mask):
        """Resize a mask with nearest-neighbour and binarize it to {0, 1}."""
        if isinstance(mask, np.ndarray):
            mask = Image.fromarray(mask)
        # Nearest-neighbour keeps labels crisp (no interpolated grey values).
        mask = mask.resize((self.image_size, self.image_size), Image.NEAREST)
        mask = torch.from_numpy(np.array(mask)).float()
        return (mask > 0).float()  # any non-zero pixel counts as foreground

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        image = cv2.imread(img_path)
        # cv2.imread returns None instead of raising on unreadable files;
        # fail loudly with the offending path rather than later in cvtColor.
        if image is None:
            raise FileNotFoundError(f'Could not read image: {img_path}')
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.preprocess_image(image)

        if not self.is_train:
            return image

        mask_path = self.mask_paths[idx]
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        if mask is None:
            raise FileNotFoundError(f'Could not read mask: {mask_path}')
        return image, self.preprocess_mask(mask)


# Basic computation unit of the network.
class VGGBlock(nn.Module):
    """Two stacked 3x3 convolutions, each followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(middle_channels)
        self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # Stage 1: conv -> normalize -> activate.
        x = self.relu(self.bn1(self.conv1(x)))
        # Stage 2: same pattern, producing the block's output channels.
        return self.relu(self.bn2(self.conv2(x)))


class UNet(nn.Module):
    """Plain U-Net with a four-stage encoder/decoder.

    Args:
        num_classes (int): number of output channels (1 for binary
            segmentation).
        input_channels (int): number of channels in the input image.
        **kwargs: ignored.  NOTE: any misspelled keyword argument (e.g.
            ``in_channels``) is silently swallowed here — double-check
            constructor calls.
    """

    def __init__(self, num_classes, input_channels=3, **kwargs):
        super().__init__()

        nb_filter = [32, 64, 128, 256, 512]

        self.pool = nn.MaxPool2d(2, 2)
        # Bilinear upsampling doubles spatial resolution on the decoder path.
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Encoder: channel width doubles at each stage.
        self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])

        # Decoder: each stage takes the skip connection concatenated with
        # the upsampled features from the stage below.
        self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])
        self.conv2_2 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv1_3 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv0_4 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])

        # 1x1 conv projects to the requested number of classes.
        self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)

    def forward(self, input):
        # Encoder with 2x pooling between stages; the input's spatial size
        # must be divisible by 16 for the skip connections to line up.
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x4_0 = self.conv4_0(self.pool(x3_0))

        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))

        output = self.final(x0_4)
        # Functional sigmoid instead of constructing an nn.Sigmoid module on
        # every forward pass.  The output is a per-pixel probability, so the
        # training loss must be BCELoss (not BCEWithLogitsLoss).
        return torch.sigmoid(output)


def train(model, dataloader, criterion, optimizer, device, num_epochs=31):
    """Train a segmentation model and checkpoint it every 3 epochs.

    Args:
        model: network whose forward returns either a single probability
            map or a list of maps (deep supervision).
        dataloader: yields ``(images, masks)`` batches with masks of
            shape (B, H, W).
        criterion: pixel-wise loss, e.g. ``nn.BCELoss`` when the model
            already applies a sigmoid.
        optimizer: optimizer over ``model.parameters()``.
        device: device to train on.
        num_epochs (int): number of passes over ``dataloader``.
    """
    model.to(device)  # move the model to the target device
    model.train()  # enable training mode (BatchNorm/Dropout behaviour)

    for epoch in range(num_epochs):
        print(f'第{epoch + 1}轮训练:')

        epoch_loss = 0.0  # running sum of batch losses for this epoch
        for batch_idx, (images, masks) in enumerate(dataloader):
            images = images.float().to(device)
            # Add the channel dimension once, before branching, so BOTH the
            # single-output and deep-supervision paths compare (B, 1, H, W)
            # predictions against (B, 1, H, W) targets.  (Previously only the
            # single-output branch unsqueezed, so multi-output losses would
            # broadcast against (B, H, W) masks.)
            masks = masks.float().unsqueeze(dim=1).to(device)

            optimizer.zero_grad()  # reset gradients
            outputs = model(images)  # forward pass

            # Deep-supervision models return one map per decoder stage;
            # average the per-stage losses.
            if isinstance(outputs, list):
                loss = sum(criterion(output, masks) for output in outputs) / len(outputs)
            else:
                loss = criterion(outputs, masks)

            loss.backward()  # backpropagate
            optimizer.step()  # update parameters

            epoch_loss += loss.item()

            # Report every 10 batches.
            if (batch_idx + 1) % 10 == 0:
                print(f'Epoch {epoch + 1}, Batch {batch_idx + 1}, Loss: {loss.item():.4f}')

        average_loss = epoch_loss / len(dataloader)
        print(f'Epoch {epoch + 1}, Average Loss: {average_loss:.4f}')

        # Checkpoint every 3 epochs.
        if (epoch + 1) % 3 == 0:
            save_path = f'../pt_file/Unet_Upsample-{epoch + 1}.pt'
            # Create the checkpoint directory if needed so torch.save does
            # not fail on a fresh checkout.
            os.makedirs(os.path.dirname(save_path), exist_ok=True)
            torch.save(model.state_dict(), save_path)
            print(f'Model saved to {save_path}')


if __name__ == '__main__':
    # Training dataset (images + masks).
    train_dataset = MedicalDataset(
        root_dir='/Volumes/For_Mac/dateset/Pulmonary_X_ray_and_masks',
        is_train=True,
        image_size=256
    )

    # Test dataset (images only); not used during training, kept for
    # later inference.
    test_dataset = MedicalDataset(
        root_dir='/Volumes/For_Mac/dateset/Pulmonary_X_ray_and_masks',
        is_train=False,
        image_size=256
    )

    # Data loader for the training split.
    from torch.utils.data import DataLoader

    train_loader = DataLoader(
        train_dataset,
        batch_size=4,
        shuffle=True,
        num_workers=4
    )

    # Model, loss and optimizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # FIX: the constructor's parameter is `input_channels`; the previous
    # `in_channels=3` was silently swallowed by **kwargs and ignored (it
    # only worked because the default happens to be 3).
    model = UNet(num_classes=1, input_channels=3).to(device)

    # BCELoss matches UNet.forward, which already applies a sigmoid; switch
    # to BCEWithLogitsLoss if the final sigmoid is ever removed.
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Start training.
    train(model, train_loader, criterion, optimizer, device)