import os

import numpy as np
import torch
import torch.nn.functional as F
from skimage import io
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms


class Dataset(torch.utils.data.Dataset):
    """Folder-per-sample segmentation dataset.

    Expects ``root_dir/<sample>/images/<file>`` and, for training,
    ``root_dir/<sample>/masks/<file...>`` (one binary mask per instance,
    merged into a single foreground mask).
    """

    def __init__(self, root_dir, target_size=256, min_size=64, max_size=1024, is_train=True):
        # root_dir: directory whose sub-directories are individual samples.
        # target_size: final square side length after resize + padding.
        # min_size / max_size: clamp range applied to the shorter / longer
        #   edge before the aspect-preserving resize.
        # is_train: when True, __getitem__ also loads and returns the mask.
        self.root_dir = root_dir
        self.target_size = target_size
        self.min_size = min_size
        self.max_size = max_size
        self.is_train = is_train

        self.folders = [d for d in os.listdir(root_dir)
                        if os.path.isdir(os.path.join(root_dir, d))]

    def _resize_pair(self, image, mask, size):
        # Resize image (bilinear) and, when present, mask (nearest) to the
        # same spatial size. Nearest keeps the mask binary.
        image = F.interpolate(image.unsqueeze(0), size=size,
                              mode='bilinear', align_corners=False).squeeze(0)
        if mask is not None:
            mask = F.interpolate(mask.unsqueeze(0), size=size,
                                 mode='nearest').squeeze(0)
        return image, mask

    def process_image_and_mask(self, image, mask):
        """Resize (aspect-preserving) and zero-pad to a target_size square.

        image: float tensor (C, H, W). mask: (1, H, W) tensor or None.
        Returns the processed pair; mask stays None when passed as None.

        Fix: the original interpolated ``mask`` unconditionally in step 1,
        crashing with AttributeError on the inference path (mask=None)
        whenever the image fell outside [min_size, max_size].
        """
        # 1. Clamp extreme sizes so the resize in step 2 never scales by a
        #    huge factor in a single step.
        h, w = image.shape[-2:]
        if h < self.min_size or w < self.min_size:
            scale = self.min_size / min(h, w)
            image, mask = self._resize_pair(
                image, mask, (int(h * scale), int(w * scale)))
        elif h > self.max_size or w > self.max_size:
            scale = self.max_size / max(h, w)
            image, mask = self._resize_pair(
                image, mask, (int(h * scale), int(w * scale)))

        # 2. Aspect-preserving resize: scale so the longer edge becomes
        #    target_size (the shorter edge then fits within it).
        h, w = image.shape[-2:]
        if h > w:
            new_h = self.target_size
            new_w = int(w * (new_h / h))
        else:
            new_w = self.target_size
            new_h = int(h * (new_w / w))
        image, mask = self._resize_pair(image, mask, (new_h, new_w))

        # 3. Zero-pad symmetrically to (target_size, target_size).
        pad_h = self.target_size - new_h
        pad_w = self.target_size - new_w
        pad_h1, pad_h2 = pad_h // 2, pad_h - (pad_h // 2)
        pad_w1, pad_w2 = pad_w // 2, pad_w - (pad_w // 2)

        padding = (pad_w1, pad_w2, pad_h1, pad_h2)  # F.pad order: (left, right, top, bottom)
        image = F.pad(image, padding, mode='constant', value=0)
        if mask is not None:
            mask = F.pad(mask, padding, mode='constant', value=0)

        return image, mask

    def __getitem__(self, idx):
        folder_name = self.folders[idx]
        folder_path = os.path.join(self.root_dir, folder_name)

        # Load the (single) image of this sample.
        images_dir = os.path.join(folder_path, 'images')
        image_path = os.path.join(images_dir, os.listdir(images_dir)[0])
        image = io.imread(image_path)

        # NOTE(review): images may be RGBA (4 channels); the model in
        # __main__ is built with input_channels=4, so all channels are kept
        # as loaded — confirm before stripping the alpha channel.
        image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        image = torch.from_numpy(image).float() / 255.0

        if self.is_train:
            # Merge the per-instance masks into one binary foreground mask.
            mask_dir = os.path.join(folder_path, 'masks')
            mask_files = sorted(os.listdir(mask_dir))
            combined_mask = None

            for mask_file in mask_files:
                mask_path = os.path.join(mask_dir, mask_file)
                # Load as grayscale; normalize 0-255 masks into [0, 1].
                mask = io.imread(mask_path, as_gray=True)
                if mask.max() > 1:
                    mask = mask / 255.0
                if combined_mask is None:
                    combined_mask = mask
                else:
                    # Union of instances, clipped so overlaps stay binary.
                    combined_mask = np.clip(combined_mask + mask, 0, 1)

            # Float mask in [0, 1] with an explicit channel dimension.
            combined_mask = combined_mask.astype(np.float32)
            combined_mask = np.clip(combined_mask, 0, 1)
            combined_mask = torch.from_numpy(combined_mask).float()
            combined_mask = combined_mask.unsqueeze(0)  # (1, H, W)

            image, mask = self.process_image_and_mask(image, combined_mask)

            # Safety clamp after interpolation/padding.
            mask = torch.clamp(mask, 0, 1)

            return image, mask, {'folder_name': folder_name}
        else:
            # Inference: no mask available.
            image, _ = self.process_image_and_mask(image, None)
            return image, {'folder_name': folder_name}

    def __len__(self):
        return len(self.folders)


# 基本计算单元
# Basic computational unit
class VGGBlock(nn.Module):
    """Two stacked conv3x3 -> BatchNorm -> ReLU stages (VGG style)."""

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        # Attribute names are kept (conv1/bn1/conv2/bn2) so state_dict
        # keys stay compatible with existing checkpoints.
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, middle_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(middle_channels)
        self.conv2 = nn.Conv2d(middle_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        """Apply conv -> bn -> relu twice, preserving spatial size."""
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2)):
            x = self.relu(bn(conv(x)))
        return x


class NestedUNet(nn.Module):
    """UNet++ backbone built from VGGBlocks with dense nested skip paths."""

    def __init__(self, num_classes=1, input_channels=3, deep_supervision=True, **kwargs):
        super().__init__()
        # Channel widths of the five encoder depths.
        nb_filter = [64, 128, 256, 512, 1024]
        # When True, every x0_j node gets its own 1x1 head so all four
        # intermediate decoders can be supervised.
        self.deep_supervision = deep_supervision

        self.pool = nn.MaxPool2d(2, 2)  # halve spatial size between depths
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

        # Backbone column (plain encoder).
        self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])
        self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])
        self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])
        self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])
        self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])

        # Nested decoder nodes; conv{i}_{j} consumes j same-depth skips
        # plus one upsampled feature from depth i+1.
        self.conv0_1 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_1 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_1 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])
        self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])

        self.conv0_2 = VGGBlock(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_2 = VGGBlock(nb_filter[1] * 2 + nb_filter[2], nb_filter[1], nb_filter[1])
        self.conv2_2 = VGGBlock(nb_filter[2] * 2 + nb_filter[3], nb_filter[2], nb_filter[2])

        self.conv0_3 = VGGBlock(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], nb_filter[0])
        self.conv1_3 = VGGBlock(nb_filter[1] * 3 + nb_filter[2], nb_filter[1], nb_filter[1])

        self.conv0_4 = VGGBlock(nb_filter[0] * 4 + nb_filter[1], nb_filter[0], nb_filter[0])

        if self.deep_supervision:
            self.final1 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final2 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final3 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final4 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
        else:
            self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)

    def forward(self, input):
        """Return four sigmoid maps (deep supervision) or one."""
        pool, up = self.pool, self.up

        def fuse(*feats):
            # Concatenate skip features along the channel axis.
            return torch.cat(feats, 1)

        # Column j=1 nodes appear as soon as their inputs exist.
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(pool(x0_0))
        x0_1 = self.conv0_1(fuse(x0_0, up(x1_0)))

        x2_0 = self.conv2_0(pool(x1_0))
        x1_1 = self.conv1_1(fuse(x1_0, up(x2_0)))
        x0_2 = self.conv0_2(fuse(x0_0, x0_1, up(x1_1)))

        x3_0 = self.conv3_0(pool(x2_0))
        x2_1 = self.conv2_1(fuse(x2_0, up(x3_0)))
        x1_2 = self.conv1_2(fuse(x1_0, x1_1, up(x2_1)))
        x0_3 = self.conv0_3(fuse(x0_0, x0_1, x0_2, up(x1_2)))

        x4_0 = self.conv4_0(pool(x3_0))
        x3_1 = self.conv3_1(fuse(x3_0, up(x4_0)))
        x2_2 = self.conv2_2(fuse(x2_0, x2_1, up(x3_1)))
        x1_3 = self.conv1_3(fuse(x1_0, x1_1, x1_2, up(x2_2)))
        x0_4 = self.conv0_4(fuse(x0_0, x0_1, x0_2, x0_3, up(x1_3)))

        if self.deep_supervision:
            heads = (self.final1, self.final2, self.final3, self.final4)
            feats = (x0_1, x0_2, x0_3, x0_4)
            # torch.sigmoid squashes each head's logits into [0, 1].
            return [torch.sigmoid(head(f)) for head, f in zip(heads, feats)]
        else:
            # Single probability map from the deepest decoder node.
            return torch.sigmoid(self.final(x0_4))


def train(model, dataloader, criterion, optimizer, device, num_epochs=31):
    """Train `model` on `dataloader`, averaging losses across all outputs.

    criterion must expose ``cul_loss(pred, target)``. Deep-supervision
    head losses are combined with equal weights. A checkpoint is saved
    every 3 epochs.

    Fixes vs. the original:
    - MultipleOutputLoss2 is built once, not once per batch.
    - The epoch summary reports the mean loss over the epoch instead of
      the last batch's loss, and numbers epochs consistently (1-based).
    - Works when the model returns a single tensor (deep_supervision=False)
      as well as a list of outputs.
    """
    model.train()
    # Equal-weight combiner for the deep-supervision heads.
    deep_loss = MultipleOutputLoss2([1, 1, 1, 1])
    for epoch in range(num_epochs):
        print(f'第{epoch + 1}轮训练:')
        epoch_loss = 0.0
        num_batches = 0
        for images, masks, _ in dataloader:
            images = images.float().to(device)
            masks = masks.float().to(device)

            optimizer.zero_grad()
            outputs = model(images)
            # A single-tensor output (no deep supervision) is treated as a
            # one-element list so both model configurations train.
            if not isinstance(outputs, (list, tuple)):
                outputs = [outputs]
            # One loss per supervised output.
            loss_list = [criterion.cul_loss(output, masks) for output in outputs]
            loss = deep_loss.cul_deep_loss(loss_list)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1

        print(f'Epoch {epoch + 1}, Loss: {epoch_loss / max(num_batches, 1)}')
        if (epoch + 1) % 3 == 0:
            torch.save(model.state_dict(), f'UPP_Cell-{epoch}.pt')


class UNetPlusPlusLoss(nn.Module):
    """Weighted sum of BCE and soft-Dice loss for probability maps.

    `pred` is expected to already be sigmoid probabilities in [0, 1]
    (the network applies sigmoid itself), hence plain BCELoss rather
    than BCEWithLogitsLoss.
    """

    def __init__(self, ce_weight=0.5, dice_weight=0.5):
        super().__init__()
        self.ce_weight = ce_weight
        self.dice_weight = dice_weight
        # Probabilities in, so BCELoss (not the logits variant).
        self.ce = nn.BCELoss()

    def dice_loss(self, pred, target):
        """Soft Dice loss over all elements; no sigmoid applied here."""
        flat_pred = pred.view(-1)
        flat_target = target.view(-1)

        overlap = (flat_pred * flat_target).sum()
        total = flat_pred.sum() + flat_target.sum()

        # Epsilon guards the all-background case (total == 0).
        return 1 - (2.0 * overlap + 1e-7) / (total + 1e-7)

    def cul_loss(self, pred, target):
        """Combine BCE and Dice with the configured weights."""
        return (self.ce_weight * self.ce(pred, target)
                + self.dice_weight * self.dice_loss(pred, target))


class MultipleOutputLoss2(nn.Module):
    """Combine a list of already-computed per-output losses into one scalar."""

    def __init__(self, weight_factors=None):
        """
        :param weight_factors: per-output weights; None means all ones.
        """
        super(MultipleOutputLoss2, self).__init__()
        self.weight_factors = weight_factors

    def cul_deep_loss(self, x):
        """Return the weighted sum of the loss values in `x`."""
        assert isinstance(x, (tuple, list)), "x must be either tuple or list"

        weights = [1] * len(x) if self.weight_factors is None else self.weight_factors

        total = 0
        for idx, term in enumerate(x):
            # Zero-weighted outputs are skipped entirely (no graph edge).
            if weights[idx] != 0:
                total += weights[idx] * term
        return total


if __name__ == '__main__':
    # Training entry point: dataset -> dataloader -> model -> optimizer -> train.
    dataset = Dataset(
        root_dir='/Volumes/For_Mac/unet++/unet++/inputs/stage1_train',
        target_size=256,
        min_size=64,
        max_size=1024
    )

    # Optional normalization / augmentation pipeline.
    # NOTE(review): this `transform` is never passed to the Dataset or the
    # DataLoader below, so it currently has no effect — wire it up or remove it.
    transform = transforms.Compose([
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    dataloader = DataLoader(dataset, batch_size=10, shuffle=True)

    # Model, loss function, and optimizer.
    # input_channels=4 — presumably the stage1 images are RGBA; verify against
    # the data, since Dataset keeps all channels as loaded.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = NestedUNet(1, 4, True).to(device)
    criterion = UNetPlusPlusLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Start training (defaults to 31 epochs; checkpoints every 3 epochs).
    train(model, dataloader, criterion, optimizer, device)
