import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage import io
from torch.utils.data import DataLoader


class DoubleConv(nn.Module):
    """Two consecutive (3x3 Conv -> BatchNorm -> ReLU) stages.

    Used as the basic feature-processing unit in both the encoder
    (downsampling) and decoder (upsampling) paths of the U-Net.
    Spatial size is preserved (kernel 3, padding 1).
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Build the six layers in order; keeping them in a single
        # nn.Sequential named `double_conv` preserves state_dict keys
        # (double_conv.0 ... double_conv.5).
        stages = []
        channels = in_channels
        for _ in range(2):
            stages.append(nn.Conv2d(channels, out_channels, kernel_size=3, padding=1))
            stages.append(nn.BatchNorm2d(out_channels))  # stabilizes training, speeds convergence
            stages.append(nn.ReLU(inplace=True))         # non-linearity
            channels = out_channels  # second conv maps out_channels -> out_channels
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both conv stages to x and return the result."""
        return self.double_conv(x)


# Define the U-Net network architecture
class UNet(nn.Module):
    """Standard U-Net for image segmentation.

    Encoder: repeated DoubleConv + 2x2 max-pool. Decoder: transposed-conv
    upsampling with encoder skip connections concatenated channel-wise.
    The output passes through a sigmoid, so values lie in [0, 1].
    """

    def __init__(self, in_channels=3, out_channels=1, features=(64, 128, 256, 512)):
        """
        Args:
            in_channels: channels of the input image (3 = RGB).
            out_channels: channels of the predicted mask (1 = binary mask).
            features: encoder widths, shallow to deep.
                BUG FIX: the original used a mutable list as the default
                argument (shared across calls); a tuple is equivalent here
                and backward compatible.
        """
        super(UNet, self).__init__()
        self.downs = nn.ModuleList()  # encoder DoubleConv stages
        self.ups = nn.ModuleList()    # decoder: alternating ConvTranspose2d / DoubleConv
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)  # halves spatial size
        # Conv output size: o = floor((i + 2p - k) / s) + 1
        #   o: output size, i: input size, p: padding, k: kernel size, s: stride.

        # Encoder path: channel widths in_channels -> 64 -> 128 -> 256 -> 512.
        for feature in features:
            self.downs.append(DoubleConv(in_channels, feature))
            in_channels = feature  # next stage consumes this stage's output

        # Decoder path (deep to shallow). Each stage: a transposed conv that
        # doubles the spatial size (and halves channels 2*f -> f), then a
        # DoubleConv over the concatenated (skip + upsampled) map (2*f channels).
        for feature in reversed(features):
            self.ups.append(
                nn.ConvTranspose2d(feature * 2, feature, kernel_size=2, stride=2)
            )
            self.ups.append(DoubleConv(feature * 2, feature))

        # Bottleneck at the bottom of the "U": widest representation.
        self.bottleneck = DoubleConv(features[-1], features[-1] * 2)
        # 1x1 conv maps the shallowest decoder width to the requested output channels.
        self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)

    def forward(self, x):
        """Run the encoder-decoder and return a sigmoid-activated mask."""
        skip_connections = []  # encoder feature maps, shallow to deep

        # Encoder
        for down in self.downs:
            x = down(x)
            skip_connections.append(x)  # saved for the matching decoder stage
            x = self.pool(x)

        x = self.bottleneck(x)

        # Decoder consumes the skips deepest-first.
        skip_connections = skip_connections[::-1]

        for idx in range(0, len(self.ups), 2):  # (upsample, DoubleConv) pairs
            x = self.ups[idx](x)  # transposed conv: spatial size doubles
            skip_connection = skip_connections[idx // 2]

            # Odd input sizes make the upsampled map smaller than the skip map;
            # resize so channel-wise concatenation is possible.
            if x.shape != skip_connection.shape:
                x = F.interpolate(x, size=skip_connection.shape[2:], mode='bilinear', align_corners=True)

            x = torch.cat((skip_connection, x), dim=1)  # channels double after concat
            x = self.ups[idx + 1](x)  # fuse with DoubleConv

        # FIX: use the functional sigmoid instead of instantiating a fresh
        # nn.Sigmoid module on every forward pass (same math, no allocation).
        return torch.sigmoid(self.final_conv(x))


class Dataset(torch.utils.data.Dataset):
    """Segmentation dataset laid out as one folder per sample.

    Expects root_dir/<sample>/images/<file> and, in training mode,
    root_dir/<sample>/masks/<files...> with one binary mask per instance;
    the per-instance masks are merged into a single foreground mask.
    """

    def __init__(self, root_dir, target_size=256, min_size=64, max_size=1024, is_train=True):
        self.root_dir = root_dir
        self.target_size = target_size  # final square side length (after resize + pad)
        self.min_size = min_size        # images whose shorter side is below this are upscaled
        self.max_size = max_size        # images whose longer side exceeds this are downscaled
        self.is_train = is_train        # training mode additionally loads and combines masks

        # One sub-directory per sample.
        self.folders = [d for d in os.listdir(root_dir)
                        if os.path.isdir(os.path.join(root_dir, d))]

    @staticmethod
    def _resize(image, mask, size):
        """Resize `image` (bilinear) and, when present, `mask` (nearest) to `size`=(h, w)."""
        image = F.interpolate(image.unsqueeze(0), size=size,
                              mode='bilinear', align_corners=False).squeeze(0)
        if mask is not None:
            mask = F.interpolate(mask.unsqueeze(0), size=size,
                                 mode='nearest').squeeze(0)
        return image, mask

    def process_image_and_mask(self, image, mask):
        """Resize a (C, H, W) float image and optional (1, H, W) mask to a
        zero-padded target_size x target_size square, preserving aspect ratio.

        Returns (image, mask); mask is None when None was passed in.
        """
        # 1. Clamp extreme resolutions into [min_size, max_size].
        # BUG FIX: the original interpolated `mask` unconditionally here, so
        # inference mode (mask=None) crashed on very small or very large images;
        # _resize now guards the mask just like steps 2 and 3 always did.
        h, w = image.shape[-2:]
        if h < self.min_size or w < self.min_size:
            scale = self.min_size / min(h, w)
            image, mask = self._resize(image, mask, (int(h * scale), int(w * scale)))
        elif h > self.max_size or w > self.max_size:
            scale = self.max_size / max(h, w)
            image, mask = self._resize(image, mask, (int(h * scale), int(w * scale)))

        # 2. Aspect-preserving resize so the longer side equals target_size.
        h, w = image.shape[-2:]
        if h > w:
            new_h = self.target_size
            new_w = int(w * (new_h / h))
        else:
            new_w = self.target_size
            new_h = int(h * (new_w / w))
        image, mask = self._resize(image, mask, (new_h, new_w))

        # 3. Zero-pad the shorter side (split evenly) up to target_size.
        pad_h = self.target_size - new_h
        pad_w = self.target_size - new_w

        pad_h1, pad_h2 = pad_h // 2, pad_h - (pad_h // 2)
        pad_w1, pad_w2 = pad_w // 2, pad_w - (pad_w // 2)

        padding = (pad_w1, pad_w2, pad_h1, pad_h2)  # (left, right, top, bottom)
        image = F.pad(image, padding, mode='constant', value=0)
        if mask is not None:
            mask = F.pad(mask, padding, mode='constant', value=0)

        return image, mask

    def __getitem__(self, idx):
        """Return (image, mask, meta) in training mode, else (image, meta)."""
        folder_name = self.folders[idx]
        folder_path = os.path.join(self.root_dir, folder_name)

        # Load the (single) image file for this sample.
        image_path = os.path.join(folder_path, 'images', os.listdir(os.path.join(folder_path, 'images'))[0])
        image = io.imread(image_path)

        if image.shape[-1] == 4:  # drop the alpha channel
            image = image[..., :3]
        # NOTE(review): assumes an HWC color image; a 2-D grayscale file would
        # fail the transpose below — confirm against the actual dataset.
        image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        image = torch.from_numpy(image).float() / 255.0  # scale pixels to [0, 1]

        if self.is_train:
            # Union of all per-instance masks, normalized to [0, 1].
            mask_dir = os.path.join(folder_path, 'masks')
            mask_files = sorted(os.listdir(mask_dir))
            combined_mask = None

            for mask_file in mask_files:
                mask_path = os.path.join(mask_dir, mask_file)
                # Ensure the mask is grayscale and in [0, 1].
                mask = io.imread(mask_path, as_gray=True)
                if mask.max() > 1:  # 0-255 encoded masks
                    mask = mask / 255.0
                if combined_mask is None:
                    combined_mask = mask
                else:
                    combined_mask = np.clip(combined_mask + mask, 0, 1)

            # Final dtype/range normalization before converting to a tensor.
            combined_mask = combined_mask.astype(np.float32)
            combined_mask = np.clip(combined_mask, 0, 1)
            combined_mask = torch.from_numpy(combined_mask).float()
            combined_mask = combined_mask.unsqueeze(0)  # add channel dim -> (1, H, W)

            # Resize/pad image and mask together.
            image, mask = self.process_image_and_mask(image, combined_mask)

            # Interpolation can nudge values slightly outside [0, 1]; clamp.
            mask = torch.clamp(mask, 0, 1)

            return image, mask, {'folder_name': folder_name}
        else:
            # Inference mode: no mask to process.
            image, _ = self.process_image_and_mask(image, None)
            return image, {'folder_name': folder_name}

    def __len__(self):
        return len(self.folders)


def test(model, dataloader, device):
    """Visualize model predictions for up to 10 images from `dataloader`.

    Args:
        model: segmentation network; called as model(x), expected to return a
            tensor (or a list of tensors, in which case the last is used).
        dataloader: yields (image, meta) pairs, batch size 1 assumed for display.
        device: torch device to run inference on.
    """
    model.eval()
    with torch.no_grad():
        # FIX: cleaned up the malformed `for i, [test_X, _], in ...` unpacking
        # (stray trailing comma, list-pattern for a 2-tuple).
        for i, (test_X, _) in enumerate(dataloader):
            if i >= 10:  # only show the first 10 images
                break

            # Move the input to the device and predict.
            test_X = test_X.float().to(device)
            pred_Y = model(test_X)

            # Deep-supervision models return a list; keep the final output.
            if isinstance(pred_Y, list):
                pred_Y = pred_Y[-1]

            # Convert to NumPy for display.
            test_X_np = test_X[0].permute(1, 2, 0).cpu().numpy()  # CHW -> HWC
            pred_Y_np = pred_Y[0].squeeze(0).cpu().numpy()
            pred_Y_np = (pred_Y_np > 0.5).astype(np.float32)  # binarize mask

            plt.figure(figsize=(8, 4))

            # Input image.
            plt.subplot(1, 2, 1)
            # BUG FIX: this file's Dataset only scales pixels to [0, 1]
            # (divide by 255, no ImageNet mean/std normalization), so the
            # previous ImageNet de-normalization distorted the display.
            # Show the input as-is, clipped to the valid range.
            test_X_display = np.clip(test_X_np, 0, 1)
            plt.imshow(test_X_display)
            plt.title("Input Image")
            plt.xticks([])  # hide x-axis ticks
            plt.yticks([])  # hide y-axis ticks
            plt.axis('off')

            # Predicted segmentation mask.
            plt.subplot(1, 2, 2)
            plt.imshow(pred_Y_np, cmap='gray')
            plt.title("Predicted Mask")
            plt.xticks([])
            plt.yticks([])
            plt.axis('off')

            plt.tight_layout()
            plt.show()

            print(f"Processed image {i + 1}/10")


if __name__ == '__main__':

    # Build the test dataset (inference mode: no masks are loaded).
    test_dataset = Dataset(
        root_dir='/Volumes/For_Mac/unet++/unet++/inputs/test',
        target_size=256,
        min_size=64,
        max_size=1024,
        is_train=False
    )

    # One image per batch, deterministic order, for visualization.
    test_loader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=4
    )

    # Initialize the model on GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = UNet().to(device)

    # Load the trained model parameters.
    model_path = '../pt_file/Cell-30.pt'  # path to the saved checkpoint
    if os.path.exists(model_path):
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources (consider weights_only=True on
        # recent PyTorch versions).
        checkpoint = torch.load(model_path, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        print(f"Model loaded from {model_path}")
    else:
        print(f"Model file {model_path} does not exist.")
        # FIX: `exit()` is a site-injected interactive helper; raising
        # SystemExit with a non-zero code is the reliable way for a script
        # to abort and signal failure.
        raise SystemExit(1)

    # Run the visualization pass.
    test(model, test_loader, device)
