import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
from torchvision.models.swin_transformer import swin_t
from tqdm import tqdm  # tqdm is used for progress-bar display


class SwinUNetWithPretrainedEncoder(nn.Module):
    """Binary-segmentation model: frozen pretrained Swin-T encoder plus a
    small convolutional decoder and a 1x1 segmentation head.

    Args:
        num_classes: number of output channels of the segmentation head
            (default 1, i.e. a single sigmoid probability map).
    """

    def __init__(self, num_classes=1):
        super().__init__()
        # Pretrained Swin Transformer (tiny) used as the feature extractor.
        self.encoder = swin_t(weights='DEFAULT')

        # Freeze every encoder parameter — only decoder/head are trained.
        for param in self.encoder.parameters():
            param.requires_grad = False

        # Channel count of the final encoder feature map.
        encoder_channels = self.encoder.head.in_features

        # Plain conv decoder that progressively reduces channels to 64.
        self.decoder = nn.Sequential(
            nn.Conv2d(encoder_channels, 512, 3, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 128, 3, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        # 1x1 conv mapping decoder features to per-class logits.
        self.segmentation_head = nn.Conv2d(64, num_classes, 1)

        # The encoder downsamples by 32x overall; upsample back to the
        # input resolution.
        self.upsample = nn.Upsample(scale_factor=32, mode='bilinear', align_corners=False)

    def forward(self, x):
        """Return a sigmoid probability map of shape (B, num_classes, H, W)."""
        x = self.encoder.features(x)

        # torchvision's swin features are channels-last (B, H', W', C);
        # move channels to dim 1 for the conv decoder.
        x = x.permute(0, 3, 1, 2)
        x = self.decoder(x)

        # Per-pixel class logits.
        x = self.segmentation_head(x)

        # Restore the original spatial resolution.
        x = self.upsample(x)

        # Fix: use the functional sigmoid instead of instantiating a fresh
        # nn.Sigmoid module on every forward call.
        # NOTE(review): pairing an in-model sigmoid with BCELoss works, but
        # BCEWithLogitsLoss on raw logits would be numerically more stable.
        return torch.sigmoid(x)


class MedicalDataset(Dataset):
    """Dataset of (image, mask) pairs for training or images only for test.

    Expected layout under ``root_dir``:
        train_images/<case>/images/*  and  train_images/<case>/masks/*
        test_images/<case>/*
    Only the first (sorted, non-hidden) file of each folder is used.
    """

    def __init__(self, root_dir, is_train=True, image_size=256):
        """
        Args:
            root_dir (string): root path containing the train_images and
                test_images folders.
            is_train (bool): training mode (images + masks) vs test mode
                (images only).
            image_size (int): side length that images/masks are resized to.
        """
        self.root_dir = root_dir
        self.is_train = is_train
        self.image_size = image_size
        self.image_paths = []
        self.mask_paths = []

        # ImageNet normalization to match the pretrained encoder's training.
        self.image_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        # Collect file paths from the split-specific folder.
        if self.is_train:
            train_dir = os.path.join(root_dir, 'train_images')
            for folder in sorted(os.listdir(train_dir)):
                folder_path = os.path.join(train_dir, folder)
                if not os.path.isdir(folder_path):
                    continue
                img_folder = os.path.join(folder_path, 'images')
                mask_folder = os.path.join(folder_path, 'masks')

                # Skip cases missing either subfolder.
                if not os.path.exists(img_folder) or not os.path.exists(mask_folder):
                    continue

                # Fix: sort and drop hidden files (e.g. macOS .DS_Store) so
                # the image/mask pairing is deterministic and never picks
                # a junk file as entry [0].
                img_files = self._visible_files(img_folder)
                mask_files = self._visible_files(mask_folder)

                if img_files and mask_files:  # skip empty folders
                    self.image_paths.append(os.path.join(img_folder, img_files[0]))
                    self.mask_paths.append(os.path.join(mask_folder, mask_files[0]))
        else:
            test_dir = os.path.join(root_dir, 'test_images')
            for folder in sorted(os.listdir(test_dir)):
                folder_path = os.path.join(test_dir, folder)
                if os.path.isdir(folder_path):
                    test_files = self._visible_files(folder_path)
                    if test_files:  # skip empty folders
                        self.image_paths.append(os.path.join(folder_path, test_files[0]))
                        self.mask_paths.append(None)  # no masks at test time

    @staticmethod
    def _visible_files(folder):
        """Sorted directory entries, excluding hidden files like .DS_Store."""
        return sorted(f for f in os.listdir(folder) if not f.startswith('.'))

    def __len__(self):
        return len(self.image_paths)

    def preprocess_image(self, image):
        """Resize to (image_size, image_size) and apply ToTensor + normalize."""
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        image = image.resize((self.image_size, self.image_size), Image.BILINEAR)
        return self.image_transform(image)

    def preprocess_mask(self, mask):
        """Resize with nearest-neighbour (keeps labels crisp), binarize to {0, 1}."""
        if isinstance(mask, np.ndarray):
            mask = Image.fromarray(mask)
        mask = mask.resize((self.image_size, self.image_size), Image.NEAREST)
        mask = torch.from_numpy(np.array(mask)).float()
        return (mask > 0).float()  # binarize

    def __getitem__(self, idx):
        # cv2 loads BGR; convert to RGB before the PIL/transform pipeline.
        img_path = self.image_paths[idx]
        image = cv2.imread(img_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = self.preprocess_image(image)

        if self.is_train:
            mask = cv2.imread(self.mask_paths[idx], cv2.IMREAD_GRAYSCALE)
            return image, self.preprocess_mask(mask)
        return image


def train(model, dataloader, criterion, optimizer, device, num_epochs=31,
          save_dir='../pt_file'):
    """Train `model` on `dataloader`, showing nested tqdm progress bars.

    Args:
        model: segmentation network; expected to output (B, 1, H, W).
        dataloader: yields (images, masks) batches; masks are (B, H, W).
        criterion: loss taking (outputs, masks) of matching shapes.
        optimizer: optimizer over the model's trainable parameters.
        device: torch device to train on.
        num_epochs: number of passes over the dataloader.
        save_dir: directory checkpoints are written to every 3 epochs
            (created if missing).
    """
    model.to(device)
    model.train()

    # Outer progress bar over epochs (position=0 keeps it at the top).
    epoch_pbar = tqdm(range(num_epochs), desc='Training Epochs', position=0)
    for epoch in epoch_pbar:
        epoch_loss = 0.0  # accumulated loss for this epoch

        # Inner per-batch bar below the epoch bar; leave=False clears it
        # when the epoch finishes.
        batch_pbar = tqdm(enumerate(dataloader),
                          total=len(dataloader),
                          desc=f'Epoch {epoch + 1}/{num_epochs}',
                          position=1,
                          leave=False)

        for batch_idx, (images, masks) in batch_pbar:
            images = images.float().to(device)
            # Fix: original floated/moved masks twice (before and after the
            # forward pass); do it once, adding the channel dim so masks
            # match the model's (B, 1, H, W) output.
            masks = masks.float().unsqueeze(dim=1).to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            # Show the current batch loss on the inner bar.
            batch_pbar.set_postfix({'batch_loss': f'{loss.item():.4f}'}, refresh=True)

        batch_pbar.close()

        # Report the mean loss over all batches on the epoch bar.
        average_loss = epoch_loss / len(dataloader)
        epoch_pbar.set_postfix({'avg_loss': f'{average_loss:.4f}'})

        # Checkpoint every 3 epochs; create the directory on first use so
        # torch.save does not fail on a missing path.
        if (epoch + 1) % 3 == 0:
            os.makedirs(save_dir, exist_ok=True)
            save_path = os.path.join(save_dir, f'SwinUnet-{epoch + 1}.pt')
            torch.save(model.state_dict(), save_path)
            tqdm.write(f'Model saved to {save_path}')


def test(model, num_display=10):
    """Load a trained checkpoint into `model` and visualize predictions.

    Runs the model over the test split and shows input-image / predicted-mask
    pairs with matplotlib for the first `num_display` batches.

    Args:
        model: the (uninitialized-weights) network to load the checkpoint into.
        num_display: how many batches to visualize (default 10, matching the
            original behavior).

    Raises:
        FileNotFoundError: if the checkpoint file does not exist.
    """
    # Fix: imported locally so this function does not depend on the caller's
    # module having imported DataLoader at its own top level.
    from torch.utils.data import DataLoader

    # Test dataset (images only, no masks).
    test_dataset = MedicalDataset(
        root_dir='/Volumes/For_Mac/dateset/Pulmonary_X_ray_and_masks',
        is_train=False,
        image_size=224
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=4,
        shuffle=False,
        num_workers=4
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the trained weights (path must match what training saved).
    model_path = '../pt_file/PreSwinTrans-30.pt'
    if not os.path.exists(model_path):
        # Fix: raise instead of bare exit() so callers get a real error.
        raise FileNotFoundError(f"Model file {model_path} does not exist.")
    model.load_state_dict(torch.load(model_path, map_location=device))
    print(f"Model loaded from {model_path}")

    # Fix: the original never moved the model to `device`, which crashes on
    # CUDA machines when the inputs are moved but the model is not.
    model.to(device)
    model.eval()
    with torch.no_grad():
        for i, test_X in enumerate(test_loader):
            if i >= num_display:  # only show the first few batches
                break

            # Move the batch to the device and predict.
            test_X = test_X.float().to(device)
            pred_Y = model(test_X)

            # Some models return a list of outputs; use the last one.
            if isinstance(pred_Y, list):
                pred_Y = pred_Y[-1]

            # Convert the first item of the batch to NumPy for display.
            test_X_np = test_X[0].permute(1, 2, 0).cpu().numpy()
            pred_Y_np = pred_Y[0].squeeze(0).cpu().numpy()
            pred_Y_np = (pred_Y_np > 0.5).astype(np.float32)  # binarize

            plt.figure(figsize=(8, 4))

            # Left panel: input image, de-normalized back to [0, 1] for viewing.
            plt.subplot(1, 2, 1)
            mean = np.array([0.485, 0.456, 0.406])
            std = np.array([0.229, 0.224, 0.225])
            test_X_display = (test_X_np * std) + mean
            test_X_display = np.clip(test_X_display, 0, 1)
            plt.imshow(test_X_display)
            plt.title("Input Image")
            plt.xticks([])  # hide x ticks
            plt.yticks([])  # hide y ticks
            plt.axis('off')

            # Right panel: predicted binary mask.
            plt.subplot(1, 2, 2)
            plt.imshow(pred_Y_np, cmap='gray')
            plt.title("Predicted Mask")
            plt.xticks([])  # hide x ticks
            plt.yticks([])  # hide y ticks
            plt.axis('off')

            plt.tight_layout()
            plt.show()

            print(f"Processed image {i + 1}/{num_display}")


if __name__ == '__main__':
    # Build the training dataset (224x224 matches the Swin-T input size).
    train_dataset = MedicalDataset(
        root_dir='/Volumes/For_Mac/dateset/Pulmonary_X_ray_and_masks',
        is_train=True,
        image_size=224
    )

    # DataLoader is imported here, inside the script body; note the import
    # happens before test(model) below, which also references DataLoader.
    from torch.utils.data import DataLoader

    train_loader = DataLoader(
        train_dataset,
        batch_size=4,
        shuffle=True,
        num_workers=4
    )

    # Model, loss, and optimizer setup.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = SwinUNetWithPretrainedEncoder().to(device)

    criterion = nn.BCELoss()  # BCELoss pairs with the sigmoid the model applies to its output
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Training is currently disabled; only evaluation/visualization runs.
    # train(model, train_loader, criterion, optimizer, device)
    test(model)
