import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage import io
from torch import optim
from torch.utils.data import DataLoader
from torchvision.models import vit_b_16, ViT_B_16_Weights
from tqdm import tqdm


class Dataset(torch.utils.data.Dataset):
    """Folder-per-sample segmentation dataset.

    Expected layout: ``root_dir/<sample>/images/<image file>`` and, for
    training, ``root_dir/<sample>/masks/<one file per instance>``.  All
    instance masks of a sample are merged into a single binary foreground
    mask.

    Args:
        root_dir: directory containing one sub-directory per sample.
        target_size: final square side length after resize + zero padding.
        min_size: images whose shorter side is below this are upscaled first.
        max_size: images whose longer side is above this are downscaled first.
        is_train: if True, ``__getitem__`` also loads and returns the mask.
    """

    def __init__(self, root_dir, target_size=256, min_size=64, max_size=1024, is_train=True):
        self.root_dir = root_dir
        self.target_size = target_size
        self.min_size = min_size
        self.max_size = max_size
        self.is_train = is_train

        # One sample per sub-directory of root_dir.
        self.folders = [d for d in os.listdir(root_dir)
                        if os.path.isdir(os.path.join(root_dir, d))]

    def process_image_and_mask(self, image, mask):
        """Aspect-preserving resize to ``target_size``, then zero-pad to square.

        Args:
            image: float tensor of shape (C, H, W).
            mask: tensor of shape (1, H, W), or None (inference mode).

        Returns:
            Tuple ``(image, mask)``, both ``target_size`` x ``target_size``;
            ``mask`` is None if None was passed in.
        """
        # 1. Clamp extreme sizes into [min_size, max_size] before the main
        #    resize so tiny/huge inputs behave sanely.
        h, w = image.shape[-2:]
        if h < self.min_size or w < self.min_size:
            scale = self.min_size / min(h, w)
            new_h, new_w = int(h * scale), int(w * scale)
            image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w),
                                  mode='bilinear', align_corners=False).squeeze(0)
            # BUGFIX: the original interpolated unconditionally and crashed
            # with AttributeError when mask is None (inference mode).
            if mask is not None:
                mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w),
                                     mode='nearest').squeeze(0)

        elif h > self.max_size or w > self.max_size:
            scale = self.max_size / max(h, w)
            new_h, new_w = int(h * scale), int(w * scale)
            image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w),
                                  mode='bilinear', align_corners=False).squeeze(0)
            # BUGFIX: same None guard as above.
            if mask is not None:
                mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w),
                                     mode='nearest').squeeze(0)

        # 2. Resize so the longer side equals target_size (keeps aspect ratio).
        h, w = image.shape[-2:]
        if h > w:
            new_h = self.target_size
            new_w = int(w * (new_h / h))
        else:
            new_w = self.target_size
            new_h = int(h * (new_w / w))

        image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w),
                              mode='bilinear', align_corners=False).squeeze(0)
        if mask is not None:
            mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w),
                                 mode='nearest').squeeze(0)

        # 3. Zero-pad symmetrically to a target_size x target_size square.
        pad_h = self.target_size - new_h
        pad_w = self.target_size - new_w

        pad_h1, pad_h2 = pad_h // 2, pad_h - (pad_h // 2)
        pad_w1, pad_w2 = pad_w // 2, pad_w - (pad_w // 2)

        padding = (pad_w1, pad_w2, pad_h1, pad_h2)  # (left, right, top, bottom)
        image = F.pad(image, padding, mode='constant', value=0)
        if mask is not None:
            mask = F.pad(mask, padding, mode='constant', value=0)

        return image, mask

    def __getitem__(self, idx):
        """Return ``(image, mask, meta)`` when training, else ``(image, meta)``."""
        folder_name = self.folders[idx]
        folder_path = os.path.join(self.root_dir, folder_name)

        # Load the first (assumed only) image file of this sample.
        image_path = os.path.join(folder_path, 'images', os.listdir(os.path.join(folder_path, 'images'))[0])
        image = io.imread(image_path)

        # Drop an alpha channel if present, then HWC -> CHW scaled to [0, 1].
        if image.shape[-1] == 4:
            image = image[..., :3]
        image = np.transpose(image, (2, 0, 1))
        image = torch.from_numpy(image).float() / 255.0

        if self.is_train:
            # Merge all instance masks into one binary foreground mask.
            mask_dir = os.path.join(folder_path, 'masks')
            mask_files = sorted(os.listdir(mask_dir))
            combined_mask = None

            for mask_file in mask_files:
                mask_path = os.path.join(mask_dir, mask_file)
                # Read as grayscale; normalize 0-255 masks down to 0-1.
                mask = io.imread(mask_path, as_gray=True)
                if mask.max() > 1:
                    mask = mask / 255.0
                if combined_mask is None:
                    combined_mask = mask
                else:
                    combined_mask = np.clip(combined_mask + mask, 0, 1)

            # ROBUSTNESS: fail loudly on an empty masks directory instead of
            # an obscure AttributeError on the astype() call below.
            if combined_mask is None:
                raise RuntimeError(f'No mask files found in {mask_dir}')

            # Ensure float32 in [0, 1] with a leading channel dimension.
            combined_mask = combined_mask.astype(np.float32)
            combined_mask = np.clip(combined_mask, 0, 1)
            combined_mask = torch.from_numpy(combined_mask).float()
            combined_mask = combined_mask.unsqueeze(0)  # Add channel dim

            # Resize/pad image and mask together.
            image, mask = self.process_image_and_mask(image, combined_mask)

            # Final safety clamp to [0, 1].
            mask = torch.clamp(mask, 0, 1)

            return image, mask, {'folder_name': folder_name}
        else:
            # Inference mode: no mask available.
            image, _ = self.process_image_and_mask(image, None)
            return image, {'folder_name': folder_name}

    def __len__(self):
        return len(self.folders)


class DecoderBlock(nn.Module):
    """Two stacked (Conv3x3 -> BatchNorm -> ReLU) stages.

    Spatial size is preserved (3x3 convs with padding=1); only the channel
    count changes, from ``in_channels`` to ``out_channels``.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = []
        channels = in_channels
        for _ in range(2):
            layers.extend([
                nn.Conv2d(channels, out_channels, 3, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ])
            channels = out_channels
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv stages to ``x`` (N, C, H, W)."""
        return self.conv(x)


class ViTSegmentation(nn.Module):
    """Segmentation model: frozen ViT-B/16 encoder + trainable conv decoder.

    The pretrained backbone (ImageNet-1k weights, downloaded on first use)
    is frozen; only the decoder receives gradients.  Input must be 224x224
    because the backbone's positional embedding is fixed to a 14x14 patch
    grid plus a CLS token.

    Args:
        num_classes: number of output channels (raw logits, no activation).
    """

    def __init__(self, num_classes):
        super().__init__()

        # Pretrained ViT-B/16 encoder.
        self.vit = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1)

        # Freeze the encoder: only the decoder below is trained.
        for param in self.vit.parameters():
            param.requires_grad = False

        # Drop the classification head; we only need patch features.
        self.vit.heads = nn.Identity()

        self.patch_size = 16   # ViT-B/16 patch size
        self.image_size = 224  # expected input resolution
        self.num_patches = (self.image_size // self.patch_size) ** 2  # 14*14
        self.hidden_dim = self.vit.hidden_dim  # 768 for ViT-B

        # Decoder: 1x1 channel reduction, then four (conv block + 2x upsample)
        # stages taking the 14x14 feature map back to 224x224 logits.
        self.decoder = nn.Sequential(
            # Channel reduction: hidden_dim -> 256.  CONSISTENCY FIX: use
            # self.hidden_dim instead of the hard-coded 768 so the decoder
            # stays in sync with the attribute above.
            nn.Conv2d(self.hidden_dim, 256, kernel_size=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),

            DecoderBlock(256, 128),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 14 -> 28

            DecoderBlock(128, 64),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 28 -> 56

            DecoderBlock(64, 32),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 56 -> 112

            DecoderBlock(32, 16),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 112 -> 224

            # Per-pixel logits.
            nn.Conv2d(16, num_classes, kernel_size=1)
        )

    def forward(self, x):
        """Map images (B, 3, 224, 224) to logits (B, num_classes, 224, 224)."""
        B = x.shape[0]
        # Patch embedding: (B, 3, 224, 224) -> (B, hidden_dim, 14, 14).
        x = self.vit.conv_proj(x)
        x = x.flatten(2)       # (B, hidden_dim, 196)
        x = x.transpose(1, 2)  # (B, 196, hidden_dim)

        # Prepend the CLS token.  NOTE: torchvision's Encoder adds the
        # positional embedding inside its own forward, so it is not (and
        # must not be) added here.
        cls_token = self.vit.class_token.expand(B, -1, -1)
        x = torch.cat((cls_token, x), dim=1)

        # Transformer blocks (adds pos_embedding internally, see above).
        x = self.vit.encoder(x)

        # Drop the CLS token, keep the 196 patch tokens.
        x = x[:, 1:, :]

        # (B, 196, hidden_dim) -> spatial map (B, hidden_dim, 14, 14).
        grid = self.image_size // self.patch_size
        x = x.transpose(1, 2).view(B, self.hidden_dim, grid, grid)

        # Decode to full-resolution logits.
        x = self.decoder(x)

        return x


# 添加Dice损失函数
class DiceLoss(nn.Module):
    """Soft Dice loss: ``1 - Dice coefficient`` on sigmoid probabilities.

    Args:
        smooth: additive smoothing term avoiding division by zero.
    """

    def __init__(self, smooth=1):
        super(DiceLoss, self).__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        # Logits -> probabilities, then flatten both tensors to 1-D.
        probs = torch.sigmoid(pred).view(-1)
        labels = target.view(-1)
        overlap = (probs * labels).sum()
        numerator = 2. * overlap + self.smooth
        denominator = probs.sum() + labels.sum() + self.smooth
        return 1 - numerator / denominator


# 组合损失函数
class CombinedLoss(nn.Module):
    """Weighted combination of BCE-with-logits and soft Dice loss.

    total = alpha * BCE + (1 - alpha) * Dice

    Args:
        alpha: weight of the BCE term; the Dice term gets ``1 - alpha``.
    """

    def __init__(self, alpha=0.5):
        super(CombinedLoss, self).__init__()
        self.alpha = alpha
        self.bce_loss = nn.BCEWithLogitsLoss()
        self.dice_loss = DiceLoss()

    def forward(self, pred, target):
        # Both terms consume raw logits; sigmoid happens inside each loss.
        bce_term = self.alpha * self.bce_loss(pred, target)
        dice_term = (1 - self.alpha) * self.dice_loss(pred, target)
        return bce_term + dice_term


def train(model, dataloader, criterion, optimizer, device, num_epochs=100, save_dir='checkpoints'):
    """Train ``model`` on ``dataloader``, checkpointing under ``save_dir``.

    Saves a numbered checkpoint every 3 epochs and keeps the lowest-loss
    model as ``best_model.pt``.  ``dataloader`` must yield
    ``(images, masks, meta)`` triples.
    """
    # Make sure the checkpoint directory exists.
    os.makedirs(save_dir, exist_ok=True)

    # Best (lowest) average epoch loss seen so far.
    best_loss = float('inf')

    for epoch in range(num_epochs):
        model.train()
        running_loss = 0

        # Progress bar over batches for this epoch.
        progress = tqdm(dataloader, desc=f'Epoch {epoch + 1}/{num_epochs}')
        for images, masks, _ in progress:
            images, masks = images.to(device), masks.to(device)

            # Forward pass and loss.
            outputs = model(images)
            loss = criterion(outputs, masks)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_loss = loss.item()
            running_loss += batch_loss
            progress.set_postfix({'loss': f'{batch_loss:.4f}'})

        avg_loss = running_loss / len(dataloader)
        print(f'\nEpoch {epoch + 1} Average Loss: {avg_loss:.4f}')

        # Shared checkpoint payload for both save paths below.
        state = {
            'epoch': epoch + 1,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': avg_loss,
        }

        # Periodic checkpoint every 3 epochs.
        if (epoch + 1) % 3 == 0:
            torch.save(state, os.path.join(save_dir, f'model_epoch_{epoch + 1}.pt'))
            print(f'Saved checkpoint at epoch {epoch + 1}')

        # Track and save the best model so far.
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(state, os.path.join(save_dir, 'best_model.pt'))
            print(f'Saved best model with loss {best_loss:.4f}')


if __name__ == "__main__":
    # Prefer the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # NOTE(review): hard-coded local dataset path — adjust for your machine.
    train_dataset = Dataset(
        root_dir='/Volumes/For_Mac/unet++/unet++/inputs/train',
        target_size=224,  # matches the ViT-B/16 input resolution
        min_size=64,
        max_size=1024
    )
    train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)

    # Single-channel output, combined BCE+Dice loss, Adam optimizer.
    net = ViTSegmentation(1).to(device)
    loss_fn = CombinedLoss().to(device)
    opt = optim.Adam(net.parameters(), lr=0.001)

    # Run the training loop with default epochs/checkpoint settings.
    train(net, train_loader, loss_fn, opt, device)
