import torch
import torch.nn as nn
from torchvision.models import vit_b_16, ViT_B_16_Weights


class DecoderBlock(nn.Module):
    """Two-stage conv refinement block: (Conv3x3 -> BN -> ReLU) applied twice.

    The first conv maps in_channels -> out_channels; the second keeps
    out_channels. Spatial size is preserved (3x3 convs, padding=1).
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        layers = []
        for c_in, c_out in ((in_channels, out_channels), (out_channels, out_channels)):
            layers += [
                nn.Conv2d(c_in, c_out, 3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply both conv stages; returns a tensor with out_channels channels."""
        return self.conv(x)


class ViTSegmentation(nn.Module):
    """Semantic segmentation with a frozen ViT-B/16 encoder and a conv decoder.

    The ViT turns a 224x224 image into a 14x14 grid of 768-d patch tokens;
    those tokens are reshaped into a spatial feature map and progressively
    upsampled (14 -> 28 -> 56 -> 112 -> 224) to per-pixel class logits.
    """

    def __init__(self, num_classes, weights=ViT_B_16_Weights.IMAGENET1K_V1):
        """
        Args:
            num_classes: number of segmentation classes (output channels).
            weights: torchvision weight enum for the ViT backbone, or None
                for random initialization (e.g. offline smoke tests).
                Defaults to the original pretrained ImageNet weights.
        """
        super().__init__()

        # ViT backbone (pretrained unless weights=None).
        self.vit = vit_b_16(weights=weights)

        # Freeze the encoder — only the decoder is trained.
        for param in self.vit.parameters():
            param.requires_grad = False

        # Drop the classification head; we only need patch features.
        self.vit.heads = nn.Identity()

        # Token-grid geometry, read from the backbone instead of hardcoding.
        self.patch_size = self.vit.patch_size  # 16 for ViT-B/16
        self.image_size = self.vit.image_size  # 224 by default
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.hidden_dim = self.vit.hidden_dim  # 768 for ViT-B

        # Decoder: 1x1 channel reduction, then 4 x (refine + 2x upsample).
        self.decoder = nn.Sequential(
            # Channel reduction: hidden_dim (768) -> 256.
            nn.Conv2d(self.hidden_dim, 256, kernel_size=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),

            # Refinement + upsampling stages.
            DecoderBlock(256, 128),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 14 -> 28

            DecoderBlock(128, 64),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 28 -> 56

            DecoderBlock(64, 32),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 56 -> 112

            DecoderBlock(32, 16),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),  # 112 -> 224

            # Final per-pixel classification layer.
            nn.Conv2d(16, num_classes, kernel_size=1)
        )

    def forward(self, x):
        """Encode with the frozen ViT and decode to per-pixel logits.

        Args:
            x: images of shape (B, 3, image_size, image_size) — 224x224 by
               default (the encoder's position embedding fixes the size).

        Returns:
            Logits of shape (B, num_classes, image_size, image_size).
        """
        B = x.shape[0]
        grid = self.image_size // self.patch_size  # tokens per side (14)

        # Patch embedding: (B, 3, H, W) -> (B, hidden_dim, grid, grid)
        # -> (B, num_patches, hidden_dim).
        x = self.vit.conv_proj(x)
        x = x.flatten(2)
        x = x.transpose(1, 2)

        # Prepend the class token. NOTE: the position embedding is added
        # inside self.vit.encoder, not here.
        cls_token = self.vit.class_token.expand(B, -1, -1)
        x = torch.cat((cls_token, x), dim=1)

        # Transformer encoder blocks.
        x = self.vit.encoder(x)

        # Discard the class token; keep only per-patch features.
        x = x[:, 1:, :]

        # Token sequence -> spatial map (B, hidden_dim, grid, grid).
        # reshape (not view) is safe on the non-contiguous transposed tensor.
        x = x.transpose(1, 2).reshape(B, self.hidden_dim, grid, grid)

        # Decode to full-resolution class logits, e.g. (B, C, 224, 224).
        x = self.decoder(x)

        return x


# 示例使用
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # 创建模型
    model = ViTSegmentation(num_classes=2).to(device)

    # 示例输入
    x = torch.randn(2, 3, 224, 224).to(device)

    # 前向传播
    output = model(x)
    print("Output shape:", output.shape)  # 应该是[2, 21, 224, 224]


if __name__ == "__main__":
    main()
