# Import required libraries
import numpy as np
import torch
import torch.nn as nn
from torchvision.models import resnet50

# Import required components from the project-local module
from Network_components import PatchEmbed, TransformerLayer


class CNN(nn.Module):
    """ResNet50-based feature extractor that exposes intermediate feature maps.

    The network is split into four sequential stages so that activations from
    each stage can be tapped for skip connections in a decoder.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Instantiate ResNet50 (no pretrained weights requested here) and
        # carve its child modules into four consecutive stages.
        stages = list(resnet50().children())
        # NOTE(review): stage boundaries follow torchvision's child ordering
        # (stem first, then the residual layers) — confirm against the
        # torchvision version in use.
        self.resnet_features1 = nn.Sequential(*stages[0:3])   # stage 1 (stem)
        self.resnet_features2 = nn.Sequential(*stages[3:5])   # stage 2
        self.resnet_features3 = nn.Sequential(*stages[5])     # stage 3
        self.resnet_features4 = nn.Sequential(*stages[6])     # stage 4

    def forward(self, x):
        """Run the input through all four stages.

        Returns:
            Tuple ``(deepest, feat1, feat2, feat3)`` — the final stage output
            first, followed by the three intermediate feature maps.
        """
        feat1 = self.resnet_features1(x)
        feat2 = self.resnet_features2(feat1)
        feat3 = self.resnet_features3(feat2)
        deepest = self.resnet_features4(feat3)
        return deepest, feat1, feat2, feat3


class TransformerBlock(nn.Module):
    """Transformer encoder: patch embedding followed by a stack of encoder layers."""

    def __init__(self, cnn_feat_size=14, patch_size=1, num_layers=12, in_channels=1024, emb_dim=768):
        # cnn_feat_size: spatial height/width of the CNN feature map fed in.
        super(TransformerBlock, self).__init__()
        # Patch-embedding layer: converts the feature map into a token sequence.
        self.patch_embedding = PatchEmbed(in_channels, cnn_feat_size, patch_size, emb_dim)
        # num_layers identical Transformer encoder layers.
        self.transformer_layers = nn.ModuleList(
            TransformerLayer(emb_dim) for _ in range(num_layers)
        )
        # Final layer normalization over the embedding dimension.
        self.norm = nn.LayerNorm(emb_dim, eps=1e-6)

    def forward(self, x):
        """Embed the input into patches, run every encoder layer, then normalize."""
        tokens = self.patch_embedding(x)
        for layer in self.transformer_layers:
            tokens = layer(tokens)
        return self.norm(tokens)


class SegmentationHead(nn.Module):
    """Maps feature channels to per-class outputs via a 1x1 convolution."""

    def __init__(self, in_channels, out_channels):
        super(SegmentationHead, self).__init__()
        # 1x1 conv, stride 1: a per-pixel linear projection from
        # in_channels to out_channels (the number of target classes).
        self.conv = nn.Conv2d(in_channels, out_channels, 1, 1)

    def forward(self, x):
        """Apply the 1x1 convolution and return the resulting segmentation map."""
        return self.conv(x)


class CascadedUpsampler(nn.Module):
    """Decoder that progressively upsamples transformer tokens back to image
    resolution, fusing the CNN skip features at each stage."""

    def __init__(self, in_channels=768, num_classes=2):
        super(CascadedUpsampler, self).__init__()

        # Shared 2x bilinear upsampling and activation, reused at every stage.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.relu = nn.ReLU()

        # Initial projection of the token embedding dimension down to 512.
        self.conv1 = nn.Conv2d(in_channels, 512, 3, 1, 1)

        # One 3x3 conv per decoding stage; input channels include the
        # concatenated skip-connection channels.
        self.cup_conv1 = nn.Conv2d(1024, 256, 3, 1, 1)
        self.cup_conv2 = nn.Conv2d(512, 128, 3, 1, 1)
        self.cup_conv3 = nn.Conv2d(192, 64, 3, 1, 1)
        self.cup_conv4 = nn.Conv2d(64, 16, 3, 1, 1)

        # Final 1x1 projection to class logits.
        self.segmentationHead = SegmentationHead(16, num_classes)

    def forward(self, x, x1, x2, x3, scat_mat):
        """Decode transformer output ``x`` using CNN skip features.

        Args:
            x: transformer tokens of shape (batch, num_patches, emb_dim);
               num_patches is assumed to be a perfect square.
            x1, x2, x3: CNN skip features with 64 / 256 / 512 channels.
            scat_mat: accepted for interface compatibility; not used here.
        """
        # Reshape the token sequence (B, N, D) into a feature map (B, D, h, w).
        batch, num_tokens, dim = x.size()
        side = int(np.sqrt(num_tokens))
        feat = x.view(batch, side, side, dim).permute(0, 3, 1, 2)

        # Initial channel reduction: emb_dim -> 512.
        feat = self.relu(self.conv1(feat))

        # Stage 1: 2x up, concat deepest skip (512+512 -> 1024), conv to 256.
        feat = self.upsample(feat)
        feat = self.relu(self.cup_conv1(torch.cat([feat, x3], dim=1)))

        # Stage 2: 2x up, concat mid skip (256+256 -> 512), conv to 128.
        feat = self.upsample(feat)
        feat = self.relu(self.cup_conv2(torch.cat([feat, x2], dim=1)))

        # Stage 3: 2x up, concat shallow skip (128+64 -> 192), conv to 64.
        feat = self.upsample(feat)
        feat = self.relu(self.cup_conv3(torch.cat([feat, x1], dim=1)))

        # Stage 4: final 2x up and channel reduction 64 -> 16.
        feat = self.relu(self.cup_conv4(self.upsample(feat)))

        # Project to the per-class segmentation output.
        return self.segmentationHead(feat)


class TransUNet(nn.Module):
    """TransUNet: CNN encoder + Transformer bottleneck + cascaded upsampling decoder.

    Args:
        num_classes: number of channels in the output segmentation map.
    """

    def __init__(self, num_classes):
        super(TransUNet, self).__init__()

        # Three main components: CNN feature extractor, Transformer, decoder.
        self.cnn = CNN()
        # cnn_feat_size=16: spatial h/w of the CNN feature map fed to the
        # transformer (matches the 256x256 input used in the smoke test below;
        # the original "=14" comment was stale).
        self.transformer_net = TransformerBlock(cnn_feat_size=16, patch_size=1, num_layers=12, emb_dim=768)
        self.cup = CascadedUpsampler(768, num_classes)

    def forward(self, x, scat_mat=None):
        """Forward pass.

        Args:
            x: input batch (B, 1 or 3, H, W); single-channel input is tiled
               to 3 channels to match the ResNet stem.
            scat_mat: optional extra argument passed through to the decoder
                      (unused by the decoder itself).

        Returns:
            Sigmoid-activated segmentation map with values in (0, 1).
        """
        if x.shape[1] == 1:
            x = x.repeat(1, 3, 1, 1)
        # CNN -> Transformer -> cascaded upsampling, with skip connections.
        x, x1, x2, x3 = self.cnn(x)
        x = self.transformer_net(x)
        x = self.cup(x, x1, x2, x3, scat_mat)
        # torch.sigmoid produces identical output to nn.Sigmoid()(x) without
        # constructing a new module object on every forward call.
        return torch.sigmoid(x)


# Test code section
if __name__ == "__main__":
    # Smoke test: build the model and run one forward pass.
    model = TransUNet(num_classes=1)  # TransUNet for single-class segmentation
    # Create a test input tensor
    x = torch.randn(1, 3, 256, 256)  # batch size 1, 3 channels, 256x256 resolution
    # Run the forward pass
    preds = model(x)  # model predictions
    # Print the shape of the output tensor
    print(preds.shape)  # shape of the predicted segmentation map
