import torch
import torch.nn as nn
from torchvision.models import resnet50


class TripleResNet50(nn.Module):
    """Fuse the logits of three full ResNet50 models into 8 output values.

    Each of the three inputs is processed by its own ImageNet-pretrained
    ResNet50 classifier; the three 1000-dim outputs are concatenated and
    projected down 3000 -> 2048 -> 1024 -> 512 -> 256 -> 8 through a
    stack of fully-connected layers.
    """

    def __init__(self):
        super().__init__()
        # Three independent, ImageNet-pretrained ResNet50 classifiers.
        self.resnet50_1 = resnet50(pretrained=True)
        self.resnet50_2 = resnet50(pretrained=True)
        self.resnet50_3 = resnet50(pretrained=True)

        # Dimensionality-reduction head: 3 * 1000 concatenated logits in,
        # 8 values out.
        self.fc1 = nn.Linear(3 * 1000, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 256)
        self.fc5 = nn.Linear(256, 8)

        # Shared ReLU nonlinearity for the hidden layers.
        self.relu = nn.ReLU()

    def forward(self, x1, x2, x3):
        """Run each input through its backbone, then fuse and reduce.

        Args:
            x1, x2, x3: image batches, one per ResNet50 branch.

        Returns:
            Tensor of shape (batch, 8).
        """
        # Per-branch classifier outputs, (batch, 1000) each.
        branch_outputs = (
            self.resnet50_1(x1),
            self.resnet50_2(x2),
            self.resnet50_3(x3),
        )
        out = torch.cat(branch_outputs, dim=1)

        # Hidden layers use ReLU; the final projection stays linear.
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = self.relu(hidden(out))
        return self.fc5(out)


class TripleResNet50Embedding(nn.Module):
    """Token embedder built from three truncated ResNet50 backbones.

    Each backbone keeps everything up to (but excluding) the global
    average pool and classifier, so it outputs a spatial feature map
    that is flattened into a token sequence.
    """

    def __init__(self):
        super().__init__()

        def truncated_backbone():
            # Drop the last two children (avgpool + fc) so only the
            # convolutional feature extractor remains.
            return nn.Sequential(*list(resnet50(pretrained=True).children())[:-2])

        self.resnet50_1 = truncated_backbone()
        self.resnet50_2 = truncated_backbone()
        self.resnet50_3 = truncated_backbone()

    def forward(self, x1, x2, x3):
        """Return the concatenated token sequences of the three branches.

        Each branch's feature map (B, C, H, W) is reshaped to
        (B, H*W, C); the three sequences are then concatenated along
        the token dimension.
        """

        def to_tokens(feature_map):
            return feature_map.flatten(2).transpose(1, 2)

        token_seqs = [
            to_tokens(self.resnet50_1(x1)),
            to_tokens(self.resnet50_2(x2)),
            to_tokens(self.resnet50_3(x3)),
        ]
        return torch.cat(token_seqs, dim=1)


class TransformerEncoder(nn.Module):
    """Pre-norm Transformer encoder layer (self-attention + feed-forward).

    Args:
        embed_dim: token embedding dimension.
        num_heads: number of attention heads.
        ff_dim: hidden size of the position-wise feed-forward network.
        dropout: dropout probability used throughout the layer.
    """

    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1):
        super().__init__()
        # batch_first=True: inputs are (batch, seq, embed), which is the
        # layout produced by TripleResNet50Embedding. The original code
        # omitted it (and passed `dropout` positionally), so
        # nn.MultiheadAttention treated the batch axis as the sequence
        # axis and attended ACROSS samples of the batch.
        self.self_attn = nn.MultiheadAttention(
            embed_dim, num_heads, dropout=dropout, batch_first=True)
        self.linear1 = nn.Linear(embed_dim, ff_dim)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(ff_dim, embed_dim)

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = nn.ReLU()

    def forward(self, src):
        """Apply pre-norm self-attention and feed-forward with residuals.

        Args:
            src: tensor of shape (batch, seq, embed_dim).

        Returns:
            Tensor with the same shape as ``src``.
        """
        # Self-attention sub-layer: pre-norm, attend, residual add.
        src2 = self.norm1(src)
        src = src + self.dropout1(self.self_attn(src2, src2, src2)[0])
        # Feed-forward sub-layer: pre-norm, MLP, residual add.
        src2 = self.norm2(src)
        src = src + self.dropout2(
            self.linear2(self.dropout(self.activation(self.linear1(src2)))))
        return src


class CvTWithTripleResNet50(nn.Module):
    """Convolutional vision transformer fed by three ResNet50 embedders.

    Args:
        embed_dim: token embedding dimension; must match the channel
            count of the ResNet50 feature maps (presumably 2048 for a
            standard ResNet50 — confirm against the backbone used).
        num_heads: attention heads per encoder layer.
        ff_dim: feed-forward hidden size per encoder layer.
        num_layers: number of stacked TransformerEncoder layers.
    """

    def __init__(self, embed_dim, num_heads, ff_dim, num_layers):
        super().__init__()
        self.triple_resnet50_embedding = TripleResNet50Embedding()
        encoder_layers = [
            TransformerEncoder(embed_dim, num_heads, ff_dim)
            for _ in range(num_layers)
        ]
        self.transformer_encoders = nn.Sequential(*encoder_layers)
        # Output head projecting the pooled token to 8 values.
        self.head = nn.Linear(embed_dim, 8)

    def forward(self, x1, x2, x3):
        """Embed the three inputs, encode, pool, and project to 8 values."""
        tokens = self.triple_resnet50_embedding(x1, x2, x3)
        encoded = self.transformer_encoders(tokens)
        # Mean over the token dimension stands in for a [CLS] token.
        pooled = encoded.mean(dim=1)
        return self.head(pooled)
