import torch
import torch.nn as nn
from torchvision.models import resnet18, ResNet18_Weights
from torchvision.models import resnet34, ResNet34_Weights  # 导入 resnet34
from torchvision.models import resnet50, ResNet50_Weights  # 导入 resnet50
from torch.utils.tensorboard import SummaryWriter
from config import config

# Number of image modalities per patient — determines how many images each
# patient contributes and how many parallel backbones the model builds.
data_types_count = len(config['data_types'])


# 自定义的多头自注意力机制
# Custom multi-head self-attention layer
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Splits the embedding into ``num_heads`` independent heads, attends within
    each head, merges the heads back, and applies an output projection.
    """

    def __init__(self, embed_dim, num_heads):
        """
        Args:
            embed_dim: total embedding dimension of the input features.
            num_heads: number of attention heads; must divide ``embed_dim``.
        """
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        assert embed_dim % num_heads == 0, "embedding dimension must be divisible by number of heads"
        self.head_dim = embed_dim // num_heads

        self.values = nn.Linear(embed_dim, embed_dim, bias=False)
        self.keys = nn.Linear(embed_dim, embed_dim, bias=False)
        self.queries = nn.Linear(embed_dim, embed_dim, bias=False)
        self.fc_out = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        """Apply self-attention.

        Args:
            x: tensor of shape (batch, seq_length, embed_dim).

        Returns:
            Tensor of shape (batch, seq_length, embed_dim).
        """
        N, seq_length, _ = x.shape

        values = self.values(x)
        keys = self.keys(x)
        queries = self.queries(x)
        # Split into heads: (N, num_heads, seq_length, head_dim)
        values = values.view(N, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(N, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        queries = queries.view(N, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        # Attention scores scaled by sqrt(head_dim): (N, num_heads, seq_q, seq_k)
        energy = torch.einsum("nhqd,nhkd->nhqk", [queries, keys])
        attention = torch.softmax(energy / (self.head_dim ** 0.5), dim=3)

        # FIX: the value contraction must share the key axis ("k") with the
        # attention weights. The original "nhqk,nhvd->nhqd" summed attention
        # and values over independent axes; since softmax rows sum to 1, that
        # reduced the whole layer to a plain sum of value vectors, discarding
        # the attention weights entirely.
        out = torch.einsum("nhqk,nhkd->nhqd", [attention, values])
        # FIX: bring the head axis back next to head_dim before merging them.
        # Reshaping (N, heads, seq, head_dim) directly would interleave heads
        # and sequence positions incorrectly.
        out = out.transpose(1, 2).reshape(N, seq_length, -1)  # (N, seq_length, embed_dim)
        out = self.fc_out(out)  # final output projection
        return out


class MultiResNetAttentionModel(nn.Module):
    """Multi-modality regression model.

    One ImageNet-pretrained ResNet50 backbone per data type extracts a
    2048-dim feature vector; the per-modality features are fused with
    multi-head self-attention and averaged, then an MLP head regresses
    4 output values.
    """

    # NOTE: tuple default avoids the mutable-default-argument pitfall;
    # same effective default as before, so callers are unaffected.
    def __init__(self, unfreeze_layers=("layer1", "layer2", "layer3", "layer4"), freeze_layers_except=None):
        """
        Args:
            unfreeze_layers: parameter-name substrings to unfreeze in the
                backbones; all other backbone parameters stay frozen.
                ``None`` means unfreeze nothing by this criterion.
            freeze_layers_except: if given, additionally unfreeze every
                parameter whose name contains none of these substrings.
        """
        super(MultiResNetAttentionModel, self).__init__()

        if unfreeze_layers is None:
            unfreeze_layers = []
        # One parallel ResNet50 per data type (image modality).
        self.resnets = nn.ModuleList([self.create_resnet() for _ in range(data_types_count)])
        self.attention = MultiHeadAttention(embed_dim=2048, num_heads=16)  # 16 attention heads
        # MLP regression head: 2048 -> 1024 -> 512 -> 256 -> 128 -> 4.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 256)
        self.fc4 = nn.Linear(256, 128)
        self.fc5 = nn.Linear(128, 4)  # final layer: 4 regression outputs
        self.dropout = nn.Dropout(p=0.2)

        # Freeze every backbone parameter, then selectively unfreeze.
        for resnet in self.resnets:
            for name, param in resnet.named_parameters():
                param.requires_grad = False  # frozen by default
                # unfreeze_layers is never None here (normalized above), so
                # the original `is not None` guard is redundant and dropped.
                if any(layer in name for layer in unfreeze_layers) or \
                        (freeze_layers_except is not None and
                         all(layer not in name for layer in freeze_layers_except)):
                    param.requires_grad = True  # unfreeze the matching layer

    def create_resnet(self):
        """Build a pretrained ResNet50 feature extractor (classifier removed)."""
        model = resnet50(weights=ResNet50_Weights.IMAGENET1K_V1)
        model.fc = nn.Identity()  # drop the classification head; output is the pooled 2048-dim feature
        return model

    def forward(self, x):
        """
        Args:
            x: tensor of shape (batch_size, data_types_count * 3, H, W) —
               the per-modality RGB images stacked along the channel axis.

        Returns:
            Tensor of shape (batch_size, 4) with the regression outputs.
        """
        batch_size = x.size(0)
        # Split channels into per-modality images: (batch, data_types_count, 3, H, W).
        x = x.view(batch_size, data_types_count, 3, *x.size()[2:])
        # Per-modality 2048-dim features from the parallel backbones.
        features = [self.resnets[i](x[:, i]) for i in range(data_types_count)]
        features = torch.stack(features, dim=1)  # (batch, data_types_count, 2048)

        # Fuse modalities with self-attention, then average over the modality axis.
        attentive_features = self.attention(features)  # (batch, data_types_count, 2048)
        combined_features = torch.mean(attentive_features, dim=1)  # (batch, 2048)

        # MLP head with ReLU + dropout between layers.
        out = self.dropout(torch.relu(self.fc1(combined_features)))
        out = self.dropout(torch.relu(self.fc2(out)))
        out = self.dropout(torch.relu(self.fc3(out)))
        out = self.dropout(torch.relu(self.fc4(out)))
        out = self.fc5(out)  # 4 regression outputs
        return out


# class MultiResNetAttentionModel(nn.Module):
#     def __init__(self, unfreeze_layers=['layer4'], freeze_layers_except=None):
#         super(MultiResNetAttentionModel, self).__init__()
#
#         # 定义多个并行的 ResNet18 模型
#         if unfreeze_layers is None:
#             unfreeze_layers = []
#         self.resnets = nn.ModuleList([self.create_resnet() for _ in range(data_types_count)])
#         self.attention = MultiHeadAttention(embed_dim=512, num_heads=8)  # 使用 8 个 head
#         self.fc1 = nn.Linear(512, 256)  # 第一个全连接层
#         self.fc2 = nn.Linear(256, 128)  # 第二个全连接层
#         self.fc3 = nn.Linear(128, 4)  # 第三个全连接层，输出4个值用于回归
#         self.dropout = nn.Dropout(p=0.2)
#
#         # 先冻结所有 ResNet 参数，然后选择性解冻某些参数
#         for resnet in self.resnets:
#             for name, param in resnet.named_parameters():
#                 param.requires_grad = False  # 初始冻结所有层
#                 # 检查要解冻的层
#                 if (unfreeze_layers is not None and any(layer in name for layer in unfreeze_layers)) or \
#                         (freeze_layers_except is not None and all(layer not in name for layer in freeze_layers_except)):
#                     param.requires_grad = True  # 解冻指定的层
#
#     def create_resnet(self):
#         # model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
#         model = resnet34(weights=ResNet34_Weights.IMAGENET1K_V1)
#         model.fc = nn.Identity()  # 去掉最后的分类层
#         return model
#
#     def forward(self, x):
#         batch_size = x.size(0)  # (batch_size, 18, H, W)
#         x = x.view(batch_size, data_types_count, 3, *x.size()[2:])  # 重新调整为 (batch_size, data_types_count, 3, H, W)
#         features = [self.resnets[i](x[:, i]) for i in range(data_types_count)]  # (batch_size, 512) 的特征列表
#         features = torch.stack(features, dim=1)  # (batch_size, data_types_count, 512)
#
#         # 使用多头自注意力机制进行特征融合
#         attentive_features = self.attention(features)  # (batch_size, data_types_count, 512)
#         combined_features = torch.mean(attentive_features, dim=1)  # (batch_size, 512)
#
#         # 通过全连接层得到输出
#         out = self.dropout(torch.relu(self.fc1(combined_features)))
#         out = self.dropout(torch.relu(self.fc2(out)))
#         out = self.fc3(out)  # 输出4个数字
#         return out


# Instantiate the model at module level.
# NOTE(review): this runs on import and loads the pretrained ResNet50
# weights as a side effect — importing this module is not free.
model = MultiResNetAttentionModel()

if __name__ == '__main__':
    # Export the model graph to TensorBoard for visual inspection.
    graph_writer = SummaryWriter("logs/model_structure")

    # Dummy batch matching the expected input layout:
    # (batch, data_types_count * 3 channels, 224, 224).
    dummy_input = torch.randn(1, data_types_count * 3, 224, 224)
    graph_writer.add_graph(model, dummy_input)

    graph_writer.close()
