import torch.nn as nn
import torch
import torchvision
import torch.nn.functional as F

# Image branch: a thin wrapper around a pretrained ResNet-101.
class ImageEncoder(nn.Module):
    """Encode images with an ImageNet-pretrained torchvision ResNet-101.

    The forward pass returns the ResNet's final output (by default the
    1000-dim classification head output).
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): `pretrained=True` is deprecated in recent torchvision
        # releases in favor of the `weights=` argument — confirm the pinned
        # torchvision version before migrating.
        self.resnet = torchvision.models.resnet101(pretrained=True)

    def forward(self, image):
        """Run the backbone on a batch of images."""
        return self.resnet(image)


# EEG branch: Transformer encoder over the EEG feature sequence.
class EEGEncoder(nn.Module):
    """Encode an EEG sequence with a Transformer and project the last step.

    Args:
        input_dim: feature dimension of each EEG time step (must be divisible
            by ``nhead``).
        output_dim: dimension of the projected output (mel-spectrogram size).
        nhead: number of attention heads.
        num_encoder_layers: number of stacked encoder layers.

    Input is assumed to be (batch, seq_len, input_dim); the return value has
    shape (batch, output_dim).
    """

    def __init__(self, input_dim, output_dim, nhead=8, num_encoder_layers=3):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=input_dim, nhead=nhead, dropout=0.1)
        self.encoder = nn.TransformerEncoder(layer, num_layers=num_encoder_layers)
        # Project the transformer representation to the target dimension.
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        # nn.TransformerEncoder defaults to sequence-first layout, so move the
        # batch axis: (batch, seq, dim) -> (seq, batch, dim).
        seq_first = x.permute(1, 0, 2)
        encoded = self.encoder(seq_first)
        # Use only the final time step's representation.
        last_step = encoded[-1]
        return self.fc(last_step)

    
class MainBackbone(nn.Module):
    """Dual-branch backbone pairing the image and EEG encoders.

    Both branches are projected into a shared 512-dim space so the two
    modalities can be compared with a contrastive objective.
    """

    def __init__(self, input_dim):
        super().__init__()
        self.image_encoder = ImageEncoder()
        self.eeg_encoder = EEGEncoder(input_dim, 1024)
        # ResNet-101 emits 1000-dim logits; the EEG branch emits 1024-dim features.
        self.projection_head1 = nn.Linear(1000, 512)
        self.projection_head2 = nn.Linear(1024, 512)

    def forward(self, image, eeg_feature):
        """Return the (image, EEG) 512-dim projections for a batch."""
        img_repr = self.projection_head1(self.image_encoder(image))
        eeg_repr = self.projection_head2(self.eeg_encoder(eeg_feature))
        return img_repr, eeg_repr

def freeze_model(model, kind='part'):
    """Freeze parameters of ``model`` in place and return it.

    Args:
        model: any ``nn.Module`` (typically a ``MainBackbone``).
        kind: ``'part'`` freezes only the early image-encoder stages
            (parameter names containing ``'image_encoder.'`` and one of the
            early ResNet layer markers); ``'all'`` freezes every parameter.

    Side effect: prints the remaining trainable parameter count.
    """
    # NOTE(review): '.layer0.' matches no standard torchvision ResNet
    # parameter name (stages are layer1..layer4, plus conv1/bn1); kept for
    # behavioral parity — confirm whether it was meant to cover conv1/bn1.
    early_stages = ('.layer1.', '.layer2.', '.layer0.')
    for name, param in model.named_parameters():
        if 'image_encoder.' in name and any(stage in name for stage in early_stages):
            param.requires_grad = False
        elif kind == 'all':
            param.requires_grad = False
    # Report the remaining trainable parameter count.
    print(sum(p.numel() for p in model.parameters() if p.requires_grad))
    return model

# InfoNCE-style contrastive loss between the two embedding branches.
class ContrastiveLoss(nn.Module):
    """Supervised contrastive (InfoNCE-style) loss over two embedding batches.

    ``forward`` takes matched embeddings ``emb_i`` and ``emb_j`` (each
    presumably shape (B, D) — rows at the same index are a matched pair) and a
    label vector of length B; rows with equal labels are treated as positives.

    NOTE(review): ``penalty_weight`` is stored but never used in ``forward``,
    and ``self.batch_size`` is shadowed by the runtime batch size — confirm
    whether both are vestigial.
    """
    def __init__(self, batch_size, device='cuda', temperature=0.5, penalty_weight=1.0):
        super().__init__()
        self.batch_size = batch_size
        # Buffer (not a Parameter): temperature moves with the module across
        # devices / state_dict but receives no gradient.
        self.register_buffer("temperature", torch.tensor(temperature).to(device))
        self.penalty_weight = penalty_weight
     
    def forward(self, emb_i, emb_j, labels):
        batch_size = emb_i.shape[0]

        z_i = F.normalize(emb_i, dim=1) # L2-normalize so similarity ignores embedding magnitude
        z_j = F.normalize(emb_j, dim=1)

        # representations = torch.cat([z_i, z_j], dim=0)
        # similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)
        # Pairwise cosine similarity between every image row and every EEG
        # row, shape (B, B).
        similarity_matrix = F.cosine_similarity(z_i.unsqueeze(1), z_j.unsqueeze(0), dim=2)

        # Positive-pair mask: entry (a, b) is 1 when labels[a] == labels[b].
        # labels = torch.cat([labels, labels], dim=0)
        positive_mask = (labels.unsqueeze(1) == labels.unsqueeze(0)).float()
        # Split similarities into positive/negative parts by zeroing the other side.
        positive_sim = similarity_matrix * positive_mask
        negative_sim = similarity_matrix * (torch.logical_not(positive_mask).float())

        # Exponentiate temperature-scaled similarities.
        # NOTE(review): the masking happens BEFORE exp, and exp(0) = 1, so
        # every negative position still contributes 1 to `nominator` and every
        # positive position contributes 1 to each row of `denominator`.
        # Standard InfoNCE masks *after* exponentiation — confirm this
        # deviation is intentional before changing it.
        nominator = torch.exp(positive_sim / self.temperature)
        denominator = torch.exp(negative_sim / self.temperature)
        # exp_all = torch.exp(similarity_matrix / self.temperature) 

        # (B, B) matrix of per-pair losses: the per-row negative sum (a length-B
        # vector) broadcasts against the full nominator matrix.
        loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1)) # normalize into a distribution
        # loss = torch.sum(loss_partial) / (2 * batch_size)
        loss = torch.sum(loss_partial) / batch_size
        
        return loss