import os
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
import torch
from transformers import SeamlessM4Tv2Model
import torch.nn as nn
import torch.nn.functional as F

class TransformerMelSpectrogramRegressor(nn.Module):
    """Transformer encoder that regresses a fixed-size embedding from a
    spectrogram-like sequence.

    Input:  (batch_size, input_dim, seq_len)
    Output: (batch_size, output_dim)
    """

    def __init__(self, input_dim, output_dim, nhead=1, num_encoder_layers=3):
        super(TransformerMelSpectrogramRegressor, self).__init__()
        # batch_first=True so the encoder attends over the time axis of a
        # (batch, seq_len, input_dim) tensor. The previous default
        # (batch_first=False) treated dim 0 as the sequence, so attention
        # mixed samples across the batch instead of across time steps.
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=input_dim, nhead=nhead,
                                       dropout=0.1, batch_first=True),
            num_layers=num_encoder_layers
        )
        self.fc = nn.Linear(input_dim, output_dim)  # project to output embedding dim

    def forward(self, x):
        # (batch, input_dim, seq_len) -> (batch, seq_len, input_dim)
        x = x.permute(0, 2, 1)
        x = self.encoder(x)
        x = x.mean(dim=1)      # average-pool over time steps
        out = self.fc(x)
        return out
    

class EEGCNN(nn.Module):
    """Two-stage 1-D CNN mapping (batch, input_dim, sampling_rate) signals
    to a (batch, output_dim) embedding."""

    def __init__(self, input_dim, output_dim, sampling_rate=400):
        super(EEGCNN, self).__init__()
        self.conv1 = nn.Conv1d(input_dim, 16, kernel_size=5)
        self.pool1 = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(16, 32, kernel_size=5)
        self.pool2 = nn.MaxPool1d(kernel_size=2)
        # Each conv (kernel 5, no padding) trims 4 samples; each pool halves
        # the length. Derive the flattened feature count for the linear head.
        flat_features = ((sampling_rate - 4) // 2 - 4) // 2 * 32
        self.fc = nn.Linear(flat_features, output_dim)

    def forward(self, x):
        for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2)):
            x = pool(torch.relu(conv(x)))
        # flatten every feature map per sample before the linear projection
        flattened = x.view(x.size(0), -1)
        return self.fc(flattened)


class SeamlessM4Tv2(nn.Module):
    """Pretrained SeamlessM4T-v2 speech encoder followed by a linear head."""

    def __init__(self, output_dim):
        super(SeamlessM4Tv2, self).__init__()
        pretrained = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
        self.model = pretrained.speech_encoder
        # NOTE(review): 1024 assumed to be the speech encoder hidden size —
        # confirm against the checkpoint config.
        self.mlp = nn.Linear(1024, output_dim)

    def forward(self, x):
        # x is the processor output dict with input_features + attention_mask.
        features = x['input_features']
        mask = x['attention_mask']
        encoded = self.model(features, attention_mask=mask)
        hidden = encoded['last_hidden_state']   # (bs, seq, 1024)
        pooled = hidden.mean(dim=1)             # average over time steps
        return self.mlp(pooled)

class MainBackbone(nn.Module):
    """Two-branch backbone: audio through the SeamlessM4Tv2 wrapper, EEG
    through the transformer regressor; returns one embedding per branch."""

    def __init__(self, input_dim, output_dim):
        super(MainBackbone, self).__init__()
        self.wave = SeamlessM4Tv2(output_dim)
        # Alternative EEG branch: EEGCNN(input_dim, output_dim)
        self.encoder = TransformerMelSpectrogramRegressor(input_dim, output_dim)

    def forward(self, x1, x2):
        eeg_emb = self.encoder(x2)
        wave_emb = self.wave(x1)
        return wave_emb, eeg_emb
    

class ContrastiveLoss(nn.Module):
    """NT-Xent (SimCLR-style) contrastive loss between paired embeddings.

    ``forward(emb_i, emb_j)`` treats row k of ``emb_i`` and row k of
    ``emb_j`` as a positive pair and all other rows as negatives.
    """

    def __init__(self, batch_size, device='cuda', temperature=0.5):
        super().__init__()
        self.batch_size = batch_size
        # Plain registered buffer (no .to(device)): it follows the module on
        # .to()/.cuda(), and construction no longer crashes on CPU-only
        # machines when the default device='cuda' is kept. A 0-dim CPU tensor
        # is valid in binary ops with CUDA tensors, so this stays compatible.
        self.register_buffer("temperature", torch.tensor(temperature))
        self.device = device  # kept for backward compatibility with callers

    def forward(self, emb_i, emb_j):
        """emb_i: EEG embeddings, emb_j: audio embeddings, both (bs, dim)."""
        bs = emb_i.shape[0]
        z_i = F.normalize(emb_i, dim=1)     # (bs, dim)
        z_j = F.normalize(emb_j, dim=1)     # (bs, dim)

        representations = torch.cat([z_i, z_j], dim=0)          # (2*bs, dim)
        similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)      # (2*bs, 2*bs)

        sim_ij = torch.diag(similarity_matrix, bs)         # (bs,)
        sim_ji = torch.diag(similarity_matrix, -bs)        # (bs,)
        positives = torch.cat([sim_ij, sim_ji], dim=0)     # (2*bs,)

        nominator = torch.exp(positives / self.temperature)             # (2*bs,)
        # Build the mask on the embeddings' own device: the old code used the
        # construction-time self.device, breaking CPU runs / device moves.
        negatives_mask = (~torch.eye(2 * bs, dtype=torch.bool, device=emb_i.device)).float()
        # Diagonal (self-similarity) is masked out; positives remain in the
        # denominator, matching the standard NT-Xent formulation.
        denominator = negatives_mask * torch.exp(similarity_matrix / self.temperature)   # (2*bs, 2*bs)

        loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))        # (2*bs,)
        loss = torch.sum(loss_partial) / (2 * bs)
        return loss

def freeze(model):
    """Freeze the pretrained speech encoder ('wave.model.*' parameters) and
    mark everything else trainable.

    Prints each trainable parameter name, then the total count of trainable
    parameters, and returns the (mutated) model.
    """
    for name, param in model.named_parameters():
        trainable = 'wave.model.' not in name
        param.requires_grad = trainable
        if trainable:
            print(name)

    # total number of trainable parameters
    print(sum(p.numel() for p in model.parameters() if p.requires_grad == True))
    return model