import torch
import torch.nn as nn
from models.model import FCNN

class TransformerMelSpectrogramRegressor(nn.Module):
    """Regressor fusing a transformer-encoded mel-spectrogram branch with an
    auxiliary FCNN branch to predict ``args.output_dim`` values.

    Expected ``args`` attributes: ``input_dim`` (per-channel feature size,
    used as d_model), ``nhead``, ``num_encoder_layers``, ``input_channels``,
    ``input_dim2``, ``output_dim``; optionally ``dropout`` (defaults to 0.1,
    the previously hard-coded value).
    """

    def __init__(self, args):
        super().__init__()
        # Dropout was hard-coded to 0.1; keep that default but allow override.
        dropout = getattr(args, "dropout", 0.1)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=args.input_dim, nhead=args.nhead, dropout=dropout),
            num_layers=args.num_encoder_layers,
        )
        self.multihead_attention = nn.MultiheadAttention(
            embed_dim=args.input_dim, num_heads=args.nhead, dropout=dropout
        )
        # Projects the flattened (channel * dim) features down to input_dim2.
        self.fc = nn.Linear(args.input_dim * args.input_channels, args.input_dim2)
        self.mlp = nn.Linear(args.input_dim2, args.output_dim)
        self.tanh = nn.Tanh()
        self.fnn = FCNN(args)
        # Learnable scaling factors balancing the two branches before fusion.
        self.scale_x1 = nn.Parameter(torch.ones(1))
        self.scale_x2 = nn.Parameter(torch.ones(1))

    def forward(self, x2, x):
        """Fuse the spectrogram branch ``x`` with the auxiliary branch ``x2``.

        Args:
            x2: auxiliary features consumed by ``self.fnn``; its output must
                have ``input_dim2`` features (shape otherwise depends on FCNN).
            x: spectrogram tensor, permuted below to
                (batch, seq, channel, dim) — so the caller is assumed to pass
                (batch, channel, dim, seq). TODO(review): confirm this layout
                against the caller.

        Returns:
            Tensor of shape (batch, output_dim).
        """
        x = x.permute(0, 3, 1, 2)  # -> (batch, seq, channel, dim)
        batch_size, seq_len, channel, dim = x.size()

        # Fold batch and time together so the transformer attends over the
        # channel axis: (batch*seq, channel, dim).
        x = x.reshape(batch_size * seq_len, channel, dim)
        x = self.encoder(x.permute(1, 0, 2))  # (channel, batch*seq, dim)

        # Multi-head self-attention across the channel dimension.
        x, _ = self.multihead_attention(x, x, x)
        # reshape (not view): the permuted tensor is non-contiguous.
        x = x.permute(1, 0, 2).reshape(batch_size, seq_len, channel, dim)

        # Merge channel and feature dims, then average over time.
        x = x.reshape(batch_size, seq_len, -1)
        x = torch.mean(x, dim=1)
        x = self.fc(x)
        x1 = self.tanh(x)
        x2 = self.fnn(x2)
        # Learnable weighted fusion of the two branches.
        x = self.scale_x1 * x1 + self.scale_x2 * x2
        x = self.mlp(x)

        return x