import torch.nn as nn
import torch
    
class TransformerMelSpectrogramRegressor(nn.Module):
    """Transformer encoder that regresses a mel-spectrogram frame from a feature sequence.

    Expects input of shape (batch, input_dim, seq_len); returns the linear
    projection of the final encoded time step, shape (batch, input_dim2).
    """

    def __init__(self, args):
        super(TransformerMelSpectrogramRegressor, self).__init__()
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=args.input_dim, nhead=args.nhead, dropout=0.1),
            num_layers=args.num_encoder_layers
        )
        # Projects encoder features to the output mel-spectrogram dimensionality.
        self.fc = nn.Linear(args.input_dim, args.input_dim2)

    def forward(self, x):
        # Rearrange to the (seq_len, batch, input_dim) layout the Transformer expects.
        seq_first = x.permute(2, 0, 1)
        encoded = self.encoder(seq_first)
        # Only the last time step of the encoded sequence is projected and returned.
        last_step = encoded[-1]
        return self.fc(last_step)
    
class FCNN(nn.Module):
    """Two-layer fully connected network with batch normalization.

    Flattens the input to (batch, band_numbers * feature_size), then applies
    Linear -> ReLU -> BatchNorm twice, producing (batch, input_dim2).
    """

    def __init__(self, args):
        super(FCNN, self).__init__()
        self.fc1 = nn.Linear(args.band_numbers * args.feature_size, args.input_dim1)
        self.bn1 = nn.BatchNorm1d(args.input_dim1)
        self.fc2 = nn.Linear(args.input_dim1, args.input_dim2)
        self.bn2 = nn.BatchNorm1d(args.input_dim2)
        # Was stored as `self.tanh` while actually holding ReLU — renamed to
        # avoid the misleading name; an unused `self.sigmoid` was removed.
        # (Neither module has parameters, so saved state dicts are unaffected.)
        self.activation = nn.ReLU()

    def forward(self, x):
        # Flatten everything except the batch dimension.
        x = x.view(x.size(0), -1)
        x = self.activation(self.fc1(x))
        x = self.bn1(x)
        x = self.activation(self.fc2(x))
        x = self.bn2(x)
        return x


class MainBackbone(nn.Module):
    """Fuses an FCNN branch and a Transformer branch via learnable scalar weights.

    Each branch produces a (batch, input_dim2) tensor; the two are combined as a
    weighted sum (weights are learnable, initialized to 1) and projected to
    output_dim by a final linear layer.
    """

    def __init__(self, args):
        super(MainBackbone, self).__init__()
        self.fnn = FCNN(args)
        self.encoder = TransformerMelSpectrogramRegressor(args)
        self.mlp = nn.Linear(args.input_dim2, args.output_dim)
        # Learnable per-branch scaling factors.
        self.scale_x1 = nn.Parameter(torch.ones(1))
        self.scale_x2 = nn.Parameter(torch.ones(1))

    def forward(self, x1, x2):
        branch_a = self.fnn(x1)
        branch_b = self.encoder(x2)
        fused = self.scale_x1 * branch_a + self.scale_x2 * branch_b
        return self.mlp(fused)