import torch.nn as nn
import torch

class TransformerMelSpectrogramRegressor(nn.Module):
    """Transformer encoder that regresses a mel-spectrogram frame from a sequence.

    Expects batch-first input of shape (batch, seq_len, input_dim) and returns
    the prediction for the final time step only, shape (batch, output_dim).
    `args` must provide: input_dim, nhead, num_encoder_layers, output_dim.
    """

    def __init__(self, args):
        super(TransformerMelSpectrogramRegressor, self).__init__()
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model=args.input_dim, nhead=args.nhead, dropout=0.1),
            num_layers=args.num_encoder_layers,
        )
        # Projects encoder features down to the mel-spectrogram dimension.
        self.fc = nn.Linear(args.input_dim, args.output_dim)

    def forward(self, x):
        # nn.TransformerEncoder defaults to sequence-first layout, so move the
        # time axis in front: (batch, seq, dim) -> (seq, batch, dim).
        encoded = self.encoder(x.permute(1, 0, 2))
        # Regress from the representation of the last time step only.
        return self.fc(encoded[-1])
    
class EEGCNN(nn.Module):
    """Two-stage 1-D CNN classifier for multi-channel EEG windows.

    Input: (batch, num_channels, sampling_rate) samples. Produces class
    logits of shape (batch, class_num) via an intermediate embedding of
    size `output_dim`.
    """

    def __init__(self, output_dim, class_num=30, num_channels=57, sampling_rate=400):
        super(EEGCNN, self).__init__()
        self.conv1 = nn.Conv1d(num_channels, 16, kernel_size=5)
        self.pool1 = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(16, 32, kernel_size=5)
        self.pool2 = nn.MaxPool1d(kernel_size=2)
        # Flattened feature count after both conv+pool stages: each conv
        # (kernel=5, no padding) shrinks the time axis by 4, each pool
        # floor-halves it; 32 is the final channel count.
        flat_features = ((sampling_rate - 4) // 2 - 4) // 2 * 32
        self.fc = nn.Linear(flat_features, output_dim)
        self.mlp = nn.Linear(output_dim, class_num)

    def forward(self, x):
        # Two identical conv -> ReLU -> pool stages.
        for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2)):
            x = pool(torch.relu(conv(x)))
        # Collapse (channels, time) into one feature vector per sample.
        flat = torch.flatten(x, start_dim=1)
        return self.mlp(torch.relu(self.fc(flat)))