import torch.nn as nn

class BasicBlock(nn.Module):
    """Feed-forward unit: Linear -> BatchNorm1d -> ReLU -> Dropout(p=0.2).

    Batch normalization and dropout are included to regularize the
    baseline MLP (see the PyTorch BatchNorm1d / Dropout docs).
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()

        layers = [
            nn.Linear(input_dim, output_dim),
            nn.BatchNorm1d(output_dim),
            nn.ReLU(),
            nn.Dropout(p=0.2),
        ]
        # Keep the attribute name `block` so state_dict keys stay stable.
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block to a (batch, input_dim) tensor."""
        return self.block(x)

class Classifier(nn.Module):
    """MLP classifier: one input block, `hidden_layers` hidden blocks, linear head.

    Output is raw class scores (logits) of size `output_dim`.
    """

    def __init__(self, input_dim, output_dim=41, hidden_layers=1, hidden_dim=256):
        super().__init__()

        stages = [BasicBlock(input_dim, hidden_dim)]
        for _ in range(hidden_layers):
            stages.append(BasicBlock(hidden_dim, hidden_dim))
        stages.append(nn.Linear(hidden_dim, output_dim))
        self.fc = nn.Sequential(*stages)

    def forward(self, x):
        """Return logits of shape (batch, output_dim)."""
        return self.fc(x)

class RNNBlock(nn.Module):
    """Recurrent encoder that returns the batch-normalized last time step.

    Args:
        input_dim: feature size of each time step.
        hidden_dim: hidden-state size (and output feature size).
        rnn_type: 'RNN', 'LSTM' or 'GRU'; unrecognized values fall back to LSTM.
        num_layers: number of stacked recurrent layers.
        dropout: inter-layer dropout (only applied when num_layers > 1).
    """

    def __init__(self, input_dim, hidden_dim, rnn_type='LSTM', num_layers=1, dropout=0.2):
        super().__init__()

        # Map the type name to a constructor; default to LSTM for unknown names.
        constructors = {'RNN': nn.RNN, 'LSTM': nn.LSTM, 'GRU': nn.GRU}
        make_rnn = constructors.get(rnn_type, nn.LSTM)

        # PyTorch only applies dropout *between* stacked layers, so pass 0
        # for a single layer (avoids the framework warning).
        effective_dropout = dropout if num_layers > 1 else 0
        self.rnn = make_rnn(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            dropout=effective_dropout,
        )
        # Normalizes the feature dimension of the final time step.
        self.batch_norm = nn.BatchNorm1d(hidden_dim)

    def forward(self, x):
        """Encode (batch, seq, input_dim) -> (batch, hidden_dim)."""
        seq_out, _ = self.rnn(x)       # hidden state is not needed
        last_step = seq_out[:, -1, :]  # keep only the final time step
        return self.batch_norm(last_step)

class RNNClassifier(nn.Module):
    """RNN encoder followed by a dropout-regularized linear classification head."""

    def __init__(self, input_dim, output_dim=41, hidden_layers=1, hidden_dim=256, rnn_type='LSTM', dropout=0.2):
        super().__init__()

        # Sequence encoder: yields one hidden_dim feature vector per example.
        self.rnn_block = RNNBlock(
            input_dim,
            hidden_dim,
            rnn_type=rnn_type,
            num_layers=hidden_layers,
            dropout=dropout,
        )
        # Classification head over the encoded features.
        head = [
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Linear(hidden_dim, output_dim),
        ]
        self.fc = nn.Sequential(*head)

    def forward(self, x):
        """Return logits of shape (batch, output_dim) for sequence input x."""
        features = self.rnn_block(x)
        return self.fc(features)