import torch
import torch.nn as nn

class AttentionRNN(nn.Module):
    """Bidirectional LSTM sequence classifier with additive attention pooling.

    Token ids are embedded, encoded by a single-layer bidirectional LSTM, and
    pooled into one context vector via a learned attention distribution over
    the sequence positions; a linear head maps the context to class logits.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes):
        """Build the layers.

        Args:
            vocab_size: number of distinct token ids in the embedding table.
            embed_dim: embedding vector size.
            hidden_dim: LSTM hidden size per direction (outputs are 2*hidden_dim
                because the LSTM is bidirectional).
            num_classes: number of output logits.
        """
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.rnn = nn.LSTM(embed_dim, hidden_dim, batch_first=True, bidirectional=True)
        # Scores each timestep's 2*hidden_dim encoding down to a scalar logit.
        self.attention = nn.Linear(hidden_dim * 2, 1)
        self.fc = nn.Linear(hidden_dim * 2, num_classes)

    def forward(self, x, mask=None):
        """Classify a batch of token-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len) holding token ids.
            mask: optional (batch, seq_len) boolean (or 0/1) tensor where
                True/1 marks a real token and False/0 marks padding. When
                given, padded positions are excluded from the attention
                softmax. Default None keeps the original unmasked behavior.

        Returns:
            Tensor of shape (batch, num_classes) with unnormalized logits.
        """
        embedded = self.embedding(x)                 # (batch, seq, embed_dim)
        output, _ = self.rnn(embedded)               # (batch, seq, 2*hidden_dim)
        scores = self.attention(output).squeeze(-1)  # (batch, seq)
        if mask is not None:
            # -inf scores get zero attention weight after the softmax, so
            # padding tokens cannot contribute to the pooled context.
            scores = scores.masked_fill(~mask.bool(), float("-inf"))
        attn_weights = torch.softmax(scores, dim=1)  # (batch, seq), rows sum to 1
        # Weighted sum over timesteps: (batch, 1, seq) @ (batch, seq, 2H) -> (batch, 2H)
        context = torch.bmm(attn_weights.unsqueeze(1), output).squeeze(1)
        return self.fc(context)
