import torch
import torch.nn as nn
import torch.nn.functional as F

class ActorCritic(nn.Module):
    """Pointer-network-style actor-critic for sequential node selection.

    One encoder pass embeds all nodes; each call to ``forward`` performs a
    single decoding step. The actor produces unnormalized attention logits
    over the nodes (additive / Bahdanau attention between the decoder state
    and the encoder outputs); the critic estimates the state value from the
    decoder hidden state.
    """

    def __init__(self, input_dim, embedding_dim, hidden_dim, num_nodes_max=None, capacity=None):
        """Build the network.

        Args:
            input_dim: size of each node's raw feature vector
                (coords + demand + visited flag, per ``forward``).
            embedding_dim: size of the learned node embeddings.
            hidden_dim: hidden size of the encoder LSTM and decoder LSTMCell.
            num_nodes_max: currently unused; kept for interface compatibility.
            capacity: currently unused; kept for interface compatibility.
        """
        super().__init__()

        # Projects raw per-node features into the embedding space.
        self.embedding = nn.Linear(input_dim, embedding_dim)

        # Single-layer encoder over the node sequence.
        self.encoder = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)

        # Additive (Bahdanau) attention: score_i = v^T tanh(W_q q + W_k k_i).
        self.W_q = nn.Linear(hidden_dim, hidden_dim)
        self.W_k = nn.Linear(hidden_dim, hidden_dim)
        self.v = nn.Linear(hidden_dim, 1)

        # One-step decoder; its (h, c) state is threaded through calls.
        self.decoder_lstm = nn.LSTMCell(embedding_dim, hidden_dim)

        # Critic head: decoder hidden state -> scalar state-value estimate.
        self.value_net = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1)
        )

    def forward(self, model_state, last_action_idx=None, hidden_state=None):
        """Run one decoding step.

        Args:
            model_state: dict with keys ``'coords'``, ``'demands'`` and
                ``'visited_mask'``.
                Assumes coords is (batch, num_nodes, 2) and demands /
                visited_mask are (batch, num_nodes) — TODO confirm.
            last_action_idx: (batch,) long tensor with the previously selected
                node index per instance, or None on the first step (node 0 —
                conventionally the depot — is used as the start).
            hidden_state: decoder (h, c) from the previous step, or None to
                initialize from the encoder's final state.

        Returns:
            attention_scores: (batch, num_nodes) unnormalized logits over nodes.
            value: (batch, 1) critic estimate.
            (h_d, c_d): decoder state to pass into the next step.
        """
        coords = model_state['coords']
        demands = model_state['demands']
        visited_mask = model_state['visited_mask']

        # Per-node features: coordinates, demand, and visited flag (as float).
        features = torch.cat([
            coords,
            demands.unsqueeze(-1),
            visited_mask.unsqueeze(-1).float()
        ], dim=-1)

        embedded_features = self.embedding(features)

        encoder_outputs, (hidden, cell) = self.encoder(embedded_features)

        if hidden_state is None:
            # Seed the decoder with the encoder's final (h, c); squeeze the
            # num_layers dimension since the encoder is single-layer.
            hidden_state = (hidden.squeeze(0), cell.squeeze(0))

        # BUG FIX: last_action_idx defaulted to None but was dereferenced
        # unconditionally (``.view``), so calling forward without it crashed.
        # On the first decoding step fall back to node 0 for every instance.
        if last_action_idx is None:
            last_action_idx = torch.zeros(
                embedded_features.size(0),
                dtype=torch.long,
                device=embedded_features.device,
            )

        # Embedding of the previously selected node: (batch, embedding_dim).
        last_action_embedding = embedded_features.gather(
            1,
            last_action_idx.view(-1, 1, 1).expand(-1, -1, embedded_features.size(-1))
        ).squeeze(1)

        h_d, c_d = self.decoder_lstm(last_action_embedding, hidden_state)

        # Additive attention between decoder state (query) and encoder outputs.
        query = self.W_q(h_d.unsqueeze(1))
        keys = self.W_k(encoder_outputs)

        attention_scores = self.v(torch.tanh(query + keys)).squeeze(-1)
        # NOTE(review): visited nodes are not masked out here — presumably the
        # caller masks attention_scores before softmax/sampling; verify.

        # Critic value from the current decoder hidden state.
        value = self.value_net(h_d)

        return attention_scores, value, (h_d, c_d)
