import torch
from torch import nn
from torch.nn import TransformerEncoderLayer, TransformerDecoderLayer


class CriticEncoder(nn.Module):
    """Transformer-encoder critic head: maps node encodings to one scalar value.

    Replicates CriticEncoder from original code (Bert.py).
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        # d_model must be divisible by nhead=4.
        self.encoder_layer = TransformerEncoderLayer(d_model=input_dim, nhead=4)
        self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
        self.fc = nn.Linear(input_dim, 1)

    def forward(self, node_encodings):
        """Encode a (seq_len, input_dim) tensor, mean-pool over the sequence,
        and project to a single value (shape (1,))."""
        batched = node_encodings.unsqueeze(1)        # (seq, 1, d): fake batch dim
        encoded = self.encoder(batched).squeeze(1)   # back to (seq, d)
        pooled = encoded.mean(dim=0)                 # (d,)
        return self.fc(pooled)

class CriticDecoder(nn.Module):
    """Transformer-decoder critic head: maps node encodings to one scalar value.

    Replicates CriticDecoder from original code (Bert.py).
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.decoder_layer = TransformerDecoderLayer(d_model=input_dim, nhead=4)
        # BUG FIX: original wrapped a nonexistent ``self.encoder_layer`` in
        # ``nn.TransformerDecoderLayer(..., num_layers=2)``; stacking layers is
        # done with nn.TransformerDecoder around the decoder layer defined above.
        self.decoder = nn.TransformerDecoder(self.decoder_layer, num_layers=2)
        self.fc = nn.Linear(input_dim, 1)

    def forward(self, node_encodings):
        """Decode a (seq_len, input_dim) tensor, mean-pool over the sequence,
        and project to a single value (shape (1,))."""
        seq = node_encodings.unsqueeze(1)  # (seq, 1, d): fake batch dim
        # BUG FIX: TransformerDecoder.forward requires (tgt, memory); the
        # original passed a single argument. Self-attend over the encodings by
        # supplying them as both, mirroring ActorDecoder's usage below.
        dec_out = self.decoder(seq, seq).squeeze(1)
        pooled = dec_out.mean(dim=0)
        return self.fc(pooled)


class ActorDecoder(nn.Module):
    """Transformer-decoder actor head: produces a probability per node.

    Replicates ActorDecoder from original code (Bert.py).
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        # d_model must be divisible by nhead=4.
        self.decoder_layer = TransformerDecoderLayer(d_model=input_dim, nhead=4)
        self.decoder = nn.TransformerDecoder(self.decoder_layer, num_layers=2)
        self.fc = nn.Linear(input_dim, 1)

    def forward(self, node_encodings, selected_mask):
        """Score each node and softmax into probabilities.

        ``selected_mask`` is added to the logits, so -inf entries zero out the
        corresponding node's probability.
        """
        seq = node_encodings.unsqueeze(1)            # (seq, 1, d): fake batch dim
        decoded = self.decoder(seq, seq).squeeze(1)  # self-attend: tgt == memory
        masked_logits = selected_mask + self.fc(decoded).squeeze(-1)
        return torch.softmax(masked_logits, dim=-1)

