import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class PointerNet(nn.Module):
    """Pointer Network with an LSTM encoder/decoder and additive (Bahdanau) attention.

    Interface (kept compatible with repository usage):
    - __init__(input_dim, embed_dim, enc_hidden, dec_hidden, dropout)
    - forward(x): returns per-position decoder features (convenience only)
    - decode_with_depot(coords, depot_index=0, greedy=False, temperature=1.0):
          coords: [B, S, input_dim] with the depot row at ``depot_index``
          returns (tours, logp)
          - tours: LongTensor [B, S], a permutation of 0..S-1 with the depot at
            position 0 followed by the selected customer order.
          - logp: FloatTensor [B], sum of log-probabilities of the selected
            sequence (differentiable, suitable for REINFORCE-style training).

    Small and self-contained so it can serve as a drop-in placeholder while
    still providing real pointer behavior (attention-based selection with
    masking of already-visited indices).
    """

    def __init__(self, input_dim=2, embed_dim=128, enc_hidden=128, dec_hidden=128, dropout=0.1):
        super().__init__()
        self.input_dim = input_dim
        self.embed_dim = embed_dim
        self.enc_hidden = enc_hidden
        self.dec_hidden = dec_hidden
        self.dropout = dropout

        # Input feature embedding.
        self.embed = nn.Linear(input_dim, embed_dim)

        # Encoder over the embedded input sequence.
        self.encoder = nn.LSTM(embed_dim, enc_hidden, batch_first=True)

        # Single-step decoder cell, advanced autoregressively while decoding.
        self.decoder_cell = nn.LSTMCell(embed_dim, dec_hidden)

        # Additive attention: score_j = v^T tanh(W_enc e_j + W_dec d).
        self.W_enc = nn.Linear(enc_hidden, dec_hidden, bias=False)
        self.W_dec = nn.Linear(dec_hidden, dec_hidden, bias=False)
        self.v = nn.Linear(dec_hidden, 1, bias=False)

        # Projects a picked encoder state back into the decoder input space.
        self.pick_proj = nn.Linear(enc_hidden, embed_dim)

        # Applied to the input embeddings (active only in train() mode).
        self.dropout_layer = nn.Dropout(p=dropout)

        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize all weight matrices; biases keep their defaults."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the encoder and return a per-position feature projection.

        Present mainly for API compatibility; decoding happens in
        :meth:`decode_with_depot`.

        Args:
            x: [B, S, input_dim] input features.

        Returns:
            [B, S, embed_dim] tanh-squashed projection of the encoder outputs.
        """
        emb = self.dropout_layer(F.relu(self.embed(x)))  # [B, S, E]
        enc_out, _ = self.encoder(emb)  # [B, S, enc_hidden]
        return torch.tanh(self.pick_proj(enc_out))  # [B, S, embed_dim]

    def _compute_attention_logits(self, enc_out: torch.Tensor, dec_hidden: torch.Tensor):
        """Compute additive attention logits.

        Args:
            enc_out: [B, S, enc_hidden] encoder outputs.
            dec_hidden: [B, dec_hidden] current decoder hidden state.

        Returns:
            [B, S] unnormalized attention scores.
        """
        enc_part = self.W_enc(enc_out)  # [B, S, dec_hidden]
        dec_part = self.W_dec(dec_hidden).unsqueeze(1)  # [B, 1, dec_hidden]
        u = torch.tanh(enc_part + dec_part)  # [B, S, dec_hidden]
        return self.v(u).squeeze(-1)  # [B, S]

    def decode_with_depot(self, coords: torch.Tensor, depot_index: int = 0, greedy: bool = False, temperature: float = 1.0):
        """Decode a permutation over ``coords`` with the pointer mechanism.

        Args:
            coords: [B, S, input_dim] node features; row ``depot_index`` is the depot.
            depot_index: index of the depot row in ``coords``.
            greedy: if True take the argmax at each step, else sample.
            temperature: softmax temperature (>0); None/0 disables scaling.

        Returns:
            tours: LongTensor [B, S] — a permutation of 0..S-1 with the depot
                at position 0 followed by the chosen customer order.
            logp: FloatTensor [B] — sum of log-probabilities of the selected
                sequence (differentiable).
        """
        device = coords.device
        B, S, _ = coords.size()
        assert S >= 2, "coords must include depot + at least one customer"

        emb = self.dropout_layer(F.relu(self.embed(coords)))  # [B, S, E]
        enc_out, _ = self.encoder(emb)  # [B, S, enc_hidden]

        # Zero-initialized decoder state: deterministic and stable.
        h = torch.zeros(B, self.dec_hidden, device=device)
        c = torch.zeros(B, self.dec_hidden, device=device)

        # Prime the decoder with the depot embedding so the first attention
        # query is conditioned on the starting location.
        h, c = self.decoder_cell(emb[:, depot_index, :], (h, c))

        # True = still selectable; the depot is never picked as a customer.
        avail_mask = torch.ones(B, S, dtype=torch.bool, device=device)
        avail_mask[:, depot_index] = False

        batch_idx = torch.arange(B, device=device)
        log_probs = torch.zeros(B, device=device)
        seq = []

        for _ in range(S - 1):
            logits = self._compute_attention_logits(enc_out, h)  # [B, S]
            # Mask out already-selected nodes and the depot.
            logits = logits.masked_fill(~avail_mask, float('-inf'))

            if temperature is not None and temperature > 0:
                logits = logits / float(max(temperature, 1e-8))

            # Single log_softmax: numerically stable (no log(softmax + eps)).
            log_p = F.log_softmax(logits, dim=-1)  # [B, S]

            if greedy:
                chosen = log_p.argmax(dim=-1)  # [B]
                chosen_logp = log_p.gather(1, chosen.unsqueeze(1)).squeeze(1)
            else:
                m = torch.distributions.Categorical(logits=log_p)
                chosen = m.sample()  # [B]
                chosen_logp = m.log_prob(chosen)

            seq.append(chosen)
            log_probs = log_probs + chosen_logp
            avail_mask[batch_idx, chosen] = False

            # Next decoder input: projection of the chosen encoder state.
            dec_input = torch.tanh(self.pick_proj(enc_out[batch_idx, chosen]))
            h, c = self.decoder_cell(dec_input, (h, c))

        # Assemble the full permutation: depot first, then the visiting order,
        # matching the documented contract ("depot at index 0").
        tours = torch.empty(B, S, dtype=torch.long, device=device)
        tours[:, 0] = depot_index
        tours[:, 1:] = torch.stack(seq, dim=1)  # [B, S-1]

        return tours, log_probs

