import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class PositionalEncoding(nn.Module):
    """Standard sinusoidal positional encoding ("Attention Is All You Need").

    Precomputes a [1, max_len, d_model] table of sin/cos values and adds the
    first L rows to the input on each forward pass. Odd d_model is supported.

    Args:
        d_model: embedding dimension.
        max_len: maximum sequence length that can be encoded.
    """

    def __init__(self, d_model: int, max_len: int = 5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Frequencies 10000^(-2i/d_model) for i = 0 .. ceil(d_model/2) - 1.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        if d_model % 2 == 1:
            # Odd d_model: there is one more sin column than cos columns, so
            # drop the last frequency when filling the cos slots.
            pe[:, 1::2] = torch.cos(position * div_term[:-1])
        else:
            pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # [1, max_len, d_model]
        # register_buffer: moves with .to(device) but is not a parameter.
        self.register_buffer("pe", pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to x of shape [B, L, D].

        Raises:
            ValueError: if L exceeds the configured max_len (previously this
                surfaced as an opaque broadcasting error).
        """
        seq_len = x.size(1)
        if seq_len > self.pe.size(1):
            raise ValueError(
                f"sequence length {seq_len} exceeds PositionalEncoding max_len {self.pe.size(1)}"
            )
        return x + self.pe[:, :seq_len]


class TransformerTSP(nn.Module):
    """A compact Transformer-based model that exposes a PointerNet-compatible API.

    decode_with_depot(coords, depot_index=0, greedy=False, temperature=1.0)
    - coords: [B, L, 2] where L = N+1 (depot + N customers)
    - returns (tours, logp)
      * tours: LongTensor of shape [B, N] containing indices in [1..L-1] (customers only)
      * logp: Tensor [B] representing the (sum) log-probability used by REINFORCE

    Implementation notes:
    - Uses a Transformer encoder to produce per-node embeddings.
    - Greedy vs sampled decoding is supported.
    - The returned `logp` is computed from the chosen indices' softmax probabilities, so
      gradients flow to the parameters (suitable for REINFORCE-style updates in the training code).
    """

    def __init__(
        self,
        input_dim: int = 2,
        d_model: int = 128,
        nhead: int = 8,
        num_encoder_layers: int = 3,
        dim_feedforward: int = 256,
        dropout: float = 0.1,
        max_nodes: int = 128,
    ):
        super().__init__()
        self.input_dim = input_dim
        self.d_model = d_model
        self.max_nodes = max_nodes

        self.input_proj = nn.Linear(input_dim, d_model)
        self.pos_enc = PositionalEncoding(d_model, max_len=max_nodes)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)

        # Scoring head: encoded nodes -> keys, decoder state -> query.
        self.key_proj = nn.Linear(d_model, d_model)
        self.query_proj = nn.Linear(d_model, d_model)

        # Small scoring head; unused in the visible decode path but kept for
        # interface/checkpoint compatibility.
        self.score_proj = nn.Linear(d_model, 1)

        # Learnable start token used as the decoder query when no valid depot
        # embedding is available (out-of-range depot_index).
        self.start_token = nn.Parameter(torch.randn(1, d_model))

    def forward(self, coords: torch.Tensor) -> torch.Tensor:
        """Encode coordinates to node embeddings.

        coords: [B, L, input_dim]
        returns: [B, L, d_model]
        """
        x = self.input_proj(coords)  # [B, L, d_model]
        x = self.pos_enc(x)
        return self.encoder(x)  # [B, L, d_model]

    def _score(self, enc: torch.Tensor, query: torch.Tensor, mask=None, temperature: float = 1.0):
        """Dot-product attention scores of `query` against each candidate node.

        enc: [B, M, d_model] (M = number of candidate nodes)
        query: [B, d_model]
        mask: optional bool [B, M]; True entries are excluded (set to -inf)
        returns: [B, M] raw scores, divided by temperature
        """
        keys = self.key_proj(enc)  # [B, M, d]
        q = self.query_proj(query).unsqueeze(1)  # [B, 1, d]
        scores = (keys * q).sum(dim=-1)  # [B, M] dot-product similarity
        # Clamp the divisor so a zero/negative temperature cannot blow up.
        scores = scores / max(1e-6, float(temperature))
        if mask is not None:
            scores = scores.masked_fill(mask, float('-inf'))
        return scores

    def decode_with_depot(self, coords: torch.Tensor, depot_index: int = 0, greedy: bool = False, temperature: float = 1.0):
        """Autoregressive greedy/sampled decoder returning a permutation over customers (1..L-1).

        coords: [B, L, input_dim]; node 0..L-1, customers are 1..L-1.
        depot_index: index of the node whose embedding seeds the decoder query.
            Negative indices are honored (Python-style); an out-of-range value
            falls back to the learned start token instead of raising.
        greedy: argmax decoding if True, otherwise categorical sampling.
        temperature: softmax temperature applied to the scores.
        Returns (tours [B, L-1] long, logp [B]); logp remains differentiable.
        Raises ValueError if coords is not 3-dimensional (previously an assert,
        which is stripped under `python -O`).
        """
        if coords.dim() != 3:
            raise ValueError("coords must be [B, L, input_dim]")
        batch_size, L, _ = coords.size()
        device = coords.device

        enc = self.forward(coords)  # [B, L, d_model]

        if L <= 1:
            # No customers: empty tour, zero log-probability.
            return (
                torch.zeros((batch_size, 0), dtype=torch.long, device=device),
                torch.zeros(batch_size, device=device),
            )

        cust_idx = torch.arange(1, L, device=device)  # original node ids of customers
        num_cust = L - 1

        # Initial query: the depot embedding when depot_index is a valid
        # (possibly negative) index; otherwise the learned start token.
        # Explicit bounds check instead of a broad try/except that would also
        # have swallowed unrelated errors.
        if -L <= depot_index < L:
            query = enc[:, depot_index, :]  # [B, d]
        else:
            query = self.start_token.expand(batch_size, -1)

        # Mask of already-selected customers, shape [B, num_cust].
        selected_mask = torch.zeros((batch_size, num_cust), dtype=torch.bool, device=device)
        tours = torch.zeros((batch_size, num_cust), dtype=torch.long, device=device)
        logp = torch.zeros(batch_size, device=device)

        enc_cust = enc[:, 1:, :]  # [B, num_cust, d] candidate (customer) embeddings

        for t in range(num_cust):
            scores = self._score(enc_cust, query, mask=selected_mask, temperature=temperature)  # [B, num_cust]
            probs = F.softmax(scores, dim=-1)  # masked entries get zero probability

            if greedy:
                idx = probs.argmax(dim=-1)  # [B]
            else:
                # Sample from the categorical distribution over remaining customers.
                idx = torch.multinomial(probs, num_samples=1).squeeze(-1)  # [B]

            # Accumulate log-probability of the chosen action; clamp avoids log(0).
            chosen_prob = probs.gather(1, idx.unsqueeze(1)).squeeze(1).clamp_min(1e-12)
            logp = logp + torch.log(chosen_prob)

            # Map customer-local idx (0..num_cust-1) to original node id (1..L-1).
            tours[:, t] = cust_idx[idx]

            # Out-of-place scatter keeps the mask tensors used in earlier
            # iterations untouched (safe for autograd history).
            selected_mask = selected_mask.scatter(1, idx.unsqueeze(1), True)

            # Next query: encoded embedding of the node just visited.
            query = enc_cust.gather(
                1, idx.view(batch_size, 1, 1).expand(-1, -1, enc_cust.size(-1))
            ).squeeze(1)

        return tours, logp


class BertEncoder(nn.Module):
    """A standalone encoder matching TransformerTSP's encoding pipeline.

    forward(pos_features):
      - pos_features: Tensor [N, input_dim]
      - returns: Tensor [N, output_dim]
    """

    def __init__(self, input_dim: int = 2, d_model: int = 128, nhead: int = 8, num_layers: int = 3, max_nodes: int = 128, output_dim: int = 64):
        super().__init__()
        self.input_proj = nn.Linear(input_dim, d_model)
        self.pos_enc = PositionalEncoding(d_model, max_len=max_nodes)
        layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=4 * d_model,
            dropout=0.1,
            batch_first=True,
        )
        self.encoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        self.output_proj = nn.Linear(d_model, output_dim)

    def forward(self, pos_features: torch.Tensor) -> torch.Tensor:
        """Encode per-node features [N, input_dim] into [N, output_dim].

        A 1-D input is treated as [N] feature values and lifted to [N, 1].
        """
        feats = pos_features.unsqueeze(-1) if pos_features.dim() == 1 else pos_features
        # Run the batch_first encoder on a single pseudo-batch of size 1.
        hidden = self.pos_enc(self.input_proj(feats).unsqueeze(0))  # [1, N, d]
        encoded = self.encoder(hidden)  # [1, N, d]
        return self.output_proj(encoded).squeeze(0)  # [N, output_dim]


if __name__ == "__main__":
    # Quick smoke test: sampled decode over 2 instances of 6 nodes each.
    net = TransformerTSP(max_nodes=32)
    demo_coords = torch.randn(2, 6, 2)
    demo_tours, demo_logp = net.decode_with_depot(
        demo_coords, depot_index=0, greedy=False, temperature=1.0
    )
    print(demo_tours.shape, demo_logp.shape)
