import torch
import torch.nn as nn
import numpy as np
import math
import torch.nn.functional as F

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017), batch-first.

    Adds fixed sine/cosine position features to the input embeddings and
    applies dropout. Expects input of shape [batch_size, seq_len, d_model].
    """

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 512):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)  # [max_len, 1]
        # Geometric frequency schedule: 10000^(-2i/d_model) for even index 2i.
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        # Fix: slice div_term so an odd d_model no longer raises a shape
        # mismatch (the cosine slots are one column fewer than the sine slots
        # when d_model is odd). For even d_model the slice is a no-op.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        # Buffer, not Parameter: follows .to(device)/state_dict but is untrained.
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [batch_size, seq_len, embedding_dim]

        Returns:
            Tensor of the same shape with positional encodings added, after dropout.
        """
        # self.pe[:seq_len] is [seq_len, d_model]; broadcasting adds it to
        # every sequence in the batch.
        x = x + self.pe[:x.size(1)]
        return self.dropout(x)


class NumericalCoATNet(nn.Module):
    """
    A Transformer Encoder-Decoder model to generate symbolic reasoning traces
    (Chain-of-Thought) for solving deadlock problems.

    - The Encoder processes a numerical sequence representing the system's state.
    - The Decoder autoregressively generates a sequence of symbolic tokens
      representing the reasoning process to arrive at a decision.
    """

    def __init__(self,
                 feature_dim: int,
                 vocab_size: int,
                 d_model: int = 128,
                 nhead: int = 4,
                 num_encoder_layers: int = 3,
                 num_decoder_layers: int = 3):
        """
        Args:
            feature_dim: Size of each numerical feature vector in the input
                state sequence (allocation + need + status_one_hot + properties).
            vocab_size: Number of symbolic tokens the decoder can emit.
            d_model: Transformer hidden size.
            nhead: Number of attention heads per layer.
            num_encoder_layers: Depth of the encoder stack.
            num_decoder_layers: Depth of the decoder stack.
        """
        super().__init__()
        self.d_model = d_model

        # === 1. Encoder Components ===
        # Project raw numerical state features into the model dimension.
        self.input_embedder = nn.Linear(feature_dim, d_model)

        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)

        # === 2. Decoder Components ===
        self.tgt_tok_emb = nn.Embedding(vocab_size, d_model)
        decoder_layer = nn.TransformerDecoderLayer(d_model=d_model, nhead=nhead, batch_first=True)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)

        # === 3. Shared Components & Output Layers ===
        self.positional_encoder = PositionalEncoding(d_model)
        # Projects decoder hidden states to logits over the symbolic vocabulary.
        self.generator = nn.Linear(d_model, vocab_size)

        # The value head for PPO, operating on the encoded state representation.
        self.value_head = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 1)
        )

    def _encode_state(self, src: torch.Tensor) -> torch.Tensor:
        """
        Runs the source state sequence through the encoder.

        Args:
            src: [batch_size, src_seq_len, feature_dim]

        Returns:
            memory: [batch_size, src_seq_len, d_model]
        """
        src_emb = self.input_embedder(src)
        pos_encoded_src = self.positional_encoder(src_emb)
        return self.transformer_encoder(pos_encoded_src)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor, pad_id: int) -> torch.Tensor:
        """
        Forward pass for training (Supervised Fine-Tuning with Teacher Forcing).

        Args:
            src: Source state sequence, [batch, src_seq_len, feature_dim].
            tgt: Target token ids, [batch, tgt_seq_len].
            pad_id: Token id marking padding positions in `tgt`.

        Returns:
            Logits over the vocabulary, [batch, tgt_seq_len, vocab_size].
        """
        # 1. Encode the source state to get the memory.
        memory = self._encode_state(src)

        # 2. Prepare masks for the decoder.
        tgt_seq_len = tgt.shape[1]

        future_mask_float = nn.Transformer.generate_square_subsequent_mask(tgt_seq_len, device=src.device)
        # Convert the additive float mask (-inf above the diagonal) into a
        # boolean mask where True means "masked", matching the boolean
        # key-padding mask below.
        tgt_mask = torch.isinf(future_mask_float)

        # True at padding positions; already boolean.
        tgt_padding_mask = (tgt == pad_id)

        # 3. Embed and positionally encode the target sequence.
        tgt_emb = self.tgt_tok_emb(tgt)
        pos_encoded_tgt = self.positional_encoder(tgt_emb)

        decoder_output = self.transformer_decoder(
            pos_encoded_tgt,
            memory,
            tgt_mask=tgt_mask,
            tgt_key_padding_mask=tgt_padding_mask
        )

        # 4. Generate logits.
        return self.generator(decoder_output)

    def predict_value(self, src: torch.Tensor) -> torch.Tensor:
        """
        Predicts the state value (for PPO). Operates on the encoder's output.

        Args:
            src: Source state sequence, [batch, src_seq_len, feature_dim].

        Returns:
            Value estimates, [batch, 1].
        """
        memory = self._encode_state(src)
        # Use the representation of the first token (like a [CLS] token)
        # as the state summary.
        state_representation = memory[:, 0, :]
        return self.value_head(state_representation)

    @torch.no_grad()
    def generate(self, src: torch.Tensor, max_len: int, start_symbol: int, end_symbol: int) -> torch.Tensor:
        """
        Autoregressively generates a symbolic trace for a given state (for
        inference), using greedy decoding.

        Note: decoding stops early only when every sequence in the batch emits
        `end_symbol` at the same step; otherwise sequences that already ended
        continue to receive greedy tokens until `max_len`.

        Args:
            src (torch.Tensor): The source state sequence.
                                Shape: [batch_size, src_seq_len, feature_dim]
            max_len (int): The maximum length of the generated trace.
            start_symbol (int): The ID for the START token.
            end_symbol (int): The ID for the END token.

        Returns:
            torch.Tensor: The generated sequence of token IDs.
                          Shape: [batch_size, generated_seq_len]
        """
        # Remember the caller's mode so we can restore it afterwards; the old
        # code called self.train() unconditionally, flipping an eval-mode
        # model back into train mode as a side effect.
        was_training = self.training
        self.eval()  # disable dropout for deterministic decoding
        batch_size = src.shape[0]
        device = src.device

        # 1. Encode the source state once; memory is reused at every step.
        memory = self._encode_state(src)

        # 2. Initialize the generated sequences with the start symbol.
        ys = torch.full((batch_size, 1), start_symbol, dtype=torch.long, device=device)

        for _ in range(max_len - 1):
            # Embed and positionally encode the tokens generated so far.
            tgt_emb = self.tgt_tok_emb(ys)
            pos_encoded_tgt = self.positional_encoder(tgt_emb)

            # Causal mask over the current prefix (boolean, True = masked),
            # consistent with forward().
            tgt_mask = torch.isinf(
                nn.Transformer.generate_square_subsequent_mask(ys.size(1), device=device)
            )

            out = self.transformer_decoder(pos_encoded_tgt, memory, tgt_mask=tgt_mask)

            # Logits for the most recent position only.
            logits = self.generator(out[:, -1, :])

            # Greedily select the next token.
            next_token = logits.argmax(dim=-1)
            ys = torch.cat([ys, next_token.unsqueeze(1)], dim=1)

            # Stop if every sequence in the batch just produced the END symbol.
            if (next_token == end_symbol).all():
                break

        self.train(was_training)
        return ys

    @torch.no_grad()
    def sample(self, src: torch.Tensor, max_len: int, start_symbol: int, end_symbol: int, pad_symbol: int) -> torch.Tensor:
        """
        Autoregressively generates a symbolic trace by sampling from the
        decoder's output distribution (for exploration, e.g. PPO rollouts).

        Unlike `generate`, each sequence stops independently: once a sequence
        emits `end_symbol`, all of its subsequent positions are filled with
        `pad_symbol` so finished sequences contribute no noise.

        Args:
            src: Source state sequence, [batch_size, src_seq_len, feature_dim].
            max_len: Maximum length of the generated trace.
            start_symbol: ID of the START token.
            end_symbol: ID of the END token.
            pad_symbol: ID used to pad sequences that have already finished.

        Returns:
            Generated token IDs, [batch_size, generated_seq_len].
        """
        # eval() disables dropout; remember the caller's mode to restore it
        # (the old code forced train mode on exit).
        was_training = self.training
        self.eval()
        batch_size = src.shape[0]
        device = src.device

        memory = self._encode_state(src)
        ys = torch.full((batch_size, 1), start_symbol, dtype=torch.long, device=device)

        # Tracks which sequences have NOT yet emitted the END symbol.
        unfinished_sequences = torch.ones(batch_size, 1, dtype=torch.bool, device=device)

        for _ in range(max_len - 1):
            tgt_emb = self.tgt_tok_emb(ys)
            pos_encoded_tgt = self.positional_encoder(tgt_emb)
            tgt_mask = torch.isinf(
                nn.Transformer.generate_square_subsequent_mask(ys.size(1), device=device)
            )

            out = self.transformer_decoder(pos_encoded_tgt, memory, tgt_mask=tgt_mask)
            logits = self.generator(out[:, -1, :])

            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            # Sequences that already finished emit PAD from now on: overwrite
            # their freshly sampled token with pad_symbol.
            next_token = torch.where(unfinished_sequences, next_token,
                                     torch.tensor(pad_symbol, device=device))
            ys = torch.cat([ys, next_token], dim=1)

            # A sequence stays unfinished only until it produces END.
            unfinished_sequences = unfinished_sequences & (next_token != end_symbol)

            # Early exit once every sequence has finished.
            if not unfinished_sequences.any():
                break

        self.train(was_training)
        return ys
