import math

import torch
import torch.nn as nn

import gymnasium as gym
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor


class TransformerFeatureExtractor(BaseFeaturesExtractor):
    """
    Transformer-based feature extractor for SB3 policies.

    Each scalar of the (flattened) observation is treated as one token:
    - A learnable [CLS] token is prepended; its encoded state is the
      global representation returned to the policy/value heads.
    - Learnable positional encodings via ``nn.Embedding`` (index 0 is
      reserved for the CLS position, indices 1..obs_dim for observations).
    - The CLS output is passed through LayerNorm to stabilize training.

    Args:
        observation_space: A ``gym.spaces.Box``; multi-dimensional spaces
            are flattened, so any rank is supported.
        d_model: Token embedding width; also the extractor's ``features_dim``.
        nhead: Number of attention heads per encoder layer.
        num_layers: Number of stacked ``TransformerEncoderLayer`` blocks.
        dim_feedforward: Hidden width of each layer's feed-forward MLP.
        dropout: Dropout probability inside the encoder layers.
    """

    def __init__(self, observation_space: gym.spaces.Box,
                 d_model=128, nhead=4, num_layers=2, dim_feedforward=256, dropout=0.1):
        super().__init__(observation_space, features_dim=d_model)
        # Generalized: flatten the whole space instead of reading shape[0],
        # which silently truncated rank>1 observation spaces. For 1-D spaces
        # this is identical to the original behavior.
        obs_dim = math.prod(observation_space.shape)

        self.d_model = d_model
        self.obs_dim = obs_dim

        # Lift each scalar token into d_model dimensions.
        self.input_fc = nn.Linear(1, d_model)

        # Learnable [CLS] token, shared across the batch.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, d_model))

        # Learnable positional encoding; +1 slot reserves index 0 for CLS.
        self.pos_embedding = nn.Embedding(obs_dim + 1, d_model)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            activation='relu',
            batch_first=True,  # I/O layout: [batch, seq_len, d_model]
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        # Final LayerNorm keeps the feature scale stable for the heads.
        self.ln = nn.LayerNorm(d_model)

    def forward(self, observations: torch.Tensor) -> torch.Tensor:
        """Encode a batch of observations into [batch, d_model] features."""
        batch_size = observations.size(0)

        # Flatten any trailing dims, then treat each scalar as a length-1 token.
        x = observations.reshape(batch_size, -1).unsqueeze(-1)  # [B, obs_dim, 1]
        x = self.input_fc(x)                                    # [B, obs_dim, d_model]

        # Positions 1..obs_dim belong to observation tokens (0 is CLS).
        # The embedding is [obs_dim, d_model] and broadcasts over the batch,
        # avoiding the per-batch index tensor the original materialized.
        pos_ids = torch.arange(1, self.obs_dim + 1, device=observations.device)
        x = x + self.pos_embedding(pos_ids)                     # [B, obs_dim, d_model]

        # Prepend the [CLS] token with its dedicated position embedding (index 0).
        cls_pos = torch.zeros(1, dtype=torch.long, device=observations.device)
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # [B, 1, d_model]
        cls_tokens = cls_tokens + self.pos_embedding(cls_pos)
        x = torch.cat([cls_tokens, x], dim=1)                   # [B, obs_dim+1, d_model]

        x = self.transformer(x)                                 # [B, obs_dim+1, d_model]

        # The encoded [CLS] state is the global feature vector.
        cls_output = x[:, 0, :]                                 # [B, d_model]
        return self.ln(cls_output)
