import gym
import torch
import torch.nn as nn
from stable_baselines3 import PPO
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor


class CustomFeatureExtractor(BaseFeaturesExtractor):
    """Entity-split feature extractor for SB3 policies.

    Splits each observation's last dimension into three segments
    (self / ally / enemy), encodes each segment's time sequence with its
    own GRU, attends from the self features to the ally and enemy
    features with multi-head attention, and projects the concatenated
    result to ``features_dim``.

    Expects observations of shape
    ``(batch, seq, self_dim + ally_dim + enemy_dim)``.
    """

    def __init__(self, observation_space, self_dim, ally_dim, enemy_dim, seq_len,
                 features_dim=256, gru_hidden_size=64, num_heads=4):
        """
        :param observation_space: gym observation space (forwarded to the SB3 base).
        :param self_dim: number of per-step features describing the agent itself.
        :param ally_dim: number of per-step features describing allies.
        :param enemy_dim: number of per-step features describing enemies.
        :param seq_len: nominal sequence length (informational; ``forward``
            processes whatever sequence length it is given).
        :param features_dim: size of the extracted feature vector.
        :param gru_hidden_size: hidden size shared by the three GRUs; must be
            divisible by ``num_heads``.
        :param num_heads: number of attention heads.
        """
        super().__init__(observation_space, features_dim)
        # Sizes used to split the observation's feature dimension.
        self.self_dim = self_dim
        self.ally_dim = ally_dim
        self.enemy_dim = enemy_dim
        self.seq_len = seq_len  # recorded for reference; not used to slice in forward

        # One GRU per entity group (batch_first: inputs are [batch, seq, dim]).
        self.self_gru = nn.GRU(input_size=self.self_dim, hidden_size=gru_hidden_size, batch_first=True)
        self.ally_gru = nn.GRU(input_size=self.ally_dim, hidden_size=gru_hidden_size, batch_first=True)
        self.enemy_gru = nn.GRU(input_size=self.enemy_dim, hidden_size=gru_hidden_size, batch_first=True)

        # LayerNorm applied to each GRU's final-step output.
        self.self_ln = nn.LayerNorm(gru_hidden_size)
        self.ally_ln = nn.LayerNorm(gru_hidden_size)
        self.enemy_ln = nn.LayerNorm(gru_hidden_size)

        # Cross-attention: self features query the ally/enemy features.
        # Default layout is (seq, batch, embed) — forward adds a length-1 seq axis.
        self.self_ally_attention = nn.MultiheadAttention(embed_dim=gru_hidden_size, num_heads=num_heads)
        self.self_enemy_attention = nn.MultiheadAttention(embed_dim=gru_hidden_size, num_heads=num_heads)

        # LayerNorm applied to each attention output.
        self.ally_attn_ln = nn.LayerNorm(gru_hidden_size)
        self.enemy_attn_ln = nn.LayerNorm(gru_hidden_size)

        # Fusion head: project the 3-way concatenation, then ReLU, then a
        # pre-activation residual (LN(ReLU(z) + z)) — see forward().
        self.fusion_linear = nn.Linear(gru_hidden_size * 3, features_dim)
        self.fusion_relu = nn.ReLU()
        self.fusion_ln = nn.LayerNorm(features_dim)

    def forward(self, observations):
        """Map ``(batch, seq, obs_dim)`` observations to ``(batch, features_dim)`` features."""
        # Split the feature dimension into the three entity segments.
        self_data = observations[:, :, :self.self_dim]                               # [batch, seq, self_dim]
        ally_data = observations[:, :, self.self_dim:self.self_dim + self.ally_dim]  # [batch, seq, ally_dim]
        enemy_data = observations[:, :, self.self_dim + self.ally_dim:]              # [batch, seq, enemy_dim]

        # Encode each sequence with its GRU.
        self_output, _ = self.self_gru(self_data)    # [batch, seq, gru_hidden_size]
        ally_output, _ = self.ally_gru(ally_data)
        enemy_output, _ = self.enemy_gru(enemy_data)

        # Keep only the last time step (mean/max pooling are possible alternatives),
        # then normalize.
        self_feat = self.self_ln(self_output[:, -1, :])    # [batch, gru_hidden_size]
        ally_feat = self.ally_ln(ally_output[:, -1, :])
        enemy_feat = self.enemy_ln(enemy_output[:, -1, :])

        # MultiheadAttention expects (seq, batch, embed); each feature vector
        # becomes a length-1 sequence: [1, batch, gru_hidden_size].
        query = self_feat.unsqueeze(0)
        ally_kv = ally_feat.unsqueeze(0)
        enemy_kv = enemy_feat.unsqueeze(0)

        # Self attends to allies, then to enemies; normalize each context.
        ally_ctx, _ = self.self_ally_attention(query=query, key=ally_kv, value=ally_kv)
        ally_ctx = self.ally_attn_ln(ally_ctx.squeeze(0))

        enemy_ctx, _ = self.self_enemy_attention(query=query, key=enemy_kv, value=enemy_kv)
        enemy_ctx = self.enemy_attn_ln(enemy_ctx.squeeze(0))

        # Concatenate self features with the two attended contexts.
        concat = torch.cat([self_feat, ally_ctx, enemy_ctx], dim=1)

        # Fusion with a pre-activation residual: z = W·concat; out = LN(ReLU(z) + z).
        projected = self.fusion_linear(concat)
        activated = self.fusion_relu(projected)
        return self.fusion_ln(activated + projected)


class SplitFeatureActorCriticPolicy(ActorCriticPolicy):
    """Actor-critic policy wired to :class:`CustomFeatureExtractor`.

    The observation-split sizes (``self_dim``, ``ally_dim``, ``enemy_dim``,
    ``seq_len``) were previously hard-coded; they are now keyword parameters
    with the same defaults, so existing callers behave identically while new
    environments can configure the split.
    """

    def __init__(self, observation_space, action_space, lr_schedule,
                 features_dim=256, gru_hidden_size=64, num_heads=4,
                 self_dim=8, ally_dim=5, enemy_dim=7, seq_len=15, **kwargs):
        """
        :param observation_space: environment observation space.
        :param action_space: environment action space.
        :param lr_schedule: learning-rate schedule (required by SB3).
        :param features_dim: output size of the feature extractor.
        :param gru_hidden_size: hidden size of the extractor's GRUs.
        :param num_heads: number of attention heads in the extractor.
        :param self_dim: per-step feature count for the agent itself.
        :param ally_dim: per-step feature count for allies.
        :param enemy_dim: per-step feature count for enemies.
        :param seq_len: nominal observation sequence length.
        :param kwargs: forwarded to :class:`ActorCriticPolicy`.
        """
        super().__init__(
            observation_space,
            action_space,
            lr_schedule,
            features_extractor_class=CustomFeatureExtractor,
            features_extractor_kwargs=dict(
                self_dim=self_dim,
                ally_dim=ally_dim,
                enemy_dim=enemy_dim,
                seq_len=seq_len,
                features_dim=features_dim,
                gru_hidden_size=gru_hidden_size,
                num_heads=num_heads,
            ),
            # Actor and critic each get their own extractor instance.
            share_features_extractor=False,
            **kwargs,
        )
