import copy

import torch
import torch.nn as nn
from torch.distributions.normal import Normal


# 增强版策略网络 (Actor)
class PolicyNetwork(nn.Module):
    """Enhanced Gaussian policy network (Actor).

    Maps a state vector to the mean and standard deviation of a Normal
    distribution over actions.

    Args:
        state_dim: dimensionality of the input state vector.
        action_dim: dimensionality of the action vector.
        hidden_dims: sizes of the hidden layers; defaults to [128, 256, 128].
            (A ``None`` sentinel is used to avoid the mutable-default pitfall.)
        activation: activation module; each hidden layer receives its own
            deep copy so parametric activations (e.g. ``nn.PReLU``) do not
            share parameters across layers. Defaults to ``nn.ReLU()``.
    """

    # Clamp range for log_std: the upper bound prevents exploding variance /
    # gradients, the lower bound prevents a collapsed, near-deterministic policy.
    LOG_STD_MIN = -20.0
    LOG_STD_MAX = 2.0

    def __init__(self, state_dim, action_dim, hidden_dims=None, activation=None):
        super().__init__()

        # Resolve sentinel defaults (avoids shared mutable default arguments).
        if hidden_dims is None:
            hidden_dims = [128, 256, 128]
        if activation is None:
            activation = nn.ReLU()

        # Feature-extraction trunk: Linear -> activation -> LayerNorm.
        # LayerNorm is added after each layer to stabilize training of the
        # deeper stack.
        layers = []
        input_dim = state_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(input_dim, hidden_dim))
            # deepcopy so every layer owns an independent activation instance
            layers.append(copy.deepcopy(activation))
            layers.append(nn.LayerNorm(hidden_dim))
            input_dim = hidden_dim

        self.feature_extractor = nn.Sequential(*layers)

        # Output heads for the Gaussian's mean and log standard deviation.
        self.mu_head = nn.Linear(hidden_dims[-1], action_dim)
        self.log_std_head = nn.Linear(hidden_dims[-1], action_dim)

        # Parameter initialization
        self._init_weights()

    def _init_weights(self):
        """He-initialize the trunk; small-weight init for the output heads."""
        # He (Kaiming) init improves training of deeper ReLU networks.
        for m in self.feature_extractor.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0)

        # Small mean-head weights keep initial actions close to zero.
        nn.init.xavier_uniform_(self.mu_head.weight, gain=0.01)
        nn.init.constant_(self.mu_head.bias, 0.0)

        # Negative bias gives an initial std of exp(-0.5) ~= 0.61
        # (moderate initial exploration).
        nn.init.xavier_uniform_(self.log_std_head.weight, gain=0.01)
        nn.init.constant_(self.log_std_head.bias, -0.5)

    def forward(self, x):
        """Return ``(mu, std)`` of the action distribution for state(s) ``x``."""
        x = self.feature_extractor(x)

        # tanh bounds the mean to [-1, 1].
        mu = torch.tanh(self.mu_head(x))

        # Clamp log_std to a sane range, then exponentiate so std > 0.
        log_std = self.log_std_head(x)
        log_std = torch.clamp(log_std, min=self.LOG_STD_MIN, max=self.LOG_STD_MAX)
        std = torch.exp(log_std)

        return mu, std

    def get_action(self, state, deterministic=False):
        """Get an action (and its log-probability) for ``state``.

        Args:
            state: environment state (array-like or tensor); moved to the
                model's device automatically.
            deterministic: if True, return the distribution mean as the action.

        Returns:
            If ``deterministic`` is True: the deterministic action as a numpy
            array. Otherwise: a ``(action, log_prob)`` tuple where ``action``
            is a sampled numpy array and ``log_prob`` is a tensor summed over
            the action dimension (per-sample for batched input).
        """
        if not isinstance(state, torch.Tensor):
            state = torch.FloatTensor(state)

        # Move the state to the device the model parameters live on.
        device = next(self.parameters()).device
        state = state.to(device)

        # Deterministic mode — return the mean directly; no sampling is
        # involved, so skip autograd bookkeeping entirely.
        if deterministic:
            with torch.no_grad():
                mu, _ = self.forward(state)
            return mu.cpu().numpy()

        # Stochastic mode — sample from the Gaussian.
        mu, std = self.forward(state)
        dist = Normal(mu, std)
        action = dist.sample()
        # Sum over the action dimension only, so batched states yield one
        # log-prob per sample (plain .sum() would collapse the batch too).
        log_prob = dist.log_prob(action).sum(dim=-1)

        return action.cpu().detach().numpy(), log_prob
