"""
Actor-Critic 网络模型

基于 TorchRL 风格的 Actor-Critic 网络实现。
"""

from __future__ import annotations

from typing import Tuple

import torch
import torch.nn as nn
from torch.distributions import Normal


class ActorCriticNetwork(nn.Module):
    """Actor-Critic network with a shared feature trunk.

    Architecture:
        input (obs_dim) -> Linear -> Tanh -> Linear -> Tanh   (shared trunk)
        -> mean head    (action_dim)  : Gaussian action mean
        -> log-std head (action_dim)  : Gaussian log standard deviation
        -> value head   (1)           : state-value estimate

    Actions produced by :meth:`get_action` are squashed into [-1, 1] with
    ``tanh``.  NOTE(review): the returned log-probability is computed in the
    *pre-tanh* space (no tanh Jacobian correction); :meth:`evaluate_actions`
    uses the same convention, so the two are mutually consistent.
    """

    def __init__(
        self,
        obs_dim: int = 4,
        action_dim: int = 1,
        hidden_dim: int = 64,
        log_std_min: float = -20.0,
        log_std_max: float = 2.0,
    ):
        """Build the network.

        Args:
            obs_dim: Dimension of the observation space.
            action_dim: Dimension of the action space.
            hidden_dim: Width of the two shared hidden layers.
            log_std_min: Lower clamp on the predicted log-std (numerical stability).
            log_std_max: Upper clamp on the predicted log-std (numerical stability).
        """
        super().__init__()

        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        # Shared two-layer tanh trunk feeding all three heads.
        trunk = [
            nn.Linear(obs_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
        ]
        self.shared_net = nn.Sequential(*trunk)

        # Actor heads: Gaussian mean, and log-std (log-space for stability).
        self.mean_head = nn.Linear(hidden_dim, action_dim)
        self.log_std_head = nn.Linear(hidden_dim, action_dim)

        # Critic head: scalar state-value estimate.
        self.value_head = nn.Linear(hidden_dim, 1)

        self._initialize_weights()

    def _initialize_weights(self):
        """Orthogonal init (gain 1.0) for every Linear weight; zero biases."""
        linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            nn.init.orthogonal_(layer.weight, gain=1.0)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0.0)

    def forward(
        self, obs: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute policy parameters and state value for a batch of observations.

        Args:
            obs: Observations, shape [batch_size, obs_dim].

        Returns:
            mean: Gaussian action mean, shape [batch_size, action_dim].
            std: Gaussian action std (strictly positive), shape [batch_size, action_dim].
            value: State-value estimate, shape [batch_size, 1].
        """
        h = self.shared_net(obs)

        mean = self.mean_head(h)
        # Clamp the raw log-std into [log_std_min, log_std_max] before
        # exponentiating, keeping std bounded and positive.
        std = self.log_std_head(h).clamp(self.log_std_min, self.log_std_max).exp()

        value = self.value_head(h)
        return mean, std, value

    def get_action(
        self, obs: torch.Tensor, deterministic: bool = False
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Sample a tanh-squashed action from the policy.

        Args:
            obs: Observations, shape [batch_size, obs_dim].
            deterministic: If True, return the distribution mean instead of
                sampling (used for evaluation).

        Returns:
            action: Action in [-1, 1], shape [batch_size, action_dim].
            log_prob: Pre-tanh log-probability of the action, shape [batch_size].
            value: State-value estimate, shape [batch_size, 1].
        """
        mean, std, value = self.forward(obs)
        policy = Normal(mean, std)

        pre_tanh = mean if deterministic else policy.sample()
        log_prob = policy.log_prob(pre_tanh).sum(dim=-1)

        # Squash into [-1, 1].  The tanh Jacobian correction
        # (- sum log(1 - tanh(x)^2)) is intentionally omitted here, matching
        # the convention in evaluate_actions.
        return torch.tanh(pre_tanh), log_prob, value

    def evaluate_actions(
        self, obs: torch.Tensor, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Score given (observation, action) pairs under the current policy.

        Args:
            obs: Observations, shape [batch_size, obs_dim].
            actions: Tanh-squashed actions in [-1, 1],
                shape [batch_size, action_dim].

        Returns:
            log_probs: Pre-tanh log-probabilities, shape [batch_size].
            entropy: Per-sample policy entropy, shape [batch_size].
            values: State-value estimates, shape [batch_size, 1].
        """
        mean, std, value = self.forward(obs)
        policy = Normal(mean, std)

        # Invert the tanh squashing; clamp away from +/-1 so atanh stays finite.
        pre_tanh = torch.atanh(torch.clamp(actions, -0.999, 0.999))

        log_probs = policy.log_prob(pre_tanh).sum(dim=-1)
        entropy = policy.entropy().sum(dim=-1)
        return log_probs, entropy, value

    def get_value(self, obs: torch.Tensor) -> torch.Tensor:
        """Critic-only forward pass.

        Args:
            obs: Observations, shape [batch_size, obs_dim].

        Returns:
            State-value estimates, shape [batch_size, 1].
        """
        return self.value_head(self.shared_net(obs))
