import numpy as np
from typing import List, Optional, Tuple, Union
from abc import ABC, abstractmethod


class ActivationFunctions:
    """Collection of stateless activation functions and their derivatives."""

    @staticmethod
    def relu(x: np.ndarray) -> np.ndarray:
        """Element-wise rectified linear unit: max(0, x)."""
        return np.maximum(0, x)

    @staticmethod
    def relu_derivative(x: np.ndarray) -> np.ndarray:
        """Derivative of ReLU: 1 where x > 0, else 0 (float32)."""
        return np.greater(x, 0).astype(np.float32)

    @staticmethod
    def tanh(x: np.ndarray) -> np.ndarray:
        """Element-wise hyperbolic tangent."""
        return np.tanh(x)

    @staticmethod
    def tanh_derivative(x: np.ndarray) -> np.ndarray:
        """Derivative of tanh: 1 - tanh(x)^2."""
        t = np.tanh(x)
        return 1 - t * t

    @staticmethod
    def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
        """Numerically stable softmax along `axis` (max-shift trick)."""
        shifted = x - x.max(axis=axis, keepdims=True)
        exps = np.exp(shifted)
        return exps / exps.sum(axis=axis, keepdims=True)

    @staticmethod
    def log_softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
        """Numerically stable log-softmax along `axis`."""
        shifted = x - x.max(axis=axis, keepdims=True)
        return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))


class LinearLayer:
    """Fully connected layer with a built-in activation and manual backprop.

    Forward-pass intermediates are cached on the instance so that
    ``backward`` can compute gradients for the weights, the bias, and the
    layer input.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        activation: str = "relu",
        use_bias: bool = True,
    ):
        """
        Initialize the fully connected layer.

        Args:
            input_dim: Input feature dimension.
            output_dim: Output feature dimension.
            activation: Activation type ('relu', 'tanh', 'linear', 'softmax').
            use_bias: Whether to add a bias term.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.activation_type = activation
        self.use_bias = use_bias

        # He (Kaiming) initialization: std = sqrt(2 / fan_in), appropriate for
        # ReLU layers. (The previous comment mislabeled this as Xavier.)
        self.weight = np.random.randn(input_dim, output_dim) * np.sqrt(2.0 / input_dim)

        if use_bias:
            self.bias = np.zeros((1, output_dim))
        else:
            self.bias = None

        # Forward-pass caches used by backward().
        self.last_input = None
        self.last_linear_output = None
        self.last_output = None

        # Gradient caches populated by backward().
        self.weight_gradients = None
        self.bias_gradients = None

    def forward(self, x: np.ndarray) -> np.ndarray:
        """
        Forward pass.

        Args:
            x: Input data, shape (batch_size, input_dim).

        Returns:
            Output data, shape (batch_size, output_dim).

        Raises:
            ValueError: If the configured activation type is unsupported.
        """
        # Cache the input for the backward pass.
        self.last_input = x.copy()

        # Affine transform.
        linear_output = np.dot(x, self.weight)
        if self.use_bias:
            linear_output += self.bias

        self.last_linear_output = linear_output.copy()

        # Apply the activation function.
        if self.activation_type == "relu":
            output = ActivationFunctions.relu(linear_output)
        elif self.activation_type == "tanh":
            output = ActivationFunctions.tanh(linear_output)
        elif self.activation_type == "softmax":
            output = ActivationFunctions.softmax(linear_output)
        elif self.activation_type == "linear":
            output = linear_output
        else:
            raise ValueError(f"Unsupported activation: {self.activation_type}")

        self.last_output = output.copy()
        return output

    def backward(self, grad_output: np.ndarray) -> np.ndarray:
        """
        Backward pass.

        Args:
            grad_output: Gradient flowing in from the next layer,
                shape (batch_size, output_dim).

        Returns:
            Gradient with respect to the layer input,
            shape (batch_size, input_dim).

        Raises:
            ValueError: If the configured activation type is unsupported.
        """
        batch_size = grad_output.shape[0]

        # Gradient through the activation function.
        if self.activation_type == "relu":
            activation_grad = ActivationFunctions.relu_derivative(
                self.last_linear_output
            )
            grad_linear = grad_output * activation_grad
        elif self.activation_type == "tanh":
            activation_grad = ActivationFunctions.tanh_derivative(
                self.last_linear_output
            )
            grad_linear = grad_output * activation_grad
        elif self.activation_type == "softmax":
            grad_linear = self._softmax_backward(grad_output)
        elif self.activation_type == "linear":
            grad_linear = grad_output
        else:
            raise ValueError(f"Unsupported activation: {self.activation_type}")

        # Weight gradient, averaged over the batch.
        self.weight_gradients = np.dot(self.last_input.T, grad_linear) / batch_size

        # Bias gradient, averaged over the batch.
        if self.use_bias:
            self.bias_gradients = np.mean(grad_linear, axis=0, keepdims=True)

        # Gradient to pass to the previous layer.
        grad_input = np.dot(grad_linear, self.weight.T)

        return grad_input

    def _softmax_backward(self, grad_output: np.ndarray) -> np.ndarray:
        """
        Backward pass through softmax as a vectorized Jacobian-vector product.

        For a softmax output s the Jacobian is J_ij = s_i * (delta_ij - s_j),
        so J @ g = s * (g - sum(s * g)). This replaces the previous
        per-sample O(batch * dim^2) explicit-Jacobian loop with one batched
        expression that yields identical results.
        """
        s = self.last_output  # shape: (batch_size, output_dim)
        inner = np.sum(s * grad_output, axis=-1, keepdims=True)
        return s * (grad_output - inner)

    def get_parameters(self) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """Return the (weight, bias) pair; bias is None when unused."""
        return self.weight, self.bias

    def set_parameters(self, weight: np.ndarray, bias: Optional[np.ndarray] = None):
        """Overwrite the layer parameters with copies of the given arrays."""
        self.weight = weight.copy()
        if bias is not None and self.use_bias:
            self.bias = bias.copy()


class NeuralNetwork:
    """Base class for a multi-layer feed-forward network built from LinearLayer."""

    def __init__(
        self, layer_sizes: List[int], activations: List[str], use_bias: bool = True
    ):
        """
        Build the network.

        Args:
            layer_sizes: Neuron count per layer [input_dim, hidden1, ..., output_dim].
            activations: Activation name for each layer transition.
            use_bias: Whether every layer uses a bias term.
        """
        assert len(layer_sizes) >= 2, "至少需要输入层和输出层"
        assert len(activations) == len(layer_sizes) - 1, "激活函数数量应该等于层数 - 1"

        self.layer_sizes = layer_sizes
        self.activations = activations
        # One LinearLayer per consecutive pair of sizes.
        self.layers = [
            LinearLayer(
                input_dim=in_dim,
                output_dim=out_dim,
                activation=act,
                use_bias=use_bias,
            )
            for in_dim, out_dim, act in zip(
                layer_sizes[:-1], layer_sizes[1:], activations
            )
        ]

    def forward(self, x: np.ndarray) -> np.ndarray:
        """Run the input through every layer in order."""
        activation = x
        for lyr in self.layers:
            activation = lyr.forward(activation)
        return activation

    def backward(self, grad_output: np.ndarray) -> np.ndarray:
        """Propagate a gradient backwards through every layer."""
        grad = grad_output
        for lyr in reversed(self.layers):
            grad = lyr.backward(grad)
        return grad

    def get_all_parameters(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:
        """Return (weight, bias) for each layer, in forward order."""
        return [lyr.get_parameters() for lyr in self.layers]

    def set_all_parameters(
        self, parameters: List[Tuple[np.ndarray, Optional[np.ndarray]]]
    ):
        """Load (weight, bias) pairs into the layers, in forward order."""
        assert len(parameters) == len(self.layers), "参数数量与层数不匹配"
        for lyr, (weight, bias) in zip(self.layers, parameters):
            lyr.set_parameters(weight, bias)

    def get_gradients(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:
        """Return (weight_grad, bias_grad) for each layer, in forward order."""
        return [(lyr.weight_gradients, lyr.bias_gradients) for lyr in self.layers]


class PolicyNetwork(NeuralNetwork):
    """Policy network (Actor) for discrete or continuous action spaces."""

    def __init__(
        self,
        state_dim: int,
        action_dim: int,
        hidden_sizes: Optional[List[int]] = None,
        action_type: str = "discrete",
    ):
        """
        Initialize the policy network.

        Args:
            state_dim: State dimension.
            action_dim: Action dimension.
            hidden_sizes: Hidden layer sizes; defaults to [64, 64].
            action_type: Action space type ('discrete' or 'continuous').

        Raises:
            ValueError: If action_type is not supported.
        """
        # Guard against the shared-mutable-default pitfall: the previous
        # default value [64, 64] was a single list shared by every instance.
        if hidden_sizes is None:
            hidden_sizes = [64, 64]

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.action_type = action_type

        # Network layout: state -> hidden layers -> action output.
        layer_sizes = [state_dim] + hidden_sizes + [action_dim]

        if action_type == "discrete":
            # Discrete actions: the final layer emits a probability distribution.
            activations = ["relu"] * len(hidden_sizes) + ["softmax"]
        elif action_type == "continuous":
            # Continuous actions: the network outputs the mean; the standard
            # deviation is a separately learned parameter (log_std below).
            activations = ["relu"] * len(hidden_sizes) + ["tanh"]
        else:
            # Include the offending value (the old f-string had no placeholder).
            raise ValueError(
                f"action_type must be 'discrete' or 'continuous', got {action_type!r}"
            )

        super().__init__(layer_sizes, activations)

        # Extra parameter for continuous action spaces.
        if action_type == "continuous":
            # Learnable log standard deviation, initialized slightly negative
            # (std = exp(-0.5) ≈ 0.61) for moderate initial exploration.
            self.log_std = np.full((1, action_dim), -0.5)
            # Gradient cache for log_std.
            self.log_std_gradients = None

    def get_action_distribution(
        self, states: np.ndarray
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Get the action distribution for a batch of states.

        Args:
            states: States, shape (batch_size, state_dim).

        Returns:
            For discrete actions: (action_probs, None).
            For continuous actions: (action_means, action_stds).
        """
        if self.action_type == "discrete":
            action_probs = self.forward(states)
            return action_probs, None
        else:
            action_means = self.forward(states)
            action_stds = np.exp(self.log_std)  # exp keeps the std positive
            return action_means, action_stds

    def sample_action(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sample actions from the current policy.

        Args:
            states: States, shape (batch_size, state_dim).

        Returns:
            actions: Sampled actions.
            log_probs: Log-probabilities of the sampled actions.
        """
        if self.action_type == "discrete":
            action_probs, _ = self.get_action_distribution(states)

            # Sample one action per row from the categorical distribution.
            batch_size = states.shape[0]
            actions = np.array(
                [
                    np.random.choice(self.action_dim, p=action_probs[i])
                    for i in range(batch_size)
                ]
            )

            # Log-probability of each chosen action; epsilon for stability.
            log_probs = np.log(action_probs[np.arange(batch_size), actions] + 1e-8)

            return actions, log_probs
        else:
            action_means, action_stds = self.get_action_distribution(states)

            # Gaussian sample: mean + std * standard normal noise.
            noise = np.random.normal(0, 1, action_means.shape)
            actions = action_means + action_stds * noise

            # Log-probability under the diagonal Gaussian.
            log_probs = self._compute_log_prob_continuous(
                actions, action_means, action_stds
            )

            return actions, log_probs

    def compute_log_prob(self, states: np.ndarray, actions: np.ndarray) -> np.ndarray:
        """
        Compute the log-probability of given actions under the current policy.

        Args:
            states: States.
            actions: Actions.

        Returns:
            Log-probabilities.
        """
        if self.action_type == "discrete":
            action_probs, _ = self.get_action_distribution(states)
            actions = actions.astype(int)
            log_probs = np.log(action_probs[np.arange(len(actions)), actions] + 1e-8)
            return log_probs
        else:
            action_means, action_stds = self.get_action_distribution(states)
            return self._compute_log_prob_continuous(actions, action_means, action_stds)

    def _compute_log_prob_continuous(
        self, actions: np.ndarray, means: np.ndarray, stds: np.ndarray
    ) -> np.ndarray:
        """Log-density of a diagonal Gaussian; epsilon guards divisions and logs."""
        action_dim = actions.shape[-1]
        log_prob = -0.5 * np.sum(((actions - means) / (stds + 1e-8)) ** 2, axis=-1)
        log_prob -= 0.5 * action_dim * np.log(2 * np.pi)
        log_prob -= np.sum(np.log(stds + 1e-8), axis=-1)
        return log_prob

    def get_all_parameters(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:
        """Return all parameters; for continuous policies log_std is appended last."""
        params = super().get_all_parameters()
        if self.action_type == "continuous":
            params.append((self.log_std, None))  # log_std rides along as an extra pair
        return params

    def set_all_parameters(
        self, parameters: List[Tuple[np.ndarray, Optional[np.ndarray]]]
    ):
        """Set all parameters; for continuous policies the last entry is log_std."""
        if self.action_type == "continuous":
            # The last entry is log_std.
            log_std_param = parameters[-1]
            self.log_std = log_std_param[0].copy()
            # The remaining entries belong to the network layers.
            super().set_all_parameters(parameters[:-1])
        else:
            super().set_all_parameters(parameters)

    def get_gradients(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:
        """Return all gradients; for continuous policies log_std's gradient is last."""
        gradients = super().get_gradients()
        if self.action_type == "continuous":
            gradients.append((self.log_std_gradients, None))
        return gradients

    def update_log_std_gradients(self, grad_log_std: np.ndarray):
        """Store the gradient for log_std (continuous policies only)."""
        if self.action_type == "continuous":
            self.log_std_gradients = grad_log_std


class ValueNetwork(NeuralNetwork):
    """Value network (Critic): maps states to scalar value estimates."""

    def __init__(self, state_dim: int, hidden_sizes: Optional[List[int]] = None):
        """
        Initialize the value network.

        Args:
            state_dim: State dimension.
            hidden_sizes: Hidden layer sizes; defaults to [64, 64].
        """
        # Guard against the shared-mutable-default pitfall: the previous
        # default value [64, 64] was a single list shared by every instance.
        if hidden_sizes is None:
            hidden_sizes = [64, 64]

        self.state_dim = state_dim

        # Network layout: the final layer outputs one scalar value per state.
        layer_sizes = [state_dim] + hidden_sizes + [1]
        activations = ["relu"] * len(hidden_sizes) + ["linear"]

        super().__init__(layer_sizes, activations)

    def estimate_value(self, states: np.ndarray) -> np.ndarray:
        """
        Estimate the value of each state.

        Args:
            states: States, shape (batch_size, state_dim).

        Returns:
            values: State values, shape (batch_size,).
        """
        values = self.forward(states)
        return values.squeeze(-1)  # drop the trailing singleton dimension


class ActorCritic:
    """Combined Actor-Critic network with shared or independent sub-networks."""

    def __init__(
        self,
        state_dim: int,
        action_dim: int,
        hidden_sizes: Optional[List[int]] = None,
        action_type: str = "discrete",
        shared_network: bool = True,
    ):
        """
        Initialize the Actor-Critic network.

        Args:
            state_dim: State dimension.
            action_dim: Action dimension.
            hidden_sizes: Hidden layer sizes; defaults to [64, 64].
            action_type: Action space type ('discrete' or 'continuous').
            shared_network: Whether actor and critic share a feature trunk.
                Independent networks are often more stable; the default stays
                True for backward compatibility.

        Raises:
            ValueError: If shared_network is requested with < 2 hidden layers.
        """
        # Guard against the shared-mutable-default pitfall: the previous
        # default value [64, 64] was a single list shared by every instance.
        if hidden_sizes is None:
            hidden_sizes = [64, 64]

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.action_type = action_type
        self.shared_network = shared_network

        if shared_network:
            if len(hidden_sizes) < 2:
                raise ValueError("共享网络模式至少需要2个隐藏层")

            # Shared feature trunk over ALL hidden layers.
            # Fix: the trunk previously stopped at hidden_sizes[:-1] while both
            # heads were built with input dim hidden_sizes[-1]; for any
            # non-uniform hidden_sizes (e.g. [512, 256, 128]) the head input
            # dimension did not match the trunk output and forward() crashed.
            self.shared_layers = NeuralNetwork(
                layer_sizes=[state_dim] + hidden_sizes,
                activations=["relu"] * len(hidden_sizes),
            )

            # Separate output heads on top of the shared features.
            final_hidden = hidden_sizes[-1]
            if action_type == "discrete":
                self.policy_head = LinearLayer(final_hidden, action_dim, "softmax")
            else:
                self.policy_head = LinearLayer(final_hidden, action_dim, "tanh")
                # Learnable log standard deviation for continuous actions.
                self.log_std = np.full((1, action_dim), -0.5)
                self.log_std_gradients = None

            self.value_head = LinearLayer(final_hidden, 1, "linear")
        else:
            # Fully independent actor and critic networks.
            self.policy_network = PolicyNetwork(
                state_dim, action_dim, hidden_sizes, action_type
            )
            self.value_network = ValueNetwork(state_dim, hidden_sizes)

    def forward(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Forward pass producing both the policy output and value estimates.

        Returns:
            policy_output: Policy network output.
            values: Value estimates, shape (batch_size,).
        """
        if self.shared_network:
            shared_features = self.shared_layers.forward(states)
            policy_output = self.policy_head.forward(shared_features)
            values = self.value_head.forward(shared_features).squeeze(-1)
        else:
            # Delegate to the independent networks.
            policy_output, _ = self.policy_network.get_action_distribution(states)
            values = self.value_network.estimate_value(states)
        return policy_output, values

    def get_action_distribution(
        self, states: np.ndarray
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """Return the action distribution (probs or mean/std, see PolicyNetwork)."""
        if self.shared_network:
            if self.action_type == "discrete":
                policy_output, _ = self.forward(states)
                return policy_output, None
            else:
                policy_output, _ = self.forward(states)
                action_stds = np.exp(self.log_std)
                return policy_output, action_stds
        else:
            return self.policy_network.get_action_distribution(states)

    def sample_action(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Sample actions and their log-probabilities for a batch of states."""
        if self.shared_network:
            if self.action_type == "discrete":
                action_probs, _ = self.get_action_distribution(states)
                batch_size = states.shape[0]
                actions = np.array([
                    np.random.choice(self.action_dim, p=action_probs[i])
                    for i in range(batch_size)
                ])
                log_probs = np.log(action_probs[np.arange(batch_size), actions] + 1e-8)
                return actions, log_probs
            else:
                action_means, action_stds = self.get_action_distribution(states)
                noise = np.random.normal(0, 1, action_means.shape)
                actions = action_means + action_stds * noise
                log_probs = self._compute_log_prob_continuous(actions, action_means, action_stds)
                return actions, log_probs
        else:
            return self.policy_network.sample_action(states)

    def compute_log_prob(self, states: np.ndarray, actions: np.ndarray) -> np.ndarray:
        """Compute log-probabilities of given actions under the current policy."""
        if self.shared_network:
            if self.action_type == "discrete":
                action_probs, _ = self.get_action_distribution(states)
                actions = actions.astype(int)
                log_probs = np.log(action_probs[np.arange(len(actions)), actions] + 1e-8)
                return log_probs
            else:
                action_means, action_stds = self.get_action_distribution(states)
                return self._compute_log_prob_continuous(actions, action_means, action_stds)
        else:
            return self.policy_network.compute_log_prob(states, actions)

    def _compute_log_prob_continuous(
        self, actions: np.ndarray, means: np.ndarray, stds: np.ndarray
    ) -> np.ndarray:
        """Log-density of a diagonal Gaussian; epsilon guards divisions and logs."""
        action_dim = actions.shape[-1]
        log_prob = -0.5 * np.sum(((actions - means) / (stds + 1e-8)) ** 2, axis=-1)
        log_prob -= 0.5 * action_dim * np.log(2 * np.pi)
        log_prob -= np.sum(np.log(stds + 1e-8), axis=-1)
        return log_prob

    def get_all_parameters(self):
        """Return all parameters (flat list in shared mode, dict otherwise)."""
        if self.shared_network:
            params = []
            params.extend(self.shared_layers.get_all_parameters())
            params.append(self.policy_head.get_parameters())
            params.append(self.value_head.get_parameters())
            if self.action_type == "continuous":
                params.append((self.log_std, None))
            return params
        else:
            policy_params = self.policy_network.get_all_parameters()
            value_params = self.value_network.get_all_parameters()
            return {"policy": policy_params, "value": value_params}

    def set_all_parameters(self, parameters):
        """Set all parameters, mirroring the layout of get_all_parameters()."""
        if self.shared_network:
            if self.action_type == "continuous":
                # The last entry is log_std.
                log_std_param = parameters[-1]
                self.log_std = log_std_param[0].copy()
                remaining_params = parameters[:-1]
            else:
                remaining_params = parameters

            # Distribute the remaining entries: trunk first, then the heads.
            shared_param_count = len(self.shared_layers.layers)
            self.shared_layers.set_all_parameters(remaining_params[:shared_param_count])
            self.policy_head.set_parameters(*remaining_params[shared_param_count])
            self.value_head.set_parameters(*remaining_params[shared_param_count + 1])
        else:
            self.policy_network.set_all_parameters(parameters["policy"])
            self.value_network.set_all_parameters(parameters["value"])

    def get_gradients(self):
        """Return all gradients, mirroring the layout of get_all_parameters()."""
        if self.shared_network:
            gradients = []
            gradients.extend(self.shared_layers.get_gradients())
            # Fix: this previously appended the heads' *parameters* instead of
            # their cached gradients.
            gradients.append(
                (self.policy_head.weight_gradients, self.policy_head.bias_gradients)
            )
            gradients.append(
                (self.value_head.weight_gradients, self.value_head.bias_gradients)
            )
            if self.action_type == "continuous":
                gradients.append((self.log_std_gradients, None))
            return gradients
        else:
            policy_grads = self.policy_network.get_gradients()
            value_grads = self.value_network.get_gradients()
            return {"policy": policy_grads, "value": value_grads}


# Utility: global-norm gradient clipping.
def clip_gradients(gradients: List[Tuple[np.ndarray, Optional[np.ndarray]]], 
                  max_norm: float = 1.0) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:
    """
    Clip gradients by their global L2 norm to guard against explosion.

    Args:
        gradients: List of (weight_grad, bias_grad) pairs; entries may be None.
        max_norm: Maximum allowed global gradient norm.

    Returns:
        A rescaled copy of the gradients if the global norm exceeded
        max_norm, otherwise the input list unchanged.
    """
    # Global squared L2 norm over every non-None gradient array.
    squared_sum = sum(
        np.sum(g ** 2)
        for pair in gradients
        for g in pair
        if g is not None
    )
    global_norm = np.sqrt(squared_sum)

    # Within budget: hand the list back untouched.
    if global_norm <= max_norm:
        return gradients

    # Rescale every gradient by the same factor so directions are preserved.
    scale = max_norm / global_norm
    return [
        (
            None if w_grad is None else w_grad * scale,
            None if b_grad is None else b_grad * scale,
        )
        for w_grad, b_grad in gradients
    ]


# Lightweight smoke test for the module.
def test_networks():
    """Smoke-test the basic functionality of every network class."""
    print("🧪 测试神经网络...")

    # Test configuration.
    state_dim = 10
    action_dim = 4
    batch_size = 32

    # Random batch of states as test input.
    sample_states = np.random.randn(batch_size, state_dim)

    # Policy network, discrete actions.
    print("✅ 测试离散动作策略网络...")
    discrete_policy = PolicyNetwork(state_dim, action_dim, [32, 32], "discrete")
    actions_discrete, log_probs_discrete = discrete_policy.sample_action(sample_states)
    print(f"离散动作形状: {actions_discrete.shape}, 对数概率形状: {log_probs_discrete.shape}")

    # Policy network, continuous actions.
    print("✅ 测试连续动作策略网络...")
    continuous_policy = PolicyNetwork(state_dim, action_dim, [32, 32], "continuous")
    actions_continuous, log_probs_continuous = continuous_policy.sample_action(sample_states)
    print(f"连续动作形状: {actions_continuous.shape}, 对数概率形状: {log_probs_continuous.shape}")

    # Value network.
    print("✅ 测试价值网络...")
    critic = ValueNetwork(state_dim, [32, 32])
    values = critic.estimate_value(sample_states)
    print(f"价值估计形状: {values.shape}")

    # Actor-Critic with independent sub-networks.
    print("✅ 测试Actor-Critic网络...")
    actor_critic = ActorCritic(state_dim, action_dim, [32, 32], "continuous", shared_network=False)
    ac_actions, ac_log_probs = actor_critic.sample_action(sample_states)
    policy_output, ac_values = actor_critic.forward(sample_states)
    print(f"AC动作形状: {ac_actions.shape}, AC价值形状: {ac_values.shape}")

    # Forward + backward through the critic with an MSE-style gradient.
    print("✅ 测试前向和反向传播...")
    target_values = np.random.randn(batch_size)  # fake value targets
    value_loss_grad = 2 * (ac_values - target_values) / batch_size  # d(MSE)/d(values)

    # Only the independent-network configuration exposes value_network.
    if hasattr(actor_critic, 'value_network'):
        _ = actor_critic.value_network.backward(value_loss_grad.reshape(-1, 1))
        gradients = actor_critic.value_network.get_gradients()
        print("✅ 梯度计算成功")

    print("🎉 所有测试通过！")


if __name__ == "__main__":
    # Run the smoke tests first.
    test_networks()

    # Example: build a PPO actor-critic network for a humanoid robot.
    print("\n🤖 创建人型机器人PPO网络示例...")

    # Humanoid robot dimensions.
    HUMANOID_STATE_DIM = 376    # observation size (positions, velocities, angles, ...)
    HUMANOID_ACTION_DIM = 22    # one action per joint (22 joints)
    HIDDEN_SIZES = [512, 256, 128]  # larger capacity for the harder task

    # Build the Actor-Critic network.
    humanoid_ac = ActorCritic(
        state_dim=HUMANOID_STATE_DIM,
        action_dim=HUMANOID_ACTION_DIM, 
        hidden_sizes=HIDDEN_SIZES,
        action_type="continuous",
        shared_network=False  # independent networks are more stable
    )

    # Simulate one batch of states.
    batch_states = np.random.randn(64, HUMANOID_STATE_DIM)

    # Sample actions.
    sampled_actions, action_log_probs = humanoid_ac.sample_action(batch_states)

    # Estimate values.
    _, estimated_values = humanoid_ac.forward(batch_states)

    print("✅ 人型机器人网络创建成功!")
    print(f"   - 状态维度: {HUMANOID_STATE_DIM}")
    print(f"   - 动作维度: {HUMANOID_ACTION_DIM}")
    print(f"   - 网络结构: {HIDDEN_SIZES}")
    print(f"   - 采样动作形状: {sampled_actions.shape}")
    print(f"   - 动作范围: [{sampled_actions.min():.3f}, {sampled_actions.max():.3f}]")
    print(f"   - 价值估计形状: {estimated_values.shape}")
    print(f"   - 价值范围: [{estimated_values.min():.3f}, {estimated_values.max():.3f}]")

    # Parameter statistics.
    # Fix: fetch the parameters once instead of calling get_all_parameters()
    # three times (each call rebuilds the full parameter list).
    all_params = humanoid_ac.get_all_parameters()
    if isinstance(all_params, dict):

        def _param_count(pairs):
            # Total element count across (weight, bias) pairs; bias may be None.
            return sum(w.size + (b.size if b is not None else 0) for w, b in pairs)

        total_policy_params = _param_count(all_params["policy"])
        total_value_params = _param_count(all_params["value"])

        print(f"   - 策略网络参数: {total_policy_params:,}")
        print(f"   - 价值网络参数: {total_value_params:,}")
        print(f"   - 总参数量: {total_policy_params + total_value_params:,}")

    print("\n🚀 网络架构模块已完成，准备实现轨迹缓冲区模块!")
