{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "771955b1",
   "metadata": {},
   "source": [
    "# 基础神经网络组件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "3fd6f044",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from typing import List, Optional, Tuple, Union\n",
    "from abc import ABC, abstractmethod"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1e916c6e",
   "metadata": {},
   "source": [
    "## 激活函数集合\n",
    "### 1. ReLU激活函数\n",
    "会将小于等于 0 的数转换为 0， 大于 0 的数等于数本身\n",
    "### 2. ReLU导数\n",
    "把一个任意数组成的数组转换成 0 和 1 组成的浮点数数组\n",
    "### 3. Tanh激活函数\n",
    "![alt text](<../pics/tanh.webp>)\n",
    "将输入的值归一化到 -1 和 1 之间\n",
    "### 4. Tanh导数\n",
    "### 5. Softmax激活函数\n",
    "将输入的数组归一化到 0 和 1 之间，且值越大，概率越大，所有概率的和为 1\n",
    "优化后的方案是将 数组中的每个元素减去数组中的最大值，然后取e的幂级数，最后除以所有幂级数的和\n",
    "### 6. LogSoftmax\n",
    "对 Softmax 函数的输出取对数\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18ea9997",
   "metadata": {},
   "source": [
    "# 激活函数集合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "bb083b4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ActivationFunctions:\n",
    "    \"\"\" 激活函数集合 \"\"\"\n",
    "    \n",
    "    @staticmethod\n",
    "    def relu(x: np.ndarray) -> np.ndarray:\n",
    "        return np.maximum(0, x)\n",
    "    \n",
    "    @staticmethod\n",
    "    def relu_derivative(x: np.ndarray) -> np.ndarray:\n",
    "        return (x > 0).astype(np.float32)\n",
    "    \n",
    "    @staticmethod\n",
    "    def tanh(x: np.ndarray) -> np.ndarray:\n",
    "        return np.tanh(x)\n",
    "    \n",
    "    @staticmethod\n",
    "    def tanh_derivative(x: np.ndarray) -> np.ndarray:\n",
    "        return 1 - np.tanh(x) ** 2\n",
    "    \n",
    "    @staticmethod\n",
    "    def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:\n",
    "        x_shifted = x - np.max(x ,axis=axis, keepdims=True)\n",
    "        exp_x = np.exp(x_shifted)\n",
    "        return exp_x / np.sum(exp_x, axis=axis, keepdims=True)\n",
    "    \n",
    "    @staticmethod\n",
    "    def log_softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:\n",
    "        x_shifted = x - np.max(x ,axis=axis, keepdims=True)\n",
    "        return x_shifted - np.log(np.sum(np.exp(x_shifted), axis=axis, keepdims=True))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4da5124",
   "metadata": {},
   "source": [
    "# 全连接层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "7aad6c41",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LinearLayer:\n",
    "    \"\"\"Fully connected layer with a built-in activation and manual backprop.\n",
    "\n",
    "    Forward intermediates are cached on the instance so that `backward`\n",
    "    can compute weight/bias gradients and the gradient w.r.t. the input.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dim: int, output_dim: int,\n",
    "                 activation: str = 'relu', use_bias: bool = True):\n",
    "        \"\"\"\n",
    "        Initialize the fully connected layer.\n",
    "\n",
    "        Args:\n",
    "            input_dim: input feature dimension\n",
    "            output_dim: output feature dimension\n",
    "            activation: activation type ('relu', 'tanh', 'linear', 'softmax')\n",
    "            use_bias: whether to add a bias term\n",
    "        \"\"\"\n",
    "        self.input_dim = input_dim\n",
    "        self.output_dim = output_dim\n",
    "        self.activation_type = activation\n",
    "        self.use_bias = use_bias\n",
    "\n",
    "        # He (Kaiming) initialization: std = sqrt(2 / fan_in), suited to ReLU\n",
    "        # layers. (Note: sqrt(2 / fan_in) is He init, not Xavier.)\n",
    "        self.weight = np.random.randn(input_dim, output_dim) * np.sqrt(2.0 / input_dim)\n",
    "\n",
    "        if use_bias:\n",
    "            self.bias = np.zeros((1, output_dim))\n",
    "        else:\n",
    "            self.bias = None\n",
    "\n",
    "        # Forward-pass intermediates cached for backward()\n",
    "        self.last_input = None\n",
    "        self.last_linear_output = None\n",
    "        self.last_output = None\n",
    "\n",
    "        # Gradient buffers populated by backward()\n",
    "        self.weight_gradients = None\n",
    "        self.bias_gradients = None\n",
    "\n",
    "    def forward(self, x: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        Forward pass.\n",
    "\n",
    "        Args:\n",
    "            x: input data, shape (batch_size, input_dim)\n",
    "\n",
    "        Returns:\n",
    "            Output data, shape (batch_size, output_dim)\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if the configured activation type is unknown.\n",
    "        \"\"\"\n",
    "        # Cache the input for backward()\n",
    "        self.last_input = x.copy()\n",
    "\n",
    "        # Linear transform: x @ W (+ b)\n",
    "        linear_output = np.dot(x, self.weight)\n",
    "        if self.use_bias:\n",
    "            linear_output += self.bias\n",
    "\n",
    "        self.last_linear_output = linear_output.copy()\n",
    "\n",
    "        # Apply the activation function\n",
    "        if self.activation_type == 'relu':\n",
    "            output = ActivationFunctions.relu(linear_output)\n",
    "        elif self.activation_type == 'tanh':\n",
    "            output = ActivationFunctions.tanh(linear_output)\n",
    "        elif self.activation_type == 'softmax':\n",
    "            output = ActivationFunctions.softmax(linear_output)\n",
    "        elif self.activation_type == 'linear':\n",
    "            output = linear_output\n",
    "        else:\n",
    "            raise ValueError(f\"Unsupported activation: {self.activation_type}\")\n",
    "\n",
    "        self.last_output = output.copy()\n",
    "        return output\n",
    "\n",
    "    def backward(self, grad_output: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        Backward pass.\n",
    "\n",
    "        Args:\n",
    "            grad_output: gradient from the next layer, shape (batch_size, output_dim)\n",
    "\n",
    "        Returns:\n",
    "            Gradient for the previous layer, shape (batch_size, input_dim)\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if the configured activation type is unknown.\n",
    "        \"\"\"\n",
    "        batch_size = grad_output.shape[0]\n",
    "\n",
    "        # Gradient through the activation function\n",
    "        if self.activation_type == \"relu\":\n",
    "            activation_grad = ActivationFunctions.relu_derivative(\n",
    "                self.last_linear_output\n",
    "            )\n",
    "            grad_linear = grad_output * activation_grad\n",
    "        elif self.activation_type == \"tanh\":\n",
    "            activation_grad = ActivationFunctions.tanh_derivative(\n",
    "                self.last_linear_output\n",
    "            )\n",
    "            grad_linear = grad_output * activation_grad\n",
    "        elif self.activation_type == \"softmax\":\n",
    "            grad_linear = self._softmax_backward(grad_output)\n",
    "        elif self.activation_type == \"linear\":\n",
    "            grad_linear = grad_output\n",
    "        else:\n",
    "            raise ValueError(f\"Unsupported activation: {self.activation_type}\")\n",
    "\n",
    "        # Weight gradient, averaged over the batch\n",
    "        self.weight_gradients = np.dot(self.last_input.T, grad_linear) / batch_size\n",
    "\n",
    "        # Bias gradient (mean over the batch)\n",
    "        if self.use_bias:\n",
    "            self.bias_gradients = np.mean(grad_linear, axis=0, keepdims=True)\n",
    "\n",
    "        # Gradient w.r.t. the layer input\n",
    "        grad_input = np.dot(grad_linear, self.weight.T)\n",
    "\n",
    "        return grad_input\n",
    "\n",
    "    def _softmax_backward(self, grad_output: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        Softmax backward pass (vectorized).\n",
    "\n",
    "        For a softmax output row s and upstream gradient row g, the\n",
    "        Jacobian-vector product (diag(s) - s s^T) @ g simplifies to\n",
    "            s * (g - sum(g * s)),\n",
    "        which avoids materializing a (dim x dim) Jacobian per sample as the\n",
    "        previous per-sample Python loop did.\n",
    "        \"\"\"\n",
    "        s = self.last_output  # shape: (batch_size, output_dim)\n",
    "        return s * (grad_output - np.sum(grad_output * s, axis=-1, keepdims=True))\n",
    "\n",
    "    def get_parameters(self) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n",
    "        \"\"\"Return (weight, bias); bias is None when use_bias is False.\"\"\"\n",
    "        return self.weight, self.bias\n",
    "\n",
    "    def set_parameters(self, weight: np.ndarray, bias: Optional[np.ndarray] = None):\n",
    "        \"\"\"Overwrite the layer parameters with copies of the given arrays.\"\"\"\n",
    "        self.weight = weight.copy()\n",
    "        if bias is not None and self.use_bias:\n",
    "            self.bias = bias.copy()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "36b6a0f4",
   "metadata": {},
   "source": [
    "## 测试激活函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "e49fd3cc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=== 测试激活函数 ===\n",
      "输入: [[-1  0  1  2]\n",
      " [-2 -1  0  1]]\n",
      "ReLU: [[0 0 1 2]\n",
      " [0 0 0 1]]\n",
      "Tanh: [[-0.76159416  0.          0.76159416  0.96402758]\n",
      " [-0.96402758 -0.76159416  0.          0.76159416]]\n",
      "Softmax: [[0.0320586  0.08714432 0.23688282 0.64391426]\n",
      " [0.0320586  0.08714432 0.23688282 0.64391426]]\n",
      "\n",
      "=== 测试全连接层 ===\n",
      "前向传播输出形状: (2, 3)\n",
      "前向传播输出: [[0.86572312 0.         0.68808047]\n",
      " [2.34940453 0.         1.41317139]]\n",
      "反向传播梯度形状: (2, 4)\n",
      "权重梯度形状: (4, 3)\n",
      "偏置梯度形状: (1, 3)\n"
     ]
    }
   ],
   "source": [
    "# 测试激活函数\n",
    "print(\"=== 测试激活函数 ===\")\n",
    "x = np.array([[-1, 0, 1, 2], [-2, -1, 0, 1]])\n",
    "print(\"输入:\", x)\n",
    "print(\"ReLU:\", ActivationFunctions.relu(x))\n",
    "print(\"Tanh:\", ActivationFunctions.tanh(x))\n",
    "print(\"Softmax:\", ActivationFunctions.softmax(x))\n",
    "\n",
    "# 测试全连接层\n",
    "print(\"\\n=== 测试全连接层 ===\")\n",
    "layer = LinearLayer(input_dim=4, output_dim=3, activation='relu')\n",
    "\n",
    "# 前向传播\n",
    "output = layer.forward(x)\n",
    "print(\"前向传播输出形状:\", output.shape)\n",
    "print(\"前向传播输出:\", output)\n",
    "\n",
    "# 反向传播\n",
    "grad_output = np.random.randn(2, 3)\n",
    "grad_input = layer.backward(grad_output)\n",
    "print(\"反向传播梯度形状:\", grad_input.shape)\n",
    "print(\"权重梯度形状:\", layer.weight_gradients.shape)\n",
    "print(\"偏置梯度形状:\", layer.bias_gradients.shape if layer.bias_gradients is not None else None)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84a67605",
   "metadata": {},
   "source": [
    "# 多层神经网络基类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "56965ee6",
   "metadata": {},
   "outputs": [],
   "source": [
    "class NeuralNetwork:\n",
    "    \"\"\"Multi-layer feed-forward network built by stacking LinearLayer objects.\"\"\"\n",
    "\n",
    "    def __init__(self, layer_sizes: List[int], activations: List[str],\n",
    "                 use_bias: bool = True):\n",
    "        \"\"\"\n",
    "        Build the network.\n",
    "\n",
    "        Args:\n",
    "            layer_sizes: neuron counts per layer [input_dim, hidden1, hidden2, ..., output_dim]\n",
    "            activations: activation name for each layer transition ('relu', 'tanh', 'linear', 'softmax')\n",
    "            use_bias: whether every layer uses a bias term\n",
    "        \"\"\"\n",
    "        assert len(layer_sizes) >= 2, \"至少需要输入层和输出层\"\n",
    "        assert len(activations) == len(layer_sizes) - 1, \"激活函数数量应该等于层数 - 1\"\n",
    "\n",
    "        self.layer_sizes = layer_sizes\n",
    "        self.activations = activations\n",
    "\n",
    "        # One LinearLayer per consecutive pair of sizes.\n",
    "        self.layers = [\n",
    "            LinearLayer(\n",
    "                input_dim=in_dim,\n",
    "                output_dim=out_dim,\n",
    "                activation=act,\n",
    "                use_bias=use_bias,\n",
    "            )\n",
    "            for in_dim, out_dim, act in zip(layer_sizes[:-1], layer_sizes[1:], activations)\n",
    "        ]\n",
    "\n",
    "    def forward(self, x: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"Run the input through every layer in order and return the final output.\"\"\"\n",
    "        result = x\n",
    "        for layer in self.layers:\n",
    "            result = layer.forward(result)\n",
    "        return result\n",
    "\n",
    "    def backward(self, grad_output: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"Propagate the gradient through the layers in reverse order.\"\"\"\n",
    "        grad = grad_output\n",
    "        for layer in reversed(self.layers):\n",
    "            grad = layer.backward(grad)\n",
    "        return grad\n",
    "\n",
    "    def get_all_parameters(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:\n",
    "        \"\"\"Return a (weight, bias) tuple for every layer.\"\"\"\n",
    "        return [layer.get_parameters() for layer in self.layers]\n",
    "\n",
    "    def set_all_parameters(self, parameters: List[Tuple[np.ndarray, Optional[np.ndarray]]]):\n",
    "        \"\"\"Load (weight, bias) pairs into the layers, one pair per layer.\"\"\"\n",
    "        assert len(parameters) == len(self.layers), \"参数数量与层数不匹配\"\n",
    "        for layer, (weight, bias) in zip(self.layers, parameters):\n",
    "            layer.set_parameters(weight, bias)\n",
    "\n",
    "    def get_gradients(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:\n",
    "        \"\"\"Return a (weight_grad, bias_grad) tuple for every layer.\"\"\"\n",
    "        return [(layer.weight_gradients, layer.bias_gradients) for layer in self.layers]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c3bb775b",
   "metadata": {},
   "source": [
    "# Actor 策略网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "fe39a200",
   "metadata": {},
   "outputs": [],
   "source": [
    "class PolicyNetwork(NeuralNetwork):\n",
    "    \"\"\"Policy network (Actor) for discrete or continuous action spaces.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        state_dim: int,\n",
    "        action_dim: int,\n",
    "        hidden_sizes: List[int] = [64, 64],\n",
    "        action_type: str = \"discrete\",\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Initialize the policy network.\n",
    "\n",
    "        Args:\n",
    "            state_dim: state dimension\n",
    "            action_dim: action dimension\n",
    "            hidden_sizes: hidden layer sizes\n",
    "            action_type: action type ('discrete' or 'continuous')\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if action_type is neither 'discrete' nor 'continuous'.\n",
    "        \"\"\"\n",
    "        self.state_dim = state_dim\n",
    "        self.action_dim = action_dim\n",
    "        self.action_type = action_type\n",
    "\n",
    "        # Network topology: state -> hidden layers -> action output\n",
    "        layer_sizes = [state_dim] + hidden_sizes + [action_dim]\n",
    "\n",
    "        if action_type == \"discrete\":\n",
    "            # Discrete actions: softmax head produces a probability vector.\n",
    "            activations = [\"relu\"] * (len(hidden_sizes)) + [\"softmax\"]\n",
    "        elif action_type == \"continuous\":\n",
    "            # Continuous actions: tanh head outputs the action mean; the\n",
    "            # standard deviation is learned separately via log_std below.\n",
    "            activations = [\"relu\"] * (len(hidden_sizes)) + [\"tanh\"]\n",
    "        else:\n",
    "            # Include the offending value so the error is actionable\n",
    "            # (the previous f-string had no placeholder at all).\n",
    "            raise ValueError(\n",
    "                f\"action_type must be 'discrete' or 'continuous', got {action_type!r}\"\n",
    "            )\n",
    "\n",
    "        super().__init__(layer_sizes, activations)\n",
    "\n",
    "        # Extra parameters for continuous action spaces\n",
    "        if action_type == \"continuous\":\n",
    "            # Learnable log standard deviation, initialized slightly negative\n",
    "            # so the initial std (= exp(-0.5)) is below 1.\n",
    "            self.log_std = np.full((1, action_dim), -0.5)\n",
    "            # Gradient buffer for log_std\n",
    "            self.log_std_gradients = None\n",
    "\n",
    "    def get_action_distribution(\n",
    "        self, states: np.ndarray\n",
    "    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n",
    "        \"\"\"\n",
    "        Get the action distribution for a batch of states.\n",
    "\n",
    "        Args:\n",
    "            states: states, shape (batch_size, state_dim)\n",
    "\n",
    "        Returns:\n",
    "            Discrete actions: (action_probs, None)\n",
    "            Continuous actions: (action_means, action_stds)\n",
    "        \"\"\"\n",
    "        if self.action_type == \"discrete\":\n",
    "            action_probs = self.forward(states)\n",
    "            return action_probs, None\n",
    "        else:\n",
    "            action_means = self.forward(states)\n",
    "            action_stds = np.exp(self.log_std)  # exp keeps the std strictly positive\n",
    "            return action_means, action_stds\n",
    "\n",
    "    def sample_action(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n",
    "        \"\"\"\n",
    "        Sample actions from the current policy.\n",
    "\n",
    "        Args:\n",
    "            states: states, shape (batch_size, state_dim)\n",
    "\n",
    "        Returns:\n",
    "            actions: sampled actions\n",
    "            log_probs: log-probabilities of the sampled actions\n",
    "        \"\"\"\n",
    "        if self.action_type == \"discrete\":\n",
    "            action_probs, _ = self.get_action_distribution(states)\n",
    "\n",
    "            # Sample one action per state from the categorical distribution\n",
    "            batch_size = states.shape[0]\n",
    "            actions = np.array(\n",
    "                [\n",
    "                    np.random.choice(self.action_dim, p=action_probs[i])\n",
    "                    for i in range(batch_size)\n",
    "                ]\n",
    "            )\n",
    "\n",
    "            # Log-probabilities with a small epsilon for numerical stability\n",
    "            log_probs = np.log(action_probs[np.arange(batch_size), actions] + 1e-8)\n",
    "\n",
    "            return actions, log_probs\n",
    "        else:\n",
    "            action_means, action_stds = self.get_action_distribution(states)\n",
    "\n",
    "            # Reparameterized Gaussian sampling: mean + std * noise\n",
    "            noise = np.random.normal(0, 1, action_means.shape)\n",
    "            actions = action_means + action_stds * noise\n",
    "\n",
    "            # Log-probability under the diagonal Gaussian\n",
    "            log_probs = self._compute_log_prob_continuous(\n",
    "                actions, action_means, action_stds\n",
    "            )\n",
    "\n",
    "            return actions, log_probs\n",
    "\n",
    "    def compute_log_prob(self, states: np.ndarray, actions: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        Compute log-probabilities of given actions in given states.\n",
    "\n",
    "        Args:\n",
    "            states: states\n",
    "            actions: actions\n",
    "\n",
    "        Returns:\n",
    "            log-probabilities\n",
    "        \"\"\"\n",
    "        if self.action_type == \"discrete\":\n",
    "            action_probs, _ = self.get_action_distribution(states)\n",
    "            actions = actions.astype(int)\n",
    "            log_probs = np.log(action_probs[np.arange(len(actions)), actions] + 1e-8)\n",
    "            return log_probs\n",
    "        else:\n",
    "            action_means, action_stds = self.get_action_distribution(states)\n",
    "            return self._compute_log_prob_continuous(actions, action_means, action_stds)\n",
    "\n",
    "    def _compute_log_prob_continuous(\n",
    "        self, actions: np.ndarray, means: np.ndarray, stds: np.ndarray\n",
    "    ) -> np.ndarray:\n",
    "        \"\"\"Log-density of a diagonal Gaussian, summed over action dimensions.\"\"\"\n",
    "        action_dim = actions.shape[-1]\n",
    "        # Epsilon guards against division by / log of a zero std\n",
    "        log_prob = -0.5 * np.sum(((actions - means) / (stds + 1e-8)) ** 2, axis=-1)\n",
    "        log_prob -= 0.5 * action_dim * np.log(2 * np.pi)\n",
    "        log_prob -= np.sum(np.log(stds + 1e-8), axis=-1)\n",
    "        return log_prob\n",
    "\n",
    "    def get_all_parameters(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:\n",
    "        \"\"\"Get all parameters; for continuous policies log_std is appended last.\"\"\"\n",
    "        params = super().get_all_parameters()\n",
    "        if self.action_type == \"continuous\":\n",
    "            params.append((self.log_std, None))  # log_std as an extra parameter\n",
    "        return params\n",
    "\n",
    "    def set_all_parameters(\n",
    "        self, parameters: List[Tuple[np.ndarray, Optional[np.ndarray]]]\n",
    "    ):\n",
    "        \"\"\"Set all parameters; for continuous policies the last entry is log_std.\"\"\"\n",
    "        if self.action_type == \"continuous\":\n",
    "            # The last entry is log_std\n",
    "            log_std_param = parameters[-1]\n",
    "            self.log_std = log_std_param[0].copy()\n",
    "            # The remaining entries are the network layer parameters\n",
    "            super().set_all_parameters(parameters[:-1])\n",
    "        else:\n",
    "            super().set_all_parameters(parameters)\n",
    "\n",
    "    def get_gradients(self) -> List[Tuple[np.ndarray, Optional[np.ndarray]]]:\n",
    "        \"\"\"Get all gradients; for continuous policies the log_std gradient is appended.\"\"\"\n",
    "        gradients = super().get_gradients()\n",
    "        if self.action_type == \"continuous\":\n",
    "            gradients.append((self.log_std_gradients, None))\n",
    "        return gradients\n",
    "\n",
    "    def update_log_std_gradients(self, grad_log_std: np.ndarray):\n",
    "        \"\"\"Store the gradient for the log_std parameter.\"\"\"\n",
    "        if self.action_type == \"continuous\":\n",
    "            self.log_std_gradients = grad_log_std"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "efebe3d9",
   "metadata": {},
   "source": [
    "# Critic价值网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "dcf49c24",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ValueNetwork(NeuralNetwork):\n",
    "    \"\"\"Value network (Critic): maps states to scalar value estimates.\"\"\"\n",
    "\n",
    "    def __init__(self, state_dim: int, hidden_sizes: List[int] = [64, 64]):\n",
    "        \"\"\"\n",
    "        Build the value network.\n",
    "\n",
    "        Args:\n",
    "            state_dim: state dimension\n",
    "            hidden_sizes: hidden layer sizes\n",
    "        \"\"\"\n",
    "        self.state_dim = state_dim\n",
    "\n",
    "        # ReLU hidden layers followed by a single linear output unit.\n",
    "        layer_sizes = [state_dim] + hidden_sizes + [1]\n",
    "        activations = ['relu'] * len(hidden_sizes) + ['linear']\n",
    "\n",
    "        super().__init__(layer_sizes, activations)\n",
    "\n",
    "    def estimate_value(self, states: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"\n",
    "        Estimate state values.\n",
    "\n",
    "        Args:\n",
    "            states: states, shape (batch_size, state_dim)\n",
    "\n",
    "        Returns:\n",
    "            values: state values, shape (batch_size,)\n",
    "        \"\"\"\n",
    "        # Drop the trailing singleton dimension of the (batch_size, 1) output.\n",
    "        return self.forward(states).squeeze(-1)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "96fcd19f",
   "metadata": {},
   "source": [
    "# Actor-Critic组合网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "838bb466",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ActorCritic:\n",
    "    \"\"\"Actor-Critic combination: a policy head and a value head that either\n",
    "    share a feature trunk or live in two fully independent networks.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        state_dim: int,\n",
    "        action_dim: int,\n",
    "        hidden_sizes: List[int] = [64, 64],\n",
    "        action_type: str = \"discrete\",\n",
    "        shared_network: bool = True,  # default shares the trunk; pass False for independent networks\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Initialize the Actor-Critic network.\n",
    "\n",
    "        Args:\n",
    "            state_dim: state dimension\n",
    "            action_dim: action dimension\n",
    "            hidden_sizes: hidden layer sizes\n",
    "            action_type: action type ('discrete' or 'continuous')\n",
    "            shared_network: whether actor and critic share the trunk layers\n",
    "\n",
    "        Raises:\n",
    "            ValueError: in shared mode when fewer than 2 hidden layers are given.\n",
    "        \"\"\"\n",
    "        self.state_dim = state_dim\n",
    "        self.action_dim = action_dim\n",
    "        self.action_type = action_type\n",
    "        self.shared_network = shared_network\n",
    "\n",
    "        if shared_network:\n",
    "            # Shared feature-extraction trunk built from all but the last hidden size\n",
    "            if len(hidden_sizes) < 2:\n",
    "                raise ValueError(\"共享网络模式至少需要2个隐藏层\")\n",
    "\n",
    "            self.shared_layers = NeuralNetwork(\n",
    "                layer_sizes=[state_dim] + hidden_sizes[:-1],\n",
    "                activations=[\"relu\"] * (len(hidden_sizes) - 1),\n",
    "            )\n",
    "\n",
    "            # The heads consume the trunk output, whose width is hidden_sizes[-2]\n",
    "            # (the last entry of hidden_sizes[:-1]). Using hidden_sizes[-1] here\n",
    "            # crashed whenever the hidden sizes were not all equal.\n",
    "            final_hidden = hidden_sizes[-2]\n",
    "            if action_type == \"discrete\":\n",
    "                self.policy_head = LinearLayer(final_hidden, action_dim, \"softmax\")\n",
    "            else:\n",
    "                self.policy_head = LinearLayer(final_hidden, action_dim, \"tanh\")\n",
    "                # log_std parameter for continuous actions\n",
    "                self.log_std = np.full((1, action_dim), -0.5)\n",
    "                self.log_std_gradients = None\n",
    "\n",
    "            self.value_head = LinearLayer(final_hidden, 1, \"linear\")\n",
    "        else:\n",
    "            # Fully independent actor and critic networks\n",
    "            self.policy_network = PolicyNetwork(\n",
    "                state_dim, action_dim, hidden_sizes, action_type\n",
    "            )\n",
    "            self.value_network = ValueNetwork(state_dim, hidden_sizes)\n",
    "\n",
    "    def forward(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n",
    "        \"\"\"\n",
    "        Forward pass producing both the policy output and the value estimate.\n",
    "\n",
    "        Returns:\n",
    "            policy_output: policy network output\n",
    "            values: value estimates, shape (batch_size,)\n",
    "        \"\"\"\n",
    "        if self.shared_network:\n",
    "            shared_features = self.shared_layers.forward(states)\n",
    "            policy_output = self.policy_head.forward(shared_features)\n",
    "            values = self.value_head.forward(shared_features).squeeze(-1)\n",
    "        else:\n",
    "            # Delegate to the independent networks\n",
    "            policy_output, _ = self.policy_network.get_action_distribution(states)\n",
    "            values = self.value_network.estimate_value(states)\n",
    "        return policy_output, values\n",
    "\n",
    "    def get_action_distribution(\n",
    "        self, states: np.ndarray\n",
    "    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n",
    "        \"\"\"Get the action distribution (probs for discrete, mean/std for continuous).\"\"\"\n",
    "        if self.shared_network:\n",
    "            if self.action_type == \"discrete\":\n",
    "                policy_output, _ = self.forward(states)\n",
    "                return policy_output, None\n",
    "            else:\n",
    "                policy_output, _ = self.forward(states)\n",
    "                action_stds = np.exp(self.log_std)\n",
    "                return policy_output, action_stds\n",
    "        else:\n",
    "            return self.policy_network.get_action_distribution(states)\n",
    "\n",
    "    def sample_action(self, states: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n",
    "        \"\"\"Sample actions and their log-probabilities from the current policy.\"\"\"\n",
    "        if self.shared_network:\n",
    "            if self.action_type == \"discrete\":\n",
    "                action_probs, _ = self.get_action_distribution(states)\n",
    "                batch_size = states.shape[0]\n",
    "                actions = np.array([\n",
    "                    np.random.choice(self.action_dim, p=action_probs[i])\n",
    "                    for i in range(batch_size)\n",
    "                ])\n",
    "                # Epsilon keeps the log finite for zero-probability entries\n",
    "                log_probs = np.log(action_probs[np.arange(batch_size), actions] + 1e-8)\n",
    "                return actions, log_probs\n",
    "            else:\n",
    "                action_means, action_stds = self.get_action_distribution(states)\n",
    "                noise = np.random.normal(0, 1, action_means.shape)\n",
    "                actions = action_means + action_stds * noise\n",
    "                log_probs = self._compute_log_prob_continuous(actions, action_means, action_stds)\n",
    "                return actions, log_probs\n",
    "        else:\n",
    "            return self.policy_network.sample_action(states)\n",
    "\n",
    "    def compute_log_prob(self, states: np.ndarray, actions: np.ndarray) -> np.ndarray:\n",
    "        \"\"\"Compute log-probabilities of given actions in given states.\"\"\"\n",
    "        if self.shared_network:\n",
    "            if self.action_type == \"discrete\":\n",
    "                action_probs, _ = self.get_action_distribution(states)\n",
    "                actions = actions.astype(int)\n",
    "                log_probs = np.log(action_probs[np.arange(len(actions)), actions] + 1e-8)\n",
    "                return log_probs\n",
    "            else:\n",
    "                action_means, action_stds = self.get_action_distribution(states)\n",
    "                return self._compute_log_prob_continuous(actions, action_means, action_stds)\n",
    "        else:\n",
    "            return self.policy_network.compute_log_prob(states, actions)\n",
    "\n",
    "    def _compute_log_prob_continuous(\n",
    "        self, actions: np.ndarray, means: np.ndarray, stds: np.ndarray\n",
    "    ) -> np.ndarray:\n",
    "        \"\"\"Log-density of a diagonal Gaussian, summed over action dimensions.\"\"\"\n",
    "        action_dim = actions.shape[-1]\n",
    "        log_prob = -0.5 * np.sum(((actions - means) / (stds + 1e-8)) ** 2, axis=-1)\n",
    "        log_prob -= 0.5 * action_dim * np.log(2 * np.pi)\n",
    "        log_prob -= np.sum(np.log(stds + 1e-8), axis=-1)\n",
    "        return log_prob\n",
    "\n",
    "    def get_all_parameters(self):\n",
    "        \"\"\"Get all parameters.\n",
    "\n",
    "        Shared mode returns a flat list [trunk..., policy_head, value_head,\n",
    "        (log_std,)]; independent mode returns a {'policy', 'value'} dict.\n",
    "        \"\"\"\n",
    "        if self.shared_network:\n",
    "            params = []\n",
    "            params.extend(self.shared_layers.get_all_parameters())\n",
    "            params.append(self.policy_head.get_parameters())\n",
    "            params.append(self.value_head.get_parameters())\n",
    "            if self.action_type == \"continuous\":\n",
    "                params.append((self.log_std, None))\n",
    "            return params\n",
    "        else:\n",
    "            policy_params = self.policy_network.get_all_parameters()\n",
    "            value_params = self.value_network.get_all_parameters()\n",
    "            return {\"policy\": policy_params, \"value\": value_params}\n",
    "\n",
    "    def set_all_parameters(self, parameters):\n",
    "        \"\"\"Set all parameters; accepts the layout produced by get_all_parameters.\"\"\"\n",
    "        if self.shared_network:\n",
    "            if self.action_type == \"continuous\":\n",
    "                # The last entry is log_std\n",
    "                log_std_param = parameters[-1]\n",
    "                self.log_std = log_std_param[0].copy()\n",
    "                remaining_params = parameters[:-1]\n",
    "            else:\n",
    "                remaining_params = parameters\n",
    "\n",
    "            # Distribute the remaining parameters to trunk and heads\n",
    "            shared_param_count = len(self.shared_layers.layers)\n",
    "            self.shared_layers.set_all_parameters(remaining_params[:shared_param_count])\n",
    "            self.policy_head.set_parameters(*remaining_params[shared_param_count])\n",
    "            self.value_head.set_parameters(*remaining_params[shared_param_count + 1])\n",
    "        else:\n",
    "            self.policy_network.set_all_parameters(parameters[\"policy\"])\n",
    "            self.value_network.set_all_parameters(parameters[\"value\"])\n",
    "\n",
    "    def get_gradients(self):\n",
    "        \"\"\"Collect gradients from every component, mirroring get_all_parameters.\"\"\"\n",
    "        if self.shared_network:\n",
    "            gradients = []\n",
    "            gradients.extend(self.shared_layers.get_gradients())\n",
    "            # Append the heads' gradients (the previous version mistakenly\n",
    "            # appended their parameters via get_parameters()).\n",
    "            gradients.append((self.policy_head.weight_gradients,\n",
    "                              self.policy_head.bias_gradients))\n",
    "            gradients.append((self.value_head.weight_gradients,\n",
    "                              self.value_head.bias_gradients))\n",
    "            if self.action_type == \"continuous\":\n",
    "                gradients.append((self.log_std_gradients, None))\n",
    "            return gradients\n",
    "        else:\n",
    "            policy_grads = self.policy_network.get_gradients()\n",
    "            value_grads = self.value_network.get_gradients()\n",
    "            return {\"policy\": policy_grads, \"value\": value_grads}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b8d8c8c6",
   "metadata": {},
   "source": [
    "## 测试 Actor-Critic 网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "f22155ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_networks():\n",
    "    \"\"\"Smoke-test the basic functionality of the networks.\"\"\"\n",
    "    print(\"🧪 测试神经网络...\")\n",
    "\n",
    "    # Dimensions used throughout the test\n",
    "    n_features, n_actions, n_samples = 10, 4, 32\n",
    "\n",
    "    # One random batch of states as shared test input\n",
    "    state_batch = np.random.randn(n_samples, n_features)\n",
    "\n",
    "    # Policy network with a discrete action space\n",
    "    print(\"✅ 测试离散动作策略网络...\")\n",
    "    discrete_policy = PolicyNetwork(n_features, n_actions, [32, 32], \"discrete\")\n",
    "    actions_discrete, log_probs_discrete = discrete_policy.sample_action(state_batch)\n",
    "    print(f\"离散动作形状: {actions_discrete.shape}, 对数概率形状: {log_probs_discrete.shape}\")\n",
    "\n",
    "    # Policy network with a continuous action space\n",
    "    print(\"✅ 测试连续动作策略网络...\")\n",
    "    continuous_policy = PolicyNetwork(n_features, n_actions, [32, 32], \"continuous\")\n",
    "    actions_continuous, log_probs_continuous = continuous_policy.sample_action(state_batch)\n",
    "    print(f\"连续动作形状: {actions_continuous.shape}, 对数概率形状: {log_probs_continuous.shape}\")\n",
    "\n",
    "    # Standalone value network\n",
    "    print(\"✅ 测试价值网络...\")\n",
    "    critic = ValueNetwork(n_features, [32, 32])\n",
    "    values = critic.estimate_value(state_batch)\n",
    "    print(f\"价值估计形状: {values.shape}\")\n",
    "\n",
    "    # Combined Actor-Critic (independent networks)\n",
    "    print(\"✅ 测试Actor-Critic网络...\")\n",
    "    ac = ActorCritic(n_features, n_actions, [32, 32], \"continuous\", shared_network=False)\n",
    "    ac_actions, ac_log_probs = ac.sample_action(state_batch)\n",
    "    policy_output, ac_values = ac.forward(state_batch)\n",
    "    print(f\"AC动作形状: {ac_actions.shape}, AC价值形状: {ac_values.shape}\")\n",
    "\n",
    "    # One forward/backward pass through the value network\n",
    "    print(\"✅ 测试前向和反向传播...\")\n",
    "    targets = np.random.randn(n_samples)              # mock value targets\n",
    "    mse_grad = 2 * (ac_values - targets) / n_samples  # gradient of the MSE loss\n",
    "\n",
    "    value_net = getattr(ac, 'value_network', None)\n",
    "    if value_net is not None:\n",
    "        _ = value_net.backward(mse_grad.reshape(-1, 1))\n",
    "        gradients = value_net.get_gradients()\n",
    "        print(\"✅ 梯度计算成功\")\n",
    "\n",
    "    print(\"🎉 所有测试通过！\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "9463dd50",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🧪 测试神经网络...\n",
      "✅ 测试离散动作策略网络...\n",
      "离散动作形状: (32,), 对数概率形状: (32,)\n",
      "✅ 测试连续动作策略网络...\n",
      "连续动作形状: (32, 4), 对数概率形状: (32,)\n",
      "✅ 测试价值网络...\n",
      "价值估计形状: (32,)\n",
      "✅ 测试Actor-Critic网络...\n",
      "AC动作形状: (32, 4), AC价值形状: (32,)\n",
      "✅ 测试前向和反向传播...\n",
      "✅ 梯度计算成功\n",
      "🎉 所有测试通过！\n",
      "\n",
      "🤖 创建人型机器人PPO网络示例...\n",
      "✅ 人型机器人网络创建成功!\n",
      "   - 状态维度: 376\n",
      "   - 动作维度: 22\n",
      "   - 网络结构: [512, 256, 128]\n",
      "   - 采样动作形状: (64, 22)\n",
      "   - 动作范围: [-2.535, 2.773]\n",
      "   - 价值估计形状: (64,)\n",
      "   - 价值范围: [-1.718, 1.763]\n",
      "   - 策略网络参数: 360,108\n",
      "   - 价值网络参数: 357,377\n",
      "   - 总参数量: 717,485\n",
      "\n",
      "🚀 网络架构模块已完成，准备实现轨迹缓冲区模块!\n"
     ]
    }
   ],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    # Run the smoke tests first\n",
    "    test_networks()\n",
    "\n",
    "    # Example: build a PPO network for a humanoid robot\n",
    "    print(\"\\n🤖 创建人型机器人PPO网络示例...\")\n",
    "\n",
    "    # Humanoid robot configuration\n",
    "    HUMANOID_STATE_DIM = 376        # state dimension (positions, velocities, angles, ...)\n",
    "    HUMANOID_ACTION_DIM = 22        # one action per joint (22 joints)\n",
    "    HIDDEN_SIZES = [512, 256, 128]  # larger capacity for the high-dimensional task\n",
    "\n",
    "    # Actor-Critic with independent policy/value networks (more stable)\n",
    "    humanoid_ac = ActorCritic(\n",
    "        state_dim=HUMANOID_STATE_DIM,\n",
    "        action_dim=HUMANOID_ACTION_DIM, \n",
    "        hidden_sizes=HIDDEN_SIZES,\n",
    "        action_type=\"continuous\",\n",
    "        shared_network=False  # independent networks are more stable\n",
    "    )\n",
    "\n",
    "    # Simulate one batch of states\n",
    "    batch_states = np.random.randn(64, HUMANOID_STATE_DIM)\n",
    "\n",
    "    # Sample actions and estimate state values for the batch\n",
    "    sampled_actions, action_log_probs = humanoid_ac.sample_action(batch_states)\n",
    "    _, estimated_values = humanoid_ac.forward(batch_states)\n",
    "\n",
    "    print(f\"✅ 人型机器人网络创建成功!\")\n",
    "    print(f\"   - 状态维度: {HUMANOID_STATE_DIM}\")\n",
    "    print(f\"   - 动作维度: {HUMANOID_ACTION_DIM}\")\n",
    "    print(f\"   - 网络结构: {HIDDEN_SIZES}\")\n",
    "    print(f\"   - 采样动作形状: {sampled_actions.shape}\")\n",
    "    print(f\"   - 动作范围: [{sampled_actions.min():.3f}, {sampled_actions.max():.3f}]\")\n",
    "    print(f\"   - 价值估计形状: {estimated_values.shape}\")\n",
    "    print(f\"   - 价值范围: [{estimated_values.min():.3f}, {estimated_values.max():.3f}]\")\n",
    "\n",
    "    # Parameter statistics — fetch once instead of calling get_all_parameters\n",
    "    # three times, and share the counting logic between the two networks.\n",
    "    all_params = humanoid_ac.get_all_parameters()\n",
    "    if isinstance(all_params, dict):\n",
    "\n",
    "        def count_params(layer_params):\n",
    "            \"\"\"Total element count over a list of (weight, bias) pairs; bias may be None.\"\"\"\n",
    "            return sum(\n",
    "                np.prod(w.shape) + (np.prod(b.shape) if b is not None else 0)\n",
    "                for w, b in layer_params\n",
    "            )\n",
    "\n",
    "        total_policy_params = count_params(all_params[\"policy\"])\n",
    "        total_value_params = count_params(all_params[\"value\"])\n",
    "\n",
    "        print(f\"   - 策略网络参数: {total_policy_params:,}\")\n",
    "        print(f\"   - 价值网络参数: {total_value_params:,}\")\n",
    "        print(f\"   - 总参数量: {total_policy_params + total_value_params:,}\")\n",
    "\n",
    "    print(\"\\n🚀 网络架构模块已完成，准备实现轨迹缓冲区模块!\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "mjwarp",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
