{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "最大化动作的熵,增强模型的稳定性\n",
    "\n",
    "Q(state,action) + alpha * 熵\\[Q(state,*)\\]\n",
    "\n",
    "训练过程中alpha应该递减.\n",
    "\n",
    "为了缓解自举,会用两个value模型评估Q函数,取其中小的值."
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:21.005469Z",
     "start_time": "2025-01-05T04:24:20.271488Z"
    }
   },
   "source": [
    "import gym\n",
    "\n",
    "\n",
    "# Environment wrapper: CartPole-v1 with a hard 200-step episode cap.\n",
    "class MyWrapper(gym.Wrapper):\n",
    "\n",
    "    def __init__(self):\n",
    "        base_env = gym.make('CartPole-v1', render_mode='rgb_array')\n",
    "        super().__init__(base_env)\n",
    "        self.env = base_env\n",
    "        self.step_n = 0\n",
    "\n",
    "    def reset(self):\n",
    "        # Drop the info dict from the new gym API; restart the step counter.\n",
    "        state, _ = self.env.reset()\n",
    "        self.step_n = 0\n",
    "        return state\n",
    "\n",
    "    def step(self, action):\n",
    "        state, reward, terminated, truncated, info = self.env.step(action)\n",
    "\n",
    "        # An episode ends on termination, truncation, or after 200 steps.\n",
    "        self.step_n += 1\n",
    "        over = terminated or truncated or self.step_n >= 200\n",
    "\n",
    "        return state, reward, over\n",
    "\n",
    "    # Render the current frame as a small inline matplotlib image.\n",
    "    def show(self):\n",
    "        import matplotlib.pyplot as plt\n",
    "        plt.figure(figsize=(3, 3))\n",
    "        plt.imshow(self.env.render())\n",
    "        plt.show()\n",
    "\n",
    "\n",
    "env = MyWrapper()\n",
    "\n",
    "env.reset()\n",
    "\n",
    "env.show()"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<Figure size 300x300 with 1 Axes>"
      ],
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAASAAAADMCAYAAADTcn7NAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/xnp5ZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAUG0lEQVR4nO3dfWxT570H8K+d2E5CchxCGru5xAJp3WjGS7cAyVmnu2r1krXRNEakvQh1WYXgljqoNB1SI1EqeiulY9Jl6wphutOAfzqqVKITEaWKQhtuV5dA2kghQNRVTIkA2wWW4yQQO7F/9w+Us7oEFpPgx06+H+lI+Hke27/zGH9zXnxsi4gIiIgUsKougIjmLwYQESnDACIiZRhARKQMA4iIlGEAEZEyDCAiUoYBRETKMICISBkGEBEpoyyA9u7diyVLliAnJweVlZXo6upSVQoRKaIkgN566y00Njbi5ZdfxieffIJVq1ahpqYGoVBIRTlEpIhFxcWolZWVWLNmDd544w0AQDweR1lZGbZu3YoXX3wx1eUQkSLZqX7CaDSK7u5uNDU1mW1WqxVerxd+v3/K+0QiEUQiEfN2PB7H9evXsWjRIlgslvteMxElR0QwPDyM0tJSWK133tFKeQBdvXoVsVgMLpcrod3lcuHChQtT3qe5uRm7du1KRXlENIsGBwexePHiO/anPIDuRVNTExobG83bhmHA4/FgcHAQmqYprIyIphIOh1FWVoaCgoK7jkt5ABUXFyMrKwvBYDChPRgMwu12T3kfh8MBh8NxW7umaQwgojT27w6RpPwsmN1uR0VFBTo6Osy2eDyOjo4O6Lqe6nKISCElu2CNjY2or6/H6tWrsXbtWvzud7/D6Ogonn76aRXlEJEiSgLoZz/7Gb744gvs3LkTgUAAjzzyCI4fP37bgWkimtuUfA5opsLhMJxOJwzD4DEgojQ03fcorwUjImUYQESkDAOIiJRhABGRMgwgIlKGAUREyjCAiEgZBhARKcMAIiJlGEBEpAwDiIiUYQARkTIMICJShgFERMowgIhIGQYQESnDACIiZRhARKQMA4iIlGEAEZEyDCAiUoYBRETKMICISBkGEBEpwwAiImUYQESkDAOIiJRhABGRMkkH0MmTJ/GjH/0IpaWlsFgseOeddxL6RQQ7d+7Egw8+iNzcXHi9Xnz22WcJY65fv44NGzZA0zQUFhZi48aNGBkZmdGKEFHmSTqARkdHsWrVKuzdu3fK/t27d+P111/H/v37cerUKSxYsAA1NTUYGxszx2zYsAF9fX1ob29HW1sbTp48ic2bN9/7WhBRZpIZACBHjhwxb8fjcXG73fLb3/7WbBsaGhKHwyF/+ctfRETk3LlzAkBOnz5tjnn33XfFYrHIpUuXpvW8hmEIADEMYyblE9F9Mt336KweA7p48SICgQC8Xq/Z5nQ6UVlZCb/fDwDw+/0oLCzE6tWrzTFerxdWqxWnTp2a8nEjkQjC4XDCQkSZb1YDKBAIAABcLldCu8vlMvsCgQBKSkoS+rOzs1FUVGSO+arm5mY4nU5zKSsrm82yiUiRjDgL1tTUBMMwzGVwcFB1SUQ0C2Y1gNxuNwAgGAwmtAeDQbPP7XYjFAol9E9MTOD69evmmK9yOBzQNC1hIaLMN6sBtHTpUrjdbnR0dJht4XAYp06dgq7rAABd1zE0NITu7m5zzIkTJxCPx1FZWTmb5RBRmstO9g4jIyP4+9//bt6+ePEienp6UFRUBI/Hg23btuHVV1/FQw89hKVLl+Kll15CaWkp1q1bBwB4+OGH8cMf/hCbNm3C/v37MT4+joaGBvz85z9HaWnprK0YEWWAZE+vvf/++wLgtqW+vl5Ebp2Kf+mll8TlconD4ZDHH39c+vv7Ex7j2rVr8otf/ELy8/NF0zR5+umnZXh4eNZP8RGRGtN9j1pERBTm3z0Jh8NwOp0wDIPHg4jS0HTfoxlxFoyI5iYG
EBEpwwAiImUYQESkDAOIiJRhABGRMgwgIlKGAUREyjCAiEgZBhARKcMAIiJlGEBEpAwDiIiUYQARkTIMICJShgFERMowgIhIGQYQESnDACIiZRhARKRM0j/LQ3Q/iAiGL/dj/IZhtuUsfBALij0Kq6L7jQFEaUJw+dNjGL50wWxxr6pmAM1x3AWj9CACxOOqq6AUYwBRWhCJQ4QBNN8wgCg9iEDiMdVVUIoxgCgtiAi3gOYhBhClB4nzGNA8xACitMAtoPkpqQBqbm7GmjVrUFBQgJKSEqxbtw79/f0JY8bGxuDz+bBo0SLk5+ejrq4OwWAwYczAwABqa2uRl5eHkpISbN++HRMTEzNfG8pYEptAbDyS0GbNdiiqhlIlqQDq7OyEz+fDxx9/jPb2doyPj6O6uhqjo6PmmOeffx5Hjx5Fa2srOjs7cfnyZaxfv97sj8ViqK2tRTQaxUcffYRDhw7h4MGD2Llz5+ytFWWcieiNhA8hwmJBTqFLXUGUGjIDoVBIAEhnZ6eIiAwNDYnNZpPW1lZzzPnz5wWA+P1+ERE5duyYWK1WCQQC5piWlhbRNE0ikci0ntcwDAEghmHMpHxKIzf+eUVO/++z0rV/063lj/8l1z7vVl0W3aPpvkdndAzIMG79xSoqKgIAdHd3Y3x8HF6v1xyzbNkyeDwe+P1+AIDf78eKFSvgcv3rr1tNTQ3C4TD6+vqmfJ5IJIJwOJyw0NxnzeIH9ee6ew6geDyObdu24dFHH8Xy5csBAIFAAHa7HYWFhQljXS4XAoGAOebL4TPZP9k3lebmZjidTnMpKyu717Ipg1isWapLoPvsngPI5/Ph7NmzOHz48GzWM6WmpiYYhmEug4OD9/05ST2LlVtAc909vcINDQ1oa2vDyZMnsXjxYrPd7XYjGo1iaGgoYSsoGAzC7XabY7q6uhIeb/Is2eSYr3I4HHA4eEZkPrEAsGRxC2iuS2oLSETQ0NCAI0eO4MSJE1i6dGlCf0VFBWw2Gzo6Osy2/v5+DAwMQNd1AICu6+jt7UUoFDLHtLe3Q9M0lJeXz2RdaC6xWLgLNg8ktQXk8/nw5ptv4q9//SsKCgrMYzZOpxO5ublwOp3YuHEjGhsbUVRUBE3TsHXrVui6jqqqKgBAdXU1ysvL8dRTT2H37t0IBALYsWMHfD4ft3IogZW7YHNeUq9wS0sLAOCxxx5LaD9w4AB+9atfAQD27NkDq9WKuro6RCIR1NTUYN++febYrKwstLW1YcuWLdB1HQsWLEB9fT1eeeWVma0JZbapPgVtsaS+Dkopi4iI6iKSFQ6H4XQ6YRgGNE1TXQ7NghtXB3HunWZI7NYn4i3WLJSv34G8Rf+huDK6F9N9j/JaMEoL8fgEkHF/CmmmGECUFvhdQPMTA4jSgsRi4CbQ/MMAorQgcX4bwnzEAKK0wF2w+YkBRGkhHhtHwglZi+XWx6FpTmMAUVoY++eVhM8C2fKcyM7JV1gRpQIDiNJC/Cu7YFZrNqy8FGPOYwBRerJaAQv/e851fIUpLVksVlh4KcacxwCitGSxcAtoPuArTOnJar0VQjSn8RWmtHRrC4i7YHMdA4iUm+oLGSzWLG4BzQN8hSk93BZCFm4BzQMMIEoLvBRjfmIAUVrgxajzEwOI0oBwC2ieYgCRegLEYwyg+YgBRGlAuAs2TzGAKC1wF2x+YgBRWmAAzU8MIFJO4jFEwl8ktDm0BxRVQ6nEACLlRASx6M2ENltugaJqKJX427eUEqOjoxgfH5+yLz4RRTye+MuokfEJGIZxx8crKChAVha/sCzTMYAoJX7961/j6NGjU/Y5bFnYs/m7cC9cYLa9sXcf3v7whSnH2+12HD9+HF//+tfvS62UOgwgSonr16/j0qVLU/Y5bFkYjdrRN/pd3Ihp8OScw7V/dt1xvN1uv+PWFGWWpI4BtbS0YOXKldA0DZqmQdd1vPvuu2b/2NgYfD4fFi1ahPz8fNTV1SEYDCY8xsDAAGpr
a5GXl4eSkhJs374dExP8DMh8JsjC2ZH/xODYw7g2vhi9I48heNOluixKgaQCaPHixXjttdfQ3d2NM2fO4Pvf/z5+/OMfo6+vDwDw/PPP4+jRo2htbUVnZycuX76M9evXm/ePxWKora1FNBrFRx99hEOHDuHgwYPYuXPn7K4VZRSBBSOxQkz+Ds+E2DEczVNaE6WIzNDChQvlT3/6kwwNDYnNZpPW1laz7/z58wJA/H6/iIgcO3ZMrFarBAIBc0xLS4tomiaRSGTaz2kYhgAQwzBmWj6lyE9/+lPBrd9evm1x2Gzymxd/I//9ql92vdol/9N8XLxVa+843m63y9mzZ1WvEt3FdN+j93wMKBaLobW1FaOjo9B1Hd3d3RgfH4fX6zXHLFu2DB6PB36/H1VVVfD7/VixYgVcrn9tXtfU1GDLli3o6+vDt771raRquHDhAvLz+dtRmSAcDt+xbyI2Af/fDmEs+2+4Gc9HiX0AFwc/u+N4EcHnn3/OL61PYyMjI9Mal3QA9fb2Qtd1jI2NIT8/H0eOHEF5eTl6enpgt9tRWFiYMN7lciEQCAAAAoFAQvhM9k/23UkkEkEkEjFvT/5nNgyDx48yRDQavWNfLC545//OATg37ccbHh7G0NDQzAuj+2J0dHRa45IOoG984xvo6emBYRh4++23UV9fj87OzqQLTEZzczN27dp1W3tlZSU0Tbuvz02zo7i4eNYey2Kx4JFHHsE3v/nNWXtMml132+L9sqQ/CW232/G1r30NFRUVaG5uxqpVq/D73/8ebrcb0Wj0tr9KwWAQbrcbAOB2u287KzZ5e3LMVJqammAYhrkMDg4mWzYRpaEZX4oRj8cRiURQUVEBm82Gjo4Os6+/vx8DAwPQdR0AoOs6ent7EQqFzDHt7e3QNA3l5eV3fA6Hw2Ge+p9ciCjzJbUL1tTUhCeeeAIejwfDw8N488038cEHH+C9996D0+nExo0b0djYiKKiImiahq1bt0LXdVRVVQEAqqurUV5ejqeeegq7d+9GIBDAjh074PP54HA47ssKElH6SiqAQqEQfvnLX+LKlStwOp1YuXIl3nvvPfzgBz8AAOzZswdWqxV1dXWIRCKoqanBvn37zPtnZWWhra0NW7Zsga7rWLBgAerr6/HKK6/M7lpR2snNzZ21LVe73Q6rlddRzwUWkSl+lCnNhcNhOJ1OGIbB3bEMcfXqVdy8efPfD5wmt9sNm802a49Hs2u671FeC0YpMZtnwWju4HYsESnDACIiZRhARKQMA4iIlGEAEZEyDCAiUoYBRETKMICISBkGEBEpwwAiImUYQESkDAOIiJRhABGRMgwgIlKGAUREyjCAiEgZBhARKcMAIiJlGEBEpAwDiIiUYQARkTIMICJShgFERMowgIhIGQYQESnDACIiZRhARKQMA4iIlGEAEZEyDCAiUiZbdQH3QkQAAOFwWHElRDSVyffm5Hv1TjIygK5duwYAKCsrU1wJEd3N8PAwnE7nHfszMoCKiooAAAMDA3ddOUoUDodRVlaGwcFBaJqmupyMwDm7NyKC4eFhlJaW3nVcRgaQ1Xrr0JXT6eR/inugaRrnLUmcs+RNZ+OAB6GJSBkGEBEpk5EB5HA48PLLL8PhcKguJaNw3pLHObu/LPLvzpMREd0nGbkFRERzAwOIiJRhABGRMgwgIlImIwNo7969WLJkCXJyclBZWYmuri7VJSnT3NyMNWvWoKCgACUlJVi3bh36+/sTxoyNjcHn82HRokXIz89HXV0dgsFgwpiBgQHU1tYiLy8PJSUl2L59OyYmJlK5Ksq89tprsFgs2LZtm9nGOUsRyTCHDx8Wu90uf/7zn6Wvr082bdokhYWFEgwGVZemRE1NjRw4cEDOnj0rPT098uSTT4rH45GRkRFzzDPPPCNlZWXS0dEhZ86ckaqqKvnOd75j9k9MTMjy5cvF6/XKp59+KseOHZPi4mJpampSsUop1dXVJUuWLJGVK1fKc889
Z7ZzzlIj4wJo7dq14vP5zNuxWExKS0ulublZYVXpIxQKCQDp7OwUEZGhoSGx2WzS2tpqjjl//rwAEL/fLyIix44dE6vVKoFAwBzT0tIimqZJJBJJ7Qqk0PDwsDz00EPS3t4u3/ve98wA4pylTkbtgkWjUXR3d8Pr9ZptVqsVXq8Xfr9fYWXpwzAMAP+6YLe7uxvj4+MJc7Zs2TJ4PB5zzvx+P1asWAGXy2WOqampQTgcRl9fXwqrTy2fz4fa2tqEuQE4Z6mUURejXr16FbFYLOFFBwCXy4ULFy4oqip9xONxbNu2DY8++iiWL18OAAgEArDb7SgsLEwY63K5EAgEzDFTzelk31x0+PBhfPLJJzh9+vRtfZyz1MmoAKK78/l8OHv2LD788EPVpaS1wcFBPPfcc2hvb0dOTo7qcua1jNoFKy4uRlZW1m1nI4LBINxut6Kq0kNDQwPa2trw/vvvY/HixWa72+1GNBrF0NBQwvgvz5nb7Z5yTif75pru7m6EQiF8+9vfRnZ2NrKzs9HZ2YnXX38d2dnZcLlcnLMUyagAstvtqKioQEdHh9kWj8fR0dEBXdcVVqaOiKChoQFHjhzBiRMnsHTp0oT+iooK2Gy2hDnr7+/HwMCAOWe6rqO3txehUMgc097eDk3TUF5enpoVSaHHH38cvb296OnpMZfVq1djw4YN5r85Zymi+ih4sg4fPiwOh0MOHjwo586dk82bN0thYWHC2Yj5ZMuWLeJ0OuWDDz6QK1eumMuNGzfMMc8884x4PB45ceKEnDlzRnRdF13Xzf7JU8rV1dXS09Mjx48flwceeGBenVL+8lkwEc5ZqmRcAImI/OEPfxCPxyN2u13Wrl0rH3/8seqSlAEw5XLgwAFzzM2bN+XZZ5+VhQsXSl5envzkJz+RK1euJDzOP/7xD3niiSckNzdXiouL5YUXXpDx8fEUr406Xw0gzllq8Os4iEiZjDoGRERzCwOIiJRhABGRMgwgIlKGAUREyjCAiEgZBhARKcMAIiJlGEBEpAwDiIiUYQARkTIMICJS5v8Btq66uGnZ8l0AAAAASUVORK5CYII="
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": 1
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:23.387116Z",
     "start_time": "2025-01-05T04:24:21.027791Z"
    }
   },
   "source": [
    "import torch\n",
    "\n",
    "model_action = torch.nn.Sequential(\n",
    "    torch.nn.Linear(4, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 2),\n",
    "    torch.nn.Softmax(dim=1),\n",
    ")\n",
    "\n",
    "model_action(torch.randn(2, 4))"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.4928, 0.5072],\n",
       "        [0.5118, 0.4882]], grad_fn=<SoftmaxBackward0>)"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:23.446440Z",
     "start_time": "2025-01-05T04:24:23.437931Z"
    }
   },
   "source": [
    "'''SAC算法特征：双Q网络：定义了两个value模型，取其中小的，缓解自举'''\n",
    "model_value1 = torch.nn.Sequential(\n",
    "    torch.nn.Linear(4, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 2),\n",
    ")\n",
    "\n",
    "model_value2 = torch.nn.Sequential(\n",
    "    torch.nn.Linear(4, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 2),\n",
    ")\n",
    "# 两组value模型，一组用来计算target，另外一组计算value\n",
    "model_value1_next = torch.nn.Sequential(\n",
    "    torch.nn.Linear(4, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 2),\n",
    ")\n",
    "\n",
    "model_value2_next = torch.nn.Sequential(\n",
    "    torch.nn.Linear(4, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 64),\n",
    "    torch.nn.ReLU(),\n",
    "    torch.nn.Linear(64, 2),\n",
    ")\n",
    "\n",
    "model_value1_next.load_state_dict(model_value1.state_dict())\n",
    "model_value2_next.load_state_dict(model_value2.state_dict())\n",
    "\n",
    "model_value1(torch.randn(2, 4))"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.1336, -0.2670],\n",
       "        [-0.2267, -0.3672]], grad_fn=<AddmmBackward0>)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:23.487746Z",
     "start_time": "2025-01-05T04:24:23.474249Z"
    }
   },
   "source": [
    "from IPython import display\n",
    "import random\n",
    "\n",
    "\n",
    "def play(show=False):\n",
    "    '''Run one episode with the current policy.\n",
    "\n",
    "    Returns (transitions, total_reward), where each transition is a\n",
    "    (state, action, reward, next_state, over) tuple.\n",
    "    '''\n",
    "    transitions = []\n",
    "    total_reward = 0\n",
    "\n",
    "    state = env.reset()\n",
    "    over = False\n",
    "    while not over:\n",
    "        # Sample an action from the policy's output distribution.\n",
    "        weights = model_action(torch.FloatTensor(state).reshape(1, 4))[0].tolist()\n",
    "        action = random.choices(range(2), weights=weights, k=1)[0]\n",
    "\n",
    "        next_state, reward, over = env.step(action)\n",
    "        transitions.append((state, action, reward, next_state, over))\n",
    "        total_reward += reward\n",
    "        state = next_state\n",
    "\n",
    "        if show:\n",
    "            # Overwrite the previous frame so the animation stays in place.\n",
    "            display.clear_output(wait=True)\n",
    "            env.show()\n",
    "\n",
    "    return transitions, total_reward\n",
    "\n",
    "\n",
    "play()[-1]"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\app\\anaconda3\\install\\envs\\pytorch_env_3_8\\lib\\site-packages\\gym\\utils\\passive_env_checker.py:233: DeprecationWarning: `np.bool8` is a deprecated alias for `np.bool_`.  (Deprecated NumPy 1.24)\n",
      "  if not isinstance(terminated, (bool, np.bool8)):\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "34.0"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:23.558321Z",
     "start_time": "2025-01-05T04:24:23.536109Z"
    }
   },
   "source": [
    "#数据池\n",
    "class Pool:\n",
    "\n",
    "    def __init__(self):\n",
    "        self.pool = []\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.pool)\n",
    "\n",
    "    def __getitem__(self, i):\n",
    "        return self.pool[i]\n",
    "\n",
    "    #更新动作池\n",
    "    def update(self):\n",
    "        #每次更新不少于N条新数据\n",
    "        old_len = len(self.pool)\n",
    "        while len(pool) - old_len < 200:\n",
    "            self.pool.extend(play()[0])\n",
    "\n",
    "        #只保留最新的N条数据\n",
    "        self.pool = self.pool[-2_0000:]\n",
    "\n",
    "    #获取一批数据样本\n",
    "    def sample(self):\n",
    "        data = random.sample(self.pool, 64)\n",
    "\n",
    "        state = torch.FloatTensor([i[0] for i in data]).reshape(-1, 4)\n",
    "        action = torch.LongTensor([i[1] for i in data]).reshape(-1, 1)\n",
    "        reward = torch.FloatTensor([i[2] for i in data]).reshape(-1, 1)\n",
    "        next_state = torch.FloatTensor([i[3] for i in data]).reshape(-1, 4)\n",
    "        over = torch.LongTensor([i[4] for i in data]).reshape(-1, 1)\n",
    "\n",
    "        return state, action, reward, next_state, over\n",
    "\n",
    "\n",
    "# Create the global replay buffer, fill it once, and draw a first batch.\n",
    "pool = Pool()\n",
    "pool.update()\n",
    "state, action, reward, next_state, over = pool.sample()\n",
    "\n",
    "# Show the buffer size and the oldest stored transition.\n",
    "len(pool), pool[0]"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\25036\\AppData\\Local\\Temp\\ipykernel_49988\\1750144527.py:27: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\torch\\csrc\\utils\\tensor_new.cpp:281.)\n",
      "  state = torch.FloatTensor([i[0] for i in data]).reshape(-1, 4)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(217,\n",
       " (array([-0.00484784, -0.00994993,  0.0382708 , -0.0325453 ], dtype=float32),\n",
       "  0,\n",
       "  1.0,\n",
       "  array([-0.00504683, -0.2055992 ,  0.0376199 ,  0.27196258], dtype=float32),\n",
       "  False))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:24.302515Z",
     "start_time": "2025-01-05T04:24:23.622561Z"
    }
   },
   "source": [
    "optimizer_action = torch.optim.Adam(model_action.parameters(), lr=2e-4)\n",
    "optimizer_value1 = torch.optim.Adam(model_value1.parameters(), lr=2e-3)\n",
    "optimizer_value2 = torch.optim.Adam(model_value2.parameters(), lr=2e-3)\n",
    "\n",
    "\n",
    "def soft_update(_from, _to):\n",
    "    '''Polyak-average the parameters of `_from` into `_to` (tau = 0.005).\n",
    "\n",
    "    Fix: the loop previously rebound `_from`/`_to` as its loop variables,\n",
    "    shadowing the model arguments themselves.\n",
    "    '''\n",
    "    for src, dst in zip(_from.parameters(), _to.parameters()):\n",
    "        dst.data.copy_(dst.data * 0.995 + src.data * 0.005)\n",
    "\n",
    "\n",
    "def get_prob_entropy(state):\n",
    "    '''Return the policy's action probabilities and per-state entropy.\n",
    "\n",
    "    The 1e-8 term guards log(0) for near-deterministic policies.\n",
    "    '''\n",
    "    prob = model_action(torch.FloatTensor(state).reshape(-1, 4))\n",
    "    entropy = -(prob * (prob + 1e-8).log()).sum(dim=1, keepdim=True)\n",
    "    return prob, entropy\n",
    "\n",
    "\n",
    "def requires_grad(model, value):\n",
    "    '''Toggle gradient tracking for every parameter of `model`.'''\n",
    "    for param in model.parameters():\n",
    "        param.requires_grad_(value)\n",
    "\n",
    "\n",
    "# Entropy temperature; decayed during training.\n",
    "alpha = 1.0"
   ],
   "outputs": [],
   "execution_count": 6
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:24.433378Z",
     "start_time": "2025-01-05T04:24:24.327140Z"
    }
   },
   "source": [
    "'''SAC feature: twin Q-networks.'''\n",
    "def train_value(state, action, reward, next_state, over):\n",
    "    '''One gradient step for both Q-networks against a shared soft target.\n",
    "\n",
    "    Note: the returned scalar is the second network's loss only.\n",
    "    '''\n",
    "    requires_grad(model_value1, True)\n",
    "    requires_grad(model_value2, True)\n",
    "    requires_grad(model_action, False)\n",
    "\n",
    "    # Build the TD target without tracking gradients.\n",
    "    with torch.no_grad():\n",
    "        # Action distribution and its entropy at the next state.\n",
    "        prob, entropy = get_prob_entropy(next_state)\n",
    "        target1 = model_value1_next(next_state)\n",
    "        target2 = model_value2_next(next_state)\n",
    "        # Twin targets: the element-wise minimum curbs the value\n",
    "        # overestimation that bootstrapping causes.\n",
    "        target = torch.min(target1, target2)\n",
    "\n",
    "    # Expected next-state value under the policy.\n",
    "    target = (prob * target).sum(dim=1, keepdim=True)\n",
    "    # Core soft-value formula: Q value plus entropy bonus.\n",
    "    target = target + alpha * entropy\n",
    "    # Discount by gamma=0.98, mask out terminal states, add the reward.\n",
    "    target = target * 0.98 * (1 - over) + reward\n",
    "    # Two value models, so two separate gradient-descent updates.\n",
    "    # Fit the first Q-network's estimate of the taken action.\n",
    "    value = model_value1(state).gather(dim=1, index=action)\n",
    "    loss = torch.nn.functional.mse_loss(value, target)\n",
    "    loss.backward()\n",
    "    optimizer_value1.step()\n",
    "    optimizer_value1.zero_grad()\n",
    "\n",
    "    # Same update for the second Q-network.\n",
    "    value = model_value2(state).gather(dim=1, index=action)\n",
    "    loss = torch.nn.functional.mse_loss(value, target)\n",
    "    loss.backward()\n",
    "    optimizer_value2.step()\n",
    "    optimizer_value2.zero_grad()\n",
    "\n",
    "    return loss.item()\n",
    "\n",
    "\n",
    "train_value(state, action, reward, next_state, over)"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2.2214725017547607"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "scrolled": true,
    "ExecuteTime": {
     "end_time": "2025-01-05T04:24:24.458988Z",
     "start_time": "2025-01-05T04:24:24.441537Z"
    }
   },
   "source": [
    "def train_action(state):\n",
    "    '''One policy step: maximize expected min-Q plus the entropy bonus.'''\n",
    "    requires_grad(model_value1, False)\n",
    "    requires_grad(model_value2, False)\n",
    "    requires_grad(model_action, True)\n",
    "\n",
    "    # Action probabilities and entropy under the current policy.\n",
    "    prob, entropy = get_prob_entropy(state)\n",
    "\n",
    "    # Twin Q-values; take the element-wise minimum.\n",
    "    value1 = model_value1(state)\n",
    "    value2 = model_value2(state)\n",
    "    value = torch.min(value1, value2)\n",
    "\n",
    "    # Expectation of Q over the action distribution.\n",
    "    value = (prob * value).sum(dim=1, keepdim=True)\n",
    "\n",
    "    # Entropy-regularized objective, negated for gradient descent.\n",
    "    loss = -(value + alpha * entropy).mean()\n",
    "\n",
    "    loss.backward()\n",
    "    optimizer_action.step()\n",
    "    optimizer_action.zero_grad()\n",
    "\n",
    "    return loss.item()\n",
    "\n",
    "\n",
    "train_action(state)"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-0.531906008720398"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 8
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "OHoSU6uI-xIt",
    "scrolled": false,
    "ExecuteTime": {
     "end_time": "2025-01-05T04:26:28.311143Z",
     "start_time": "2025-01-05T04:24:24.469670Z"
    }
   },
   "source": [
    "def train():\n",
    "    '''Main SAC loop: 200 epochs, 200 gradient steps per epoch.'''\n",
    "    global alpha\n",
    "\n",
    "    for model in (model_action, model_value1, model_value2):\n",
    "        model.train()\n",
    "\n",
    "    for epoch in range(200):\n",
    "        # Refresh the replay buffer with new experience.\n",
    "        pool.update()\n",
    "\n",
    "        # Several learning steps per buffer refresh.\n",
    "        for _ in range(200):\n",
    "            state, action, reward, next_state, over = pool.sample()\n",
    "\n",
    "            train_value(state, action, reward, next_state, over)\n",
    "            train_action(state)\n",
    "            soft_update(model_value1, model_value1_next)\n",
    "            soft_update(model_value2, model_value2_next)\n",
    "\n",
    "        # Decay the entropy temperature each epoch.\n",
    "        alpha *= 0.9\n",
    "\n",
    "        if epoch % 10 == 0:\n",
    "            # Average return over 20 evaluation episodes.\n",
    "            test_result = sum([play()[-1] for _ in range(20)]) / 20\n",
    "            print(epoch, len(pool), alpha, test_result)\n",
    "\n",
    "\n",
    "train()"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 435 0.9 21.35\n",
      "10 3042 0.31381059609000017 194.95\n",
      "20 5957 0.10941898913151243 180.3\n",
      "30 8596 0.03815204244769462 198.2\n",
      "40 10772 0.013302794647291147 200.0\n",
      "50 12772 0.004638397686588107 200.0\n",
      "60 14772 0.0016173092699229901 194.5\n",
      "70 16967 0.0005639208733960181 200.0\n",
      "80 18967 0.00019662705047555326 200.0\n",
      "90 20000 6.85596132412799e-05 200.0\n",
      "100 20000 2.390525899882879e-05 200.0\n",
      "110 20000 8.335248417898115e-06 200.0\n",
      "120 20000 2.9063214161987086e-06 200.0\n",
      "130 20000 1.0133716178293888e-06 156.35\n",
      "140 20000 3.5334083494636473e-07 114.45\n",
      "150 20000 1.2320233115273002e-07 108.55\n",
      "160 20000 4.295799664301754e-08 139.35\n",
      "170 20000 1.4978527259308396e-08 152.9\n",
      "180 20000 5.222689519770981e-09 144.45\n",
      "190 20000 1.821039234880364e-09 172.3\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# Watch one rendered episode with the trained policy.\n",
    "play(True)[-1]"
   ],
   "execution_count": 10,
   "outputs": [
    {
     "data": {
      "text/plain": [
       "128.0"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ]
  }
 ],
 "metadata": {
  "colab": {
   "collapsed_sections": [],
   "name": "第7章-DQN算法.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
