{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/gym/core.py:26: UserWarning: \u001b[33mWARN: Gym minimally supports python 3.6 as the python foundation not longer supports the version, please update your version to 3.7+\u001b[0m\n",
      "  \"Gym minimally supports python 3.6 as the python foundation not longer supports the version, please update your version to 3.7+\"\n",
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/gym/core.py:330: DeprecationWarning: \u001b[33mWARN: Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\u001b[0m\n",
      "  \"Initializing wrapper in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\"\n",
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/gym/wrappers/step_api_compatibility.py:40: DeprecationWarning: \u001b[33mWARN: Initializing environment in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\u001b[0m\n",
      "  \"Initializing environment in old step API which returns one bool instead of two. It is recommended to set `new_step_api=True` to use new step API. This will be the default behaviour in future.\"\n",
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/gym/core.py:52: DeprecationWarning: \u001b[33mWARN: The argument mode in render method is deprecated; use render_mode during environment initialization instead.\n",
      "See here for more information: https://www.gymlibrary.ml/content/api/\u001b[0m\n",
      "  \"The argument mode in render method is deprecated; \"\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAD8CAYAAAB3lxGOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAASB0lEQVR4nO3da4xVZ73H8e9vNsNwL7dhGBjaUp00hdqjdURjG216sZzaSsHWUFMlTRPe1ETPOYkHYvTEF00858TGF8e+aGyVplWkFy2i9oho2xwvpYO9BIrItFgZGJghyK3AwMz8z4tZrbsw7SyY2bP3nuf3SXbWWs9+nrX/O+z5sdbaa6+liMDM0lVT7gLMrLwcAmaJcwiYJc4hYJY4h4BZ4hwCZokrWQhIWixph6Q2SatK9TpmNjQqxXkCkgrAX4AbgHbgBeCOiHh12F/MzIakVFsCi4C2iHg9Ik4Ba4ElJXotMxuCMSVa71xgd9FyO/DRd+s8c+bMuPjii0tUipkBbNmy5UBE1J/ZXqoQ0ABt79jvkLQSWAlw4YUX0traWqJSzAxA0hsDtZdqd6AdmFe03ATsLe4QEQ9EREtEtNTXnxVOZjZCShUCLwDNkuZLGgssB9aX6LXMbAhKsjsQET2SvgT8L1AAHoqIbaV4LTMbmlIdEyAifgH8olTrN7Ph4TMGzRLnEDBLnEPALHEOAbPEOQTMEucQMEucQ8AscQ4Bs8Q5BMwS5xAwS5xDwCxxDgGzxDkEzBLnEDBLnEPALHEOAbPEOQTMEucQMEucQ8AscQ4Bs8Q5BMwS5xAwS5xDwCxxDgGzxDkEzBLnEDBLnEPALHEOAbPEOQTMEucQMEucQ8AscQ4Bs8Q5BMwS5xAwS5xDwCxxg4aApIckdUraWtQ2XdJGSTuz6bSi51ZLapO0Q9KNpSrczIZHni2BHwCLz2hbBWyKiGZgU7aMpAXAcmBhNuZ+SYVhq9bMht2gIRARzwEHz2heAqzJ5tcAtxa1r42I7ojYBbQBi4anVDMrhfM9JtAQER0A2XRW1j4X2F3Urz1rO4uklZJaJbV2dXWdZxlmNlTDfWBQA7TFQB0j4oGIaImIlvr6+mEuw8zyOt8Q2C+pESCbdmbt7cC8on5NwN7zL8/MSu18Q2A9sCKbXwE8VdS+XFKdpPlAM7B5aCWaWSmNGayDpB8B1wAzJbUD/wF8C1gn6W7gb8DtABGxTdI64FWgB7gnInpLVLuZDYNBQyAi7niXp657l/73AvcOpSgzGzk+Y9AscQ4Bs8Q5BMwS5xAwS5xDwCxxDgGzxDkEzBLnEDBLnEPALHEOAbPEOQTMEjfobwes8kVvLz1Hj9LX3Y1qaxkzeTI1tbXlLsuqhEOgikUEp7q62P/Tn3JkyxZO//3vFCZMYNJll9GwdCkTmpuRBrrOi9k/OASqVETQvXcvu+67j+M7d77d3nfyJH//3e84tn07F33pS0y58kpU470+e3f+dFSpvu5udj/44DsCoNjpgwfZ9e1vc+DXvyZiwCu8mQEOgap1bOtWjr788nv26X3zTfY9/jjde/aMUFVWjRwCVer0wYPE6dOD9ju1bx8djz1GX0/PCFRl1cghkIBDf/gDx7Zu9W6BDcghUKXq5syhZty4XH37Tp6k/fvfp/fYsRJXZdXIIVClJl56KRcsyn9zpxO7dnFg40ZvDdhZHAJVSmPGMOfzn2dsQ0PuMfueeIITr79ewqqsGjkEqpQk6hobaViyBBXy3fO19+hR9jzyCL0nTpS4OqsmDoEqJomZn/oUk6+4IveYoy+/zMFnnvFugb3NIVDlVFtL0113MeaCC3L1j54eOn78Y07t3+8gMMAhUPUkMe7CC5l5ww25x5w+eJD9P/kJ9PWVsDKrFg6BUUA1NcxasoQJ739/7jEHNm7kyIsvlrAqqxYOgVFizJQpNC5fnvvcgejpof2hhzh96FBpC7OK5xAYJSRxwYc/zLSrr8495uSe
PXQ9/TTh3YKkOQRGERUKNCxdytj6+nwDIuj6+c85vnOnDxImzCEwyoxraqJh2TLIeQ2BnsOH6Xj88Vw/RrLRySEwykhi+jXXMLG5OfeYI1u2cPC557w1kCiHwCg0ZuJEmu6665wOEu577DFOdXWVuDKrRA6BUWripZcy65Zbcvfv7uigY+1awtcdSI5DYJRSocCsm2+mrrEx95iDzz7LoT/+0bsFiRk0BCTNk/RbSdslbZP05ax9uqSNknZm02lFY1ZLapO0Q9KNpXwD9u7GTJ3KnDvvRGPH5uofp0+z78kn6X3zzRJXZpUkz5ZAD/BvEXEZ8DHgHkkLgFXApohoBjZly2TPLQcWAouB+yXl+5mbDStJTP3oR5l6DtcdOP7aa+x/6imfO5CQQUMgIjoi4k/Z/FFgOzAXWAKsybqtAW7N5pcAayOiOyJ2AW1A/k+hDSvV1tJ4xx3UTps2eGeACA788pe8uWOHdwsScU7HBCRdDHwIeB5oiIgO6A8KYFbWbS6wu2hYe9ZmZSCJcXPnUn/TTZDzRiQ9R46w94c/pK+7u8TVWSXIHQKSJgFPAF+JiCPv1XWAtrP+S5G0UlKrpNYufzVVUqqpYdYttzDx0ktzjzm2bRuHfv97bw0kIFcISKqlPwAejYgns+b9khqz5xuBzqy9HZhXNLwJ2HvmOiPigYhoiYiW+rynudp5qxk/ntm33YZy3qMwenrY++ijPncgAXm+HRDwILA9Iu4remo9sCKbXwE8VdS+XFKdpPlAM7B5+Eq28/HWD4xmXHdd7jFv3ecwentLWJmVW54tgauALwDXSnope9wEfAu4QdJO4IZsmYjYBqwDXgWeBu6JCH+KKoAKBWbfdhvjmppyjznwq19xZJA7HVl1G/SGpBHxfwy8nw8w4H8rEXEvcO8Q6rISGVtfT8PSpbzx3e/murJQnDrFnocfZsIll1A7dWrpC7QR5zMGEyOJaVdfzZQrr8w95sSuXXT+7GfeLRilHAIJKowfz5w77qAwcWK+AW9dd+D11/1twSjkEEjUhPe9jxnXXpv73IHe48fZ99hjxKlTJa7MRppDIFGqqWH25z7HuHnzBu+cOfT8877uwCjkEEjYmClTaLrrrtznDhDBnocfpnvvWad9WBVzCCRMEpOvuIILPvKR3GN6Dh/uP3fA1x0YNRwCiauprWXuF79I7YwZucccfPZZDm/Z4t2CUcIhYNQ1NjL7s5/NfWPTvpMn2ff44/QeP+4gGAUcAoYkZlx3HZMWLsw95s2dO+nasKGEVdlIcQgYADXjxjH79tvznzvQ10fnhg2c2LWrtIVZyTkEDMgOEl5++Tn9wKjn8GH2PPwwvb7uQFVzCNjbVCgw65Zb8t/BCDjy0kscePppHxuoYg4Be4e6hgbmfOELkPMgIX19dK5fz8ndux0EVcohYGeZdtVVTPv4xwd87sipU/ygrY3/3rqVVw4eJCI41dVFx9q1uX6VaJXHIWBn0ZgxzL7tNgqTJr2j/ejp03zjxRf5n+3b+fGuXfzL5s38Mbvy0OEXXuDEG2+Uo1wbIoeAnUUS4y+6iIalS9/xA6M9x4/z+87Ot5cPnz7Nr7JTiPu6u+k7eXLEa7WhcwjYgFRTQ/2NN77j4qRja2qoO+NYwZS8vzuwiuUQsHdVmDyZOXfeSU1dHQDzJ03i3z/wAWbW1VFXKHBtYyN3Z3c/HjtrFmNnzy5nuXaeBr28mKVLEpMXLmTaJz7B4eefp+fIET7d1MSVM2ZwoqeHuRMnMq5QQGPH0rBsWf4bnFhF8ZaAvScVCly4ciWXrF7N+PnzUU0NcyZM4H1TpjCuUKBmwgRm3347M6+/HuW8QIlVFm8J2KBq6uqYtGAB7//61zn47LMcfeUVek+eZPy8eUz/5CeZeNll1IzxR6la+V/OcpHE2JkzaVi2jIZlyyDi7W8OvAVQ3RwCdk7e/oP3H/6o4WMCZolzCJglziFgljiHgFniHAJmiXMImCXOIWCWOIeA
WeIcAmaJcwiYJc4hYJY4h4BZ4gYNAUnjJG2W9LKkbZK+mbVPl7RR0s5sOq1ozGpJbZJ2SLqxlG/AzIYmz5ZAN3BtRPwT8EFgsaSPAauATRHRDGzKlpG0AFgOLAQWA/dLynkRezMbaYOGQPQ7li3WZo8AlgBrsvY1wK3Z/BJgbUR0R8QuoA1YNJxFm9nwyXVMQFJB0ktAJ7AxIp4HGiKiAyCbzsq6zwV2Fw1vz9rOXOdKSa2SWruya9eb2cjLFQIR0RsRHwSagEWSLn+P7gNdbeKs+1NFxAMR0RIRLfXncO87Mxte5/TtQEQcAp6hf19/v6RGgGz61l0p2oF5RcOagL1DLdTMSiPPtwP1kqZm8+OB64E/A+uBFVm3FcBT2fx6YLmkOknzgWZg8zDXbWbDJM81BhuBNdkR/hpgXURskPQHYJ2ku4G/AbcDRMQ2SeuAV4Ee4J6I6C1N+WY2VKqE20m3tLREa2trucswG9UkbYmIljPbfcagWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniHAJmiXMImCXOIWCWOIeAWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniHAJmiXMImCXOIWCWOIeAWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniHAJmicsdApIKkl6UtCFbni5po6Sd2XRaUd/Vktok7ZB0YykKN7PhcS5bAl8GthctrwI2RUQzsClbRtICYDmwEFgM3C+pMDzlmtlwyxUCkpqATwPfK2peAqzJ5tcAtxa1r42I7ojYBbQBi4alWjMbdnm3BL4DfBXoK2priIgOgGw6K2ufC+wu6teetZlZBRo0BCTdDHRGxJac69QAbTHAeldKapXU2tXVlXPVZjbc8mwJXAV8RtJfgbXAtZIeAfZLagTIpp1Z/3ZgXtH4JmDvmSuNiAcioiUiWurr64fwFsxsKAYNgYhYHRFNEXEx/Qf8fhMRdwLrgRVZtxXAU9n8emC5pDpJ84FmYPOwV25mw2LMEMZ+C1gn6W7gb8DtABGxTdI64FWgB7gnInqHXKmZlYQiztpdH3EtLS3R2tpa7jLMRjVJWyKi5cx2nzFoljiHgFniHAJmiXMImCXOIWCWOIeAWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniHAJmiXMImCXOIWCWOIeAWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniHAJmiXMImCXOIWCWOIeAWeIcAmaJcwiYJc4hYJY4h4BZ4hwCZolzCJglziFgljiHgFniFBHlrgFJXcCbwIFy15LTTKqnVqiuel1r6VwUEfVnNlZECABIao2IlnLXkUc11QrVVa9rHXneHTBLnEPALHGVFAIPlLuAc1BNtUJ11etaR1jFHBMws/KopC0BMyuDsoeApMWSdkhqk7Sq3PUASHpIUqekrUVt0yVtlLQzm04rem51Vv8OSTeOcK3zJP1W0nZJ2yR9uVLrlTRO0mZJL2e1frNSay16/YKkFyVtqPRaz1tElO0BFIDXgEuAscDLwIJy1pTV9QngSmBrUdt/Aauy+VXAf2bzC7K664D52fspjGCtjcCV2fxk4C9ZTRVXLyBgUjZfCzwPfKwSay2q+V+BHwIbKvlzMJRHubcEFgFtEfF6RJwC1gJLylwTEfEccPCM5iXAmmx+DXBrUfvaiOiOiF1AG/3va0REREdE/CmbPwpsB+ZWYr3R71i2WJs9ohJrBZDUBHwa+F5Rc0XWOhTlDoG5wO6i5fasrRI1REQH9P/hAbOy9op5D5IuBj5E//+wFVlvtnn9EtAJbIyIiq0V+A7wVaCvqK1Saz1v5Q4BDdBWbV9XVMR7kDQJeAL4SkQcea+uA7SNWL0R0RsRHwSagEWSLn+P7mWrVdLNQGdEbMk7ZIC2qvgslzsE2oF5RctNwN4y1TKY/ZIaAbJpZ9Ze
9vcgqZb+AHg0Ip7Mmiu2XoCIOAQ8AyymMmu9CviMpL/Sv5t6raRHKrTWISl3CLwANEuaL2kssBxYX+aa3s16YEU2vwJ4qqh9uaQ6SfOBZmDzSBUlScCDwPaIuK+S65VUL2lqNj8euB74cyXWGhGrI6IpIi6m/3P5m4i4sxJrHbJyH5kEbqL/iPZrwNfKXU9W04+ADuA0/Ql/NzAD2ATszKbTi/p/Lat/B/DPI1zr1fRvdr4CvJQ9bqrEeoErgBezWrcC38jaK67WM+q+hn98O1DRtZ7Pw2cMmiWu3LsDZlZmDgGzxDkEzBLnEDBLnEPALHEOAbPEOQTMEucQMEvc/wNBeh6lJaANCgAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
     "import gym\n",
     "from matplotlib import pyplot as plt\n",
     "%matplotlib inline\n",
     "\n",
     "#Create the environment\n",
     "env = gym.make('Pendulum-v1')\n",
     "env.reset()\n",
     "\n",
     "\n",
     "#Render the current game frame inline with matplotlib\n",
     "def show():\n",
     "    #mode='rgb_array' returns a pixel array instead of opening a window\n",
     "    #(the mode argument to render is deprecated in newer gym versions)\n",
     "    plt.imshow(env.render(mode='rgb_array'))\n",
     "    plt.show()\n",
     "\n",
     "\n",
     "show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(Sequential(\n",
       "   (0): Linear(in_features=3, out_features=128, bias=True)\n",
       "   (1): ReLU()\n",
       "   (2): Linear(in_features=128, out_features=11, bias=True)\n",
       " ),\n",
       " Sequential(\n",
       "   (0): Linear(in_features=3, out_features=128, bias=True)\n",
       "   (1): ReLU()\n",
       "   (2): Linear(in_features=128, out_features=11, bias=True)\n",
       " ))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "import torch\n",
     "\n",
     "#Online network: maps the 3 state features to 11 discrete action values.\n",
     "#This is the model that actually picks actions and gets trained.\n",
     "model = torch.nn.Sequential(\n",
     "    torch.nn.Linear(3, 128),\n",
     "    torch.nn.ReLU(),\n",
     "    torch.nn.Linear(128, 11),\n",
     ")\n",
     "\n",
     "#Target network with the same architecture, used to score states\n",
     "#(updated with a delay, see the target computation further below)\n",
     "next_model = torch.nn.Sequential(\n",
     "    torch.nn.Linear(3, 128),\n",
     "    torch.nn.ReLU(),\n",
     "    torch.nn.Linear(128, 11),\n",
     ")\n",
     "\n",
     "#Copy model's parameters into next_model so both networks start identical\n",
     "next_model.load_state_dict(model.state_dict())\n",
     "\n",
     "model, next_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(7, 0.7999999999999998)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import random\n",
    "\n",
    "\n",
    "def get_action(state):\n",
    "    #走神经网络,得到一个动作\n",
    "    state = torch.FloatTensor(state).reshape(1, 3)\n",
    "    action = model(state).argmax().item()\n",
    "\n",
    "    if random.random() < 0.01:\n",
    "        action = random.choice(range(11))\n",
    "\n",
    "    #离散动作连续化\n",
    "    action_continuous = action\n",
    "    action_continuous /= 10\n",
    "    action_continuous *= 4\n",
    "    action_continuous -= 2\n",
    "\n",
    "    return action, action_continuous\n",
    "\n",
    "\n",
    "get_action([0.29292667, 0.9561349, 1.0957013])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((200, 0), 200)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#样本池\n",
    "datas = []\n",
    "\n",
    "\n",
    "#向样本池中添加N条数据,删除M条最古老的数据\n",
    "def update_data():\n",
    "    old_count = len(datas)\n",
    "\n",
    "    #玩到新增了N个数据为止\n",
    "    while len(datas) - old_count < 200:\n",
    "        #初始化游戏\n",
    "        state = env.reset()\n",
    "\n",
    "        #玩到游戏结束为止\n",
    "        over = False\n",
    "        while not over:\n",
    "            #根据当前状态得到一个动作\n",
    "            action, action_continuous = get_action(state)\n",
    "\n",
    "            #执行动作,得到反馈\n",
    "            next_state, reward, over, _ = env.step([action_continuous])\n",
    "\n",
    "            #记录数据样本\n",
    "            datas.append((state, action, reward, next_state, over))\n",
    "\n",
    "            #更新游戏状态,开始下一个动作\n",
    "            state = next_state\n",
    "\n",
    "    update_count = len(datas) - old_count\n",
    "    drop_count = max(len(datas) - 5000, 0)\n",
    "\n",
    "    #数据上限,超出时从最古老的开始删除\n",
    "    while len(datas) > 5000:\n",
    "        datas.pop(0)\n",
    "\n",
    "    return update_count, drop_count\n",
    "\n",
    "\n",
    "update_data(), len(datas)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/ipykernel_launcher.py:7: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at  /opt/conda/conda-bld/pytorch_1640811701593/work/torch/csrc/utils/tensor_new.cpp:201.)\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(tensor([[-8.9951e-01, -4.3690e-01,  3.7928e+00],\n",
       "         [-1.6644e-01,  9.8605e-01,  2.9892e+00],\n",
       "         [-3.0922e-01, -9.5099e-01,  4.1052e+00],\n",
       "         [-4.9656e-01, -8.6800e-01,  4.6962e+00],\n",
       "         [-9.3330e-01,  3.5911e-01, -3.4026e-01],\n",
       "         [-8.8602e-01,  4.6365e-01,  2.8801e+00],\n",
       "         [ 2.8952e-02, -9.9958e-01, -1.8164e+00],\n",
       "         [ 9.9426e-01,  1.0697e-01,  1.1617e+00],\n",
       "         [-4.1777e-01, -9.0855e-01, -3.6801e+00],\n",
       "         [-7.0695e-01, -7.0726e-01,  3.1443e+00],\n",
       "         [-5.3940e-01,  8.4205e-01,  5.3068e+00],\n",
       "         [-8.0284e-01,  5.9619e-01, -2.1768e+00],\n",
       "         [-9.9646e-01,  8.4086e-02,  6.4359e+00],\n",
       "         [ 9.8520e-01,  1.7142e-01,  1.3019e+00],\n",
       "         [-9.9646e-01, -8.4011e-02,  1.7648e+00],\n",
       "         [ 3.1591e-01, -9.4879e-01,  2.8399e+00],\n",
       "         [-9.1077e-01, -4.1291e-01, -5.3200e+00],\n",
       "         [-9.3408e-01,  3.5707e-01,  6.6605e+00],\n",
       "         [-9.6071e-01, -2.7756e-01,  5.6075e+00],\n",
       "         [-8.5524e-01, -5.1824e-01,  2.4473e-01],\n",
       "         [-8.9643e-01,  4.4318e-01, -5.9731e+00],\n",
       "         [-5.9022e-01, -8.0724e-01, -1.9506e+00],\n",
       "         [-4.3651e-01, -8.9970e-01, -4.5756e+00],\n",
       "         [-9.9160e-01,  1.2936e-01,  3.7000e+00],\n",
       "         [ 3.3033e-01,  9.4386e-01,  1.1804e+00],\n",
       "         [-3.5195e-01,  9.3602e-01,  3.8487e+00],\n",
       "         [-8.3451e-01, -5.5100e-01, -5.5800e+00],\n",
       "         [ 1.0051e-01, -9.9494e-01,  2.0795e+00],\n",
       "         [-5.4872e-01,  8.3601e-01, -6.0177e+00],\n",
       "         [-9.6282e-01,  2.7014e-01,  5.2856e+00],\n",
       "         [-8.9566e-01, -4.4474e-01,  1.1415e+00],\n",
       "         [-4.5005e-01, -8.9300e-01,  1.5559e+00],\n",
       "         [-1.1321e-01,  9.9357e-01, -7.8134e-01],\n",
       "         [-9.9903e-01,  4.4043e-02, -6.4390e+00],\n",
       "         [-9.9276e-01, -1.2008e-01, -2.3616e+00],\n",
       "         [ 8.9737e-01, -4.4128e-01,  1.7498e+00],\n",
       "         [-1.2630e-01,  9.9199e-01,  2.6384e-01],\n",
       "         [-1.0000e+00, -8.7377e-04,  5.4882e+00],\n",
       "         [ 3.7834e-02, -9.9928e-01,  5.4276e+00],\n",
       "         [-7.8102e-01, -6.2451e-01, -2.8632e+00],\n",
       "         [-9.1957e-01, -3.9292e-01,  1.3762e+00],\n",
       "         [ 9.8444e-01, -1.7574e-01,  8.4247e-01],\n",
       "         [-9.8715e-01,  1.5980e-01, -5.9730e+00],\n",
       "         [-2.7856e-01,  9.6042e-01,  1.9853e+00],\n",
       "         [-2.2110e-01, -9.7525e-01, -3.9642e+00],\n",
       "         [ 4.8733e-01, -8.7322e-01, -4.4805e-01],\n",
       "         [-9.6171e-01,  2.7408e-01,  6.5623e-01],\n",
       "         [-5.5901e-01,  8.2916e-01,  4.6707e+00],\n",
       "         [ 3.5699e-01,  9.3411e-01, -1.8015e+00],\n",
       "         [-9.8643e-01,  1.6421e-01,  1.2717e+00],\n",
       "         [-7.6603e-01,  6.4280e-01,  6.0584e+00],\n",
       "         [-9.9781e-01, -6.6084e-02,  3.9170e+00],\n",
       "         [-9.7170e-01, -2.3620e-01, -2.0644e+00],\n",
       "         [-7.6033e-01,  6.4953e-01,  5.4126e+00],\n",
       "         [-9.4441e-01, -3.2877e-01,  1.5627e+00],\n",
       "         [-9.6719e-01, -2.5405e-01,  1.6933e+00],\n",
       "         [-6.9335e-01,  7.2060e-01, -1.4688e+00],\n",
       "         [ 2.7152e-01,  9.6243e-01, -2.8234e+00],\n",
       "         [-3.0058e-01,  9.5376e-01, -5.5106e+00],\n",
       "         [-5.1809e-01, -8.5533e-01,  2.1374e+00],\n",
       "         [-7.3702e-01,  6.7587e-01,  1.6937e+00],\n",
       "         [-9.4219e-01, -3.3507e-01, -1.6931e+00],\n",
       "         [ 4.1763e-01, -9.0862e-01,  2.1883e+00],\n",
       "         [-4.0731e-01, -9.1329e-01,  9.4619e-01]]),\n",
       " tensor([[ 6],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 7],\n",
       "         [ 3],\n",
       "         [ 2],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 3],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 3],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 5],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [10],\n",
       "         [ 3],\n",
       "         [ 3],\n",
       "         [ 6],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 5],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [10],\n",
       "         [ 7],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [ 7],\n",
       "         [ 6],\n",
       "         [ 6],\n",
       "         [ 3],\n",
       "         [10],\n",
       "         [ 3],\n",
       "         [ 6],\n",
       "         [ 7],\n",
       "         [ 3],\n",
       "         [ 1],\n",
       "         [ 6]]),\n",
       " tensor([[ -8.6718],\n",
       "         [ -3.9149],\n",
       "         [ -5.2393],\n",
       "         [ -6.5755],\n",
       "         [ -7.7088],\n",
       "         [ -7.9030],\n",
       "         [ -2.7078],\n",
       "         [ -0.1466],\n",
       "         [ -5.3621],\n",
       "         [ -6.5394],\n",
       "         [ -7.3987],\n",
       "         [ -6.7387],\n",
       "         [-13.4913],\n",
       "         [ -0.1993],\n",
       "         [ -9.6598],\n",
       "         [ -2.3676],\n",
       "         [-10.2073],\n",
       "         [-12.1456],\n",
       "         [-11.3266],\n",
       "         [ -6.7500],\n",
       "         [-10.7640],\n",
       "         [ -5.2305],\n",
       "         [ -6.1848],\n",
       "         [-10.4410],\n",
       "         [ -1.6631],\n",
       "         [ -5.2086],\n",
       "         [ -9.6579],\n",
       "         [ -2.5938],\n",
       "         [ -8.2514],\n",
       "         [-11.0195],\n",
       "         [ -7.3167],\n",
       "         [ -4.3941],\n",
       "         [ -2.9017],\n",
       "         [-13.7415],\n",
       "         [ -9.6861],\n",
       "         [ -0.5152],\n",
       "         [ -2.8889],\n",
       "         [-12.8767],\n",
       "         [ -5.2960],\n",
       "         [ -6.9069],\n",
       "         [ -7.6850],\n",
       "         [ -0.1023],\n",
       "         [-12.4553],\n",
       "         [ -3.8281],\n",
       "         [ -4.7896],\n",
       "         [ -1.1481],\n",
       "         [ -8.2460],\n",
       "         [ -6.8651],\n",
       "         [ -1.7824],\n",
       "         [ -9.0227],\n",
       "         [ -9.6414],\n",
       "         [-10.9929],\n",
       "         [ -8.8550],\n",
       "         [ -8.8577],\n",
       "         [ -8.1213],\n",
       "         [ -8.6085],\n",
       "         [ -5.6776],\n",
       "         [ -2.4803],\n",
       "         [ -6.5571],\n",
       "         [ -4.9320],\n",
       "         [ -6.0448],\n",
       "         [ -8.1268],\n",
       "         [ -1.7809],\n",
       "         [ -4.0510]]),\n",
       " tensor([[-8.0897e-01, -5.8785e-01,  3.5252e+00],\n",
       "         [-3.5195e-01,  9.3602e-01,  3.8487e+00],\n",
       "         [-1.4130e-01, -9.8997e-01,  3.4520e+00],\n",
       "         [-3.0922e-01, -9.5099e-01,  4.1052e+00],\n",
       "         [-9.2982e-01,  3.6800e-01, -1.9092e-01],\n",
       "         [-9.5088e-01,  3.0955e-01,  3.3478e+00],\n",
       "         [-9.3213e-02, -9.9565e-01, -2.4461e+00],\n",
       "         [ 9.8520e-01,  1.7142e-01,  1.3019e+00],\n",
       "         [-5.9965e-01, -8.0026e-01, -4.2415e+00],\n",
       "         [-6.0637e-01, -7.9518e-01,  2.6738e+00],\n",
       "         [-7.6603e-01,  6.4280e-01,  6.0584e+00],\n",
       "         [-7.4435e-01,  6.6779e-01, -1.8497e+00],\n",
       "         [-9.7326e-01, -2.2970e-01,  6.3190e+00],\n",
       "         [ 9.6970e-01,  2.4430e-01,  1.4905e+00],\n",
       "         [-9.8521e-01, -1.7135e-01,  1.7618e+00],\n",
       "         [ 4.1763e-01, -9.0862e-01,  2.1883e+00],\n",
       "         [-9.9047e-01, -1.3772e-01, -5.7497e+00],\n",
       "         [-9.9992e-01,  1.2715e-02,  7.0483e+00],\n",
       "         [-8.4873e-01, -5.2883e-01,  5.5193e+00],\n",
       "         [-8.5586e-01, -5.1721e-01, -2.3946e-02],\n",
       "         [-7.3361e-01,  6.7957e-01, -5.7608e+00],\n",
       "         [-6.8393e-01, -7.2955e-01, -2.4360e+00],\n",
       "         [-6.5050e-01, -7.5951e-01, -5.1304e+00],\n",
       "         [-9.9781e-01, -6.6084e-02,  3.9170e+00],\n",
       "         [ 2.3405e-01,  9.7222e-01,  2.0083e+00],\n",
       "         [-5.5901e-01,  8.2916e-01,  4.6707e+00],\n",
       "         [-9.6164e-01, -2.7433e-01, -6.1133e+00],\n",
       "         [ 1.6952e-01, -9.8553e-01,  1.3933e+00],\n",
       "         [-3.0058e-01,  9.5376e-01, -5.5106e+00],\n",
       "         [-1.0000e+00, -8.7377e-04,  5.4882e+00],\n",
       "         [-8.7552e-01, -4.8317e-01,  8.6791e-01],\n",
       "         [-4.0731e-01, -9.1329e-01,  9.4619e-01],\n",
       "         [-1.2630e-01,  9.9199e-01,  2.6384e-01],\n",
       "         [-9.3220e-01,  3.6195e-01, -6.5260e+00],\n",
       "         [-9.9997e-01,  8.2080e-03, -2.5716e+00],\n",
       "         [ 9.2752e-01, -3.7378e-01,  1.4788e+00],\n",
       "         [-1.8201e-01,  9.8330e-01,  1.1278e+00],\n",
       "         [-9.6071e-01, -2.7756e-01,  5.6075e+00],\n",
       "         [ 2.7131e-01, -9.6249e-01,  4.7381e+00],\n",
       "         [-8.7666e-01, -4.8111e-01, -3.4515e+00],\n",
       "         [-8.9566e-01, -4.4474e-01,  1.1415e+00],\n",
       "         [ 9.9048e-01, -1.3768e-01,  7.7067e-01],\n",
       "         [-8.9643e-01,  4.4318e-01, -5.9731e+00],\n",
       "         [-4.0555e-01,  9.1407e-01,  2.7056e+00],\n",
       "         [-4.3651e-01, -8.9970e-01, -4.5756e+00],\n",
       "         [ 4.4384e-01, -8.9611e-01, -9.8296e-01],\n",
       "         [-9.7400e-01,  2.2656e-01,  9.8179e-01],\n",
       "         [-7.6033e-01,  6.4953e-01,  5.4126e+00],\n",
       "         [ 3.9411e-01,  9.1907e-01, -8.0096e-01],\n",
       "         [-9.9602e-01,  8.9096e-02,  1.5149e+00],\n",
       "         [-9.3408e-01,  3.5707e-01,  6.6605e+00],\n",
       "         [-9.6574e-01, -2.5950e-01,  3.9275e+00],\n",
       "         [-9.9276e-01, -1.2008e-01, -2.3616e+00],\n",
       "         [-9.1871e-01,  3.9492e-01,  6.0198e+00],\n",
       "         [-9.1957e-01, -3.9292e-01,  1.3762e+00],\n",
       "         [-9.4441e-01, -3.2877e-01,  1.5627e+00],\n",
       "         [-6.5464e-01,  7.5594e-01, -1.0484e+00],\n",
       "         [ 3.5699e-01,  9.3411e-01, -1.8015e+00],\n",
       "         [-5.9496e-02,  9.9823e-01, -4.9153e+00],\n",
       "         [-4.5005e-01, -8.9300e-01,  1.5559e+00],\n",
       "         [-8.1031e-01,  5.8600e-01,  2.3206e+00],\n",
       "         [-9.7170e-01, -2.3620e-01, -2.0644e+00],\n",
       "         [ 4.7430e-01, -8.8036e-01,  1.2668e+00],\n",
       "         [-3.9259e-01, -9.1971e-01,  3.2122e-01]]),\n",
       " tensor([[0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0],\n",
       "         [0]]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#获取一批数据样本\n",
    "def get_sample():\n",
    "    #从样本池中采样\n",
    "    samples = random.sample(datas, 64)\n",
    "\n",
    "    #[b, 3]\n",
    "    state = torch.FloatTensor([i[0] for i in samples]).reshape(-1, 3)\n",
    "    #[b, 1]\n",
    "    action = torch.LongTensor([i[1] for i in samples]).reshape(-1, 1)\n",
    "    #[b, 1]\n",
    "    reward = torch.FloatTensor([i[2] for i in samples]).reshape(-1, 1)\n",
    "    #[b, 3]\n",
    "    next_state = torch.FloatTensor([i[3] for i in samples]).reshape(-1, 3)\n",
    "    #[b, 1]\n",
    "    over = torch.LongTensor([i[4] for i in samples]).reshape(-1, 1)\n",
    "\n",
    "    return state, action, reward, next_state, over\n",
    "\n",
    "\n",
    "state, action, reward, next_state, over = get_sample()\n",
    "\n",
    "state, action, reward, next_state, over"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1.3391],\n",
       "        [ 0.8844],\n",
       "        [ 1.4963],\n",
       "        [ 1.6260],\n",
       "        [ 0.4134],\n",
       "        [ 0.9186],\n",
       "        [ 0.5917],\n",
       "        [ 0.4420],\n",
       "        [ 0.6617],\n",
       "        [ 1.2486],\n",
       "        [ 1.5474],\n",
       "        [ 0.4382],\n",
       "        [-0.3254],\n",
       "        [ 0.4708],\n",
       "        [ 0.7500],\n",
       "        [ 1.1212],\n",
       "        [ 0.6998],\n",
       "        [ 1.9591],\n",
       "        [ 1.7438],\n",
       "        [ 0.4457],\n",
       "        [ 0.6775],\n",
       "        [ 0.4928],\n",
       "        [ 0.7196],\n",
       "        [ 1.1974],\n",
       "        [ 0.4550],\n",
       "        [ 1.1274],\n",
       "        [ 0.7116],\n",
       "        [ 0.9745],\n",
       "        [ 0.5884],\n",
       "        [ 1.2917],\n",
       "        [ 0.6311],\n",
       "        [ 0.8295],\n",
       "        [ 0.2662],\n",
       "        [ 0.7518],\n",
       "        [ 0.4914],\n",
       "        [ 0.6842],\n",
       "        [ 0.3503],\n",
       "        [ 1.6876],\n",
       "        [ 1.7730],\n",
       "        [ 0.5106],\n",
       "        [ 0.6974],\n",
       "        [ 0.4216],\n",
       "        [ 0.7141],\n",
       "        [ 0.6672],\n",
       "        [ 0.7444],\n",
       "        [ 0.4185],\n",
       "        [ 0.4414],\n",
       "        [ 1.3738],\n",
       "        [ 0.3089],\n",
       "        [ 0.5873],\n",
       "        [ 1.7667],\n",
       "        [ 1.2800],\n",
       "        [ 0.4701],\n",
       "        [ 1.5918],\n",
       "        [ 0.7444],\n",
       "        [ 0.7657],\n",
       "        [ 0.3604],\n",
       "        [ 0.3804],\n",
       "        [ 0.5074],\n",
       "        [ 1.0223],\n",
       "        [ 0.6169],\n",
       "        [ 0.4427],\n",
       "        [-0.2233],\n",
       "        [ 0.6556]], grad_fn=<GatherBackward0>)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_value(state, action):\n",
    "    #使用状态计算出动作的logits\n",
    "    #[b, 3] -> [b, 11]\n",
    "    value = model(state)\n",
    "\n",
    "    #根据实际使用的action取出每一个值\n",
    "    #这个值就是模型评估的在该状态下,执行动作的分数\n",
    "    #在执行动作前,显然并不知道会得到的反馈和next_state\n",
    "    #所以这里不能也不需要考虑next_state和reward\n",
    "    #[b, 11] -> [b, 1]\n",
    "    value = value.gather(dim=1, index=action)\n",
    "\n",
    "    return value\n",
    "\n",
    "\n",
    "get_value(state, action)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ -7.3859],\n",
       "        [ -2.8100],\n",
       "        [ -3.9300],\n",
       "        [ -5.1091],\n",
       "        [ -7.3004],\n",
       "        [ -6.8470],\n",
       "        [ -2.0688],\n",
       "        [  0.3148],\n",
       "        [ -4.7427],\n",
       "        [ -5.4069],\n",
       "        [ -5.6674],\n",
       "        [ -6.3401],\n",
       "        [-11.6017],\n",
       "        [  0.3039],\n",
       "        [ -8.9078],\n",
       "        [ -1.4473],\n",
       "        [ -9.5011],\n",
       "        [-10.0887],\n",
       "        [ -9.6153],\n",
       "        [ -6.3464],\n",
       "        [-10.1572],\n",
       "        [ -4.7622],\n",
       "        [ -5.5225],\n",
       "        [ -9.1866],\n",
       "        [ -1.0370],\n",
       "        [ -3.8622],\n",
       "        [ -8.9257],\n",
       "        [ -1.8541],\n",
       "        [ -7.7541],\n",
       "        [ -9.3656],\n",
       "        [ -6.7607],\n",
       "        [ -3.7516],\n",
       "        [ -2.5585],\n",
       "        [-13.0316],\n",
       "        [ -9.1908],\n",
       "        [  0.0690],\n",
       "        [ -2.4266],\n",
       "        [-11.1678],\n",
       "        [ -3.7435],\n",
       "        [ -6.3418],\n",
       "        [ -7.0665],\n",
       "        [  0.2925],\n",
       "        [-11.7914],\n",
       "        [ -3.0105],\n",
       "        [ -4.0844],\n",
       "        [ -0.6920],\n",
       "        [ -7.7349],\n",
       "        [ -5.3051],\n",
       "        [ -1.4953],\n",
       "        [ -8.3991],\n",
       "        [ -7.7215],\n",
       "        [ -9.6924],\n",
       "        [ -8.3735],\n",
       "        [ -7.1087],\n",
       "        [ -7.4379],\n",
       "        [ -7.8790],\n",
       "        [ -5.3611],\n",
       "        [ -2.1776],\n",
       "        [ -6.1027],\n",
       "        [ -4.1191],\n",
       "        [ -5.3007],\n",
       "        [ -7.6662],\n",
       "        [ -1.0991],\n",
       "        [ -3.5733]])"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_target(reward, next_state, over):\n",
    "    #上面已经把模型认为的状态下执行动作的分数给评估出来了\n",
    "    #下面使用next_state和reward计算真实的分数\n",
    "    #针对一个状态,它到底应该多少分,可以使用以往模型积累的经验评估\n",
    "    #这也是没办法的办法,因为显然没有精确解,这里使用延迟更新的next_model评估\n",
    "\n",
    "    #使用next_state计算下一个状态的分数\n",
    "    #[b, 3] -> [b, 11]\n",
    "    with torch.no_grad():\n",
    "        target = next_model(next_state)\n",
    "    \"\"\"以下是主要的Double DQN和DQN的区别\"\"\"\n",
    "    #取所有动作中分数最大的\n",
    "    #[b, 11] -> [b]\n",
    "    #target = target.max(dim=1)[0]\n",
    "\n",
    "    #使用model计算下一个状态的分数\n",
    "    #[b, 3] -> [b, 11]\n",
    "    with torch.no_grad():\n",
    "        model_target = model(next_state)\n",
    "\n",
    "    #取分数最高的下标\n",
    "    #[b, 11] -> [b, 1]\n",
    "    model_target = model_target.max(dim=1)[1]\n",
    "    model_target = model_target.reshape(-1, 1)\n",
    "\n",
    "    #以这个下标取next_value当中的值\n",
    "    #[b, 11] -> [b]\n",
    "    target = target.gather(dim=1, index=model_target)\n",
    "    \"\"\"以上是主要的Double DQN和DQN的区别\"\"\"\n",
    "\n",
    "    #下一个状态的分数乘以一个系数,相当于权重\n",
    "    target *= 0.98\n",
    "\n",
    "    #如果next_state已经游戏结束,则next_state的分数是0\n",
    "    #因为如果下一步已经游戏结束,显然不需要再继续玩下去,也就不需要考虑next_state了.\n",
    "    #[b, 1] * [b, 1] -> [b, 1]\n",
    "    target *= (1 - over)\n",
    "\n",
    "    #加上reward就是最终的分数\n",
    "    #[b, 1] + [b, 1] -> [b, 1]\n",
    "    target += reward\n",
    "\n",
    "    return target\n",
    "\n",
    "\n",
    "get_target(reward, next_state, over)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-1133.1286610540683"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from IPython import display\n",
    "\n",
    "\n",
    "def test(play):\n",
    "    #初始化游戏\n",
    "    state = env.reset()\n",
    "\n",
    "    #记录反馈值的和,这个值越大越好\n",
    "    reward_sum = 0\n",
    "\n",
    "    #玩到游戏结束为止\n",
    "    over = False\n",
    "    while not over:\n",
    "        #根据当前状态得到一个动作\n",
    "        _, action_continuous = get_action(state)\n",
    "\n",
    "        #执行动作,得到反馈\n",
    "        state, reward, over, _ = env.step([action_continuous])\n",
    "        reward_sum += reward\n",
    "\n",
    "        #打印动画\n",
    "        if play and random.random() < 0.2:  #跳帧\n",
    "            display.clear_output(wait=True)\n",
    "            show()\n",
    "\n",
    "    return reward_sum\n",
    "\n",
    "\n",
    "test(play=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "id": "OHoSU6uI-xIt",
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 400 200 0 -1479.498676235349\n",
      "20 4400 200 0 -1204.547655987538\n",
      "40 5000 200 200 -1312.7791582019645\n",
      "60 5000 200 200 -739.5277905665986\n",
      "80 5000 200 200 -1198.5771182167168\n",
      "100 5000 200 200 -450.9070913566222\n",
      "120 5000 200 200 -338.56148842632837\n",
      "140 5000 200 200 -594.8304707965137\n",
      "160 5000 200 200 -174.31797969196458\n",
      "180 5000 200 200 -171.95442129758607\n"
     ]
    }
   ],
   "source": [
    "def train():\n",
    "    model.train()\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n",
    "    loss_fn = torch.nn.MSELoss()\n",
    "\n",
    "    #训练N次\n",
    "    for epoch in range(200):\n",
    "        #更新N条数据\n",
    "        update_count, drop_count = update_data()\n",
    "\n",
    "        #每次更新过数据后,学习N次\n",
    "        for i in range(200):\n",
    "            #采样一批数据\n",
    "            state, action, reward, next_state, over = get_sample()\n",
    "\n",
    "            #计算一批样本的value和target\n",
    "            value = get_value(state, action)\n",
    "            target = get_target(reward, next_state, over)\n",
    "\n",
    "            #更新参数\n",
    "            loss = loss_fn(value, target)\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            #把model的参数复制给next_model\n",
    "            if (i + 1) % 50 == 0:\n",
    "                next_model.load_state_dict(model.state_dict())\n",
    "\n",
    "        if epoch % 20 == 0:\n",
    "            test_result = sum([test(play=False) for _ in range(20)]) / 20\n",
    "            print(epoch, len(datas), update_count, drop_count, test_result)\n",
    "\n",
    "\n",
    "train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAD8CAYAAAB3lxGOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8QVMy6AAAACXBIWXMAAAsTAAALEwEAmpwYAAAQY0lEQVR4nO3df2zU933H8efLZ2NjCAkEQ4xNAtkcKSTZkshlmTKtVZoJ0lQlioREpa78gcQ/TEq1SR2saqdKi5Ttj6p/VPkDNdEsNStCaqWgqO2GSJOqyhpiGpKFUIpbKLgg7AZBIPi33/vD3yQXY3xf8J3vLp/XQzrd9z73ufPrdPbL3/ve976niMDM0tVQ7QBmVl0uAbPEuQTMEucSMEucS8AscS4Bs8RVrAQkbZR0TFKfpJ2V+jlmNjeqxH4CkgrAb4G/A/qBN4AvR8S7Zf9hZjYnlVoTWA/0RcTvI2IU2ANsqtDPMrM5aKzQ/XYAp4su9wN/da3Jy5cvjzVr1lQoipkBHDp06E8R0TZ9vFIloBnGPvG6Q9J2YDvA7bffTm9vb4WimBmApD/MNF6plwP9wOqiy53AmeIJEbE7Irojorut7apyMrN5UqkSeAPokrRW0gJgC7CvQj/LzOagIi8HImJc0j8A/w0UgOcj4kglfpaZzU2ltgkQET8BflKp+zez8vAeg2aJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWuJIlIOl5SQOS3ikaWyZpv6Tj2fnSout2SeqTdEzShkoFN7PyyLMm8J/AxmljO4EDEdEFHMguI2kdsAW4J7vNs5IKZUtrZmVXsgQi4hfA+WnDm4CebLkHeKJofE9EjETECaAPWF+eqGZWCTe6TWBlRJwFyM5XZOMdwOmief3Z2FUkbZfUK6l3cHDwBmOY2VyVe8OgZhiLmSZGxO6I6I6I7ra2tjLHMLO8brQEzklqB8jOB7LxfmB10bxO4MyNxzOzSrvREtgHbM2WtwIvFo1vkdQsaS3QBRycW0Qzq6TGUhMk/RD4HLBcUj/wr8AzwF5J24BTwGaAiDgiaS/wLjAO7IiIiQplN7MyKFkCEfHla1z1+WvMfxp4ei6hzGz+eI9Bs8S5BMwS5xIwS5xLwCxxJTcMms1kcmyMoZMnuXz0KJPDwzTfdhuL772XpqVLkWbaZ8xqlUvArktEMHH5MmdeeIH3XnmFyaEhiIBCgZZVq+j46le5+TOfQQ1eyawXfqbsusToKKefe47Bn/6UyStXpgoAYGKC4dOn+cP3vsf7hw8TMePe4laDXAKWW0Rw4Ve/4vyrr378xz/N+MWL/LGnh4krV+Y5nd0ol4Bdl/defhkmZt8JdOjkSa4cPz5PiWyuXAKW3+QkkyMjpedFMHLunF8S1AmXgOU2OTaWrwSAC6+9VuE0Vi4uAcstRkdzl4DVD5eA5TZ28SKj771X7RhWZi4Byy1GR5kcHs41t7Wrq8JprFxcAlYRC1asKD3JaoJLwHKLEm8NFmtcvLiCSaycXAKW2/gHH+Se29DS4s8Q1AmXgOU2celStSNYBbgELLehU6euubuw1S+XgOU2lvPtwcLixTQtW1bhNFYuLgEru0JrK00331ztGJaTS8ByiYjcnwVQUxMNLS0VTmTl4hKwfCYnc+8opEIBNTVVOJCVi0vAconxcSaGhnLP99uD9cMlYLlMjowwOjBQeqLVHZeA5TI5OsrY+fO55ja3t1c4jZWTS8DKrqWjA3yg0brhZ8rKruDPDdQVl4DlMjE8nHtvwUJra4XTWDm5BCyXiQ8+ICYnc82V5HcH6ohLwHIZHRgoeZRhq08u
Actl5MyZXMcTUGMjjbfcUvlAVjYuASurhgULaL7ttmrHsOvgErCSruv7AwoFbxisMyVLQNJqST+XdFTSEUlPZePLJO2XdDw7X1p0m12S+iQdk7Shkg/A5kfeQ42roYHCwoUVTmPllGdNYBz4p4i4G3gI2CFpHbATOBARXcCB7DLZdVuAe4CNwLOSCpUIb/Mkgom8hxaTvKNQnSn5bEXE2Yj4dbZ8CTgKdACbgJ5sWg/wRLa8CdgTESMRcQLoA9aXObfNp8nJ6zq+oNWX66psSWuAB4DXgZURcRamigL48BjTHcDpopv1Z2NWp2JigqETJ3LNbbzpJuQ1gbqS+9mStBj4EfC1iHh/tqkzjF21ZUnSdkm9knoHBwfzxrAqiIjcxxJoWb3axxKoM7lKQFITUwXwQkT8OBs+J6k9u74d+PBzpv3A6qKbdwJnpt9nROyOiO6I6G5ra7vR/FZjCq2tXhOoM3neHRDwHHA0Ir5TdNU+YGu2vBV4sWh8i6RmSWuBLuBg+SLbfIvx8dxvExZaW71hsM405pjzMPD3wP9JOpyN/QvwDLBX0jbgFLAZICKOSNoLvMvUOws7IsL7m9axyeHh3N8+pELBnxuoMyVLICJ+ycyv8wE+f43bPA08PYdcVkMmhoaI8fFqx7AK8XqblTRy5gyTV67kmuu9BeuPS8BKmhgayvfhoUKBhXfeOQ+JrJxcAlY+kr+NuA65BGxWEZH/K8klCosWVTaQlZ1LwErK/bkB8I5CdcglYCVNXL5c7QhWQS4BK+lKzs8NNDQ1eW/BOuRnzGYXkf9LR1at8uHG65BLwMqmYeFCGhrz7IRqtcQlYKXl/dxASwtyCdQdl4DNanJ0NPcuw2pq8oeH6pCfMZtVjI4yOTqae74/PFR/XAI2q7ELFxi/eDHXXG8PqE9+1mxWE5cvf2JnofdHR/nxqVMMDg+zYdUq7lu69KP//q133VWtmDYHLgHL7dLYGN96801+OTB1EKmf9ffzbw8+yF+vmDq8ZOOSJdWMZzfILwdsdoXC1An445UrvDYw8NFVF8fG+J8zHx85rvGmm+Y9ns2dS8Bm1XrnnSy84w4AFjQ00Fz45FdILCn6rEBDS8u8ZrPycAnYrNTYyJIHHgBg7eLF/PN997G8uZnmQoFH2tvZ1tUFwIIVK2hub69mVLtB3iZgs5JE22OP8f6hQwydPMnjnZ08eOutDI2P07FoES2FAlqwgJVPPknT0qWl79BqjtcErKQFbW3cvmMHC9euRQ0NrGpt5c+WLKGlUKChtZXbNm9m+aOPeh+BOuU1AStJEovuuos//+Y3Of/qq1x6+20mhodZuHo1yz77WRbdfbf3EahjfuYsF0ksWL6clU8+yconn5z6PEH2n99rAPXNJWDX5aM/eP/hf2p4m4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJa4kiUgqUXSQUlvSToi6dvZ+DJJ+yUdz86XFt1ml6Q+ScckbajkAzCzucmzJjACPBIRfwncD2yU9BCwEzgQEV3AgewyktYBW4B7gI3As5IKM92xmVVfyRKIKR9+QX1TdgpgE9CTjfcAT2TLm4A9ETESESeAPmB9OUObWfnk2iYgqSDpMDAA7I+I14GVEXEWIDtfkU3vAE4X3bw/G5t+n9sl9UrqHRwcnMNDMLO5yFUCETEREfcDncB6SffOMn2mo01c9bW2EbE7IrojorutrS1XWDMrv+t6dyAiLgCvMPVa/5ykdoDs/MNvpegHVhfdrBM4g5nVpDzvDrRJuiVbXgg8CvwG2AdszaZtBV7MlvcBWyQ1S1oLdAEHy5zbzMokzzEG24GebAt/A7A3Il6S9L/AXknbgFPAZoCIOCJpL/AuMA7siIiJysQ3s7lSxFUv1+ddd3d39Pb2VjuG2aeapEMR0T193HsMmiXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVg
ljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVglrjcJSCpIOlNSS9ll5dJ2i/peHa+tGjuLkl9ko5J2lCJ4GZWHtezJvAUcLTo8k7gQER0AQeyy0haB2wB7gE2As9KKpQnrpmVW64SkNQJPA58v2h4E9CTLfcATxSN74mIkYg4AfQB68uS1szKLu+awHeBrwOTRWMrI+IsQHa+IhvvAE4XzevPxsysBpUsAUlfBAYi4lDO+9QMYzHD/W6X1Cupd3BwMOddm1m55VkTeBj4kqSTwB7gEUk/AM5JagfIzgey+f3A6qLbdwJnpt9pROyOiO6I6G5ra5vDQzCzuShZAhGxKyI6I2INUxv8Xo6IrwD7gK3ZtK3Ai9nyPmCLpGZJa4Eu4GDZk5tZWTTO4bbPAHslbQNOAZsBIuKIpL3Au8A4sCMiJuac1MwqQhFXvVyfd93d3dHb21vtGGafapIORUT39HHvMWiWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeJcAmaJcwmYJc4lYJY4l4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXOJWCWOJeAWeIUEdXOgKRB4APgT9XOktNy6icr1FdeZ62cOyKibfpgTZQAgKTeiOiudo486ikr1FdeZ51/fjlgljiXgFniaqkEdlc7wHWop6xQX3mddZ7VzDYBM6uOWloTMLMqqHoJSNoo6ZikPkk7q50HQNLzkgYkvVM0tkzSfknHs/OlRdftyvIfk7RhnrOulvRzSUclHZH0VK3mldQi6aCkt7Ks367VrEU/vyDpTUkv1XrWGxYRVTsBBeB3wJ3AAuAtYF01M2W5/hZ4EHinaOw/gJ3Z8k7g37PldVnuZmBt9ngK85i1HXgwW74J+G2WqebyAgIWZ8tNwOvAQ7WYtSjzPwL/BbxUy78HczlVe01gPdAXEb+PiFFgD7CpypmIiF8A56cNbwJ6suUe4Imi8T0RMRIRJ4A+ph7XvIiIsxHx62z5EnAU6KjFvDHlcnaxKTtFLWYFkNQJPA58v2i4JrPORbVLoAM4XXS5PxurRSsj4ixM/eEBK7LxmnkMktYADzD1H7Ym82ar14eBAWB/RNRsVuC7wNeByaKxWs16w6pdApphrN7erqiJxyBpMfAj4GsR8f5sU2cYm7e8ETEREfcDncB6SffOMr1qWSV9ERiIiEN5bzLDWF38Lle7BPqB1UWXO4EzVcpSyjlJ7QDZ+UA2XvXHIKmJqQJ4ISJ+nA3XbF6AiLgAvAJspDazPgx8SdJJpl6mPiLpBzWadU6qXQJvAF2S1kpaAGwB9lU507XsA7Zmy1uBF4vGt0hqlrQW6AIOzlcoSQKeA45GxHdqOa+kNkm3ZMsLgUeB39Ri1ojYFRGdEbGGqd/LlyPiK7WYdc6qvWUS+AJTW7R/B3yj2nmyTD8EzgJjTDX8NuBW4ABwPDtfVjT/G1n+Y8Bj85z1b5ha7XwbOJydvlCLeYG/AN7Msr4DfCsbr7ms03J/jo/fHajprDdy8h6DZomr9ssBM6syl4BZ4lwCZolzCZglziVgljiXgFniXAJmiXMJmCXu/wG0428gpDzgwgAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "-118.11529185998903"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test(play=True)"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "collapsed_sections": [],
   "name": "第7章-DQN算法.ipynb",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
