{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.2 CartPoleを試す"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 使用するパッケージの宣言\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import gym\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Declaration of the animation display function\n",
    "# Reference URL http://nbviewer.jupyter.org/github/patrickmineault/xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb\n",
    "from JSAnimation.IPython_display import display_animation\n",
    "from matplotlib import animation\n",
    "from IPython.display import display\n",
    "\n",
    "\n",
    "def display_frames_as_gif(frames):\n",
    "    \"\"\"\n",
    "    Displays a list of frames as a gif, with controls.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    frames : list of ndarray\n",
    "        RGB frames rendered by the environment; must be non-empty, since\n",
    "        frames[0] supplies the pixel dimensions used to size the figure.\n",
    "\n",
    "    Raises\n",
    "    ------\n",
    "    ValueError\n",
    "        If ``frames`` is empty (there is no image to size the figure from).\n",
    "    \"\"\"\n",
    "    if not frames:\n",
    "        raise ValueError('frames must contain at least one image')\n",
    "\n",
    "    # Size the figure so one image pixel maps to one figure pixel at 72 dpi.\n",
    "    fig = plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),\n",
    "                     dpi=72)\n",
    "    patch = plt.imshow(frames[0])\n",
    "    plt.axis('off')\n",
    "\n",
    "    def animate(i):\n",
    "        # Swap in the image data for frame i; axes/figure are reused as-is.\n",
    "        patch.set_data(frames[i])\n",
    "\n",
    "    # Bind the animation to the figure created above instead of relying on\n",
    "    # the implicit pyplot \"current figure\" (plt.gcf()) global state.\n",
    "    anim = animation.FuncAnimation(fig, animate, frames=len(frames),\n",
    "                                   interval=50)\n",
    "\n",
    "    anim.save('movie_cartpole.mp4')  # Added: save the animation as a video file\n",
    "    display(display_animation(anim, default_mode='loop'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/bowen/soft/anaconda3/lib/python3.7/site-packages/gym/envs/registration.py:556: UserWarning: \u001b[33mWARN: The environment CartPole-v0 is out of date. You should consider upgrading to version `v1`.\u001b[0m\n",
      "  f\"The environment {id} is out of date. You should consider \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step: 0\n",
      "action: 1\n",
      "state = [-0.02564488  0.22663404  0.01813279 -0.28658074]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 1\n",
      "action: 1\n",
      "state = [-0.0211122   0.42149276  0.01240118 -0.57349   ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 2\n",
      "action: 0\n",
      "state = [-0.01268234  0.22619915  0.00093138 -0.27692628]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 3\n",
      "action: 0\n",
      "state = [-0.00815836  0.03106392 -0.00460715  0.01605026]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 4\n",
      "action: 0\n",
      "state = [-0.00753708 -0.16399166 -0.00428614  0.307276  ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 5\n",
      "action: 1\n",
      "state = [-0.01081691  0.03119111  0.00185938  0.01324444]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 6\n",
      "action: 0\n",
      "state = [-0.01019309 -0.16395746  0.00212427  0.30651346]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 7\n",
      "action: 0\n",
      "state = [-0.01347224 -0.3591096   0.00825454  0.59986556]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 8\n",
      "action: 0\n",
      "state = [-0.02065443 -0.5543461   0.02025185  0.8951371 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 9\n",
      "action: 0\n",
      "state = [-0.03174135 -0.74973667  0.03815459  1.1941166 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 10\n",
      "action: 1\n",
      "state = [-0.04673609 -0.55512905  0.06203692  0.9136323 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 11\n",
      "action: 0\n",
      "state = [-0.05783867 -0.7510328   0.08030956  1.2251499 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 12\n",
      "action: 1\n",
      "state = [-0.07285932 -0.5570316   0.10481256  0.9586715 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 13\n",
      "action: 0\n",
      "state = [-0.08399996 -0.75339466  0.12398599  1.2823582 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 14\n",
      "action: 1\n",
      "state = [-0.09906785 -0.5600506   0.14963315  1.030927  ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 15\n",
      "action: 1\n",
      "state = [-0.11026886 -0.36720175  0.1702517   0.78871423]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 16\n",
      "action: 1\n",
      "state = [-0.1176129  -0.17477591  0.18602598  0.5540611 ]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 17\n",
      "action: 1\n",
      "state = [-0.12110842  0.01731385  0.1971072   0.32527438]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 18\n",
      "action: 1\n",
      "state = [-0.12076214  0.2091635   0.20361269  0.10064705]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 19\n",
      "action: 0\n",
      "state = [-0.11657887  0.01179399  0.20562562  0.45003492]; reward = 1.0; terminated = False; truncated = False; info = {}\n",
      "step: 20\n",
      "action: 1\n",
      "state = [-0.11634299  0.20350493  0.21462633  0.22855537]; reward = 1.0; terminated = True; truncated = False; info = {}\n",
      "step: 21\n",
      "action: 0\n",
      "state = [-0.1122729   0.00606477  0.21919744  0.5805373 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 22\n",
      "action: 1\n",
      "state = [-0.1121516   0.19755185  0.23080818  0.3641087 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 23\n",
      "action: 0\n",
      "state = [-1.0820056e-01  2.2060145e-05  2.3809035e-01  7.1980292e-01]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 24\n",
      "action: 1\n",
      "state = [-0.10820012  0.19119816  0.2524864   0.51046747]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 25\n",
      "action: 1\n",
      "state = [-0.10437615  0.38204306  0.26269576  0.3067212 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 26\n",
      "action: 1\n",
      "state = [-0.09673529  0.57265186  0.26883018  0.10696409]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 27\n",
      "action: 0\n",
      "state = [-0.08528226  0.3748814   0.27096948  0.4710521 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 28\n",
      "action: 0\n",
      "state = [-0.07778463  0.17715527  0.2803905   0.835513  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 29\n",
      "action: 1\n",
      "state = [-0.07424153  0.36758488  0.29710078  0.64238256]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 30\n",
      "action: 0\n",
      "state = [-0.06688983  0.16980653  0.3099484   1.0121212 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 31\n",
      "action: 1\n",
      "state = [-0.0634937   0.35977432  0.33019084  0.83042043]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 32\n",
      "action: 1\n",
      "state = [-0.05629821  0.5492583   0.34679925  0.65686995]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 33\n",
      "action: 0\n",
      "state = [-0.04531305  0.35136786  0.35993665  1.035961  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 34\n",
      "action: 0\n",
      "state = [-0.03828569  0.15368156  0.38065588  1.4170399 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 35\n",
      "action: 0\n",
      "state = [-0.03521205 -0.04366621  0.40899667  1.8011023 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 36\n",
      "action: 1\n",
      "state = [-0.03608538  0.1452946   0.4450187   1.6579598 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 37\n",
      "action: 0\n",
      "state = [-0.03317949 -0.05157585  0.4781779   2.051063  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 38\n",
      "action: 1\n",
      "state = [-0.03421101  0.13665906  0.5191992   1.9356685 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 39\n",
      "action: 1\n",
      "state = [-0.03147782  0.32404092  0.5579125   1.8375148 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 40\n",
      "action: 1\n",
      "state = [-0.02499701  0.5106387   0.59466285  1.7557094 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 41\n",
      "action: 0\n",
      "state = [-0.01478423  0.31503826  0.629777    2.1634517 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 42\n",
      "action: 1\n",
      "state = [-0.00848346  0.501296    0.67304605  2.1108186 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 43\n",
      "action: 0\n",
      "state = [1.5424544e-03 3.0740625e-01 7.1526241e-01 2.5215008e+00]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 44\n",
      "action: 0\n",
      "state = [0.00769058 0.11529765 0.7656925  2.931851  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 45\n",
      "action: 0\n",
      "state = [ 0.00999653 -0.07450712  0.82432944  3.3408496 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 46\n",
      "action: 1\n",
      "state = [0.00850639 0.11402535 0.8911465  3.3646386 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 47\n",
      "action: 0\n",
      "state = [ 0.0107869  -0.07131299  0.95843923  3.7680433 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 48\n",
      "action: 1\n",
      "state = [0.00936064 0.11907034 1.0338001  3.844474  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 49\n",
      "action: 0\n",
      "state = [ 0.01174204 -0.06027674  1.1106895   4.234713  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 50\n",
      "action: 1\n",
      "state = [0.01053651 0.13343589 1.1953838  4.369113  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 51\n",
      "action: 0\n",
      "state = [ 0.01320523 -0.03837052  1.2827661   4.7371287 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 52\n",
      "action: 1\n",
      "state = [0.01243782 0.16046181 1.3775086  4.9342957 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 53\n",
      "action: 1\n",
      "state = [0.01564705 0.36198953 1.4761946  5.164755  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 54\n",
      "action: 1\n",
      "state = [0.02288684 0.5668169  1.5794897  5.428418  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 55\n",
      "action: 1\n",
      "state = [0.03422318 0.7755402  1.688058   5.7251287 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 56\n",
      "action: 1\n",
      "state = [0.04973399 0.9887027  1.8025606  6.0545173 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 57\n",
      "action: 1\n",
      "state = [0.06950804 1.2067264  1.923651   6.4157753 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 58\n",
      "action: 0\n",
      "state = [0.09364256 1.0631877  2.0519664  6.617256  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 59\n",
      "action: 1\n",
      "state = [0.11490632 1.2890749  2.1843116  7.03469   ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 60\n",
      "action: 0\n",
      "state = [0.14068782 1.147123   2.3250053  7.1524806 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 61\n",
      "action: 1\n",
      "state = [0.16363028 1.3768483  2.468055   7.602696  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 62\n",
      "action: 1\n",
      "state = [0.19116725 1.6075684  2.6201088  8.056582  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 63\n",
      "action: 0\n",
      "state = [0.2233186 1.4529947 2.7812405 8.002002 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 64\n",
      "action: 1\n",
      "state = [0.25237852 1.6728758  2.9412806  8.414306  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 65\n",
      "action: 1\n",
      "state = [0.285836  1.8839267 3.1095667 8.783051 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 66\n",
      "action: 0\n",
      "state = [0.32351455 1.6916881  3.2852278  8.504254  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 67\n",
      "action: 1\n",
      "state = [0.35734832 1.8744045  3.4553127  8.733423  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 68\n",
      "action: 0\n",
      "state = [0.39483643 1.6536475  3.6299813  8.327722  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 69\n",
      "action: 0\n",
      "state = [0.42790937 1.424528   3.7965357  7.886276  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 70\n",
      "action: 1\n",
      "state = [0.45639992 1.5717658  3.9542613  7.882354  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 71\n",
      "action: 0\n",
      "state = [0.48783523 1.3346181  4.1119084  7.424292  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 72\n",
      "action: 1\n",
      "state = [0.5145276 1.4718502 4.260394  7.2980356]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 73\n",
      "action: 1\n",
      "state = [0.5439646 1.6066136 4.406355  7.121849 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 74\n",
      "action: 0\n",
      "state = [0.5760969 1.3755591 4.548792  6.7370915]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 75\n",
      "action: 0\n",
      "state = [0.6036081 1.150475  4.6835337 6.3920283]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 76\n",
      "action: 0\n",
      "state = [0.62661755 0.9311308  4.811374   6.0886583 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 77\n",
      "action: 0\n",
      "state = [0.6452402 0.7169476 4.9331474 5.827847 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 78\n",
      "action: 1\n",
      "state = [0.6595791  0.87200105 5.0497046  5.4900537 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 79\n",
      "action: 1\n",
      "state = [0.6770191 1.0333412 5.1595054 5.1325274]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 80\n",
      "action: 0\n",
      "state = [0.69768596 0.83258015 5.262156   4.997632  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 81\n",
      "action: 1\n",
      "state = [0.7143376 1.004186  5.3621087 4.6124606]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 82\n",
      "action: 1\n",
      "state = [0.7344213 1.181465  5.4543576 4.217491 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 83\n",
      "action: 0\n",
      "state = [0.7580506  0.98837215 5.5387077  4.196493  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 84\n",
      "action: 1\n",
      "state = [0.777818  1.1728033 5.6226377 3.793825 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 85\n",
      "action: 1\n",
      "state = [0.8012741 1.3610723 5.698514  3.3904397]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 86\n",
      "action: 1\n",
      "state = [0.82849556 1.5523419  5.7663226  2.988926  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 87\n",
      "action: 1\n",
      "state = [0.85954237 1.7458603  5.8261013  2.5912845 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 88\n",
      "action: 1\n",
      "state = [0.8944596 1.9409899 5.877927  2.1988854]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 89\n",
      "action: 1\n",
      "state = [0.9332794 2.1372163 5.9219046 1.8124756]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 90\n",
      "action: 0\n",
      "state = [0.97602373 1.9474386  5.958154   1.9748446 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 91\n",
      "action: 1\n",
      "state = [1.0149724 2.1442173 5.997651  1.601246 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 92\n",
      "action: 0\n",
      "state = [1.0578568 1.9533746 6.029676  1.7931086]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 93\n",
      "action: 0\n",
      "state = [1.0969243 1.7618294 6.0655384 1.9975072]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 94\n",
      "action: 1\n",
      "state = [1.1321609 1.9584637 6.1054883 1.64603  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 95\n",
      "action: 0\n",
      "state = [1.1713302 1.76581   6.138409  1.8784915]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 96\n",
      "action: 0\n",
      "state = [1.2066464 1.5725332 6.1759787 2.1229582]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 97\n",
      "action: 0\n",
      "state = [1.2380971 1.378629  6.218438  2.3806863]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 98\n",
      "action: 1\n",
      "state = [1.2656696 1.5742592 6.266052  2.0688334]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 99\n",
      "action: 1\n",
      "state = [1.2971548 1.769551  6.3074284 1.7709014]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 100\n",
      "action: 1\n",
      "state = [1.3325459 1.9643912 6.3428464 1.4858538]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 101\n",
      "action: 0\n",
      "state = [1.3718337 1.7685951 6.3725634 1.7965555]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 102\n",
      "action: 0\n",
      "state = [1.4072056 1.5725935 6.4084945 2.1156266]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 103\n",
      "action: 0\n",
      "state = [1.4386574 1.376463  6.450807  2.4442601]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 104\n",
      "action: 1\n",
      "state = [1.4661866 1.5698045 6.4996924 2.207363 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 105\n",
      "action: 1\n",
      "state = [1.4975828 1.7622888 6.5438395 1.9885341]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 106\n",
      "action: 0\n",
      "state = [1.5328286 1.565546  6.58361   2.3494473]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 107\n",
      "action: 1\n",
      "state = [1.5641395 1.756981  6.630599  2.1621585]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 108\n",
      "action: 0\n",
      "state = [1.5992792 1.5604875 6.6738424 2.5393872]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 109\n",
      "action: 1\n",
      "state = [1.6304889 1.7509347 6.72463   2.3871932]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 110\n",
      "action: 0\n",
      "state = [1.6655076 1.5552617 6.772374  2.778176 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 111\n",
      "action: 0\n",
      "state = [1.6966128 1.3608702 6.8279376 3.1737177]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 112\n",
      "action: 1\n",
      "state = [1.7238302 1.5509926 6.891412  3.0821655]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 113\n",
      "action: 0\n",
      "state = [1.75485   1.3590275 6.9530554 3.4864686]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 114\n",
      "action: 0\n",
      "state = [1.7820306 1.1696308 7.0227847 3.8917115]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 115\n",
      "action: 0\n",
      "state = [1.8054233 0.9835135 7.100619  4.296103 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 116\n",
      "action: 0\n",
      "state = [1.8250935 0.8014563 7.1865406 4.6973615]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 117\n",
      "action: 1\n",
      "state = [1.8411226 0.9976578 7.280488  4.7461047]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 118\n",
      "action: 1\n",
      "state = [1.8610758 1.1945385 7.37541   4.8328357]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 119\n",
      "action: 1\n",
      "state = [1.8849665 1.3926054 7.472067  4.9569874]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 120\n",
      "action: 0\n",
      "state = [1.9128187 1.2253096 7.5712066 5.323332 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 121\n",
      "action: 0\n",
      "state = [1.9373249 1.063792  7.6776733 5.673256 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 122\n",
      "action: 0\n",
      "state = [1.9586006  0.90814596 7.791138   6.003648  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 123\n",
      "action: 0\n",
      "state = [1.9767636  0.75815225 7.9112115  6.3111973 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 124\n",
      "action: 0\n",
      "state = [1.9919267  0.61321574 8.037436   6.592281  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 125\n",
      "action: 0\n",
      "state = [2.004191   0.47231933 8.169281   6.8427925 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 126\n",
      "action: 1\n",
      "state = [2.0136373 0.700039  8.306137  7.228224 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 127\n",
      "action: 1\n",
      "state = [2.0276382  0.93286365 8.450701   7.6452622 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 128\n",
      "action: 1\n",
      "state = [2.0462954 1.1699516 8.603606  8.088295 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 129\n",
      "action: 0\n",
      "state = [2.0696945 1.0340266 8.765372  8.164565 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 130\n",
      "action: 1\n",
      "state = [2.090375  1.2694697 8.928663  8.62381  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 131\n",
      "action: 0\n",
      "state = [2.1157644 1.1174102 9.10114   8.563167 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 132\n",
      "action: 0\n",
      "state = [2.1381125  0.95059747 9.272403   8.419435  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 133\n",
      "action: 0\n",
      "state = [2.1571245 0.7684314 9.440792  8.193976 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 134\n",
      "action: 1\n",
      "state = [2.1724932 0.9622712 9.6046715 8.479991 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 135\n",
      "action: 1\n",
      "state = [2.1917386 1.141895  9.774271  8.692475 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/bowen/soft/anaconda3/lib/python3.7/site-packages/gym/envs/classic_control/cartpole.py:178: UserWarning: \u001b[33mWARN: You are calling 'step()' even though this environment has already returned terminated = True. You should always call 'reset()' once you receive 'terminated = True' -- any further steps are undefined behavior.\u001b[0m\n",
      "  \"You are calling 'step()' even though this \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step: 136\n",
      "action: 1\n",
      "state = [2.2145765 1.3057551 9.948121  8.822735 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 137\n",
      "action: 0\n",
      "state = [ 2.2406917  1.0707656 10.124576   8.370494 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 138\n",
      "action: 0\n",
      "state = [ 2.262107    0.83180505 10.2919855   7.9069424 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 139\n",
      "action: 0\n",
      "state = [ 2.278743   0.5932474 10.450124   7.4512544]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 140\n",
      "action: 1\n",
      "state = [ 2.290608   0.7284703 10.599149   7.3051467]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 141\n",
      "action: 0\n",
      "state = [ 2.3051772   0.49476522 10.745253    6.8985887 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 142\n",
      "action: 0\n",
      "state = [ 2.3150725   0.26687065 10.883224    6.5290723 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 143\n",
      "action: 1\n",
      "state = [ 2.32041     0.40881255 11.013805    6.2607965 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 144\n",
      "action: 1\n",
      "state = [ 2.3285863  0.5552494 11.139021   5.962841 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 145\n",
      "action: 0\n",
      "state = [ 2.3396912   0.34303513 11.258278    5.717366  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 146\n",
      "action: 1\n",
      "state = [ 2.346552    0.50023043 11.372625    5.3722196 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 147\n",
      "action: 0\n",
      "state = [ 2.3565567   0.29671183 11.48007     5.211269  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 148\n",
      "action: 1\n",
      "state = [ 2.362491   0.4646752 11.584295   4.8337593]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 149\n",
      "action: 1\n",
      "state = [ 2.3717842  0.6386575 11.68097    4.4443355]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 150\n",
      "action: 0\n",
      "state = [ 2.3845575   0.44417375 11.769857    4.4013863 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 151\n",
      "action: 0\n",
      "state = [ 2.393441   0.2499718 11.857884   4.3948793]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 152\n",
      "action: 1\n",
      "state = [ 2.3984404   0.43421072 11.945783    3.9937258 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 153\n",
      "action: 1\n",
      "state = [ 2.4071245  0.6224118 12.025657   3.593098 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 154\n",
      "action: 0\n",
      "state = [ 2.4195728   0.43084943 12.097519    3.6881137 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 155\n",
      "action: 1\n",
      "state = [ 2.4281898  0.622887  12.171281   3.2982945]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 156\n",
      "action: 1\n",
      "state = [ 2.4406476  0.8169154 12.237247   2.9165154]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 157\n",
      "action: 0\n",
      "state = [ 2.456986   0.6249651 12.295577   3.093962 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 158\n",
      "action: 0\n",
      "state = [ 2.4694853   0.43205145 12.357456    3.2941437 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 159\n",
      "action: 1\n",
      "state = [ 2.4781263  0.627273  12.42334    2.9467037]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 160\n",
      "action: 1\n",
      "state = [ 2.4906716   0.82291996 12.482273    2.614322  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 161\n",
      "action: 1\n",
      "state = [ 2.5071301  1.0185813 12.53456    2.2971718]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 162\n",
      "action: 0\n",
      "state = [ 2.5275016  0.8237659 12.580503   2.5798965]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 163\n",
      "action: 0\n",
      "state = [ 2.543977    0.62853587 12.632101    2.876867  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 164\n",
      "action: 0\n",
      "state = [ 2.5565476  0.433066  12.689639   3.1887496]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 165\n",
      "action: 1\n",
      "state = [ 2.5652092  0.6274427 12.753414   2.9355462]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 166\n",
      "action: 0\n",
      "state = [ 2.5777578   0.43175888 12.812124    3.2786229 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 167\n",
      "action: 1\n",
      "state = [ 2.586393    0.62521017 12.877697    3.068691  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 168\n",
      "action: 0\n",
      "state = [ 2.5988972  0.4300604 12.939071   3.4374025]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 169\n",
      "action: 0\n",
      "state = [ 2.6074984   0.23615356 13.007819    3.815349  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 170\n",
      "action: 1\n",
      "state = [ 2.6122215   0.42922446 13.0841255   3.6791174 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 171\n",
      "action: 0\n",
      "state = [ 2.620806    0.23789997 13.157708    4.0739994 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 172\n",
      "action: 1\n",
      "state = [ 2.625564    0.43102002 13.239188    3.997405  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 173\n",
      "action: 1\n",
      "state = [ 2.6341844   0.62340057 13.319137    3.954941  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 174\n",
      "action: 1\n",
      "state = [ 2.6466525  0.815239  13.398235   3.9459314]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 175\n",
      "action: 0\n",
      "state = [ 2.6629572   0.63154995 13.477154    4.348824  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 176\n",
      "action: 1\n",
      "state = [ 2.6755881  0.8254473 13.564131   4.402754 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 177\n",
      "action: 1\n",
      "state = [ 2.6920972  1.0198815 13.652185   4.4916606]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 178\n",
      "action: 1\n",
      "state = [ 2.7124949  1.2153099 13.742019   4.615097 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 179\n",
      "action: 1\n",
      "state = [ 2.736801   1.4122406 13.83432    4.77273  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 180\n",
      "action: 0\n",
      "state = [ 2.765046   1.245372  13.929775   5.1280003]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 181\n",
      "action: 0\n",
      "state = [ 2.7899532  1.0837876 14.032335   5.4656076]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 182\n",
      "action: 1\n",
      "state = [ 2.811629   1.2913781 14.141647   5.72541  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 183\n",
      "action: 0\n",
      "state = [ 2.8374565  1.1394196 14.256156   6.018386 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 184\n",
      "action: 0\n",
      "state = [ 2.860245   0.99173   14.376523   6.2840095]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 185\n",
      "action: 0\n",
      "state = [ 2.8800795  0.8473117 14.502203   6.51827  ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 186\n",
      "action: 1\n",
      "state = [ 2.8970258  1.0716155 14.632568   6.9130077]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 187\n",
      "action: 0\n",
      "state = [ 2.9184582   0.93144864 14.770829    7.0717125 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 188\n",
      "action: 1\n",
      "state = [ 2.937087   1.1617861 14.912263   7.513211 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 189\n",
      "action: 1\n",
      "state = [ 2.9603229  1.3947202 15.062528   7.9677367]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 190\n",
      "action: 1\n",
      "state = [ 2.988217   1.6278195 15.221882   8.423904 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 191\n",
      "action: 0\n",
      "state = [ 3.0207736  1.4734293 15.39036    8.356489 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 192\n",
      "action: 1\n",
      "state = [ 3.0502422  1.6925181 15.55749    8.7605   ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 193\n",
      "action: 0\n",
      "state = [ 3.0840926  1.5110444 15.7327     8.535438 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 194\n",
      "action: 1\n",
      "state = [ 3.1143134  1.7040449 15.903409   8.817578 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 195\n",
      "action: 0\n",
      "state = [ 3.1483943  1.4920449 16.07976    8.448537 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 196\n",
      "action: 0\n",
      "state = [ 3.1782353  1.2689252 16.248732   8.029916 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 197\n",
      "action: 0\n",
      "state = [ 3.2036138  1.0395378 16.409328   7.583581 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 198\n",
      "action: 0\n",
      "state = [ 3.2244046   0.80818987 16.561       7.1287627 ]; reward = 0.0; terminated = True; truncated = False; info = {}\n",
      "step: 199\n",
      "action: 1\n",
      "state = [ 3.2405684  0.9528527 16.703577   7.050015 ]; reward = 0.0; terminated = True; truncated = True; info = {}\n"
     ]
    }
   ],
   "source": [
    "# CartPoleをランダムに動かす\n",
    "\n",
    "frames = []\n",
    "# env = gym.make('CartPole-v0',render_mode='human')\n",
    "env = gym.make('CartPole-v0',render_mode='rgb_array')\n",
    "observation = env.reset()  # 最初に環境のresetが必要\n",
    "\n",
    "for step in range(0, 200):\n",
    "#     frames.append(env.render(mode='rgb_array'))  # framesに各時刻の画像を追加していく\n",
    "    frames.append(env.render()) \n",
    "    action = env.action_space.sample()\n",
    "#     action = np.random.choice(2)  # 0(カートを左に押す), 1(カートを右に押す)をランダムに返す\n",
    "#     observation, reward, done, info = env.step(action)  # actionを実行する\n",
    "    state, reward, terminated, truncated, info = env.step(action)\n",
    "    print(\"step:\", step)\n",
    "    print(\"action:\", action)\n",
    "    print('state = {0}; reward = {1}; terminated = {2}; truncated = {3}; info = {4}'.format(state, reward, terminated, truncated, info))\n",
    "    \n",
    "#     if terminated:\n",
    "#         print('terminated')\n",
    "#         break\n",
    "        \n",
    "        \n",
    "# print(frames)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "setup() got an unexpected keyword argument 'clear_temp'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-8-2d605f8148aa>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m# 動画を保存と描画\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mdisplay_frames_as_gif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mframes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-6-9d95926f2835>\u001b[0m in \u001b[0;36mdisplay_frames_as_gif\u001b[0;34m(frames)\u001b[0m\n\u001b[1;32m     22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     23\u001b[0m     \u001b[0manim\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'movie_cartpole.mp4'\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# 追記：動画の保存です\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m     \u001b[0mdisplay\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdisplay_animation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0manim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdefault_mode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'loop'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/site-packages/JSAnimation/IPython_display.py\u001b[0m in \u001b[0;36mdisplay_animation\u001b[0;34m(anim, **kwargs)\u001b[0m\n\u001b[1;32m     84\u001b[0m     \u001b[0;34m\"\"\"Display the animation with an IPython HTML object\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     85\u001b[0m     \u001b[0;32mfrom\u001b[0m \u001b[0mIPython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdisplay\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mHTML\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m     \u001b[0;32mreturn\u001b[0m \u001b[0mHTML\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0manim_to_html\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0manim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     87\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     88\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/site-packages/JSAnimation/IPython_display.py\u001b[0m in \u001b[0;36manim_to_html\u001b[0;34m(anim, fps, embed_frames, default_mode)\u001b[0m\n\u001b[1;32m     74\u001b[0m             anim.save(f.name,  writer=HTMLWriter(fps=fps,\n\u001b[1;32m     75\u001b[0m                                                  \u001b[0membed_frames\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0membed_frames\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 76\u001b[0;31m                                                  default_mode=default_mode))\n\u001b[0m\u001b[1;32m     77\u001b[0m             \u001b[0mhtml\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     78\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/site-packages/matplotlib/animation.py\u001b[0m in \u001b[0;36msave\u001b[0;34m(self, filename, writer, fps, dpi, codec, bitrate, extra_args, metadata, extra_anim, savefig_kwargs, progress_callback)\u001b[0m\n\u001b[1;32m   1070\u001b[0m         \u001b[0;31m# widget (both are likewise done in savefig()).\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1071\u001b[0m         \u001b[0;32mwith\u001b[0m \u001b[0mmpl\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrc_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0;34m'savefig.bbox'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1072\u001b[0;31m              \u001b[0mwriter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msaving\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_fig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdpi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1073\u001b[0m              cbook._setattr_cm(self._fig.canvas,\n\u001b[1;32m   1074\u001b[0m                                _is_saving=True, manager=None):\n",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/contextlib.py\u001b[0m in \u001b[0;36m__enter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    110\u001b[0m         \u001b[0;32mdel\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    111\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 112\u001b[0;31m             \u001b[0;32mreturn\u001b[0m \u001b[0mnext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    113\u001b[0m         \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    114\u001b[0m             \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"generator didn't yield\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/site-packages/matplotlib/animation.py\u001b[0m in \u001b[0;36msaving\u001b[0;34m(self, fig, outfile, dpi, *args, **kwargs)\u001b[0m\n\u001b[1;32m    230\u001b[0m         \"\"\"\n\u001b[1;32m    231\u001b[0m         \u001b[0;31m# This particular sequence is what contextlib.contextmanager wants\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 232\u001b[0;31m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msetup\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moutfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdpi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    233\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    234\u001b[0m             \u001b[0;32myield\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/soft/anaconda3/lib/python3.7/site-packages/JSAnimation/html_writer.py\u001b[0m in \u001b[0;36msetup\u001b[0;34m(self, fig, outfile, dpi, frame_dir)\u001b[0m\n\u001b[1;32m    280\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    281\u001b[0m         super(HTMLWriter, self).setup(fig, outfile, dpi,\n\u001b[0;32m--> 282\u001b[0;31m                                       frame_prefix, clear_temp=False)\n\u001b[0m\u001b[1;32m    283\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    284\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mgrab_frame\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0msavefig_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: setup() got an unexpected keyword argument 'clear_temp'"
     ]
    }
   ],
   "source": [
    "# 動画を保存と描画\n",
    "display_frames_as_gif(frames)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
