{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym, random, pickle, os.path, math, glob\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.autograd as autograd\n",
    "import pdb\n",
    "\n",
    "from atari_wrappers import make_atari, wrap_deepmind, LazyFrames\n",
    "from IPython.display import clear_output\n",
    "from tensorboardX import SummaryWriter\n",
    "\n",
    "USE_CUDA = torch.cuda.is_available()\n",
    "dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x21892507c48>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD7CAYAAACscuKmAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAO+0lEQVR4nO3da6wc5X3H8e/f59g+2Ib4wqWujbCJKJcSYahFoFQVgbihJIWoJRUoqtIKiTdpCk2kAO2LKFKlEqlK4EVU1YKkqKLcHGiQFUEtAy8qtQZzCcHYDjZQOOBgh2BIjDHY/vfFjM2Ru4cz5+zl7Pr5fqSjnZmd9Tzj0W+f2dnZ5x+ZiaSj34zpboCk3jDsUiEMu1QIwy4VwrBLhTDsUiHaCntEXBYRWyNiW0Tc1KlGSeq8mOr37BExBPwcWAWMAk8C12TmC51rnqROGW7jtecD2zLzJYCIuAe4Ehg37LNido4wd+J/ec7IR9Mzoo0mdk7W7dh/THvtGX6/enONA97M1G0lHrP339/NBx/uabnD7YR9CfDamPlR4NMf94IR5vLpuHTCfzjO+t3D0wfmzZpi8zrrwOwhAHad0157Fm36EICZe/a33SZ9vEPH7Jefau+YLdw8OMfsyY3fH/e5dsLe6t3j/731RcR1wHUAI8xpY3OS2tFO2EeBk8fMLwXeOHKlzFwNrAY47tgleXDluW1scvocHKre2/accqCtf2f+9uqa6Mw9bTdJEzh0zH6zrL1j9omXj45j1s7V+CeB0yJieUTMAq4GHupMsyR12pR79szcHxF/DTwCDAE/yMxNHWtZn5n5XvV57ZP3N3/Ny38yG4CDI/1/Yac0n1zzQcvlr3y+ujh84JiDvWxOT7RzGk9m/gT4SYfaIqmLvINOKkRbPXtJDg5V74vvLG/+NU4Oefqu/mHPLhXCnr2hA7Or98VfnXv0XbhRGezZpUIYdqkQnsZ3wJJHm687a3f/31+to5M9u1QIwy4VwtN4Fen9Ra3vl8gZR++9EfbsUiHs2Tvg9Uuar7vk0eq/fOSt1j/EUG+8/pnxnrFnlzTgDLtUCE/jGxraV90mu/CZ9v7Lhvd82InmqIHDx+xZjxnYs0vF6GnPvm/BDLZf1R+jxU5dez+E+dWKoXpq6GPXUyeVc8z2bR+//56wZ4+IH0TEzoh4fsyyhRGxLiJerB8XdKitkrqkyWn8vwKXHbHsJmB9Zp4GrK/nJfWxRuWfImIZsDYzz67ntwIXZ+aOiFgMPJ6Zp0/076w8ZySfeOTkiVaTNEXnf+41Nv70/ZYVYaZ6ge6kzNwBUD+eONXGSeqNrl+Nj4jrImJjRGzc9VZ7g/VLmrqphv3N+vSd+nHneCtm5urMXJmZK09Y1P9XM6Wj1VTD/hDwlXr6K8CPO9McSd3S5Ku3u4H/Bk6PiNGIuBa4BVgVES9S1We/pbvNlNSuCW+qycxrxnlq4trLkvqGt8tKhejp7bJb9s7nouf+tJeblKbVKce+fXj6tHkfXcce3VvddLpld2e/td6y985xn7NnlwrR0559xo4h5vzjJ3q5SWlabT77tw5P/88Zv3N4et4r1dfQJ23c29Htzdgx/tfb9uxSIQy7VAjDLhXCsEuFMOxSIRxwUuqi45/fO2Z6GhuCPbtUDMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4VoMizVyRHxWERsjohNEXF9vdyqMNIAadKz7we+kZlnAhcAX42Is7AqjDRQJgx7Zu7IzKfr6V8Dm4ElwJXAoWEx7gS+2K1GSmrfpD6z12WgzgU20LAqzNgiER98uKe91kqassZhj4h5wI+AGzLz3aavG1skYtbMuVNpo6QOaBT2iJhJFfS7MvOBenHjqjCSpl+Tq/EB3AFszszvjnnKqjDSAGnyE9eLgL8AfhYRz9bL/o6qCsx9dYWYV4EvdaeJkjqhSUWY/wJa1nvGqjDSwPAOOqkQ
hl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQTcagG4mIJyLip3VFmG/Xy5dHxIa6Isy9ETGr+82VNFVNevZ9wCWZeQ6wArgsIi4AvgN8r64I8zZwbfeaKaldTSrCZGb+pp6dWf8lcAmwpl5uRRipzzUdN36oHll2J7AO2A7szsz99SqjVCWhWr3WijBSH2gU9sw8kJkrgKXA+cCZrVYb57VWhJH6wKSuxmfmbuBxqmqu8yPi0FDUS4E3Ots0SZ3U5Gr8CRExv54+BvgsVSXXx4Cr6tWsCCP1uSYVYRYDd0bEENWbw32ZuTYiXgDuiYh/AJ6hKhElqU81qQjzHFWZ5iOXv0T1+V3SAPAOOqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCmHYpUIYdqkQjcNeDyf9TESsreetCCMNkMn07NdTDTR5iBVhpAHStEjEUuDzwO31fGBFGGmgNO3ZbwW+CRys5xdhRRhpoDQZN/4LwM7MfGrs4harWhFG6mNNxo2/CLgiIi4HRoDjqHr6+RExXPfuVoSR+lyTKq43Z+bSzFwGXA08mplfxoow0kBp53v2G4GvR8Q2qs/wVoSR+liT0/jDMvNxqsKOVoSRBox30EmFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFaDRSTUS8AvwaOADsz8yVEbEQuBdYBrwC/Hlmvt2dZkpq12R69s9k5orMXFnP3wSsryvCrK/nJfWpdk7jr6SqBANWhJH6XtOwJ/CfEfFURFxXLzspM3cA1I8ntnqhFWGk/tB0dNmLMvONiDgRWBcRW5puIDNXA6sBjjt2ScuqMZK6r1HPnplv1I87gQephpB+MyIWA9SPO7vVSEnta1LrbW5EHHtoGvgj4HngIapKMGBFGKnvNTmNPwl4sKrSzDDw75n5cEQ8CdwXEdcCrwJf6l4zJbVrwrDXlV/OabH8LeDSbjRKUud5B51UCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUiEZhj4j5EbEmIrZExOaIuDAiFkbEuoh4sX5c0O3GSpq6pj37bcDDmXkG1RBVm7EijDRQJhyDLiKOA/4Q+EuAzPwA+CAirgQurle7E3gcuLEbjfw47ywfAWDvCXF42bzXD46Z3tfrJkl9qUnPfiqwC/hhRDwTEbfXQ0pbEUYaIE2Gkh4GzgO+lpkbIuI2JnHK3u2KMId69D2nHDi8bHjv0OHpea93eovSYGrSs48Co5m5oZ5fQxV+K8JIA2TCsGfmL4DXIuL0etGlwAtYEUYaKE0LO34NuCsiZgEvAX9F9UZhRRhpQDQKe2Y+C6xs8ZQVYaQB4R10UiEMu1QIwy4VwrBLhTDsUiEMu1QIwy4VwrBLhWh6B13fGq5/SDdz90fvW0N7O/57G2ng2bNLhTDsUiEG/jT++E17q4lN09sOqd/Zs0uFMOxSIQy7VAjDLhViwrBHxOkR8eyYv3cj4gaLREiDpckYdFszc0VmrgB+D3gPeBCLREgDZbKn8ZcC2zPzf4ErqYpDUD9+sZMNk9RZkw371cDd9XSjIhGS+kPjsNcjy14B3D+ZDVgRRuoPk+nZ/xh4OjPfrOcbFYnIzNWZuTIzV86aObe91kqassmE/Ro+OoUHi0RIA6VpffY5wCrggTGLbwFWRcSL9XO3dL55kjqlaZGI94BFRyx7C4tESAPDO+ikQhh2qRCGXSqEYZcKYdilQhh2qRCGXSqEYZcKYdilQhh2qRCGXSqEYZcK0dOKMPsWzGD7VbN6uUmpKPu2j99/27NLhehpz/6pBbt44s/+pZeblIpy/upd4z5nzy4VwrBLhWg6LNXf
RsSmiHg+Iu6OiJGIWB4RG+qKMPfWo89K6lNNyj8tAf4GWJmZZwNDVOPHfwf4Xl0R5m3g2m42VFJ7mp7GDwPHRMQwMAfYAVwCrKmftyKM1Oea1Hp7Hfgn4FWqkL8DPAXszsz99WqjwJJuNVJS+5qcxi+gquu2HPhtYC5VwYgj5TivP1wRZtdbB9ppq6Q2NDmN/yzwcmbuyswPqcaO/31gfn1aD7AUeKPVi8dWhDlh0VBHGi1p8pqE/VXggoiYExFBNVb8C8BjwFX1OlaEkfpck8/sG6guxD0N/Kx+zWrgRuDrEbGNqoDEHV1sp6Q2Na0I8y3gW0csfgk4v+MtktQV3kEnFcKwS4Uw7FIhDLtUiMhseS9MdzYWsQvYA/yyZxvtvuNxf/rV0bQv0Gx/TsnME1o90dOwA0TExsxc2dONdpH707+Opn2B9vfH03ipEIZdKsR0hH31NGyzm9yf/nU07Qu0uT89/8wuaXp4Gi8Voqdhj4jLImJrRGyLiJt6ue12RcTJEfFYRGyux+O7vl6+MCLW1WPxrat//z8wImIoIp6JiLX1/MCOLRgR8yNiTURsqY/ThYN8fDo99mPPwh4RQ8D3qQa+OAu4JiLO6tX2O2A/8I3MPBO4APhq3f6bgPX1WHzr6/lBcj2wecz8II8teBvwcGaeAZxDtV8DeXy6MvZjZvbkD7gQeGTM/M3Azb3afhf258fAKmArsLhethjYOt1tm8Q+LKUKwCXAWiCobtoYbnXM+vkPOA54mfo61JjlA3l8qIZ5ew1YSPXr1LXA59o5Pr08jT/U+EMGdty6iFgGnAtsAE7KzB0A9eOJ09eySbsV+CZwsJ5fxOCOLXgqsAv4Yf2x5PaImMuAHp/swtiPvQx7tFg2cF8FRMQ84EfADZn57nS3Z6oi4gvAzsx8auziFqsOyjEaBs4D/jkzz6W6LXsgTtlbaXfsx1Z6GfZR4OQx8+OOW9evImImVdDvyswH6sVvRsTi+vnFwM7pat8kXQRcERGvAPdQncrfSsOxBfvQKDCa1chKUI2udB6De3zaGvuxlV6G/UngtPpq4iyqiw0P9XD7banH37sD2JyZ3x3z1ENUY/DBAI3Fl5k3Z+bSzFxGdSwezcwvM6BjC2bmL4DXIuL0etGhsRIH8vjQjbEfe3zR4XLg58B24O+n+yLIJNv+B1SnTM8Bz9Z/l1N9zl0PvFg/Lpzutk5h3y4G1tbTpwJPANuA+4HZ092+SezHCmBjfYz+A1gwyMcH+DawBXge+DdgdjvHxzvopEJ4B51UCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIh/g+Swkjra2WMOQAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Create and wrap the environment\n",
    "env = make_atari('PongNoFrameskip-v4') # only use in no frameskip environment\n",
    "env = wrap_deepmind(env, scale = False, frame_stack=True )\n",
    "n_actions = env.action_space.n\n",
    "state_dim = env.observation_space.shape\n",
    "\n",
    "# env.render()\n",
    "test = env.reset()\n",
    "for i in range(100):\n",
    "    test = env.step(env.action_space.sample())[0]\n",
    "\n",
    "plt.imshow(test._force()[...,0])\n",
    "\n",
    "#plt.imshow(env.render(\"rgb_array\"))\n",
    "# env.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class NoisyFactorizedLinear(nn.Linear):\n",
    "    \"\"\"\n",
    "    NoisyNet layer with factorized gaussian noise\n",
    "    N.B. nn.Linear already initializes weight and bias to\n",
    "    \"\"\"\n",
    "    def __init__(self, in_features, out_features, sigma_zero=0.4, bias=True):\n",
    "        super(NoisyFactorizedLinear, self).__init__(in_features, out_features, bias=bias)\n",
    "        sigma_init = sigma_zero / math.sqrt(in_features)\n",
    "        self.sigma_weight = nn.Parameter(torch.Tensor(out_features, in_features).fill_(sigma_init))\n",
    "        self.register_buffer(\"epsilon_input\", torch.zeros(1, in_features))\n",
    "        self.register_buffer(\"epsilon_output\", torch.zeros(out_features, 1))\n",
    "        if bias:\n",
    "            self.sigma_bias = nn.Parameter(torch.Tensor(out_features).fill_(sigma_init))\n",
    "\n",
    "    def forward(self, input):\n",
    "        bias = self.bias\n",
    "        func = lambda x: torch.sign(x) * torch.sqrt(torch.abs(x))\n",
    "\n",
    "        with torch.no_grad():\n",
    "            torch.randn(self.epsilon_input.size(), out=self.epsilon_input)\n",
    "            torch.randn(self.epsilon_output.size(), out=self.epsilon_output)\n",
    "            eps_in = func(self.epsilon_input)\n",
    "            eps_out = func(self.epsilon_output)\n",
    "            noise_v = torch.mul(eps_in, eps_out).detach()\n",
    "        if bias is not None:\n",
    "            bias = bias + self.sigma_bias * eps_out.t()\n",
    "        return F.linear(input, self.weight + self.sigma_weight * noise_v, bias)\n",
    "\n",
    "class Noisy_DQN(nn.Module):\n",
    "    def __init__(self, in_channels=4, num_actions=5):\n",
    "        \"\"\"\n",
    "        Initialize a deep Q-learning network as described in\n",
    "        https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf\n",
    "        Arguments:\n",
    "            in_channels: number of channel of input.\n",
    "                i.e The number of most recent frames stacked together as describe in the paper\n",
    "            num_actions: number of action-value to output, one-to-one correspondence to action in game.\n",
    "        \"\"\"\n",
    "        super(Noisy_DQN, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)\n",
    "        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)\n",
    "        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\n",
    "        self.fc4 = NoisyFactorizedLinear(7 * 7 * 64, 512)\n",
    "        self.fc5 = NoisyFactorizedLinear(512, num_actions)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = F.relu(self.conv1(x))\n",
    "        x = F.relu(self.conv2(x))\n",
    "        x = F.relu(self.conv3(x))\n",
    "        x = F.relu(self.fc4(x.view(x.size(0), -1)))\n",
    "        return self.fc5(x)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Memory_Buffer(object):\n",
    "    def __init__(self, memory_size=1000):\n",
    "        self.buffer = []\n",
    "        self.memory_size = memory_size\n",
    "        self.next_idx = 0\n",
    "        \n",
    "    def push(self, state, action, reward, next_state, done):\n",
    "        data = (state, action, reward, next_state, done)\n",
    "        if len(self.buffer) <= self.memory_size: # buffer not full\n",
    "            self.buffer.append(data)\n",
    "        else: # buffer is full\n",
    "            self.buffer[self.next_idx] = data\n",
    "        self.next_idx = (self.next_idx + 1) % self.memory_size\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        states, actions, rewards, next_states, dones = [], [], [], [], []\n",
    "        for i in range(batch_size):\n",
    "            idx = random.randint(0, self.size() - 1)\n",
    "            data = self.buffer[idx]\n",
    "            state, action, reward, next_state, done= data\n",
    "            states.append(state)\n",
    "            actions.append(action)\n",
    "            rewards.append(reward)\n",
    "            next_states.append(next_state)\n",
    "            dones.append(done)\n",
    "            \n",
    "            \n",
    "        return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones\n",
    "    \n",
    "    def size(self):\n",
    "        return len(self.buffer)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Noisy_DQNAgent: \n",
    "    def __init__(self, in_channels = 1, action_space = [], USE_CUDA = False, memory_size = 10000, epsilon  = 1, lr = 1e-4):\n",
    "        self.action_space = action_space\n",
    "        self.memory_buffer = Memory_Buffer(memory_size)\n",
    "        self.DQN = Noisy_DQN(in_channels = in_channels, num_actions = action_space.n)\n",
    "        self.DQN_target = Noisy_DQN(in_channels = in_channels, num_actions = action_space.n)\n",
    "        self.DQN_target.load_state_dict(self.DQN.state_dict())\n",
    "\n",
    "        self.USE_CUDA = USE_CUDA\n",
    "        if USE_CUDA:\n",
    "            self.DQN = self.DQN.cuda()\n",
    "            self.DQN_target = self.DQN_target.cuda()\n",
    "        self.optimizer = optim.RMSprop(self.DQN.parameters(),lr=lr, eps=0.001, alpha=0.95)\n",
    "\n",
    "    def observe(self, lazyframe):\n",
    "        # from Lazy frame to tensor\n",
    "        state =  torch.from_numpy(lazyframe._force().transpose(2,0,1)[None]/255).float()\n",
    "        if self.USE_CUDA:\n",
    "            state = state.cuda()\n",
    "        return state\n",
    "\n",
    "    def value(self, state):\n",
    "        q_values = self.DQN(state)\n",
    "        return q_values\n",
    "    \n",
    "    def act(self, state):\n",
    "        \"\"\"\n",
    "        action follows greedy policy (with intrinsic noise generated from NoisyNet)\n",
    "        \"\"\"\n",
    "        q_values = self.value(state).cpu().detach().numpy()\n",
    "        aciton = q_values.argmax(1)[0]\n",
    "        return aciton\n",
    "    \n",
    "    def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):\n",
    "        \"\"\" Compute td loss using torch operations only. Use the formula above. \"\"\"\n",
    "        actions = torch.tensor(actions).long()    # shape: [batch_size]\n",
    "        rewards = torch.tensor(rewards, dtype =torch.float)  # shape: [batch_size]\n",
    "        is_done = torch.tensor(is_done).bool()  # shape: [batch_size]\n",
    "        \n",
    "        if self.USE_CUDA:\n",
    "            actions = actions.cuda()\n",
    "            rewards = rewards.cuda()\n",
    "            is_done = is_done.cuda()\n",
    "\n",
    "        # get q-values for all actions in current states\n",
    "        predicted_qvalues = self.DQN(states)\n",
    "\n",
    "        # select q-values for chosen actions\n",
    "        predicted_qvalues_for_actions = predicted_qvalues[\n",
    "          range(states.shape[0]), actions\n",
    "        ]\n",
    "\n",
    "        # compute q-values for all actions in next states\n",
    "        predicted_next_qvalues = self.DQN_target(next_states) # YOUR CODE\n",
    "\n",
    "        # compute V*(next_states) using predicted next q-values\n",
    "        next_state_values =  predicted_next_qvalues.max(-1)[0] # YOUR CODE\n",
    "\n",
    "        # compute \"target q-values\" for loss - it's what's inside square parentheses in the above formula.\n",
    "        target_qvalues_for_actions = rewards + gamma *next_state_values # YOUR CODE\n",
    "\n",
    "        # at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist\n",
    "        target_qvalues_for_actions = torch.where(\n",
    "            is_done, rewards, target_qvalues_for_actions)\n",
    "\n",
    "        # mean squared error loss to minimize\n",
    "        #loss = torch.mean((predicted_qvalues_for_actions -\n",
    "        #                   target_qvalues_for_actions.detach()) ** 2)\n",
    "        loss = F.smooth_l1_loss(predicted_qvalues_for_actions, target_qvalues_for_actions.detach())\n",
    "        return loss\n",
    "    \n",
    "    def sample_from_buffer(self, batch_size):\n",
    "        states, actions, rewards, next_states, dones = [], [], [], [], []\n",
    "        for i in range(batch_size):\n",
    "            idx = random.randint(0, self.memory_buffer.size() - 1)\n",
    "            data = self.memory_buffer.buffer[idx]\n",
    "            frame, action, reward, next_frame, done= data\n",
    "            states.append(self.observe(frame))\n",
    "            actions.append(action)\n",
    "            rewards.append(reward)\n",
    "            next_states.append(self.observe(next_frame))\n",
    "            dones.append(done)\n",
    "        return torch.cat(states), actions, rewards, torch.cat(next_states), dones\n",
    "\n",
    "    def learn_from_experience(self, batch_size):\n",
    "        if self.memory_buffer.size() > batch_size:\n",
    "            states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)\n",
    "            td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)\n",
    "            self.optimizer.zero_grad()\n",
    "            td_loss.backward()\n",
    "            self.optimizer.step()\n",
    "            return(td_loss.item())\n",
    "        else:\n",
    "            return(0)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "F:\\softwares\\ANACONDA\\lib\\site-packages\\numpy\\core\\fromnumeric.py:3335: RuntimeWarning: Mean of empty slice.\n",
      "  out=out, **kwargs)\n",
      "F:\\softwares\\ANACONDA\\lib\\site-packages\\numpy\\core\\_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars\n",
      "  ret = ret.dtype.type(ret / rcount)\n",
      "WARNING:root:NaN or Inf found in input tensor.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames:     0, reward:   nan, loss: 0.000000, episode:    0\n",
      "frames:  1000, reward: -21.000000, loss: 0.000000, episode:    1\n",
      "frames:  2000, reward: -21.000000, loss: 0.000000, episode:    2\n",
      "frames:  3000, reward: -21.000000, loss: 0.000000, episode:    3\n",
      "frames:  4000, reward: -21.000000, loss: 0.000000, episode:    5\n",
      "frames:  5000, reward: -21.000000, loss: 0.000000, episode:    6\n",
      "frames:  6000, reward: -21.000000, loss: 0.000000, episode:    7\n",
      "frames:  7000, reward: -21.000000, loss: 0.000000, episode:    9\n",
      "frames:  8000, reward: -21.000000, loss: 0.000000, episode:   10\n",
      "frames:  9000, reward: -21.000000, loss: 0.000000, episode:   11\n",
      "frames: 10000, reward: -21.000000, loss: 0.032251, episode:   13\n",
      "frames: 11000, reward: -20.900000, loss: 0.000695, episode:   14\n",
      "frames: 12000, reward: -20.700000, loss: 0.015139, episode:   15\n",
      "frames: 13000, reward: -20.700000, loss: 0.000849, episode:   16\n",
      "frames: 14000, reward: -20.600000, loss: 0.000401, episode:   17\n",
      "frames: 15000, reward: -20.600000, loss: 0.000599, episode:   18\n",
      "frames: 16000, reward: -20.600000, loss: 0.044423, episode:   19\n",
      "frames: 17000, reward: -20.500000, loss: 0.030480, episode:   21\n",
      "frames: 18000, reward: -20.400000, loss: 0.000553, episode:   22\n",
      "frames: 19000, reward: -20.400000, loss: 0.000375, episode:   23\n",
      "frames: 20000, reward: -20.500000, loss: 0.031222, episode:   24\n",
      "frames: 21000, reward: -20.600000, loss: 0.015099, episode:   25\n",
      "frames: 22000, reward: -20.500000, loss: 0.000250, episode:   26\n",
      "frames: 23000, reward: -20.600000, loss: 0.015847, episode:   28\n",
      "frames: 24000, reward: -20.600000, loss: 0.000157, episode:   29\n",
      "frames: 25000, reward: -20.700000, loss: 0.044690, episode:   30\n",
      "frames: 26000, reward: -20.700000, loss: 0.030108, episode:   31\n",
      "frames: 27000, reward: -20.800000, loss: 0.000547, episode:   32\n",
      "frames: 28000, reward: -20.600000, loss: 0.015058, episode:   34\n",
      "frames: 29000, reward: -20.700000, loss: 0.000622, episode:   35\n",
      "frames: 30000, reward: -20.800000, loss: 0.029765, episode:   36\n",
      "frames: 31000, reward: -20.800000, loss: 0.015115, episode:   37\n",
      "frames: 32000, reward: -20.700000, loss: 0.000394, episode:   39\n",
      "frames: 33000, reward: -20.600000, loss: 0.015605, episode:   40\n",
      "frames: 34000, reward: -20.600000, loss: 0.000242, episode:   41\n",
      "frames: 35000, reward: -20.600000, loss: 0.000440, episode:   42\n",
      "frames: 36000, reward: -20.600000, loss: 0.000214, episode:   43\n",
      "frames: 37000, reward: -20.600000, loss: 0.000435, episode:   44\n",
      "frames: 38000, reward: -20.500000, loss: 0.000185, episode:   46\n",
      "frames: 39000, reward: -20.400000, loss: 0.030375, episode:   47\n",
      "frames: 40000, reward: -20.500000, loss: 0.000173, episode:   48\n",
      "frames: 41000, reward: -20.500000, loss: 0.029901, episode:   49\n",
      "frames: 42000, reward: -20.500000, loss: 0.059901, episode:   50\n",
      "frames: 43000, reward: -20.400000, loss: 0.074559, episode:   51\n",
      "frames: 44000, reward: -20.400000, loss: 0.015204, episode:   53\n",
      "frames: 45000, reward: -20.500000, loss: 0.000219, episode:   54\n",
      "frames: 46000, reward: -20.500000, loss: 0.075348, episode:   55\n",
      "frames: 47000, reward: -20.400000, loss: 0.000371, episode:   56\n",
      "frames: 48000, reward: -20.400000, loss: 0.000251, episode:   57\n",
      "frames: 49000, reward: -20.300000, loss: 0.000073, episode:   58\n",
      "frames: 50000, reward: -20.200000, loss: 0.000245, episode:   59\n",
      "frames: 51000, reward: -20.300000, loss: 0.000189, episode:   61\n",
      "frames: 52000, reward: -20.300000, loss: 0.000063, episode:   62\n",
      "frames: 53000, reward: -20.300000, loss: 0.062740, episode:   63\n",
      "frames: 54000, reward: -20.300000, loss: 0.015109, episode:   64\n",
      "frames: 55000, reward: -20.300000, loss: 0.000565, episode:   65\n",
      "frames: 56000, reward: -20.300000, loss: 0.028968, episode:   66\n",
      "frames: 57000, reward: -20.400000, loss: 0.001660, episode:   67\n",
      "frames: 58000, reward: -20.400000, loss: 0.000060, episode:   69\n",
      "frames: 59000, reward: -20.300000, loss: 0.015288, episode:   70\n",
      "frames: 60000, reward: -20.300000, loss: 0.000018, episode:   71\n",
      "frames: 61000, reward: -20.400000, loss: 0.000214, episode:   72\n",
      "frames: 62000, reward: -20.300000, loss: 0.000245, episode:   73\n",
      "frames: 63000, reward: -20.300000, loss: 0.028958, episode:   74\n",
      "frames: 64000, reward: -20.400000, loss: 0.000331, episode:   76\n",
      "frames: 65000, reward: -20.300000, loss: 0.015440, episode:   77\n",
      "frames: 66000, reward: -20.400000, loss: 0.015038, episode:   78\n",
      "frames: 67000, reward: -20.400000, loss: 0.015035, episode:   79\n",
      "frames: 68000, reward: -20.500000, loss: 0.000627, episode:   80\n",
      "frames: 69000, reward: -20.500000, loss: 0.015311, episode:   81\n",
      "frames: 70000, reward: -20.600000, loss: 0.014667, episode:   83\n",
      "frames: 71000, reward: -20.500000, loss: 0.029904, episode:   84\n",
      "frames: 72000, reward: -20.400000, loss: 0.015034, episode:   85\n",
      "frames: 73000, reward: -20.500000, loss: 0.031212, episode:   86\n",
      "frames: 74000, reward: -20.500000, loss: 0.000135, episode:   87\n",
      "frames: 75000, reward: -20.500000, loss: 0.015563, episode:   88\n",
      "frames: 76000, reward: -20.400000, loss: 0.013051, episode:   89\n",
      "frames: 77000, reward: -20.400000, loss: 0.000295, episode:   90\n",
      "frames: 78000, reward: -20.300000, loss: 0.000417, episode:   91\n",
      "frames: 79000, reward: -20.300000, loss: 0.059520, episode:   92\n",
      "frames: 80000, reward: -20.200000, loss: 0.031130, episode:   94\n",
      "frames: 81000, reward: -20.200000, loss: 0.000353, episode:   94\n",
      "frames: 82000, reward: -20.000000, loss: 0.000056, episode:   96\n",
      "frames: 83000, reward: -20.000000, loss: 0.000112, episode:   97\n",
      "frames: 84000, reward: -20.100000, loss: 0.000405, episode:   98\n",
      "frames: 85000, reward: -20.000000, loss: 0.029871, episode:   99\n",
      "frames: 86000, reward: -20.100000, loss: 0.015134, episode:  100\n",
      "frames: 87000, reward: -20.200000, loss: 0.001298, episode:  101\n",
      "frames: 88000, reward: -20.200000, loss: 0.000129, episode:  102\n",
      "frames: 89000, reward: -20.300000, loss: 0.015443, episode:  104\n",
      "frames: 90000, reward: -20.500000, loss: 0.000408, episode:  105\n",
      "frames: 91000, reward: -20.500000, loss: 0.000105, episode:  106\n",
      "frames: 92000, reward: -20.600000, loss: 0.000118, episode:  107\n",
      "frames: 93000, reward: -20.600000, loss: 0.000076, episode:  108\n",
      "frames: 94000, reward: -20.700000, loss: 0.030214, episode:  110\n",
      "frames: 95000, reward: -20.700000, loss: 0.000578, episode:  111\n",
      "frames: 96000, reward: -20.700000, loss: 0.029234, episode:  112\n",
      "frames: 97000, reward: -20.700000, loss: 0.000405, episode:  113\n",
      "frames: 98000, reward: -20.800000, loss: 0.000163, episode:  114\n",
      "frames: 99000, reward: -20.900000, loss: 0.015325, episode:  116\n",
      "frames: 100000, reward: -20.900000, loss: 0.030472, episode:  117\n",
      "frames: 101000, reward: -20.800000, loss: 0.015176, episode:  118\n",
      "frames: 102000, reward: -20.800000, loss: 0.000079, episode:  119\n",
      "frames: 103000, reward: -20.800000, loss: 0.016450, episode:  120\n",
      "frames: 104000, reward: -20.800000, loss: 0.000710, episode:  122\n",
      "frames: 105000, reward: -20.800000, loss: 0.000048, episode:  123\n",
      "frames: 106000, reward: -20.800000, loss: 0.074395, episode:  124\n",
      "frames: 107000, reward: -20.800000, loss: 0.000053, episode:  125\n",
      "frames: 108000, reward: -20.800000, loss: 0.015569, episode:  126\n",
      "frames: 109000, reward: -20.800000, loss: 0.000347, episode:  128\n",
      "frames: 110000, reward: -20.800000, loss: 0.015442, episode:  129\n",
      "frames: 111000, reward: -20.800000, loss: 0.014777, episode:  130\n",
      "frames: 112000, reward: -20.800000, loss: 0.015476, episode:  131\n",
      "frames: 113000, reward: -20.700000, loss: 0.000233, episode:  132\n",
      "frames: 114000, reward: -20.700000, loss: 0.015348, episode:  133\n",
      "frames: 115000, reward: -20.400000, loss: 0.000551, episode:  135\n",
      "frames: 116000, reward: -20.300000, loss: 0.000184, episode:  136\n",
      "frames: 117000, reward: -20.300000, loss: 0.000369, episode:  137\n",
      "frames: 118000, reward: -20.400000, loss: 0.000268, episode:  138\n",
      "frames: 119000, reward: -20.200000, loss: 0.015211, episode:  139\n",
      "frames: 120000, reward: -20.300000, loss: 0.000651, episode:  140\n",
      "frames: 121000, reward: -20.400000, loss: 0.000410, episode:  142\n",
      "frames: 122000, reward: -20.300000, loss: 0.000431, episode:  143\n",
      "frames: 123000, reward: -20.300000, loss: 0.014926, episode:  144\n",
      "frames: 124000, reward: -20.400000, loss: 0.000043, episode:  145\n",
      "frames: 125000, reward: -20.400000, loss: 0.015689, episode:  146\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 126000, reward: -20.300000, loss: 0.000283, episode:  147\n",
      "frames: 127000, reward: -20.500000, loss: 0.000265, episode:  149\n",
      "frames: 128000, reward: -20.400000, loss: 0.029867, episode:  150\n",
      "frames: 129000, reward: -20.400000, loss: 0.000388, episode:  151\n",
      "frames: 130000, reward: -20.400000, loss: 0.026610, episode:  152\n",
      "frames: 131000, reward: -20.400000, loss: 0.014898, episode:  153\n",
      "frames: 132000, reward: -20.500000, loss: 0.000128, episode:  154\n",
      "frames: 133000, reward: -20.400000, loss: 0.000030, episode:  156\n",
      "frames: 134000, reward: -20.300000, loss: 0.014891, episode:  157\n",
      "frames: 135000, reward: -20.300000, loss: 0.015979, episode:  158\n",
      "frames: 136000, reward: -20.300000, loss: 0.015023, episode:  159\n",
      "frames: 137000, reward: -20.400000, loss: 0.000842, episode:  160\n",
      "frames: 138000, reward: -20.300000, loss: 0.012260, episode:  161\n",
      "frames: 139000, reward: -20.100000, loss: 0.015457, episode:  162\n",
      "frames: 140000, reward: -20.100000, loss: 0.015600, episode:  164\n",
      "frames: 141000, reward: -20.200000, loss: 0.000214, episode:  165\n",
      "frames: 142000, reward: -20.300000, loss: 0.000501, episode:  166\n",
      "frames: 143000, reward: -20.500000, loss: 0.030248, episode:  167\n",
      "frames: 144000, reward: -20.400000, loss: 0.026179, episode:  168\n",
      "frames: 145000, reward: -20.300000, loss: 0.000121, episode:  169\n",
      "frames: 146000, reward: -20.400000, loss: 0.000166, episode:  171\n",
      "frames: 147000, reward: -20.600000, loss: 0.000366, episode:  172\n",
      "frames: 148000, reward: -20.600000, loss: 0.015314, episode:  173\n",
      "frames: 149000, reward: -20.600000, loss: 0.000153, episode:  174\n",
      "frames: 150000, reward: -20.600000, loss: 0.015184, episode:  175\n",
      "frames: 151000, reward: -20.600000, loss: 0.000172, episode:  176\n",
      "frames: 152000, reward: -20.700000, loss: 0.015371, episode:  178\n",
      "frames: 153000, reward: -20.800000, loss: 0.015362, episode:  179\n",
      "frames: 154000, reward: -20.800000, loss: 0.000055, episode:  180\n",
      "frames: 155000, reward: -20.800000, loss: 0.000675, episode:  181\n",
      "frames: 156000, reward: -20.800000, loss: 0.041213, episode:  182\n",
      "frames: 157000, reward: -21.000000, loss: 0.014567, episode:  184\n",
      "frames: 158000, reward: -20.900000, loss: 0.014659, episode:  185\n",
      "frames: 159000, reward: -20.600000, loss: 0.000121, episode:  186\n",
      "frames: 160000, reward: -20.600000, loss: 0.000528, episode:  187\n",
      "frames: 161000, reward: -20.600000, loss: 0.001103, episode:  188\n",
      "frames: 162000, reward: -20.600000, loss: 0.000043, episode:  189\n",
      "frames: 163000, reward: -20.500000, loss: 0.015165, episode:  190\n",
      "frames: 164000, reward: -20.500000, loss: 0.015984, episode:  191\n",
      "frames: 165000, reward: -20.300000, loss: 0.030285, episode:  192\n",
      "frames: 166000, reward: -20.200000, loss: 0.000144, episode:  194\n",
      "frames: 167000, reward: -20.300000, loss: 0.000504, episode:  195\n",
      "frames: 168000, reward: -20.500000, loss: 0.029219, episode:  196\n",
      "frames: 169000, reward: -20.500000, loss: 0.015167, episode:  197\n",
      "frames: 170000, reward: -20.400000, loss: 0.000275, episode:  198\n",
      "frames: 171000, reward: -20.400000, loss: 0.000232, episode:  199\n",
      "frames: 172000, reward: -20.500000, loss: 0.030658, episode:  200\n",
      "frames: 173000, reward: -20.200000, loss: 0.000328, episode:  201\n",
      "frames: 174000, reward: -20.100000, loss: 0.015751, episode:  202\n",
      "frames: 175000, reward: -19.900000, loss: 0.031822, episode:  203\n",
      "frames: 176000, reward: -19.800000, loss: 0.000267, episode:  205\n",
      "frames: 177000, reward: -19.900000, loss: 0.000432, episode:  206\n",
      "frames: 178000, reward: -19.800000, loss: 0.000403, episode:  207\n",
      "frames: 179000, reward: -19.900000, loss: 0.000277, episode:  208\n",
      "frames: 180000, reward: -19.800000, loss: 0.015223, episode:  209\n",
      "frames: 181000, reward: -19.800000, loss: 0.014939, episode:  210\n",
      "frames: 182000, reward: -20.000000, loss: 0.001236, episode:  211\n",
      "frames: 183000, reward: -20.300000, loss: 0.000906, episode:  212\n",
      "frames: 184000, reward: -20.500000, loss: 0.015558, episode:  214\n",
      "frames: 185000, reward: -20.600000, loss: 0.000115, episode:  215\n",
      "frames: 186000, reward: -20.500000, loss: 0.030710, episode:  216\n",
      "frames: 187000, reward: -20.400000, loss: 0.000324, episode:  217\n",
      "frames: 188000, reward: -20.400000, loss: 0.000437, episode:  218\n",
      "frames: 189000, reward: -20.400000, loss: 0.029739, episode:  220\n",
      "frames: 190000, reward: -20.500000, loss: 0.014743, episode:  221\n",
      "frames: 191000, reward: -20.500000, loss: 0.029416, episode:  222\n",
      "frames: 192000, reward: -20.500000, loss: 0.000286, episode:  223\n",
      "frames: 193000, reward: -20.500000, loss: 0.026009, episode:  224\n",
      "frames: 194000, reward: -20.400000, loss: 0.014906, episode:  225\n",
      "frames: 195000, reward: -20.600000, loss: 0.000886, episode:  227\n",
      "frames: 196000, reward: -20.500000, loss: 0.000666, episode:  228\n",
      "frames: 197000, reward: -20.600000, loss: 0.000047, episode:  229\n",
      "frames: 198000, reward: -20.600000, loss: 0.000308, episode:  230\n",
      "frames: 199000, reward: -20.600000, loss: 0.000397, episode:  231\n",
      "frames: 200000, reward: -20.500000, loss: 0.000347, episode:  233\n",
      "frames: 201000, reward: -20.500000, loss: 0.000613, episode:  233\n",
      "frames: 202000, reward: -20.500000, loss: 0.000791, episode:  235\n",
      "frames: 203000, reward: -20.500000, loss: 0.015757, episode:  236\n",
      "frames: 204000, reward: -20.300000, loss: 0.030034, episode:  237\n",
      "frames: 205000, reward: -20.400000, loss: 0.014600, episode:  238\n",
      "frames: 206000, reward: -20.200000, loss: 0.000144, episode:  239\n",
      "frames: 207000, reward: -20.000000, loss: 0.000115, episode:  240\n",
      "frames: 208000, reward: -20.000000, loss: 0.000122, episode:  241\n",
      "frames: 209000, reward: -20.100000, loss: 0.015853, episode:  242\n",
      "frames: 210000, reward: -20.100000, loss: 0.000235, episode:  244\n",
      "frames: 211000, reward: -20.100000, loss: 0.001124, episode:  245\n",
      "frames: 212000, reward: -20.200000, loss: 0.015301, episode:  246\n",
      "frames: 213000, reward: -20.300000, loss: 0.030333, episode:  247\n",
      "frames: 214000, reward: -20.300000, loss: 0.000395, episode:  248\n",
      "frames: 215000, reward: -20.400000, loss: 0.030406, episode:  249\n",
      "frames: 216000, reward: -20.500000, loss: 0.000833, episode:  251\n",
      "frames: 217000, reward: -20.400000, loss: 0.045928, episode:  252\n",
      "frames: 218000, reward: -20.500000, loss: 0.015823, episode:  253\n",
      "frames: 219000, reward: -20.400000, loss: 0.000439, episode:  254\n",
      "frames: 220000, reward: -20.300000, loss: 0.000188, episode:  255\n",
      "frames: 221000, reward: -20.300000, loss: 0.015878, episode:  256\n",
      "frames: 222000, reward: -20.400000, loss: 0.025337, episode:  257\n",
      "frames: 223000, reward: -20.300000, loss: 0.000165, episode:  258\n",
      "frames: 224000, reward: -20.400000, loss: 0.000044, episode:  260\n",
      "frames: 225000, reward: -20.300000, loss: 0.014942, episode:  261\n",
      "frames: 226000, reward: -20.400000, loss: 0.001581, episode:  262\n",
      "frames: 227000, reward: -20.300000, loss: 0.034663, episode:  263\n",
      "frames: 228000, reward: -20.500000, loss: 0.000311, episode:  264\n",
      "frames: 229000, reward: -20.600000, loss: 0.000683, episode:  265\n",
      "frames: 230000, reward: -20.600000, loss: 0.000228, episode:  266\n",
      "frames: 231000, reward: -20.400000, loss: 0.000063, episode:  267\n",
      "frames: 232000, reward: -20.500000, loss: 0.000472, episode:  269\n",
      "frames: 233000, reward: -20.600000, loss: 0.015429, episode:  270\n",
      "frames: 234000, reward: -20.600000, loss: 0.015007, episode:  271\n",
      "frames: 235000, reward: -20.600000, loss: 0.014757, episode:  272\n",
      "frames: 236000, reward: -20.700000, loss: 0.015726, episode:  273\n",
      "frames: 237000, reward: -20.600000, loss: 0.000293, episode:  274\n",
      "frames: 238000, reward: -20.500000, loss: 0.015296, episode:  275\n",
      "frames: 239000, reward: -20.500000, loss: 0.015698, episode:  276\n",
      "frames: 240000, reward: -20.700000, loss: 0.030112, episode:  278\n",
      "frames: 241000, reward: -20.600000, loss: 0.015812, episode:  279\n",
      "frames: 242000, reward: -20.600000, loss: 0.001043, episode:  280\n",
      "frames: 243000, reward: -20.600000, loss: 0.000603, episode:  281\n",
      "frames: 244000, reward: -20.500000, loss: 0.014842, episode:  282\n",
      "frames: 245000, reward: -20.500000, loss: 0.015303, episode:  283\n",
      "frames: 246000, reward: -20.500000, loss: 0.000680, episode:  284\n",
      "frames: 247000, reward: -20.400000, loss: 0.015434, episode:  286\n",
      "frames: 248000, reward: -20.300000, loss: 0.015581, episode:  287\n",
      "frames: 249000, reward: -20.300000, loss: 0.029601, episode:  288\n",
      "frames: 250000, reward: -20.400000, loss: 0.015651, episode:  289\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 251000, reward: -20.400000, loss: 0.043163, episode:  290\n",
      "frames: 252000, reward: -20.500000, loss: 0.028646, episode:  291\n",
      "frames: 253000, reward: -20.500000, loss: 0.000105, episode:  293\n",
      "frames: 254000, reward: -20.600000, loss: 0.015493, episode:  294\n",
      "frames: 255000, reward: -20.700000, loss: 0.000646, episode:  295\n",
      "frames: 256000, reward: -20.800000, loss: 0.014848, episode:  296\n",
      "frames: 257000, reward: -20.800000, loss: 0.015726, episode:  297\n",
      "frames: 258000, reward: -20.800000, loss: 0.000124, episode:  298\n",
      "frames: 259000, reward: -20.800000, loss: 0.000143, episode:  299\n",
      "frames: 260000, reward: -20.800000, loss: 0.000201, episode:  301\n",
      "frames: 261000, reward: -20.800000, loss: 0.015506, episode:  302\n",
      "frames: 262000, reward: -20.800000, loss: 0.014863, episode:  303\n",
      "frames: 263000, reward: -20.800000, loss: 0.000205, episode:  304\n",
      "frames: 264000, reward: -20.800000, loss: 0.014449, episode:  305\n",
      "frames: 265000, reward: -20.800000, loss: 0.000489, episode:  306\n",
      "frames: 266000, reward: -20.900000, loss: 0.000132, episode:  308\n",
      "frames: 267000, reward: -20.800000, loss: 0.014934, episode:  309\n",
      "frames: 268000, reward: -20.800000, loss: 0.000196, episode:  310\n",
      "frames: 269000, reward: -20.800000, loss: 0.000269, episode:  311\n",
      "frames: 270000, reward: -20.800000, loss: 0.045160, episode:  312\n",
      "frames: 271000, reward: -20.800000, loss: 0.015566, episode:  313\n",
      "frames: 272000, reward: -20.700000, loss: 0.029784, episode:  314\n",
      "frames: 273000, reward: -20.600000, loss: 0.000453, episode:  316\n",
      "frames: 274000, reward: -20.500000, loss: 0.015368, episode:  317\n",
      "frames: 275000, reward: -20.400000, loss: 0.000131, episode:  318\n",
      "frames: 276000, reward: -20.400000, loss: 0.015272, episode:  319\n",
      "frames: 277000, reward: -20.300000, loss: 0.015191, episode:  320\n",
      "frames: 278000, reward: -20.300000, loss: 0.015837, episode:  321\n",
      "frames: 279000, reward: -20.200000, loss: 0.000136, episode:  322\n",
      "frames: 280000, reward: -20.200000, loss: 0.000256, episode:  324\n",
      "frames: 281000, reward: -20.200000, loss: 0.015201, episode:  325\n",
      "frames: 282000, reward: -20.000000, loss: 0.015262, episode:  326\n",
      "frames: 283000, reward: -20.000000, loss: 0.000542, episode:  327\n",
      "frames: 284000, reward: -19.900000, loss: 0.000475, episode:  328\n",
      "frames: 285000, reward: -19.900000, loss: 0.000114, episode:  329\n",
      "frames: 286000, reward: -20.000000, loss: 0.000060, episode:  330\n",
      "frames: 287000, reward: -20.000000, loss: 0.000307, episode:  331\n",
      "frames: 288000, reward: -20.300000, loss: 0.000175, episode:  333\n",
      "frames: 289000, reward: -20.200000, loss: 0.014578, episode:  334\n",
      "frames: 290000, reward: -20.200000, loss: 0.014629, episode:  335\n",
      "frames: 291000, reward: -20.300000, loss: 0.015472, episode:  336\n",
      "frames: 292000, reward: -20.400000, loss: 0.030376, episode:  337\n",
      "frames: 293000, reward: -20.700000, loss: 0.000077, episode:  339\n",
      "frames: 294000, reward: -20.600000, loss: 0.015503, episode:  340\n",
      "frames: 295000, reward: -20.600000, loss: 0.000807, episode:  341\n",
      "frames: 296000, reward: -20.600000, loss: 0.014937, episode:  342\n",
      "frames: 297000, reward: -20.600000, loss: 0.031335, episode:  343\n",
      "frames: 298000, reward: -20.700000, loss: 0.015189, episode:  345\n",
      "frames: 299000, reward: -20.800000, loss: 0.000449, episode:  346\n",
      "frames: 300000, reward: -20.700000, loss: 0.000440, episode:  347\n",
      "frames: 301000, reward: -20.700000, loss: 0.000590, episode:  348\n",
      "frames: 302000, reward: -20.500000, loss: 0.001421, episode:  349\n",
      "frames: 303000, reward: -20.500000, loss: 0.000037, episode:  350\n",
      "frames: 304000, reward: -20.200000, loss: 0.000224, episode:  351\n",
      "frames: 305000, reward: -20.200000, loss: 0.060410, episode:  352\n",
      "frames: 306000, reward: -20.200000, loss: 0.015740, episode:  354\n",
      "frames: 307000, reward: -20.200000, loss: 0.000728, episode:  355\n",
      "frames: 308000, reward: -20.200000, loss: 0.013829, episode:  356\n",
      "frames: 309000, reward: -20.300000, loss: 0.000497, episode:  358\n",
      "frames: 310000, reward: -20.500000, loss: 0.013621, episode:  359\n",
      "frames: 311000, reward: -20.600000, loss: 0.000748, episode:  360\n",
      "frames: 312000, reward: -20.900000, loss: 0.000397, episode:  361\n",
      "frames: 313000, reward: -20.800000, loss: 0.046383, episode:  362\n",
      "frames: 314000, reward: -20.800000, loss: 0.016383, episode:  363\n",
      "frames: 315000, reward: -20.600000, loss: 0.012938, episode:  365\n",
      "frames: 316000, reward: -20.500000, loss: 0.000419, episode:  366\n",
      "frames: 317000, reward: -20.500000, loss: 0.000450, episode:  367\n",
      "frames: 318000, reward: -20.500000, loss: 0.012045, episode:  368\n",
      "frames: 319000, reward: -20.500000, loss: 0.025832, episode:  369\n",
      "frames: 320000, reward: -20.400000, loss: 0.012302, episode:  371\n",
      "frames: 321000, reward: -20.500000, loss: 0.012746, episode:  372\n",
      "frames: 322000, reward: -20.500000, loss: 0.026944, episode:  373\n",
      "frames: 323000, reward: -20.500000, loss: 0.028439, episode:  374\n",
      "frames: 324000, reward: -20.600000, loss: 0.000867, episode:  375\n",
      "frames: 325000, reward: -20.600000, loss: 0.015622, episode:  376\n",
      "frames: 326000, reward: -20.600000, loss: 0.015756, episode:  377\n",
      "frames: 327000, reward: -20.500000, loss: 0.011803, episode:  378\n",
      "frames: 328000, reward: -20.400000, loss: 0.001330, episode:  379\n",
      "frames: 329000, reward: -20.100000, loss: 0.011642, episode:  380\n",
      "frames: 330000, reward: -20.000000, loss: 0.015904, episode:  381\n",
      "frames: 331000, reward: -20.000000, loss: 0.001186, episode:  383\n",
      "frames: 332000, reward: -20.100000, loss: 0.001744, episode:  384\n",
      "frames: 333000, reward: -20.100000, loss: 0.019270, episode:  385\n",
      "frames: 334000, reward: -20.100000, loss: 0.016695, episode:  386\n",
      "frames: 335000, reward: -20.000000, loss: 0.024678, episode:  387\n",
      "frames: 336000, reward: -19.900000, loss: 0.004254, episode:  388\n",
      "frames: 337000, reward: -19.900000, loss: 0.015506, episode:  389\n",
      "frames: 338000, reward: -20.300000, loss: 0.019266, episode:  391\n",
      "frames: 339000, reward: -20.400000, loss: 0.014748, episode:  392\n",
      "frames: 340000, reward: -20.400000, loss: 0.009503, episode:  393\n",
      "frames: 341000, reward: -20.400000, loss: 0.003414, episode:  394\n",
      "frames: 342000, reward: -20.300000, loss: 0.002285, episode:  395\n",
      "frames: 343000, reward: -20.400000, loss: 0.019582, episode:  397\n",
      "frames: 344000, reward: -20.500000, loss: 0.009630, episode:  398\n",
      "frames: 345000, reward: -20.600000, loss: 0.001323, episode:  399\n",
      "frames: 346000, reward: -20.500000, loss: 0.006971, episode:  400\n",
      "frames: 347000, reward: -20.600000, loss: 0.002705, episode:  401\n",
      "frames: 348000, reward: -20.600000, loss: 0.015009, episode:  402\n",
      "frames: 349000, reward: -20.500000, loss: 0.002061, episode:  404\n",
      "frames: 350000, reward: -20.600000, loss: 0.019419, episode:  405\n",
      "frames: 351000, reward: -20.600000, loss: 0.005261, episode:  406\n",
      "frames: 352000, reward: -20.700000, loss: 0.009857, episode:  407\n",
      "frames: 353000, reward: -20.800000, loss: 0.007959, episode:  408\n",
      "frames: 354000, reward: -20.800000, loss: 0.001474, episode:  410\n",
      "frames: 355000, reward: -20.700000, loss: 0.012282, episode:  411\n",
      "frames: 356000, reward: -20.500000, loss: 0.025744, episode:  412\n",
      "frames: 357000, reward: -20.600000, loss: 0.020336, episode:  413\n",
      "frames: 358000, reward: -20.600000, loss: 0.003149, episode:  414\n",
      "frames: 359000, reward: -20.600000, loss: 0.007826, episode:  415\n",
      "frames: 360000, reward: -20.600000, loss: 0.003773, episode:  416\n",
      "frames: 361000, reward: -20.500000, loss: 0.013192, episode:  417\n",
      "frames: 362000, reward: -20.500000, loss: 0.015447, episode:  419\n",
      "frames: 363000, reward: -20.500000, loss: 0.016960, episode:  420\n",
      "frames: 364000, reward: -20.600000, loss: 0.001241, episode:  421\n",
      "frames: 365000, reward: -20.800000, loss: 0.001433, episode:  422\n",
      "frames: 366000, reward: -20.700000, loss: 0.006909, episode:  424\n",
      "frames: 367000, reward: -20.700000, loss: 0.012844, episode:  425\n",
      "frames: 368000, reward: -20.700000, loss: 0.011897, episode:  426\n",
      "frames: 369000, reward: -20.800000, loss: 0.012146, episode:  427\n",
      "frames: 370000, reward: -20.800000, loss: 0.017175, episode:  429\n",
      "frames: 371000, reward: -20.800000, loss: 0.002481, episode:  430\n",
      "frames: 372000, reward: -20.800000, loss: 0.010861, episode:  431\n",
      "frames: 373000, reward: -20.800000, loss: 0.019142, episode:  432\n",
      "frames: 374000, reward: -20.700000, loss: 0.006972, episode:  433\n",
      "frames: 375000, reward: -20.600000, loss: 0.003264, episode:  434\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 376000, reward: -20.600000, loss: 0.004533, episode:  436\n",
      "frames: 377000, reward: -20.600000, loss: 0.004012, episode:  437\n",
      "frames: 378000, reward: -20.600000, loss: 0.024028, episode:  438\n",
      "frames: 379000, reward: -20.600000, loss: 0.010660, episode:  439\n",
      "frames: 380000, reward: -20.500000, loss: 0.002233, episode:  440\n",
      "frames: 381000, reward: -20.500000, loss: 0.005407, episode:  441\n",
      "frames: 382000, reward: -20.500000, loss: 0.004034, episode:  443\n",
      "frames: 383000, reward: -20.600000, loss: 0.001561, episode:  444\n",
      "frames: 384000, reward: -20.500000, loss: 0.011417, episode:  445\n",
      "frames: 385000, reward: -20.500000, loss: 0.005690, episode:  446\n",
      "frames: 386000, reward: -20.500000, loss: 0.007146, episode:  447\n",
      "frames: 387000, reward: -20.600000, loss: 0.023203, episode:  448\n",
      "frames: 388000, reward: -20.600000, loss: 0.004677, episode:  449\n",
      "frames: 389000, reward: -20.600000, loss: 0.006225, episode:  451\n",
      "frames: 390000, reward: -20.600000, loss: 0.005303, episode:  452\n",
      "frames: 391000, reward: -20.700000, loss: 0.005555, episode:  453\n",
      "frames: 392000, reward: -20.600000, loss: 0.023654, episode:  454\n",
      "frames: 393000, reward: -20.700000, loss: 0.010468, episode:  456\n",
      "frames: 394000, reward: -20.600000, loss: 0.008827, episode:  457\n",
      "frames: 395000, reward: -20.600000, loss: 0.009366, episode:  458\n",
      "frames: 396000, reward: -20.600000, loss: 0.003301, episode:  459\n",
      "frames: 397000, reward: -20.700000, loss: 0.006910, episode:  460\n",
      "frames: 398000, reward: -20.700000, loss: 0.007556, episode:  461\n",
      "frames: 399000, reward: -20.600000, loss: 0.012820, episode:  462\n",
      "frames: 400000, reward: -20.600000, loss: 0.005786, episode:  463\n",
      "frames: 401000, reward: -20.700000, loss: 0.011403, episode:  465\n",
      "frames: 402000, reward: -20.700000, loss: 0.001790, episode:  466\n",
      "frames: 403000, reward: -20.800000, loss: 0.022213, episode:  467\n",
      "frames: 404000, reward: -20.800000, loss: 0.008403, episode:  468\n",
      "frames: 405000, reward: -20.800000, loss: 0.003953, episode:  470\n",
      "frames: 406000, reward: -20.800000, loss: 0.004043, episode:  471\n",
      "frames: 407000, reward: -21.000000, loss: 0.019534, episode:  472\n",
      "frames: 408000, reward: -21.000000, loss: 0.007878, episode:  473\n",
      "frames: 409000, reward: -21.000000, loss: 0.003766, episode:  474\n",
      "frames: 410000, reward: -21.000000, loss: 0.004238, episode:  476\n",
      "frames: 411000, reward: -21.000000, loss: 0.001186, episode:  477\n",
      "frames: 412000, reward: -20.900000, loss: 0.006393, episode:  478\n",
      "frames: 413000, reward: -20.900000, loss: 0.003766, episode:  479\n",
      "frames: 414000, reward: -20.900000, loss: 0.007971, episode:  480\n",
      "frames: 415000, reward: -20.900000, loss: 0.007607, episode:  481\n",
      "frames: 416000, reward: -20.700000, loss: 0.007522, episode:  482\n",
      "frames: 417000, reward: -20.700000, loss: 0.003233, episode:  484\n",
      "frames: 418000, reward: -20.700000, loss: 0.003070, episode:  485\n",
      "frames: 419000, reward: -20.600000, loss: 0.000984, episode:  486\n",
      "frames: 420000, reward: -20.500000, loss: 0.003041, episode:  487\n",
      "frames: 421000, reward: -20.500000, loss: 0.003460, episode:  488\n",
      "frames: 422000, reward: -20.500000, loss: 0.005866, episode:  489\n",
      "frames: 423000, reward: -20.400000, loss: 0.007232, episode:  491\n",
      "frames: 424000, reward: -20.400000, loss: 0.005153, episode:  491\n",
      "frames: 425000, reward: -20.400000, loss: 0.003746, episode:  493\n",
      "frames: 426000, reward: -20.400000, loss: 0.001790, episode:  494\n",
      "frames: 427000, reward: -20.400000, loss: 0.003423, episode:  495\n",
      "frames: 428000, reward: -20.400000, loss: 0.001335, episode:  496\n",
      "frames: 429000, reward: -20.400000, loss: 0.003642, episode:  497\n",
      "frames: 430000, reward: -20.500000, loss: 0.004902, episode:  498\n",
      "frames: 431000, reward: -20.600000, loss: 0.011288, episode:  500\n",
      "frames: 432000, reward: -20.600000, loss: 0.004322, episode:  501\n",
      "frames: 433000, reward: -20.700000, loss: 0.017322, episode:  502\n",
      "frames: 434000, reward: -20.800000, loss: 0.017278, episode:  503\n",
      "frames: 435000, reward: -20.800000, loss: 0.004871, episode:  504\n",
      "frames: 436000, reward: -20.700000, loss: 0.001934, episode:  505\n",
      "frames: 437000, reward: -20.900000, loss: 0.002279, episode:  507\n",
      "frames: 438000, reward: -20.800000, loss: 0.001463, episode:  508\n",
      "frames: 439000, reward: -20.800000, loss: 0.002780, episode:  509\n",
      "frames: 440000, reward: -20.800000, loss: 0.002267, episode:  510\n",
      "frames: 441000, reward: -20.800000, loss: 0.013055, episode:  511\n",
      "frames: 442000, reward: -20.700000, loss: 0.005437, episode:  513\n",
      "frames: 443000, reward: -20.700000, loss: 0.001080, episode:  514\n",
      "frames: 444000, reward: -20.800000, loss: 0.011174, episode:  515\n",
      "frames: 445000, reward: -20.800000, loss: 0.001346, episode:  516\n",
      "frames: 446000, reward: -20.800000, loss: 0.003207, episode:  518\n",
      "frames: 447000, reward: -20.800000, loss: 0.006536, episode:  519\n",
      "frames: 448000, reward: -20.700000, loss: 0.009653, episode:  520\n",
      "frames: 449000, reward: -20.700000, loss: 0.000864, episode:  521\n",
      "frames: 450000, reward: -20.800000, loss: 0.001950, episode:  522\n",
      "frames: 451000, reward: -20.700000, loss: 0.001663, episode:  523\n",
      "frames: 452000, reward: -20.700000, loss: 0.005640, episode:  525\n",
      "frames: 453000, reward: -20.700000, loss: 0.002634, episode:  526\n",
      "frames: 454000, reward: -20.800000, loss: 0.002655, episode:  527\n",
      "frames: 455000, reward: -20.700000, loss: 0.001448, episode:  528\n",
      "frames: 456000, reward: -20.700000, loss: 0.005241, episode:  529\n",
      "frames: 457000, reward: -20.800000, loss: 0.001555, episode:  530\n",
      "frames: 458000, reward: -20.700000, loss: 0.020023, episode:  532\n",
      "frames: 459000, reward: -20.800000, loss: 0.001865, episode:  533\n",
      "frames: 460000, reward: -20.600000, loss: 0.003287, episode:  534\n",
      "frames: 461000, reward: -20.600000, loss: 0.001542, episode:  535\n",
      "frames: 462000, reward: -20.600000, loss: 0.000864, episode:  536\n",
      "frames: 463000, reward: -20.600000, loss: 0.001409, episode:  537\n",
      "frames: 464000, reward: -20.700000, loss: 0.002110, episode:  538\n",
      "frames: 465000, reward: -20.700000, loss: 0.000964, episode:  539\n",
      "frames: 466000, reward: -20.700000, loss: 0.001913, episode:  541\n",
      "frames: 467000, reward: -20.700000, loss: 0.000621, episode:  542\n",
      "frames: 468000, reward: -20.700000, loss: 0.003644, episode:  543\n",
      "frames: 469000, reward: -20.800000, loss: 0.003196, episode:  544\n",
      "frames: 470000, reward: -20.700000, loss: 0.000947, episode:  545\n",
      "frames: 471000, reward: -20.600000, loss: 0.006211, episode:  546\n",
      "frames: 472000, reward: -20.600000, loss: 0.001882, episode:  547\n",
      "frames: 473000, reward: -20.500000, loss: 0.001711, episode:  548\n",
      "frames: 474000, reward: -20.400000, loss: 0.008795, episode:  549\n",
      "frames: 475000, reward: -20.400000, loss: 0.006851, episode:  550\n",
      "frames: 476000, reward: -20.400000, loss: 0.003724, episode:  552\n",
      "frames: 477000, reward: -20.300000, loss: 0.001059, episode:  553\n",
      "frames: 478000, reward: -20.300000, loss: 0.002402, episode:  554\n",
      "frames: 479000, reward: -20.400000, loss: 0.000576, episode:  555\n",
      "frames: 480000, reward: -20.400000, loss: 0.002068, episode:  556\n",
      "frames: 481000, reward: -20.400000, loss: 0.003198, episode:  558\n",
      "frames: 482000, reward: -20.500000, loss: 0.002928, episode:  559\n",
      "frames: 483000, reward: -20.400000, loss: 0.015746, episode:  560\n",
      "frames: 484000, reward: -20.500000, loss: 0.003195, episode:  561\n",
      "frames: 485000, reward: -20.500000, loss: 0.005169, episode:  562\n",
      "frames: 486000, reward: -20.600000, loss: 0.002450, episode:  563\n",
      "frames: 487000, reward: -20.700000, loss: 0.001150, episode:  565\n",
      "frames: 488000, reward: -20.800000, loss: 0.001108, episode:  566\n",
      "frames: 489000, reward: -20.900000, loss: 0.001798, episode:  567\n",
      "frames: 490000, reward: -20.800000, loss: 0.001585, episode:  568\n",
      "frames: 491000, reward: -20.800000, loss: 0.001867, episode:  569\n",
      "frames: 492000, reward: -20.800000, loss: 0.001485, episode:  570\n",
      "frames: 493000, reward: -20.800000, loss: 0.001321, episode:  571\n",
      "frames: 494000, reward: -20.800000, loss: 0.003728, episode:  573\n",
      "frames: 495000, reward: -20.800000, loss: 0.002607, episode:  574\n",
      "frames: 496000, reward: -20.700000, loss: 0.006476, episode:  575\n",
      "frames: 497000, reward: -20.700000, loss: 0.002414, episode:  576\n",
      "frames: 498000, reward: -20.700000, loss: 0.001695, episode:  577\n",
      "frames: 499000, reward: -20.800000, loss: 0.002568, episode:  579\n",
      "frames: 500000, reward: -20.700000, loss: 0.000822, episode:  580\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 501000, reward: -20.700000, loss: 0.003184, episode:  581\n",
      "frames: 502000, reward: -20.700000, loss: 0.002818, episode:  582\n",
      "frames: 503000, reward: -20.700000, loss: 0.002383, episode:  583\n",
      "frames: 504000, reward: -20.800000, loss: 0.000984, episode:  585\n",
      "frames: 505000, reward: -20.700000, loss: 0.010444, episode:  586\n",
      "frames: 506000, reward: -20.700000, loss: 0.001482, episode:  587\n",
      "frames: 507000, reward: -20.700000, loss: 0.004136, episode:  588\n",
      "frames: 508000, reward: -20.900000, loss: 0.002771, episode:  590\n",
      "frames: 509000, reward: -20.900000, loss: 0.006435, episode:  591\n",
      "frames: 510000, reward: -20.900000, loss: 0.001310, episode:  592\n",
      "frames: 511000, reward: -20.900000, loss: 0.001986, episode:  593\n",
      "frames: 512000, reward: -20.800000, loss: 0.002334, episode:  595\n",
      "frames: 513000, reward: -20.800000, loss: 0.001248, episode:  596\n",
      "frames: 514000, reward: -20.800000, loss: 0.001945, episode:  597\n",
      "frames: 515000, reward: -20.800000, loss: 0.004196, episode:  598\n",
      "frames: 516000, reward: -20.800000, loss: 0.001507, episode:  600\n",
      "frames: 517000, reward: -20.800000, loss: 0.003367, episode:  601\n",
      "frames: 518000, reward: -20.700000, loss: 0.000957, episode:  602\n",
      "frames: 519000, reward: -20.600000, loss: 0.001568, episode:  603\n",
      "frames: 520000, reward: -20.700000, loss: 0.002612, episode:  605\n",
      "frames: 521000, reward: -20.700000, loss: 0.004382, episode:  606\n",
      "frames: 522000, reward: -20.700000, loss: 0.003375, episode:  607\n",
      "frames: 523000, reward: -20.700000, loss: 0.002461, episode:  609\n",
      "frames: 524000, reward: -20.700000, loss: 0.000668, episode:  610\n",
      "frames: 525000, reward: -20.700000, loss: 0.004342, episode:  611\n",
      "frames: 526000, reward: -20.800000, loss: 0.001262, episode:  612\n",
      "frames: 527000, reward: -20.800000, loss: 0.001005, episode:  613\n",
      "frames: 528000, reward: -20.700000, loss: 0.002849, episode:  615\n",
      "frames: 529000, reward: -20.700000, loss: 0.001999, episode:  616\n",
      "frames: 530000, reward: -20.700000, loss: 0.003146, episode:  617\n",
      "frames: 531000, reward: -20.700000, loss: 0.002481, episode:  618\n",
      "frames: 532000, reward: -20.700000, loss: 0.001073, episode:  619\n",
      "frames: 533000, reward: -20.600000, loss: 0.001052, episode:  621\n",
      "frames: 534000, reward: -20.600000, loss: 0.006155, episode:  622\n",
      "frames: 535000, reward: -20.700000, loss: 0.001716, episode:  623\n",
      "frames: 536000, reward: -20.800000, loss: 0.000984, episode:  624\n",
      "frames: 537000, reward: -20.800000, loss: 0.001102, episode:  625\n",
      "frames: 538000, reward: -20.700000, loss: 0.001065, episode:  627\n",
      "frames: 539000, reward: -20.700000, loss: 0.001029, episode:  628\n",
      "frames: 540000, reward: -20.700000, loss: 0.001343, episode:  629\n",
      "frames: 541000, reward: -20.700000, loss: 0.001582, episode:  630\n",
      "frames: 542000, reward: -20.700000, loss: 0.000835, episode:  631\n",
      "frames: 543000, reward: -20.700000, loss: 0.000992, episode:  632\n",
      "frames: 544000, reward: -20.700000, loss: 0.000721, episode:  633\n",
      "frames: 545000, reward: -20.500000, loss: 0.000909, episode:  634\n",
      "frames: 546000, reward: -20.500000, loss: 0.001894, episode:  636\n",
      "frames: 547000, reward: -20.600000, loss: 0.001089, episode:  637\n",
      "frames: 548000, reward: -20.500000, loss: 0.001383, episode:  638\n",
      "frames: 549000, reward: -20.500000, loss: 0.002381, episode:  639\n",
      "frames: 550000, reward: -20.600000, loss: 0.000776, episode:  641\n",
      "frames: 551000, reward: -20.500000, loss: 0.001921, episode:  642\n",
      "frames: 552000, reward: -20.300000, loss: 0.001123, episode:  643\n",
      "frames: 553000, reward: -20.300000, loss: 0.001240, episode:  644\n",
      "frames: 554000, reward: -20.400000, loss: 0.000809, episode:  645\n",
      "frames: 555000, reward: -20.300000, loss: 0.000973, episode:  646\n",
      "frames: 556000, reward: -20.300000, loss: 0.016299, episode:  647\n",
      "frames: 557000, reward: -20.400000, loss: 0.001545, episode:  649\n",
      "frames: 558000, reward: -20.300000, loss: 0.001465, episode:  650\n",
      "frames: 559000, reward: -20.200000, loss: 0.002001, episode:  651\n",
      "frames: 560000, reward: -20.300000, loss: 0.000881, episode:  652\n",
      "frames: 561000, reward: -20.500000, loss: 0.000549, episode:  653\n",
      "frames: 562000, reward: -20.600000, loss: 0.001961, episode:  654\n",
      "frames: 563000, reward: -20.500000, loss: 0.001950, episode:  655\n",
      "frames: 564000, reward: -20.600000, loss: 0.000925, episode:  656\n",
      "frames: 565000, reward: -20.400000, loss: 0.000466, episode:  657\n",
      "frames: 566000, reward: -20.400000, loss: 0.001135, episode:  658\n",
      "frames: 567000, reward: -20.400000, loss: 0.001315, episode:  659\n",
      "frames: 568000, reward: -20.400000, loss: 0.000662, episode:  660\n",
      "frames: 569000, reward: -20.500000, loss: 0.002300, episode:  661\n",
      "frames: 570000, reward: -20.200000, loss: 0.000499, episode:  662\n",
      "frames: 571000, reward: -20.000000, loss: 0.001875, episode:  663\n",
      "frames: 572000, reward: -20.000000, loss: 0.002132, episode:  664\n",
      "frames: 573000, reward: -19.900000, loss: 0.017570, episode:  665\n",
      "frames: 574000, reward: -19.800000, loss: 0.001104, episode:  666\n",
      "frames: 575000, reward: -19.900000, loss: 0.000679, episode:  667\n",
      "frames: 576000, reward: -19.700000, loss: 0.001436, episode:  668\n",
      "frames: 577000, reward: -19.600000, loss: 0.000749, episode:  669\n",
      "frames: 578000, reward: -19.700000, loss: 0.001454, episode:  670\n",
      "frames: 579000, reward: -19.600000, loss: 0.002224, episode:  671\n",
      "frames: 580000, reward: -19.900000, loss: 0.001447, episode:  672\n",
      "frames: 581000, reward: -19.800000, loss: 0.001917, episode:  673\n",
      "frames: 582000, reward: -19.800000, loss: 0.000833, episode:  674\n",
      "frames: 583000, reward: -19.900000, loss: 0.000634, episode:  675\n",
      "frames: 584000, reward: -19.900000, loss: 0.001538, episode:  676\n",
      "frames: 585000, reward: -20.000000, loss: 0.000796, episode:  677\n",
      "frames: 586000, reward: -20.100000, loss: 0.001829, episode:  678\n",
      "frames: 587000, reward: -20.000000, loss: 0.001118, episode:  679\n",
      "frames: 588000, reward: -19.700000, loss: 0.002734, episode:  680\n",
      "frames: 589000, reward: -19.800000, loss: 0.001806, episode:  681\n",
      "frames: 590000, reward: -19.700000, loss: 0.000680, episode:  682\n",
      "frames: 591000, reward: -19.900000, loss: 0.003634, episode:  683\n",
      "frames: 592000, reward: -19.700000, loss: 0.000934, episode:  684\n",
      "frames: 593000, reward: -19.700000, loss: 0.002379, episode:  685\n",
      "frames: 594000, reward: -19.800000, loss: 0.003193, episode:  686\n",
      "frames: 595000, reward: -19.800000, loss: 0.001111, episode:  687\n",
      "frames: 596000, reward: -19.700000, loss: 0.003478, episode:  688\n",
      "frames: 597000, reward: -19.900000, loss: 0.009017, episode:  689\n",
      "frames: 598000, reward: -20.200000, loss: 0.007402, episode:  690\n",
      "frames: 599000, reward: -20.000000, loss: 0.001123, episode:  691\n",
      "frames: 600000, reward: -20.000000, loss: 0.001240, episode:  692\n",
      "frames: 601000, reward: -20.000000, loss: 0.005401, episode:  693\n",
      "frames: 602000, reward: -20.000000, loss: 0.001312, episode:  694\n",
      "frames: 603000, reward: -20.000000, loss: 0.001777, episode:  695\n",
      "frames: 604000, reward: -19.900000, loss: 0.001341, episode:  696\n",
      "frames: 605000, reward: -19.700000, loss: 0.002012, episode:  697\n",
      "frames: 606000, reward: -19.900000, loss: 0.003046, episode:  698\n",
      "frames: 607000, reward: -19.700000, loss: 0.001133, episode:  699\n",
      "frames: 608000, reward: -19.600000, loss: 0.001328, episode:  700\n",
      "frames: 609000, reward: -19.600000, loss: 0.002931, episode:  701\n",
      "frames: 610000, reward: -19.500000, loss: 0.002278, episode:  702\n",
      "frames: 611000, reward: -19.500000, loss: 0.004711, episode:  703\n",
      "frames: 612000, reward: -19.600000, loss: 0.002906, episode:  704\n",
      "frames: 613000, reward: -19.700000, loss: 0.004849, episode:  705\n",
      "frames: 614000, reward: -19.500000, loss: 0.002449, episode:  706\n",
      "frames: 615000, reward: -19.700000, loss: 0.001325, episode:  707\n",
      "frames: 616000, reward: -19.600000, loss: 0.002398, episode:  708\n",
      "frames: 617000, reward: -19.700000, loss: 0.001211, episode:  709\n",
      "frames: 618000, reward: -19.800000, loss: 0.001496, episode:  710\n",
      "frames: 619000, reward: -19.700000, loss: 0.001095, episode:  711\n",
      "frames: 620000, reward: -19.800000, loss: 0.002929, episode:  712\n",
      "frames: 621000, reward: -19.900000, loss: 0.004810, episode:  713\n",
      "frames: 622000, reward: -19.900000, loss: 0.000915, episode:  714\n",
      "frames: 623000, reward: -19.900000, loss: 0.001548, episode:  715\n",
      "frames: 624000, reward: -20.200000, loss: 0.003269, episode:  716\n",
      "frames: 625000, reward: -20.200000, loss: 0.002607, episode:  717\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 626000, reward: -19.900000, loss: 0.001905, episode:  718\n",
      "frames: 627000, reward: -20.000000, loss: 0.000582, episode:  719\n",
      "frames: 628000, reward: -20.000000, loss: 0.006408, episode:  720\n",
      "frames: 629000, reward: -20.000000, loss: 0.002400, episode:  720\n",
      "frames: 630000, reward: -20.300000, loss: 0.002794, episode:  722\n",
      "frames: 631000, reward: -20.300000, loss: 0.000614, episode:  723\n",
      "frames: 632000, reward: -20.500000, loss: 0.017746, episode:  724\n",
      "frames: 633000, reward: -20.400000, loss: 0.003316, episode:  725\n",
      "frames: 634000, reward: -20.200000, loss: 0.002168, episode:  726\n",
      "frames: 635000, reward: -20.100000, loss: 0.002243, episode:  727\n",
      "frames: 636000, reward: -20.500000, loss: 0.002150, episode:  728\n",
      "frames: 637000, reward: -20.500000, loss: 0.001932, episode:  729\n",
      "frames: 638000, reward: -20.500000, loss: 0.002540, episode:  730\n",
      "frames: 639000, reward: -20.600000, loss: 0.004294, episode:  731\n",
      "frames: 640000, reward: -20.600000, loss: 0.002248, episode:  732\n",
      "frames: 641000, reward: -20.300000, loss: 0.001859, episode:  733\n",
      "frames: 642000, reward: -20.300000, loss: 0.007281, episode:  734\n",
      "frames: 643000, reward: -20.200000, loss: 0.002396, episode:  735\n",
      "frames: 644000, reward: -20.100000, loss: 0.003709, episode:  736\n",
      "frames: 645000, reward: -20.100000, loss: 0.001463, episode:  737\n",
      "frames: 646000, reward: -20.000000, loss: 0.001163, episode:  739\n",
      "frames: 647000, reward: -19.900000, loss: 0.002558, episode:  740\n",
      "frames: 648000, reward: -19.800000, loss: 0.002242, episode:  741\n",
      "frames: 649000, reward: -19.800000, loss: 0.006314, episode:  742\n",
      "frames: 650000, reward: -20.000000, loss: 0.002227, episode:  743\n",
      "frames: 651000, reward: -20.000000, loss: 0.003111, episode:  744\n",
      "frames: 652000, reward: -20.100000, loss: 0.001647, episode:  745\n",
      "frames: 653000, reward: -20.400000, loss: 0.001869, episode:  746\n",
      "frames: 654000, reward: -20.500000, loss: 0.002676, episode:  748\n",
      "frames: 655000, reward: -20.500000, loss: 0.001939, episode:  748\n",
      "frames: 656000, reward: -20.600000, loss: 0.003383, episode:  750\n",
      "frames: 657000, reward: -20.600000, loss: 0.000916, episode:  750\n",
      "frames: 658000, reward: -20.700000, loss: 0.002437, episode:  751\n",
      "frames: 659000, reward: -20.700000, loss: 0.003493, episode:  752\n",
      "frames: 660000, reward: -20.800000, loss: 0.002557, episode:  754\n",
      "frames: 661000, reward: -20.800000, loss: 0.001692, episode:  754\n",
      "frames: 662000, reward: -20.700000, loss: 0.001368, episode:  755\n",
      "frames: 663000, reward: -20.700000, loss: 0.001666, episode:  756\n",
      "frames: 664000, reward: -20.800000, loss: 0.000930, episode:  757\n",
      "frames: 665000, reward: -20.800000, loss: 0.002513, episode:  758\n",
      "frames: 666000, reward: -20.800000, loss: 0.001826, episode:  759\n",
      "frames: 667000, reward: -20.600000, loss: 0.004490, episode:  760\n",
      "frames: 668000, reward: -20.500000, loss: 0.005435, episode:  761\n",
      "frames: 669000, reward: -20.500000, loss: 0.003598, episode:  762\n",
      "frames: 670000, reward: -20.400000, loss: 0.004396, episode:  763\n",
      "frames: 671000, reward: -20.400000, loss: 0.020069, episode:  763\n",
      "frames: 672000, reward: -20.000000, loss: 0.002857, episode:  764\n",
      "frames: 673000, reward: -20.000000, loss: 0.001465, episode:  765\n",
      "frames: 674000, reward: -19.800000, loss: 0.003414, episode:  766\n",
      "frames: 675000, reward: -19.800000, loss: 0.001846, episode:  766\n",
      "frames: 676000, reward: -19.500000, loss: 0.001809, episode:  767\n",
      "frames: 677000, reward: -19.400000, loss: 0.017097, episode:  768\n",
      "frames: 678000, reward: -19.300000, loss: 0.003602, episode:  769\n",
      "frames: 679000, reward: -19.300000, loss: 0.001258, episode:  770\n",
      "frames: 680000, reward: -19.300000, loss: 0.000828, episode:  771\n",
      "frames: 681000, reward: -19.300000, loss: 0.003465, episode:  772\n",
      "frames: 682000, reward: -19.300000, loss: 0.000565, episode:  772\n",
      "frames: 683000, reward: -19.400000, loss: 0.002672, episode:  774\n",
      "frames: 684000, reward: -19.400000, loss: 0.001288, episode:  774\n",
      "frames: 685000, reward: -19.200000, loss: 0.004907, episode:  775\n",
      "frames: 686000, reward: -19.400000, loss: 0.001758, episode:  776\n",
      "frames: 687000, reward: -19.600000, loss: 0.001418, episode:  777\n",
      "frames: 688000, reward: -19.500000, loss: 0.004597, episode:  778\n",
      "frames: 689000, reward: -19.500000, loss: 0.002257, episode:  778\n",
      "frames: 690000, reward: -19.200000, loss: 0.002329, episode:  779\n",
      "frames: 691000, reward: -19.200000, loss: 0.002872, episode:  780\n",
      "frames: 692000, reward: -19.300000, loss: 0.005143, episode:  781\n",
      "frames: 693000, reward: -19.100000, loss: 0.002081, episode:  782\n",
      "frames: 694000, reward: -19.100000, loss: 0.002034, episode:  783\n",
      "frames: 695000, reward: -19.100000, loss: 0.001816, episode:  784\n",
      "frames: 696000, reward: -19.400000, loss: 0.001919, episode:  785\n",
      "frames: 697000, reward: -19.400000, loss: 0.001218, episode:  785\n",
      "frames: 698000, reward: -19.300000, loss: 0.002316, episode:  786\n",
      "frames: 699000, reward: -19.300000, loss: 0.002825, episode:  787\n",
      "frames: 700000, reward: -19.200000, loss: 0.002628, episode:  788\n",
      "frames: 701000, reward: -19.600000, loss: 0.001752, episode:  789\n",
      "frames: 702000, reward: -19.800000, loss: 0.001587, episode:  790\n",
      "frames: 703000, reward: -19.800000, loss: 0.016111, episode:  791\n",
      "frames: 704000, reward: -20.000000, loss: 0.003353, episode:  792\n",
      "frames: 705000, reward: -20.000000, loss: 0.000834, episode:  792\n",
      "frames: 706000, reward: -20.000000, loss: 0.001959, episode:  793\n",
      "frames: 707000, reward: -20.000000, loss: 0.002907, episode:  794\n",
      "frames: 708000, reward: -20.000000, loss: 0.003801, episode:  795\n",
      "frames: 709000, reward: -19.800000, loss: 0.003471, episode:  796\n",
      "frames: 710000, reward: -19.700000, loss: 0.002012, episode:  797\n",
      "frames: 711000, reward: -19.700000, loss: 0.002393, episode:  797\n",
      "frames: 712000, reward: -19.700000, loss: 0.003501, episode:  798\n",
      "frames: 713000, reward: -19.600000, loss: 0.005177, episode:  800\n",
      "frames: 714000, reward: -19.600000, loss: 0.002604, episode:  800\n",
      "frames: 715000, reward: -19.500000, loss: 0.001067, episode:  801\n",
      "frames: 716000, reward: -19.200000, loss: 0.003232, episode:  802\n",
      "frames: 717000, reward: -19.300000, loss: 0.018468, episode:  803\n",
      "frames: 718000, reward: -19.300000, loss: 0.003293, episode:  803\n",
      "frames: 719000, reward: -19.200000, loss: 0.002597, episode:  804\n",
      "frames: 720000, reward: -19.300000, loss: 0.001836, episode:  805\n",
      "frames: 721000, reward: -19.100000, loss: 0.002528, episode:  806\n",
      "frames: 722000, reward: -19.100000, loss: 0.003261, episode:  806\n",
      "frames: 723000, reward: -19.200000, loss: 0.001129, episode:  807\n",
      "frames: 724000, reward: -19.400000, loss: 0.019207, episode:  808\n",
      "frames: 725000, reward: -19.000000, loss: 0.002647, episode:  809\n",
      "frames: 726000, reward: -18.800000, loss: 0.003315, episode:  810\n",
      "frames: 727000, reward: -18.800000, loss: 0.001496, episode:  810\n",
      "frames: 728000, reward: -18.800000, loss: 0.011024, episode:  811\n",
      "frames: 729000, reward: -18.800000, loss: 0.000829, episode:  812\n",
      "frames: 730000, reward: -18.600000, loss: 0.002003, episode:  813\n",
      "frames: 731000, reward: -18.600000, loss: 0.003244, episode:  814\n",
      "frames: 732000, reward: -18.600000, loss: 0.002754, episode:  815\n",
      "frames: 733000, reward: -19.000000, loss: 0.004919, episode:  816\n",
      "frames: 734000, reward: -19.000000, loss: 0.001610, episode:  817\n",
      "frames: 735000, reward: -19.100000, loss: 0.003145, episode:  818\n",
      "frames: 736000, reward: -19.300000, loss: 0.015052, episode:  819\n",
      "frames: 737000, reward: -19.400000, loss: 0.000968, episode:  820\n",
      "frames: 738000, reward: -19.400000, loss: 0.002767, episode:  821\n",
      "frames: 739000, reward: -19.400000, loss: 0.001114, episode:  821\n",
      "frames: 740000, reward: -19.800000, loss: 0.002136, episode:  823\n",
      "frames: 741000, reward: -19.900000, loss: 0.003950, episode:  824\n",
      "frames: 742000, reward: -19.900000, loss: 0.014941, episode:  825\n",
      "frames: 743000, reward: -20.000000, loss: 0.003120, episode:  826\n",
      "frames: 744000, reward: -20.000000, loss: 0.000967, episode:  827\n",
      "frames: 745000, reward: -19.900000, loss: 0.018921, episode:  828\n",
      "frames: 746000, reward: -20.200000, loss: 0.001998, episode:  829\n",
      "frames: 747000, reward: -20.300000, loss: 0.016133, episode:  830\n",
      "frames: 748000, reward: -20.400000, loss: 0.001427, episode:  831\n",
      "frames: 749000, reward: -20.700000, loss: 0.001419, episode:  832\n",
      "frames: 750000, reward: -20.700000, loss: 0.000935, episode:  833\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 751000, reward: -20.700000, loss: 0.002373, episode:  834\n",
      "frames: 752000, reward: -20.600000, loss: 0.002154, episode:  835\n",
      "frames: 753000, reward: -20.600000, loss: 0.001953, episode:  836\n",
      "frames: 754000, reward: -20.600000, loss: 0.001277, episode:  837\n",
      "frames: 755000, reward: -20.600000, loss: 0.001665, episode:  838\n",
      "frames: 756000, reward: -20.600000, loss: 0.001223, episode:  839\n",
      "frames: 757000, reward: -20.600000, loss: 0.002084, episode:  840\n",
      "frames: 758000, reward: -20.500000, loss: 0.009286, episode:  841\n",
      "frames: 759000, reward: -20.200000, loss: 0.017326, episode:  842\n",
      "frames: 760000, reward: -19.700000, loss: 0.001317, episode:  843\n",
      "frames: 761000, reward: -19.700000, loss: 0.001090, episode:  843\n",
      "frames: 762000, reward: -19.700000, loss: 0.001068, episode:  844\n",
      "frames: 763000, reward: -19.700000, loss: 0.004965, episode:  845\n",
      "frames: 764000, reward: -19.400000, loss: 0.002267, episode:  846\n",
      "frames: 765000, reward: -19.600000, loss: 0.005103, episode:  848\n",
      "frames: 766000, reward: -19.400000, loss: 0.004438, episode:  849\n",
      "frames: 767000, reward: -19.400000, loss: 0.004518, episode:  849\n",
      "frames: 768000, reward: -19.200000, loss: 0.004401, episode:  850\n",
      "frames: 769000, reward: -19.200000, loss: 0.001354, episode:  851\n",
      "frames: 770000, reward: -19.200000, loss: 0.001937, episode:  852\n",
      "frames: 771000, reward: -19.300000, loss: 0.002062, episode:  853\n",
      "frames: 772000, reward: -19.200000, loss: 0.002701, episode:  854\n",
      "frames: 773000, reward: -19.000000, loss: 0.001507, episode:  855\n",
      "frames: 774000, reward: -19.300000, loss: 0.002455, episode:  856\n",
      "frames: 775000, reward: -19.200000, loss: 0.000843, episode:  857\n",
      "frames: 776000, reward: -18.800000, loss: 0.002532, episode:  858\n",
      "frames: 777000, reward: -18.800000, loss: 0.001600, episode:  858\n",
      "frames: 778000, reward: -18.700000, loss: 0.002853, episode:  859\n",
      "frames: 779000, reward: -18.500000, loss: 0.001214, episode:  860\n",
      "frames: 780000, reward: -18.400000, loss: 0.001409, episode:  861\n",
      "frames: 781000, reward: -18.600000, loss: 0.002617, episode:  862\n",
      "frames: 782000, reward: -18.800000, loss: 0.002874, episode:  863\n",
      "frames: 783000, reward: -18.900000, loss: 0.002402, episode:  864\n",
      "frames: 784000, reward: -18.900000, loss: 0.006464, episode:  865\n",
      "frames: 785000, reward: -18.700000, loss: 0.015892, episode:  866\n",
      "frames: 786000, reward: -18.700000, loss: 0.003343, episode:  866\n",
      "frames: 787000, reward: -18.500000, loss: 0.002066, episode:  867\n",
      "frames: 788000, reward: -18.800000, loss: 0.017957, episode:  868\n",
      "frames: 789000, reward: -18.800000, loss: 0.000949, episode:  869\n",
      "frames: 790000, reward: -18.900000, loss: 0.001443, episode:  870\n",
      "frames: 791000, reward: -19.100000, loss: 0.003944, episode:  871\n",
      "frames: 792000, reward: -18.800000, loss: 0.003229, episode:  872\n",
      "frames: 793000, reward: -18.600000, loss: 0.001809, episode:  873\n",
      "frames: 794000, reward: -18.600000, loss: 0.004343, episode:  874\n",
      "frames: 795000, reward: -18.700000, loss: 0.001206, episode:  875\n",
      "frames: 796000, reward: -18.800000, loss: 0.002901, episode:  876\n",
      "frames: 797000, reward: -19.000000, loss: 0.014925, episode:  877\n",
      "frames: 798000, reward: -18.800000, loss: 0.001004, episode:  878\n",
      "frames: 799000, reward: -19.100000, loss: 0.003945, episode:  879\n",
      "frames: 800000, reward: -19.200000, loss: 0.001007, episode:  880\n",
      "frames: 801000, reward: -19.000000, loss: 0.001571, episode:  881\n",
      "frames: 802000, reward: -19.000000, loss: 0.002008, episode:  881\n",
      "frames: 803000, reward: -19.400000, loss: 0.001379, episode:  883\n",
      "frames: 804000, reward: -19.500000, loss: 0.002064, episode:  884\n",
      "frames: 805000, reward: -19.500000, loss: 0.000920, episode:  884\n",
      "frames: 806000, reward: -19.300000, loss: 0.001499, episode:  885\n",
      "frames: 807000, reward: -19.100000, loss: 0.000909, episode:  886\n",
      "frames: 808000, reward: -19.100000, loss: 0.004878, episode:  887\n",
      "frames: 809000, reward: -19.200000, loss: 0.001192, episode:  888\n",
      "frames: 810000, reward: -19.100000, loss: 0.003482, episode:  889\n",
      "frames: 811000, reward: -19.300000, loss: 0.001382, episode:  891\n",
      "frames: 812000, reward: -19.600000, loss: 0.001479, episode:  892\n",
      "frames: 813000, reward: -19.600000, loss: 0.000528, episode:  893\n",
      "frames: 814000, reward: -19.300000, loss: 0.002686, episode:  894\n",
      "frames: 815000, reward: -19.300000, loss: 0.001907, episode:  894\n",
      "frames: 816000, reward: -19.600000, loss: 0.001191, episode:  895\n",
      "frames: 817000, reward: -19.600000, loss: 0.001602, episode:  896\n",
      "frames: 818000, reward: -19.600000, loss: 0.001821, episode:  897\n",
      "frames: 819000, reward: -19.200000, loss: 0.002340, episode:  898\n",
      "frames: 820000, reward: -19.300000, loss: 0.001525, episode:  899\n",
      "frames: 821000, reward: -19.400000, loss: 0.000976, episode:  900\n",
      "frames: 822000, reward: -19.500000, loss: 0.002941, episode:  901\n",
      "frames: 823000, reward: -19.500000, loss: 0.002911, episode:  902\n",
      "frames: 824000, reward: -19.600000, loss: 0.003064, episode:  903\n",
      "frames: 825000, reward: -19.500000, loss: 0.001873, episode:  904\n",
      "frames: 826000, reward: -19.200000, loss: 0.002257, episode:  905\n",
      "frames: 827000, reward: -19.500000, loss: 0.001960, episode:  906\n",
      "frames: 828000, reward: -19.500000, loss: 0.000963, episode:  906\n",
      "frames: 829000, reward: -19.000000, loss: 0.001850, episode:  907\n",
      "frames: 830000, reward: -19.600000, loss: 0.003222, episode:  908\n",
      "frames: 831000, reward: -19.600000, loss: 0.002716, episode:  909\n",
      "frames: 832000, reward: -19.600000, loss: 0.001245, episode:  910\n",
      "frames: 833000, reward: -19.300000, loss: 0.002232, episode:  911\n",
      "frames: 834000, reward: -19.100000, loss: 0.001457, episode:  912\n",
      "frames: 835000, reward: -19.000000, loss: 0.001800, episode:  913\n",
      "frames: 836000, reward: -19.300000, loss: 0.002581, episode:  914\n",
      "frames: 837000, reward: -19.600000, loss: 0.016058, episode:  915\n",
      "frames: 838000, reward: -19.600000, loss: 0.001313, episode:  916\n",
      "frames: 839000, reward: -20.200000, loss: 0.007738, episode:  917\n",
      "frames: 840000, reward: -20.200000, loss: 0.000659, episode:  918\n",
      "frames: 841000, reward: -19.700000, loss: 0.018152, episode:  919\n",
      "frames: 842000, reward: -19.500000, loss: 0.000798, episode:  920\n",
      "frames: 843000, reward: -19.800000, loss: 0.002440, episode:  921\n",
      "frames: 844000, reward: -20.000000, loss: 0.002633, episode:  922\n",
      "frames: 845000, reward: -20.100000, loss: 0.000989, episode:  923\n",
      "frames: 846000, reward: -20.200000, loss: 0.003109, episode:  924\n",
      "frames: 847000, reward: -20.300000, loss: 0.003438, episode:  925\n",
      "frames: 848000, reward: -19.900000, loss: 0.001753, episode:  926\n",
      "frames: 849000, reward: -19.900000, loss: 0.001376, episode:  927\n",
      "frames: 850000, reward: -19.800000, loss: 0.001390, episode:  928\n",
      "frames: 851000, reward: -20.300000, loss: 0.001785, episode:  929\n",
      "frames: 852000, reward: -20.000000, loss: 0.003737, episode:  930\n",
      "frames: 853000, reward: -20.000000, loss: 0.001500, episode:  930\n",
      "frames: 854000, reward: -19.300000, loss: 0.001728, episode:  932\n",
      "frames: 855000, reward: -19.200000, loss: 0.007288, episode:  933\n",
      "frames: 856000, reward: -19.100000, loss: 0.001160, episode:  934\n",
      "frames: 857000, reward: -19.000000, loss: 0.003292, episode:  935\n",
      "frames: 858000, reward: -19.000000, loss: 0.001702, episode:  935\n",
      "frames: 859000, reward: -18.700000, loss: 0.002455, episode:  936\n",
      "frames: 860000, reward: -18.700000, loss: 0.002326, episode:  938\n",
      "frames: 861000, reward: -18.600000, loss: 0.001506, episode:  939\n",
      "frames: 862000, reward: -18.600000, loss: 0.002212, episode:  939\n",
      "frames: 863000, reward: -19.000000, loss: 0.001309, episode:  940\n",
      "frames: 864000, reward: -19.400000, loss: 0.004154, episode:  941\n",
      "frames: 865000, reward: -19.200000, loss: 0.002358, episode:  942\n",
      "frames: 866000, reward: -18.700000, loss: 0.002121, episode:  943\n",
      "frames: 867000, reward: -18.800000, loss: 0.004995, episode:  944\n",
      "frames: 868000, reward: -18.700000, loss: 0.001290, episode:  945\n",
      "frames: 869000, reward: -19.300000, loss: 0.002540, episode:  946\n",
      "frames: 870000, reward: -19.300000, loss: 0.001285, episode:  947\n",
      "frames: 871000, reward: -19.200000, loss: 0.001814, episode:  948\n",
      "frames: 872000, reward: -19.100000, loss: 0.002689, episode:  949\n",
      "frames: 873000, reward: -19.200000, loss: 0.001914, episode:  950\n",
      "frames: 874000, reward: -19.300000, loss: 0.007264, episode:  951\n",
      "frames: 875000, reward: -19.500000, loss: 0.003662, episode:  952\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 876000, reward: -19.500000, loss: 0.001369, episode:  952\n",
      "frames: 877000, reward: -19.800000, loss: 0.002290, episode:  953\n",
      "frames: 878000, reward: -19.600000, loss: 0.002291, episode:  954\n",
      "frames: 879000, reward: -19.300000, loss: 0.001196, episode:  955\n",
      "frames: 880000, reward: -19.300000, loss: 0.032091, episode:  955\n",
      "frames: 881000, reward: -19.000000, loss: 0.002044, episode:  957\n",
      "frames: 882000, reward: -19.200000, loss: 0.005322, episode:  958\n",
      "frames: 883000, reward: -19.400000, loss: 0.002080, episode:  959\n",
      "frames: 884000, reward: -19.000000, loss: 0.003861, episode:  960\n",
      "frames: 885000, reward: -19.100000, loss: 0.004491, episode:  961\n",
      "frames: 886000, reward: -19.200000, loss: 0.002191, episode:  962\n",
      "frames: 887000, reward: -19.500000, loss: 0.012612, episode:  963\n",
      "frames: 888000, reward: -19.600000, loss: 0.002157, episode:  964\n",
      "frames: 889000, reward: -19.800000, loss: 0.002484, episode:  965\n",
      "frames: 890000, reward: -19.600000, loss: 0.001174, episode:  966\n",
      "frames: 891000, reward: -19.600000, loss: 0.002224, episode:  967\n",
      "frames: 892000, reward: -19.600000, loss: 0.003312, episode:  967\n",
      "frames: 893000, reward: -19.300000, loss: 0.004793, episode:  969\n",
      "frames: 894000, reward: -19.500000, loss: 0.001595, episode:  970\n",
      "frames: 895000, reward: -19.500000, loss: 0.001542, episode:  970\n",
      "frames: 896000, reward: -19.500000, loss: 0.002683, episode:  971\n",
      "frames: 897000, reward: -19.400000, loss: 0.002298, episode:  973\n",
      "frames: 898000, reward: -19.400000, loss: 0.004673, episode:  973\n",
      "frames: 899000, reward: -19.600000, loss: 0.001975, episode:  975\n",
      "frames: 900000, reward: -20.100000, loss: 0.001192, episode:  976\n",
      "frames: 901000, reward: -20.200000, loss: 0.002509, episode:  977\n",
      "frames: 902000, reward: -20.200000, loss: 0.002330, episode:  977\n",
      "frames: 903000, reward: -20.100000, loss: 0.003435, episode:  979\n",
      "frames: 904000, reward: -20.100000, loss: 0.001873, episode:  979\n",
      "frames: 905000, reward: -20.200000, loss: 0.002647, episode:  980\n",
      "frames: 906000, reward: -20.100000, loss: 0.001776, episode:  981\n",
      "frames: 907000, reward: -20.000000, loss: 0.003596, episode:  982\n",
      "frames: 908000, reward: -19.700000, loss: 0.003692, episode:  983\n",
      "frames: 909000, reward: -19.800000, loss: 0.001534, episode:  984\n",
      "frames: 910000, reward: -19.800000, loss: 0.002526, episode:  984\n",
      "frames: 911000, reward: -19.300000, loss: 0.003487, episode:  985\n",
      "frames: 912000, reward: -19.200000, loss: 0.001626, episode:  986\n",
      "frames: 913000, reward: -19.000000, loss: 0.003125, episode:  987\n",
      "frames: 914000, reward: -19.200000, loss: 0.005940, episode:  988\n",
      "frames: 915000, reward: -18.900000, loss: 0.001320, episode:  989\n",
      "frames: 916000, reward: -18.700000, loss: 0.001851, episode:  990\n",
      "frames: 917000, reward: -18.700000, loss: 0.003490, episode:  990\n",
      "frames: 918000, reward: -18.500000, loss: 0.001773, episode:  991\n",
      "frames: 919000, reward: -18.300000, loss: 0.003568, episode:  992\n",
      "frames: 920000, reward: -18.400000, loss: 0.001229, episode:  993\n",
      "frames: 921000, reward: -18.300000, loss: 0.002172, episode:  994\n",
      "frames: 922000, reward: -18.800000, loss: 0.003555, episode:  995\n",
      "frames: 923000, reward: -18.900000, loss: 0.003417, episode:  996\n",
      "frames: 924000, reward: -19.000000, loss: 0.002382, episode:  997\n",
      "frames: 925000, reward: -19.000000, loss: 0.001115, episode:  997\n",
      "frames: 926000, reward: -18.800000, loss: 0.013655, episode:  998\n",
      "frames: 927000, reward: -18.600000, loss: 0.001115, episode:  999\n",
      "frames: 928000, reward: -18.600000, loss: 0.000917, episode:  999\n",
      "frames: 929000, reward: -19.100000, loss: 0.003772, episode: 1001\n",
      "frames: 930000, reward: -19.500000, loss: 0.015592, episode: 1002\n",
      "frames: 931000, reward: -19.500000, loss: 0.002168, episode: 1002\n",
      "frames: 932000, reward: -19.600000, loss: 0.002401, episode: 1003\n",
      "frames: 933000, reward: -19.300000, loss: 0.005215, episode: 1004\n",
      "frames: 934000, reward: -19.000000, loss: 0.006579, episode: 1005\n",
      "frames: 935000, reward: -19.000000, loss: 0.013224, episode: 1005\n",
      "frames: 936000, reward: -18.900000, loss: 0.002988, episode: 1006\n",
      "frames: 937000, reward: -18.900000, loss: 0.007860, episode: 1007\n",
      "frames: 938000, reward: -19.200000, loss: 0.004561, episode: 1008\n",
      "frames: 939000, reward: -19.500000, loss: 0.002192, episode: 1009\n",
      "frames: 940000, reward: -19.400000, loss: 0.004736, episode: 1010\n",
      "frames: 941000, reward: -19.300000, loss: 0.009930, episode: 1011\n",
      "frames: 942000, reward: -19.200000, loss: 0.003086, episode: 1012\n",
      "frames: 943000, reward: -19.200000, loss: 0.002621, episode: 1012\n",
      "frames: 944000, reward: -19.000000, loss: 0.004319, episode: 1013\n",
      "frames: 945000, reward: -19.400000, loss: 0.004611, episode: 1014\n",
      "frames: 946000, reward: -19.700000, loss: 0.003228, episode: 1015\n",
      "frames: 947000, reward: -19.700000, loss: 0.003370, episode: 1016\n",
      "frames: 948000, reward: -19.600000, loss: 0.003388, episode: 1017\n",
      "frames: 949000, reward: -19.400000, loss: 0.001655, episode: 1018\n",
      "frames: 950000, reward: -19.400000, loss: 0.003435, episode: 1019\n",
      "frames: 951000, reward: -19.400000, loss: 0.002157, episode: 1019\n",
      "frames: 952000, reward: -19.100000, loss: 0.002755, episode: 1020\n",
      "frames: 953000, reward: -19.100000, loss: 0.002228, episode: 1020\n",
      "frames: 954000, reward: -18.700000, loss: 0.001541, episode: 1021\n",
      "frames: 955000, reward: -18.600000, loss: 0.003989, episode: 1022\n",
      "frames: 956000, reward: -18.600000, loss: 0.003652, episode: 1023\n",
      "frames: 957000, reward: -18.600000, loss: 0.001187, episode: 1023\n",
      "frames: 958000, reward: -18.500000, loss: 0.005863, episode: 1024\n",
      "frames: 959000, reward: -17.900000, loss: 0.003466, episode: 1025\n",
      "frames: 960000, reward: -17.900000, loss: 0.004678, episode: 1025\n",
      "frames: 961000, reward: -17.900000, loss: 0.015717, episode: 1026\n",
      "frames: 962000, reward: -17.600000, loss: 0.004728, episode: 1027\n",
      "frames: 963000, reward: -17.800000, loss: 0.005770, episode: 1028\n",
      "frames: 964000, reward: -17.800000, loss: 0.011878, episode: 1028\n",
      "frames: 965000, reward: -17.600000, loss: 0.002555, episode: 1029\n",
      "frames: 966000, reward: -17.600000, loss: 0.006847, episode: 1030\n",
      "frames: 967000, reward: -18.000000, loss: 0.004219, episode: 1031\n",
      "frames: 968000, reward: -18.200000, loss: 0.001146, episode: 1032\n",
      "frames: 969000, reward: -18.200000, loss: 0.001191, episode: 1032\n",
      "frames: 970000, reward: -18.200000, loss: 0.005121, episode: 1033\n",
      "frames: 971000, reward: -18.300000, loss: 0.005484, episode: 1034\n",
      "frames: 972000, reward: -18.700000, loss: 0.001628, episode: 1035\n",
      "frames: 973000, reward: -18.700000, loss: 0.002334, episode: 1036\n",
      "frames: 974000, reward: -19.100000, loss: 0.003372, episode: 1037\n",
      "frames: 975000, reward: -19.100000, loss: 0.003104, episode: 1037\n",
      "frames: 976000, reward: -18.800000, loss: 0.001939, episode: 1038\n",
      "frames: 977000, reward: -19.100000, loss: 0.002408, episode: 1039\n",
      "frames: 978000, reward: -19.200000, loss: 0.001792, episode: 1040\n",
      "frames: 979000, reward: -19.200000, loss: 0.003971, episode: 1040\n",
      "frames: 980000, reward: -18.800000, loss: 0.001807, episode: 1041\n",
      "frames: 981000, reward: -18.700000, loss: 0.004505, episode: 1042\n",
      "frames: 982000, reward: -18.800000, loss: 0.005572, episode: 1043\n",
      "frames: 983000, reward: -18.700000, loss: 0.001749, episode: 1044\n",
      "frames: 984000, reward: -18.700000, loss: 0.003776, episode: 1044\n",
      "frames: 985000, reward: -18.500000, loss: 0.004706, episode: 1045\n",
      "frames: 986000, reward: -18.400000, loss: 0.003012, episode: 1046\n",
      "frames: 987000, reward: -18.200000, loss: 0.001975, episode: 1047\n",
      "frames: 988000, reward: -18.200000, loss: 0.004002, episode: 1047\n",
      "frames: 989000, reward: -18.400000, loss: 0.005285, episode: 1048\n",
      "frames: 990000, reward: -18.300000, loss: 0.001071, episode: 1049\n",
      "frames: 991000, reward: -18.300000, loss: 0.001680, episode: 1049\n",
      "frames: 992000, reward: -18.300000, loss: 0.002154, episode: 1050\n",
      "frames: 993000, reward: -18.300000, loss: 0.004030, episode: 1050\n",
      "frames: 994000, reward: -18.200000, loss: 0.004278, episode: 1051\n",
      "frames: 995000, reward: -18.000000, loss: 0.007095, episode: 1052\n",
      "frames: 996000, reward: -18.000000, loss: 0.001052, episode: 1052\n",
      "frames: 997000, reward: -17.800000, loss: 0.004365, episode: 1053\n",
      "frames: 998000, reward: -17.700000, loss: 0.004969, episode: 1054\n",
      "frames: 999000, reward: -18.000000, loss: 0.007390, episode: 1055\n"
     ]
    }
   ],
   "source": [
    "# Train a Noisy DQN agent on PongNoFrameskip-v4.\n",
    "env = make_atari('PongNoFrameskip-v4')\n",
    "# DeepMind-style preprocessing; scale=False keeps uint8 frames so the replay buffer stays small.\n",
    "env = wrap_deepmind(env, scale=False, frame_stack=True)\n",
    "\n",
    "# --- Hyperparameters ---\n",
    "gamma = 0.99                  # discount factor (NOTE(review): not passed to the agent here -- confirm the agent's internal default matches)\n",
    "frames = 1000000              # total environment steps to run\n",
    "USE_CUDA = True\n",
    "learning_rate = 1e-4\n",
    "max_buff = 100000             # replay buffer capacity\n",
    "update_tar_interval = 5000    # sync target network every N frames\n",
    "batch_size = 32\n",
    "print_interval = 1000\n",
    "log_interval = 1000\n",
    "learning_start = 10000        # warm-up transitions before learning begins\n",
    "update_current_step = 4       # update current model every 4 steps\n",
    "win_reward = 18               # Pong-v4: 100-episode average considered 'solved'\n",
    "win_break = True              # stop training once win_reward is reached\n",
    "\n",
    "action_space = env.action_space\n",
    "state_channel = env.observation_space.shape[2]\n",
    "agent = Noisy_DQNAgent(in_channels=state_channel, action_space=action_space, USE_CUDA=USE_CUDA, lr=learning_rate, memory_size=max_buff)\n",
    "\n",
    "frame = env.reset()\n",
    "\n",
    "episode_reward = 0\n",
    "all_rewards = []\n",
    "losses = []\n",
    "episode_num = 0\n",
    "is_win = False\n",
    "# TensorBoard logging\n",
    "summary_writer = SummaryWriter(log_dir = \"6_Noisy_DQN_new2\", comment= \"good_makeatari\")\n",
    "\n",
    "\n",
    "for i in range(frames):\n",
    "    state_tensor = agent.observe(frame)\n",
    "    action = agent.act(state_tensor)\n",
    "    \n",
    "    next_frame, reward, is_done, _ = env.step(action)\n",
    "    \n",
    "    episode_reward += reward\n",
    "    agent.memory_buffer.push(frame, action, reward, next_frame, is_done)\n",
    "    frame = next_frame\n",
    "    \n",
    "    loss = 0\n",
    "    # Learn only after the buffer holds enough warm-up transitions.\n",
    "    if agent.memory_buffer.size() >= learning_start:\n",
    "        if i % update_current_step == 0:\n",
    "            loss = agent.learn_from_experience(batch_size)\n",
    "            losses.append(loss)\n",
    "\n",
    "    if i % print_interval == 0:\n",
    "        # Guard the first print: np.mean([]) would emit nan plus a RuntimeWarning.\n",
    "        mean_reward = np.mean(all_rewards[-10:]) if all_rewards else 0.0\n",
    "        print(\"frames: %5d, reward: %5f, loss: %4f, episode: %4d\" % (i, mean_reward, loss, episode_num))\n",
    "        summary_writer.add_scalar(\"Temporal Difference Loss\", loss, i)\n",
    "        summary_writer.add_scalar(\"Mean Reward\", mean_reward, i)\n",
    "        \n",
    "    # Periodically copy the online network's weights into the target network.\n",
    "    if i % update_tar_interval == 0:\n",
    "        agent.DQN_target.load_state_dict(agent.DQN.state_dict())\n",
    "    \n",
    "    if is_done:\n",
    "        \n",
    "        frame = env.reset()\n",
    "        \n",
    "        all_rewards.append(episode_reward)\n",
    "        episode_reward = 0\n",
    "        episode_num += 1\n",
    "        avg_reward = float(np.mean(all_rewards[-100:]))\n",
    "        # Early stop once the 100-episode average crosses the win threshold\n",
    "        # (wires up the previously unused win_reward / win_break / is_win).\n",
    "        if win_break and avg_reward > win_reward:\n",
    "            is_win = True\n",
    "            print(\"Solved! avg reward %.2f over last 100 episodes at frame %d\" % (avg_reward, i))\n",
    "            break\n",
    "\n",
    "summary_writer.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_training(frame_idx, rewards, losses):\n",
    "    \"\"\"Refresh the notebook output with reward and loss curves.\n",
    "\n",
    "    frame_idx -- current frame index, shown in the reward plot title\n",
    "    rewards   -- list of per-episode rewards\n",
    "    losses    -- list of TD losses recorded during training\n",
    "    \"\"\"\n",
    "    clear_output(True)\n",
    "    fig = plt.figure(figsize=(20, 5))\n",
    "    reward_ax = fig.add_subplot(131)\n",
    "    reward_ax.set_title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))\n",
    "    reward_ax.plot(rewards)\n",
    "    loss_ax = fig.add_subplot(132)\n",
    "    loss_ax.set_title('loss')\n",
    "    loss_ax.plot(losses)\n",
    "    plt.show()\n",
    "\n",
    "plot_training(i, all_rewards, losses)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
