{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym, random, pickle, os.path, math, glob\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.autograd as autograd\n",
    "import pdb\n",
    "\n",
    "from atari_wrappers import make_atari, wrap_deepmind,LazyFrames\n",
    "from IPython.display import clear_output\n",
    "from tensorboardX import SummaryWriter\n",
    "\n",
    "USE_CUDA = torch.cuda.is_available()\n",
    "dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n",
    "Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x20eb2de2d48>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPsAAAD7CAYAAACscuKmAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAPB0lEQVR4nO3da4xc5X3H8e/fu2svNhdfAsS1EYYIEUgibGpxKVWVQEgoSaFqSQWKqqhC4k3aQhMpgfZFFKlSiVQl5EUU1YKkqKLcHGiQFUEtAy8qVTbmEm62gw0UDAY7Dg7EYOO1/31xju2tO4vP7lx2xs/3I41mzpkZn+f46DfPM2fOPv/ITCQd+2ZMdwMk9YZhlwph2KVCGHapEIZdKoRhlwrRVtgj4oqI2BQRmyPi5k41SlLnxVR/Z4+IIeBXwOXAVuAJ4LrMfLFzzZPUKcNtvPcCYHNmvgwQEfcAVwMThn1mzMpR5hz9X549evjxjGijiZ2TdTvGjmuvPcN7qg/X2O/FTN1W4jHbs2cXH+7b3XKH2wn7IuD1cctbgQs/6g2jzOHCuOyo/3Cc+6lDj/cfP3OKzeus/bOGANhxXnvtWfDCPgBGdo+13SZ9tIPH7Nefae+Yzd8wOMfsifU/mvC5dsLe6tPj/330RcQNwA0Ao8xuY3OS2tFO2LcCp41bXgy8eeSLMnMFsALgxBMW5YHly9rY5PQ5MFR9tu0+fX9b/87cLdU50ZHdbTdJR3HwmP1uSXvH7KRXjo1j1s7Z+CeAsyLijIiYCVwLPNSZZknqtCn37Jk5FhF/DTwCDAE/ycwXOtayPjPyfvV97RP3N3/PK38yC4ADo/1/Yqc0n1j5Ycv1r36pOjm8/7gDvWxOT7QzjCczfwH8okNtkdRFXkEnFaKtnr0kB4aqz8XfntH8Z5wccviu/mHPLhXCnr2h/bOqz8XfLDv2TtyoDPbsUiEMu1QIh/EdsOjR5q+duav/r6/WscmeXSqEYZcK4TBeRdqzoPX1Ejnj2L02wp5dKoQ9ewe8cWnz1y56tPovH93Z+g8x1BtvfG6iZ+zZJQ04wy4VwmF8Q0N7q8tk5z/d3n/Z8O59nWiOGjh0zJ7xmIE9u1SMnvbse+fNYMs1/TFb7NS194cwv1k6VD8a+sjXqZPKOWZ7t0zcfx+1Z4+In0TE9oh4fty6+RGxOiJequ/ndaitkrqkyTD+X4Erjlh3M7AmM88C1tTLkvpYo/JPEbEEWJWZn66XNwGfzcxtEbEQeDwzzz7av7P8vNFc98hpR3uZpCm64Iuvs/6Xe1pWhJnqCbpTM3MbQH1/ylQbJ6k3un42PiJuiIj1EbF+x872JuuXNHVTDfvb9fCd+n77RC/MzBWZuTwzl5+8oP/PZkrHqqmG/SHga/XjrwE/70xzJHVLk5/e7gb+Gzg7IrZGxPXArcDlEfESVX32W7vbTEntOupFNZl53QRPHb32sqS+4eWyUiF6ernsxg/mcsmzf9bLTUpF2fjBnRM+Z88uFaKnPfuMbUPM/qeTerlJqSgztk3887Y9u1QIwy4VwrBLhTDsUiEMu1QIwy4VwrBLhTDsUiEMu1QIwy4VwrBLhTDsUiEMu1SIJtNSnRYRj0XEhoh4ISJurNdbFUYaIE169jHgm5l5DnAR8PWIOBerwkgD5ahhz8xtmflU/fg9YAOwCLgaODgtxp3An3arkZLaN6nv7HUZqGXAWhpWhRlfJOLDfbvba62kKWsc9og4HvgZcFNmvtv0feOLRMwcmTOVNkrqgEZhj4gRqqDflZkP1KsbV4WRNP2anI0P4A5gQ2Z+f9xTVoWRBkiTCScvAf4SeC4inqnX/T1VFZj76goxrwFf6U4TJXVCk4ow/wW0rPeMVWGkgeEVdFIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw
7FIhmsxBNxoR6yLil3VFmO/W68+IiLV1RZh7I2Jm95sraaqa9Ox7gUsz8zxgKXBFRFwEfA/4QV0R5h3g+u41U1K7mlSEycz8Xb04Ut8SuBRYWa+3IozU55rOGz9Uzyy7HVgNbAF2ZeZY/ZKtVCWhWr3XijBSH2gU9szcn5lLgcXABcA5rV42wXutCCP1gUmdjc/MXcDjVNVc50bEwamoFwNvdrZpkjqpydn4kyNibv34OODzVJVcHwOuqV9mRRipzzWpCLMQuDMihqg+HO7LzFUR8SJwT0T8I/A0VYkoSX2qSUWYZ6nKNB+5/mWq7++SBoBX0EmFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFMOxSIQy7VAjDLhXCsEuFaBz2ejrppyNiVb1sRRhpgEymZ7+RaqLJg6wIIw2QpkUiFgNfAm6vlwMrwkgDpWnPfhvwLeBAvbwAK8JIA6XJvPFfBrZn5pPjV7d4qRVhpD7WZN74S4CrIuJKYBQ4kaqnnxsRw3XvbkUYqc81qeJ6S2YuzswlwLXAo5n5VawIIw2Udn5n/zbwjYjYTPUd3oowUh9rMow/JDMfpyrsaEUYacB4BZ1UCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUiEYz1UTEq8B7wH5gLDOXR8R84F5gCfAq8BeZ+U53mimpXZPp2T+XmUszc3m9fDOwpq4Is6ZeltSn2hnGX01VCQasCCP1vaZhT+A/I+LJiLihXndqZm4DqO9PafVGK8JI/aHp7LKXZOabEXEKsDoiNjbdQGauAFYAnHjCopZVYyR1X6OePTPfrO+3Aw9STSH9dkQsBKjvt3erkZLa16TW25yIOOHgY+ALwPPAQ1SVYMCKMFLfazKMPxV4sKrSzDDw75n5cEQ8AdwXEdcDrwFf6V4zJbXrqGGvK7+c12L9TuCybjRKUud5BZ1UCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFcKwS4Uw7FIhDLtUCMMuFaLptFR9a9+cahf2jx7+3Br64MChxyPvj/W8TVI/smeXCjHwPfvOT40AsPv0/YfWnbRx5NDjjz1nzy5Bw549IuZGxMqI2BgRGyLi4oiYHxGrI+Kl+n5etxsraeqaDuN/CDycmZ+kmqJqA1aEkQZKk9llTwT+CLgDIDM/zMxdWBFGGihNevYzgR3ATyPi6Yi4vZ5S2oow0gBpEvZh4Hzgx5m5DNjNJIbsmbkiM5dn5vKZI3Om2ExJ7WoS9q3A1sxcWy+vpAq/FWGkAXLUsGfmW8DrEXF2veoy4EWsCCMNlKa/s/8NcFdEzAReBv6K6oPCijDSgGgU9sx8Blje4ikrwkgDwstlpUIYdqkQhl0qhGGXCmHYpUIYdqkQhl0qhGGXCjHwM9V8fN0H1YN109sOqd/Zs0uFMOxSIQy7VAjDLhVi4E/QSf3srQuPO/R477LD07INbapmbVr82Ac9a4s9u1QIwy4VoslU0mdHxDPjbu9GxE0WiZCOLsYO38b2Dh+6xT6Ifb1tS5M56DZl5tLMXAr8PvA+8CAWiZAGymSH8ZcBWzLzf7BIhDRQJns2/lrg7vrx/ykSEREti0RIJRvaO27hvcNxG97T+7Y07tnrmWWvAu6fzAasCCP1h8kM4/8YeCoz366XGxWJsCKM1B8mE/brODyEB4tESAOlaX322cDlwAPjVt8KXB4RL9XP3dr55knqlKZFIt4HFhyxbicWiZAGhlfQSYUw7FIhDLtUCMMuFcK/Z5e6aP+scQsnjB16ODY61PO22LNLhTDsUiEcxktdlOMSNjzr8DA+R2a1eHV32bNLhbBnl7ro42vHTSi5dnzf2ruJJg+yZ5cKYdilQvR0GL933gy2XDOzl5uUirJ3y8T9tz27VIie9uyfmbeDdX/+L73cpFSUC1bsmPA5
e3apEIZdKkTTaan+LiJeiIjnI+LuiBiNiDMiYm1dEebeevZZSX2qSfmnRcDfAssz89PAENX88d8DflBXhHkHuL6bDZXUnqbD+GHguIgYBmYD24BLgZX181aEkfpck1pvbwD/DLxGFfLfAk8CuzLz4JX9W4FF3WqkpPY1GcbPo6rrdgbwe8AcqoIRR8oJ3n+oIsyOnfvbaaukNjQZxn8eeCUzd2TmPqq54/8AmFsP6wEWA2+2evP4ijAnL+j97BySKk3C/hpwUUTMjoigmiv+ReAx4Jr6NVaEkfpck+/sa6lOxD0FPFe/ZwXwbeAbEbGZqoDEHV1sp6Q2Na0I8x3gO0esfhm4oOMtktQVXkEnFcKwS4Uw7FIhDLtUiMhseS1MdzYWsQPYDfy6Zxvtvo/h/vSrY2lfoNn+nJ6ZJ7d6oqdhB4iI9Zm5vKcb7SL3p38dS/sC7e+Pw3ipEIZdKsR0hH3FNGyzm9yf/nUs7Qu0uT89/84uaXo4jJcK0dOwR8QVEbEpIjZHxM293Ha7IuK0iHgsIjbU8/HdWK+fHxGr67n4Vtd//z8wImIoIp6OiFX18sDOLRgRcyNiZURsrI/TxYN8fDo992PPwh4RQ8CPqCa+OBe4LiLO7dX2O2AM+GZmngNcBHy9bv/NwJp6Lr419fIguRHYMG55kOcW/CHwcGZ+EjiPar8G8vh0Ze7HzOzJDbgYeGTc8i3ALb3afhf25+fA5cAmYGG9biGwabrbNol9WEwVgEuBVUBQXbQx3OqY9fMNOBF4hfo81Lj1A3l8qKZ5ex2YT/XXqauAL7ZzfHo5jD/Y+IMGdt66iFgCLAPWAqdm5jaA+v6U6WvZpN0GfAs4UC8vYHDnFjwT2AH8tP5acntEzGFAj092Ye7HXoY9WqwbuJ8CIuJ44GfATZn57nS3Z6oi4svA9sx8cvzqFi8dlGM0DJwP/Dgzl1Fdlj0QQ/ZW2p37sZVehn0rcNq45QnnretXETFCFfS7MvOBevXbEbGwfn4hsH262jdJlwBXRcSrwD1UQ/nbaDi3YB/aCmzNamYlqGZXOp/BPT5tzf3YSi/D/gRwVn02cSbVyYaHerj9ttTz790BbMjM74976iGqOfhggObiy8xbMnNxZi6hOhaPZuZXGdC5BTPzLeD1iDi7XnVwrsSBPD50Y+7HHp90uBL4FbAF+IfpPgkyybb/IdWQ6Vngmfp2JdX33DXAS/X9/Olu6xT27bPAqvrxmcA6YDNwPzBruts3if1YCqyvj9F/APMG+fgA3wU2As8D/wbMauf4eAWdVAivoJMKYdilQhh2qRCGXSqEYZcKYdilQhh2qRCGXSrE/wJNz1z3y24G/AAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Create and wrap the environment\n",
    "env = make_atari('PongNoFrameskip-v4') # only use in no frameskip environment\n",
    "env = wrap_deepmind(env, scale = False, frame_stack=True )\n",
    "n_actions = env.action_space.n\n",
    "state_dim = env.observation_space.shape\n",
    "\n",
    "# env.render()\n",
    "test = env.reset()\n",
    "for i in range(100):\n",
    "    test = env.step(env.action_space.sample())[0]\n",
    "\n",
    "plt.imshow(test._force()[...,0])\n",
    "\n",
    "#plt.imshow(env.render(\"rgb_array\"))\n",
    "# env.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dueling_DQN(nn.Module):\n",
    "    def __init__(self, input_shape, num_outputs):\n",
    "        super(Dueling_DQN, self).__init__()\n",
    "        \n",
    "        self.input_shape = input_shape\n",
    "        self.num_actions = num_outputs\n",
    "        \n",
    "        self.features = nn.Sequential(\n",
    "            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(32, 64, kernel_size=4, stride=2),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(64, 64, kernel_size=3, stride=1),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        \n",
    "        self.advantage = nn.Sequential(\n",
    "            nn.Linear(self.feature_size(), 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(512, num_outputs)\n",
    "        )\n",
    "        \n",
    "        self.value = nn.Sequential(\n",
    "            nn.Linear(self.feature_size(), 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(512, 1)\n",
    "        )\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x = self.features(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        advantage = self.advantage(x)\n",
    "        value     = self.value(x)\n",
    "        return value + advantage  - advantage.mean()\n",
    "    \n",
    "    def feature_size(self):\n",
    "        return self.features(autograd.Variable(torch.zeros(1, *self.input_shape))).view(1, -1).size(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Memory_Buffer(object):\n",
    "    def __init__(self, memory_size=1000):\n",
    "        self.buffer = []\n",
    "        self.memory_size = memory_size\n",
    "        self.next_idx = 0\n",
    "        \n",
    "    def push(self, state, action, reward, next_state, done):\n",
    "        data = (state, action, reward, next_state, done)\n",
    "        if len(self.buffer) <= self.memory_size: # buffer not full\n",
    "            self.buffer.append(data)\n",
    "        else: # buffer is full\n",
    "            self.buffer[self.next_idx] = data\n",
    "        self.next_idx = (self.next_idx + 1) % self.memory_size\n",
    "\n",
    "    def sample(self, batch_size):\n",
    "        states, actions, rewards, next_states, dones = [], [], [], [], []\n",
    "        for i in range(batch_size):\n",
    "            idx = random.randint(0, self.size() - 1)\n",
    "            data = self.buffer[idx]\n",
    "            state, action, reward, next_state, done= data\n",
    "            states.append(state)\n",
    "            actions.append(action)\n",
    "            rewards.append(reward)\n",
    "            next_states.append(next_state)\n",
    "            dones.append(done)\n",
    "            \n",
    "            \n",
    "        return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones\n",
    "    \n",
    "    def size(self):\n",
    "        return len(self.buffer)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dueling_DQNAgent: \n",
    "    def __init__(self, input_shape, action_space = [], USE_CUDA = False, memory_size = 10000, epsilon  = 1, lr = 1e-4):\n",
    "        self.epsilon = epsilon\n",
    "        self.action_space = action_space\n",
    "        self.memory_buffer = Memory_Buffer(memory_size)\n",
    "        self.Dueling_DQN = Dueling_DQN(input_shape = input_shape, num_outputs = action_space.n)\n",
    "        self.Dueling_DQN_target = Dueling_DQN(input_shape = input_shape, num_outputs = action_space.n)\n",
    "        self.Dueling_DQN_target.load_state_dict(self.Dueling_DQN.state_dict())\n",
    "\n",
    "\n",
    "        self.USE_CUDA = USE_CUDA\n",
    "        if USE_CUDA:\n",
    "            self.Dueling_DQN = self.Dueling_DQN.cuda()\n",
    "            self.Dueling_DQN_target = self.Dueling_DQN_target.cuda()\n",
    "        self.optimizer = optim.RMSprop(self.Dueling_DQN.parameters(),lr=lr, eps=0.001, alpha=0.95)\n",
    "\n",
    "    def observe(self, lazyframe):\n",
    "        # from Lazy frame to tensor\n",
    "        state =  torch.from_numpy(lazyframe._force().transpose(2,0,1)[None]/255).float()\n",
    "        if self.USE_CUDA:\n",
    "            state = state.cuda()\n",
    "        return state\n",
    "\n",
    "    def value(self, state):\n",
    "        q_values = self.Dueling_DQN(state)\n",
    "        return q_values\n",
    "    \n",
    "    def act(self, state, epsilon = None):\n",
    "        \"\"\"\n",
    "        sample actions with epsilon-greedy policy\n",
    "        recap: with p = epsilon pick random action, else pick action with highest Q(s,a)\n",
    "        \"\"\"\n",
    "        if epsilon is None: epsilon = self.epsilon\n",
    "\n",
    "        q_values = self.value(state).cpu().detach().numpy()\n",
    "        if random.random()<epsilon:\n",
    "            aciton = random.randrange(self.action_space.n)\n",
    "        else:\n",
    "            aciton = q_values.argmax(1)[0]\n",
    "        return aciton\n",
    "    \n",
    "    def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):\n",
    "        \"\"\" Compute td loss using torch operations only. Use the formula above. \"\"\"\n",
    "        actions = torch.tensor(actions).long()    # shape: [batch_size]\n",
    "        rewards = torch.tensor(rewards, dtype =torch.float)  # shape: [batch_size]\n",
    "        is_done = torch.tensor(is_done).bool()  # shape: [batch_size]\n",
    "        \n",
    "        if self.USE_CUDA:\n",
    "            actions = actions.cuda()\n",
    "            rewards = rewards.cuda()\n",
    "            is_done = is_done.cuda()\n",
    "\n",
    "        # get q-values for all actions in current states\n",
    "        predicted_qvalues = self.Dueling_DQN(states)\n",
    "\n",
    "        # select q-values for chosen actions\n",
    "        predicted_qvalues_for_actions = predicted_qvalues[\n",
    "          range(states.shape[0]), actions\n",
    "        ]\n",
    "\n",
    "        # compute q-values for all actions in next states\n",
    "        predicted_next_qvalues = self.Dueling_DQN_target(next_states) # YOUR CODE\n",
    "\n",
    "        # compute V*(next_states) using predicted next q-values\n",
    "        next_state_values =  predicted_next_qvalues.max(-1)[0] # YOUR CODE\n",
    "\n",
    "        # compute \"target q-values\" for loss - it's what's inside square parentheses in the above formula.\n",
    "        target_qvalues_for_actions = rewards + gamma *next_state_values # YOUR CODE\n",
    "\n",
    "        # at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist\n",
    "        target_qvalues_for_actions = torch.where(\n",
    "            is_done, rewards, target_qvalues_for_actions)\n",
    "\n",
    "        # mean squared error loss to minimize\n",
    "        #loss = torch.mean((predicted_qvalues_for_actions -\n",
    "        #                   target_qvalues_for_actions.detach()) ** 2)\n",
    "        loss = F.smooth_l1_loss(predicted_qvalues_for_actions, target_qvalues_for_actions.detach())\n",
    "\n",
    "        return loss\n",
    "    \n",
    "    def sample_from_buffer(self, batch_size):\n",
    "        states, actions, rewards, next_states, dones = [], [], [], [], []\n",
    "        for i in range(batch_size):\n",
    "            idx = random.randint(0, self.memory_buffer.size() - 1)\n",
    "            data = self.memory_buffer.buffer[idx]\n",
    "            frame, action, reward, next_frame, done= data\n",
    "            states.append(self.observe(frame))\n",
    "            actions.append(action)\n",
    "            rewards.append(reward)\n",
    "            next_states.append(self.observe(next_frame))\n",
    "            dones.append(done)\n",
    "        return torch.cat(states), actions, rewards, torch.cat(next_states), dones\n",
    "\n",
    "    def learn_from_experience(self, batch_size):\n",
    "        if self.memory_buffer.size() > batch_size:\n",
    "            states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)\n",
    "            td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)\n",
    "            self.optimizer.zero_grad()\n",
    "            td_loss.backward()\n",
    "            for param in self.Dueling_DQN.parameters():\n",
    "                param.grad.data.clamp_(-1, 1)\n",
    "\n",
    "            self.optimizer.step()\n",
    "            return(td_loss.item())\n",
    "        else:\n",
    "            return(0)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "F:\\softwares\\ANACONDA\\lib\\site-packages\\numpy\\core\\fromnumeric.py:3335: RuntimeWarning: Mean of empty slice.\n",
      "  out=out, **kwargs)\n",
      "F:\\softwares\\ANACONDA\\lib\\site-packages\\numpy\\core\\_methods.py:161: RuntimeWarning: invalid value encountered in double_scalars\n",
      "  ret = ret.dtype.type(ret / rcount)\n",
      "WARNING:root:NaN or Inf found in input tensor.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames:     0, reward:   nan, loss: 0.000000, epsilon: 1.000000, episode:    0\n",
      "frames:  1000, reward: -21.000000, loss: 0.000000, epsilon: 0.967544, episode:    1\n",
      "frames:  2000, reward: -21.000000, loss: 0.000000, epsilon: 0.936152, episode:    2\n",
      "frames:  3000, reward: -20.333333, loss: 0.000000, epsilon: 0.905789, episode:    3\n",
      "frames:  4000, reward: -20.500000, loss: 0.000000, epsilon: 0.876422, episode:    4\n",
      "frames:  5000, reward: -20.600000, loss: 0.000000, epsilon: 0.848017, episode:    5\n",
      "frames:  6000, reward: -20.714286, loss: 0.000000, epsilon: 0.820543, episode:    7\n",
      "frames:  7000, reward: -20.750000, loss: 0.000000, epsilon: 0.793971, episode:    8\n",
      "frames:  8000, reward: -20.666667, loss: 0.000000, epsilon: 0.768269, episode:    9\n",
      "frames:  9000, reward: -20.700000, loss: 0.000000, epsilon: 0.743410, episode:   10\n",
      "frames: 10000, reward: -20.700000, loss: 0.000893, epsilon: 0.719366, episode:   12\n",
      "frames: 11000, reward: -20.900000, loss: 0.000301, epsilon: 0.696110, episode:   13\n",
      "frames: 12000, reward: -20.900000, loss: 0.029878, epsilon: 0.673617, episode:   14\n",
      "frames: 13000, reward: -20.900000, loss: 0.044363, epsilon: 0.651861, episode:   15\n",
      "frames: 14000, reward: -20.900000, loss: 0.000451, epsilon: 0.630818, episode:   16\n",
      "frames: 15000, reward: -20.900000, loss: 0.000343, epsilon: 0.610465, episode:   18\n",
      "frames: 16000, reward: -21.000000, loss: 0.000302, epsilon: 0.590780, episode:   19\n",
      "frames: 17000, reward: -21.000000, loss: 0.008640, epsilon: 0.571740, episode:   20\n",
      "frames: 18000, reward: -21.000000, loss: 0.000793, epsilon: 0.553324, episode:   21\n",
      "frames: 19000, reward: -20.900000, loss: 0.000271, epsilon: 0.535511, episode:   22\n",
      "frames: 20000, reward: -20.700000, loss: 0.004643, epsilon: 0.518283, episode:   23\n",
      "frames: 21000, reward: -20.600000, loss: 0.000839, epsilon: 0.501619, episode:   25\n",
      "frames: 22000, reward: -20.400000, loss: 0.000547, epsilon: 0.485502, episode:   26\n",
      "frames: 23000, reward: -20.400000, loss: 0.003109, epsilon: 0.469913, episode:   26\n",
      "frames: 24000, reward: -20.200000, loss: 0.018502, epsilon: 0.454836, episode:   28\n",
      "frames: 25000, reward: -20.100000, loss: 0.000999, epsilon: 0.440252, episode:   29\n",
      "frames: 26000, reward: -20.100000, loss: 0.001303, epsilon: 0.426147, episode:   30\n",
      "frames: 27000, reward: -20.000000, loss: 0.012767, epsilon: 0.412504, episode:   31\n",
      "frames: 28000, reward: -20.100000, loss: 0.001048, epsilon: 0.399308, episode:   32\n",
      "frames: 29000, reward: -20.200000, loss: 0.002104, epsilon: 0.386545, episode:   33\n",
      "frames: 30000, reward: -20.100000, loss: 0.002956, epsilon: 0.374201, episode:   34\n",
      "frames: 31000, reward: -20.000000, loss: 0.000560, epsilon: 0.362261, episode:   35\n",
      "frames: 32000, reward: -20.400000, loss: 0.000922, epsilon: 0.350712, episode:   37\n",
      "frames: 33000, reward: -20.400000, loss: 0.002211, epsilon: 0.339542, episode:   38\n",
      "frames: 34000, reward: -20.500000, loss: 0.025655, epsilon: 0.328739, episode:   39\n",
      "frames: 35000, reward: -20.400000, loss: 0.000978, epsilon: 0.318289, episode:   40\n",
      "frames: 36000, reward: -20.400000, loss: 0.000620, epsilon: 0.308182, episode:   41\n",
      "frames: 37000, reward: -20.400000, loss: 0.001260, epsilon: 0.298407, episode:   42\n",
      "frames: 38000, reward: -20.700000, loss: 0.002315, epsilon: 0.288952, episode:   44\n",
      "frames: 39000, reward: -20.800000, loss: 0.000566, epsilon: 0.279806, episode:   45\n",
      "frames: 40000, reward: -20.600000, loss: 0.000879, epsilon: 0.270961, episode:   46\n",
      "frames: 41000, reward: -20.400000, loss: 0.001940, epsilon: 0.262406, episode:   47\n",
      "frames: 42000, reward: -20.400000, loss: 0.008830, epsilon: 0.254131, episode:   48\n",
      "frames: 43000, reward: -20.500000, loss: 0.001013, epsilon: 0.246127, episode:   50\n",
      "frames: 44000, reward: -20.500000, loss: 0.001683, epsilon: 0.238386, episode:   51\n",
      "frames: 45000, reward: -20.500000, loss: 0.002117, epsilon: 0.230899, episode:   52\n",
      "frames: 46000, reward: -20.400000, loss: 0.020158, epsilon: 0.223657, episode:   53\n",
      "frames: 47000, reward: -20.300000, loss: 0.002367, epsilon: 0.216652, episode:   54\n",
      "frames: 48000, reward: -20.100000, loss: 0.002156, epsilon: 0.209878, episode:   55\n",
      "frames: 49000, reward: -20.400000, loss: 0.001063, epsilon: 0.203325, episode:   57\n",
      "frames: 50000, reward: -20.400000, loss: 0.001168, epsilon: 0.196987, episode:   58\n",
      "frames: 51000, reward: -20.400000, loss: 0.000951, epsilon: 0.190857, episode:   59\n",
      "frames: 52000, reward: -20.400000, loss: 0.001418, epsilon: 0.184928, episode:   60\n",
      "frames: 53000, reward: -20.500000, loss: 0.001341, epsilon: 0.179193, episode:   61\n",
      "frames: 54000, reward: -20.400000, loss: 0.002015, epsilon: 0.173646, episode:   62\n",
      "frames: 55000, reward: -20.400000, loss: 0.001428, epsilon: 0.168281, episode:   63\n",
      "frames: 56000, reward: -20.500000, loss: 0.004891, epsilon: 0.163092, episode:   64\n",
      "frames: 57000, reward: -20.600000, loss: 0.001555, epsilon: 0.158073, episode:   65\n",
      "frames: 58000, reward: -20.600000, loss: 0.003325, epsilon: 0.153219, episode:   66\n",
      "frames: 59000, reward: -20.700000, loss: 0.005448, epsilon: 0.148523, episode:   67\n",
      "frames: 60000, reward: -20.700000, loss: 0.001768, epsilon: 0.143982, episode:   68\n",
      "frames: 61000, reward: -20.600000, loss: 0.000911, epsilon: 0.139589, episode:   69\n",
      "frames: 62000, reward: -20.600000, loss: 0.004457, epsilon: 0.135341, episode:   71\n",
      "frames: 63000, reward: -20.500000, loss: 0.006333, epsilon: 0.131232, episode:   72\n",
      "frames: 64000, reward: -20.400000, loss: 0.001649, epsilon: 0.127257, episode:   73\n",
      "frames: 65000, reward: -20.400000, loss: 0.002298, epsilon: 0.123413, episode:   74\n",
      "frames: 66000, reward: -20.500000, loss: 0.003811, epsilon: 0.119695, episode:   75\n",
      "frames: 67000, reward: -20.400000, loss: 0.004679, epsilon: 0.116099, episode:   76\n",
      "frames: 68000, reward: -20.400000, loss: 0.001374, epsilon: 0.112621, episode:   77\n",
      "frames: 69000, reward: -20.300000, loss: 0.002828, epsilon: 0.109256, episode:   78\n",
      "frames: 70000, reward: -20.400000, loss: 0.000586, epsilon: 0.106002, episode:   79\n",
      "frames: 71000, reward: -20.300000, loss: 0.000706, epsilon: 0.102855, episode:   80\n",
      "frames: 72000, reward: -20.300000, loss: 0.002151, epsilon: 0.099811, episode:   81\n",
      "frames: 73000, reward: -20.400000, loss: 0.003932, epsilon: 0.096866, episode:   82\n",
      "frames: 74000, reward: -20.500000, loss: 0.002712, epsilon: 0.094019, episode:   83\n",
      "frames: 75000, reward: -20.400000, loss: 0.008293, epsilon: 0.091264, episode:   84\n",
      "frames: 76000, reward: -20.400000, loss: 0.002843, epsilon: 0.088600, episode:   86\n",
      "frames: 77000, reward: -20.300000, loss: 0.003582, epsilon: 0.086023, episode:   87\n",
      "frames: 78000, reward: -20.300000, loss: 0.001969, epsilon: 0.083531, episode:   88\n",
      "frames: 79000, reward: -20.300000, loss: 0.003767, epsilon: 0.081120, episode:   89\n",
      "frames: 80000, reward: -20.200000, loss: 0.005353, epsilon: 0.078789, episode:   90\n",
      "frames: 81000, reward: -20.100000, loss: 0.003602, epsilon: 0.076533, episode:   91\n",
      "frames: 82000, reward: -20.100000, loss: 0.000774, epsilon: 0.074352, episode:   92\n",
      "frames: 83000, reward: -20.100000, loss: 0.011500, epsilon: 0.072243, episode:   93\n",
      "frames: 84000, reward: -20.100000, loss: 0.001224, epsilon: 0.070202, episode:   94\n",
      "frames: 85000, reward: -19.900000, loss: 0.002527, epsilon: 0.068228, episode:   95\n",
      "frames: 86000, reward: -20.000000, loss: 0.002585, epsilon: 0.066319, episode:   96\n",
      "frames: 87000, reward: -20.000000, loss: 0.004876, epsilon: 0.064473, episode:   97\n",
      "frames: 88000, reward: -20.100000, loss: 0.001697, epsilon: 0.062687, episode:   98\n",
      "frames: 89000, reward: -20.000000, loss: 0.001632, epsilon: 0.060960, episode:   99\n",
      "frames: 90000, reward: -20.000000, loss: 0.002151, epsilon: 0.059289, episode:  100\n",
      "frames: 91000, reward: -20.100000, loss: 0.004755, epsilon: 0.057673, episode:  101\n",
      "frames: 92000, reward: -20.000000, loss: 0.002371, epsilon: 0.056110, episode:  102\n",
      "frames: 93000, reward: -20.100000, loss: 0.001682, epsilon: 0.054599, episode:  103\n",
      "frames: 94000, reward: -20.100000, loss: 0.001677, epsilon: 0.053137, episode:  104\n",
      "frames: 95000, reward: -20.200000, loss: 0.001778, epsilon: 0.051722, episode:  105\n",
      "frames: 96000, reward: -19.900000, loss: 0.002391, epsilon: 0.050355, episode:  106\n",
      "frames: 97000, reward: -19.900000, loss: 0.002765, epsilon: 0.049032, episode:  107\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 98000, reward: -19.800000, loss: 0.002848, epsilon: 0.047752, episode:  108\n",
      "frames: 99000, reward: -19.800000, loss: 0.002823, epsilon: 0.046514, episode:  109\n",
      "frames: 100000, reward: -19.900000, loss: 0.001558, epsilon: 0.045317, episode:  110\n",
      "frames: 101000, reward: -19.700000, loss: 0.001436, epsilon: 0.044159, episode:  111\n",
      "frames: 102000, reward: -19.900000, loss: 0.001341, epsilon: 0.043040, episode:  112\n",
      "frames: 103000, reward: -19.800000, loss: 0.002416, epsilon: 0.041956, episode:  113\n",
      "frames: 104000, reward: -19.900000, loss: 0.001479, epsilon: 0.040909, episode:  114\n",
      "frames: 105000, reward: -20.000000, loss: 0.002231, epsilon: 0.039895, episode:  115\n",
      "frames: 106000, reward: -20.200000, loss: 0.003361, epsilon: 0.038915, episode:  116\n",
      "frames: 107000, reward: -20.200000, loss: 0.004092, epsilon: 0.037967, episode:  116\n",
      "frames: 108000, reward: -20.200000, loss: 0.006710, epsilon: 0.037050, episode:  117\n",
      "frames: 109000, reward: -20.200000, loss: 0.001811, epsilon: 0.036164, episode:  118\n",
      "frames: 110000, reward: -20.200000, loss: 0.001944, epsilon: 0.035306, episode:  119\n",
      "frames: 111000, reward: -20.300000, loss: 0.002939, epsilon: 0.034476, episode:  120\n",
      "frames: 112000, reward: -20.300000, loss: 0.003764, epsilon: 0.033674, episode:  120\n",
      "frames: 113000, reward: -20.300000, loss: 0.004124, epsilon: 0.032898, episode:  121\n",
      "frames: 114000, reward: -20.300000, loss: 0.004313, epsilon: 0.032147, episode:  122\n",
      "frames: 115000, reward: -20.300000, loss: 0.006609, epsilon: 0.031421, episode:  122\n",
      "frames: 116000, reward: -20.200000, loss: 0.001845, epsilon: 0.030719, episode:  123\n",
      "frames: 117000, reward: -19.700000, loss: 0.002249, epsilon: 0.030039, episode:  124\n",
      "frames: 118000, reward: -19.500000, loss: 0.002197, epsilon: 0.029383, episode:  125\n",
      "frames: 119000, reward: -19.500000, loss: 0.004113, epsilon: 0.028747, episode:  125\n",
      "frames: 120000, reward: -19.400000, loss: 0.001751, epsilon: 0.028132, episode:  126\n",
      "frames: 121000, reward: -19.500000, loss: 0.001320, epsilon: 0.027538, episode:  127\n",
      "frames: 122000, reward: -19.500000, loss: 0.002698, epsilon: 0.026963, episode:  127\n",
      "frames: 123000, reward: -19.500000, loss: 0.003009, epsilon: 0.026407, episode:  128\n",
      "frames: 124000, reward: -19.500000, loss: 0.004732, epsilon: 0.025869, episode:  129\n",
      "frames: 125000, reward: -19.500000, loss: 0.002788, epsilon: 0.025349, episode:  130\n",
      "frames: 126000, reward: -19.700000, loss: 0.005618, epsilon: 0.024846, episode:  131\n",
      "frames: 127000, reward: -19.700000, loss: 0.002915, epsilon: 0.024359, episode:  131\n",
      "frames: 128000, reward: -19.600000, loss: 0.003062, epsilon: 0.023888, episode:  132\n",
      "frames: 129000, reward: -19.600000, loss: 0.003343, epsilon: 0.023433, episode:  132\n",
      "frames: 130000, reward: -19.800000, loss: 0.001630, epsilon: 0.022992, episode:  133\n",
      "frames: 131000, reward: -20.200000, loss: 0.006754, epsilon: 0.022567, episode:  134\n",
      "frames: 132000, reward: -20.200000, loss: 0.001550, epsilon: 0.022155, episode:  134\n",
      "frames: 133000, reward: -20.400000, loss: 0.002706, epsilon: 0.021756, episode:  135\n",
      "frames: 134000, reward: -20.400000, loss: 0.003179, epsilon: 0.021371, episode:  135\n",
      "frames: 135000, reward: -20.500000, loss: 0.002243, epsilon: 0.020998, episode:  136\n",
      "frames: 136000, reward: -20.300000, loss: 0.002337, epsilon: 0.020637, episode:  137\n",
      "frames: 137000, reward: -20.300000, loss: 0.003681, epsilon: 0.020289, episode:  137\n",
      "frames: 138000, reward: -19.900000, loss: 0.001400, epsilon: 0.019951, episode:  138\n",
      "frames: 139000, reward: -20.000000, loss: 0.001146, epsilon: 0.019625, episode:  139\n",
      "frames: 140000, reward: -20.000000, loss: 0.001166, epsilon: 0.019310, episode:  139\n",
      "frames: 141000, reward: -19.700000, loss: 0.001173, epsilon: 0.019004, episode:  140\n",
      "frames: 142000, reward: -19.700000, loss: 0.002553, epsilon: 0.018709, episode:  140\n",
      "frames: 143000, reward: -19.600000, loss: 0.001695, epsilon: 0.018424, episode:  141\n",
      "frames: 144000, reward: -19.600000, loss: 0.004200, epsilon: 0.018147, episode:  141\n",
      "frames: 145000, reward: -19.300000, loss: 0.001623, epsilon: 0.017880, episode:  142\n",
      "frames: 146000, reward: -19.300000, loss: 0.002237, epsilon: 0.017622, episode:  143\n",
      "frames: 147000, reward: -19.300000, loss: 0.001750, epsilon: 0.017372, episode:  143\n",
      "frames: 148000, reward: -19.400000, loss: 0.003844, epsilon: 0.017130, episode:  144\n",
      "frames: 149000, reward: -19.400000, loss: 0.017568, epsilon: 0.016897, episode:  144\n",
      "frames: 150000, reward: -19.200000, loss: 0.001025, epsilon: 0.016671, episode:  145\n",
      "frames: 151000, reward: -19.200000, loss: 0.001501, epsilon: 0.016452, episode:  145\n",
      "frames: 152000, reward: -19.300000, loss: 0.003907, epsilon: 0.016240, episode:  146\n",
      "frames: 153000, reward: -19.300000, loss: 0.003251, epsilon: 0.016036, episode:  146\n",
      "frames: 154000, reward: -19.500000, loss: 0.001868, epsilon: 0.015838, episode:  147\n",
      "frames: 155000, reward: -19.900000, loss: 0.001851, epsilon: 0.015647, episode:  148\n",
      "frames: 156000, reward: -19.900000, loss: 0.001608, epsilon: 0.015461, episode:  148\n",
      "frames: 157000, reward: -19.900000, loss: 0.001810, epsilon: 0.015282, episode:  149\n",
      "frames: 158000, reward: -20.100000, loss: 0.003250, epsilon: 0.015109, episode:  150\n",
      "frames: 159000, reward: -20.100000, loss: 0.001066, epsilon: 0.014942, episode:  150\n",
      "frames: 160000, reward: -20.200000, loss: 0.001669, epsilon: 0.014780, episode:  151\n",
      "frames: 161000, reward: -20.400000, loss: 0.002104, epsilon: 0.014623, episode:  152\n",
      "frames: 162000, reward: -20.400000, loss: 0.002553, epsilon: 0.014471, episode:  152\n",
      "frames: 163000, reward: -20.200000, loss: 0.000997, epsilon: 0.014325, episode:  153\n",
      "frames: 164000, reward: -20.200000, loss: 0.002058, epsilon: 0.014183, episode:  153\n",
      "frames: 165000, reward: -19.600000, loss: 0.002479, epsilon: 0.014046, episode:  154\n",
      "frames: 166000, reward: -19.600000, loss: 0.001113, epsilon: 0.013913, episode:  154\n",
      "frames: 167000, reward: -19.300000, loss: 0.000923, epsilon: 0.013785, episode:  155\n",
      "frames: 168000, reward: -19.300000, loss: 0.002015, epsilon: 0.013661, episode:  155\n",
      "frames: 169000, reward: -18.800000, loss: 0.003617, epsilon: 0.013541, episode:  156\n",
      "frames: 170000, reward: -18.800000, loss: 0.002327, epsilon: 0.013425, episode:  156\n",
      "frames: 171000, reward: -18.600000, loss: 0.007829, epsilon: 0.013313, episode:  157\n",
      "frames: 172000, reward: -18.600000, loss: 0.001450, epsilon: 0.013204, episode:  157\n",
      "frames: 173000, reward: -18.700000, loss: 0.000958, epsilon: 0.013099, episode:  158\n",
      "frames: 174000, reward: -18.600000, loss: 0.001331, epsilon: 0.012997, episode:  159\n",
      "frames: 175000, reward: -18.600000, loss: 0.003954, epsilon: 0.012899, episode:  159\n",
      "frames: 176000, reward: -18.000000, loss: 0.002151, epsilon: 0.012804, episode:  160\n",
      "frames: 177000, reward: -17.800000, loss: 0.002153, epsilon: 0.012712, episode:  161\n",
      "frames: 178000, reward: -17.800000, loss: 0.001175, epsilon: 0.012623, episode:  161\n",
      "frames: 179000, reward: -17.700000, loss: 0.001590, epsilon: 0.012537, episode:  162\n",
      "frames: 180000, reward: -17.700000, loss: 0.002198, epsilon: 0.012454, episode:  162\n",
      "frames: 181000, reward: -17.400000, loss: 0.003167, epsilon: 0.012374, episode:  163\n",
      "frames: 182000, reward: -17.400000, loss: 0.001433, epsilon: 0.012296, episode:  163\n",
      "frames: 183000, reward: -17.400000, loss: 0.001031, epsilon: 0.012220, episode:  164\n",
      "frames: 184000, reward: -17.400000, loss: 0.001907, epsilon: 0.012148, episode:  164\n",
      "frames: 185000, reward: -17.400000, loss: 0.001825, epsilon: 0.012077, episode:  164\n",
      "frames: 186000, reward: -17.700000, loss: 0.001642, epsilon: 0.012009, episode:  165\n",
      "frames: 187000, reward: -17.700000, loss: 0.001989, epsilon: 0.011943, episode:  165\n",
      "frames: 188000, reward: -17.600000, loss: 0.001553, epsilon: 0.011880, episode:  166\n",
      "frames: 189000, reward: -17.600000, loss: 0.002611, epsilon: 0.011818, episode:  166\n",
      "frames: 190000, reward: -17.300000, loss: 0.001989, epsilon: 0.011758, episode:  167\n",
      "frames: 191000, reward: -17.300000, loss: 0.002041, epsilon: 0.011701, episode:  167\n",
      "frames: 192000, reward: -16.600000, loss: 0.001888, epsilon: 0.011645, episode:  168\n",
      "frames: 193000, reward: -16.600000, loss: 0.002157, epsilon: 0.011591, episode:  168\n",
      "frames: 194000, reward: -16.200000, loss: 0.002591, epsilon: 0.011539, episode:  169\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 195000, reward: -16.200000, loss: 0.004459, epsilon: 0.011488, episode:  169\n",
      "frames: 196000, reward: -16.200000, loss: 0.003810, epsilon: 0.011440, episode:  169\n",
      "frames: 197000, reward: -16.200000, loss: 0.003751, epsilon: 0.011392, episode:  169\n",
      "frames: 198000, reward: -16.100000, loss: 0.001519, epsilon: 0.011347, episode:  170\n",
      "frames: 199000, reward: -16.100000, loss: 0.004102, epsilon: 0.011303, episode:  170\n",
      "frames: 200000, reward: -15.500000, loss: 0.001997, epsilon: 0.011260, episode:  171\n",
      "frames: 201000, reward: -15.500000, loss: 0.002874, epsilon: 0.011219, episode:  171\n",
      "frames: 202000, reward: -15.500000, loss: 0.002199, epsilon: 0.011179, episode:  171\n",
      "frames: 203000, reward: -15.100000, loss: 0.003060, epsilon: 0.011140, episode:  172\n",
      "frames: 204000, reward: -15.600000, loss: 0.001199, epsilon: 0.011103, episode:  173\n",
      "frames: 205000, reward: -15.600000, loss: 0.001421, epsilon: 0.011066, episode:  173\n",
      "frames: 206000, reward: -16.000000, loss: 0.001982, epsilon: 0.011032, episode:  174\n",
      "frames: 207000, reward: -16.000000, loss: 0.002186, epsilon: 0.010998, episode:  174\n",
      "frames: 208000, reward: -16.000000, loss: 0.002882, epsilon: 0.010965, episode:  174\n",
      "frames: 209000, reward: -14.900000, loss: 0.002276, epsilon: 0.010933, episode:  175\n",
      "frames: 210000, reward: -14.900000, loss: 0.002887, epsilon: 0.010903, episode:  175\n",
      "frames: 211000, reward: -15.200000, loss: 0.003745, epsilon: 0.010873, episode:  176\n",
      "frames: 212000, reward: -15.200000, loss: 0.002286, epsilon: 0.010845, episode:  176\n",
      "frames: 213000, reward: -15.400000, loss: 0.001365, epsilon: 0.010817, episode:  177\n",
      "frames: 214000, reward: -15.400000, loss: 0.002906, epsilon: 0.010790, episode:  177\n",
      "frames: 215000, reward: -15.400000, loss: 0.002097, epsilon: 0.010764, episode:  177\n",
      "frames: 216000, reward: -15.000000, loss: 0.002684, epsilon: 0.010739, episode:  178\n",
      "frames: 217000, reward: -15.000000, loss: 0.002070, epsilon: 0.010715, episode:  178\n",
      "frames: 218000, reward: -15.000000, loss: 0.002171, epsilon: 0.010691, episode:  178\n",
      "frames: 219000, reward: -14.700000, loss: 0.003054, epsilon: 0.010669, episode:  179\n",
      "frames: 220000, reward: -14.700000, loss: 0.002489, epsilon: 0.010647, episode:  179\n",
      "frames: 221000, reward: -14.700000, loss: 0.000912, epsilon: 0.010626, episode:  179\n",
      "frames: 222000, reward: -15.100000, loss: 0.001817, epsilon: 0.010605, episode:  180\n",
      "frames: 223000, reward: -15.900000, loss: 0.002584, epsilon: 0.010585, episode:  181\n",
      "frames: 224000, reward: -15.900000, loss: 0.001212, epsilon: 0.010566, episode:  181\n",
      "frames: 225000, reward: -15.900000, loss: 0.001458, epsilon: 0.010548, episode:  181\n",
      "frames: 226000, reward: -15.900000, loss: 0.000668, epsilon: 0.010530, episode:  181\n",
      "frames: 227000, reward: -16.000000, loss: 0.001762, epsilon: 0.010512, episode:  182\n",
      "frames: 228000, reward: -16.000000, loss: 0.001108, epsilon: 0.010495, episode:  182\n",
      "frames: 229000, reward: -16.000000, loss: 0.000917, epsilon: 0.010479, episode:  182\n",
      "frames: 230000, reward: -15.200000, loss: 0.001993, epsilon: 0.010463, episode:  183\n",
      "frames: 231000, reward: -15.200000, loss: 0.001532, epsilon: 0.010448, episode:  183\n",
      "frames: 232000, reward: -15.100000, loss: 0.000917, epsilon: 0.010434, episode:  184\n",
      "frames: 233000, reward: -15.100000, loss: 0.003891, epsilon: 0.010419, episode:  184\n",
      "frames: 234000, reward: -15.100000, loss: 0.000945, epsilon: 0.010406, episode:  184\n",
      "frames: 235000, reward: -15.800000, loss: 0.003160, epsilon: 0.010392, episode:  185\n",
      "frames: 236000, reward: -16.000000, loss: 0.000642, epsilon: 0.010379, episode:  186\n",
      "frames: 237000, reward: -16.000000, loss: 0.002948, epsilon: 0.010367, episode:  186\n",
      "frames: 238000, reward: -16.000000, loss: 0.001458, epsilon: 0.010355, episode:  186\n",
      "frames: 239000, reward: -16.000000, loss: 0.001647, epsilon: 0.010343, episode:  187\n",
      "frames: 240000, reward: -16.000000, loss: 0.001131, epsilon: 0.010332, episode:  187\n",
      "frames: 241000, reward: -16.000000, loss: 0.001590, epsilon: 0.010321, episode:  187\n",
      "frames: 242000, reward: -16.100000, loss: 0.001398, epsilon: 0.010311, episode:  188\n",
      "frames: 243000, reward: -16.100000, loss: 0.003014, epsilon: 0.010301, episode:  188\n",
      "frames: 244000, reward: -16.100000, loss: 0.006141, epsilon: 0.010291, episode:  188\n",
      "frames: 245000, reward: -16.700000, loss: 0.001315, epsilon: 0.010281, episode:  189\n",
      "frames: 246000, reward: -16.700000, loss: 0.000668, epsilon: 0.010272, episode:  189\n",
      "frames: 247000, reward: -16.700000, loss: 0.000837, epsilon: 0.010263, episode:  189\n",
      "frames: 248000, reward: -17.100000, loss: 0.001676, epsilon: 0.010254, episode:  190\n",
      "frames: 249000, reward: -17.100000, loss: 0.001222, epsilon: 0.010246, episode:  190\n",
      "frames: 250000, reward: -17.100000, loss: 0.001180, epsilon: 0.010238, episode:  190\n",
      "frames: 251000, reward: -15.900000, loss: 0.001346, epsilon: 0.010230, episode:  191\n",
      "frames: 252000, reward: -15.900000, loss: 0.001749, epsilon: 0.010223, episode:  191\n",
      "frames: 253000, reward: -15.900000, loss: 0.002537, epsilon: 0.010215, episode:  191\n",
      "frames: 254000, reward: -15.800000, loss: 0.001120, epsilon: 0.010208, episode:  192\n",
      "frames: 255000, reward: -15.800000, loss: 0.001537, epsilon: 0.010201, episode:  192\n",
      "frames: 256000, reward: -15.800000, loss: 0.001632, epsilon: 0.010195, episode:  192\n",
      "frames: 257000, reward: -15.600000, loss: 0.001316, epsilon: 0.010188, episode:  193\n",
      "frames: 258000, reward: -15.600000, loss: 0.001225, epsilon: 0.010182, episode:  193\n",
      "frames: 259000, reward: -15.600000, loss: 0.001949, epsilon: 0.010176, episode:  193\n",
      "frames: 260000, reward: -15.600000, loss: 0.000635, epsilon: 0.010171, episode:  193\n",
      "frames: 261000, reward: -15.600000, loss: 0.001726, epsilon: 0.010165, episode:  193\n",
      "frames: 262000, reward: -14.700000, loss: 0.001344, epsilon: 0.010160, episode:  194\n",
      "frames: 263000, reward: -14.700000, loss: 0.001906, epsilon: 0.010154, episode:  194\n",
      "frames: 264000, reward: -14.700000, loss: 0.000991, epsilon: 0.010149, episode:  194\n",
      "frames: 265000, reward: -14.700000, loss: 0.001015, epsilon: 0.010144, episode:  194\n",
      "frames: 266000, reward: -14.700000, loss: 0.001104, epsilon: 0.010140, episode:  194\n",
      "frames: 267000, reward: -13.400000, loss: 0.002502, epsilon: 0.010135, episode:  195\n",
      "frames: 268000, reward: -13.400000, loss: 0.000714, epsilon: 0.010131, episode:  195\n",
      "frames: 269000, reward: -13.400000, loss: 0.001214, epsilon: 0.010126, episode:  195\n",
      "frames: 270000, reward: -12.500000, loss: 0.001295, epsilon: 0.010122, episode:  196\n",
      "frames: 271000, reward: -12.500000, loss: 0.000453, epsilon: 0.010118, episode:  196\n",
      "frames: 272000, reward: -12.500000, loss: 0.001095, epsilon: 0.010114, episode:  196\n",
      "frames: 273000, reward: -11.800000, loss: 0.001662, epsilon: 0.010111, episode:  197\n",
      "frames: 274000, reward: -11.800000, loss: 0.000804, epsilon: 0.010107, episode:  197\n",
      "frames: 275000, reward: -11.800000, loss: 0.001052, epsilon: 0.010103, episode:  197\n",
      "frames: 276000, reward: -11.800000, loss: 0.002569, epsilon: 0.010100, episode:  197\n",
      "frames: 277000, reward: -11.200000, loss: 0.003670, epsilon: 0.010097, episode:  198\n",
      "frames: 278000, reward: -11.200000, loss: 0.001164, epsilon: 0.010094, episode:  198\n",
      "frames: 279000, reward: -11.200000, loss: 0.001886, epsilon: 0.010091, episode:  198\n",
      "frames: 280000, reward: -11.200000, loss: 0.000746, epsilon: 0.010088, episode:  198\n",
      "frames: 281000, reward: -11.200000, loss: 0.000685, epsilon: 0.010085, episode:  198\n",
      "frames: 282000, reward: -9.700000, loss: 0.001040, epsilon: 0.010082, episode:  199\n",
      "frames: 283000, reward: -9.700000, loss: 0.000748, epsilon: 0.010079, episode:  199\n",
      "frames: 284000, reward: -9.700000, loss: 0.001002, epsilon: 0.010077, episode:  199\n",
      "frames: 285000, reward: -9.700000, loss: 0.000523, epsilon: 0.010074, episode:  199\n",
      "frames: 286000, reward: -8.400000, loss: 0.000712, epsilon: 0.010072, episode:  200\n",
      "frames: 287000, reward: -8.400000, loss: 0.001975, epsilon: 0.010069, episode:  200\n",
      "frames: 288000, reward: -8.400000, loss: 0.001935, epsilon: 0.010067, episode:  200\n",
      "frames: 289000, reward: -8.400000, loss: 0.000744, epsilon: 0.010065, episode:  200\n",
      "frames: 290000, reward: -8.000000, loss: 0.001082, epsilon: 0.010063, episode:  201\n",
      "frames: 291000, reward: -8.300000, loss: 0.001146, epsilon: 0.010061, episode:  202\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 292000, reward: -8.300000, loss: 0.002544, epsilon: 0.010059, episode:  202\n",
      "frames: 293000, reward: -8.300000, loss: 0.002262, epsilon: 0.010057, episode:  202\n",
      "frames: 294000, reward: -8.300000, loss: 0.002193, epsilon: 0.010055, episode:  202\n",
      "frames: 295000, reward: -7.900000, loss: 0.002706, epsilon: 0.010053, episode:  203\n",
      "frames: 296000, reward: -7.900000, loss: 0.002483, epsilon: 0.010051, episode:  203\n",
      "frames: 297000, reward: -7.900000, loss: 0.002780, epsilon: 0.010050, episode:  203\n",
      "frames: 298000, reward: -7.900000, loss: 0.001842, epsilon: 0.010048, episode:  203\n",
      "frames: 299000, reward: -7.800000, loss: 0.001436, epsilon: 0.010046, episode:  204\n",
      "frames: 300000, reward: -7.800000, loss: 0.001603, epsilon: 0.010045, episode:  204\n",
      "frames: 301000, reward: -7.800000, loss: 0.001203, epsilon: 0.010043, episode:  204\n",
      "frames: 302000, reward: -7.800000, loss: 0.000604, epsilon: 0.010042, episode:  204\n",
      "frames: 303000, reward: -8.900000, loss: 0.001132, epsilon: 0.010041, episode:  205\n",
      "frames: 304000, reward: -8.900000, loss: 0.001148, epsilon: 0.010039, episode:  205\n",
      "frames: 305000, reward: -8.900000, loss: 0.001663, epsilon: 0.010038, episode:  205\n",
      "frames: 306000, reward: -8.900000, loss: 0.000727, epsilon: 0.010037, episode:  205\n",
      "frames: 307000, reward: -8.900000, loss: 0.000569, epsilon: 0.010036, episode:  205\n",
      "frames: 308000, reward: -8.700000, loss: 0.002297, epsilon: 0.010034, episode:  206\n",
      "frames: 309000, reward: -8.700000, loss: 0.000921, epsilon: 0.010033, episode:  206\n",
      "frames: 310000, reward: -9.100000, loss: 0.004531, epsilon: 0.010032, episode:  207\n",
      "frames: 311000, reward: -9.100000, loss: 0.000434, epsilon: 0.010031, episode:  207\n",
      "frames: 312000, reward: -9.100000, loss: 0.001047, epsilon: 0.010030, episode:  207\n",
      "frames: 313000, reward: -10.700000, loss: 0.000826, epsilon: 0.010029, episode:  208\n",
      "frames: 314000, reward: -10.700000, loss: 0.001361, epsilon: 0.010028, episode:  208\n",
      "frames: 315000, reward: -11.900000, loss: 0.001009, epsilon: 0.010027, episode:  209\n",
      "frames: 316000, reward: -11.900000, loss: 0.000833, epsilon: 0.010026, episode:  209\n",
      "frames: 317000, reward: -11.900000, loss: 0.002045, epsilon: 0.010026, episode:  209\n",
      "frames: 318000, reward: -11.900000, loss: 0.001644, epsilon: 0.010025, episode:  209\n",
      "frames: 319000, reward: -11.900000, loss: 0.002664, epsilon: 0.010024, episode:  209\n",
      "frames: 320000, reward: -11.200000, loss: 0.004114, epsilon: 0.010023, episode:  210\n",
      "frames: 321000, reward: -11.200000, loss: 0.001045, epsilon: 0.010022, episode:  210\n",
      "frames: 322000, reward: -11.200000, loss: 0.001368, epsilon: 0.010022, episode:  210\n",
      "frames: 323000, reward: -11.200000, loss: 0.001050, epsilon: 0.010021, episode:  210\n",
      "frames: 324000, reward: -11.500000, loss: 0.002163, epsilon: 0.010020, episode:  211\n",
      "frames: 325000, reward: -11.500000, loss: 0.001273, epsilon: 0.010020, episode:  211\n",
      "frames: 326000, reward: -11.500000, loss: 0.001577, epsilon: 0.010019, episode:  211\n",
      "frames: 327000, reward: -11.500000, loss: 0.002668, epsilon: 0.010018, episode:  211\n",
      "frames: 328000, reward: -11.500000, loss: 0.001112, epsilon: 0.010018, episode:  211\n",
      "frames: 329000, reward: -9.500000, loss: 0.000892, epsilon: 0.010017, episode:  212\n",
      "frames: 330000, reward: -9.500000, loss: 0.001269, epsilon: 0.010017, episode:  212\n",
      "frames: 331000, reward: -9.500000, loss: 0.002125, epsilon: 0.010016, episode:  212\n",
      "frames: 332000, reward: -9.500000, loss: 0.003224, epsilon: 0.010015, episode:  212\n",
      "frames: 333000, reward: -8.600000, loss: 0.000806, epsilon: 0.010015, episode:  213\n",
      "frames: 334000, reward: -8.600000, loss: 0.001192, epsilon: 0.010014, episode:  213\n",
      "frames: 335000, reward: -8.600000, loss: 0.001533, epsilon: 0.010014, episode:  213\n",
      "frames: 336000, reward: -8.600000, loss: 0.000667, epsilon: 0.010014, episode:  213\n",
      "frames: 337000, reward: -8.600000, loss: 0.001254, epsilon: 0.010013, episode:  214\n",
      "frames: 338000, reward: -8.600000, loss: 0.000874, epsilon: 0.010013, episode:  214\n",
      "frames: 339000, reward: -8.600000, loss: 0.000570, epsilon: 0.010012, episode:  214\n",
      "frames: 340000, reward: -8.600000, loss: 0.000494, epsilon: 0.010012, episode:  214\n",
      "frames: 341000, reward: -7.700000, loss: 0.000869, epsilon: 0.010011, episode:  215\n",
      "frames: 342000, reward: -7.700000, loss: 0.000926, epsilon: 0.010011, episode:  215\n",
      "frames: 343000, reward: -7.700000, loss: 0.000959, epsilon: 0.010011, episode:  215\n",
      "frames: 344000, reward: -7.700000, loss: 0.000916, epsilon: 0.010010, episode:  215\n",
      "frames: 345000, reward: -7.400000, loss: 0.000871, epsilon: 0.010010, episode:  216\n",
      "frames: 346000, reward: -7.400000, loss: 0.000717, epsilon: 0.010010, episode:  216\n",
      "frames: 347000, reward: -7.400000, loss: 0.001425, epsilon: 0.010009, episode:  216\n",
      "frames: 348000, reward: -7.400000, loss: 0.000694, epsilon: 0.010009, episode:  216\n",
      "frames: 349000, reward: -7.400000, loss: 0.000832, epsilon: 0.010009, episode:  216\n",
      "frames: 350000, reward: -5.800000, loss: 0.000755, epsilon: 0.010008, episode:  217\n",
      "frames: 351000, reward: -5.800000, loss: 0.000383, epsilon: 0.010008, episode:  217\n",
      "frames: 352000, reward: -5.800000, loss: 0.000476, epsilon: 0.010008, episode:  217\n",
      "frames: 353000, reward: -3.100000, loss: 0.000891, epsilon: 0.010008, episode:  218\n",
      "frames: 354000, reward: -3.100000, loss: 0.000854, epsilon: 0.010007, episode:  218\n",
      "frames: 355000, reward: -3.100000, loss: 0.000673, epsilon: 0.010007, episode:  218\n",
      "frames: 356000, reward: -3.100000, loss: 0.002551, epsilon: 0.010007, episode:  218\n",
      "frames: 357000, reward: -1.000000, loss: 0.000838, epsilon: 0.010007, episode:  219\n",
      "frames: 358000, reward: -1.000000, loss: 0.009485, epsilon: 0.010007, episode:  219\n",
      "frames: 359000, reward: -1.000000, loss: 0.000895, epsilon: 0.010006, episode:  219\n",
      "frames: 360000, reward: -1.000000, loss: 0.001582, epsilon: 0.010006, episode:  219\n",
      "frames: 361000, reward: -0.500000, loss: 0.000856, epsilon: 0.010006, episode:  220\n",
      "frames: 362000, reward: -0.500000, loss: 0.001078, epsilon: 0.010006, episode:  220\n",
      "frames: 363000, reward: -0.500000, loss: 0.000909, epsilon: 0.010006, episode:  220\n",
      "frames: 364000, reward: 2.100000, loss: 0.001295, epsilon: 0.010005, episode:  221\n",
      "frames: 365000, reward: 2.100000, loss: 0.001647, epsilon: 0.010005, episode:  221\n",
      "frames: 366000, reward: 2.100000, loss: 0.000508, epsilon: 0.010005, episode:  221\n",
      "frames: 367000, reward: 2.100000, loss: 0.000775, epsilon: 0.010005, episode:  221\n",
      "frames: 368000, reward: 2.100000, loss: 0.000685, epsilon: 0.010005, episode:  221\n",
      "frames: 369000, reward: 2.500000, loss: 0.001577, epsilon: 0.010005, episode:  222\n",
      "frames: 370000, reward: 2.500000, loss: 0.000817, epsilon: 0.010004, episode:  222\n",
      "frames: 371000, reward: 2.500000, loss: 0.001466, epsilon: 0.010004, episode:  222\n",
      "frames: 372000, reward: 3.400000, loss: 0.000615, epsilon: 0.010004, episode:  223\n",
      "frames: 373000, reward: 3.400000, loss: 0.000829, epsilon: 0.010004, episode:  223\n",
      "frames: 374000, reward: 3.400000, loss: 0.001192, epsilon: 0.010004, episode:  223\n",
      "frames: 375000, reward: 5.700000, loss: 0.000567, epsilon: 0.010004, episode:  224\n",
      "frames: 376000, reward: 5.700000, loss: 0.004107, epsilon: 0.010004, episode:  224\n",
      "frames: 377000, reward: 5.700000, loss: 0.000541, epsilon: 0.010003, episode:  224\n",
      "frames: 378000, reward: 7.500000, loss: 0.000470, epsilon: 0.010003, episode:  225\n",
      "frames: 379000, reward: 7.500000, loss: 0.001202, epsilon: 0.010003, episode:  225\n",
      "frames: 380000, reward: 7.500000, loss: 0.002049, epsilon: 0.010003, episode:  225\n",
      "frames: 381000, reward: 9.100000, loss: 0.000565, epsilon: 0.010003, episode:  226\n",
      "frames: 382000, reward: 9.100000, loss: 0.000310, epsilon: 0.010003, episode:  226\n",
      "frames: 383000, reward: 10.900000, loss: 0.000818, epsilon: 0.010003, episode:  227\n",
      "frames: 384000, reward: 10.900000, loss: 0.001007, epsilon: 0.010003, episode:  227\n",
      "frames: 385000, reward: 10.900000, loss: 0.000420, epsilon: 0.010003, episode:  227\n",
      "frames: 386000, reward: 10.900000, loss: 0.000914, epsilon: 0.010003, episode:  227\n",
      "frames: 387000, reward: 10.900000, loss: 0.001587, epsilon: 0.010002, episode:  227\n",
      "frames: 388000, reward: 11.400000, loss: 0.002823, epsilon: 0.010002, episode:  228\n",
      "frames: 389000, reward: 11.400000, loss: 0.000430, epsilon: 0.010002, episode:  228\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 390000, reward: 11.400000, loss: 0.000517, epsilon: 0.010002, episode:  228\n",
      "frames: 391000, reward: 11.400000, loss: 0.000478, epsilon: 0.010002, episode:  228\n",
      "frames: 392000, reward: 11.800000, loss: 0.000366, epsilon: 0.010002, episode:  229\n",
      "frames: 393000, reward: 11.800000, loss: 0.000343, epsilon: 0.010002, episode:  229\n",
      "frames: 394000, reward: 13.000000, loss: 0.000945, epsilon: 0.010002, episode:  230\n",
      "frames: 395000, reward: 13.000000, loss: 0.000362, epsilon: 0.010002, episode:  230\n",
      "frames: 396000, reward: 13.000000, loss: 0.001601, epsilon: 0.010002, episode:  230\n",
      "frames: 397000, reward: 13.000000, loss: 0.001080, epsilon: 0.010002, episode:  230\n",
      "frames: 398000, reward: 12.600000, loss: 0.000766, epsilon: 0.010002, episode:  231\n",
      "frames: 399000, reward: 12.600000, loss: 0.001226, epsilon: 0.010002, episode:  231\n",
      "frames: 400000, reward: 12.600000, loss: 0.000785, epsilon: 0.010002, episode:  231\n",
      "frames: 401000, reward: 13.200000, loss: 0.000847, epsilon: 0.010002, episode:  232\n",
      "frames: 402000, reward: 13.200000, loss: 0.000859, epsilon: 0.010001, episode:  232\n",
      "frames: 403000, reward: 13.900000, loss: 0.000825, epsilon: 0.010001, episode:  233\n",
      "frames: 404000, reward: 13.900000, loss: 0.000593, epsilon: 0.010001, episode:  233\n",
      "frames: 405000, reward: 13.900000, loss: 0.000464, epsilon: 0.010001, episode:  233\n",
      "frames: 406000, reward: 13.200000, loss: 0.000496, epsilon: 0.010001, episode:  234\n",
      "frames: 407000, reward: 13.200000, loss: 0.007997, epsilon: 0.010001, episode:  234\n",
      "frames: 408000, reward: 13.600000, loss: 0.003133, epsilon: 0.010001, episode:  235\n",
      "frames: 409000, reward: 13.600000, loss: 0.000540, epsilon: 0.010001, episode:  235\n",
      "frames: 410000, reward: 13.700000, loss: 0.000937, epsilon: 0.010001, episode:  236\n",
      "frames: 411000, reward: 13.700000, loss: 0.000319, epsilon: 0.010001, episode:  236\n",
      "frames: 412000, reward: 13.700000, loss: 0.000727, epsilon: 0.010001, episode:  236\n",
      "frames: 413000, reward: 12.700000, loss: 0.001998, epsilon: 0.010001, episode:  237\n",
      "frames: 414000, reward: 12.700000, loss: 0.000864, epsilon: 0.010001, episode:  237\n",
      "frames: 415000, reward: 12.700000, loss: 0.000494, epsilon: 0.010001, episode:  237\n",
      "frames: 416000, reward: 12.400000, loss: 0.000611, epsilon: 0.010001, episode:  238\n",
      "frames: 417000, reward: 12.400000, loss: 0.000435, epsilon: 0.010001, episode:  238\n",
      "frames: 418000, reward: 12.400000, loss: 0.001220, epsilon: 0.010001, episode:  238\n",
      "frames: 419000, reward: 12.300000, loss: 0.001056, epsilon: 0.010001, episode:  239\n",
      "frames: 420000, reward: 12.300000, loss: 0.001755, epsilon: 0.010001, episode:  239\n",
      "frames: 421000, reward: 12.300000, loss: 0.000555, epsilon: 0.010001, episode:  239\n",
      "frames: 422000, reward: 12.200000, loss: 0.000915, epsilon: 0.010001, episode:  240\n",
      "frames: 423000, reward: 12.200000, loss: 0.000892, epsilon: 0.010001, episode:  240\n",
      "frames: 424000, reward: 12.200000, loss: 0.000970, epsilon: 0.010001, episode:  240\n",
      "frames: 425000, reward: 12.200000, loss: 0.000529, epsilon: 0.010001, episode:  240\n",
      "frames: 426000, reward: 12.200000, loss: 0.000637, epsilon: 0.010001, episode:  240\n",
      "frames: 427000, reward: 11.400000, loss: 0.000737, epsilon: 0.010001, episode:  241\n",
      "frames: 428000, reward: 11.400000, loss: 0.000658, epsilon: 0.010001, episode:  241\n",
      "frames: 429000, reward: 11.800000, loss: 0.000780, epsilon: 0.010001, episode:  242\n",
      "frames: 430000, reward: 11.800000, loss: 0.000540, epsilon: 0.010001, episode:  242\n",
      "frames: 431000, reward: 11.800000, loss: 0.000459, epsilon: 0.010001, episode:  242\n",
      "frames: 432000, reward: 11.800000, loss: 0.001045, epsilon: 0.010001, episode:  242\n",
      "frames: 433000, reward: 10.200000, loss: 0.000601, epsilon: 0.010001, episode:  243\n",
      "frames: 434000, reward: 10.200000, loss: 0.001026, epsilon: 0.010001, episode:  243\n",
      "frames: 435000, reward: 11.200000, loss: 0.001469, epsilon: 0.010000, episode:  244\n",
      "frames: 436000, reward: 11.200000, loss: 0.000568, epsilon: 0.010000, episode:  244\n",
      "frames: 437000, reward: 11.400000, loss: 0.000477, epsilon: 0.010000, episode:  245\n",
      "frames: 438000, reward: 11.400000, loss: 0.000468, epsilon: 0.010000, episode:  245\n",
      "frames: 439000, reward: 12.200000, loss: 0.000462, epsilon: 0.010000, episode:  246\n",
      "frames: 440000, reward: 12.200000, loss: 0.000286, epsilon: 0.010000, episode:  246\n",
      "frames: 441000, reward: 12.200000, loss: 0.000368, epsilon: 0.010000, episode:  246\n",
      "frames: 442000, reward: 12.700000, loss: 0.001756, epsilon: 0.010000, episode:  247\n",
      "frames: 443000, reward: 12.700000, loss: 0.000339, epsilon: 0.010000, episode:  247\n",
      "frames: 444000, reward: 12.700000, loss: 0.000621, epsilon: 0.010000, episode:  247\n",
      "frames: 445000, reward: 13.200000, loss: 0.000275, epsilon: 0.010000, episode:  248\n",
      "frames: 446000, reward: 13.200000, loss: 0.000499, epsilon: 0.010000, episode:  248\n",
      "frames: 447000, reward: 14.400000, loss: 0.000244, epsilon: 0.010000, episode:  249\n",
      "frames: 448000, reward: 14.900000, loss: 0.000361, epsilon: 0.010000, episode:  250\n",
      "frames: 449000, reward: 14.900000, loss: 0.000447, epsilon: 0.010000, episode:  250\n",
      "frames: 450000, reward: 14.900000, loss: 0.000287, epsilon: 0.010000, episode:  250\n",
      "frames: 451000, reward: 14.900000, loss: 0.000422, epsilon: 0.010000, episode:  250\n",
      "frames: 452000, reward: 15.300000, loss: 0.000380, epsilon: 0.010000, episode:  251\n",
      "frames: 453000, reward: 15.300000, loss: 0.000471, epsilon: 0.010000, episode:  251\n",
      "frames: 454000, reward: 15.300000, loss: 0.002078, epsilon: 0.010000, episode:  251\n",
      "frames: 455000, reward: 15.300000, loss: 0.000846, epsilon: 0.010000, episode:  251\n",
      "frames: 456000, reward: 14.700000, loss: 0.001168, epsilon: 0.010000, episode:  252\n",
      "frames: 457000, reward: 14.700000, loss: 0.001181, epsilon: 0.010000, episode:  252\n",
      "frames: 458000, reward: 14.700000, loss: 0.000556, epsilon: 0.010000, episode:  252\n",
      "frames: 459000, reward: 14.700000, loss: 0.000512, epsilon: 0.010000, episode:  252\n",
      "frames: 460000, reward: 15.800000, loss: 0.001810, epsilon: 0.010000, episode:  253\n",
      "frames: 461000, reward: 15.800000, loss: 0.001775, epsilon: 0.010000, episode:  253\n",
      "frames: 462000, reward: 15.800000, loss: 0.000597, epsilon: 0.010000, episode:  253\n",
      "frames: 463000, reward: 15.300000, loss: 0.000762, epsilon: 0.010000, episode:  254\n",
      "frames: 464000, reward: 15.300000, loss: 0.001391, epsilon: 0.010000, episode:  254\n",
      "frames: 465000, reward: 15.100000, loss: 0.007689, epsilon: 0.010000, episode:  255\n",
      "frames: 466000, reward: 15.100000, loss: 0.001668, epsilon: 0.010000, episode:  255\n",
      "frames: 467000, reward: 15.100000, loss: 0.001260, epsilon: 0.010000, episode:  255\n",
      "frames: 468000, reward: 15.100000, loss: 0.000437, epsilon: 0.010000, episode:  255\n",
      "frames: 469000, reward: 15.100000, loss: 0.000382, epsilon: 0.010000, episode:  255\n",
      "frames: 470000, reward: 15.100000, loss: 0.000617, epsilon: 0.010000, episode:  255\n",
      "frames: 471000, reward: 14.200000, loss: 0.000450, epsilon: 0.010000, episode:  256\n",
      "frames: 472000, reward: 14.200000, loss: 0.000576, epsilon: 0.010000, episode:  256\n",
      "frames: 473000, reward: 14.200000, loss: 0.000252, epsilon: 0.010000, episode:  256\n",
      "frames: 474000, reward: 14.300000, loss: 0.000326, epsilon: 0.010000, episode:  257\n",
      "frames: 475000, reward: 14.300000, loss: 0.000472, epsilon: 0.010000, episode:  257\n",
      "frames: 476000, reward: 14.300000, loss: 0.000519, epsilon: 0.010000, episode:  257\n",
      "frames: 477000, reward: 14.500000, loss: 0.001663, epsilon: 0.010000, episode:  258\n",
      "frames: 478000, reward: 14.500000, loss: 0.000146, epsilon: 0.010000, episode:  258\n",
      "frames: 479000, reward: 14.300000, loss: 0.000390, epsilon: 0.010000, episode:  259\n",
      "frames: 480000, reward: 14.300000, loss: 0.000319, epsilon: 0.010000, episode:  259\n",
      "frames: 481000, reward: 14.200000, loss: 0.000576, epsilon: 0.010000, episode:  260\n",
      "frames: 482000, reward: 14.200000, loss: 0.000653, epsilon: 0.010000, episode:  260\n",
      "frames: 483000, reward: 14.200000, loss: 0.000298, epsilon: 0.010000, episode:  260\n",
      "frames: 484000, reward: 15.000000, loss: 0.000517, epsilon: 0.010000, episode:  261\n",
      "frames: 485000, reward: 15.000000, loss: 0.000395, epsilon: 0.010000, episode:  261\n",
      "frames: 486000, reward: 15.700000, loss: 0.000386, epsilon: 0.010000, episode:  262\n",
      "frames: 487000, reward: 15.700000, loss: 0.000269, epsilon: 0.010000, episode:  262\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 488000, reward: 16.500000, loss: 0.000451, epsilon: 0.010000, episode:  263\n",
      "frames: 489000, reward: 16.500000, loss: 0.000457, epsilon: 0.010000, episode:  263\n",
      "frames: 490000, reward: 16.500000, loss: 0.000395, epsilon: 0.010000, episode:  263\n",
      "frames: 491000, reward: 17.100000, loss: 0.000307, epsilon: 0.010000, episode:  264\n",
      "frames: 492000, reward: 17.100000, loss: 0.000757, epsilon: 0.010000, episode:  264\n",
      "frames: 493000, reward: 17.200000, loss: 0.000166, epsilon: 0.010000, episode:  265\n",
      "frames: 494000, reward: 18.300000, loss: 0.000594, epsilon: 0.010000, episode:  266\n",
      "frames: 495000, reward: 18.300000, loss: 0.000166, epsilon: 0.010000, episode:  266\n",
      "frames: 496000, reward: 18.300000, loss: 0.000963, epsilon: 0.010000, episode:  266\n",
      "frames: 497000, reward: 18.200000, loss: 0.000206, epsilon: 0.010000, episode:  267\n",
      "frames: 498000, reward: 18.200000, loss: 0.002087, epsilon: 0.010000, episode:  267\n",
      "frames: 499000, reward: 18.200000, loss: 0.000338, epsilon: 0.010000, episode:  267\n",
      "frames: 500000, reward: 18.500000, loss: 0.000190, epsilon: 0.010000, episode:  268\n",
      "frames: 501000, reward: 18.800000, loss: 0.000246, epsilon: 0.010000, episode:  269\n",
      "frames: 502000, reward: 18.800000, loss: 0.000239, epsilon: 0.010000, episode:  269\n",
      "frames: 503000, reward: 18.800000, loss: 0.000307, epsilon: 0.010000, episode:  269\n",
      "frames: 504000, reward: 18.500000, loss: 0.000429, epsilon: 0.010000, episode:  270\n",
      "frames: 505000, reward: 18.500000, loss: 0.000402, epsilon: 0.010000, episode:  270\n",
      "frames: 506000, reward: 18.500000, loss: 0.000335, epsilon: 0.010000, episode:  270\n",
      "frames: 507000, reward: 18.600000, loss: 0.000176, epsilon: 0.010000, episode:  271\n",
      "frames: 508000, reward: 18.600000, loss: 0.000648, epsilon: 0.010000, episode:  271\n",
      "frames: 509000, reward: 18.700000, loss: 0.000662, epsilon: 0.010000, episode:  272\n",
      "frames: 510000, reward: 18.700000, loss: 0.000493, epsilon: 0.010000, episode:  272\n",
      "frames: 511000, reward: 18.300000, loss: 0.000262, epsilon: 0.010000, episode:  273\n",
      "frames: 512000, reward: 18.300000, loss: 0.000349, epsilon: 0.010000, episode:  273\n",
      "frames: 513000, reward: 18.300000, loss: 0.000688, epsilon: 0.010000, episode:  273\n",
      "frames: 514000, reward: 18.300000, loss: 0.000283, epsilon: 0.010000, episode:  273\n",
      "frames: 515000, reward: 18.300000, loss: 0.000187, epsilon: 0.010000, episode:  274\n",
      "frames: 516000, reward: 18.300000, loss: 0.000794, epsilon: 0.010000, episode:  274\n",
      "frames: 517000, reward: 18.400000, loss: 0.000223, epsilon: 0.010000, episode:  275\n",
      "frames: 518000, reward: 18.400000, loss: 0.000425, epsilon: 0.010000, episode:  275\n",
      "frames: 519000, reward: 18.400000, loss: 0.000280, epsilon: 0.010000, episode:  275\n",
      "frames: 520000, reward: 17.400000, loss: 0.000294, epsilon: 0.010000, episode:  276\n",
      "frames: 521000, reward: 18.100000, loss: 0.000349, epsilon: 0.010000, episode:  277\n",
      "frames: 522000, reward: 18.100000, loss: 0.001297, epsilon: 0.010000, episode:  277\n",
      "frames: 523000, reward: 18.100000, loss: 0.001170, epsilon: 0.010000, episode:  278\n",
      "frames: 524000, reward: 18.100000, loss: 0.000894, epsilon: 0.010000, episode:  278\n",
      "frames: 525000, reward: 18.100000, loss: 0.000592, epsilon: 0.010000, episode:  278\n",
      "frames: 526000, reward: 18.000000, loss: 0.000426, epsilon: 0.010000, episode:  279\n",
      "frames: 527000, reward: 18.000000, loss: 0.000222, epsilon: 0.010000, episode:  279\n",
      "frames: 528000, reward: 18.500000, loss: 0.000558, epsilon: 0.010000, episode:  280\n",
      "frames: 529000, reward: 18.500000, loss: 0.000224, epsilon: 0.010000, episode:  281\n",
      "frames: 530000, reward: 18.500000, loss: 0.000544, epsilon: 0.010000, episode:  281\n",
      "frames: 531000, reward: 18.700000, loss: 0.000305, epsilon: 0.010000, episode:  282\n",
      "frames: 532000, reward: 18.700000, loss: 0.001310, epsilon: 0.010000, episode:  282\n",
      "frames: 533000, reward: 18.900000, loss: 0.001155, epsilon: 0.010000, episode:  283\n",
      "frames: 534000, reward: 18.900000, loss: 0.000515, epsilon: 0.010000, episode:  283\n",
      "frames: 535000, reward: 18.900000, loss: 0.000512, epsilon: 0.010000, episode:  283\n",
      "frames: 536000, reward: 18.800000, loss: 0.003639, epsilon: 0.010000, episode:  284\n",
      "frames: 537000, reward: 18.800000, loss: 0.000283, epsilon: 0.010000, episode:  284\n",
      "frames: 538000, reward: 18.500000, loss: 0.001401, epsilon: 0.010000, episode:  285\n",
      "frames: 539000, reward: 18.500000, loss: 0.000538, epsilon: 0.010000, episode:  285\n",
      "frames: 540000, reward: 19.100000, loss: 0.000277, epsilon: 0.010000, episode:  286\n",
      "frames: 541000, reward: 19.100000, loss: 0.000442, epsilon: 0.010000, episode:  286\n",
      "frames: 542000, reward: 19.000000, loss: 0.000388, epsilon: 0.010000, episode:  287\n",
      "frames: 543000, reward: 19.200000, loss: 0.000434, epsilon: 0.010000, episode:  288\n",
      "frames: 544000, reward: 19.200000, loss: 0.000147, epsilon: 0.010000, episode:  288\n",
      "frames: 545000, reward: 19.200000, loss: 0.000723, epsilon: 0.010000, episode:  288\n",
      "frames: 546000, reward: 18.900000, loss: 0.001064, epsilon: 0.010000, episode:  289\n",
      "frames: 547000, reward: 18.900000, loss: 0.000125, epsilon: 0.010000, episode:  289\n",
      "frames: 548000, reward: 18.700000, loss: 0.000206, epsilon: 0.010000, episode:  290\n",
      "frames: 549000, reward: 18.300000, loss: 0.000513, epsilon: 0.010000, episode:  291\n",
      "frames: 550000, reward: 18.300000, loss: 0.000305, epsilon: 0.010000, episode:  291\n",
      "frames: 551000, reward: 18.300000, loss: 0.000207, epsilon: 0.010000, episode:  291\n",
      "frames: 552000, reward: 17.900000, loss: 0.001105, epsilon: 0.010000, episode:  292\n",
      "frames: 553000, reward: 17.900000, loss: 0.001345, epsilon: 0.010000, episode:  292\n",
      "frames: 554000, reward: 18.100000, loss: 0.000524, epsilon: 0.010000, episode:  293\n",
      "frames: 555000, reward: 18.100000, loss: 0.000624, epsilon: 0.010000, episode:  293\n",
      "frames: 556000, reward: 18.200000, loss: 0.000193, epsilon: 0.010000, episode:  294\n",
      "frames: 557000, reward: 18.600000, loss: 0.000382, epsilon: 0.010000, episode:  295\n",
      "frames: 558000, reward: 18.600000, loss: 0.000494, epsilon: 0.010000, episode:  295\n",
      "frames: 559000, reward: 18.600000, loss: 0.003458, epsilon: 0.010000, episode:  295\n",
      "frames: 560000, reward: 18.600000, loss: 0.000456, epsilon: 0.010000, episode:  295\n",
      "frames: 561000, reward: 17.600000, loss: 0.000382, epsilon: 0.010000, episode:  296\n",
      "frames: 562000, reward: 17.600000, loss: 0.000517, epsilon: 0.010000, episode:  297\n",
      "frames: 563000, reward: 17.600000, loss: 0.000469, epsilon: 0.010000, episode:  297\n",
      "frames: 564000, reward: 17.600000, loss: 0.000470, epsilon: 0.010000, episode:  297\n",
      "frames: 565000, reward: 17.300000, loss: 0.000716, epsilon: 0.010000, episode:  298\n",
      "frames: 566000, reward: 17.300000, loss: 0.000191, epsilon: 0.010000, episode:  298\n",
      "frames: 567000, reward: 17.600000, loss: 0.000374, epsilon: 0.010000, episode:  299\n",
      "frames: 568000, reward: 17.800000, loss: 0.000337, epsilon: 0.010000, episode:  300\n",
      "frames: 569000, reward: 17.800000, loss: 0.000345, epsilon: 0.010000, episode:  300\n",
      "frames: 570000, reward: 17.800000, loss: 0.000169, epsilon: 0.010000, episode:  300\n",
      "frames: 571000, reward: 18.300000, loss: 0.000209, epsilon: 0.010000, episode:  301\n",
      "frames: 572000, reward: 18.300000, loss: 0.000700, epsilon: 0.010000, episode:  301\n",
      "frames: 573000, reward: 18.300000, loss: 0.000605, epsilon: 0.010000, episode:  301\n",
      "frames: 574000, reward: 17.300000, loss: 0.000587, epsilon: 0.010000, episode:  302\n",
      "frames: 575000, reward: 17.300000, loss: 0.000387, epsilon: 0.010000, episode:  302\n",
      "frames: 576000, reward: 17.200000, loss: 0.000169, epsilon: 0.010000, episode:  303\n",
      "frames: 577000, reward: 17.200000, loss: 0.000209, epsilon: 0.010000, episode:  303\n",
      "frames: 578000, reward: 17.400000, loss: 0.001630, epsilon: 0.010000, episode:  304\n",
      "frames: 579000, reward: 17.400000, loss: 0.000449, epsilon: 0.010000, episode:  304\n",
      "frames: 580000, reward: 17.300000, loss: 0.000236, epsilon: 0.010000, episode:  305\n",
      "frames: 581000, reward: 18.500000, loss: 0.003775, epsilon: 0.010000, episode:  306\n",
      "frames: 582000, reward: 18.500000, loss: 0.000383, epsilon: 0.010000, episode:  306\n",
      "frames: 583000, reward: 18.600000, loss: 0.000424, epsilon: 0.010000, episode:  307\n",
      "frames: 584000, reward: 18.600000, loss: 0.000281, epsilon: 0.010000, episode:  307\n",
      "frames: 585000, reward: 18.800000, loss: 0.000416, epsilon: 0.010000, episode:  308\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 586000, reward: 18.800000, loss: 0.000458, epsilon: 0.010000, episode:  308\n",
      "frames: 587000, reward: 18.900000, loss: 0.000156, epsilon: 0.010000, episode:  309\n",
      "frames: 588000, reward: 18.900000, loss: 0.000322, epsilon: 0.010000, episode:  309\n",
      "frames: 589000, reward: 18.600000, loss: 0.000135, epsilon: 0.010000, episode:  310\n",
      "frames: 590000, reward: 18.600000, loss: 0.000470, epsilon: 0.010000, episode:  310\n",
      "frames: 591000, reward: 17.900000, loss: 0.001390, epsilon: 0.010000, episode:  311\n",
      "frames: 592000, reward: 17.900000, loss: 0.000514, epsilon: 0.010000, episode:  311\n",
      "frames: 593000, reward: 17.900000, loss: 0.000357, epsilon: 0.010000, episode:  311\n",
      "frames: 594000, reward: 18.800000, loss: 0.000424, epsilon: 0.010000, episode:  312\n",
      "frames: 595000, reward: 18.800000, loss: 0.000245, epsilon: 0.010000, episode:  312\n",
      "frames: 596000, reward: 18.800000, loss: 0.000372, epsilon: 0.010000, episode:  313\n",
      "frames: 597000, reward: 18.800000, loss: 0.000339, epsilon: 0.010000, episode:  313\n",
      "frames: 598000, reward: 18.600000, loss: 0.000328, epsilon: 0.010000, episode:  314\n",
      "frames: 599000, reward: 18.600000, loss: 0.000206, epsilon: 0.010000, episode:  314\n",
      "frames: 600000, reward: 18.600000, loss: 0.000650, epsilon: 0.010000, episode:  315\n",
      "frames: 601000, reward: 18.600000, loss: 0.000299, epsilon: 0.010000, episode:  315\n",
      "frames: 602000, reward: 18.200000, loss: 0.000789, epsilon: 0.010000, episode:  316\n",
      "frames: 603000, reward: 18.200000, loss: 0.000138, epsilon: 0.010000, episode:  316\n",
      "frames: 604000, reward: 17.700000, loss: 0.000262, epsilon: 0.010000, episode:  317\n",
      "frames: 605000, reward: 17.700000, loss: 0.000173, epsilon: 0.010000, episode:  317\n",
      "frames: 606000, reward: 17.400000, loss: 0.000282, epsilon: 0.010000, episode:  318\n",
      "frames: 607000, reward: 17.400000, loss: 0.000192, epsilon: 0.010000, episode:  318\n",
      "frames: 608000, reward: 17.200000, loss: 0.000322, epsilon: 0.010000, episode:  319\n",
      "frames: 609000, reward: 17.200000, loss: 0.000259, epsilon: 0.010000, episode:  319\n",
      "frames: 610000, reward: 17.200000, loss: 0.000557, epsilon: 0.010000, episode:  320\n",
      "frames: 611000, reward: 17.200000, loss: 0.002122, epsilon: 0.010000, episode:  320\n",
      "frames: 612000, reward: 17.200000, loss: 0.000258, epsilon: 0.010000, episode:  320\n",
      "frames: 613000, reward: 18.000000, loss: 0.000772, epsilon: 0.010000, episode:  321\n",
      "frames: 614000, reward: 18.400000, loss: 0.000250, epsilon: 0.010000, episode:  322\n",
      "frames: 615000, reward: 18.400000, loss: 0.000596, epsilon: 0.010000, episode:  322\n",
      "frames: 616000, reward: 18.400000, loss: 0.000438, epsilon: 0.010000, episode:  322\n",
      "frames: 617000, reward: 18.300000, loss: 0.000260, epsilon: 0.010000, episode:  323\n",
      "frames: 618000, reward: 18.300000, loss: 0.000302, epsilon: 0.010000, episode:  323\n",
      "frames: 619000, reward: 18.300000, loss: 0.000457, epsilon: 0.010000, episode:  324\n",
      "frames: 620000, reward: 18.300000, loss: 0.000171, epsilon: 0.010000, episode:  324\n",
      "frames: 621000, reward: 18.300000, loss: 0.000134, epsilon: 0.010000, episode:  324\n",
      "frames: 622000, reward: 18.100000, loss: 0.000496, epsilon: 0.010000, episode:  325\n",
      "frames: 623000, reward: 18.100000, loss: 0.000159, epsilon: 0.010000, episode:  325\n",
      "frames: 624000, reward: 18.500000, loss: 0.000295, epsilon: 0.010000, episode:  326\n",
      "frames: 625000, reward: 18.500000, loss: 0.000240, epsilon: 0.010000, episode:  326\n",
      "frames: 626000, reward: 18.700000, loss: 0.000338, epsilon: 0.010000, episode:  327\n",
      "frames: 627000, reward: 18.700000, loss: 0.000383, epsilon: 0.010000, episode:  327\n",
      "frames: 628000, reward: 18.900000, loss: 0.002514, epsilon: 0.010000, episode:  328\n",
      "frames: 629000, reward: 18.900000, loss: 0.000518, epsilon: 0.010000, episode:  328\n",
      "frames: 630000, reward: 19.100000, loss: 0.000298, epsilon: 0.010000, episode:  329\n",
      "frames: 631000, reward: 19.100000, loss: 0.000260, epsilon: 0.010000, episode:  329\n",
      "frames: 632000, reward: 19.100000, loss: 0.000352, epsilon: 0.010000, episode:  329\n",
      "frames: 633000, reward: 18.600000, loss: 0.003444, epsilon: 0.010000, episode:  330\n",
      "frames: 634000, reward: 18.600000, loss: 0.000709, epsilon: 0.010000, episode:  330\n",
      "frames: 635000, reward: 18.600000, loss: 0.000222, epsilon: 0.010000, episode:  330\n",
      "frames: 636000, reward: 18.200000, loss: 0.000296, epsilon: 0.010000, episode:  331\n",
      "frames: 637000, reward: 18.200000, loss: 0.000077, epsilon: 0.010000, episode:  331\n",
      "frames: 638000, reward: 18.200000, loss: 0.000337, epsilon: 0.010000, episode:  332\n",
      "frames: 639000, reward: 18.200000, loss: 0.000319, epsilon: 0.010000, episode:  332\n",
      "frames: 640000, reward: 18.200000, loss: 0.000135, epsilon: 0.010000, episode:  333\n",
      "frames: 641000, reward: 18.200000, loss: 0.000200, epsilon: 0.010000, episode:  333\n",
      "frames: 642000, reward: 18.200000, loss: 0.000273, epsilon: 0.010000, episode:  333\n",
      "frames: 643000, reward: 18.000000, loss: 0.000197, epsilon: 0.010000, episode:  334\n",
      "frames: 644000, reward: 18.300000, loss: 0.006308, epsilon: 0.010000, episode:  335\n",
      "frames: 645000, reward: 18.300000, loss: 0.000222, epsilon: 0.010000, episode:  335\n",
      "frames: 646000, reward: 18.500000, loss: 0.000479, epsilon: 0.010000, episode:  336\n",
      "frames: 647000, reward: 18.500000, loss: 0.000357, epsilon: 0.010000, episode:  336\n",
      "frames: 648000, reward: 18.500000, loss: 0.000228, epsilon: 0.010000, episode:  336\n",
      "frames: 649000, reward: 18.000000, loss: 0.001196, epsilon: 0.010000, episode:  337\n",
      "frames: 650000, reward: 18.000000, loss: 0.001014, epsilon: 0.010000, episode:  337\n",
      "frames: 651000, reward: 18.000000, loss: 0.000604, epsilon: 0.010000, episode:  337\n",
      "frames: 652000, reward: 17.400000, loss: 0.000695, epsilon: 0.010000, episode:  338\n",
      "frames: 653000, reward: 17.400000, loss: 0.000395, epsilon: 0.010000, episode:  338\n",
      "frames: 654000, reward: 17.200000, loss: 0.000928, epsilon: 0.010000, episode:  339\n",
      "frames: 655000, reward: 17.200000, loss: 0.000530, epsilon: 0.010000, episode:  339\n",
      "frames: 656000, reward: 17.900000, loss: 0.000329, epsilon: 0.010000, episode:  340\n",
      "frames: 657000, reward: 17.900000, loss: 0.000209, epsilon: 0.010000, episode:  340\n",
      "frames: 658000, reward: 17.900000, loss: 0.000164, epsilon: 0.010000, episode:  340\n",
      "frames: 659000, reward: 18.000000, loss: 0.002413, epsilon: 0.010000, episode:  341\n",
      "frames: 660000, reward: 18.000000, loss: 0.000383, epsilon: 0.010000, episode:  341\n",
      "frames: 661000, reward: 18.000000, loss: 0.000234, epsilon: 0.010000, episode:  342\n",
      "frames: 662000, reward: 18.000000, loss: 0.000513, epsilon: 0.010000, episode:  342\n",
      "frames: 663000, reward: 18.100000, loss: 0.000570, epsilon: 0.010000, episode:  343\n",
      "frames: 664000, reward: 18.100000, loss: 0.000296, epsilon: 0.010000, episode:  343\n",
      "frames: 665000, reward: 18.300000, loss: 0.000215, epsilon: 0.010000, episode:  344\n",
      "frames: 666000, reward: 18.300000, loss: 0.000351, epsilon: 0.010000, episode:  344\n",
      "frames: 667000, reward: 18.200000, loss: 0.000164, epsilon: 0.010000, episode:  345\n",
      "frames: 668000, reward: 18.000000, loss: 0.014242, epsilon: 0.010000, episode:  346\n",
      "frames: 669000, reward: 18.000000, loss: 0.000213, epsilon: 0.010000, episode:  346\n",
      "frames: 670000, reward: 18.700000, loss: 0.001033, epsilon: 0.010000, episode:  347\n",
      "frames: 671000, reward: 18.700000, loss: 0.000396, epsilon: 0.010000, episode:  347\n",
      "frames: 672000, reward: 19.500000, loss: 0.000187, epsilon: 0.010000, episode:  348\n",
      "frames: 673000, reward: 19.500000, loss: 0.000171, epsilon: 0.010000, episode:  348\n",
      "frames: 674000, reward: 19.700000, loss: 0.000095, epsilon: 0.010000, episode:  349\n",
      "frames: 675000, reward: 19.700000, loss: 0.000104, epsilon: 0.010000, episode:  349\n",
      "frames: 676000, reward: 19.800000, loss: 0.000136, epsilon: 0.010000, episode:  350\n",
      "frames: 677000, reward: 20.000000, loss: 0.000184, epsilon: 0.010000, episode:  351\n",
      "frames: 678000, reward: 20.000000, loss: 0.000107, epsilon: 0.010000, episode:  351\n",
      "frames: 679000, reward: 19.900000, loss: 0.000274, epsilon: 0.010000, episode:  352\n",
      "frames: 680000, reward: 19.900000, loss: 0.000417, epsilon: 0.010000, episode:  352\n",
      "frames: 681000, reward: 19.900000, loss: 0.000441, epsilon: 0.010000, episode:  353\n",
      "frames: 682000, reward: 19.900000, loss: 0.000304, epsilon: 0.010000, episode:  353\n",
      "frames: 683000, reward: 19.900000, loss: 0.000236, epsilon: 0.010000, episode:  354\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 684000, reward: 19.900000, loss: 0.000649, epsilon: 0.010000, episode:  354\n",
      "frames: 685000, reward: 19.900000, loss: 0.000123, epsilon: 0.010000, episode:  355\n",
      "frames: 686000, reward: 20.000000, loss: 0.000188, epsilon: 0.010000, episode:  356\n",
      "frames: 687000, reward: 20.000000, loss: 0.000329, epsilon: 0.010000, episode:  356\n",
      "frames: 688000, reward: 19.900000, loss: 0.000221, epsilon: 0.010000, episode:  357\n",
      "frames: 689000, reward: 19.900000, loss: 0.000209, epsilon: 0.010000, episode:  357\n",
      "frames: 690000, reward: 20.000000, loss: 0.000594, epsilon: 0.010000, episode:  358\n",
      "frames: 691000, reward: 20.000000, loss: 0.000157, epsilon: 0.010000, episode:  358\n",
      "frames: 692000, reward: 20.000000, loss: 0.000091, epsilon: 0.010000, episode:  359\n",
      "frames: 693000, reward: 20.000000, loss: 0.000083, epsilon: 0.010000, episode:  360\n",
      "frames: 694000, reward: 20.000000, loss: 0.000246, epsilon: 0.010000, episode:  360\n",
      "frames: 695000, reward: 20.000000, loss: 0.000117, epsilon: 0.010000, episode:  361\n",
      "frames: 696000, reward: 20.000000, loss: 0.000066, epsilon: 0.010000, episode:  361\n",
      "frames: 697000, reward: 20.100000, loss: 0.000212, epsilon: 0.010000, episode:  362\n",
      "frames: 698000, reward: 20.100000, loss: 0.000179, epsilon: 0.010000, episode:  362\n",
      "frames: 699000, reward: 19.800000, loss: 0.000223, epsilon: 0.010000, episode:  363\n",
      "frames: 700000, reward: 19.900000, loss: 0.000360, epsilon: 0.010000, episode:  364\n",
      "frames: 701000, reward: 19.900000, loss: 0.000171, epsilon: 0.010000, episode:  364\n",
      "frames: 702000, reward: 19.800000, loss: 0.000247, epsilon: 0.010000, episode:  365\n",
      "frames: 703000, reward: 19.800000, loss: 0.000239, epsilon: 0.010000, episode:  365\n",
      "frames: 704000, reward: 19.900000, loss: 0.000250, epsilon: 0.010000, episode:  366\n",
      "frames: 705000, reward: 19.900000, loss: 0.000528, epsilon: 0.010000, episode:  366\n",
      "frames: 706000, reward: 20.100000, loss: 0.001117, epsilon: 0.010000, episode:  367\n",
      "frames: 707000, reward: 20.100000, loss: 0.000157, epsilon: 0.010000, episode:  367\n",
      "frames: 708000, reward: 20.000000, loss: 0.000416, epsilon: 0.010000, episode:  368\n",
      "frames: 709000, reward: 20.000000, loss: 0.000315, epsilon: 0.010000, episode:  368\n",
      "frames: 710000, reward: 19.500000, loss: 0.000461, epsilon: 0.010000, episode:  369\n",
      "frames: 711000, reward: 19.500000, loss: 0.000224, epsilon: 0.010000, episode:  369\n",
      "frames: 712000, reward: 18.600000, loss: 0.000232, epsilon: 0.010000, episode:  370\n",
      "frames: 713000, reward: 18.600000, loss: 0.000189, epsilon: 0.010000, episode:  370\n",
      "frames: 714000, reward: 18.500000, loss: 0.000388, epsilon: 0.010000, episode:  371\n",
      "frames: 715000, reward: 18.500000, loss: 0.000211, epsilon: 0.010000, episode:  371\n",
      "frames: 716000, reward: 18.500000, loss: 0.001182, epsilon: 0.010000, episode:  371\n",
      "frames: 717000, reward: 17.300000, loss: 0.000474, epsilon: 0.010000, episode:  372\n",
      "frames: 718000, reward: 17.300000, loss: 0.000414, epsilon: 0.010000, episode:  372\n",
      "frames: 719000, reward: 17.300000, loss: 0.000222, epsilon: 0.010000, episode:  372\n",
      "frames: 720000, reward: 17.300000, loss: 0.000242, epsilon: 0.010000, episode:  372\n",
      "frames: 721000, reward: 16.400000, loss: 0.000243, epsilon: 0.010000, episode:  373\n",
      "frames: 722000, reward: 16.400000, loss: 0.000272, epsilon: 0.010000, episode:  374\n",
      "frames: 723000, reward: 16.400000, loss: 0.000189, epsilon: 0.010000, episode:  374\n",
      "frames: 724000, reward: 16.400000, loss: 0.000459, epsilon: 0.010000, episode:  375\n",
      "frames: 725000, reward: 16.400000, loss: 0.000456, epsilon: 0.010000, episode:  375\n",
      "frames: 726000, reward: 16.300000, loss: 0.000149, epsilon: 0.010000, episode:  376\n",
      "frames: 727000, reward: 16.300000, loss: 0.000343, epsilon: 0.010000, episode:  376\n",
      "frames: 728000, reward: 15.900000, loss: 0.000150, epsilon: 0.010000, episode:  377\n",
      "frames: 729000, reward: 15.900000, loss: 0.000314, epsilon: 0.010000, episode:  377\n",
      "frames: 730000, reward: 15.700000, loss: 0.000428, epsilon: 0.010000, episode:  378\n",
      "frames: 731000, reward: 15.700000, loss: 0.000112, epsilon: 0.010000, episode:  378\n",
      "frames: 732000, reward: 16.200000, loss: 0.000852, epsilon: 0.010000, episode:  379\n",
      "frames: 733000, reward: 17.000000, loss: 0.000162, epsilon: 0.010000, episode:  380\n",
      "frames: 734000, reward: 17.000000, loss: 0.000271, epsilon: 0.010000, episode:  380\n",
      "frames: 735000, reward: 17.100000, loss: 0.002898, epsilon: 0.010000, episode:  381\n",
      "frames: 736000, reward: 17.100000, loss: 0.000154, epsilon: 0.010000, episode:  381\n",
      "frames: 737000, reward: 18.300000, loss: 0.000301, epsilon: 0.010000, episode:  382\n",
      "frames: 738000, reward: 18.300000, loss: 0.000213, epsilon: 0.010000, episode:  382\n",
      "frames: 739000, reward: 19.100000, loss: 0.000151, epsilon: 0.010000, episode:  383\n",
      "frames: 740000, reward: 19.100000, loss: 0.000130, epsilon: 0.010000, episode:  383\n",
      "frames: 741000, reward: 19.100000, loss: 0.000105, epsilon: 0.010000, episode:  384\n",
      "frames: 742000, reward: 19.200000, loss: 0.000559, epsilon: 0.010000, episode:  385\n",
      "frames: 743000, reward: 19.200000, loss: 0.000166, epsilon: 0.010000, episode:  385\n",
      "frames: 744000, reward: 19.200000, loss: 0.000157, epsilon: 0.010000, episode:  386\n",
      "frames: 745000, reward: 19.200000, loss: 0.000089, epsilon: 0.010000, episode:  386\n",
      "frames: 746000, reward: 19.500000, loss: 0.000088, epsilon: 0.010000, episode:  387\n",
      "frames: 747000, reward: 19.800000, loss: 0.000084, epsilon: 0.010000, episode:  388\n",
      "frames: 748000, reward: 19.800000, loss: 0.000140, epsilon: 0.010000, episode:  388\n",
      "frames: 749000, reward: 19.700000, loss: 0.000104, epsilon: 0.010000, episode:  389\n",
      "frames: 750000, reward: 19.700000, loss: 0.000039, epsilon: 0.010000, episode:  389\n",
      "frames: 751000, reward: 19.700000, loss: 0.000105, epsilon: 0.010000, episode:  390\n",
      "frames: 752000, reward: 19.700000, loss: 0.000031, epsilon: 0.010000, episode:  390\n",
      "frames: 753000, reward: 19.700000, loss: 0.000043, epsilon: 0.010000, episode:  391\n",
      "frames: 754000, reward: 19.700000, loss: 0.000118, epsilon: 0.010000, episode:  391\n",
      "frames: 755000, reward: 19.500000, loss: 0.000158, epsilon: 0.010000, episode:  392\n",
      "frames: 756000, reward: 19.500000, loss: 0.000078, epsilon: 0.010000, episode:  392\n",
      "frames: 757000, reward: 18.900000, loss: 0.000135, epsilon: 0.010000, episode:  393\n",
      "frames: 758000, reward: 18.900000, loss: 0.000153, epsilon: 0.010000, episode:  394\n",
      "frames: 759000, reward: 18.900000, loss: 0.000124, epsilon: 0.010000, episode:  394\n",
      "frames: 760000, reward: 18.900000, loss: 0.000166, epsilon: 0.010000, episode:  395\n",
      "frames: 761000, reward: 18.900000, loss: 0.000296, epsilon: 0.010000, episode:  395\n",
      "frames: 762000, reward: 18.900000, loss: 0.000183, epsilon: 0.010000, episode:  396\n",
      "frames: 763000, reward: 18.900000, loss: 0.000070, epsilon: 0.010000, episode:  396\n",
      "frames: 764000, reward: 18.900000, loss: 0.000055, epsilon: 0.010000, episode:  397\n",
      "frames: 765000, reward: 18.900000, loss: 0.001856, epsilon: 0.010000, episode:  398\n",
      "frames: 766000, reward: 18.900000, loss: 0.000337, epsilon: 0.010000, episode:  398\n",
      "frames: 767000, reward: 18.900000, loss: 0.000206, epsilon: 0.010000, episode:  398\n",
      "frames: 768000, reward: 18.700000, loss: 0.000126, epsilon: 0.010000, episode:  399\n",
      "frames: 769000, reward: 18.800000, loss: 0.000067, epsilon: 0.010000, episode:  400\n",
      "frames: 770000, reward: 18.800000, loss: 0.000097, epsilon: 0.010000, episode:  400\n",
      "frames: 771000, reward: 18.700000, loss: 0.000151, epsilon: 0.010000, episode:  401\n",
      "frames: 772000, reward: 18.700000, loss: 0.000090, epsilon: 0.010000, episode:  401\n",
      "frames: 773000, reward: 18.900000, loss: 0.000149, epsilon: 0.010000, episode:  402\n",
      "frames: 774000, reward: 18.900000, loss: 0.000068, epsilon: 0.010000, episode:  402\n",
      "frames: 775000, reward: 20.000000, loss: 0.000343, epsilon: 0.010000, episode:  403\n",
      "frames: 776000, reward: 20.000000, loss: 0.000167, epsilon: 0.010000, episode:  404\n",
      "frames: 777000, reward: 20.000000, loss: 0.000075, epsilon: 0.010000, episode:  404\n",
      "frames: 778000, reward: 19.900000, loss: 0.000153, epsilon: 0.010000, episode:  405\n",
      "frames: 779000, reward: 19.900000, loss: 0.000168, epsilon: 0.010000, episode:  405\n",
      "frames: 780000, reward: 19.900000, loss: 0.000089, epsilon: 0.010000, episode:  406\n",
      "frames: 781000, reward: 19.900000, loss: 0.000083, epsilon: 0.010000, episode:  406\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 782000, reward: 19.900000, loss: 0.000066, epsilon: 0.010000, episode:  407\n",
      "frames: 783000, reward: 19.900000, loss: 0.000501, epsilon: 0.010000, episode:  407\n",
      "frames: 784000, reward: 19.700000, loss: 0.000275, epsilon: 0.010000, episode:  408\n",
      "frames: 785000, reward: 19.700000, loss: 0.000192, epsilon: 0.010000, episode:  408\n",
      "frames: 786000, reward: 19.600000, loss: 0.000138, epsilon: 0.010000, episode:  409\n",
      "frames: 787000, reward: 19.400000, loss: 0.000674, epsilon: 0.010000, episode:  410\n",
      "frames: 788000, reward: 19.400000, loss: 0.000825, epsilon: 0.010000, episode:  410\n",
      "frames: 789000, reward: 19.300000, loss: 0.000440, epsilon: 0.010000, episode:  411\n",
      "frames: 790000, reward: 19.300000, loss: 0.000366, epsilon: 0.010000, episode:  411\n",
      "frames: 791000, reward: 19.400000, loss: 0.000092, epsilon: 0.010000, episode:  412\n",
      "frames: 792000, reward: 19.400000, loss: 0.002841, epsilon: 0.010000, episode:  412\n",
      "frames: 793000, reward: 19.300000, loss: 0.000068, epsilon: 0.010000, episode:  413\n",
      "frames: 794000, reward: 19.300000, loss: 0.001372, epsilon: 0.010000, episode:  413\n",
      "frames: 795000, reward: 19.300000, loss: 0.000328, epsilon: 0.010000, episode:  414\n",
      "frames: 796000, reward: 19.300000, loss: 0.000433, epsilon: 0.010000, episode:  415\n",
      "frames: 797000, reward: 19.300000, loss: 0.000194, epsilon: 0.010000, episode:  415\n",
      "frames: 798000, reward: 19.300000, loss: 0.000396, epsilon: 0.010000, episode:  416\n",
      "frames: 799000, reward: 19.300000, loss: 0.000086, epsilon: 0.010000, episode:  416\n",
      "frames: 800000, reward: 19.400000, loss: 0.000450, epsilon: 0.010000, episode:  417\n",
      "frames: 801000, reward: 19.400000, loss: 0.000105, epsilon: 0.010000, episode:  417\n",
      "frames: 802000, reward: 19.500000, loss: 0.000081, epsilon: 0.010000, episode:  418\n",
      "frames: 803000, reward: 19.700000, loss: 0.000093, epsilon: 0.010000, episode:  419\n",
      "frames: 804000, reward: 19.700000, loss: 0.000161, epsilon: 0.010000, episode:  419\n",
      "frames: 805000, reward: 19.900000, loss: 0.000087, epsilon: 0.010000, episode:  420\n",
      "frames: 806000, reward: 19.900000, loss: 0.000211, epsilon: 0.010000, episode:  420\n",
      "frames: 807000, reward: 19.800000, loss: 0.000091, epsilon: 0.010000, episode:  421\n",
      "frames: 808000, reward: 19.800000, loss: 0.000228, epsilon: 0.010000, episode:  421\n",
      "frames: 809000, reward: 19.600000, loss: 0.000164, epsilon: 0.010000, episode:  422\n",
      "frames: 810000, reward: 19.600000, loss: 0.000108, epsilon: 0.010000, episode:  422\n",
      "frames: 811000, reward: 19.700000, loss: 0.001077, epsilon: 0.010000, episode:  423\n",
      "frames: 812000, reward: 19.600000, loss: 0.000220, epsilon: 0.010000, episode:  424\n",
      "frames: 813000, reward: 19.600000, loss: 0.000467, epsilon: 0.010000, episode:  424\n",
      "frames: 814000, reward: 19.800000, loss: 0.000246, epsilon: 0.010000, episode:  425\n",
      "frames: 815000, reward: 19.800000, loss: 0.000080, epsilon: 0.010000, episode:  425\n",
      "frames: 816000, reward: 19.900000, loss: 0.000118, epsilon: 0.010000, episode:  426\n",
      "frames: 817000, reward: 19.900000, loss: 0.000131, epsilon: 0.010000, episode:  426\n",
      "frames: 818000, reward: 19.800000, loss: 0.000052, epsilon: 0.010000, episode:  427\n",
      "frames: 819000, reward: 19.700000, loss: 0.000133, epsilon: 0.010000, episode:  428\n",
      "frames: 820000, reward: 19.700000, loss: 0.000122, epsilon: 0.010000, episode:  428\n",
      "frames: 821000, reward: 19.900000, loss: 0.000098, epsilon: 0.010000, episode:  429\n",
      "frames: 822000, reward: 19.900000, loss: 0.000071, epsilon: 0.010000, episode:  429\n",
      "frames: 823000, reward: 19.900000, loss: 0.000061, epsilon: 0.010000, episode:  430\n",
      "frames: 824000, reward: 19.900000, loss: 0.000079, epsilon: 0.010000, episode:  430\n",
      "frames: 825000, reward: 20.200000, loss: 0.000119, epsilon: 0.010000, episode:  431\n",
      "frames: 826000, reward: 20.300000, loss: 0.000566, epsilon: 0.010000, episode:  432\n",
      "frames: 827000, reward: 20.300000, loss: 0.000105, epsilon: 0.010000, episode:  432\n",
      "frames: 828000, reward: 20.200000, loss: 0.000069, epsilon: 0.010000, episode:  433\n",
      "frames: 829000, reward: 20.200000, loss: 0.000038, epsilon: 0.010000, episode:  433\n",
      "frames: 830000, reward: 20.200000, loss: 0.000174, epsilon: 0.010000, episode:  434\n",
      "frames: 831000, reward: 20.200000, loss: 0.000074, epsilon: 0.010000, episode:  434\n",
      "frames: 832000, reward: 19.900000, loss: 0.000296, epsilon: 0.010000, episode:  435\n",
      "frames: 833000, reward: 19.900000, loss: 0.000112, epsilon: 0.010000, episode:  435\n",
      "frames: 834000, reward: 19.900000, loss: 0.000069, epsilon: 0.010000, episode:  436\n",
      "frames: 835000, reward: 20.000000, loss: 0.000106, epsilon: 0.010000, episode:  437\n",
      "frames: 836000, reward: 20.000000, loss: 0.000067, epsilon: 0.010000, episode:  437\n",
      "frames: 837000, reward: 20.200000, loss: 0.000124, epsilon: 0.010000, episode:  438\n",
      "frames: 838000, reward: 20.200000, loss: 0.000138, epsilon: 0.010000, episode:  438\n",
      "frames: 839000, reward: 20.100000, loss: 0.000266, epsilon: 0.010000, episode:  439\n",
      "frames: 840000, reward: 20.100000, loss: 0.000206, epsilon: 0.010000, episode:  439\n",
      "frames: 841000, reward: 20.100000, loss: 0.000053, epsilon: 0.010000, episode:  440\n",
      "frames: 842000, reward: 20.200000, loss: 0.000062, epsilon: 0.010000, episode:  441\n",
      "frames: 843000, reward: 20.200000, loss: 0.000084, epsilon: 0.010000, episode:  441\n",
      "frames: 844000, reward: 20.200000, loss: 0.000031, epsilon: 0.010000, episode:  442\n",
      "frames: 845000, reward: 20.200000, loss: 0.000040, epsilon: 0.010000, episode:  442\n",
      "frames: 846000, reward: 20.300000, loss: 0.000064, epsilon: 0.010000, episode:  443\n",
      "frames: 847000, reward: 20.400000, loss: 0.000168, epsilon: 0.010000, episode:  444\n",
      "frames: 848000, reward: 20.400000, loss: 0.000044, epsilon: 0.010000, episode:  444\n",
      "frames: 849000, reward: 20.600000, loss: 0.000070, epsilon: 0.010000, episode:  445\n",
      "frames: 850000, reward: 20.600000, loss: 0.000049, epsilon: 0.010000, episode:  445\n",
      "frames: 851000, reward: 20.500000, loss: 0.000082, epsilon: 0.010000, episode:  446\n",
      "frames: 852000, reward: 20.500000, loss: 0.000056, epsilon: 0.010000, episode:  446\n",
      "frames: 853000, reward: 20.400000, loss: 0.000067, epsilon: 0.010000, episode:  447\n",
      "frames: 854000, reward: 20.400000, loss: 0.000197, epsilon: 0.010000, episode:  448\n",
      "frames: 855000, reward: 20.400000, loss: 0.000064, epsilon: 0.010000, episode:  448\n",
      "frames: 856000, reward: 20.300000, loss: 0.000103, epsilon: 0.010000, episode:  449\n",
      "frames: 857000, reward: 20.300000, loss: 0.000095, epsilon: 0.010000, episode:  449\n",
      "frames: 858000, reward: 19.900000, loss: 0.000270, epsilon: 0.010000, episode:  450\n",
      "frames: 859000, reward: 19.900000, loss: 0.000178, epsilon: 0.010000, episode:  450\n",
      "frames: 860000, reward: 19.800000, loss: 0.000077, epsilon: 0.010000, episode:  451\n",
      "frames: 861000, reward: 19.800000, loss: 0.000056, epsilon: 0.010000, episode:  451\n",
      "frames: 862000, reward: 19.900000, loss: 0.000044, epsilon: 0.010000, episode:  452\n",
      "frames: 863000, reward: 19.900000, loss: 0.000109, epsilon: 0.010000, episode:  453\n",
      "frames: 864000, reward: 19.900000, loss: 0.000054, epsilon: 0.010000, episode:  453\n",
      "frames: 865000, reward: 19.900000, loss: 0.000188, epsilon: 0.010000, episode:  454\n",
      "frames: 866000, reward: 19.900000, loss: 0.000211, epsilon: 0.010000, episode:  454\n",
      "frames: 867000, reward: 19.600000, loss: 0.000076, epsilon: 0.010000, episode:  455\n",
      "frames: 868000, reward: 19.600000, loss: 0.000121, epsilon: 0.010000, episode:  455\n",
      "frames: 869000, reward: 19.700000, loss: 0.000148, epsilon: 0.010000, episode:  456\n",
      "frames: 870000, reward: 19.800000, loss: 0.000225, epsilon: 0.010000, episode:  457\n",
      "frames: 871000, reward: 19.800000, loss: 0.000290, epsilon: 0.010000, episode:  457\n",
      "frames: 872000, reward: 19.800000, loss: 0.000045, epsilon: 0.010000, episode:  457\n",
      "frames: 873000, reward: 19.200000, loss: 0.000110, epsilon: 0.010000, episode:  458\n",
      "frames: 874000, reward: 19.400000, loss: 0.000357, epsilon: 0.010000, episode:  459\n",
      "frames: 875000, reward: 19.400000, loss: 0.000190, epsilon: 0.010000, episode:  459\n",
      "frames: 876000, reward: 19.800000, loss: 0.000080, epsilon: 0.010000, episode:  460\n",
      "frames: 877000, reward: 19.800000, loss: 0.000229, epsilon: 0.010000, episode:  460\n",
      "frames: 878000, reward: 19.800000, loss: 0.000218, epsilon: 0.010000, episode:  461\n",
      "frames: 879000, reward: 19.800000, loss: 0.000117, epsilon: 0.010000, episode:  461\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 880000, reward: 19.800000, loss: 0.000087, epsilon: 0.010000, episode:  462\n",
      "frames: 881000, reward: 19.800000, loss: 0.000443, epsilon: 0.010000, episode:  463\n",
      "frames: 882000, reward: 19.800000, loss: 0.000072, epsilon: 0.010000, episode:  463\n",
      "frames: 883000, reward: 19.800000, loss: 0.000065, epsilon: 0.010000, episode:  464\n",
      "frames: 884000, reward: 19.800000, loss: 0.000273, epsilon: 0.010000, episode:  464\n",
      "frames: 885000, reward: 20.100000, loss: 0.000128, epsilon: 0.010000, episode:  465\n",
      "frames: 886000, reward: 20.100000, loss: 0.000177, epsilon: 0.010000, episode:  465\n",
      "frames: 887000, reward: 20.000000, loss: 0.004280, epsilon: 0.010000, episode:  466\n",
      "frames: 888000, reward: 20.000000, loss: 0.000093, epsilon: 0.010000, episode:  467\n",
      "frames: 889000, reward: 20.000000, loss: 0.000094, epsilon: 0.010000, episode:  467\n",
      "frames: 890000, reward: 20.500000, loss: 0.000124, epsilon: 0.010000, episode:  468\n",
      "frames: 891000, reward: 20.500000, loss: 0.000022, epsilon: 0.010000, episode:  468\n",
      "frames: 892000, reward: 20.500000, loss: 0.000131, epsilon: 0.010000, episode:  469\n",
      "frames: 893000, reward: 20.500000, loss: 0.000039, epsilon: 0.010000, episode:  469\n",
      "frames: 894000, reward: 20.400000, loss: 0.000088, epsilon: 0.010000, episode:  470\n",
      "frames: 895000, reward: 20.400000, loss: 0.000256, epsilon: 0.010000, episode:  470\n",
      "frames: 896000, reward: 20.000000, loss: 0.000101, epsilon: 0.010000, episode:  471\n",
      "frames: 897000, reward: 20.000000, loss: 0.000147, epsilon: 0.010000, episode:  471\n",
      "frames: 898000, reward: 20.000000, loss: 0.000171, epsilon: 0.010000, episode:  472\n",
      "frames: 899000, reward: 19.800000, loss: 0.000194, epsilon: 0.010000, episode:  473\n",
      "frames: 900000, reward: 19.800000, loss: 0.000109, epsilon: 0.010000, episode:  473\n",
      "frames: 901000, reward: 19.800000, loss: 0.000149, epsilon: 0.010000, episode:  474\n",
      "frames: 902000, reward: 19.800000, loss: 0.000097, epsilon: 0.010000, episode:  474\n",
      "frames: 903000, reward: 19.900000, loss: 0.000146, epsilon: 0.010000, episode:  475\n",
      "frames: 904000, reward: 20.000000, loss: 0.000283, epsilon: 0.010000, episode:  476\n",
      "frames: 905000, reward: 20.000000, loss: 0.000123, epsilon: 0.010000, episode:  476\n",
      "frames: 906000, reward: 19.800000, loss: 0.000103, epsilon: 0.010000, episode:  477\n",
      "frames: 907000, reward: 19.800000, loss: 0.000903, epsilon: 0.010000, episode:  477\n",
      "frames: 908000, reward: 19.700000, loss: 0.000099, epsilon: 0.010000, episode:  478\n",
      "frames: 909000, reward: 19.700000, loss: 0.000686, epsilon: 0.010000, episode:  478\n",
      "frames: 910000, reward: 19.700000, loss: 0.000202, epsilon: 0.010000, episode:  479\n",
      "frames: 911000, reward: 19.700000, loss: 0.000084, epsilon: 0.010000, episode:  479\n",
      "frames: 912000, reward: 19.800000, loss: 0.000142, epsilon: 0.010000, episode:  480\n",
      "frames: 913000, reward: 20.300000, loss: 0.000071, epsilon: 0.010000, episode:  481\n",
      "frames: 914000, reward: 20.300000, loss: 0.000233, epsilon: 0.010000, episode:  481\n",
      "frames: 915000, reward: 20.200000, loss: 0.000064, epsilon: 0.010000, episode:  482\n",
      "frames: 916000, reward: 20.200000, loss: 0.000055, epsilon: 0.010000, episode:  482\n",
      "frames: 917000, reward: 20.400000, loss: 0.000128, epsilon: 0.010000, episode:  483\n",
      "frames: 918000, reward: 20.400000, loss: 0.000190, epsilon: 0.010000, episode:  483\n",
      "frames: 919000, reward: 20.000000, loss: 0.000155, epsilon: 0.010000, episode:  484\n",
      "frames: 920000, reward: 20.000000, loss: 0.000467, epsilon: 0.010000, episode:  484\n",
      "frames: 921000, reward: 20.000000, loss: 0.000114, epsilon: 0.010000, episode:  485\n",
      "frames: 922000, reward: 19.900000, loss: 0.000534, epsilon: 0.010000, episode:  486\n",
      "frames: 923000, reward: 19.900000, loss: 0.000217, epsilon: 0.010000, episode:  486\n",
      "frames: 924000, reward: 20.100000, loss: 0.000118, epsilon: 0.010000, episode:  487\n",
      "frames: 925000, reward: 20.100000, loss: 0.000083, epsilon: 0.010000, episode:  487\n",
      "frames: 926000, reward: 20.300000, loss: 0.000099, epsilon: 0.010000, episode:  488\n",
      "frames: 927000, reward: 20.300000, loss: 0.000176, epsilon: 0.010000, episode:  488\n",
      "frames: 928000, reward: 20.200000, loss: 0.000057, epsilon: 0.010000, episode:  489\n",
      "frames: 929000, reward: 20.200000, loss: 0.000074, epsilon: 0.010000, episode:  490\n",
      "frames: 930000, reward: 20.200000, loss: 0.000053, epsilon: 0.010000, episode:  490\n",
      "frames: 931000, reward: 20.200000, loss: 0.000365, epsilon: 0.010000, episode:  491\n",
      "frames: 932000, reward: 20.200000, loss: 0.000215, epsilon: 0.010000, episode:  491\n",
      "frames: 933000, reward: 19.900000, loss: 0.000143, epsilon: 0.010000, episode:  492\n",
      "frames: 934000, reward: 19.900000, loss: 0.000124, epsilon: 0.010000, episode:  492\n",
      "frames: 935000, reward: 19.900000, loss: 0.000112, epsilon: 0.010000, episode:  492\n",
      "frames: 936000, reward: 19.000000, loss: 0.000935, epsilon: 0.010000, episode:  493\n",
      "frames: 937000, reward: 19.000000, loss: 0.000490, epsilon: 0.010000, episode:  493\n",
      "frames: 938000, reward: 19.400000, loss: 0.000211, epsilon: 0.010000, episode:  494\n",
      "frames: 939000, reward: 19.400000, loss: 0.000100, epsilon: 0.010000, episode:  495\n",
      "frames: 940000, reward: 19.400000, loss: 0.000308, epsilon: 0.010000, episode:  495\n",
      "frames: 941000, reward: 19.400000, loss: 0.000235, epsilon: 0.010000, episode:  495\n",
      "frames: 942000, reward: 19.400000, loss: 0.000158, epsilon: 0.010000, episode:  495\n",
      "frames: 943000, reward: 19.400000, loss: 0.000186, epsilon: 0.010000, episode:  495\n",
      "frames: 944000, reward: 18.200000, loss: 0.000077, epsilon: 0.010000, episode:  496\n",
      "frames: 945000, reward: 18.200000, loss: 0.000196, epsilon: 0.010000, episode:  497\n",
      "frames: 946000, reward: 18.200000, loss: 0.000172, epsilon: 0.010000, episode:  497\n",
      "frames: 947000, reward: 18.100000, loss: 0.000112, epsilon: 0.010000, episode:  498\n",
      "frames: 948000, reward: 18.100000, loss: 0.000067, epsilon: 0.010000, episode:  498\n",
      "frames: 949000, reward: 18.200000, loss: 0.000094, epsilon: 0.010000, episode:  499\n",
      "frames: 950000, reward: 18.200000, loss: 0.000101, epsilon: 0.010000, episode:  499\n",
      "frames: 951000, reward: 18.200000, loss: 0.000040, epsilon: 0.010000, episode:  500\n",
      "frames: 952000, reward: 18.200000, loss: 0.000095, epsilon: 0.010000, episode:  501\n",
      "frames: 953000, reward: 18.200000, loss: 0.000043, epsilon: 0.010000, episode:  501\n",
      "frames: 954000, reward: 18.400000, loss: 0.000093, epsilon: 0.010000, episode:  502\n",
      "frames: 955000, reward: 18.400000, loss: 0.000036, epsilon: 0.010000, episode:  502\n",
      "frames: 956000, reward: 19.300000, loss: 0.000069, epsilon: 0.010000, episode:  503\n",
      "frames: 957000, reward: 19.400000, loss: 0.000080, epsilon: 0.010000, episode:  504\n",
      "frames: 958000, reward: 19.400000, loss: 0.000054, epsilon: 0.010000, episode:  504\n",
      "frames: 959000, reward: 19.300000, loss: 0.000106, epsilon: 0.010000, episode:  505\n",
      "frames: 960000, reward: 19.300000, loss: 0.000105, epsilon: 0.010000, episode:  505\n",
      "frames: 961000, reward: 20.400000, loss: 0.000123, epsilon: 0.010000, episode:  506\n",
      "frames: 962000, reward: 20.400000, loss: 0.000209, epsilon: 0.010000, episode:  506\n",
      "frames: 963000, reward: 20.400000, loss: 0.000080, epsilon: 0.010000, episode:  507\n",
      "frames: 964000, reward: 20.400000, loss: 0.000178, epsilon: 0.010000, episode:  507\n",
      "frames: 965000, reward: 20.400000, loss: 0.000117, epsilon: 0.010000, episode:  508\n",
      "frames: 966000, reward: 20.400000, loss: 0.000096, epsilon: 0.010000, episode:  509\n",
      "frames: 967000, reward: 20.400000, loss: 0.000208, epsilon: 0.010000, episode:  509\n",
      "frames: 968000, reward: 20.300000, loss: 0.000067, epsilon: 0.010000, episode:  510\n",
      "frames: 969000, reward: 20.300000, loss: 0.000064, epsilon: 0.010000, episode:  510\n",
      "frames: 970000, reward: 20.300000, loss: 0.000043, epsilon: 0.010000, episode:  511\n",
      "frames: 971000, reward: 20.300000, loss: 0.000047, epsilon: 0.010000, episode:  511\n",
      "frames: 972000, reward: 20.400000, loss: 0.000093, epsilon: 0.010000, episode:  512\n",
      "frames: 973000, reward: 20.400000, loss: 0.000064, epsilon: 0.010000, episode:  513\n",
      "frames: 974000, reward: 20.400000, loss: 0.000060, epsilon: 0.010000, episode:  513\n",
      "frames: 975000, reward: 20.400000, loss: 0.000042, epsilon: 0.010000, episode:  514\n",
      "frames: 976000, reward: 20.400000, loss: 0.000061, epsilon: 0.010000, episode:  514\n",
      "frames: 977000, reward: 20.500000, loss: 0.000036, epsilon: 0.010000, episode:  515\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "frames: 978000, reward: 20.600000, loss: 0.000085, epsilon: 0.010000, episode:  516\n",
      "frames: 979000, reward: 20.600000, loss: 0.000040, epsilon: 0.010000, episode:  516\n",
      "frames: 980000, reward: 20.600000, loss: 0.000062, epsilon: 0.010000, episode:  517\n",
      "frames: 981000, reward: 20.600000, loss: 0.000027, epsilon: 0.010000, episode:  517\n",
      "frames: 982000, reward: 20.700000, loss: 0.000034, epsilon: 0.010000, episode:  518\n",
      "frames: 983000, reward: 20.700000, loss: 0.000086, epsilon: 0.010000, episode:  519\n",
      "frames: 984000, reward: 20.700000, loss: 0.000049, epsilon: 0.010000, episode:  519\n",
      "frames: 985000, reward: 20.700000, loss: 0.000078, epsilon: 0.010000, episode:  520\n",
      "frames: 986000, reward: 20.700000, loss: 0.000146, epsilon: 0.010000, episode:  520\n",
      "frames: 987000, reward: 20.400000, loss: 0.000574, epsilon: 0.010000, episode:  521\n",
      "frames: 988000, reward: 20.400000, loss: 0.000136, epsilon: 0.010000, episode:  521\n",
      "frames: 989000, reward: 20.500000, loss: 0.000148, epsilon: 0.010000, episode:  522\n",
      "frames: 990000, reward: 20.500000, loss: 0.000038, epsilon: 0.010000, episode:  523\n",
      "frames: 991000, reward: 20.500000, loss: 0.000060, epsilon: 0.010000, episode:  523\n",
      "frames: 992000, reward: 20.300000, loss: 0.000126, epsilon: 0.010000, episode:  524\n",
      "frames: 993000, reward: 20.300000, loss: 0.000125, epsilon: 0.010000, episode:  524\n",
      "frames: 994000, reward: 20.300000, loss: 0.000120, epsilon: 0.010000, episode:  525\n",
      "frames: 995000, reward: 20.300000, loss: 0.000204, epsilon: 0.010000, episode:  526\n",
      "frames: 996000, reward: 20.300000, loss: 0.000045, epsilon: 0.010000, episode:  526\n",
      "frames: 997000, reward: 20.300000, loss: 0.000088, epsilon: 0.010000, episode:  527\n",
      "frames: 998000, reward: 20.300000, loss: 0.000099, epsilon: 0.010000, episode:  527\n",
      "frames: 999000, reward: 20.100000, loss: 0.000026, epsilon: 0.010000, episode:  528\n"
     ]
    }
   ],
   "source": [
    "# Training script: Dueling DQN on PongNoFrameskip-v4.\n",
    "# Relies on Dueling_DQNAgent / make_atari / wrap_deepmind defined earlier in the notebook.\n",
    "env = make_atari('PongNoFrameskip-v4')\n",
    "env = wrap_deepmind(env, scale = False, frame_stack=True)\n",
    "\n",
    "# Hyper-parameters.\n",
    "gamma = 0.99                 # discount factor\n",
    "epsilon_max = 1              # initial exploration rate\n",
    "epsilon_min = 0.01           # final exploration rate\n",
    "eps_decay = 30000            # exponential decay constant (in frames)\n",
    "frames = 1000000             # total environment steps\n",
    "USE_CUDA = torch.cuda.is_available()  # fix: was hardcoded True, which crashes on CPU-only machines\n",
    "learning_rate = 2e-4\n",
    "max_buff = 100000            # replay-buffer capacity\n",
    "update_tar_interval = 1000   # frames between target-network syncs\n",
    "batch_size = 32\n",
    "print_interval = 1000\n",
    "log_interval = 1000\n",
    "learning_start = 10000       # warm-up frames before learning begins\n",
    "win_reward = 18     # Pong-v4; NOTE(review): win_reward / win_break / is_win are never used below — early-stop logic appears unfinished\n",
    "win_break = True\n",
    "\n",
    "frame = env.reset()\n",
    "action_space = env.action_space\n",
    "action_dim = env.action_space.n\n",
    "state_dim = env.observation_space.shape[0]\n",
    "state_channel = env.observation_space.shape[2]\n",
    "# LazyFrames come back HWC; the network expects CHW.\n",
    "input_shape = frame._force().transpose(2,0,1).shape\n",
    "agent = Dueling_DQNAgent(input_shape = input_shape, action_space= action_space, USE_CUDA = USE_CUDA, lr = learning_rate)\n",
    "\n",
    "episode_reward = 0\n",
    "all_rewards = []\n",
    "losses = []\n",
    "episode_num = 0\n",
    "is_win = False\n",
    "# tensorboard\n",
    "summary_writer = SummaryWriter(log_dir = \"Dueling_DQN\", comment= \"good_makeatari\")\n",
    "\n",
    "# e-greedy decay: epsilon anneals exponentially from epsilon_max toward epsilon_min.\n",
    "epsilon_by_frame = lambda frame_idx: epsilon_min + (epsilon_max - epsilon_min) * math.exp(\n",
    "            -1. * frame_idx / eps_decay)\n",
    "# plt.plot([epsilon_by_frame(i) for i in range(10000)])\n",
    "\n",
    "for i in range(frames):\n",
    "    epsilon = epsilon_by_frame(i)\n",
    "    state_tensor = agent.observe(frame)\n",
    "    action = agent.act(state_tensor, epsilon)\n",
    "    \n",
    "    next_frame, reward, done, _ = env.step(action)\n",
    "    \n",
    "    episode_reward += reward\n",
    "    agent.memory_buffer.push(frame, action, reward, next_frame, done)\n",
    "    frame = next_frame\n",
    "    \n",
    "    loss = 0\n",
    "    # Start learning only once the replay buffer holds enough warm-up transitions.\n",
    "    if agent.memory_buffer.size() >= learning_start:\n",
    "        loss = agent.learn_from_experience(batch_size)\n",
    "        losses.append(loss)\n",
    "\n",
    "    # Console + TensorBoard logging. NOTE(review): at i == 0 all_rewards is empty,\n",
    "    # so np.mean([...][-10:]) prints nan with a RuntimeWarning on the first line.\n",
    "    if i % print_interval == 0:\n",
    "        print(\"frames: %5d, reward: %5f, loss: %4f, epsilon: %5f, episode: %4d\" % (i, np.mean(all_rewards[-10:]), loss, epsilon, episode_num))\n",
    "        summary_writer.add_scalar(\"Temporal Difference Loss\", loss, i)\n",
    "        summary_writer.add_scalar(\"Mean Reward\", np.mean(all_rewards[-10:]), i)\n",
    "        summary_writer.add_scalar(\"Epsilon\", epsilon, i)\n",
    "        \n",
    "    # Periodically sync the target network with the online network.\n",
    "    if i % update_tar_interval == 0:\n",
    "        agent.Dueling_DQN_target.load_state_dict(agent.Dueling_DQN.state_dict())\n",
    "    \n",
    "    if done:\n",
    "        # Episode finished: reset the env and record the episode return.\n",
    "        frame = env.reset()\n",
    "        \n",
    "        all_rewards.append(episode_reward)\n",
    "        episode_reward = 0\n",
    "        episode_num += 1\n",
    "        avg_reward = float(np.mean(all_rewards[-100:]))\n",
    "\n",
    "summary_writer.close()\n",
    "# Ensure the output directory exists before saving the trained weights.\n",
    "os.makedirs(\"trained model\", exist_ok=True)\n",
    "torch.save(agent.Dueling_DQN.state_dict(), \"trained model/Dueling_DQN_dict.pth.tar\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def moving_average(a, n=3) :\n",
    "    ret = np.cumsum(a, dtype=float)\n",
    "    ret[n:] = ret[n:] - ret[:-n]\n",
    "    return ret[n - 1:] / n\n",
    "\n",
    "def plot_training(frame_idx, rewards, losses):\n",
    "    \"\"\"Plot smoothed episode rewards and TD losses up to `frame_idx`.\n",
    "\n",
    "    Replaces the previous cell output, then shows a 20-episode moving\n",
    "    average of rewards and a 100-step moving average of losses.\n",
    "    \"\"\"\n",
    "    clear_output(True)\n",
    "    plt.figure(figsize=(20,5))\n",
    "    plt.subplot(131)\n",
    "    plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-100:])))\n",
    "    plt.plot(moving_average(rewards,20))\n",
    "    plt.subplot(132)\n",
    "    plt.title('loss, average on 100 steps')\n",
    "    plt.plot(moving_average(losses, 100),linewidth=0.2)\n",
    "    plt.show()\n",
    "\n",
    "plot_training(i, all_rewards, losses)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
