{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Atari 游戏 PongDeterministic-v4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 07:13:21,998 [DEBUG] Loaded backend module://ipykernel.pylab.backend_inline version unknown.\n"
     ]
    }
   ],
   "source": [
     "%matplotlib inline\n",
     "# Standard-library imports first, third-party imports below.\n",
     "import os\n",
     "import sys\n",
     "import time\n",
     "import itertools\n",
     "import logging\n",
     "\n",
     "import numpy as np\n",
     "np.random.seed(0) # fix the global NumPy seed for reproducibility\n",
     "import pandas as pd\n",
     "import gym\n",
     "import tensorflow as tf\n",
     "from tensorflow import keras\n",
     "from PIL import Image\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
     "# Route log records to stdout so they appear in the notebook output.\n",
     "logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,\n",
     "        format='%(asctime)s [%(levelname)s] %(message)s')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "观测空间 = Box(210, 160, 3)\n",
      "动作空间 = Discrete(6)\n",
      "回合最大步数 = 100000\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0, 592379725]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Alternative Atari environments -- leave exactly one env_spec_id active.\n",
     "# env_spec_id = 'BreakoutDeterministic-v4'\n",
     "env_spec_id = 'PongDeterministic-v4'\n",
     "# env_spec_id = 'SeaquestDeterministic-v4'\n",
     "# env_spec_id = 'SpaceInvadersDeterministic-v4'\n",
     "# env_spec_id = 'BeamRiderDeterministic-v4'\n",
     "env = gym.make(env_spec_id)\n",
     "print('观测空间 = {}'.format(env.observation_space))\n",
     "print('动作空间 = {}'.format(env.action_space))\n",
     "# NOTE(review): _max_episode_steps is a private gym attribute and may\n",
     "# change across gym versions -- confirm against the installed gym.\n",
     "print('回合最大步数 = {}'.format(env._max_episode_steps))\n",
     "env.seed(0) # returns the list of seeds used; shown as the cell output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 深度 Q 网络智能体\n",
    "经验回放"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "class DQNReplayer:\n",
     "    # Fixed-capacity ring-buffer replay memory backed by a pandas DataFrame\n",
     "    # with columns (observation, action, reward, next_observation, done).\n",
     "    def __init__(self, capacity):\n",
     "        self.memory = pd.DataFrame(index=range(capacity),\n",
     "                columns=['observation', 'action', 'reward',\n",
     "                'next_observation', 'done'])\n",
     "        self.i = 0 # next write position (wraps around at capacity)\n",
     "        self.count = 0 # number of transitions stored so far (<= capacity)\n",
     "        self.capacity = capacity\n",
     "    \n",
     "    def store(self, *args):\n",
     "        # Write one (obs, action, reward, next_obs, done) transition,\n",
     "        # overwriting the oldest slot once the buffer is full.\n",
     "        self.memory.loc[self.i] = args\n",
     "        self.i = (self.i + 1) % self.capacity\n",
     "        self.count = min(self.count + 1, self.capacity)\n",
     "        \n",
     "    def sample(self, size):\n",
     "        # Sample `size` transitions uniformly WITH replacement; returns a\n",
     "        # tuple of stacked arrays, one per DataFrame column.\n",
     "        indices = np.random.choice(self.count, size=size)\n",
     "        return tuple(np.stack(self.memory.loc[indices, field]) for \\\n",
     "                field in self.memory.columns)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "智能体"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DQNAgent:\n",
    "    def __init__(self, env, input_shape, learning_rate=0.00025,\n",
    "            load_path=None, gamma=0.99,\n",
    "            replay_memory_size=1000000, batch_size=32,\n",
    "            replay_start_size=0,\n",
    "            epsilon=1., epsilon_decrease_rate=9e-7, min_epsilon=0.1,\n",
    "            random_initial_steps=0,\n",
    "            clip_reward=True, rescale_state=True,\n",
    "            update_freq=1, target_network_update_freq=1):\n",
    "        \n",
    "        self.action_n = env.action_space.n\n",
    "        self.gamma = gamma\n",
    "        \n",
    "        # 经验回放参数\n",
    "        self.replay_memory_size = replay_memory_size\n",
    "        self.replay_start_size = replay_start_size\n",
    "        self.batch_size = batch_size\n",
    "        self.replayer = DQNReplayer(replay_memory_size)\n",
    "        \n",
    "        # 图像输入参数\n",
    "        self.img_shape = (input_shape[-1], input_shape[-2])\n",
    "        self.img_stack = input_shape[-3]\n",
    "        \n",
    "        # 探索参数\n",
    "        self.epsilon = epsilon\n",
    "        self.epsilon_decrease_rate = epsilon_decrease_rate\n",
    "        self.min_epsilon = min_epsilon\n",
    "        self.random_initial_steps = random_initial_steps\n",
    "        \n",
    "        self.clip_reward = clip_reward\n",
    "        self.rescale_state = rescale_state\n",
    "        \n",
    "        self.update_freq = update_freq\n",
    "        self.target_network_update_freq = target_network_update_freq\n",
    "        \n",
    "        # 评估网络\n",
    "        self.evaluate_net = self.build_network(\n",
    "                input_shape=input_shape, output_size=self.action_n,\n",
    "                conv_activation=tf.nn.relu,\n",
    "                fc_hidden_sizes=[512,], fc_activation=tf.nn.relu,\n",
    "                learning_rate=learning_rate, load_path=load_path)\n",
    "        self.evaluate_net.summary() # 输出网络结构\n",
    "        # 目标网络\n",
    "        self.target_net = self.build_network(\n",
    "                input_shape=input_shape, output_size=self.action_n,\n",
    "                conv_activation=tf.nn.relu,\n",
    "                fc_hidden_sizes=[512,], fc_activation=tf.nn.relu,\n",
    "                )\n",
    "        self.update_target_network()\n",
    "        \n",
    "        # 初始化计数值\n",
    "        self.step = 0\n",
    "        self.fit_count = 0\n",
    "\n",
    "\n",
    "    def build_network(self, input_shape, output_size, conv_activation,\n",
    "            fc_hidden_sizes, fc_activation, output_activation=None,\n",
    "            learning_rate=0.001, load_path=None):\n",
    "        # 网络输入格式: (样本, 通道, 行, 列)\n",
    "        model = keras.models.Sequential()\n",
    "        # tf 要求从 (样本, 通道, 行, 列) 改为 (样本, 行, 列, 通道)\n",
    "        model.add(keras.layers.Permute((2, 3, 1), input_shape=input_shape))\n",
    "        \n",
    "        # 卷积层\n",
    "        model.add(keras.layers.Conv2D(32, 8, strides=4,\n",
    "                activation=conv_activation))\n",
    "        model.add(keras.layers.Conv2D(64, 4, strides=2,\n",
    "                activation=conv_activation))\n",
    "        model.add(keras.layers.Conv2D(64, 3, strides=1,\n",
    "                activation=conv_activation))\n",
    "        \n",
    "        model.add(keras.layers.Flatten())\n",
    "        \n",
    "        # 全连接层\n",
    "        for hidden_size in fc_hidden_sizes:\n",
    "            model.add(keras.layers.Dense(hidden_size,\n",
    "                    activation=fc_activation))\n",
    "        model.add(keras.layers.Dense(output_size,\n",
    "                activation=output_activation))\n",
    "\n",
    "        if load_path is not None:\n",
    "            logging.info('载入网络权重 {}.'.format(load_path))\n",
    "            model.load_weights(load_path)\n",
    "\n",
    "        try: # tf2\n",
    "            optimizer = keras.optimizers.RMSprop(learning_rate, 0.95,\n",
    "                    momentum=0.95, epsilon=0.01)\n",
    "        except: # tf1\n",
    "            optimizer = tf.train.RMSPropOptimizer(learning_rate, 0.95,\n",
    "                    momentum=0.95, epsilon=0.01)\n",
    "        model.compile(loss=keras.losses.mse, optimizer=optimizer)\n",
    "        return model\n",
    "        \n",
    "    def get_next_state(self, state=None, observation=None):\n",
    "        img = Image.fromarray(observation, 'RGB') \n",
    "        img = img.resize(self.img_shape).convert('L') # 改大小,变灰度\n",
    "        img = np.asarray(img.getdata(), dtype=np.uint8\n",
    "                ).reshape(img.size[1], img.size[0]) # 转成 np.array\n",
    "        \n",
    "        # 堆叠图像\n",
    "        if state is None:\n",
    "            next_state = np.array([img,] * self.img_stack) # 初始化\n",
    "        else:\n",
    "            next_state = np.append(state[1:], [img,], axis=0)\n",
    "        return next_state\n",
    "    \n",
    "    def decide(self, state, test=False, step=None):\n",
    "        if step is not None and step < self.random_initial_steps:\n",
    "            epsilon = 1.\n",
    "        elif test:\n",
    "            epsilon = 0.05\n",
    "        else:\n",
    "            epsilon = self.epsilon\n",
    "        if np.random.rand() < epsilon:\n",
    "            action = np.random.choice(self.action_n)\n",
    "        else:\n",
    "            if self.rescale_state:\n",
    "                state = state / 128. - 1.\n",
    "            q_values = self.evaluate_net.predict(state[np.newaxis])[0]\n",
    "            action = np.argmax(q_values)\n",
    "        return action\n",
    "\n",
    "    def learn(self, state, action, reward, next_state, done):\n",
    "        self.replayer.store(state, action, reward, next_state, done)\n",
    "\n",
    "        self.step += 1\n",
    "        \n",
    "        if self.step % self.update_freq == 0 and \\\n",
    "                self.replayer.count >= self.replay_start_size:\n",
    "            states, actions, rewards, next_states, dones = \\\n",
    "                    self.replayer.sample(self.batch_size) # 回放\n",
    "\n",
    "            if self.rescale_state:\n",
    "                states = states / 128. - 1.\n",
    "                next_states = next_states / 128. - 1.\n",
    "            if self.clip_reward:\n",
    "                rewards = np.clip(rewards, -1., 1.)\n",
    "            \n",
    "            next_qs = self.target_net.predict(next_states)\n",
    "            next_max_qs = next_qs.max(axis=-1)\n",
    "            targets = self.evaluate_net.predict(states)\n",
    "            targets[range(self.batch_size), actions] = rewards + \\\n",
    "                    self.gamma * next_max_qs * (1. - dones)\n",
    "\n",
    "            h = self.evaluate_net.fit(states, targets, verbose=0)\n",
    "            self.fit_count += 1\n",
    "            \n",
    "            if self.fit_count % 100 == 0:\n",
    "                logging.info('训练 {}, 回合 {}, 存储大小 {}, 损失 {}' \\\n",
    "                        .format(self.fit_count, self.epsilon,\n",
    "                        self.replayer.count, h.history['loss'][0]))\n",
    "            \n",
    "            if self.fit_count % self.target_network_update_freq == 0:\n",
    "                self.update_target_network()\n",
    "        \n",
    "        # 更新 epsilon 的值：线性下降\n",
    "        if self.step >= self.replay_start_size:\n",
    "            self.epsilon = max(self.epsilon - self.epsilon_decrease_rate,\n",
    "                               self.min_epsilon)\n",
    "\n",
    "    def update_target_network(self): # 更新目标网络\n",
    "        self.target_net.set_weights(self.evaluate_net.get_weights())\n",
    "        logging.info('目标网络已更新')\n",
    "\n",
    "    def save_network(self, path): # 保存网络\n",
    "        dirname = os.path.dirname(save_path)\n",
    "        if not os.path.exists(dirname):\n",
    "            os.makedirs(dirname)\n",
    "            logging.info('创建文件夹 {}'.format(dirname))\n",
    "        self.evaluate_net.save_weights(path)\n",
    "        logging.info('网络权重已保存 {}'.format(path))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(env, agent, episodes=100, render=False, verbose=True):\n",
    "    steps, episode_rewards = [], []\n",
    "    for episode in range(episodes):\n",
    "        episode_reward = 0\n",
    "        observation = env.reset()\n",
    "        state = agent.get_next_state(None, observation)\n",
    "        for step in itertools.count():\n",
    "            if render:\n",
    "                env.render()\n",
    "            action = agent.decide(state, test=True, step=step)\n",
    "            observation, reward, done, info = env.step(action)\n",
    "            state = agent.get_next_state(state, observation)\n",
    "            episode_reward += reward\n",
    "            if done:\n",
    "                break\n",
    "        step += 1\n",
    "        steps.append(step)\n",
    "        episode_rewards.append(episode_reward)\n",
    "        logging.info('[测试] 回合 {}: 步骤 {}, 奖励 {}, 步数 {}'\n",
    "                .format(episode, step, episode_reward, np.sum(steps)))\n",
    "            \n",
    "    if verbose:\n",
    "        logging.info('[测试小结] 步数: 平均 = {}, 最小 = {}, 最大 = {}.' \\\n",
    "                .format(np.mean(steps), np.min(steps), np.max(steps)))\n",
    "        logging.info('[测试小结] 奖励: 平均 = {}, 最小 = {}, 最大 = {}' \\\n",
    "                .format(np.mean(episode_rewards), np.min(episode_rewards),\n",
    "                np.max(episode_rewards)))\n",
    "    return episode_rewards"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "render = False\n",
     "load_path = None # path of saved weights to load, or None to train fresh\n",
     "# Timestamped output directory so repeated runs do not overwrite each other.\n",
     "save_path = './output/' + env.unwrapped.spec.id + '-' + \\\n",
     "        time.strftime('%Y%m%d-%H%M%S') + '/model.h5'\n",
     "\n",
     "# Hyperparameters from the Nature DQN paper -- extremely slow to run.\n",
     "\"\"\"\n",
     "Nature 文章使用的参数, 运行极慢, 请勿轻易尝试\n",
     "\"\"\"\n",
     "input_shape = (4, 110, 84) # network input size (stack, height, width)\n",
     "batch_size = 32\n",
     "replay_memory_size = 1000000\n",
     "target_network_update_freq = 10000\n",
     "gamma = 0.99\n",
     "update_freq = 4 # interval (in env steps) between training updates\n",
     "learning_rate = 0.00025 # optimizer learning rate\n",
     "epsilon = 1. # initial exploration rate\n",
     "min_epsilon = 0.1 # final exploration rate\n",
     "epsilon_decrease = 9e-7 # exploration decay per step\n",
     "replay_start_size = 50000 # experiences collected before training starts\n",
     "random_initial_steps = 30 # random steps at the start of each episode\n",
     "frames = 50000000 # total training steps of the whole algorithm\n",
     "test_freq = 50000 # step interval between agent evaluations\n",
     "test_episodes = 100 # episodes per evaluation\n",
     "\n",
     "\n",
     "# Small-scale settings (a few hours of runtime, slight learning effect).\n",
     "# NOTE(review): this block re-assigns only some of the values above;\n",
     "# input_shape, gamma, update_freq, learning_rate and the epsilon settings\n",
     "# keep the Nature values from the previous block.\n",
     "\"\"\"\n",
     "小规模参数, 运行时间数小时, 有一点点训练效果\n",
     "\"\"\"\n",
     "batch_size = 32\n",
     "replay_memory_size = 50000\n",
     "target_network_update_freq = 4000\n",
     "replay_start_size = 10000\n",
     "random_initial_steps = 30\n",
     "frames = 100000\n",
     "test_freq = 25000\n",
     "test_episodes = 50\n",
     "\n",
     "\n",
     "# Tiny settings: finishes in minutes, essentially no learning effect.\n",
     "# \"\"\"\n",
     "# 超小规模参数, 数分钟即可运行完, 基本没有训练效果\n",
     "# \"\"\"\n",
     "# batch_size = 6\n",
     "# replay_memory_size = 5000\n",
     "# target_network_update_freq = 80\n",
     "# replay_start_size = 500\n",
     "# random_initial_steps = 30\n",
     "# frames = 7500\n",
     "# test_freq = 2500\n",
     "# test_episodes = 10"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "permute (Permute)            (None, 110, 84, 4)        0         \n",
      "_________________________________________________________________\n",
      "conv2d (Conv2D)              (None, 26, 20, 32)        8224      \n",
      "_________________________________________________________________\n",
      "conv2d_1 (Conv2D)            (None, 12, 9, 64)         32832     \n",
      "_________________________________________________________________\n",
      "conv2d_2 (Conv2D)            (None, 10, 7, 64)         36928     \n",
      "_________________________________________________________________\n",
      "flatten (Flatten)            (None, 4480)              0         \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 512)               2294272   \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 6)                 3078      \n",
      "=================================================================\n",
      "Total params: 2,375,334\n",
      "Trainable params: 2,375,334\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "2019-01-01 07:13:23,275 [INFO] 目标网络已更新\n",
      "2019-01-01 07:13:23,275 [INFO] 训练开始\n",
      "2019-01-01 07:13:47,448 [INFO] 训练 100, 回合 0.9996409000000118, 存储大小 400, 损失 6.952669355086982e-05\n",
      "2019-01-01 07:14:12,616 [INFO] 训练 200, 回合 0.9992809000000237, 存储大小 800, 损失 0.0050011612474918365\n",
      "2019-01-01 07:14:28,185 [INFO] 回合 0, 步数 1011, 奖励 -20.0, 总步数 1012\n",
      "2019-01-01 07:14:43,178 [INFO] 训练 300, 回合 0.9989209000000355, 存储大小 1200, 损失 0.005295116454362869\n",
      "2019-01-01 07:15:11,761 [INFO] 训练 400, 回合 0.9985609000000474, 存储大小 1600, 损失 4.329016519477591e-05\n",
      "2019-01-01 07:15:35,044 [INFO] 回合 1, 步数 912, 奖励 -21.0, 总步数 1925\n",
      "2019-01-01 07:15:40,687 [INFO] 训练 500, 回合 0.9982009000000592, 存储大小 2000, 损失 0.0049638813361525536\n",
      "2019-01-01 07:16:09,888 [INFO] 训练 600, 回合 0.9978409000000711, 存储大小 2400, 损失 0.00497302133589983\n",
      "2019-01-01 07:16:33,371 [INFO] 回合 2, 步数 793, 奖励 -21.0, 总步数 2719\n",
      "2019-01-01 07:16:39,475 [INFO] 训练 700, 回合 0.9974809000000829, 存储大小 2800, 损失 8.625220652902499e-05\n",
      "2019-01-01 07:17:08,119 [INFO] 训练 800, 回合 0.9971209000000948, 存储大小 3200, 损失 0.00011839660874102265\n",
      "2019-01-01 07:17:36,960 [INFO] 训练 900, 回合 0.9967609000001066, 存储大小 3600, 损失 0.010098924860358238\n",
      "2019-01-01 07:17:38,929 [INFO] 回合 3, 步数 914, 奖励 -21.0, 总步数 3634\n",
      "2019-01-01 07:18:05,998 [INFO] 训练 1000, 回合 0.9964009000001185, 存储大小 4000, 损失 0.0001408264070050791\n",
      "2019-01-01 07:18:35,217 [INFO] 训练 1100, 回合 0.9960409000001303, 存储大小 4400, 损失 0.005166203249245882\n",
      "2019-01-01 07:18:51,246 [INFO] 回合 4, 步数 992, 奖励 -20.0, 总步数 4627\n",
      "2019-01-01 07:19:04,278 [INFO] 训练 1200, 回合 0.9956809000001422, 存储大小 4800, 损失 0.004938897676765919\n",
      "2019-01-01 07:19:33,365 [INFO] 训练 1300, 回合 0.995320900000154, 存储大小 5200, 损失 0.0049417894333601\n",
      "2019-01-01 07:19:56,813 [INFO] 回合 5, 步数 899, 奖励 -20.0, 总步数 5527\n",
      "2019-01-01 07:20:02,668 [INFO] 训练 1400, 回合 0.9949609000001659, 存储大小 5600, 损失 8.269715908681974e-05\n",
      "2019-01-01 07:20:31,902 [INFO] 训练 1500, 回合 0.9946009000001778, 存储大小 6000, 损失 0.00017220384324900806\n",
      "2019-01-01 07:21:01,463 [INFO] 训练 1600, 回合 0.9942409000001896, 存储大小 6400, 损失 0.00501394085586071\n",
      "2019-01-01 07:21:03,485 [INFO] 回合 6, 步数 908, 奖励 -21.0, 总步数 6436\n",
      "2019-01-01 07:21:30,820 [INFO] 训练 1700, 回合 0.9938809000002015, 存储大小 6800, 损失 0.00010619861131999642\n",
      "2019-01-01 07:21:59,966 [INFO] 训练 1800, 回合 0.9935209000002133, 存储大小 7200, 损失 0.004999125376343727\n",
      "2019-01-01 07:22:11,075 [INFO] 回合 7, 步数 923, 奖励 -20.0, 总步数 7360\n",
      "2019-01-01 07:22:29,606 [INFO] 训练 1900, 回合 0.9931609000002252, 存储大小 7600, 损失 0.004954996984452009\n",
      "2019-01-01 07:22:59,428 [INFO] 训练 2000, 回合 0.992800900000237, 存储大小 8000, 损失 8.066499140113592e-05\n",
      "2019-01-01 07:23:25,242 [INFO] 回合 8, 步数 992, 奖励 -21.0, 总步数 8353\n",
      "2019-01-01 07:23:29,438 [INFO] 训练 2100, 回合 0.9924409000002489, 存储大小 8400, 损失 0.005641413852572441\n",
      "2019-01-01 07:23:59,639 [INFO] 训练 2200, 回合 0.9920809000002607, 存储大小 8800, 损失 0.00011237421131227165\n",
      "2019-01-01 07:24:30,054 [INFO] 训练 2300, 回合 0.9917209000002726, 存储大小 9200, 损失 9.342255361843854e-05\n",
      "2019-01-01 07:24:53,710 [INFO] 回合 9, 步数 1171, 奖励 -18.0, 总步数 9525\n",
      "2019-01-01 07:25:00,500 [INFO] 训练 2400, 回合 0.9913609000002844, 存储大小 9600, 损失 0.00010061463399324566\n",
      "2019-01-01 07:25:30,877 [INFO] 训练 2500, 回合 0.9910009000002963, 存储大小 10000, 损失 0.005402026232331991\n",
      "2019-01-01 07:26:01,539 [INFO] 训练 2600, 回合 0.9906409000003081, 存储大小 10400, 损失 9.703381510917097e-05\n",
      "2019-01-01 07:26:05,769 [INFO] 回合 10, 步数 944, 奖励 -19.0, 总步数 10470\n",
      "2019-01-01 07:26:32,095 [INFO] 训练 2700, 回合 0.99028090000032, 存储大小 10800, 损失 8.964162407210097e-05\n",
      "2019-01-01 07:27:02,449 [INFO] 训练 2800, 回合 0.9899209000003318, 存储大小 11200, 损失 0.004950098693370819\n",
      "2019-01-01 07:27:22,721 [INFO] 回合 11, 步数 1003, 奖励 -21.0, 总步数 11474\n",
      "2019-01-01 07:27:33,713 [INFO] 训练 2900, 回合 0.9895609000003437, 存储大小 11600, 损失 7.300431025214493e-05\n",
      "2019-01-01 07:28:04,801 [INFO] 训练 3000, 回合 0.9892009000003555, 存储大小 12000, 损失 0.005174863617867231\n",
      "2019-01-01 07:28:31,698 [INFO] 回合 12, 步数 885, 奖励 -21.0, 总步数 12360\n",
      "2019-01-01 07:28:36,017 [INFO] 训练 3100, 回合 0.9888409000003674, 存储大小 12400, 损失 0.0001424278161721304\n",
      "2019-01-01 07:29:07,257 [INFO] 训练 3200, 回合 0.9884809000003792, 存储大小 12800, 损失 8.78292485140264e-05\n",
      "2019-01-01 07:29:38,578 [INFO] 训练 3300, 回合 0.9881209000003911, 存储大小 13200, 损失 0.00011484585411380976\n",
      "2019-01-01 07:29:54,407 [INFO] 回合 13, 步数 1057, 奖励 -19.0, 总步数 13418\n",
      "2019-01-01 07:30:09,507 [INFO] 训练 3400, 回合 0.987760900000403, 存储大小 13600, 损失 0.009830944240093231\n",
      "2019-01-01 07:30:40,881 [INFO] 训练 3500, 回合 0.9874009000004148, 存储大小 14000, 损失 0.00010503661178518087\n",
      "2019-01-01 07:31:08,328 [INFO] 回合 14, 步数 946, 奖励 -21.0, 总步数 14365\n",
      "2019-01-01 07:31:12,318 [INFO] 训练 3600, 回合 0.9870409000004267, 存储大小 14400, 损失 5.540250276681036e-05\n",
      "2019-01-01 07:31:43,759 [INFO] 训练 3700, 回合 0.9866809000004385, 存储大小 14800, 损失 8.747617539484054e-05\n",
      "2019-01-01 07:32:15,684 [INFO] 训练 3800, 回合 0.9863209000004504, 存储大小 15200, 损失 0.010036972351372242\n",
      "2019-01-01 07:32:34,146 [INFO] 回合 15, 步数 1082, 奖励 -21.0, 总步数 15448\n",
      "2019-01-01 07:32:47,426 [INFO] 训练 3900, 回合 0.9859609000004622, 存储大小 15600, 损失 5.11862599523738e-05\n",
      "2019-01-01 07:33:19,557 [INFO] 训练 4000, 回合 0.9856009000004741, 存储大小 16000, 损失 0.00010806576756294817\n",
      "2019-01-01 07:33:19,597 [INFO] 目标网络已更新\n",
      "2019-01-01 07:33:41,883 [INFO] 回合 16, 步数 852, 奖励 -21.0, 总步数 16301\n",
      "2019-01-01 07:33:51,162 [INFO] 训练 4100, 回合 0.9852409000004859, 存储大小 16400, 损失 7.26819271221757e-05\n",
      "2019-01-01 07:34:23,013 [INFO] 训练 4200, 回合 0.9848809000004978, 存储大小 16800, 损失 0.004943440202623606\n",
      "2019-01-01 07:34:55,080 [INFO] 训练 4300, 回合 0.9845209000005096, 存储大小 17200, 损失 0.00011076130613218993\n",
      "2019-01-01 07:34:59,930 [INFO] 回合 17, 步数 978, 奖励 -20.0, 总步数 17280\n",
      "2019-01-01 07:35:27,833 [INFO] 训练 4400, 回合 0.9841609000005215, 存储大小 17600, 损失 0.005110707599669695\n",
      "2019-01-01 07:36:00,220 [INFO] 训练 4500, 回合 0.9838009000005333, 存储大小 18000, 损失 0.005364255513995886\n",
      "2019-01-01 07:36:09,550 [INFO] 回合 18, 步数 853, 奖励 -21.0, 总步数 18134\n",
      "2019-01-01 07:36:33,645 [INFO] 训练 4600, 回合 0.9834409000005452, 存储大小 18400, 损失 0.010049818083643913\n",
      "2019-01-01 07:37:06,763 [INFO] 训练 4700, 回合 0.983080900000557, 存储大小 18800, 损失 0.005057406611740589\n",
      "2019-01-01 07:37:29,241 [INFO] 回合 19, 步数 959, 奖励 -20.0, 总步数 19094\n",
      "2019-01-01 07:37:39,691 [INFO] 训练 4800, 回合 0.9827209000005689, 存储大小 19200, 损失 6.659397331532091e-05\n",
      "2019-01-01 07:38:13,258 [INFO] 训练 4900, 回合 0.9823609000005807, 存储大小 19600, 损失 0.005060179159045219\n",
      "2019-01-01 07:38:40,323 [INFO] 回合 20, 步数 852, 奖励 -21.0, 总步数 19947\n",
      "2019-01-01 07:38:46,443 [INFO] 训练 5000, 回合 0.9820009000005926, 存储大小 20000, 损失 0.0001672791549935937\n",
      "2019-01-01 07:39:19,713 [INFO] 训练 5100, 回合 0.9816409000006044, 存储大小 20400, 损失 5.63412468181923e-05\n",
      "2019-01-01 07:39:46,031 [INFO] 回合 21, 步数 782, 奖励 -21.0, 总步数 20730\n",
      "2019-01-01 07:39:53,751 [INFO] 训练 5200, 回合 0.9812809000006163, 存储大小 20800, 损失 6.501337338704616e-05\n",
      "2019-01-01 07:40:27,261 [INFO] 训练 5300, 回合 0.9809209000006281, 存储大小 21200, 损失 9.964045602828264e-05\n",
      "2019-01-01 07:40:54,526 [INFO] 回合 22, 步数 811, 奖励 -21.0, 总步数 21542\n",
      "2019-01-01 07:41:01,635 [INFO] 训练 5400, 回合 0.98056090000064, 存储大小 21600, 损失 0.0050551509484648705\n",
      "2019-01-01 07:41:36,250 [INFO] 训练 5500, 回合 0.9802009000006519, 存储大小 22000, 损失 0.0048956735990941525\n",
      "2019-01-01 07:42:11,211 [INFO] 训练 5600, 回合 0.9798409000006637, 存储大小 22400, 损失 0.005044360179454088\n",
      "2019-01-01 07:42:11,251 [INFO] 回合 23, 步数 884, 奖励 -21.0, 总步数 22427\n",
      "2019-01-01 07:42:46,144 [INFO] 训练 5700, 回合 0.9794809000006756, 存储大小 22800, 损失 0.00015740843082312495\n",
      "2019-01-01 07:43:20,320 [INFO] 回合 24, 步数 792, 奖励 -21.0, 总步数 23220\n",
      "2019-01-01 07:43:20,980 [INFO] 训练 5800, 回合 0.9791209000006874, 存储大小 23200, 损失 0.00015091930981725454\n",
      "2019-01-01 07:43:55,992 [INFO] 训练 5900, 回合 0.9787609000006993, 存储大小 23600, 损失 0.00504944147542119\n",
      "2019-01-01 07:44:30,744 [INFO] 训练 6000, 回合 0.9784009000007111, 存储大小 24000, 损失 0.0052033900283277035\n",
      "2019-01-01 07:44:31,124 [INFO] 回合 25, 步数 811, 奖励 -21.0, 总步数 24032\n",
      "2019-01-01 07:45:05,437 [INFO] 训练 6100, 回合 0.978040900000723, 存储大小 24400, 损失 0.00011856340279337019\n",
      "2019-01-01 07:45:40,241 [INFO] 训练 6200, 回合 0.9776809000007348, 存储大小 24800, 损失 0.00021738179202657193\n",
      "2019-01-01 07:46:14,720 [INFO] [测试] 回合 0: 步骤 783, 奖励 -21.0, 步数 783\n",
      "2019-01-01 07:46:36,643 [INFO] [测试] 回合 1: 步骤 916, 奖励 -21.0, 步数 1699\n",
      "2019-01-01 07:46:51,973 [INFO] [测试] 回合 2: 步骤 764, 奖励 -21.0, 步数 2463\n",
      "2019-01-01 07:47:09,208 [INFO] [测试] 回合 3: 步骤 844, 奖励 -21.0, 步数 3307\n",
      "2019-01-01 07:47:28,208 [INFO] [测试] 回合 4: 步骤 921, 奖励 -20.0, 步数 4228\n",
      "2019-01-01 07:47:45,486 [INFO] [测试] 回合 5: 步骤 855, 奖励 -21.0, 步数 5083\n",
      "2019-01-01 07:48:03,965 [INFO] [测试] 回合 6: 步骤 915, 奖励 -21.0, 步数 5998\n",
      "2019-01-01 07:48:22,818 [INFO] [测试] 回合 7: 步骤 979, 奖励 -21.0, 步数 6977\n",
      "2019-01-01 07:48:38,906 [INFO] [测试] 回合 8: 步骤 844, 奖励 -21.0, 步数 7821\n",
      "2019-01-01 07:48:55,189 [INFO] [测试] 回合 9: 步骤 855, 奖励 -21.0, 步数 8676\n",
      "2019-01-01 07:49:12,528 [INFO] [测试] 回合 10: 步骤 906, 奖励 -21.0, 步数 9582\n",
      "2019-01-01 07:49:28,869 [INFO] [测试] 回合 11: 步骤 844, 奖励 -21.0, 步数 10426\n",
      "2019-01-01 07:49:46,629 [INFO] [测试] 回合 12: 步骤 921, 奖励 -20.0, 步数 11347\n",
      "2019-01-01 07:50:02,955 [INFO] [测试] 回合 13: 步骤 855, 奖励 -21.0, 步数 12202\n",
      "2019-01-01 07:50:22,876 [INFO] [测试] 回合 14: 步骤 1043, 奖励 -20.0, 步数 13245\n",
      "2019-01-01 07:50:38,792 [INFO] [测试] 回合 15: 步骤 844, 奖励 -21.0, 步数 14089\n",
      "2019-01-01 07:50:54,779 [INFO] [测试] 回合 16: 步骤 844, 奖励 -21.0, 步数 14933\n",
      "2019-01-01 07:51:12,359 [INFO] [测试] 回合 17: 步骤 921, 奖励 -20.0, 步数 15854\n",
      "2019-01-01 07:51:29,814 [INFO] [测试] 回合 18: 步骤 915, 奖励 -21.0, 步数 16769\n",
      "2019-01-01 07:51:46,004 [INFO] [测试] 回合 19: 步骤 844, 奖励 -21.0, 步数 17613\n",
      "2019-01-01 07:52:00,478 [INFO] [测试] 回合 20: 步骤 764, 奖励 -21.0, 步数 18377\n",
      "2019-01-01 07:52:16,602 [INFO] [测试] 回合 21: 步骤 844, 奖励 -21.0, 步数 19221\n",
      "2019-01-01 07:52:35,307 [INFO] [测试] 回合 22: 步骤 979, 奖励 -21.0, 步数 20200\n",
      "2019-01-01 07:52:51,422 [INFO] [测试] 回合 23: 步骤 844, 奖励 -21.0, 步数 21044\n",
      "2019-01-01 07:53:07,686 [INFO] [测试] 回合 24: 步骤 844, 奖励 -21.0, 步数 21888\n",
      "2019-01-01 07:53:24,121 [INFO] [测试] 回合 25: 步骤 855, 奖励 -21.0, 步数 22743\n",
      "2019-01-01 07:53:39,055 [INFO] [测试] 回合 26: 步骤 783, 奖励 -21.0, 步数 23526\n",
      "2019-01-01 07:53:55,344 [INFO] [测试] 回合 27: 步骤 844, 奖励 -21.0, 步数 24370\n",
      "2019-01-01 07:54:11,402 [INFO] [测试] 回合 28: 步骤 844, 奖励 -21.0, 步数 25214\n",
      "2019-01-01 07:54:27,761 [INFO] [测试] 回合 29: 步骤 855, 奖励 -21.0, 步数 26069\n",
      "2019-01-01 07:54:45,361 [INFO] [测试] 回合 30: 步骤 921, 奖励 -20.0, 步数 26990\n",
      "2019-01-01 07:55:01,730 [INFO] [测试] 回合 31: 步骤 844, 奖励 -21.0, 步数 27834\n",
      "2019-01-01 07:55:19,454 [INFO] [测试] 回合 32: 步骤 921, 奖励 -20.0, 步数 28755\n",
      "2019-01-01 07:55:37,487 [INFO] [测试] 回合 33: 步骤 935, 奖励 -20.0, 步数 29690\n",
      "2019-01-01 07:55:54,150 [INFO] [测试] 回合 34: 步骤 844, 奖励 -21.0, 步数 30534\n",
      "2019-01-01 07:56:10,285 [INFO] [测试] 回合 35: 步骤 844, 奖励 -21.0, 步数 31378\n",
      "2019-01-01 07:56:26,483 [INFO] [测试] 回合 36: 步骤 844, 奖励 -21.0, 步数 32222\n",
      "2019-01-01 07:56:42,730 [INFO] [测试] 回合 37: 步骤 844, 奖励 -21.0, 步数 33066\n",
      "2019-01-01 07:57:00,522 [INFO] [测试] 回合 38: 步骤 917, 奖励 -21.0, 步数 33983\n",
      "2019-01-01 07:57:16,651 [INFO] [测试] 回合 39: 步骤 844, 奖励 -21.0, 步数 34827\n",
      "2019-01-01 07:57:32,946 [INFO] [测试] 回合 40: 步骤 844, 奖励 -21.0, 步数 35671\n",
      "2019-01-01 07:57:49,839 [INFO] [测试] 回合 41: 步骤 844, 奖励 -21.0, 步数 36515\n",
      "2019-01-01 07:58:06,878 [INFO] [测试] 回合 42: 步骤 764, 奖励 -21.0, 步数 37279\n",
      "2019-01-01 07:58:29,089 [INFO] [测试] 回合 43: 步骤 977, 奖励 -21.0, 步数 38256\n",
      "2019-01-01 07:58:47,843 [INFO] [测试] 回合 44: 步骤 855, 奖励 -21.0, 步数 39111\n",
      "2019-01-01 07:59:07,042 [INFO] [测试] 回合 45: 步骤 855, 奖励 -21.0, 步数 39966\n",
      "2019-01-01 07:59:27,636 [INFO] [测试] 回合 46: 步骤 905, 奖励 -21.0, 步数 40871\n",
      "2019-01-01 07:59:48,481 [INFO] [测试] 回合 47: 步骤 915, 奖励 -21.0, 步数 41786\n",
      "2019-01-01 08:00:07,735 [INFO] [测试] 回合 48: 步骤 844, 奖励 -21.0, 步数 42630\n",
      "2019-01-01 08:00:30,354 [INFO] [测试] 回合 49: 步骤 1001, 奖励 -20.0, 步数 43631\n",
      "2019-01-01 08:00:30,364 [INFO] [测试小结] 步数: 平均 = 872.62, 最小 = 764, 最大 = 1043.\n",
      "2019-01-01 08:00:30,374 [INFO] [测试小结] 奖励: 平均 = -20.84, 最小 = -21.0, 最大 = -20.0\n",
      "2019-01-01 08:00:30,394 [INFO] 创建文件夹 ./output/PongDeterministic-v4-20190124-071322\n",
      "2019-01-01 08:00:30,574 [INFO] 网络权重已保存 ./output/PongDeterministic-v4-20190124-071322/model.h5\n",
      "2019-01-01 08:00:30,744 [INFO] 网络权重已保存 ./output/PongDeterministic-v4-20190124-071322/model.6243.h5\n",
      "2019-01-01 08:00:30,764 [INFO] 回合 26, 步数 969, 奖励 -15.0, 总步数 25002\n",
      "2019-01-01 08:00:50,931 [INFO] 训练 6300, 回合 0.9773209000007467, 存储大小 25200, 损失 0.015041057951748371\n",
      "2019-01-01 08:01:26,527 [INFO] 训练 6400, 回合 0.9769609000007585, 存储大小 25600, 损失 0.00014138760161586106\n",
      "2019-01-01 08:01:43,818 [INFO] 回合 27, 步数 824, 奖励 -21.0, 总步数 25827\n",
      "2019-01-01 08:02:01,768 [INFO] 训练 6500, 回合 0.9766009000007704, 存储大小 26000, 损失 8.211677777580917e-05\n",
      "2019-01-01 08:02:36,967 [INFO] 训练 6600, 回合 0.9762409000007822, 存储大小 26400, 损失 0.005056244321167469\n",
      "2019-01-01 08:03:07,384 [INFO] 回合 28, 步数 945, 奖励 -21.0, 总步数 26773\n",
      "2019-01-01 08:03:12,463 [INFO] 训练 6700, 回合 0.9758809000007941, 存储大小 26800, 损失 0.010282173752784729\n",
      "2019-01-01 08:03:48,114 [INFO] 训练 6800, 回合 0.9755209000008059, 存储大小 27200, 损失 0.00017965266306418926\n",
      "2019-01-01 08:04:23,325 [INFO] 训练 6900, 回合 0.9751609000008178, 存储大小 27600, 损失 0.004924997687339783\n",
      "2019-01-01 08:04:44,814 [INFO] 回合 29, 步数 1093, 奖励 -21.0, 总步数 27867\n",
      "2019-01-01 08:04:59,761 [INFO] 训练 7000, 回合 0.9748009000008296, 存储大小 28000, 损失 0.0049437908455729485\n",
      "2019-01-01 08:05:35,982 [INFO] 训练 7100, 回合 0.9744409000008415, 存储大小 28400, 损失 0.004898494575172663\n",
      "2019-01-01 08:05:54,241 [INFO] 回合 30, 步数 764, 奖励 -21.0, 总步数 28632\n",
      "2019-01-01 08:06:12,922 [INFO] 训练 7200, 回合 0.9740809000008533, 存储大小 28800, 损失 0.005201266147196293\n",
      "2019-01-01 08:06:50,232 [INFO] 训练 7300, 回合 0.9737209000008652, 存储大小 29200, 损失 7.377802103292197e-05\n",
      "2019-01-01 08:07:24,123 [INFO] 回合 31, 步数 962, 奖励 -20.0, 总步数 29595\n",
      "2019-01-01 08:07:28,021 [INFO] 训练 7400, 回合 0.973360900000877, 存储大小 29600, 损失 5.2542483899742365e-05\n",
      "2019-01-01 08:08:05,111 [INFO] 训练 7500, 回合 0.9730009000008889, 存储大小 30000, 损失 0.009760704822838306\n",
      "2019-01-01 08:08:42,497 [INFO] 训练 7600, 回合 0.9726409000009008, 存储大小 30400, 损失 7.004110375419259e-05\n",
      "2019-01-01 08:08:46,947 [INFO] 回合 32, 步数 888, 奖励 -20.0, 总步数 30484\n",
      "2019-01-01 08:09:20,914 [INFO] 训练 7700, 回合 0.9722809000009126, 存储大小 30800, 损失 2.9851662475266494e-05\n",
      "2019-01-01 08:09:59,758 [INFO] 训练 7800, 回合 0.9719209000009245, 存储大小 31200, 损失 9.871051588561386e-05\n",
      "2019-01-01 08:10:27,458 [INFO] 回合 33, 步数 1036, 奖励 -21.0, 总步数 31521\n",
      "2019-01-01 08:10:38,128 [INFO] 训练 7900, 回合 0.9715609000009363, 存储大小 31600, 损失 6.783707794966176e-05\n",
      "2019-01-01 08:11:15,911 [INFO] 训练 8000, 回合 0.9712009000009482, 存储大小 32000, 损失 9.468595089856535e-05\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 08:11:15,941 [INFO] 目标网络已更新\n",
      "2019-01-01 08:11:54,695 [INFO] 训练 8100, 回合 0.97084090000096, 存储大小 32400, 损失 0.010320146568119526\n",
      "2019-01-01 08:11:55,155 [INFO] 回合 34, 步数 920, 奖励 -19.0, 总步数 32442\n",
      "2019-01-01 08:12:33,435 [INFO] 训练 8200, 回合 0.9704809000009719, 存储大小 32800, 损失 0.00011695676221279427\n",
      "2019-01-01 08:13:12,399 [INFO] 训练 8300, 回合 0.9701209000009837, 存储大小 33200, 损失 8.273567073047161e-05\n",
      "2019-01-01 08:13:45,107 [INFO] 回合 35, 步数 1123, 奖励 -20.0, 总步数 33566\n",
      "2019-01-01 08:13:52,417 [INFO] 训练 8400, 回合 0.9697609000009956, 存储大小 33600, 损失 0.005170745309442282\n",
      "2019-01-01 08:14:32,865 [INFO] 训练 8500, 回合 0.9694009000010074, 存储大小 34000, 损失 0.00015319156227633357\n",
      "2019-01-01 08:15:14,242 [INFO] 训练 8600, 回合 0.9690409000010193, 存储大小 34400, 损失 0.004856262821704149\n",
      "2019-01-01 08:15:31,999 [INFO] 回合 36, 步数 1048, 奖励 -21.0, 总步数 34615\n",
      "2019-01-01 08:15:54,264 [INFO] 训练 8700, 回合 0.9686809000010311, 存储大小 34800, 损失 0.005102431867271662\n",
      "2019-01-01 08:16:34,705 [INFO] 训练 8800, 回合 0.968320900001043, 存储大小 35200, 损失 0.005040741991251707\n",
      "2019-01-01 08:16:55,784 [INFO] 回合 37, 步数 824, 奖励 -21.0, 总步数 35440\n",
      "2019-01-01 08:17:15,982 [INFO] 训练 8900, 回合 0.9679609000010548, 存储大小 35600, 损失 7.11977991159074e-05\n",
      "2019-01-01 08:17:56,506 [INFO] 训练 9000, 回合 0.9676009000010667, 存储大小 36000, 损失 0.009798716753721237\n",
      "2019-01-01 08:18:17,606 [INFO] 回合 38, 步数 812, 奖励 -21.0, 总步数 36253\n",
      "2019-01-01 08:18:37,037 [INFO] 训练 9100, 回合 0.9672409000010785, 存储大小 36400, 损失 0.010183618403971195\n",
      "2019-01-01 08:19:19,206 [INFO] 训练 9200, 回合 0.9668809000010904, 存储大小 36800, 损失 7.508712587878108e-05\n",
      "2019-01-01 08:19:48,217 [INFO] 回合 39, 步数 854, 奖励 -21.0, 总步数 37108\n",
      "2019-01-01 08:20:02,408 [INFO] 训练 9300, 回合 0.9665209000011022, 存储大小 37200, 损失 6.311567267403007e-05\n",
      "2019-01-01 08:20:44,111 [INFO] 训练 9400, 回合 0.9661609000011141, 存储大小 37600, 损失 0.0048536756075918674\n",
      "2019-01-01 08:21:20,899 [INFO] 回合 40, 步数 886, 奖励 -21.0, 总步数 37995\n",
      "2019-01-01 08:21:26,080 [INFO] 训练 9500, 回合 0.965800900001126, 存储大小 38000, 损失 0.009788292460143566\n",
      "2019-01-01 08:22:07,673 [INFO] 训练 9600, 回合 0.9654409000011378, 存储大小 38400, 损失 0.005159065127372742\n",
      "2019-01-01 08:22:49,487 [INFO] 训练 9700, 回合 0.9650809000011497, 存储大小 38800, 损失 0.010215570218861103\n",
      "2019-01-01 08:23:04,099 [INFO] 回合 41, 步数 979, 奖励 -19.0, 总步数 38975\n",
      "2019-01-01 08:23:32,805 [INFO] 训练 9800, 回合 0.9647209000011615, 存储大小 39200, 损失 0.004975436255335808\n",
      "2019-01-01 08:24:15,152 [INFO] 训练 9900, 回合 0.9643609000011734, 存储大小 39600, 损失 0.004973416682332754\n",
      "2019-01-01 08:24:38,946 [INFO] 回合 42, 步数 884, 奖励 -21.0, 总步数 39860\n",
      "2019-01-01 08:24:59,024 [INFO] 训练 10000, 回合 0.9640009000011852, 存储大小 40000, 损失 0.009749960154294968\n",
      "2019-01-01 08:25:42,441 [INFO] 训练 10100, 回合 0.9636409000011971, 存储大小 40400, 损失 0.0048868958838284016\n",
      "2019-01-01 08:26:19,647 [INFO] 回合 43, 步数 916, 奖励 -19.0, 总步数 40777\n",
      "2019-01-01 08:26:27,437 [INFO] 训练 10200, 回合 0.9632809000012089, 存储大小 40800, 损失 8.560358401155099e-05\n",
      "2019-01-01 08:27:10,734 [INFO] 训练 10300, 回合 0.9629209000012208, 存储大小 41200, 损失 0.00010775425471365452\n",
      "2019-01-01 08:27:54,163 [INFO] 训练 10400, 回合 0.9625609000012326, 存储大小 41600, 损失 0.004972799681127071\n",
      "2019-01-01 08:28:06,602 [INFO] 回合 44, 步数 977, 奖励 -20.0, 总步数 41755\n",
      "2019-01-01 08:28:40,282 [INFO] 训练 10500, 回合 0.9622009000012445, 存储大小 42000, 损失 0.0050499471835792065\n",
      "2019-01-01 08:29:26,139 [INFO] 训练 10600, 回合 0.9618409000012563, 存储大小 42400, 损失 0.0047561610117554665\n",
      "2019-01-01 08:29:47,886 [INFO] 回合 45, 步数 880, 奖励 -21.0, 总步数 42636\n",
      "2019-01-01 08:30:11,491 [INFO] 训练 10700, 回合 0.9614809000012682, 存储大小 42800, 损失 0.005193616729229689\n",
      "2019-01-01 08:30:57,182 [INFO] 训练 10800, 回合 0.96112090000128, 存储大小 43200, 损失 0.005089828744530678\n",
      "2019-01-01 08:31:22,300 [INFO] 回合 46, 步数 826, 奖励 -21.0, 总步数 43463\n",
      "2019-01-01 08:31:43,446 [INFO] 训练 10900, 回合 0.9607609000012919, 存储大小 43600, 损失 0.005120721645653248\n",
      "2019-01-01 08:32:29,746 [INFO] 训练 11000, 回合 0.9604009000013037, 存储大小 44000, 损失 5.134511593496427e-05\n",
      "2019-01-01 08:33:00,708 [INFO] 回合 47, 步数 852, 奖励 -21.0, 总步数 44316\n",
      "2019-01-01 08:33:15,957 [INFO] 训练 11100, 回合 0.9600409000013156, 存储大小 44400, 损失 0.00013660317927133292\n",
      "2019-01-01 08:34:02,793 [INFO] 训练 11200, 回合 0.9596809000013274, 存储大小 44800, 损失 0.004965053405612707\n",
      "2019-01-01 08:34:47,447 [INFO] 回合 48, 步数 900, 奖励 -21.0, 总步数 45217\n",
      "2019-01-01 08:34:51,266 [INFO] 训练 11300, 回合 0.9593209000013393, 存储大小 45200, 损失 0.00012040577712468803\n",
      "2019-01-01 08:35:38,695 [INFO] 训练 11400, 回合 0.9589609000013511, 存储大小 45600, 损失 0.00015216253814287484\n",
      "2019-01-01 08:36:25,558 [INFO] 训练 11500, 回合 0.958600900001363, 存储大小 46000, 损失 5.546374450204894e-05\n",
      "2019-01-01 08:36:32,308 [INFO] 回合 49, 步数 884, 奖励 -21.0, 总步数 46102\n",
      "2019-01-01 08:37:15,142 [INFO] 训练 11600, 回合 0.9582409000013749, 存储大小 46400, 损失 7.419577741529793e-05\n",
      "2019-01-01 08:38:02,589 [INFO] 训练 11700, 回合 0.9578809000013867, 存储大小 46800, 损失 0.005192899610847235\n",
      "2019-01-01 08:38:14,338 [INFO] 回合 50, 步数 842, 奖励 -20.0, 总步数 46945\n",
      "2019-01-01 08:38:52,787 [INFO] 训练 11800, 回合 0.9575209000013986, 存储大小 47200, 损失 0.015434741973876953\n",
      "2019-01-01 08:39:41,062 [INFO] 训练 11900, 回合 0.9571609000014104, 存储大小 47600, 损失 0.00492897629737854\n",
      "2019-01-01 08:40:11,420 [INFO] 回合 51, 步数 950, 奖励 -20.0, 总步数 47896\n",
      "2019-01-01 08:40:30,099 [INFO] 训练 12000, 回合 0.9568009000014223, 存储大小 48000, 损失 0.004974123556166887\n",
      "2019-01-01 08:40:30,139 [INFO] 目标网络已更新\n",
      "2019-01-01 08:41:19,952 [INFO] 训练 12100, 回合 0.9564409000014341, 存储大小 48400, 损失 0.004981549922376871\n",
      "2019-01-01 08:42:01,379 [INFO] 回合 52, 步数 884, 奖励 -21.0, 总步数 48781\n",
      "2019-01-01 08:42:09,919 [INFO] 训练 12200, 回合 0.956080900001446, 存储大小 48800, 损失 0.00012322940165176988\n",
      "2019-01-01 08:42:53,865 [INFO] 训练 12300, 回合 0.9557209000014578, 存储大小 49200, 损失 5.260171747067943e-05\n",
      "2019-01-01 08:43:38,649 [INFO] 训练 12400, 回合 0.9553609000014697, 存储大小 49600, 损失 0.00014382235531229526\n",
      "2019-01-01 08:43:47,988 [INFO] 回合 53, 步数 955, 奖励 -20.0, 总步数 49737\n",
      "2019-01-01 08:44:57,480 [INFO] [测试] 回合 0: 步骤 792, 奖励 -21.0, 步数 792\n",
      "2019-01-01 08:45:36,693 [INFO] [测试] 回合 1: 步骤 792, 奖励 -21.0, 步数 1584\n",
      "2019-01-01 08:46:18,323 [INFO] [测试] 回合 2: 步骤 837, 奖励 -20.0, 步数 2421\n",
      "2019-01-01 08:47:05,478 [INFO] [测试] 回合 3: 步骤 940, 奖励 -19.0, 步数 3361\n",
      "2019-01-01 08:47:47,270 [INFO] [测试] 回合 4: 步骤 837, 奖励 -20.0, 步数 4198\n",
      "2019-01-01 08:48:28,818 [INFO] [测试] 回合 5: 步骤 837, 奖励 -20.0, 步数 5035\n",
      "2019-01-01 08:49:09,467 [INFO] [测试] 回合 6: 步骤 812, 奖励 -21.0, 步数 5847\n",
      "2019-01-01 08:49:51,452 [INFO] [测试] 回合 7: 步骤 837, 奖励 -20.0, 步数 6684\n",
      "2019-01-01 08:50:36,354 [INFO] [测试] 回合 8: 步骤 914, 奖励 -20.0, 步数 7598\n",
      "2019-01-01 08:51:17,412 [INFO] [测试] 回合 9: 步骤 850, 奖励 -21.0, 步数 8448\n",
      "2019-01-01 08:51:54,101 [INFO] [测试] 回合 10: 步骤 764, 奖励 -21.0, 步数 9212\n",
      "2019-01-01 08:52:32,480 [INFO] [测试] 回合 11: 步骤 812, 奖励 -21.0, 步数 10024\n",
      "2019-01-01 08:53:14,002 [INFO] [测试] 回合 12: 步骤 865, 奖励 -20.0, 步数 10889\n",
      "2019-01-01 08:53:54,593 [INFO] [测试] 回合 13: 步骤 837, 奖励 -20.0, 步数 11726\n",
      "2019-01-01 08:54:33,271 [INFO] [测试] 回合 14: 步骤 792, 奖励 -21.0, 步数 12518\n",
      "2019-01-01 08:55:13,061 [INFO] [测试] 回合 15: 步骤 811, 奖励 -21.0, 步数 13329\n",
      "2019-01-01 08:55:54,068 [INFO] [测试] 回合 16: 步骤 837, 奖励 -20.0, 步数 14166\n",
      "2019-01-01 08:56:32,715 [INFO] [测试] 回合 17: 步骤 792, 奖励 -21.0, 步数 14958\n",
      "2019-01-01 08:57:13,112 [INFO] [测试] 回合 18: 步骤 868, 奖励 -21.0, 步数 15826\n",
      "2019-01-01 08:57:52,799 [INFO] [测试] 回合 19: 步骤 820, 奖励 -21.0, 步数 16646\n",
      "2019-01-01 08:58:36,502 [INFO] [测试] 回合 20: 步骤 893, 奖励 -20.0, 步数 17539\n",
      "2019-01-01 08:59:15,608 [INFO] [测试] 回合 21: 步骤 792, 奖励 -21.0, 步数 18331\n",
      "2019-01-01 08:59:54,135 [INFO] [测试] 回合 22: 步骤 792, 奖励 -21.0, 步数 19123\n",
      "2019-01-01 09:00:30,771 [INFO] [测试] 回合 23: 步骤 792, 奖励 -21.0, 步数 19915\n",
      "2019-01-01 09:01:09,343 [INFO] [测试] 回合 24: 步骤 792, 奖励 -21.0, 步数 20707\n",
      "2019-01-01 09:01:48,900 [INFO] [测试] 回合 25: 步骤 812, 奖励 -21.0, 步数 21519\n",
      "2019-01-01 09:02:26,425 [INFO] [测试] 回合 26: 步骤 792, 奖励 -21.0, 步数 22311\n",
      "2019-01-01 09:03:05,690 [INFO] [测试] 回合 27: 步骤 821, 奖励 -21.0, 步数 23132\n",
      "2019-01-01 09:03:44,070 [INFO] [测试] 回合 28: 步骤 792, 奖励 -21.0, 步数 23924\n",
      "2019-01-01 09:04:24,225 [INFO] [测试] 回合 29: 步骤 812, 奖励 -21.0, 步数 24736\n",
      "2019-01-01 09:05:02,940 [INFO] [测试] 回合 30: 步骤 792, 奖励 -21.0, 步数 25528\n",
      "2019-01-01 09:05:45,001 [INFO] [测试] 回合 31: 步骤 865, 奖励 -20.0, 步数 26393\n",
      "2019-01-01 09:06:23,162 [INFO] [测试] 回合 32: 步骤 792, 奖励 -21.0, 步数 27185\n",
      "2019-01-01 09:07:02,431 [INFO] [测试] 回合 33: 步骤 810, 奖励 -21.0, 步数 27995\n",
      "2019-01-01 09:07:40,828 [INFO] [测试] 回合 34: 步骤 812, 奖励 -21.0, 步数 28807\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 09:08:19,604 [INFO] [测试] 回合 35: 步骤 810, 奖励 -21.0, 步数 29617\n",
      "2019-01-01 09:08:58,718 [INFO] [测试] 回合 36: 步骤 812, 奖励 -21.0, 步数 30429\n",
      "2019-01-01 09:09:39,276 [INFO] [测试] 回合 37: 步骤 837, 奖励 -20.0, 步数 31266\n",
      "2019-01-01 09:10:19,009 [INFO] [测试] 回合 38: 步骤 821, 奖励 -21.0, 步数 32087\n",
      "2019-01-01 09:10:59,027 [INFO] [测试] 回合 39: 步骤 820, 奖励 -21.0, 步数 32907\n",
      "2019-01-01 09:11:37,161 [INFO] [测试] 回合 40: 步骤 792, 奖励 -21.0, 步数 33699\n",
      "2019-01-01 09:12:15,732 [INFO] [测试] 回合 41: 步骤 838, 奖励 -20.0, 步数 34537\n",
      "2019-01-01 09:12:56,635 [INFO] [测试] 回合 42: 步骤 837, 奖励 -20.0, 步数 35374\n",
      "2019-01-01 09:13:35,522 [INFO] [测试] 回合 43: 步骤 783, 奖励 -21.0, 步数 36157\n",
      "2019-01-01 09:14:12,259 [INFO] [测试] 回合 44: 步骤 764, 奖励 -21.0, 步数 36921\n",
      "2019-01-01 09:14:51,713 [INFO] [测试] 回合 45: 步骤 812, 奖励 -21.0, 步数 37733\n",
      "2019-01-01 09:15:31,907 [INFO] [测试] 回合 46: 步骤 837, 奖励 -20.0, 步数 38570\n",
      "2019-01-01 09:16:10,127 [INFO] [测试] 回合 47: 步骤 810, 奖励 -21.0, 步数 39380\n",
      "2019-01-01 09:16:50,934 [INFO] [测试] 回合 48: 步骤 837, 奖励 -20.0, 步数 40217\n",
      "2019-01-01 09:17:36,490 [INFO] [测试] 回合 49: 步骤 792, 奖励 -21.0, 步数 41009\n",
      "2019-01-01 09:17:36,490 [INFO] [测试小结] 步数: 平均 = 820.18, 最小 = 764, 最大 = 940.\n",
      "2019-01-01 09:17:36,500 [INFO] [测试小结] 奖励: 平均 = -20.66, 最小 = -21.0, 最大 = -19.0\n",
      "2019-01-01 09:17:36,690 [INFO] 网络权重已保存 ./output/PongDeterministic-v4-20190124-071322/model.h5\n",
      "2019-01-01 09:17:36,740 [INFO] 网络权重已保存 ./output/PongDeterministic-v4-20190124-071322/model.12486.h5\n",
      "2019-01-01 09:17:36,760 [INFO] 回合 54, 步数 264, 奖励 -4.0, 总步数 50002\n",
      "2019-01-01 09:17:43,990 [INFO] 训练 12500, 回合 0.9550009000014815, 存储大小 50000, 损失 0.00010554787877481431\n",
      "2019-01-01 09:18:38,550 [INFO] 训练 12600, 回合 0.9546409000014934, 存储大小 50000, 损失 0.0001081433001672849\n",
      "2019-01-01 09:19:33,615 [INFO] 训练 12700, 回合 0.9542809000015052, 存储大小 50000, 损失 0.005016499198973179\n",
      "2019-01-01 09:19:56,363 [INFO] 回合 55, 步数 1024, 奖励 -20.0, 总步数 51027\n",
      "2019-01-01 09:20:27,908 [INFO] 训练 12800, 回合 0.9539209000015171, 存储大小 50000, 损失 9.025151666719466e-05\n",
      "2019-01-01 09:21:23,161 [INFO] 训练 12900, 回合 0.9535609000015289, 存储大小 50000, 损失 0.009938712231814861\n",
      "2019-01-01 09:21:53,428 [INFO] 回合 56, 步数 853, 奖励 -21.0, 总步数 51881\n",
      "2019-01-01 09:22:17,497 [INFO] 训练 13000, 回合 0.9532009000015408, 存储大小 50000, 损失 0.005098412279039621\n",
      "2019-01-01 09:23:12,113 [INFO] 训练 13100, 回合 0.9528409000015526, 存储大小 50000, 损失 0.00501358462497592\n",
      "2019-01-01 09:23:53,804 [INFO] 回合 57, 步数 882, 奖励 -21.0, 总步数 52764\n",
      "2019-01-01 09:24:06,648 [INFO] 训练 13200, 回合 0.9524809000015645, 存储大小 50000, 损失 6.998999742791057e-05\n",
      "2019-01-01 09:25:00,603 [INFO] 训练 13300, 回合 0.9521209000015763, 存储大小 50000, 损失 0.009942956268787384\n",
      "2019-01-01 09:25:55,381 [INFO] 训练 13400, 回合 0.9517609000015882, 存储大小 50000, 损失 7.966139673953876e-05\n",
      "2019-01-01 09:26:07,797 [INFO] 回合 58, 步数 982, 奖励 -19.0, 总步数 53747\n",
      "2019-01-01 09:26:50,116 [INFO] 训练 13500, 回合 0.9514009000016, 存储大小 50000, 损失 0.00015943750622682273\n",
      "2019-01-01 09:27:44,332 [INFO] 训练 13600, 回合 0.9510409000016119, 存储大小 50000, 损失 0.0051483651623129845\n",
      "2019-01-01 09:28:37,760 [INFO] 回合 59, 步数 1107, 奖励 -19.0, 总步数 54855\n",
      "2019-01-01 09:28:38,820 [INFO] 训练 13700, 回合 0.9506809000016238, 存储大小 50000, 损失 0.004879917949438095\n",
      "2019-01-01 09:29:33,557 [INFO] 训练 13800, 回合 0.9503209000016356, 存储大小 50000, 损失 3.28020760207437e-05\n",
      "2019-01-01 09:30:27,603 [INFO] 训练 13900, 回合 0.9499609000016475, 存储大小 50000, 损失 0.009814174845814705\n",
      "2019-01-01 09:30:33,682 [INFO] 回合 60, 步数 852, 奖励 -21.0, 总步数 55708\n",
      "2019-01-01 09:31:21,461 [INFO] 训练 14000, 回合 0.9496009000016593, 存储大小 50000, 损失 0.00010087521513924003\n",
      "2019-01-01 09:32:15,317 [INFO] 训练 14100, 回合 0.9492409000016712, 存储大小 50000, 损失 0.0001979235967155546\n",
      "2019-01-01 09:32:28,761 [INFO] 回合 61, 步数 854, 奖励 -21.0, 总步数 56563\n",
      "2019-01-01 09:33:09,374 [INFO] 训练 14200, 回合 0.948880900001683, 存储大小 50000, 损失 0.00011127383913844824\n",
      "2019-01-01 09:34:03,360 [INFO] 训练 14300, 回合 0.9485209000016949, 存储大小 50000, 损失 0.00010219412070000544\n",
      "2019-01-01 09:34:22,999 [INFO] 回合 62, 步数 843, 奖励 -21.0, 总步数 57407\n",
      "2019-01-01 09:34:57,758 [INFO] 训练 14400, 回合 0.9481609000017067, 存储大小 50000, 损失 0.005240641534328461\n",
      "2019-01-01 09:35:50,626 [INFO] 训练 14500, 回合 0.9478009000017186, 存储大小 50000, 损失 0.005066708195954561\n",
      "2019-01-01 09:36:13,360 [INFO] 回合 63, 步数 824, 奖励 -21.0, 总步数 58232\n",
      "2019-01-01 09:36:44,529 [INFO] 训练 14600, 回合 0.9474409000017304, 存储大小 50000, 损失 0.004896771628409624\n",
      "2019-01-01 09:37:38,181 [INFO] 训练 14700, 回合 0.9470809000017423, 存储大小 50000, 损失 0.004873240366578102\n",
      "2019-01-01 09:38:15,281 [INFO] 回合 64, 步数 912, 奖励 -21.0, 总步数 59145\n",
      "2019-01-01 09:38:31,363 [INFO] 训练 14800, 回合 0.9467209000017541, 存储大小 50000, 损失 7.125723641365767e-05\n",
      "2019-01-01 09:39:24,951 [INFO] 训练 14900, 回合 0.946360900001766, 存储大小 50000, 损失 0.00017158221453428268\n",
      "2019-01-01 09:40:18,674 [INFO] 训练 15000, 回合 0.9460009000017778, 存储大小 50000, 损失 0.00011011880997102708\n",
      "2019-01-01 09:40:22,683 [INFO] 回合 65, 步数 949, 奖励 -20.0, 总步数 60095\n",
      "2019-01-01 09:41:12,548 [INFO] 训练 15100, 回合 0.9456409000017897, 存储大小 50000, 损失 7.9738674685359e-05\n",
      "2019-01-01 09:42:06,620 [INFO] 训练 15200, 回合 0.9452809000018015, 存储大小 50000, 损失 0.005898010917007923\n",
      "2019-01-01 09:42:17,347 [INFO] 回合 66, 步数 852, 奖励 -21.0, 总步数 60948\n",
      "2019-01-01 09:42:59,588 [INFO] 训练 15300, 回合 0.9449209000018134, 存储大小 50000, 损失 0.005538377445191145\n",
      "2019-01-01 09:43:53,237 [INFO] 训练 15400, 回合 0.9445609000018252, 存储大小 50000, 损失 0.009656621143221855\n",
      "2019-01-01 09:44:19,442 [INFO] 回合 67, 步数 913, 奖励 -21.0, 总步数 61862\n",
      "2019-01-01 09:44:47,366 [INFO] 训练 15500, 回合 0.9442009000018371, 存储大小 50000, 损失 7.112828461686149e-05\n",
      "2019-01-01 09:45:41,212 [INFO] 训练 15600, 回合 0.943840900001849, 存储大小 50000, 损失 0.004918345715850592\n",
      "2019-01-01 09:46:12,938 [INFO] 回合 68, 步数 842, 奖励 -20.0, 总步数 62705\n",
      "2019-01-01 09:46:34,695 [INFO] 训练 15700, 回合 0.9434809000018608, 存储大小 50000, 损失 0.009792571887373924\n",
      "2019-01-01 09:47:28,608 [INFO] 训练 15800, 回合 0.9431209000018727, 存储大小 50000, 损失 9.128355304710567e-05\n",
      "2019-01-01 09:48:22,244 [INFO] 训练 15900, 回合 0.9427609000018845, 存储大小 50000, 损失 0.019216682761907578\n",
      "2019-01-01 09:48:41,667 [INFO] 回合 69, 步数 1109, 奖励 -21.0, 总步数 63815\n",
      "2019-01-01 09:49:15,596 [INFO] 训练 16000, 回合 0.9424009000018964, 存储大小 50000, 损失 0.00477676372975111\n",
      "2019-01-01 09:49:15,663 [INFO] 目标网络已更新\n",
      "2019-01-01 09:50:09,495 [INFO] 训练 16100, 回合 0.9420409000019082, 存储大小 50000, 损失 0.004995974712073803\n",
      "2019-01-01 09:50:47,225 [INFO] 回合 70, 步数 936, 奖励 -20.0, 总步数 64752\n",
      "2019-01-01 09:51:03,166 [INFO] 训练 16200, 回合 0.9416809000019201, 存储大小 50000, 损失 0.00013411648978944868\n",
      "2019-01-01 09:51:57,024 [INFO] 训练 16300, 回合 0.9413209000019319, 存储大小 50000, 损失 0.00012320313544478267\n",
      "2019-01-01 09:52:41,752 [INFO] 回合 71, 步数 852, 奖励 -21.0, 总步数 65605\n",
      "2019-01-01 09:52:50,741 [INFO] 训练 16400, 回合 0.9409609000019438, 存储大小 50000, 损失 0.00016386134666390717\n",
      "2019-01-01 09:53:44,751 [INFO] 训练 16500, 回合 0.9406009000019556, 存储大小 50000, 损失 8.030483149923384e-05\n",
      "2019-01-01 09:54:32,310 [INFO] 回合 72, 步数 824, 奖励 -21.0, 总步数 66430\n",
      "2019-01-01 09:54:38,321 [INFO] 训练 16600, 回合 0.9402409000019675, 存储大小 50000, 损失 0.004938559141010046\n",
      "2019-01-01 09:55:32,556 [INFO] 训练 16700, 回合 0.9398809000019793, 存储大小 50000, 损失 6.920886517036706e-05\n",
      "2019-01-01 09:56:26,634 [INFO] 训练 16800, 回合 0.9395209000019912, 存储大小 50000, 损失 0.00010912821744568646\n",
      "2019-01-01 09:56:50,334 [INFO] 回合 73, 步数 1026, 奖励 -19.0, 总步数 67457\n",
      "2019-01-01 09:57:20,134 [INFO] 训练 16900, 回合 0.939160900002003, 存储大小 50000, 损失 9.862329898169264e-05\n",
      "2019-01-01 09:58:13,688 [INFO] 训练 17000, 回合 0.9388009000020149, 存储大小 50000, 损失 0.005015483126044273\n",
      "2019-01-01 09:59:07,765 [INFO] 训练 17100, 回合 0.9384409000020267, 存储大小 50000, 损失 0.00011675456335069612\n",
      "2019-01-01 09:59:31,623 [INFO] 回合 74, 步数 1194, 奖励 -18.0, 总步数 68652\n",
      "2019-01-01 10:00:01,746 [INFO] 训练 17200, 回合 0.9380809000020386, 存储大小 50000, 损失 0.0050497958436608315\n",
      "2019-01-01 10:00:56,369 [INFO] 训练 17300, 回合 0.9377209000020504, 存储大小 50000, 损失 0.004838139750063419\n",
      "2019-01-01 10:01:15,487 [INFO] 回合 75, 步数 764, 奖励 -21.0, 总步数 69417\n",
      "2019-01-01 10:01:50,354 [INFO] 训练 17400, 回合 0.9373609000020623, 存储大小 50000, 损失 0.000131014094222337\n",
      "2019-01-01 10:02:44,239 [INFO] 训练 17500, 回合 0.9370009000020741, 存储大小 50000, 损失 6.12440999248065e-05\n",
      "2019-01-01 10:02:58,516 [INFO] 回合 76, 步数 764, 奖励 -21.0, 总步数 70182\n",
      "2019-01-01 10:03:39,091 [INFO] 训练 17600, 回合 0.936640900002086, 存储大小 50000, 损失 0.004943275824189186\n",
      "2019-01-01 10:04:33,608 [INFO] 训练 17700, 回合 0.9362809000020978, 存储大小 50000, 损失 0.00948044378310442\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 10:05:08,106 [INFO] 回合 77, 步数 948, 奖励 -21.0, 总步数 71131\n",
      "2019-01-01 10:05:27,398 [INFO] 训练 17800, 回合 0.9359209000021097, 存储大小 50000, 损失 0.0001252963556908071\n",
      "2019-01-01 10:06:20,220 [INFO] 训练 17900, 回合 0.9355609000021216, 存储大小 50000, 损失 0.0001242768339579925\n",
      "2019-01-01 10:06:49,322 [INFO] 回合 78, 步数 764, 奖励 -21.0, 总步数 71896\n",
      "2019-01-01 10:07:14,489 [INFO] 训练 18000, 回合 0.9352009000021334, 存储大小 50000, 损失 0.00021455474779941142\n",
      "2019-01-01 10:08:08,797 [INFO] 训练 18100, 回合 0.9348409000021453, 存储大小 50000, 损失 0.00480900751426816\n",
      "2019-01-01 10:08:50,656 [INFO] 回合 79, 步数 884, 奖励 -21.0, 总步数 72781\n",
      "2019-01-01 10:09:03,674 [INFO] 训练 18200, 回合 0.9344809000021571, 存储大小 50000, 损失 0.00011050832108594477\n",
      "2019-01-01 10:09:58,157 [INFO] 训练 18300, 回合 0.934120900002169, 存储大小 50000, 损失 0.0048782615922391415\n",
      "2019-01-01 10:10:52,101 [INFO] 训练 18400, 回合 0.9337609000021808, 存储大小 50000, 损失 0.00011736399756046012\n",
      "2019-01-01 10:10:57,962 [INFO] 回合 80, 步数 939, 奖励 -19.0, 总步数 73721\n",
      "2019-01-01 10:11:48,371 [INFO] 训练 18500, 回合 0.9334009000021927, 存储大小 50000, 损失 0.005105503369122744\n",
      "2019-01-01 10:12:43,309 [INFO] 训练 18600, 回合 0.9330409000022045, 存储大小 50000, 损失 5.113199586048722e-05\n",
      "2019-01-01 10:12:47,080 [INFO] 回合 81, 步数 792, 奖励 -21.0, 总步数 74514\n",
      "2019-01-01 10:13:30,649 [INFO] 训练 18700, 回合 0.9326809000022164, 存储大小 50000, 损失 0.00011202161840628833\n",
      "2019-01-01 10:14:22,798 [INFO] [测试] 回合 0: 步骤 764, 奖励 -21.0, 步数 764\n",
      "2019-01-01 10:15:00,305 [INFO] [测试] 回合 1: 步骤 764, 奖励 -21.0, 步数 1528\n",
      "2019-01-01 10:15:39,156 [INFO] [测试] 回合 2: 步骤 764, 奖励 -21.0, 步数 2292\n",
      "2019-01-01 10:16:16,875 [INFO] [测试] 回合 3: 步骤 764, 奖励 -21.0, 步数 3056\n",
      "2019-01-01 10:16:55,214 [INFO] [测试] 回合 4: 步骤 764, 奖励 -21.0, 步数 3820\n",
      "2019-01-01 10:17:32,841 [INFO] [测试] 回合 5: 步骤 764, 奖励 -21.0, 步数 4584\n",
      "2019-01-01 10:18:10,410 [INFO] [测试] 回合 6: 步骤 764, 奖励 -21.0, 步数 5348\n",
      "2019-01-01 10:18:47,214 [INFO] [测试] 回合 7: 步骤 764, 奖励 -21.0, 步数 6112\n",
      "2019-01-01 10:19:24,838 [INFO] [测试] 回合 8: 步骤 764, 奖励 -21.0, 步数 6876\n",
      "2019-01-01 10:20:02,347 [INFO] [测试] 回合 9: 步骤 764, 奖励 -21.0, 步数 7640\n",
      "2019-01-01 10:20:40,350 [INFO] [测试] 回合 10: 步骤 764, 奖励 -21.0, 步数 8404\n",
      "2019-01-01 10:21:17,052 [INFO] [测试] 回合 11: 步骤 764, 奖励 -21.0, 步数 9168\n",
      "2019-01-01 10:21:54,847 [INFO] [测试] 回合 12: 步骤 764, 奖励 -21.0, 步数 9932\n",
      "2019-01-01 10:22:33,914 [INFO] [测试] 回合 13: 步骤 764, 奖励 -21.0, 步数 10696\n",
      "2019-01-01 10:23:11,786 [INFO] [测试] 回合 14: 步骤 764, 奖励 -21.0, 步数 11460\n",
      "2019-01-01 10:23:49,860 [INFO] [测试] 回合 15: 步骤 764, 奖励 -21.0, 步数 12224\n",
      "2019-01-01 10:24:27,613 [INFO] [测试] 回合 16: 步骤 764, 奖励 -21.0, 步数 12988\n",
      "2019-01-01 10:25:06,012 [INFO] [测试] 回合 17: 步骤 764, 奖励 -21.0, 步数 13752\n",
      "2019-01-01 10:25:43,831 [INFO] [测试] 回合 18: 步骤 764, 奖励 -21.0, 步数 14516\n",
      "2019-01-01 10:26:24,289 [INFO] [测试] 回合 19: 步骤 764, 奖励 -21.0, 步数 15280\n",
      "2019-01-01 10:27:04,946 [INFO] [测试] 回合 20: 步骤 764, 奖励 -21.0, 步数 16044\n",
      "2019-01-01 10:27:44,605 [INFO] [测试] 回合 21: 步骤 764, 奖励 -21.0, 步数 16808\n",
      "2019-01-01 10:28:21,600 [INFO] [测试] 回合 22: 步骤 764, 奖励 -21.0, 步数 17572\n",
      "2019-01-01 10:29:00,578 [INFO] [测试] 回合 23: 步骤 764, 奖励 -21.0, 步数 18336\n",
      "2019-01-01 10:29:36,884 [INFO] [测试] 回合 24: 步骤 764, 奖励 -21.0, 步数 19100\n",
      "2019-01-01 10:30:16,929 [INFO] [测试] 回合 25: 步骤 764, 奖励 -21.0, 步数 19864\n",
      "2019-01-01 10:31:02,112 [INFO] [测试] 回合 26: 步骤 764, 奖励 -21.0, 步数 20628\n",
      "2019-01-01 10:31:48,456 [INFO] [测试] 回合 27: 步骤 764, 奖励 -21.0, 步数 21392\n",
      "2019-01-01 10:32:33,793 [INFO] [测试] 回合 28: 步骤 764, 奖励 -21.0, 步数 22156\n",
      "2019-01-01 10:33:20,417 [INFO] [测试] 回合 29: 步骤 764, 奖励 -21.0, 步数 22920\n",
      "2019-01-01 10:34:06,148 [INFO] [测试] 回合 30: 步骤 764, 奖励 -21.0, 步数 23684\n",
      "2019-01-01 10:34:50,702 [INFO] [测试] 回合 31: 步骤 764, 奖励 -21.0, 步数 24448\n",
      "2019-01-01 10:35:35,867 [INFO] [测试] 回合 32: 步骤 764, 奖励 -21.0, 步数 25212\n",
      "2019-01-01 10:36:21,856 [INFO] [测试] 回合 33: 步骤 764, 奖励 -21.0, 步数 25976\n",
      "2019-01-01 10:37:06,690 [INFO] [测试] 回合 34: 步骤 764, 奖励 -21.0, 步数 26740\n",
      "2019-01-01 10:37:55,366 [INFO] [测试] 回合 35: 步骤 764, 奖励 -21.0, 步数 27504\n",
      "2019-01-01 10:38:42,032 [INFO] [测试] 回合 36: 步骤 764, 奖励 -21.0, 步数 28268\n",
      "2019-01-01 10:39:27,941 [INFO] [测试] 回合 37: 步骤 764, 奖励 -21.0, 步数 29032\n",
      "2019-01-01 10:40:13,914 [INFO] [测试] 回合 38: 步骤 764, 奖励 -21.0, 步数 29796\n",
      "2019-01-01 10:41:00,016 [INFO] [测试] 回合 39: 步骤 764, 奖励 -21.0, 步数 30560\n",
      "2019-01-01 10:41:45,505 [INFO] [测试] 回合 40: 步骤 764, 奖励 -21.0, 步数 31324\n",
      "2019-01-01 10:42:30,297 [INFO] [测试] 回合 41: 步骤 764, 奖励 -21.0, 步数 32088\n",
      "2019-01-01 10:43:16,638 [INFO] [测试] 回合 42: 步骤 764, 奖励 -21.0, 步数 32852\n",
      "2019-01-01 10:44:02,246 [INFO] [测试] 回合 43: 步骤 764, 奖励 -21.0, 步数 33616\n",
      "2019-01-01 10:44:47,258 [INFO] [测试] 回合 44: 步骤 764, 奖励 -21.0, 步数 34380\n",
      "2019-01-01 10:45:32,548 [INFO] [测试] 回合 45: 步骤 764, 奖励 -21.0, 步数 35144\n",
      "2019-01-01 10:46:17,938 [INFO] [测试] 回合 46: 步骤 764, 奖励 -21.0, 步数 35908\n",
      "2019-01-01 10:47:03,349 [INFO] [测试] 回合 47: 步骤 764, 奖励 -21.0, 步数 36672\n",
      "2019-01-01 10:47:48,507 [INFO] [测试] 回合 48: 步骤 764, 奖励 -21.0, 步数 37436\n",
      "2019-01-01 10:48:33,385 [INFO] [测试] 回合 49: 步骤 764, 奖励 -21.0, 步数 38200\n",
      "2019-01-01 10:48:33,387 [INFO] [测试小结] 步数: 平均 = 764.0, 最小 = 764, 最大 = 764.\n",
      "2019-01-01 10:48:33,403 [INFO] [测试小结] 奖励: 平均 = -21.0, 最小 = -21.0, 最大 = -21.0\n",
      "2019-01-01 10:48:33,452 [INFO] 回合 82, 步数 487, 奖励 -9.0, 总步数 75002\n",
      "2019-01-01 10:49:11,303 [INFO] 训练 18800, 回合 0.9323209000022282, 存储大小 50000, 损失 0.010086768306791782\n",
      "2019-01-01 10:50:04,888 [INFO] 训练 18900, 回合 0.9319609000022401, 存储大小 50000, 损失 0.004691357724368572\n",
      "2019-01-01 10:50:55,393 [INFO] 回合 83, 步数 1062, 奖励 -21.0, 总步数 76065\n",
      "2019-01-01 10:50:58,133 [INFO] 训练 19000, 回合 0.9316009000022519, 存储大小 50000, 损失 0.00013591644528787583\n",
      "2019-01-01 10:51:51,151 [INFO] 训练 19100, 回合 0.9312409000022638, 存储大小 50000, 损失 0.00011471923789940774\n",
      "2019-01-01 10:52:44,996 [INFO] 训练 19200, 回合 0.9308809000022756, 存储大小 50000, 损失 0.0050059896893799305\n",
      "2019-01-01 10:52:59,541 [INFO] 回合 84, 步数 917, 奖励 -20.0, 总步数 76983\n",
      "2019-01-01 10:53:45,299 [INFO] 训练 19300, 回合 0.9305209000022875, 存储大小 50000, 损失 0.004734855145215988\n",
      "2019-01-01 10:54:44,089 [INFO] 训练 19400, 回合 0.9301609000022993, 存储大小 50000, 损失 0.004838769789785147\n",
      "2019-01-01 10:55:27,997 [INFO] 回合 85, 步数 1016, 奖励 -18.0, 总步数 78000\n",
      "2019-01-01 10:55:40,718 [INFO] 训练 19500, 回合 0.9298009000023112, 存储大小 50000, 损失 0.014265062287449837\n",
      "2019-01-01 10:56:35,832 [INFO] 训练 19600, 回合 0.929440900002323, 存储大小 50000, 损失 0.0047987597063183784\n",
      "2019-01-01 10:57:31,991 [INFO] 训练 19700, 回合 0.9290809000023349, 存储大小 50000, 损失 0.00015332261682488024\n",
      "2019-01-01 10:57:51,627 [INFO] 回合 86, 步数 1034, 奖励 -19.0, 总步数 79035\n",
      "2019-01-01 10:58:25,610 [INFO] 训练 19800, 回合 0.9287209000023467, 存储大小 50000, 损失 0.00475681247189641\n",
      "2019-01-01 10:59:18,711 [INFO] 训练 19900, 回合 0.9283609000023586, 存储大小 50000, 损失 0.00012199743650853634\n",
      "2019-01-01 11:00:12,358 [INFO] 训练 20000, 回合 0.9280009000023705, 存储大小 50000, 损失 8.047121809795499e-05\n",
      "2019-01-01 11:00:12,466 [INFO] 目标网络已更新\n",
      "2019-01-01 11:00:12,538 [INFO] 回合 87, 步数 1054, 奖励 -20.0, 总步数 80090\n",
      "2019-01-01 11:01:04,511 [INFO] 训练 20100, 回合 0.9276409000023823, 存储大小 50000, 损失 0.0001629430626053363\n",
      "2019-01-01 11:01:57,739 [INFO] 训练 20200, 回合 0.9272809000023942, 存储大小 50000, 损失 8.779038034845144e-05\n",
      "2019-01-01 11:02:15,198 [INFO] 回合 88, 步数 931, 奖励 -20.0, 总步数 81022\n",
      "2019-01-01 11:02:50,470 [INFO] 训练 20300, 回合 0.926920900002406, 存储大小 50000, 损失 0.004905343055725098\n",
      "2019-01-01 11:03:43,468 [INFO] 训练 20400, 回合 0.9265609000024179, 存储大小 50000, 损失 8.033677295316011e-05\n",
      "2019-01-01 11:04:22,625 [INFO] 回合 89, 步数 948, 奖励 -20.0, 总步数 81971\n",
      "2019-01-01 11:04:39,541 [INFO] 训练 20500, 回合 0.9262009000024297, 存储大小 50000, 损失 4.5914759539300576e-05\n",
      "2019-01-01 11:05:32,243 [INFO] 训练 20600, 回合 0.9258409000024416, 存储大小 50000, 损失 0.009519419632852077\n",
      "2019-01-01 11:06:09,270 [INFO] 回合 90, 步数 801, 奖励 -21.0, 总步数 82773\n",
      "2019-01-01 11:06:24,641 [INFO] 训练 20700, 回合 0.9254809000024534, 存储大小 50000, 损失 0.004774325527250767\n",
      "2019-01-01 11:07:17,472 [INFO] 训练 20800, 回合 0.9251209000024653, 存储大小 50000, 损失 0.009701000526547432\n",
      "2019-01-01 11:08:09,730 [INFO] 训练 20900, 回合 0.9247609000024771, 存储大小 50000, 损失 0.004860905930399895\n",
      "2019-01-01 11:08:11,482 [INFO] 回合 91, 步数 931, 奖励 -20.0, 总步数 83705\n",
      "2019-01-01 11:09:01,792 [INFO] 训练 21000, 回合 0.924400900002489, 存储大小 50000, 损失 0.00012266486010048538\n",
      "2019-01-01 11:09:55,411 [INFO] 训练 21100, 回合 0.9240409000025008, 存储大小 50000, 损失 5.613173561869189e-05\n",
      "2019-01-01 11:10:13,544 [INFO] 回合 92, 步数 922, 奖励 -21.0, 总步数 84628\n",
      "2019-01-01 11:10:53,598 [INFO] 训练 21200, 回合 0.9236809000025127, 存储大小 50000, 损失 0.004652800504118204\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 11:11:47,293 [INFO] 训练 21300, 回合 0.9233209000025245, 存储大小 50000, 损失 0.00011355702736182138\n",
      "2019-01-01 11:12:17,971 [INFO] 回合 93, 步数 900, 奖励 -21.0, 总步数 85529\n",
      "2019-01-01 11:12:39,880 [INFO] 训练 21400, 回合 0.9229609000025364, 存储大小 50000, 损失 0.0048707230016589165\n",
      "2019-01-01 11:13:31,455 [INFO] 训练 21500, 回合 0.9226009000025482, 存储大小 50000, 损失 0.004719461314380169\n",
      "2019-01-01 11:14:11,380 [INFO] 回合 94, 步数 869, 奖励 -21.0, 总步数 86399\n",
      "2019-01-01 11:14:24,044 [INFO] 训练 21600, 回合 0.9222409000025601, 存储大小 50000, 损失 0.00017177779227495193\n",
      "2019-01-01 11:15:16,487 [INFO] 训练 21700, 回合 0.921880900002572, 存储大小 50000, 损失 0.0001753100223140791\n",
      "2019-01-01 11:15:54,063 [INFO] 回合 95, 步数 783, 奖励 -21.0, 总步数 87183\n",
      "2019-01-01 11:16:10,823 [INFO] 训练 21800, 回合 0.9215209000025838, 存储大小 50000, 损失 0.013821765780448914\n",
      "2019-01-01 11:17:04,187 [INFO] 训练 21900, 回合 0.9211609000025957, 存储大小 50000, 损失 0.004503559786826372\n",
      "2019-01-01 11:17:56,868 [INFO] 训练 22000, 回合 0.9208009000026075, 存储大小 50000, 损失 9.02197789400816e-05\n",
      "2019-01-01 11:18:17,052 [INFO] 回合 96, 步数 1065, 奖励 -19.0, 总步数 88249\n",
      "2019-01-01 11:18:49,922 [INFO] 训练 22100, 回合 0.9204409000026194, 存储大小 50000, 损失 0.009190400131046772\n",
      "2019-01-01 11:19:42,779 [INFO] 训练 22200, 回合 0.9200809000026312, 存储大小 50000, 损失 0.004605864640325308\n",
      "2019-01-01 11:20:33,839 [INFO] 回合 97, 步数 1041, 奖励 -19.0, 总步数 89291\n",
      "2019-01-01 11:20:34,909 [INFO] 训练 22300, 回合 0.9197209000026431, 存储大小 50000, 损失 0.0001584937854204327\n",
      "2019-01-01 11:21:26,761 [INFO] 训练 22400, 回合 0.9193609000026549, 存储大小 50000, 损失 9.006913751363754e-05\n",
      "2019-01-01 11:22:18,248 [INFO] 训练 22500, 回合 0.9190009000026668, 存储大小 50000, 损失 0.00024173437850549817\n",
      "2019-01-01 11:22:22,664 [INFO] 回合 98, 步数 843, 奖励 -21.0, 总步数 90135\n",
      "2019-01-01 11:23:03,387 [INFO] 训练 22600, 回合 0.9186409000026786, 存储大小 50000, 损失 0.004636019468307495\n",
      "2019-01-01 11:23:48,066 [INFO] 训练 22700, 回合 0.9182809000026905, 存储大小 50000, 损失 0.009031029418110847\n",
      "2019-01-01 11:23:49,967 [INFO] 回合 99, 步数 783, 奖励 -21.0, 总步数 90919\n",
      "2019-01-01 11:24:33,677 [INFO] 训练 22800, 回合 0.9179209000027023, 存储大小 50000, 损失 0.004462072160094976\n",
      "2019-01-01 11:25:20,019 [INFO] 训练 22900, 回合 0.9175609000027142, 存储大小 50000, 损失 0.0044585238210856915\n",
      "2019-01-01 11:25:39,104 [INFO] 回合 100, 步数 951, 奖励 -20.0, 总步数 91871\n",
      "2019-01-01 11:26:05,582 [INFO] 训练 23000, 回合 0.917200900002726, 存储大小 50000, 损失 0.004316230770200491\n",
      "2019-01-01 11:26:50,726 [INFO] 训练 23100, 回合 0.9168409000027379, 存储大小 50000, 损失 0.004512345883995295\n",
      "2019-01-01 11:27:22,005 [INFO] 回合 101, 步数 909, 奖励 -20.0, 总步数 92781\n",
      "2019-01-01 11:27:35,885 [INFO] 训练 23200, 回合 0.9164809000027497, 存储大小 50000, 损失 0.0001207998429890722\n",
      "2019-01-01 11:28:21,023 [INFO] 训练 23300, 回合 0.9161209000027616, 存储大小 50000, 损失 0.013722389936447144\n",
      "2019-01-01 11:29:06,461 [INFO] 训练 23400, 回合 0.9157609000027734, 存储大小 50000, 损失 0.0002401389938313514\n",
      "2019-01-01 11:29:08,347 [INFO] 回合 102, 步数 940, 奖励 -21.0, 总步数 93722\n",
      "2019-01-01 11:29:50,873 [INFO] 训练 23500, 回合 0.9154009000027853, 存储大小 50000, 损失 0.00024877715623006225\n",
      "2019-01-01 11:30:34,125 [INFO] 回合 103, 步数 764, 奖励 -21.0, 总步数 94487\n",
      "2019-01-01 11:30:36,242 [INFO] 训练 23600, 回合 0.9150409000027971, 存储大小 50000, 损失 0.0002470028412062675\n",
      "2019-01-01 11:31:20,760 [INFO] 训练 23700, 回合 0.914680900002809, 存储大小 50000, 损失 0.00015668969717808068\n",
      "2019-01-01 11:32:05,281 [INFO] 回合 104, 步数 813, 奖励 -21.0, 总步数 95301\n",
      "2019-01-01 11:32:05,776 [INFO] 训练 23800, 回合 0.9143209000028208, 存储大小 50000, 损失 0.013646859675645828\n",
      "2019-01-01 11:32:50,641 [INFO] 训练 23900, 回合 0.9139609000028327, 存储大小 50000, 损失 0.004542382899671793\n",
      "2019-01-01 11:33:35,911 [INFO] 训练 24000, 回合 0.9136009000028446, 存储大小 50000, 损失 0.004350860137492418\n",
      "2019-01-01 11:33:35,966 [INFO] 目标网络已更新\n",
      "2019-01-01 11:33:57,647 [INFO] 回合 105, 步数 996, 奖励 -20.0, 总步数 96298\n",
      "2019-01-01 11:34:21,076 [INFO] 训练 24100, 回合 0.9132409000028564, 存储大小 50000, 损失 0.00011564647138584405\n",
      "2019-01-01 11:35:05,710 [INFO] 训练 24200, 回合 0.9128809000028683, 存储大小 50000, 损失 0.00015924205945339054\n",
      "2019-01-01 11:35:39,202 [INFO] 回合 106, 步数 902, 奖励 -20.0, 总步数 97201\n",
      "2019-01-01 11:35:51,362 [INFO] 训练 24300, 回合 0.9125209000028801, 存储大小 50000, 损失 0.004939386621117592\n",
      "2019-01-01 11:36:36,813 [INFO] 训练 24400, 回合 0.912160900002892, 存储大小 50000, 损失 0.00029918772634118795\n",
      "2019-01-01 11:37:22,249 [INFO] 训练 24500, 回合 0.9118009000029038, 存储大小 50000, 损失 0.009624644182622433\n",
      "2019-01-01 11:37:26,367 [INFO] 回合 107, 步数 944, 奖励 -21.0, 总步数 98146\n",
      "2019-01-01 11:38:07,388 [INFO] 训练 24600, 回合 0.9114409000029157, 存储大小 50000, 损失 0.0044519477523863316\n",
      "2019-01-01 11:38:52,379 [INFO] 训练 24700, 回合 0.9110809000029275, 存储大小 50000, 损失 0.009027558378875256\n",
      "2019-01-01 11:39:14,823 [INFO] 回合 108, 步数 959, 奖励 -20.0, 总步数 99106\n",
      "2019-01-01 11:39:37,967 [INFO] 训练 24800, 回合 0.9107209000029394, 存储大小 50000, 损失 0.014315520413219929\n",
      "2019-01-01 11:40:23,497 [INFO] 训练 24900, 回合 0.9103609000029512, 存储大小 50000, 损失 0.005008659791201353\n",
      "2019-01-01 11:40:53,382 [INFO] 回合 109, 步数 869, 奖励 -21.0, 总步数 99976\n",
      "2019-01-01 11:41:29,611 [INFO] [测试] 回合 0: 步骤 792, 奖励 -21.0, 步数 792\n",
      "2019-01-01 11:42:02,937 [INFO] [测试] 回合 1: 步骤 764, 奖励 -21.0, 步数 1556\n",
      "2019-01-01 11:42:35,737 [INFO] [测试] 回合 2: 步骤 764, 奖励 -21.0, 步数 2320\n",
      "2019-01-01 11:43:08,823 [INFO] [测试] 回合 3: 步骤 764, 奖励 -21.0, 步数 3084\n",
      "2019-01-01 11:43:42,437 [INFO] [测试] 回合 4: 步骤 764, 奖励 -21.0, 步数 3848\n",
      "2019-01-01 11:44:17,379 [INFO] [测试] 回合 5: 步骤 792, 奖励 -21.0, 步数 4640\n",
      "2019-01-01 11:44:51,262 [INFO] [测试] 回合 6: 步骤 764, 奖励 -21.0, 步数 5404\n",
      "2019-01-01 11:45:25,578 [INFO] [测试] 回合 7: 步骤 764, 奖励 -21.0, 步数 6168\n",
      "2019-01-01 11:46:02,686 [INFO] [测试] 回合 8: 步骤 842, 奖励 -20.0, 步数 7010\n",
      "2019-01-01 11:46:36,663 [INFO] [测试] 回合 9: 步骤 764, 奖励 -21.0, 步数 7774\n",
      "2019-01-01 11:47:13,095 [INFO] [测试] 回合 10: 步骤 820, 奖励 -21.0, 步数 8594\n",
      "2019-01-01 11:47:47,825 [INFO] [测试] 回合 11: 步骤 792, 奖励 -21.0, 步数 9386\n",
      "2019-01-01 11:48:22,294 [INFO] [测试] 回合 12: 步骤 792, 奖励 -21.0, 步数 10178\n",
      "2019-01-01 11:48:59,216 [INFO] [测试] 回合 13: 步骤 820, 奖励 -21.0, 步数 10998\n",
      "2019-01-01 11:49:32,587 [INFO] [测试] 回合 14: 步骤 764, 奖励 -21.0, 步数 11762\n",
      "2019-01-01 11:50:06,501 [INFO] [测试] 回合 15: 步骤 764, 奖励 -21.0, 步数 12526\n",
      "2019-01-01 11:50:40,702 [INFO] [测试] 回合 16: 步骤 764, 奖励 -21.0, 步数 13290\n",
      "2019-01-01 11:51:15,236 [INFO] [测试] 回合 17: 步骤 792, 奖励 -21.0, 步数 14082\n",
      "2019-01-01 11:51:48,913 [INFO] [测试] 回合 18: 步骤 764, 奖励 -21.0, 步数 14846\n",
      "2019-01-01 11:52:24,112 [INFO] [测试] 回合 19: 步骤 792, 奖励 -21.0, 步数 15638\n",
      "2019-01-01 11:52:57,468 [INFO] [测试] 回合 20: 步骤 764, 奖励 -21.0, 步数 16402\n",
      "2019-01-01 11:53:31,883 [INFO] [测试] 回合 21: 步骤 764, 奖励 -21.0, 步数 17166\n",
      "2019-01-01 11:54:04,422 [INFO] [测试] 回合 22: 步骤 764, 奖励 -21.0, 步数 17930\n",
      "2019-01-01 11:54:39,591 [INFO] [测试] 回合 23: 步骤 792, 奖励 -21.0, 步数 18722\n",
      "2019-01-01 11:55:13,582 [INFO] [测试] 回合 24: 步骤 764, 奖励 -21.0, 步数 19486\n",
      "2019-01-01 11:55:54,998 [INFO] [测试] 回合 25: 步骤 949, 奖励 -19.0, 步数 20435\n",
      "2019-01-01 11:56:32,175 [INFO] [测试] 回合 26: 步骤 842, 奖励 -20.0, 步数 21277\n",
      "2019-01-01 11:57:05,618 [INFO] [测试] 回合 27: 步骤 764, 奖励 -21.0, 步数 22041\n",
      "2019-01-01 11:57:43,804 [INFO] [测试] 回合 28: 步骤 842, 奖励 -20.0, 步数 22883\n",
      "2019-01-01 11:58:22,138 [INFO] [测试] 回合 29: 步骤 870, 奖励 -20.0, 步数 23753\n",
      "2019-01-01 11:58:55,895 [INFO] [测试] 回合 30: 步骤 764, 奖励 -21.0, 步数 24517\n",
      "2019-01-01 11:59:29,378 [INFO] [测试] 回合 31: 步骤 764, 奖励 -21.0, 步数 25281\n",
      "2019-01-01 12:00:03,387 [INFO] [测试] 回合 32: 步骤 764, 奖励 -21.0, 步数 26045\n",
      "2019-01-01 12:00:38,088 [INFO] [测试] 回合 33: 步骤 792, 奖励 -21.0, 步数 26837\n",
      "2019-01-01 12:01:12,040 [INFO] [测试] 回合 34: 步骤 764, 奖励 -21.0, 步数 27601\n",
      "2019-01-01 12:01:45,253 [INFO] [测试] 回合 35: 步骤 764, 奖励 -21.0, 步数 28365\n",
      "2019-01-01 12:02:18,439 [INFO] [测试] 回合 36: 步骤 764, 奖励 -21.0, 步数 29129\n",
      "2019-01-01 12:02:52,260 [INFO] [测试] 回合 37: 步骤 764, 奖励 -21.0, 步数 29893\n",
      "2019-01-01 12:03:29,298 [INFO] [测试] 回合 38: 步骤 840, 奖励 -21.0, 步数 30733\n",
      "2019-01-01 12:04:02,521 [INFO] [测试] 回合 39: 步骤 764, 奖励 -21.0, 步数 31497\n",
      "2019-01-01 12:04:35,956 [INFO] [测试] 回合 40: 步骤 764, 奖励 -21.0, 步数 32261\n",
      "2019-01-01 12:05:08,821 [INFO] [测试] 回合 41: 步骤 764, 奖励 -21.0, 步数 33025\n",
      "2019-01-01 12:05:43,981 [INFO] [测试] 回合 42: 步骤 792, 奖励 -21.0, 步数 33817\n",
      "2019-01-01 12:06:18,901 [INFO] [测试] 回合 43: 步骤 792, 奖励 -21.0, 步数 34609\n",
      "2019-01-01 12:06:52,085 [INFO] [测试] 回合 44: 步骤 764, 奖励 -21.0, 步数 35373\n",
      "2019-01-01 12:07:25,509 [INFO] [测试] 回合 45: 步骤 764, 奖励 -21.0, 步数 36137\n",
      "2019-01-01 12:08:00,591 [INFO] [测试] 回合 46: 步骤 792, 奖励 -21.0, 步数 36929\n",
      "2019-01-01 12:08:37,071 [INFO] [测试] 回合 47: 步骤 842, 奖励 -20.0, 步数 37771\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 12:09:11,065 [INFO] [测试] 回合 48: 步骤 764, 奖励 -21.0, 步数 38535\n",
      "2019-01-01 12:09:54,186 [INFO] [测试] 回合 49: 步骤 967, 奖励 -19.0, 步数 39502\n",
      "2019-01-01 12:09:54,190 [INFO] [测试小结] 步数: 平均 = 790.04, 最小 = 764, 最大 = 967.\n",
      "2019-01-01 12:09:54,202 [INFO] [测试小结] 奖励: 平均 = -20.82, 最小 = -21.0, 最大 = -19.0\n",
      "2019-01-01 12:09:54,276 [INFO] 回合 110, 步数 25, 奖励 0.0, 总步数 100002\n",
      "2019-01-01 12:09:54,284 [INFO] 训练结束\n"
     ]
    }
   ],
   "source": [
    "# Build the DQN agent from the hyperparameters defined in earlier config cells.\n",
    "agent = DQNAgent(env, input_shape=input_shape, batch_size=batch_size,\n",
    "        replay_memory_size=replay_memory_size,\n",
    "        learning_rate=learning_rate, gamma=gamma,\n",
    "        epsilon=epsilon, epsilon_decrease_rate=epsilon_decrease,\n",
    "        min_epsilon=min_epsilon, random_initial_steps=random_initial_steps,\n",
    "        load_path=load_path,\n",
    "        update_freq=update_freq,\n",
    "        target_network_update_freq=target_network_update_freq)\n",
    "\n",
    "logging.info(\"训练开始\")\n",
    "\n",
    "frame = 0  # total environment steps accumulated across all episodes\n",
    "max_mean_episode_reward = float(\"-inf\")  # best mean test reward seen so far\n",
    "for episode in itertools.count():\n",
    "    observation = env.reset()\n",
    "    episode_reward = 0\n",
    "    # Build the initial agent state from the first observation only (no history).\n",
    "    state = agent.get_next_state(None, observation)\n",
    "    for step in itertools.count():\n",
    "        if render:\n",
    "            env.render()\n",
    "        frame += 1\n",
    "        # Select an action; step is presumably used by the agent to behave\n",
    "        # randomly during the first random_initial_steps of an episode -- TODO confirm.\n",
    "        action = agent.decide(state, step=step)\n",
    "        observation, reward, done, _ = env.step(action)\n",
    "        next_state = agent.get_next_state(state, observation)\n",
    "        episode_reward += reward\n",
    "        # Store the transition and let the agent train at its update frequency.\n",
    "        agent.learn(state, action, reward, next_state, done)\n",
    "        \n",
    "        # Evaluation: every test_freq frames, run a batch of test episodes and\n",
    "        # checkpoint the network whenever the mean test reward improves.\n",
    "        # NOTE(review): the second clause fires when an episode ends one frame\n",
    "        # before a test boundary (frame is bumped again below on done) --\n",
    "        # confirm this extra trigger condition is intended.\n",
    "        if frame % test_freq == 0 or \\\n",
    "                (done and (frame + 1) % test_freq == 0):\n",
    "            test_episode_rewards = test(env=env,\n",
    "                    agent=agent, episodes=test_episodes, render=render)\n",
    "            if max_mean_episode_reward < np.mean(test_episode_rewards):\n",
    "                max_mean_episode_reward = np.mean(test_episode_rewards)\n",
    "                # Save both the canonical best model and a snapshot tagged with\n",
    "                # the current fit count, e.g. model.h5 -> model.12345.h5\n",
    "                # (save_path[:-2] strips the trailing 'h5', keeping the dot).\n",
    "                agent.save_network(save_path)\n",
    "                path = save_path[:-2] + str(agent.fit_count) + '.h5'\n",
    "                agent.save_network(path)\n",
    "        \n",
    "        if done:\n",
    "            # NOTE(review): step and frame are each incremented once more here,\n",
    "            # on top of the per-iteration increments above -- confirm that the\n",
    "            # episode-terminating step is meant to be counted twice.\n",
    "            step += 1\n",
    "            frame += 1\n",
    "            break\n",
    "        state = next_state\n",
    "    \n",
    "    logging.info(\"回合 {}, 步数 {}, 奖励 {}, 总步数 {}\".format(\n",
    "            episode, step, episode_reward, frame))\n",
    "    \n",
    "    # Stop training once the total frame budget is exhausted.\n",
    "    if frame > frames:\n",
    "        break\n",
    "\n",
    "logging.info(\"训练结束\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2019-01-01 12:09:54,897 [INFO] 载入网络权重 ./output/PongDeterministic-v4-20190124-071322/model.h5.\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "permute_2 (Permute)          (None, 110, 84, 4)        0         \n",
      "_________________________________________________________________\n",
      "conv2d_6 (Conv2D)            (None, 26, 20, 32)        8224      \n",
      "_________________________________________________________________\n",
      "conv2d_7 (Conv2D)            (None, 12, 9, 64)         32832     \n",
      "_________________________________________________________________\n",
      "conv2d_8 (Conv2D)            (None, 10, 7, 64)         36928     \n",
      "_________________________________________________________________\n",
      "flatten_2 (Flatten)          (None, 4480)              0         \n",
      "_________________________________________________________________\n",
      "dense_4 (Dense)              (None, 512)               2294272   \n",
      "_________________________________________________________________\n",
      "dense_5 (Dense)              (None, 6)                 3078      \n",
      "=================================================================\n",
      "Total params: 2,375,334\n",
      "Trainable params: 2,375,334\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "2019-01-01 12:09:56,575 [INFO] 目标网络已更新\n",
      "2019-01-01 12:10:28,794 [INFO] [测试] 回合 0: 步骤 792, 奖励 -21.0, 步数 792\n",
      "2019-01-01 12:11:01,506 [INFO] [测试] 回合 1: 步骤 837, 奖励 -20.0, 步数 1629\n",
      "2019-01-01 12:11:33,899 [INFO] [测试] 回合 2: 步骤 812, 奖励 -21.0, 步数 2441\n",
      "2019-01-01 12:12:05,583 [INFO] [测试] 回合 3: 步骤 792, 奖励 -21.0, 步数 3233\n",
      "2019-01-01 12:12:39,251 [INFO] [测试] 回合 4: 步骤 837, 奖励 -20.0, 步数 4070\n",
      "2019-01-01 12:13:12,981 [INFO] [测试] 回合 5: 步骤 841, 奖励 -21.0, 步数 4911\n",
      "2019-01-01 12:13:45,118 [INFO] [测试] 回合 6: 步骤 812, 奖励 -21.0, 步数 5723\n",
      "2019-01-01 12:14:14,168 [INFO] [测试] 回合 7: 步骤 792, 奖励 -21.0, 步数 6515\n",
      "2019-01-01 12:14:36,018 [INFO] [测试] 回合 8: 步骤 837, 奖励 -20.0, 步数 7352\n",
      "2019-01-01 12:14:59,297 [INFO] [测试] 回合 9: 步骤 906, 奖励 -20.0, 步数 8258\n",
      "2019-01-01 12:15:18,731 [INFO] [测试] 回合 10: 步骤 764, 奖励 -21.0, 步数 9022\n",
      "2019-01-01 12:15:40,760 [INFO] [测试] 回合 11: 步骤 865, 奖励 -20.0, 步数 9887\n",
      "2019-01-01 12:16:01,450 [INFO] [测试] 回合 12: 步骤 812, 奖励 -21.0, 步数 10699\n",
      "2019-01-01 12:16:22,246 [INFO] [测试] 回合 13: 步骤 812, 奖励 -21.0, 步数 11511\n",
      "2019-01-01 12:16:43,267 [INFO] [测试] 回合 14: 步骤 821, 奖励 -21.0, 步数 12332\n",
      "2019-01-01 12:17:03,507 [INFO] [测试] 回合 15: 步骤 792, 奖励 -21.0, 步数 13124\n",
      "2019-01-01 12:17:24,241 [INFO] [测试] 回合 16: 步骤 812, 奖励 -21.0, 步数 13936\n",
      "2019-01-01 12:17:44,595 [INFO] [测试] 回合 17: 步骤 792, 奖励 -21.0, 步数 14728\n",
      "2019-01-01 12:18:04,741 [INFO] [测试] 回合 18: 步骤 792, 奖励 -21.0, 步数 15520\n",
      "2019-01-01 12:18:26,028 [INFO] [测试] 回合 19: 步骤 838, 奖励 -21.0, 步数 16358\n",
      "2019-01-01 12:18:46,851 [INFO] [测试] 回合 20: 步骤 812, 奖励 -21.0, 步数 17170\n",
      "2019-01-01 12:19:08,426 [INFO] [测试] 回合 21: 步骤 837, 奖励 -20.0, 步数 18007\n",
      "2019-01-01 12:19:30,294 [INFO] [测试] 回合 22: 步骤 867, 奖励 -20.0, 步数 18874\n",
      "2019-01-01 12:19:50,260 [INFO] [测试] 回合 23: 步骤 792, 奖励 -21.0, 步数 19666\n",
      "2019-01-01 12:20:10,537 [INFO] [测试] 回合 24: 步骤 792, 奖励 -21.0, 步数 20458\n",
      "2019-01-01 12:20:31,652 [INFO] [测试] 回合 25: 步骤 812, 奖励 -21.0, 步数 21270\n",
      "2019-01-01 12:20:52,031 [INFO] [测试] 回合 26: 步骤 792, 奖励 -21.0, 步数 22062\n",
      "2019-01-01 12:21:13,440 [INFO] [测试] 回合 27: 步骤 838, 奖励 -20.0, 步数 22900\n",
      "2019-01-01 12:21:32,546 [INFO] [测试] 回合 28: 步骤 764, 奖励 -21.0, 步数 23664\n",
      "2019-01-01 12:21:53,128 [INFO] [测试] 回合 29: 步骤 812, 奖励 -21.0, 步数 24476\n",
      "2019-01-01 12:22:15,031 [INFO] [测试] 回合 30: 步骤 865, 奖励 -20.0, 步数 25341\n",
      "2019-01-01 12:22:35,357 [INFO] [测试] 回合 31: 步骤 792, 奖励 -21.0, 步数 26133\n",
      "2019-01-01 12:22:58,903 [INFO] [测试] 回合 32: 步骤 914, 奖励 -20.0, 步数 27047\n",
      "2019-01-01 12:23:20,237 [INFO] [测试] 回合 33: 步骤 840, 奖励 -21.0, 步数 27887\n",
      "2019-01-01 12:23:40,409 [INFO] [测试] 回合 34: 步骤 792, 奖励 -21.0, 步数 28679\n",
      "2019-01-01 12:24:00,698 [INFO] [测试] 回合 35: 步骤 792, 奖励 -21.0, 步数 29471\n",
      "2019-01-01 12:24:21,805 [INFO] [测试] 回合 36: 步骤 821, 奖励 -21.0, 步数 30292\n",
      "2019-01-01 12:24:46,739 [INFO] [测试] 回合 37: 步骤 968, 奖励 -19.0, 步数 31260\n",
      "2019-01-01 12:25:07,511 [INFO] [测试] 回合 38: 步骤 812, 奖励 -21.0, 步数 32072\n",
      "2019-01-01 12:25:28,150 [INFO] [测试] 回合 39: 步骤 812, 奖励 -21.0, 步数 32884\n",
      "2019-01-01 12:25:47,991 [INFO] [测试] 回合 40: 步骤 782, 奖励 -21.0, 步数 33666\n",
      "2019-01-01 12:26:08,833 [INFO] [测试] 回合 41: 步骤 812, 奖励 -21.0, 步数 34478\n",
      "2019-01-01 12:26:30,551 [INFO] [测试] 回合 42: 步骤 856, 奖励 -20.0, 步数 35334\n",
      "2019-01-01 12:26:51,597 [INFO] [测试] 回合 43: 步骤 812, 奖励 -21.0, 步数 36146\n",
      "2019-01-01 12:27:11,457 [INFO] [测试] 回合 44: 步骤 782, 奖励 -21.0, 步数 36928\n",
      "2019-01-01 12:27:31,615 [INFO] [测试] 回合 45: 步骤 792, 奖励 -21.0, 步数 37720\n",
      "2019-01-01 12:27:55,245 [INFO] [测试] 回合 46: 步骤 927, 奖励 -20.0, 步数 38647\n",
      "2019-01-01 12:28:15,359 [INFO] [测试] 回合 47: 步骤 792, 奖励 -21.0, 步数 39439\n",
      "2019-01-01 12:28:35,905 [INFO] [测试] 回合 48: 步骤 812, 奖励 -21.0, 步数 40251\n",
      "2019-01-01 12:28:57,203 [INFO] [测试] 回合 49: 步骤 837, 奖励 -20.0, 步数 41088\n",
      "2019-01-01 12:28:57,205 [INFO] [测试小结] 步数: 平均 = 821.76, 最小 = 764, 最大 = 968.\n",
      "2019-01-01 12:28:57,210 [INFO] [测试小结] 奖励: 平均 = -20.7, 最小 = -21.0, 最大 = -19.0\n",
      "平均回合奖励 = -20.7\n"
     ]
    }
   ],
   "source": [
    "# Reload the best checkpointed weights (saved during training at save_path)\n",
    "# into a fresh agent and evaluate it over test_episodes episodes.\n",
    "test_agent = DQNAgent(env, input_shape=input_shape, load_path=save_path)\n",
    "test_episode_rewards = test(env, test_agent, episodes=test_episodes)\n",
    "print('平均回合奖励 = {}'.format(np.mean(test_episode_rewards)))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
