{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001B[32m[09-07 09:11:04 MainThread @utils.py:73]\u001B[0m paddlepaddle version: 2.3.2.\n",
      "pygame 2.1.2 (SDL 2.0.18, Python 3.8.13)\n",
      "Hello from the pygame community. https://www.pygame.org/contribute.html\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "import paddle\n",
    "import parl\n",
    "import pygame.event\n",
    "from paddle import nn\n",
    "import paddle.nn.functional as F\n",
    "import numpy as np\n",
    "from parl.utils import ReplayMemory\n",
    "from parl.algorithms import DDQN, DQN\n",
    "from q_table.Envs.DQNMazeEnv import MazeEnv\n",
    "\n",
     "LEARN_FREQ = 5  # learn every N env steps: batch up fresh experience instead of learning each step\n",
     "MEMORY_SIZE = 200000  # replay memory capacity; larger uses more RAM\n",
     "MEMORY_WARMUP_SIZE = 200  # pre-fill the replay memory with this many transitions before training starts\n",
     "BATCH_SIZE = 32  # intended samples per learn() call  # NOTE(review): defined but unused below — confirm intended batch size\n",
     "LEARNING_RATE = 0.1  # optimizer learning rate\n",
     "GAMMA = 0.9  # reward discount factor, typically 0.9 to 0.999\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "class CartpoleModel(parl.Model):\n",
    "    def __init__(self, obs_dim, act_dim):\n",
    "        super(CartpoleModel, self).__init__()\n",
    "        hid1_size = 128\n",
    "        hid2_size = 128\n",
    "        self.fc1 = nn.Linear(obs_dim, hid1_size)\n",
    "        self.fc2 = nn.Linear(hid1_size, hid2_size)\n",
    "        self.fc3 = nn.Linear(hid2_size, act_dim)\n",
    "\n",
    "    def forward(self, obs):\n",
    "        h1 = F.relu(self.fc1(obs))\n",
    "        h2 = F.relu(self.fc2(h1))\n",
    "        Q = self.fc3(h2)\n",
    "        return Q\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
     "class MazeAgent(parl.Agent):\n",
     "    \"\"\"DQN agent with epsilon-greedy exploration for the maze environment.\n",
     "\n",
     "    Args:\n",
     "        algorithm: parl algorithm (DQN/DDQN) wrapping the Q-network.\n",
     "        act_dim (int): number of discrete actions.\n",
     "        e_greed (float): initial probability of taking a random action.\n",
     "        e_greed_decrement (float): epsilon decrease applied per sample() call.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):\n",
     "        super(MazeAgent, self).__init__(algorithm)\n",
     "        assert isinstance(act_dim, int)\n",
     "        self.act_dim = act_dim\n",
     "\n",
     "        self.global_step = 0\n",
     "        self.update_target_steps = 2000  # copy model params into target_model every 2000 training steps\n",
     "\n",
     "        self.e_greed = e_greed  # probability of picking a random action (exploration)\n",
     "        self.e_greed_decrement = e_greed_decrement  # anneal exploration as training converges\n",
     "\n",
     "    def sample(self, obs):\n",
     "        \"\"\"Pick an action epsilon-greedily; decays epsilon, floored at 0.1.\"\"\"\n",
     "        sample = np.random.random()\n",
     "        if sample < self.e_greed:\n",
     "            act = np.random.randint(0, self.act_dim)\n",
     "        else:\n",
     "            act = self.predict(obs)\n",
     "        self.e_greed = max(0.1, self.e_greed - self.e_greed_decrement)\n",
     "\n",
     "        return act\n",
     "\n",
     "    def predict(self, obs):\n",
     "        \"\"\"Return the greedy action (argmax of predicted Q-values) for obs.\"\"\"\n",
     "        obs = paddle.to_tensor(obs, dtype='float32')\n",
     "        pred_q = self.alg.predict(obs)\n",
     "        act = pred_q.argmax().numpy()[0]  # index of the largest Q-value, i.e. the chosen action\n",
     "        # print(f'pred_q:{pred_q}')\n",
     "        return act\n",
     "\n",
     "    def learn(self, obs, act, reward, next_obs, terminal):\n",
     "        \"\"\"Run one DQN update on a batch of transitions; returns the scalar loss.\"\"\"\n",
     "        # sync model -> target_model every `update_target_steps` (2000) training steps\n",
     "        # (sync check runs BEFORE the increment so step 0 also syncs)\n",
     "        if self.global_step % self.update_target_steps == 0:\n",
     "            self.alg.sync_target()\n",
     "        self.global_step += 1\n",
     "\n",
     "        act = np.expand_dims(act, -1)\n",
     "        reward = np.expand_dims(reward, -1)\n",
     "        terminal = np.expand_dims(terminal, -1)\n",
     "\n",
     "        obs = paddle.to_tensor(obs, dtype='float32')\n",
     "        act = paddle.to_tensor(act, dtype='int32')\n",
     "        reward = paddle.to_tensor(reward, dtype='float32')\n",
     "        next_obs = paddle.to_tensor(next_obs, dtype='float32')\n",
     "        terminal = paddle.to_tensor(terminal, dtype='float32')\n",
     "        loss = self.alg.learn(obs, act, reward, next_obs, terminal)\n",
     "        # print('loss:', loss.numpy()[0])\n",
     "        return loss.numpy()[0]\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "# 训练一个episode\n",
    "def run_episode(env, agent, rpm, render=False):\n",
    "    total_reward = 0\n",
    "    obs = env.reset()\n",
    "    step = 0\n",
    "    tmp = {}\n",
    "    while True:\n",
    "        for event in pygame.event.get():\n",
    "            if event.type == pygame.QUIT:\n",
    "                pygame.quit()\n",
    "        step += 1\n",
    "        action = agent.sample(obs)  # 采样动作，所有动作都有概率被尝试到\n",
    "        next_obs, reward, done, _ = env.step(action, render)\n",
    "\n",
    "        rpm.append(obs, action, reward, next_obs, done)\n",
    "        if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):\n",
    "            print('\\rlearning', end=\"\")\n",
    "            (batch_obs, batch_action, batch_reward, batch_next_obs,\n",
    "             batch_done) = rpm.sample_batch(min(step + 1, MEMORY_SIZE - 1))\n",
    "            # print(f'\\rtrain_num: {step}', end='')\n",
    "            train_loss = agent.learn(batch_obs, batch_action, batch_reward,\n",
    "                                     batch_next_obs,\n",
    "                                     batch_done)  # s,a,r,s',done\n",
    "        if render:\n",
    "            # time.sleep(1)\n",
    "            env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
    "            # print(f'obs: {obs}, actions:{action}, reward:{reward}, next_obs:{next_obs}, done:{done}')\n",
    "        total_reward += reward\n",
    "        obs = next_obs\n",
    "        if done:\n",
    "            break\n",
    "    return total_reward\n",
    "\n",
    "\n",
    "# 手动训练一个episode\n",
    "def manul_run(env, agent, rpm):\n",
    "    total_reward = 0\n",
    "    obs = env.reset()\n",
    "    step = 0\n",
    "    while True:\n",
    "        time.sleep(0.01)\n",
    "        env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
    "        has_done = False\n",
    "        # print(f'obs: {obs}, actions:{action}, reward:{reward}, next_obs:{next_obs}, done:{done}')\n",
    "\n",
    "        for event in pygame.event.get():\n",
    "            if event.type == pygame.QUIT:\n",
    "                pygame.quit()\n",
    "            keys = pygame.key.get_pressed()\n",
    "            moved = False\n",
    "\n",
    "            if keys[pygame.K_LEFT]:\n",
    "                next_obs, reward, done, _ = env.step(3, True)\n",
    "                step += 1\n",
    "                moved = True\n",
    "                for i in range(1000):\n",
    "                    rpm.append(obs, 3, reward, next_obs, done)\n",
    "                total_reward += reward\n",
    "                obs = next_obs\n",
    "                if done:\n",
    "                    has_done = True\n",
    "                    break\n",
    "            if keys[pygame.K_RIGHT]:\n",
    "                next_obs, reward, done, _ = env.step(2, True)\n",
    "                step += 1\n",
    "                moved = True\n",
    "                rpm.append(obs, 2, reward, next_obs, done)\n",
    "                for i in range(1000):\n",
    "                    rpm.append(obs, 3, reward, next_obs, done)\n",
    "                total_reward += reward\n",
    "                obs = next_obs\n",
    "                if done:\n",
    "                    has_done = True\n",
    "                    break\n",
    "            if keys[pygame.K_DOWN]:\n",
    "                next_obs, reward, done, _ = env.step(1, True)\n",
    "                step += 1\n",
    "                moved = True\n",
    "                rpm.append(obs, 0, reward, next_obs, done)\n",
    "                for i in range(1000):\n",
    "                    rpm.append(obs, 3, reward, next_obs, done)\n",
    "                total_reward += reward\n",
    "                obs = next_obs\n",
    "                if done:\n",
    "                    has_done = True\n",
    "                    break\n",
    "            if keys[pygame.K_UP]:\n",
    "                next_obs, reward, done, _ = env.step(0, True)\n",
    "                step += 1\n",
    "                moved = True\n",
    "                rpm.append(obs, 1, reward, next_obs, done)\n",
    "                for i in range(1000):\n",
    "                    rpm.append(obs, 3, reward, next_obs, done)\n",
    "                total_reward += reward\n",
    "                obs = next_obs\n",
    "                if done:\n",
    "                    has_done = True\n",
    "                    break\n",
    "            if moved:\n",
    "                if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):\n",
    "                    (batch_obs, batch_action, batch_reward, batch_next_obs,\n",
    "                     batch_done) = rpm.sample_batch(env.size ** 2)\n",
    "                    print(batch_obs, batch_action, batch_reward, batch_next_obs,\n",
    "                          batch_done)\n",
    "                    # print(f'\\rtrain_num: {step}', end='')\n",
    "                    train_loss = agent.learn(batch_obs, batch_action, batch_reward,\n",
    "                                             batch_next_obs,\n",
    "                                             batch_done)  # s,a,r,s',done\n",
    "        if has_done:\n",
    "            break\n",
    "\n",
    "    return total_reward\n",
    "\n",
    "\n",
     "# evaluate the agent greedily (the original comment claimed 5 episodes averaged,\n",
     "# but eval_reward only ever holds one entry — this runs a single episode)\n",
     "def evaluate(env, agent, render=False):\n",
     "    \"\"\"Run one greedy (no-exploration) episode and return its total reward.\n",
     "\n",
     "    Aborts after env.size ** 2 steps to avoid an endless wander.\n",
     "    \"\"\"\n",
     "    eval_reward = []\n",
     "    obs = env.reset()\n",
     "    episode_reward = 0\n",
     "    total_step = 0\n",
     "    while True:\n",
     "        # keep the pygame window responsive\n",
     "        for event in pygame.event.get():\n",
     "            if event.type == pygame.QUIT:\n",
     "                pygame.quit()\n",
     "        action = agent.predict(obs)  # greedy: always pick the best action\n",
     "        obs, reward, done, _ = env.step(action)\n",
     "        episode_reward += reward\n",
     "        total_step += 1\n",
     "        print(f'\\r#####total_step: {total_step}, done: {done},action: {action},obs: {obs}#####', end=\"\")\n",
     "        if render:\n",
     "            time.sleep(0.1)\n",
     "            env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
     "            # print(f'obs: {obs}, actions:{action}, reward:{reward}, next_obs:{next_obs}, done:{done}')\n",
     "        if done:\n",
     "            break\n",
     "        if total_step > env.size ** 2:\n",
     "            break\n",
     "    eval_reward.append(episode_reward)\n",
     "    return np.mean(eval_reward)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2 4\n",
      "3 2\n"
     ]
    }
   ],
   "source": [
     "# build env, replay memory, model, algorithm, and agent\n",
     "env = MazeEnv(\n",
     "    minsize=3,\n",
     "    maxsize=3,\n",
     ")\n",
     "action_dim = env.action_space.n\n",
     "obs_shape = (env.size ** 2 + 1,)  # flattened maze cells plus one extra value — presumably the agent position; verify against MazeEnv\n",
     "obs_dim = obs_shape[0]\n",
     "rpm = ReplayMemory(MEMORY_SIZE, obs_dim=obs_dim, act_dim=0)  # DQN replay memory; act_dim=0 presumably means scalar discrete actions — confirm ReplayMemory docs\n",
     "model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)\n",
     "algorithm = DQN(model, gamma=GAMMA, lr=LEARNING_RATE)\n",
     "agent = MazeAgent(\n",
     "    algorithm,\n",
     "    act_dim=action_dim,\n",
     "    e_greed=1,  # start fully exploratory (random actions)\n",
     "    e_greed_decrement=1e-7  # anneal exploration very slowly\n",
     ")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1], [1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1], [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1], [1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n"
     ]
    }
   ],
   "source": [
     "# start the game window and render the initial maze\n",
     "env.gameMaze.start_game()\n",
     "env.reset()\n",
     "env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
     "# (debug helper kept for reference: dump the maze cell types as a plain grid)\n",
     "# tmp = []\n",
     "# for row in env.maze:\n",
     "#     row_tmp = []\n",
     "#     for cell in row:\n",
     "#         row_tmp.append(cell.type)\n",
     "#     tmp.append(row_tmp)\n",
     "# print(tmp)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "####len: 0####\n"
     ]
    },
    {
     "ename": "IndexError",
     "evalue": "list index out of range",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mIndexError\u001B[0m                                Traceback (most recent call last)",
      "Input \u001B[1;32mIn [7]\u001B[0m, in \u001B[0;36m<cell line: 2>\u001B[1;34m()\u001B[0m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(rpm) \u001B[38;5;241m<\u001B[39m MEMORY_WARMUP_SIZE:\n\u001B[0;32m      3\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;130;01m\\r\u001B[39;00m\u001B[38;5;124m####len: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mlen\u001B[39m(rpm)\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m####\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[1;32m----> 4\u001B[0m     \u001B[43mrun_episode\u001B[49m\u001B[43m(\u001B[49m\u001B[43menv\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43magent\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrpm\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrender\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m)\u001B[49m\n\u001B[0;32m      5\u001B[0m     \u001B[38;5;66;03m# manul_run(env, agent, rpm)\u001B[39;00m\n\u001B[0;32m      6\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mexperience collection finished\u001B[39m\u001B[38;5;124m'\u001B[39m)\n",
      "Input \u001B[1;32mIn [4]\u001B[0m, in \u001B[0;36mrun_episode\u001B[1;34m(env, agent, rpm, render)\u001B[0m\n\u001B[0;32m      2\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mrun_episode\u001B[39m(env, agent, rpm, render\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m):\n\u001B[0;32m      3\u001B[0m     total_reward \u001B[38;5;241m=\u001B[39m \u001B[38;5;241m0\u001B[39m\n\u001B[1;32m----> 4\u001B[0m     obs \u001B[38;5;241m=\u001B[39m \u001B[43menv\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mreset\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m      5\u001B[0m     step \u001B[38;5;241m=\u001B[39m \u001B[38;5;241m0\u001B[39m\n\u001B[0;32m      6\u001B[0m     tmp \u001B[38;5;241m=\u001B[39m {}\n",
      "File \u001B[1;32mD:\\my_gitee\\dqn_learning\\q_table\\Envs\\DQNMazeEnv.py:32\u001B[0m, in \u001B[0;36mMazeEnv.reset\u001B[1;34m(self, *args, **kwargs)\u001B[0m\n\u001B[0;32m     28\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mreset\u001B[39m(\u001B[38;5;28mself\u001B[39m, \u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs):\n\u001B[0;32m     29\u001B[0m     maze \u001B[38;5;241m=\u001B[39m [[\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m], [\u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m], [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m],\n\u001B[0;32m     30\u001B[0m             [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m], [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m], [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, 
\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m],\n\u001B[0;32m     31\u001B[0m             [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m], [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m], [\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m]]\n\u001B[1;32m---> 32\u001B[0m     \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mMaze\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mreset_cellMaze\u001B[49m\u001B[43m(\u001B[49m\u001B[43mmaze\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     33\u001B[0m     \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mmaze \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mMaze\u001B[38;5;241m.\u001B[39mcellMaze\n\u001B[0;32m     34\u001B[0m     \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcur_pos \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mstart_cell\n",
      "File \u001B[1;32mD:\\my_gitee\\dqn_learning\\q_table\\Games\\MazeDTO.py:62\u001B[0m, in \u001B[0;36mMaze.reset_cellMaze\u001B[1;34m(self, maze)\u001B[0m\n\u001B[0;32m     60\u001B[0m tmp \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m     61\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m j \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mlen\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mmaze[i])):\n\u001B[1;32m---> 62\u001B[0m     tmp\u001B[38;5;241m.\u001B[39mappend(CellType(\u001B[38;5;28mtype\u001B[39m\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m \u001B[43mmaze\u001B[49m\u001B[43m[\u001B[49m\u001B[43mi\u001B[49m\u001B[43m]\u001B[49m\u001B[43m[\u001B[49m\u001B[43mj\u001B[49m\u001B[43m]\u001B[49m \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m1\u001B[39m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;241m0\u001B[39m, loc\u001B[38;5;241m=\u001B[39m[i, j]))\n\u001B[0;32m     63\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcellMaze\u001B[38;5;241m.\u001B[39mappend(tmp)\n",
      "\u001B[1;31mIndexError\u001B[0m: list index out of range"
     ]
    }
   ],
   "source": [
     "# warm up: pre-fill the replay memory so early training sees varied samples\n",
     "while len(rpm) < MEMORY_WARMUP_SIZE:\n",
     "    print(f'\\r####len: {len(rpm)}####')\n",
     "    run_episode(env, agent, rpm, render=True)\n",
     "    # manul_run(env, agent, rpm)\n",
     "print('experience collection finished')"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "max_episode = 500000  # total number of training episodes"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# 开始训练\n",
    "episode = 0\n",
    "max_reward = -1000000000000000000\n",
    "average_reward = 0\n",
    "while episode < max_episode:  # 训练max_episode个回合，test部分不计算入episode数量\n",
    "    # train part\n",
    "    total_reward = run_episode(env, agent, rpm, render=False)\n",
    "    if total_reward > max_reward:\n",
    "        max_reward = total_reward\n",
    "    episode += 1\n",
    "    average_reward = (average_reward * (episode - 1) + total_reward) / episode\n",
    "    print(\n",
    "        f'\\r#####episode: {episode}, reward: {total_reward},max_reward: {max_reward}, average_reward: {average_reward}, e_greed: {agent.e_greed}#####',\n",
    "        end=\"\")\n",
    "    # print(env.obs)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# final evaluation pass\n",
     "eval_reward = evaluate(env, agent, render=True)  # render=True to watch the run on screen\n",
     "print()\n",
     "print('episode:{}    e_greed:{}   test_reward:{}'.format(\n",
     "    episode, agent.e_greed, eval_reward))\n",
     "print(agent.e_greed)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# (scratch cell: hand-crafted observation for probing the Q-network, kept for reference)\n",
     "# obs = [1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
     "#        0, 2, 1, 0, 1, 0, 1, 0, 1,\n",
     "#        1, 2, 1, 0, 1, 0, 1, 0, 1,\n",
     "#        1, 2, 2, 2, 2, 2, 2, 2, 1,\n",
     "#        1, 1, 1, 0, 1, 1, 1, 2, 1,\n",
     "#        1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
     "#        1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
     "#        1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
     "#        1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
     "# obs = 72, 0\n",
     "# t = paddle.to_tensor(obs, dtype='float32')\n",
     "# print(agent.alg.model(t))\n",
     "# reset epsilon to fully random exploration before further training\n",
     "agent.e_greed=1\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}