{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001B[32m[09-06 21:57:38 MainThread @utils.py:73]\u001B[0m paddlepaddle version: 2.3.2.\n",
      "pygame 2.1.0 (SDL 2.0.16, Python 3.9.12)\n",
      "Hello from the pygame community. https://www.pygame.org/contribute.html\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "\n",
    "import paddle\n",
    "import parl\n",
    "import pygame.event\n",
    "from paddle import nn\n",
    "import paddle.nn.functional as F\n",
    "import numpy as np\n",
    "from parl.utils import ReplayMemory\n",
    "from parl.algorithms import PolicyGradient\n",
    "from q_table.Envs.DQNMazeEnv import MazeEnv\n",
    "\n",
    "LEARN_FREQ = 5  # learn after accumulating several new steps instead of every step (NOTE(review): appears unused in this policy-gradient notebook)\n",
    "MEMORY_SIZE = 200000  # capacity of the replay memory; larger uses more RAM (NOTE(review): appears unused here)\n",
    "MEMORY_WARMUP_SIZE = 200  # experiences to pre-fill the replay memory before training (NOTE(review): appears unused here)\n",
    "BATCH_SIZE = 32  # samples drawn from the replay memory per learn() call (NOTE(review): appears unused here)\n",
    "LEARNING_RATE = 0.1  # optimizer learning rate passed to PolicyGradient\n",
    "GAMMA = 0.999  # reward discount factor, typically between 0.9 and 0.999\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "class CartpoleModel(parl.Model):\n",
    "    \"\"\"Two-layer MLP policy network: obs -> 128 tanh units -> softmax probs.\n",
    "\n",
    "    Despite the name (carried over from a CartPole example) it is used as\n",
    "    the policy model for the maze environment.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, obs_dim, act_dim):\n",
    "        super(CartpoleModel, self).__init__()\n",
    "        hid1_size = 128  # width of the single hidden layer\n",
    "        self.fc1 = nn.Linear(obs_dim, hid1_size)\n",
    "        self.fc2 = nn.Linear(hid1_size, act_dim)\n",
    "\n",
    "    def forward(self, obs):\n",
    "        \"\"\"Return action probabilities for `obs`.\"\"\"\n",
    "        h1 = F.tanh(self.fc1(obs))\n",
    "        # float64 softmax keeps the probabilities summing to 1 closely enough\n",
    "        # for np.random.choice; NOTE(review): this cast is also what triggers\n",
    "        # the float64-vs-float32 dtype UserWarning seen during learn().\n",
    "        prob = F.softmax(self.fc2(h1), dtype='float64')\n",
    "        return prob\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "class MazeAgent(parl.Agent):\n",
    "    \"\"\"Policy-gradient agent for the maze environment.\n",
    "\n",
    "    Wraps a PARL algorithm and exposes sample() for stochastic exploration,\n",
    "    predict() for greedy action selection and learn() for updates.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, algorithm, act_dim, e_greed=0.1, e_greed_decrement=0.):\n",
    "        super(MazeAgent, self).__init__(algorithm)\n",
    "        assert isinstance(act_dim, int)\n",
    "        self.act_dim = act_dim\n",
    "\n",
    "        self.global_step = 0\n",
    "        self.update_target_steps = 2000  # NOTE(review): vestigial DQN setting; never read in this class\n",
    "\n",
    "        self.e_greed = e_greed  # exploration probability (NOTE(review): never used by sample(); kept for interface compatibility)\n",
    "        self.e_greed_decrement = e_greed_decrement  # exploration decay (NOTE(review): never applied)\n",
    "\n",
    "    def sample(self, obs):\n",
    "        \"\"\"Sample an action from the policy's probability distribution.\"\"\"\n",
    "        obs = paddle.to_tensor(obs, dtype='float32')\n",
    "        pred_q = self.alg.predict(obs)\n",
    "        # np.random.choice requires `p` to be a 1-D ndarray whose entries sum\n",
    "        # to 1 within tolerance; convert from the paddle Tensor explicitly and\n",
    "        # renormalize to guard against floating-point drift in the softmax.\n",
    "        probs = pred_q.numpy().astype('float64').flatten()\n",
    "        probs = probs / probs.sum()\n",
    "        act = np.random.choice(range(self.act_dim), p=probs)\n",
    "        return act\n",
    "\n",
    "    def predict(self, obs):\n",
    "        \"\"\"Return the greedy action (index of the highest probability).\"\"\"\n",
    "        obs = paddle.to_tensor(obs, dtype='float32')\n",
    "        pred_q = self.alg.predict(obs)\n",
    "        act = pred_q.argmax().numpy()[0]  # index of the largest probability\n",
    "        return act\n",
    "\n",
    "    def learn(self, obs, act, reward):\n",
    "        \"\"\"Run one policy-gradient update and return the scalar loss.\"\"\"\n",
    "        act = np.expand_dims(act, -1)\n",
    "        reward = np.expand_dims(reward, -1)\n",
    "\n",
    "        obs = paddle.to_tensor(obs, dtype='float32')\n",
    "        act = paddle.to_tensor(act, dtype='int32')\n",
    "        reward = paddle.to_tensor(reward, dtype='float32')\n",
    "        loss = self.alg.learn(obs, act, reward)\n",
    "        return loss.numpy()[0]\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "# Run one training episode and do a policy-gradient update on it.\n",
    "def run_episode(env, agent, render=False):\n",
    "    \"\"\"Roll out one episode with agent.sample(), convert the rewards to\n",
    "    discounted returns, call agent.learn() once, and return the sum of the\n",
    "    discounted returns (used only for logging).\"\"\"\n",
    "    obs_list, action_list, reward_list = [], [], []\n",
    "    obs = env.reset()\n",
    "    while True:\n",
    "        # Drain the pygame event queue so the window stays responsive.\n",
    "        for event in pygame.event.get():\n",
    "            if event.type == pygame.QUIT:\n",
    "                pygame.quit()\n",
    "        obs_list.append(obs)\n",
    "        action = agent.sample(obs)  # stochastic sampling so every action can be explored\n",
    "        action_list.append(action)\n",
    "        next_obs, reward, done, _ = env.step(action, render)\n",
    "        reward_list.append(reward)\n",
    "        obs = next_obs  # BUGFIX: advance the observation (it was never updated, so the agent kept sampling from the stale initial obs)\n",
    "\n",
    "        if render:\n",
    "            env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
    "        if done:\n",
    "            break\n",
    "    batch_obs = np.array(obs_list)\n",
    "    batch_action = np.array(action_list)\n",
    "    # In-place, back-to-front conversion of rewards to discounted returns:\n",
    "    # G_t = r_t + GAMMA * G_{t+1}\n",
    "    for i in range(len(reward_list) - 2, -1, -1):\n",
    "        reward_list[i] += GAMMA * reward_list[i + 1]\n",
    "\n",
    "    batch_reward = np.array(reward_list)\n",
    "    agent.learn(batch_obs, batch_action, batch_reward)\n",
    "\n",
    "    return np.sum(reward_list)\n",
    "\n",
    "\n",
    "# Evaluate the agent: run n_episodes greedy episodes and average the reward.\n",
    "def evaluate(env, agent, render=False, n_episodes=1):\n",
    "    \"\"\"Run the agent greedily (agent.predict) for `n_episodes` episodes and\n",
    "    return the mean episode reward.  The previous comment claimed 5 episodes\n",
    "    but only one was run; the count is now an explicit parameter whose\n",
    "    default preserves the old behavior.  Each episode is capped at\n",
    "    env.size ** 2 steps so a non-terminating policy cannot hang here.\"\"\"\n",
    "    eval_reward = []\n",
    "    for _ in range(n_episodes):\n",
    "        obs = env.reset()\n",
    "        episode_reward = 0\n",
    "        total_step = 0\n",
    "        while True:\n",
    "            # Keep the pygame window responsive.\n",
    "            for event in pygame.event.get():\n",
    "                if event.type == pygame.QUIT:\n",
    "                    pygame.quit()\n",
    "            action = agent.predict(obs)  # greedy: always pick the best action\n",
    "            obs, reward, done, _ = env.step(action)\n",
    "            episode_reward += reward\n",
    "            total_step += 1\n",
    "            if render:\n",
    "                time.sleep(0.1)\n",
    "                env.gameMaze.draw_maze(env.maze, env.cur_pos)\n",
    "            if done:\n",
    "                break\n",
    "            if total_step > env.size ** 2:  # safety cap against looping policies\n",
    "                break\n",
    "        eval_reward.append(episode_reward)\n",
    "    return np.mean(eval_reward)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 3\n",
      "3 0\n"
     ]
    }
   ],
   "source": [
    "# Build the environment, model, algorithm and agent.\n",
    "env = MazeEnv(\n",
    "    minsize=2,\n",
    "    maxsize=2,\n",
    ")\n",
    "action_dim = env.action_space.n\n",
    "obs_shape = (env.size ** 2 + 1,)  # flattened maze cells plus one extra feature -- TODO confirm against MazeEnv\n",
    "obs_dim = obs_shape[0]\n",
    "model = CartpoleModel(obs_dim=obs_dim, act_dim=action_dim)\n",
    "algorithm = PolicyGradient(model, lr=LEARNING_RATE)\n",
    "agent = MazeAgent(\n",
    "    algorithm,\n",
    "    act_dim=action_dim,\n",
    "    e_greed=1,  # probability of a random exploratory action (NOTE(review): unused by MazeAgent.sample)\n",
    "    e_greed_decrement=1e-7  # gradual decay of exploration (NOTE(review): never applied)\n",
    ")"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "env.gameMaze.start_game()  # open the pygame window used for rendering\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "max_episode = 500000  # maximum number of training episodes for the loop below"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\ProgramData\\Anaconda3\\envs\\dqn\\lib\\site-packages\\paddle\\fluid\\dygraph\\math_op_patch.py:276: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float64, but right dtype is paddle.float32, the right dtype will convert to paddle.float64\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "#####episode: 95, reward: -2122512.750882808,max_reward: 158019590.47776198, average_reward: -426803.8424540998, e_greed: 1######"
     ]
    }
   ],
   "source": [
    "# Training loop: run episodes until max_episode and log running statistics.\n",
    "episode = 0\n",
    "max_reward = float('-inf')  # best episode reward seen so far (was a magic large negative constant)\n",
    "average_reward = 0\n",
    "while episode < max_episode:  # evaluation episodes are not counted towards this limit\n",
    "    # train part\n",
    "    total_reward = run_episode(env, agent, render=True)\n",
    "    if total_reward > max_reward:\n",
    "        max_reward = total_reward\n",
    "    episode += 1\n",
    "    # incremental running mean: avg_n = (avg_{n-1} * (n - 1) + x_n) / n\n",
    "    average_reward = (average_reward * (episode - 1) + total_reward) / episode\n",
    "    print(\n",
    "        f'\\r#####episode: {episode}, reward: {total_reward},max_reward: {max_reward}, average_reward: {average_reward}, e_greed: {agent.e_greed}#####',\n",
    "        end=\"\")\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Evaluate the trained agent once and report the result.\n",
    "eval_reward = evaluate(env, agent, render=True)  # render=True to watch the run\n",
    "print()\n",
    "print('episode:{}    e_greed:{}   test_reward:{}'.format(\n",
    "    episode, agent.e_greed, eval_reward))\n",
    "print(agent.e_greed)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# Scratch cell: the commented-out block fed a hand-written maze observation\n",
    "# through the policy network for manual inspection.\n",
    "# obs = [1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
    "#        0, 2, 1, 0, 1, 0, 1, 0, 1,\n",
    "#        1, 2, 1, 0, 1, 0, 1, 0, 1,\n",
    "#        1, 2, 2, 2, 2, 2, 2, 2, 1,\n",
    "#        1, 1, 1, 0, 1, 1, 1, 2, 1,\n",
    "#        1, 0, 0, 0, 1, 0, 1, 0, 1,\n",
    "#        1, 1, 1, 0, 1, 0, 1, 0, 1,\n",
    "#        1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
    "#        1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
    "# obs = 72, 0\n",
    "# t = paddle.to_tensor(obs, dtype='float32')\n",
    "# print(agent.alg.model(t))\n",
    "agent.e_greed = 1  # reset exploration probability (NOTE(review): e_greed is not read by MazeAgent.sample)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}