{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the game environment: a 4x12 cliff-walking grid.\n",
    "def get_state(row, col):\n",
    "    \"\"\"Classify cell (row, col) as \\\"ground\\\", \\\"terminal\\\" or \\\"trap\\\".\"\"\"\n",
    "    # Rows 0-2 are always ordinary ground.\n",
    "    if row != 3:\n",
    "        return \"ground\"\n",
    "    # Bottom row: the start cell (col 0) is ground, the goal (col 11)\n",
    "    # is terminal, everything in between is the cliff.\n",
    "    # NOTE: the original also re-checked `row != 3 and col == 0` here,\n",
    "    # which was unreachable after the first return; removed.\n",
    "    if col == 11:\n",
    "        return \"terminal\"\n",
    "    if col == 0:\n",
    "        return \"ground\"\n",
    "    return \"trap\"\n",
    "\n",
    "\n",
    "def move(row, col, action):\n",
    "    \"\"\"Apply `action` (0=up, 1=down, 2=left, 3=right) from (row, col).\n",
    "\n",
    "    Returns (next_row, next_col, reward): -100 for stepping into the\n",
    "    cliff, otherwise -1. Trap/terminal states absorb with reward 0.\n",
    "    \"\"\"\n",
    "    if get_state(row, col) in [\"trap\", \"terminal\"]:\n",
    "        return row, col, 0\n",
    "\n",
    "    # Displacement per action id; unknown actions leave the agent in place.\n",
    "    d_row, d_col = {0: (-1, 0), 1: (1, 0), 2: (0, -1), 3: (0, 1)}.get(action, (0, 0))\n",
    "\n",
    "    # Apply the move, clamped so the agent stays on the 4x12 board.\n",
    "    row = min(3, max(0, row + d_row))\n",
    "    col = min(11, max(0, col + d_col))\n",
    "\n",
    "    # Falling into the cliff costs -100, any other step costs -1.\n",
    "    reward = -100 if get_state(row, col) == \"trap\" else -1\n",
    "\n",
    "    return row, col, reward"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "from IPython import display\n",
    "import time"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DynaQ 相比于Sarsa算法，增加了离线学习的部分\n",
    "- Sarsa算法的数据用一次就丢，对数据的利用率不是很高\n",
    "- DynaQ将数据保存下来，用于模型对之前的数据进行复习\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((4, 12, 4), {})"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "# Initialize the Q table: one value per (row, col, action) triple.\n",
    "Q = np.zeros([4,12,4])\n",
    "\n",
    "# Replay memory for Dyna-Q offline planning.\n",
    "history = dict()\n",
    "# history key:   (row, col, action)\n",
    "# history value: (next_row, next_col, reward)\n",
    "Q.shape,history"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Action policy: epsilon-greedy over the Q table.\n",
    "# Given a state (row, col), return an action id in 0..3.\n",
    "def get_action(row, col):\n",
    "    # Explore with small probability (epsilon = 0.1): random action.\n",
    "    if random.random() < 0.1:\n",
    "        return random.choice(range(4))\n",
    "\n",
    "    # Otherwise exploit: the action with the highest Q value.\n",
    "    return Q[row, col].argmax()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute the Q-table increment using the Q-Learning update rule.\n",
    "def get_update(row, col, action, reward, next_row, next_col):\n",
    "    \"\"\"Return lr * TD-error for Q[row, col, action] (not yet applied).\"\"\"\n",
    "    # Off-policy target: reward plus discounted best next-state value\n",
    "    # (gamma = 0.9), regardless of the action actually taken next.\n",
    "    target = reward + 0.9 * Q[next_row, next_col].max()\n",
    "\n",
    "    # Current estimate for this state-action pair.\n",
    "    value = Q[row, col, action]\n",
    "\n",
    "    # Scale the TD error by the learning rate (0.1).\n",
    "    return (target - value) * 0.1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Offline learning (planning) step of Dyna-Q.\n",
    "def q_planning():\n",
    "    \"\"\"Replay 20 random remembered transitions and update Q from them.\"\"\"\n",
    "    for _ in range(20):\n",
    "        # Sample a previously visited (state, action) pair.\n",
    "        row, col, action = random.choice(list(history.keys()))\n",
    "\n",
    "        # Look up the transition that pair produced.\n",
    "        next_row, next_col, reward = history[(row, col, action)]\n",
    "\n",
    "        # Re-apply the Q-Learning update on the remembered data.\n",
    "        Q[row, col, action] += get_update(row, col, action, reward, next_row, next_col)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第0轮：reward_sum:-12\n",
      "第150轮：reward_sum:-13\n",
      "第300轮：reward_sum:-22\n",
      "第450轮：reward_sum:-14\n",
      "第600轮：reward_sum:-16\n",
      "第750轮：reward_sum:-12\n",
      "第900轮：reward_sum:-15\n",
      "第1050轮：reward_sum:-106\n",
      "第1200轮：reward_sum:-105\n",
      "第1350轮：reward_sum:-13\n",
      "第1500轮：reward_sum:-13\n",
      "第1650轮：reward_sum:-16\n",
      "第1800轮：reward_sum:-14\n",
      "第1950轮：reward_sum:-13\n"
     ]
    }
   ],
   "source": [
    "# Training loop\n",
    "def train():\n",
    "    \"\"\"Run 2000 Dyna-Q episodes on the cliff-walking grid.\"\"\"\n",
    "    for epoch in range(2000):\n",
    "        # Each episode starts in a random row of column 0.\n",
    "        row = random.choice(range(4))\n",
    "        col = 0\n",
    "\n",
    "        # First action of the episode.\n",
    "        action = get_action(row, col)\n",
    "\n",
    "        # Total episode reward; closer to 0 means a better policy.\n",
    "        reward_sum = 0\n",
    "\n",
    "        # Step until the goal is reached or the agent falls off the cliff.\n",
    "        while get_state(row, col) not in [\"terminal\", \"trap\"]:\n",
    "            # Interact with the environment.\n",
    "            next_row, next_col, reward = move(row, col, action)\n",
    "            reward_sum += reward\n",
    "\n",
    "            # Pick the action for the new state.\n",
    "            next_action = get_action(next_row, next_col)\n",
    "\n",
    "            # Direct RL: Q-Learning update from the real transition.\n",
    "            Q[row, col, action] += get_update(row, col, action, reward, next_row, next_col)\n",
    "\n",
    "            # Remember the transition for offline planning.\n",
    "            history[(row, col, action)] = next_row, next_col, reward\n",
    "\n",
    "            # Planning: replay stored transitions.\n",
    "            q_planning()\n",
    "\n",
    "            # Advance to the next state.\n",
    "            row, col, action = next_row, next_col, next_action\n",
    "        if epoch % 150 == 0:\n",
    "            print(f'第{epoch}轮：reward_sum:{reward_sum}')\n",
    "train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Render the board for testing: ⬜ ground, ❌ cliff, 🚩 goal, arrow = agent.\n",
    "def show(row, col, action):\n",
    "    # Flat 4x12 board: 37 ground cells, 10 cliff cells, then the goal.\n",
    "    cells = [\"⬜\"] * 37 + [\"❌\"] * 10 + [\"🚩\"]\n",
    "    # Overlay the agent, drawn as an arrow showing its chosen action.\n",
    "    cells[row * 12 + col] = {0: \"🔺\", 1: \"🔻\", 2: \"👈\", 3: \"👉\"}[action]\n",
    "    board = \"\".join(cells)\n",
    "\n",
    "    # Print the flat string as 4 rows of 12 cells.\n",
    "    for i in range(0, 4 * 12, 12):\n",
    "        print(board[i : i + 12])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜\n",
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜\n",
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜🔻\n",
      "⬜❌❌❌❌❌❌❌❌❌❌🚩\n"
     ]
    }
   ],
   "source": [
    "from IPython import display\n",
    "import time\n",
    "\n",
    "\n",
    "def test():\n",
    "    \"\"\"Greedily roll out the learned policy from a random start row.\"\"\"\n",
    "    row = random.choice(range(4))\n",
    "    col = 0\n",
    "\n",
    "    # Cap the rollout at 200 steps in case the policy loops.\n",
    "    for _ in range(200):\n",
    "        # Stop once the agent reaches the goal or falls into the cliff.\n",
    "        if get_state(row, col) in [\"trap\", \"terminal\"]:\n",
    "            break\n",
    "\n",
    "        # Always exploit: best-scoring action for the current state.\n",
    "        action = Q[row, col].argmax()\n",
    "\n",
    "        # Animate: clear the previous frame, pause briefly, redraw.\n",
    "        display.clear_output(wait=True)\n",
    "        time.sleep(0.1)\n",
    "        show(row, col, action)\n",
    "\n",
    "        # Take the step.\n",
    "        row, col, reward = move(row, col, action)\n",
    "test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "↓↓↓↓↓↓↓↓↓↓↓↓\n",
      "↓↓↓↓↓↓↓↓↓↓↓↓\n",
      "→→→→→→→→→→→↓\n",
      "↑↑↑↑↑↑↑↑↑↑↑↑\n"
     ]
    }
   ],
   "source": [
    "# Print the greedy policy: the best action in each cell as an arrow.\n",
    "# tactic[row, col] = argmax of Q over the action axis.\n",
    "tactic = Q.argmax(axis=2)\n",
    "arrows = {0: \"↑\", 1: \"↓\", 2: \"←\", 3: \"→\"}\n",
    "for row in tactic:\n",
    "    for element in row:\n",
    "        print(arrows[element], end=\"\")\n",
    "    print()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Gym",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
