{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.8-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python_defaultSpec_1598168724688",
   "display_name": "Python 3.7.8 64-bit ('venv': venv)"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2.5 Sarsa 的实现\n",
    "### 2.5.0 用 $\\epsilon$-贪婪法 实现策略\n",
     "Sarsa 算法是一种价值迭代算法。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 首先建立迷宫，定义 theta_0\n",
    "# 导入包\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "# 迷宫的初始位置\n",
    "\n",
    "# 声明图的大小以及图的变量名\n",
    "fig = plt.figure(figsize=(5, 5))\n",
    "ax = plt.gca()\n",
    "\n",
    "# 画出红色的墙壁\n",
    "plt.plot([1, 1], [0, 1], color='red', linewidth=2)\n",
    "plt.plot([1, 2], [2, 2], color='red', linewidth=2)\n",
    "plt.plot([2, 2], [2, 1], color='red', linewidth=2)\n",
    "plt.plot([2, 3], [1, 1], color='red', linewidth=2)\n",
    "\n",
    "# 画出表示状态的文字 S0~S8\n",
    "plt.text(0.5, 2.5, 'S0', size=14, ha='center')\n",
    "plt.text(1.5, 2.5, 'S1', size=14, ha='center')\n",
    "plt.text(2.5, 2.5, 'S2', size=14, ha='center')\n",
    "plt.text(0.5, 1.5, 'S3', size=14, ha='center')\n",
    "plt.text(1.5, 1.5, 'S4', size=14, ha='center')\n",
    "plt.text(2.5, 1.5, 'S5', size=14, ha='center')\n",
    "plt.text(0.5, 0.5, 'S6', size=14, ha='center')\n",
    "plt.text(1.5, 0.5, 'S7', size=14, ha='center')\n",
    "\n",
    "plt.text(2.5, 0.5, 'S8', size=14, ha='center')\n",
    "plt.text(0.5, 2.3, 'START', ha='center')\n",
    "plt.text(2.5, 0.3, 'GOAL', ha='center')\n",
    "\n",
    "# 设定画图的范围\n",
    "ax.set_xlim(0, 3)\n",
    "ax.set_ylim(0, 3)\n",
     "plt.tick_params(axis='both', which='both', bottom=False, top=False,\n",
     "                labelbottom=False, right=False, left=False, labelleft=False)\n",
    "\n",
    "# 当前位置 S0 用绿色圆圈画出\n",
    "line, = ax.plot([0.5], [2.5], marker=\"o\", color='g', markersize=60)\n",
    "\n",
    "# 定义初始 theta_0\n",
    "theta_0 = np.array([[np.nan, 1, 1, np.nan], # S0\n",
    "                    [np.nan, 1, np.nan, 1], # S1\n",
    "                    [np.nan, np.nan, 1, 1], # S2\n",
    "                    [1, 1, 1, np.nan], # S3\n",
    "                    [np.nan, np.nan, 1, 1], # S4\n",
    "                    [1, np.nan, np.nan, np.nan], # S5\n",
    "                    [1, np.nan, np.nan, np.nan], # S6\n",
    "                    [1, 1, np.nan, np.nan], # S7，S8是目标所以无策略\n",
    "                    ])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "价值迭代方法中使用的`动作价值函数`以 `表格形式` 实现。\n",
    "\n",
    "行表示状态 $s$\n",
    "\n",
    "列表示动作 $a$\n",
    "\n",
    "表中的值为动作价值函数 $Q(s,a)$\n",
    "\n",
    "初始状态赋予随机值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# 设置初始的动作价值函数\n",
    "\n",
    "[a, b] = theta_0.shape  # 行列数放入a、b\n",
    "Q = np.random.rand(a, b) * theta_0\n",
    "Q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义随机行动策略 pi_0\n",
    "\n",
    "# 将策略参数 theta 转换为随机策略\n",
    "def simple_convert_into_pi_from_theta(theta):\n",
    "    '''简单地计算比率'''\n",
    "\n",
    "    [m, n] = theta.shape\n",
    "    pi = np.zeros((m, n))\n",
    "    for i in range(0, m):\n",
    "        pi[i, :] = theta[i, :] / np.nansum(theta[i, :])\n",
    "\n",
    "    pi = np.nan_to_num(pi)\n",
    "\n",
    "    return pi\n",
    "\n",
    "# 求随机行动策略 pi_0\n",
    "pi_0 = simple_convert_into_pi_from_theta(theta_0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "实现 $\\epsilon$-贪婪法："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 实现 ε-贪婪法\n",
    "\n",
    "# 定义动作函数 get_action\n",
    "def get_action(s, Q, epsilon, pi_0):\n",
    "    direction = [\"up\", \"right\", \"down\", \"left\"]\n",
    "\n",
    "    # 确定行动\n",
    "    if np.random.rand() < epsilon:\n",
     "        # 以 ε 概率随机行动\n",
    "        next_direction = np.random.choice(direction, p=pi_0[s, :])\n",
    "    else:\n",
    "        # 采用 Q 的最大值对应的动作\n",
    "        next_direction = direction[np.nanargmax(Q[s, :])]\n",
    "\n",
    "    # 为动作加上索引\n",
    "    if next_direction == \"up\":\n",
    "        action = 0\n",
    "    elif next_direction == \"right\":\n",
    "        action = 1\n",
    "    elif next_direction == \"down\":\n",
    "        action = 2\n",
    "    elif next_direction == \"left\":\n",
    "        action = 3\n",
    "\n",
    "    return action\n",
    "\n",
     "# 定义根据动作求取下一状态的函数 get_s_next\n",
    "def get_s_next(s, a, Q, epsilon, pi_0):\n",
    "    direction = [\"up\", \"right\", \"down\", \"left\"]\n",
    "    next_direction = direction[a]\n",
    "\n",
    "    if next_direction == \"up\":\n",
    "        s_next = s - 3\n",
    "    elif next_direction == \"right\":\n",
    "        s_next = s + 1\n",
    "    elif next_direction == \"down\":\n",
    "        s_next = s + 3\n",
    "    elif next_direction == \"left\":\n",
    "        s_next = s - 1\n",
    "\n",
    "    return s_next"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使动作价值函数 $Q(s,a)$ 学到正确的值后\n",
    "`贝尔曼方程`应成立：\n",
    "$$\n",
     "    Q(s_t,a_t) = R_{t+1}+\\gamma Q(s_{t+1},a_{t+1})\n",
    "$$\n",
     "两边之间的差值称为 `TD 误差（时序差分误差，Temporal Difference error）`，学习的目标是使 TD 误差趋近于 0\n",
    "\n",
    "更新 Q 的公式：\n",
     "$$\n",
     "\\begin{aligned}\n",
     "    Q(s_t,a_t) &= Q(s_t,a_t)+\\eta \\cdot TD \\\\\n",
     "    &= Q(s_t,a_t)+\\eta \\cdot (R_{t+1}+\\gamma Q(s_{t+1},a_{t+1})-Q(s_t,a_t))\n",
     "\\end{aligned}\n",
     "$$\n",
    "遵循此更新公式的算法称为 `Sarsa`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 基于 Sarsa 更新动作价值函数 Q\n",
    "def Sarsa(s, a, r, s_next, a_next, Q, eta, gamma):\n",
    "    if s_next ==8:\n",
    "        Q[s, a] = Q[s, a] + eta * (r - Q[s, a])\n",
    "\n",
    "    else:\n",
    "        Q[s, a] = Q[s, a] + eta * (r + gamma * Q[s_next, a_next] - Q[s, a])\n",
    "\n",
    "    return Q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# 定义基于 Sarsa 求解迷宫问题的函数，输出状态、动作的历史记录以及更新后的价值表 Q\n",
    "def goal_maze_ret_s_a_Q(Q, epsilon, eta, gamma, pi):\n",
    "    s = 0\n",
    "    a = a_next = get_action(s, Q, epsilon, pi)\n",
    "    s_a_history = [[0, np.nan]]\n",
    "\n",
    "    while (1):\n",
    "        a = a_next\n",
    "        s_a_history[-1][1] = a\n",
    "\n",
    "        s_next = get_s_next(s, a, Q, epsilon, pi)\n",
    "    \n",
    "        s_a_history.append([s_next, np.nan])\n",
    "\n",
    "        if s_next == 8:\n",
    "            r = 1\n",
    "            a_next = np.nan\n",
    "        else:\n",
    "            r = 0\n",
    "            a_next = get_action(s_next, Q, epsilon, pi)\n",
    "\n",
    "        # 更新价值函数\n",
    "        Q = Sarsa(s, a, r, s_next, a_next, Q, eta, gamma)\n",
    "\n",
    "        # 终止判断\n",
    "        if s_next == 8:\n",
    "            break\n",
    "        else:\n",
    "            s = s_next\n",
    "\n",
    "    return [s_a_history, Q]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 通过 Sarsa 求解迷宫问题\n",
    "eta = 0.1   # 学习率\n",
    "gamma = 0.9 # 时间折扣率\n",
    "epsilon = 0.5   # ε-贪婪法的初始值\n",
    "v = np.nanmax(Q, axis=1)    # 根据状态求价值的最大\n",
    "is_continue = True\n",
    "episode = 1\n",
    "\n",
    "while is_continue:\n",
    "    print(\" 当前回合：\" + str(episode))\n",
    "\n",
     "    # ε 值随回合数逐渐减小\n",
    "    epsilon = epsilon / 2\n",
    "\n",
    "    # 通过 Sarsa 求解迷宫问题，求移动历史和更新后的 Q\n",
    "    [s_a_history, Q] = goal_maze_ret_s_a_Q(Q, epsilon, eta, gamma, pi_0)\n",
    "\n",
    "    # 状态价值的变化\n",
    "    new_v = np.nanmax(Q, axis=1)\n",
    "    print(np.sum(np.abs(new_v - v)))\n",
    "    v = new_v\n",
    "\n",
    "    print(\"求解迷宫所需步数：\" + str(len(s_a_history) - 1) + \"\\n\")\n",
    "\n",
    "    episode = episode + 1\n",
    "    if episode > 100:\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}