{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 寻路问题 CliffWalking-v0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "np.random.seed(0)\n",
     "import scipy\n",
     "import scipy.optimize  # SciPy submodules must be imported explicitly; linprog is used below\n",
     "import gym"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 环境使用"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "引入环境"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "观测空间 = Discrete(48)\n",
      "动作空间 = Discrete(4)\n",
      "状态数量 = 48, 动作数量 = 4\n",
      "地图大小 = (4, 12)\n"
     ]
    }
   ],
   "source": [
     "# Build the 4x12 CliffWalking environment (old gym API: env.nS / env.nA / env.shape).\n",
     "env = gym.make('CliffWalking-v0')\n",
     "env.seed(0)  # seed the environment for reproducible rollouts\n",
     "print('观测空间 = {}'.format(env.observation_space))\n",
     "print('动作空间 = {}'.format(env.action_space))\n",
     "print('状态数量 = {}, 动作数量 = {}'.format(env.nS, env.nA))\n",
     "print('地图大小 = {}'.format(env.shape))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "运行一回合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def play_once(env, policy):\n",
    "    total_reward = 0\n",
    "    state = env.reset()\n",
    "    while True:\n",
    "        loc = np.unravel_index(state, env.shape)\n",
    "        print('状态 = {}, 位置 = {}'.format(state, loc), end=' ')\n",
    "        action = np.random.choice(env.nA, p=policy[state])\n",
    "        next_state, reward, done, _ = env.step(action)\n",
    "        print('动作 = {}, 奖励 = {}'.format(action, reward))\n",
    "        total_reward += reward\n",
    "        if done:\n",
    "            break\n",
    "        state = next_state\n",
    "    return total_reward"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "用最优策略运行一回合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "actions = np.ones(env.shape, dtype=int)\n",
    "actions[-1, :] = 0\n",
    "actions[:, -1] = 2\n",
    "optimal_policy = np.eye(4)[actions.reshape(-1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "状态 = 36, 位置 = (3, 0) 动作 = 0, 奖励 = -1\n",
      "状态 = 24, 位置 = (2, 0) 动作 = 1, 奖励 = -1\n",
      "状态 = 25, 位置 = (2, 1) 动作 = 1, 奖励 = -1\n",
      "状态 = 26, 位置 = (2, 2) 动作 = 1, 奖励 = -1\n",
      "状态 = 27, 位置 = (2, 3) 动作 = 1, 奖励 = -1\n",
      "状态 = 28, 位置 = (2, 4) 动作 = 1, 奖励 = -1\n",
      "状态 = 29, 位置 = (2, 5) 动作 = 1, 奖励 = -1\n",
      "状态 = 30, 位置 = (2, 6) 动作 = 1, 奖励 = -1\n",
      "状态 = 31, 位置 = (2, 7) 动作 = 1, 奖励 = -1\n",
      "状态 = 32, 位置 = (2, 8) 动作 = 1, 奖励 = -1\n",
      "状态 = 33, 位置 = (2, 9) 动作 = 1, 奖励 = -1\n",
      "状态 = 34, 位置 = (2, 10) 动作 = 1, 奖励 = -1\n",
      "状态 = 35, 位置 = (2, 11) 动作 = 2, 奖励 = -1\n",
      "回合奖励 = -13\n"
     ]
    }
   ],
   "source": [
     "# Run a single episode under the hand-coded optimal policy.\n",
     "total_reward = play_once(env, optimal_policy)\n",
     "print('回合奖励 = {}'.format(total_reward))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 求解 Bellman 期望方程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_bellman(env, policy, gamma=1.):\n",
    "    a, b = np.eye(env.nS), np.zeros((env.nS))\n",
    "    for state in range(env.nS - 1):\n",
    "        for action in range(env.nA):\n",
    "            pi = policy[state][action]\n",
    "            for p, next_state, reward, done in env.P[state][action]:\n",
    "                a[state, next_state] -= (pi * gamma * p)\n",
    "                b[state] += (pi * reward * p)\n",
    "    v = np.linalg.solve(a, b)\n",
    "    q = np.zeros((env.nS, env.nA))\n",
    "    for state in range(env.nS - 1):\n",
    "        for action in range(env.nA):\n",
    "            for p, next_state, reward, done in env.P[state][action]:\n",
    "                q[state][action] += ((reward + gamma * v[next_state]) * p)\n",
    "    return v, q"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "评估随机策略的价值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "状态价值 = [-71259.73993045 -71237.78971175 -71194.84815306 -71140.80283084\n",
      " -71061.70657406 -70938.91093029 -68108.48102392 -66319.10665148\n",
      " -64517.35282332 -61466.5310719  -57841.39233558 -49099.24383729\n",
      " -71264.96522919 -71247.53092064 -71217.52824196 -71120.56871127\n",
      " -71054.59876056 -70900.38265542 -69089.0248754  -67348.61705425\n",
      " -65077.28575313 -59299.02626941 -54477.04027155 -43408.92846113\n",
      " -71343.97588504 -71365.86170718 -71356.2948821  -71310.51896578\n",
      " -71248.19189084 -70916.43626142 -70432.29357881 -69519.4952358\n",
      " -69113.60088439 -66843.01499868 -60740.99702847 -33361.65231114\n",
      " -71417.87145989 -71463.16859933 -71447.66816432 -71480.47316827\n",
      " -71514.26925747 -71213.20010573 -71233.56329315 -71021.31099413\n",
      " -70840.15827719 -69680.13842414 -65008.3404254       0.        ]\n",
      "动作价值 = [[-7.12607399e+04 -7.12387897e+04 -7.12659652e+04 -7.12607399e+04]\n",
      " [-7.12387897e+04 -7.11958482e+04 -7.12485309e+04 -7.12607399e+04]\n",
      " [-7.11958482e+04 -7.11418028e+04 -7.12185282e+04 -7.12387897e+04]\n",
      " [-7.11418028e+04 -7.10627066e+04 -7.11215687e+04 -7.11958482e+04]\n",
      " [-7.10627066e+04 -7.09399109e+04 -7.10555988e+04 -7.11418028e+04]\n",
      " [-7.09399109e+04 -6.81094810e+04 -7.09013827e+04 -7.10627066e+04]\n",
      " [-6.81094810e+04 -6.63201067e+04 -6.90900249e+04 -7.09399109e+04]\n",
      " [-6.63201067e+04 -6.45183528e+04 -6.73496171e+04 -6.81094810e+04]\n",
      " [-6.45183528e+04 -6.14675311e+04 -6.50782858e+04 -6.63201067e+04]\n",
      " [-6.14675311e+04 -5.78423923e+04 -5.93000263e+04 -6.45183528e+04]\n",
      " [-5.78423923e+04 -4.91002438e+04 -5.44780403e+04 -6.14675311e+04]\n",
      " [-4.91002438e+04 -4.91002438e+04 -4.34099285e+04 -5.78423923e+04]\n",
      " [-7.12607399e+04 -7.12485309e+04 -7.13449759e+04 -7.12659652e+04]\n",
      " [-7.12387897e+04 -7.12185282e+04 -7.13668617e+04 -7.12659652e+04]\n",
      " [-7.11958482e+04 -7.11215687e+04 -7.13572949e+04 -7.12485309e+04]\n",
      " [-7.11418028e+04 -7.10555988e+04 -7.13115190e+04 -7.12185282e+04]\n",
      " [-7.10627066e+04 -7.09013827e+04 -7.12491919e+04 -7.11215687e+04]\n",
      " [-7.09399109e+04 -6.90900249e+04 -7.09174363e+04 -7.10555988e+04]\n",
      " [-6.81094810e+04 -6.73496171e+04 -7.04332936e+04 -7.09013827e+04]\n",
      " [-6.63201067e+04 -6.50782858e+04 -6.95204952e+04 -6.90900249e+04]\n",
      " [-6.45183528e+04 -5.93000263e+04 -6.91146009e+04 -6.73496171e+04]\n",
      " [-6.14675311e+04 -5.44780403e+04 -6.68440150e+04 -6.50782858e+04]\n",
      " [-5.78423923e+04 -4.34099285e+04 -6.07419970e+04 -5.93000263e+04]\n",
      " [-4.91002438e+04 -4.34099285e+04 -3.33626523e+04 -5.44780403e+04]\n",
      " [-7.12659652e+04 -7.13668617e+04 -7.14188715e+04 -7.13449759e+04]\n",
      " [-7.12485309e+04 -7.13572949e+04 -7.15178715e+04 -7.13449759e+04]\n",
      " [-7.12185282e+04 -7.13115190e+04 -7.15178715e+04 -7.13668617e+04]\n",
      " [-7.11215687e+04 -7.12491919e+04 -7.15178715e+04 -7.13572949e+04]\n",
      " [-7.10555988e+04 -7.09174363e+04 -7.15178715e+04 -7.13115190e+04]\n",
      " [-7.09013827e+04 -7.04332936e+04 -7.15178715e+04 -7.12491919e+04]\n",
      " [-6.90900249e+04 -6.95204952e+04 -7.15178715e+04 -7.09174363e+04]\n",
      " [-6.73496171e+04 -6.91146009e+04 -7.15178715e+04 -7.04332936e+04]\n",
      " [-6.50782858e+04 -6.68440150e+04 -7.15178715e+04 -6.95204952e+04]\n",
      " [-5.93000263e+04 -6.07419970e+04 -7.15178715e+04 -6.91146009e+04]\n",
      " [-5.44780403e+04 -3.33626523e+04 -7.15178715e+04 -6.68440150e+04]\n",
      " [-4.34099285e+04 -3.33626523e+04 -1.00000000e+00 -6.07419970e+04]\n",
      " [-7.13449759e+04 -7.15178715e+04 -7.14188715e+04 -7.14188715e+04]\n",
      " [-7.13668617e+04 -7.15178715e+04 -7.15178715e+04 -7.14188715e+04]\n",
      " [-7.13572949e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-7.13115190e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-7.12491919e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-7.09174363e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-7.04332936e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-6.95204952e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-6.91146009e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-6.68440150e+04 -7.15178715e+04 -7.15178715e+04 -7.15178715e+04]\n",
      " [-6.07419970e+04 -1.00000000e+00 -7.15178715e+04 -7.15178715e+04]\n",
      " [ 0.00000000e+00  0.00000000e+00  0.00000000e+00  0.00000000e+00]]\n"
     ]
    }
   ],
   "source": [
    "policy = np.random.uniform(size=(env.nS, env.nA))\n",
    "policy = policy / np.sum(policy, axis=1)[:, np.newaxis]\n",
    "\n",
    "state_values, action_values = evaluate_bellman(env, policy)\n",
    "print('状态价值 = {}'.format(state_values))\n",
    "print('动作价值 = {}'.format(action_values))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "评估最优策略的价值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优状态价值 = [-14. -13. -12. -11. -10.  -9.  -8.  -7.  -6.  -5.  -4.  -3. -13. -12.\n",
      " -11. -10.  -9.  -8.  -7.  -6.  -5.  -4.  -3.  -2. -12. -11. -10.  -9.\n",
      "  -8.  -7.  -6.  -5.  -4.  -3.  -2.  -1. -13. -12. -11. -10.  -9.  -8.\n",
      "  -7.  -6.  -5.  -4.  -3.   0.]\n",
      "最优动作价值 = [[ -15.  -14.  -14.  -15.]\n",
      " [ -14.  -13.  -13.  -15.]\n",
      " [ -13.  -12.  -12.  -14.]\n",
      " [ -12.  -11.  -11.  -13.]\n",
      " [ -11.  -10.  -10.  -12.]\n",
      " [ -10.   -9.   -9.  -11.]\n",
      " [  -9.   -8.   -8.  -10.]\n",
      " [  -8.   -7.   -7.   -9.]\n",
      " [  -7.   -6.   -6.   -8.]\n",
      " [  -6.   -5.   -5.   -7.]\n",
      " [  -5.   -4.   -4.   -6.]\n",
      " [  -4.   -4.   -3.   -5.]\n",
      " [ -15.  -13.  -13.  -14.]\n",
      " [ -14.  -12.  -12.  -14.]\n",
      " [ -13.  -11.  -11.  -13.]\n",
      " [ -12.  -10.  -10.  -12.]\n",
      " [ -11.   -9.   -9.  -11.]\n",
      " [ -10.   -8.   -8.  -10.]\n",
      " [  -9.   -7.   -7.   -9.]\n",
      " [  -8.   -6.   -6.   -8.]\n",
      " [  -7.   -5.   -5.   -7.]\n",
      " [  -6.   -4.   -4.   -6.]\n",
      " [  -5.   -3.   -3.   -5.]\n",
      " [  -4.   -3.   -2.   -4.]\n",
      " [ -14.  -12.  -14.  -13.]\n",
      " [ -13.  -11. -113.  -13.]\n",
      " [ -12.  -10. -113.  -12.]\n",
      " [ -11.   -9. -113.  -11.]\n",
      " [ -10.   -8. -113.  -10.]\n",
      " [  -9.   -7. -113.   -9.]\n",
      " [  -8.   -6. -113.   -8.]\n",
      " [  -7.   -5. -113.   -7.]\n",
      " [  -6.   -4. -113.   -6.]\n",
      " [  -5.   -3. -113.   -5.]\n",
      " [  -4.   -2. -113.   -4.]\n",
      " [  -3.   -2.   -1.   -3.]\n",
      " [ -13. -113.  -14.  -14.]\n",
      " [ -12. -113. -113.  -14.]\n",
      " [ -11. -113. -113. -113.]\n",
      " [ -10. -113. -113. -113.]\n",
      " [  -9. -113. -113. -113.]\n",
      " [  -8. -113. -113. -113.]\n",
      " [  -7. -113. -113. -113.]\n",
      " [  -6. -113. -113. -113.]\n",
      " [  -5. -113. -113. -113.]\n",
      " [  -4. -113. -113. -113.]\n",
      " [  -3.   -1. -113. -113.]\n",
      " [   0.    0.    0.    0.]]\n"
     ]
    }
   ],
   "source": [
     "# Exactly evaluate the deterministic optimal policy (default gamma = 1).\n",
     "optimal_state_values, optimal_action_values = evaluate_bellman(env, optimal_policy)\n",
     "print('最优状态价值 = {}'.format(optimal_state_values))\n",
     "print('最优动作价值 = {}'.format(optimal_action_values))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 求解 Bellman 最优方程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def optimal_bellman(env, gamma=1.):\n",
    "    p = np.zeros((env.nS, env.nA, env.nS))\n",
    "    r = np.zeros((env.nS, env.nA))\n",
    "    for state in range(env.nS - 1):\n",
    "        for action in range(env.nA):\n",
    "            for prob, next_state, reward, done in env.P[state][action]:\n",
    "                p[state, action, next_state] += prob\n",
    "                r[state, action] += (reward * prob)\n",
    "    c = np.ones(env.nS)\n",
    "    a_ub = gamma * p.reshape(-1, env.nS) - \\\n",
    "            np.repeat(np.eye(env.nS), env.nA, axis=0)\n",
    "    b_ub = -r.reshape(-1)\n",
    "    a_eq = np.zeros((0, env.nS))\n",
    "    b_eq = np.zeros(0)\n",
    "    bounds = [(None, None),] * env.nS\n",
    "    res = scipy.optimize.linprog(c, a_ub, b_ub, bounds=bounds,\n",
    "            method='interior-point')\n",
    "    v = res.x\n",
    "    q = r + gamma * np.dot(p, v)\n",
    "    return v, q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优状态价值 = [-1.40000000e+01 -1.30000000e+01 -1.20000000e+01 -1.10000000e+01\n",
      " -1.00000000e+01 -9.00000000e+00 -8.00000000e+00 -7.00000000e+00\n",
      " -6.00000000e+00 -5.00000000e+00 -4.00000000e+00 -3.00000000e+00\n",
      " -1.30000000e+01 -1.20000000e+01 -1.10000000e+01 -1.00000000e+01\n",
      " -9.00000000e+00 -8.00000000e+00 -7.00000000e+00 -6.00000000e+00\n",
      " -5.00000000e+00 -4.00000000e+00 -3.00000000e+00 -2.00000000e+00\n",
      " -1.20000000e+01 -1.10000000e+01 -1.00000000e+01 -9.00000000e+00\n",
      " -8.00000000e+00 -7.00000000e+00 -6.00000000e+00 -5.00000000e+00\n",
      " -4.00000000e+00 -3.00000000e+00 -2.00000000e+00 -1.00000000e+00\n",
      " -1.30000000e+01 -1.20000000e+01 -1.10000000e+01 -1.00000000e+01\n",
      " -9.00000000e+00 -8.00000000e+00 -7.00000000e+00 -6.00000000e+00\n",
      " -5.00000000e+00 -4.00000000e+00 -9.99999999e-01  1.82224030e-11]\n",
      "最优动作价值 = [[ -14.99999999  -13.99999999  -13.99999999  -14.99999999]\n",
      " [ -13.99999999  -13.          -13.          -14.99999999]\n",
      " [ -13.          -12.          -12.          -13.99999999]\n",
      " [ -12.          -11.          -11.          -13.        ]\n",
      " [ -11.          -10.          -10.          -12.        ]\n",
      " [ -10.           -9.           -9.          -11.        ]\n",
      " [  -9.           -8.           -8.          -10.        ]\n",
      " [  -8.           -7.           -7.           -9.        ]\n",
      " [  -7.           -6.           -6.           -8.        ]\n",
      " [  -6.           -5.           -5.           -7.        ]\n",
      " [  -5.           -4.           -4.           -6.        ]\n",
      " [  -4.           -4.           -3.           -5.        ]\n",
      " [ -14.99999999  -13.          -13.          -13.99999999]\n",
      " [ -13.99999999  -12.          -12.          -13.99999999]\n",
      " [ -13.          -11.          -11.          -13.        ]\n",
      " [ -12.          -10.          -10.          -12.        ]\n",
      " [ -11.           -9.           -9.          -11.        ]\n",
      " [ -10.           -8.           -8.          -10.        ]\n",
      " [  -9.           -7.           -7.           -9.        ]\n",
      " [  -8.           -6.           -6.           -8.        ]\n",
      " [  -7.           -5.           -5.           -7.        ]\n",
      " [  -6.           -4.           -4.           -6.        ]\n",
      " [  -5.           -3.           -3.           -5.        ]\n",
      " [  -4.           -3.           -2.           -4.        ]\n",
      " [ -13.99999999  -12.          -14.          -13.        ]\n",
      " [ -13.          -11.         -113.          -13.        ]\n",
      " [ -12.          -10.         -113.          -12.        ]\n",
      " [ -11.           -9.         -113.          -11.        ]\n",
      " [ -10.           -8.         -113.          -10.        ]\n",
      " [  -9.           -7.         -113.           -9.        ]\n",
      " [  -8.           -6.         -113.           -8.        ]\n",
      " [  -7.           -5.         -113.           -7.        ]\n",
      " [  -6.           -4.         -113.           -6.        ]\n",
      " [  -5.           -3.         -113.           -5.        ]\n",
      " [  -4.           -2.         -113.           -4.        ]\n",
      " [  -3.           -2.           -1.           -3.        ]\n",
      " [ -13.         -113.          -14.          -14.        ]\n",
      " [ -12.         -113.         -113.          -14.        ]\n",
      " [ -11.         -113.         -113.         -113.        ]\n",
      " [ -10.         -113.         -113.         -113.        ]\n",
      " [  -9.         -113.         -113.         -113.        ]\n",
      " [  -8.         -113.         -113.         -113.        ]\n",
      " [  -7.         -113.         -113.         -113.        ]\n",
      " [  -6.         -113.         -113.         -113.        ]\n",
      " [  -5.         -113.         -113.         -113.        ]\n",
      " [  -4.         -113.         -113.         -113.        ]\n",
      " [  -3.           -1.         -113.         -113.        ]\n",
      " [   0.            0.            0.            0.        ]]\n"
     ]
    }
   ],
   "source": [
     "# Solve the Bellman optimality equation via linear programming (gamma = 1).\n",
     "optimal_state_values, optimal_action_values = optimal_bellman(env)\n",
     "print('最优状态价值 = {}'.format(optimal_state_values))\n",
     "print('最优动作价值 = {}'.format(optimal_action_values))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优策略 = [2 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 2 0\n",
      " 0 0 0 0 0 0 0 0 0 1 0]\n"
     ]
    }
   ],
   "source": [
    "optimal_actions = optimal_action_values.argmax(axis=1)\n",
    "print('最优策略 = {}'.format(optimal_actions))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
