{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建游戏环境\n",
    "def get_state(row,col):\n",
    "    if row != 3:\n",
    "        return 'ground'\n",
    "    if row !=3 and col ==0:\n",
    "        return 'ground'\n",
    "    if row == 3 and col == 11:\n",
    "        return 'terminal'\n",
    "    if row == 3 and col ==0:\n",
    "        return 'ground'\n",
    "    return 'trap'\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![游戏环境](imgs/%E6%B8%B8%E6%88%8F%E7%8E%AF%E5%A2%83.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'ground'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: (0,1) should be ordinary ground.\n",
    "get_state(0,1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def move(row,col,action):\n",
    "    \"\"\"Take `action` (0=up, 1=down, 2=left, 3=right) from (row, col).\n",
    "\n",
    "    Returns (next_row, next_col, reward).\n",
    "    \"\"\"\n",
    "    # Trap/terminal states are absorbing: stay put with zero reward.\n",
    "    if get_state(row,col) in ['trap','terminal']:\n",
    "        return row,col,0\n",
    "    \n",
    "    # up\n",
    "    if action == 0:\n",
    "        row -= 1\n",
    "\n",
    "    # down\n",
    "    if action == 1:\n",
    "        row += 1\n",
    "\n",
    "    # left\n",
    "    if action == 2:\n",
    "        col -=1\n",
    "    \n",
    "    # right\n",
    "    if action == 3:\n",
    "        col += 1\n",
    "\n",
    "    # Clamp to the 4x12 board; walking off the edge leaves the position unchanged.\n",
    "    row = max(0,row)\n",
    "    row = min(3,row)\n",
    "    col = max(0,col)\n",
    "    col = min(11,col)\n",
    "\n",
    "    # Stepping into the cliff costs -100; every other step costs -1.\n",
    "    reward = -1\n",
    "    if get_state(row,col) == 'trap':\n",
    "        reward = -100\n",
    "\n",
    "    return row, col, reward\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "思想就是算出每个格子的分数，不断的向着高分值的格子走，最终走到终点"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
       "        [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
       "        [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n",
       "        [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]),\n",
       " array([[0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25],\n",
       "        [0.25, 0.25, 0.25, 0.25]]))"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "# State values: one score per grid cell, initialized to zero.\n",
    "values = np.zeros([4,12]) #all-zero init\n",
    "\n",
    "# Policy: probability of each of the 4 actions in each cell, uniform to start.\n",
    "pi = np.ones([4,12,4])*0.25\n",
    "\n",
    "values,pi[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "-1.0"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Q-function: the score of taking `action` in state (row, col).\n",
    "# Q(s,a) = reward + 0.9 * value(next state), where 0.9 is the discount factor.\n",
    "def get_QSA(row,col,action): \n",
    "    \"\"\"Return reward plus the discounted value of the successor state.\"\"\"\n",
    "    # Execute the action to get the successor state and the immediate reward.\n",
    "    next_row,next_col,reward = move(row,col,action)\n",
    "\n",
    "    # Successor value is read from the current `values` table, discounted by 0.9.\n",
    "    value = values[next_row,next_col]*0.9\n",
    "\n",
    "    # Terminal and trap states have value 0 by definition.\n",
    "    if get_state(next_row,next_col) in ['trap','terminal']:\n",
    "        value = 0\n",
    "\n",
    "    # The action's score is the reward plus the discounted successor value.\n",
    "    return value + reward\n",
    "# Action 2 = left; from (0,0) it is clamped in place, so Q = 0*0.9 + (-1) = -1.\n",
    "# (The original passed the string 'left', which matched no action branch in move().)\n",
    "get_QSA(0,0,2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Policy evaluation: score every grid cell and return a fresh values table.\n",
    "# Each cell is scored against the current global `values` (a synchronous sweep).\n",
    "\n",
    "def get_values():\n",
    "\n",
    "    # Build a new table so every cell is evaluated against the same old `values`.\n",
    "    new_values = np.zeros([4,12])\n",
    "\n",
    "    # Visit every cell.\n",
    "    for row in range(4):\n",
    "        for col in range(12):\n",
    "\n",
    "            # Score each of the 4 actions from this cell.\n",
    "            action_value = np.zeros(4)\n",
    "            \n",
    "            # Try every action.\n",
    "            for action in range(4):\n",
    "                action_value[action] = get_QSA(row,col,action)\n",
    "\n",
    "\n",
    "########################### policy iteration ###########################\n",
    "            # Weight each action's score by its probability under pi...\n",
    "#            action_value *= pi[row,col]\n",
    "\n",
    "            # ...and the cell's value is the probability-weighted sum.\n",
    "#            new_values[row,col] = action_value.sum()\n",
    "#################################################################\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "########################## value iteration ############################\n",
    "            new_values[row,col] = action_value.max()\n",
    "################################################################\n",
    "\n",
    "## Policy iteration scores a cell over all actions (weighted by pi); value iteration uses only the single best action.\n",
    "\n",
    "    return new_values\n",
    "\n",
    "\n",
    "# How do we judge a cell?\n",
    "#   A high-value cell is one from which no action leads somewhere much worse.\n",
    "\n",
    "# How do we score an action?\n",
    "#   It depends on the cell's value and on the probability of taking that action:\n",
    "#   score = cell value * action probability\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![价值迭代](imgs/%E4%BB%B7%E5%80%BC%E8%BF%AD%E4%BB%A3.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Policy improvement: recompute each cell's action probabilities from the values.\n",
    "def get_pi():\n",
    "    \"\"\"Return a greedy policy: probability split evenly over the best action(s) per cell.\"\"\"\n",
    "    new_pi = np.zeros([4,12,4])\n",
    "\n",
    "    # Visit every cell.\n",
    "    for row in range(4):\n",
    "        for col in range(12):\n",
    "\n",
    "            # Score each of the 4 actions from this cell.\n",
    "            action_value = np.zeros(4)\n",
    "            for action in range(4):\n",
    "                action_value[action] = get_QSA(row,col,action)\n",
    "\n",
    "            # Hoist the loop-invariant max; count how many actions tie for it.\n",
    "            best = action_value.max()\n",
    "            count = (action_value == best).sum()\n",
    "\n",
    "            # Share probability mass evenly among the maximizing actions.\n",
    "            for action in range(4):\n",
    "                new_pi[row,col,action] = 1/count if action_value[action] == best else 0\n",
    "    return new_pi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array([[-7.71232075, -7.45813417, -7.17570464, -6.86189404, -6.5132156 ,\n",
       "         -6.12579511, -5.6953279 , -5.217031  , -4.68559   , -4.0951    ,\n",
       "         -3.439     , -2.71      ],\n",
       "        [-7.45813417, -7.17570464, -6.86189404, -6.5132156 , -6.12579511,\n",
       "         -5.6953279 , -5.217031  , -4.68559   , -4.0951    , -3.439     ,\n",
       "         -2.71      , -1.9       ],\n",
       "        [-7.17570464, -6.86189404, -6.5132156 , -6.12579511, -5.6953279 ,\n",
       "         -5.217031  , -4.68559   , -4.0951    , -3.439     , -2.71      ,\n",
       "         -1.9       , -1.        ],\n",
       "        [-7.45813417,  0.        ,  0.        ,  0.        ,  0.        ,\n",
       "          0.        ,  0.        ,  0.        ,  0.        ,  0.        ,\n",
       "          0.        ,  0.        ]]),\n",
       " array([[[0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 1.  , 0.  , 0.  ]],\n",
       " \n",
       "        [[0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 0.5 , 0.  , 0.5 ],\n",
       "         [0.  , 1.  , 0.  , 0.  ]],\n",
       " \n",
       "        [[0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 0.  , 0.  , 1.  ],\n",
       "         [0.  , 1.  , 0.  , 0.  ]],\n",
       " \n",
       "        [[1.  , 0.  , 0.  , 0.  ],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25],\n",
       "         [0.25, 0.25, 0.25, 0.25]]]))"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Alternate policy evaluation (100 value sweeps) and policy improvement, 10 rounds,\n",
    "# to converge on the optimal values and policy.\n",
    "for _ in range(10):\n",
    "    for _ in range(100):\n",
    "        values = get_values()\n",
    "    pi = get_pi()\n",
    "values,pi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Render the board, marking the agent's position with its chosen action.\n",
    "def show(row,col,action):\n",
    "    \"\"\"Print the 4x12 grid: 37 ground cells, 10 cliff cells, the goal flag, and the agent.\"\"\"\n",
    "    # 37 ground cells (rows 0-2 plus the start cell), 10 cliff cells, then the goal.\n",
    "    graph=['⬜']*37+['❌']*10+['🚩']\n",
    "    # Map the action index to an arrow marker for the agent's cell.\n",
    "    action = {0:'🔺',1:'🔻',2:'👈',3:'👉'}[action]\n",
    "    graph[row*12+col] = action\n",
    "    graph = ''.join(graph)\n",
    "\n",
    "    # Print one 12-cell row at a time.\n",
    "    for i in range(0,4*12,12):\n",
    "        print(graph[i:i+12])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜\n",
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜\n",
      "⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜⬜🔻\n",
      "⬜❌❌❌❌❌❌❌❌❌❌🚩\n"
     ]
    }
   ],
   "source": [
    "from IPython import display\n",
    "import time\n",
    "def test():\n",
    "    \"\"\"Play one episode from the start cell under policy `pi`, animating each step.\"\"\"\n",
    "    row = 0\n",
    "    col = 0\n",
    "    # Cap the episode at N steps.\n",
    "    for _ in range(200):\n",
    "        # Sample an action from the current policy at this cell.\n",
    "        action = np.random.choice(np.arange(4),size=1,p=pi[row,col])[0]\n",
    "\n",
    "        # Animate: clear the previous frame and draw this one.\n",
    "        display.clear_output(wait=True)\n",
    "        time.sleep(0.1)\n",
    "        show(row,col,action)\n",
    "\n",
    "        # Apply the action.\n",
    "        row,col,reward = move(row,col,action)\n",
    "        \n",
    "        # Stop once we reach the goal or fall into the cliff.\n",
    "        if get_state(row,col) in ['trap','terminal']:\n",
    "            break\n",
    "\n",
    "test()\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "↓↓↓↓↓↓↓↓↓↓↓↓\n",
      "↓↓↓↓↓↓↓↓↓↓↓↓\n",
      "→→→→→→→→→→→↓\n",
      "↑↑↑↑↑↑↑↑↑↑↑↑\n"
     ]
    }
   ],
   "source": [
    "# Print the learned policy: the argmax action of each cell rendered as an arrow.\n",
    "## pi axes: (row, col, action)\n",
    "tactic = pi.argmax(axis=2)\n",
    "for row in tactic:\n",
    "    for element in row:\n",
    "        if element == 0:\n",
    "            print(\"↑\",end='')\n",
    "        elif element == 1:\n",
    "            print(\"↓\",end='')\n",
    "        elif element == 2:\n",
    "            print(\"←\",end='')\n",
    "        elif element == 3:\n",
    "            print(\"→\",end='')\n",
    "    print()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py38",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
