{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "22066007-dad6-4ea8-86ef-a26266be0434",
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'gym'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mgym\u001b[39;00m\n\u001b[0;32m      2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mmatplotlib\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m pyplot \u001b[38;5;28;01mas\u001b[39;00m plt\n\u001b[0;32m      3\u001b[0m get_ipython()\u001b[38;5;241m.\u001b[39mrun_line_magic(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmatplotlib\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124minline\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'gym'"
     ]
    }
   ],
   "source": [
    "import gym\n",
    "from matplotlib import pyplot as plt\n",
    "%matplotlib inline\n",
    "import os\n",
    "\n",
    "os.environ['SDL_VIDEODRIVER']='dummy' # hide the pygame window\n",
    "\n",
    "# Create the environment\n",
    "# is_slippery=False : ignore slipping on the ice\n",
    "# map_name='4x4'    : a 4x4 frozen lake\n",
    "# desc defines the terrain layout\n",
    "# render_mode='rgb_array' : gym>=0.26 sets the render mode at make() time\n",
    "# (this matches the 5-tuple env.step() unpacking used further below)\n",
    "\n",
    "env = gym.make('FrozenLake-v1', is_slippery=False,\n",
    "               map_name='4x4',\n",
    "               desc=['SFFF',\n",
    "                     'FHFH',\n",
    "                     'FFFH',\n",
    "                     'HFFG'],\n",
    "               render_mode='rgb_array')\n",
    "env.reset() # reset the environment\n",
    "\n",
    "# Unwrap so the state-transition matrix P is accessible\n",
    "env = env.unwrapped\n",
    "\n",
    "def show():\n",
    "    \"\"\"Render the current frame and display it inline.\"\"\"\n",
    "    # gym>=0.26: render() takes no arguments; the mode was set in make()\n",
    "    plt.imshow(env.render())\n",
    "    plt.show()\n",
    "\n",
    "# Inspect the state table of the game:\n",
    "# 4*4 = 16 states, 4 possible actions in each state,\n",
    "# each action has up to 3 probabilistic outcomes\n",
    "# (0.3333..., 0, 0, False) means (probability, next state, reward, done)\n",
    "\n",
    "print(len(env.P), env.P[0])\n",
    "\n",
    "# show()\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "# Initialize the value of every cell\n",
    "values = np.zeros(16)\n",
    "\n",
    "# Initialize the per-cell action probabilities (uniform: 0.25 each)\n",
    "pi = np.ones([16, 4]) * 0.25\n",
    "\n",
    "# Both algorithms work, but value iteration converges faster\n",
    "# algorithm = '策略迭代'  # policy iteration (uncomment to switch)\n",
    "algorithm = '价值迭代'  # value iteration\n",
    "\n",
    "print(values, pi)\n",
    "\n",
    "# Compute Q(s, a)\n",
    "def get_qsa(state, action):\n",
    "    \"\"\"Return the value of taking `action` in `state`.\n",
    "\n",
    "    Sums over the possible transition outcomes, each weighted by its\n",
    "    probability, using a discount factor of 0.9 for future value.\n",
    "    \"\"\"\n",
    "    total = 0\n",
    "\n",
    "    # Each action has up to three outcomes; weight them by probability.\n",
    "    for prob, next_state, reward, over in env.P[state][action]:\n",
    "\n",
    "        # Terminal next states (goal or hole) contribute no future value;\n",
    "        # otherwise take the recorded value discounted by 0.9.\n",
    "        future = 0 if over else values[next_state] * 0.9\n",
    "\n",
    "        # Probability-weighted contribution of this outcome.\n",
    "        total += prob * (future + reward)\n",
    "\n",
    "    return total\n",
    "\n",
    "print(get_qsa(0, 0)) # value of action 0 in state 0\n",
    "\n",
    "# Policy evaluation\n",
    "def get_value():\n",
    "    \"\"\"Re-estimate the value of every cell under the current algorithm.\"\"\"\n",
    "    new_values = np.zeros(16)\n",
    "\n",
    "    # Visit every cell of the 4x4 grid.\n",
    "    for state in range(16):\n",
    "        # Q-values of the 4 actions available in this cell.\n",
    "        qsa = np.array([get_qsa(state, action) for action in range(4)])\n",
    "\n",
    "        if algorithm == '策略迭代':\n",
    "            # Policy iteration: expected value under the current policy pi.\n",
    "            new_values[state] = (qsa * pi[state]).sum()\n",
    "\n",
    "        if algorithm == '价值迭代':\n",
    "            # Value iteration: take the best action's value directly.\n",
    "            new_values[state] = qsa.max()\n",
    "\n",
    "    return new_values\n",
    "\n",
    "print(get_value())\n",
    "\n",
    "# Policy improvement\n",
    "def get_pi():\n",
    "    \"\"\"Derive a greedy policy from the current Q-values.\n",
    "\n",
    "    Maximal actions share the probability mass equally; every other\n",
    "    action gets probability 0.\n",
    "    \"\"\"\n",
    "    new_pi = np.zeros([16, 4])\n",
    "\n",
    "    # Visit every cell of the 4x4 grid.\n",
    "    for state in range(16):\n",
    "        # Q-values of the 4 actions available in this cell.\n",
    "        qsa = np.array([get_qsa(state, action) for action in range(4)])\n",
    "\n",
    "        # Boolean mask of the best action(s); split probability among them\n",
    "        # (True/count for maximal actions, 0 elsewhere).\n",
    "        best = qsa == qsa.max()\n",
    "        new_pi[state] = best / best.sum()\n",
    "\n",
    "    return new_pi\n",
    "\n",
    "print(get_pi())\n",
    "\n",
    "for _ in range(1000):\n",
    "    values = get_value()\n",
    "\n",
    "    # Fix: policy iteration must alternate evaluation and improvement;\n",
    "    # without this the loop only ever evaluates the initial uniform policy\n",
    "    # and a single improvement at the end is not policy iteration.\n",
    "    # (No effect under value iteration, the active setting.)\n",
    "    if algorithm == '策略迭代':\n",
    "        pi = get_pi()\n",
    "\n",
    "pi = get_pi()\n",
    "\n",
    "print(values, pi)\n",
    "\n",
    "from IPython import display\n",
    "import time\n",
    "\n",
    "def play():\n",
    "    \"\"\"Roll out one episode following policy `pi`, rendering each step.\"\"\"\n",
    "    env.reset()\n",
    "\n",
    "    # start at state 0\n",
    "    index = 0\n",
    "\n",
    "    # play at most N steps\n",
    "    for i in range(100):\n",
    "        # sample an action from the policy row for the current state\n",
    "        action = np.random.choice(4, size=1, p=pi[index])[0]\n",
    "\n",
    "        # take the action in the environment\n",
    "        index, reward, terminated, truncated, _ = env.step(action)\n",
    "\n",
    "        display.clear_output(wait=True)\n",
    "        time.sleep(0.1)\n",
    "        show()\n",
    "\n",
    "        # stop once the episode ends (reached the goal or fell into a hole)\n",
    "        if terminated or truncated:\n",
    "            break\n",
    "    \n",
    "    print(i)\n",
    "\n",
    "play()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd3c75ce-1f2a-4282-a7c2-fa2375ba6a8a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
