{
 "cells": [
  {
   "cell_type": "code",
   "id": "0177b8be-3392-4c5a-b8ca-ccd3832f1724",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:40.071617Z",
     "start_time": "2025-08-15T07:22:40.026681Z"
    }
   },
   "source": [
    "import numpy as np     # only numpy needs to be installed; the rest is stdlib/local\n",
    "import random\n",
    "import GridWorld_v2    # local module providing the grid-world environment"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "cell_type": "code",
   "id": "927872b8",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:40.076438Z",
     "start_time": "2025-08-15T07:22:40.072622Z"
    }
   },
   "source": [
    "np.eye(5)  # 5x5 identity; its rows are used below as one-hot encodings of the 5 actions"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1., 0., 0., 0., 0.],\n",
       "       [0., 1., 0., 0., 0.],\n",
       "       [0., 0., 1., 0., 0.],\n",
       "       [0., 0., 0., 1., 0.],\n",
       "       [0., 0., 0., 0., 1.]])"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "id": "6ec328c7-b310-4c31-9462-be10cea730ac",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:40.088252Z",
     "start_time": "2025-08-15T07:22:40.076438Z"
    }
   },
   "source": [
    "gamma = 0.9   # discount factor; the closer to 0, the more short-sighted the agent\n",
    "\n",
    "rows = 5      # remember: rows and columns must be kept in sync here\n",
    "columns = 5\n",
    "# gridworld = GridWorld_v2.GridWorld_v2(rows=rows, columns=columns, forbiddenAreaNums=8, targetNums=2, seed = 52,forbiddenAreaScore=-10)\n",
    "# gridworld = GridWorld_v2.GridWorld_v2(desc = [\".#\",\".T\"])             # example 4-1 from Prof. Zhao's lecture\n",
    "# gridworld = GridWorld_v2.GridWorld_v2(desc = [\"##.T\",\"...#\",\"....\"])  # an arbitrary example\n",
    "gridworld = GridWorld_v2.GridWorld_v2(forbiddenAreaScore=-10, score=1,desc = [\".....\",\".##..\",\"..#..\",\".#T#.\",\".#...\"]) \n",
    "gridworld.show()\n",
    "\n",
    "trajectorySteps = 100\n",
    "\n",
    "value = np.zeros(rows*columns)       # initial values can be arbitrary; all zeros is fine\n",
    "qtable = np.zeros((rows*columns,5))  # only the dimensions matter here; the contents get overwritten\n",
    "\n",
    "# np.random.seed(50)\n",
    "policy = np.eye(5)[np.random.randint(0,5,size=(rows*columns))] \n",
    "print(policy) # random (rows*columns) x 5 matrix; each row one-hot encodes the action chosen in that state\n",
    "\n",
    "gridworld.showPolicy(policy)\n",
    "# gridworld.showPolicy(policy)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜️⬜️⬜️⬜️⬜️\n",
      "⬜️🚫🚫⬜️⬜️\n",
      "⬜️⬜️🚫⬜️⬜️\n",
      "⬜️🚫✅🚫⬜️\n",
      "⬜️🚫⬜️⬜️⬜️\n",
      "[[0. 0. 0. 0. 1.]\n",
      " [0. 0. 1. 0. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 0. 1. 0. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 0. 1.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 0. 1. 0. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 0. 1.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 1. 0.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 0. 1.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 0. 0. 1.]\n",
      " [0. 1. 0. 0. 0.]\n",
      " [0. 0. 1. 0. 0.]\n",
      " [0. 1. 0. 0. 0.]]\n",
      "🔄⬇️➡️⬅️⬅️\n",
      "➡️⏪⏬➡️➡️\n",
      "🔄⬅️⏬➡️🔄\n",
      "⬅️⏩️✅⏩️🔄\n",
      "➡️🔄➡️⬇️➡️\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "9a59115c-1e07-440a-b885-5003eebcd8f8",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:42.948412Z",
     "start_time": "2025-08-15T07:22:40.089257Z"
    }
   },
   "source": [
    "# Estimate action values by sampling (Monte Carlo); being model-free means we do not know the whole gridworld or how many states it has, so we no longer use the value array above\n",
    "# Show what the current policy looks like on the grid world\n",
    "gridworld.showPolicy(policy)\n",
    "# Announce that the starting policy is a random one\n",
    "print(\"random policy\")\n",
    "# Initialize an all-zero Q table: one row per state (rows * columns), one column per action (5)\n",
    "qtable = np.zeros((rows*columns,5))\n",
    "# Copy the Q table and add 1 to every element so the first convergence check below succeeds\n",
    "qtable_pre = qtable.copy()+1\n",
    "# Keep iterating while the sum of squared differences between successive Q tables exceeds 0.001\n",
    "while(np.sum((qtable_pre-qtable)**2)>0.001):\n",
    "    # Print the squared-difference sum; the smaller it is, the more stable the Q table\n",
    "    print(np.sum((qtable_pre-qtable)**2)) \n",
    "    # Save the current Q table as the previous one for the next iteration\n",
    "    qtable_pre = qtable.copy()\n",
    "    # Obtain action values through sampling\n",
    "    # Loop over every state\n",
    "    for i in range(rows*columns):\n",
    "        # Loop over every action\n",
    "        for j in range(5):\n",
    "            # Starting from state i with first action j, sample one trajectory that then follows policy, and get its reward sequence\n",
    "            Trajectory = gridworld.getTrajectoryScore(nowState=i, action=j, policy=policy, steps=trajectorySteps)\n",
    "            \n",
    "            # Note: the return value is a list of (trajectorySteps+1) tuples, because the first action is included too\n",
    "            # a = r + gamma*r1 + gamma*gamma*r2 + gamma*gamma*gamma*r3 ...\n",
    "            # each tuple holds (state, action, reward)\n",
    "            \n",
    "            # Seed the discounted return with the reward of the trajectory's final step\n",
    "            tmp = Trajectory[trajectorySteps][2]\n",
    "            # Walk backwards from the second-to-last step, accumulating the discounted return\n",
    "            for k in range(trajectorySteps-1,-1,-1):\n",
    "                # Discount the accumulated tail and add this step's reward\n",
    "                tmp = tmp*gamma + Trajectory[k][2]  # backward pass keeps this a single O(steps) sweep\n",
    "            # print(tmp)\n",
    "            \n",
    "            # Store the sampled return as Q(i, j): the value of taking action j from state i\n",
    "            qtable[i][j] = tmp # action value obtained by sampling\n",
    "\n",
    "    # Greedy policy improvement: pick the best action per state and one-hot encode it; policy has shape (rows*columns, 5); shows how a sparse id becomes one-hot\n",
    "    policy = np.eye(5)[np.argmax(qtable,axis=1)]\n",
    "    # Print the Q values of state 17 for inspection\n",
    "    print(qtable[17])\n",
    "    # Print the Q values of state 22 for inspection\n",
    "    print(qtable[22])\n",
    "    # policy = np.eye(5)[np.argmax(qtable,axis=1)]  # greedy update written as one-hot rows of the identity\n",
    "    # Show the updated policy on the grid world\n",
    "    gridworld.showPolicy(policy)\n",
    "    # Print the squared-difference sum after this update\n",
    "    print(f'Q差异{np.sum((qtable_pre-qtable)**2)}')\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🔄⬇️➡️⬅️⬅️\n",
      "➡️⏪⏬➡️➡️\n",
      "🔄⬅️⏬➡️🔄\n",
      "⬅️⏩️✅⏩️🔄\n",
      "➡️🔄➡️⬇️➡️\n",
      "random policy\n",
      "125.0\n",
      "[-47.8937303  -10.          -8.09976095 -47.8937303  -42.10411822]\n",
      "[-42.10411822  -8.99976095  -9.09976095 -99.99760947  -8.09976095]\n",
      "🔄➡️➡️➡️⬅️\n",
      "⬆️⏬⏫️⬆️⬆️\n",
      "➡️⬅️⏩️➡️⬇️\n",
      "⬆️⏫️✅⏫️⬆️\n",
      "⬆️⏩️🔄⬅️⬆️\n",
      "Q差异174415.5480033233\n",
      "174415.5480033233\n",
      "[-10. -10.   0. -10.   1.]\n",
      "[  1.   0.  -1. -10.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️➡️⬆️\n",
      "Q差异147733.92644541844\n",
      "147733.92644541844\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  0.          7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬆️\n",
      "Q差异2352.9230647891027\n",
      "2352.9230647891027\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异328.0306370259419\n",
      "328.0306370259419\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异265.70307335192075\n",
      "265.70307335192075\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异215.2179210453017\n",
      "215.2179210453017\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异174.32510451934456\n",
      "174.32510451934456\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异1005.2058492449106\n",
      "1005.2058492449106\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异4.129261038120451\n",
      "4.129261038120451\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异3.3447014408775724\n",
      "3.3447014408775724\n",
      "[-1.00023905 -1.00023905  8.99976095 -1.00023905  9.99976095]\n",
      "[ 9.99976095  8.09976095  7.99976095 -1.90023905  8.99976095]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "Q差异0.0\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "id": "2015e134-e1b0-41bf-9f36-1de26026daac",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:42.952659Z",
     "start_time": "2025-08-15T07:22:42.949416Z"
    }
   },
   "source": [
    "np.eye(5)  # repeat of the identity-matrix demo above; rows double as one-hot action vectors"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1., 0., 0., 0., 0.],\n",
       "       [0., 1., 0., 0., 0.],\n",
       "       [0., 0., 1., 0., 0.],\n",
       "       [0., 0., 0., 1., 0.],\n",
       "       [0., 0., 0., 0., 1.]])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "id": "d3ca3537-d7fa-4ec4-a315-d74805af4178",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:42.954956Z",
     "start_time": "2025-08-15T07:22:42.952659Z"
    }
   },
   "source": [],
   "outputs": [],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "id": "5fdb1c26-1133-420b-b850-c0a3b4916d1b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T07:22:42.957084Z",
     "start_time": "2025-08-15T07:22:42.954956Z"
    }
   },
   "source": [],
   "outputs": [],
   "execution_count": 5
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
