{
 "cells": [
  {
   "cell_type": "code",
   "id": "2a129cdd-2f42-41ca-a367-20e73e3e160c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:05.657237Z",
     "start_time": "2025-08-15T08:01:05.604720Z"
    }
   },
   "source": [
     "import numpy as np     # only the numpy package needs to be installed\n",
    "import random\n",
    "import GridWorld_v2\n",
    "# import set"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "cell_type": "code",
   "id": "10109ffe-ac93-4120-b750-62a6263f7594",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:05.671706Z",
     "start_time": "2025-08-15T08:01:05.658429Z"
    }
   },
   "source": [
     "gamma = 0.9   # discount factor: closer to 0 -> agent favors short-term reward; closer to 1 -> long-term reward\n",
     "rows = 5      # number of rows in the grid world (keep rows and columns in sync when changing the layout)\n",
     "columns = 5   # number of columns in the grid world\n",
     "\n",
     "# Several alternative grid-world initializations; the commented-out lines are different examples, pick as needed\n",
     "# gridworld = GridWorld_v2.GridWorld_v2(rows=rows, columns=columns, forbiddenAreaNums=8, targetNums=2, seed = 52,forbiddenAreaScore=-10)\n",
     "# gridworld = GridWorld_v2.GridWorld_v2(desc = [\".#\",\".T\"])             # example 4-1 from Prof. Zhao's course\n",
     "# gridworld = GridWorld_v2.GridWorld_v2(desc = [\"##.T\",\"...#\",\"....\"])  # an ad-hoc example\n",
     "# Initialize the grid world: forbidden-area score -10, target score 1, using the given layout string\n",
     "gridworld = GridWorld_v2.GridWorld_v2(forbiddenAreaScore=-10, score=1,desc = [\".....\",\".##..\",\"..#..\",\".#T#.\",\".#...\"]) \n",
     "#gridworld = GridWorld_v2.GridWorld_v2(forbiddenAreaScore=-10, score=1,desc = [\"T.\"]) \n",
     "gridworld.show()  # display the current state of the grid world\n",
     "\n",
     "value = np.zeros(rows*columns)       # state-value function, initialized to all zeros (any initialization works)\n",
     "qtable = np.zeros((rows*columns,5))  # action-value table of shape (num_states, num_actions); entries are overwritten later\n",
     "\n",
     "# np.random.seed(50)  # random seed for reproducibility (commented out here)\n",
     "# Randomly initialize the policy: one one-hot action distribution per state, shape (num_states, num_actions)\n",
     "policy = np.eye(5)[np.random.randint(0,5,size=(rows*columns))] \n",
     "gridworld.showPolicy(policy)  # display the current policy\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜️⬜️⬜️⬜️⬜️\n",
      "⬜️🚫🚫⬜️⬜️\n",
      "⬜️⬜️🚫⬜️⬜️\n",
      "⬜️🚫✅🚫⬜️\n",
      "⬜️🚫⬜️⬜️⬜️\n",
      "⬅️🔄🔄➡️⬆️\n",
      "🔄⏩️🔄⬆️⬅️\n",
      "➡️⬅️🔄⬆️⬅️\n",
      "🔄⏬✅⏬⬇️\n",
      "🔄⏩️⬅️⬇️⬇️\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "id": "04199d4d-c797-4075-801e-a469ec189ab2",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:08.374336Z",
     "start_time": "2025-08-15T08:01:05.672713Z"
    }
   },
   "source": [
     "# Every-visit MC; the next cell is the first-visit variant\n",
     "# Estimate action values by sampling: model-free means the transition probabilities of the\n",
     "# grid world are unknown, so the Bellman equation cannot be solved by direct iteration\n",
     "# Build a random policy: np.eye(5) one-hot rows, one random action per state\n",
     "policy = np.eye(5)[np.random.randint(0,5,size=(rows*columns))] \n",
     "# Display the grid world\n",
     "gridworld.show()\n",
     "# Display the current policy\n",
     "gridworld.showPolicy(policy)\n",
     "# Indicate that this is the random starting policy\n",
     "print(\"random policy\")\n",
     "\n",
     "# Number of steps per sampled trajectory\n",
     "trajectorySteps = 100\n",
     "# Initialize the action-value table (Qtable)\n",
     "qtable = np.zeros((rows*columns,5))    \n",
     "# Copy of the Qtable plus 1, used to test for convergence\n",
     "qtable_pre = qtable.copy()+1\n",
     "# Iterate while the summed squared change of the Qtable exceeds 0.001\n",
     "while(np.sum((qtable_pre-qtable)**2)>0.001):\n",
     "    # print(np.sum((qtable_pre-qtable)**2))\n",
     "    # Remember the previous Qtable\n",
     "    qtable_pre = qtable.copy()\n",
     "    # Estimate the action values by sampling\n",
     "    for i in range(rows * columns):    # loop over every state\n",
     "        for j in range(5):             # loop over every action\n",
     "            # Accumulated returns per state-action pair (same shape as qtable)\n",
     "            qtable_rewards = [[0 for j in range(5)] for i in range(rows * columns)] \n",
     "            # Visit counts per state-action pair (same shape as qtable_rewards)\n",
     "            qtable_nums =    [[0 for j in range(5)] for i in range(rows * columns)]\n",
     "            # Sample a trajectory that starts in state i with action j, then follows the current policy\n",
     "            Trajectory = gridworld.getTrajectoryScore(nowState=i, action=j, policy=policy, steps=trajectorySteps)\n",
     "            # Note: the return value is a list of (trajectorySteps+1) tuples, because the first action is included\n",
     "            # Take the score of the last step; the return is then built backwards via g <- gamma*g + r_{t+1}\n",
     "            _, _, score, _, _ = Trajectory[trajectorySteps]\n",
     "            # Walk the trajectory backwards, updating the action values\n",
     "            for k in range(trajectorySteps-1,-1,-1):\n",
     "                # State, action and reward at step k\n",
     "                tmpstate, tmpaction, tmpscore,_,_  = Trajectory[k]\n",
     "                # Discounted-return update\n",
     "                score = score*gamma + tmpscore  # backward trick: g <- gamma*g + r_{t+1}\n",
     "                # Accumulate the return for this state-action pair\n",
     "                qtable_rewards[tmpstate][tmpaction] += score\n",
     "                # Count the visit\n",
     "                qtable_nums[tmpstate][tmpaction] += 1\n",
     "                # The average return so far becomes the Q estimate\n",
     "                qtable[tmpstate][tmpaction] = qtable_rewards[tmpstate][tmpaction] / qtable_nums[tmpstate][tmpaction]\n",
     "                # every visit\n",
     "            # qtable[i,j] = score\n",
     "\n",
     "    # print(\"qtable[0]:\", qtable[0])\n",
     "    # print(\"qtable[1]:\", qtable[1])\n",
     "    # Greedy policy improvement: argmax over the Qtable, encoded one-hot\n",
     "    policy = np.eye(5)[np.argmax(qtable,axis=1)]  \n",
     "    # Display the updated policy\n",
     "    gridworld.showPolicy( policy)\n",
     "    # Print the mean action value of the current Qtable\n",
     "    print(\"action value's mean\",qtable.mean())\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜️⬜️⬜️⬜️⬜️\n",
      "⬜️🚫🚫⬜️⬜️\n",
      "⬜️⬜️🚫⬜️⬜️\n",
      "⬜️🚫✅🚫⬜️\n",
      "⬜️🚫⬜️⬜️⬜️\n",
      "➡️⬇️⬇️➡️⬅️\n",
      "⬆️⏫️⏩️⬆️⬆️\n",
      "🔄⬅️🔄⬅️➡️\n",
      "➡️⏩️✅⏫️⬇️\n",
      "⬅️⏬🔄⬇️⬅️\n",
      "random policy\n",
      "⬇️➡️➡️➡️⬇️\n",
      "⬇️⏬⏩️⬆️⬆️\n",
      "➡️⬅️⏪⬆️⬆️\n",
      "⬆️⏫️✅⏩️🔄\n",
      "🔄⏩️🔄⬅️⬆️\n",
      "action value's mean -26.88149228262773\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️➡️⬆️\n",
      "action value's mean -2.44\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬆️\n",
      "action value's mean -0.3226685507270451\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 0.0012532976793463745\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 0.29277346688155703\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 0.5552036922867665\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 0.791389917691976\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 2.8998281594850077\n",
      "➡️➡️➡️⬇️⬇️\n",
      "⬆️⏫️⏩️⬇️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 2.9688942318450082\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "action value's mean 2.968894231845008\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "bce921c3-3b3c-464b-9b0c-b0e52aa87523",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:10.816322Z",
     "start_time": "2025-08-15T08:01:08.375343Z"
    }
   },
   "source": [
     "# The part below can be skipped\n",
     "# Estimate action values by sampling: model-free means the grid-world transition probabilities are unknown, so the Bellman equation cannot be solved by direct iteration\n",
     "policy = np.eye(5)[np.random.randint(0,5,size=(rows*columns))] \n",
     "gridworld.show()\n",
     "gridworld.showPolicy(policy)\n",
     "print(\"random policy\")\n",
     "\n",
     "\n",
     "trajectorySteps = 100\n",
     "qtable = np.zeros((rows*columns,5))    # create the Qtable, i.e. the action-value table\n",
     "qtable_pre = qtable.copy()+1\n",
     "while(np.sum((qtable_pre-qtable)**2)>0.001):\n",
     "    print(np.sum((qtable_pre-qtable)**2))\n",
     "    qtable_pre = qtable.copy()\n",
     "    # Estimate the action values by sampling\n",
     "    for i in range(rows * columns):    # loop over every state\n",
     "        for j in range(5):             # loop over every action\n",
     "            # qtable_rewards = [[0 for j in range(5)] for i in range(rows * columns)] \n",
     "            # qtable_nums =    [[0 for j in range(5)] for i in range(rows * columns)]\n",
     "            Trajectory = gridworld.getTrajectoryScore(nowState=i, action=j, policy=policy, steps=trajectorySteps)\n",
     "            # Note: the return value is a list of (trajectorySteps+1) tuples, because the first action is included\n",
     "            _, _, score, _, _ = Trajectory[trajectorySteps]\n",
     "            for k in range(trajectorySteps-1,-1,-1):\n",
     "                tmpstate, tmpaction, tmpscore,_ ,_  = Trajectory[k]\n",
     "                score = score*gamma + tmpscore  # backward trick: g <- gamma*g + r_{t+1}\n",
     "                # qtable_rewards[tmpstate][tmpaction] += score\n",
     "                # qtable_nums[tmpstate][tmpaction] += 1\n",
     "                qtable[tmpstate][tmpaction] = score  # first visit: backward overwrites leave the earliest visit's return\n",
     "                \n",
     "            #qtable[i,j] = score\n",
     "\n",
     "    print(\"qtable[0]:\", qtable[0])\n",
     "    print(\"qtable[1]:\", qtable[1])\n",
     "    policy = np.eye(5)[np.argmax(qtable,axis=1)]  # greedy improvement: argmax over the Qtable, encoded one-hot\n",
     "    \n",
     "    gridworld.showPolicy(policy)\n",
     "    # print(np.sum((qtable_pre-qtable)**2))\n",
     "\n",
     "\n",
     "\n",
     "    "
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⬜️⬜️⬜️⬜️⬜️\n",
      "⬜️🚫🚫⬜️⬜️\n",
      "⬜️⬜️🚫⬜️⬜️\n",
      "⬜️🚫✅🚫⬜️\n",
      "⬜️🚫⬜️⬜️⬜️\n",
      "🔄⬆️🔄➡️⬆️\n",
      "🔄⏩️⏬🔄➡️\n",
      "⬆️⬅️⏩️➡️🔄\n",
      "⬅️🔄✅⏩️⬆️\n",
      "⬇️🔄⬅️➡️⬅️\n",
      "random policy\n",
      "125.0\n",
      "qtable[0]: [-1.         -8.99976095  0.         -1.          0.        ]\n",
      "qtable[1]: [ -9.99973439   0.         -27.1          0.          -8.99976095]\n",
      "⬇️➡️🔄⬇️⬅️\n",
      "⬆️⏬⏫️⬇️⬇️\n",
      "⬆️⬅️⏬⬆️⬇️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏪⬆️➡️⬆️\n",
      "148445.02909057122\n",
      "qtable[0]: [-1.  0.  0. -1.  0.]\n",
      "qtable[1]: [ -1.   0. -10.   0.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬆️\n",
      "143532.3678967472\n",
      "qtable[0]: [-1.  0.  0. -1.  0.]\n",
      "qtable[1]: [ -1.   0. -10.   0.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬆️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "656.0612740552413\n",
      "qtable[0]: [-1.  0.  0. -1.  0.]\n",
      "qtable[1]: [ -1.   0. -10.   0.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬆️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "265.70307335590013\n",
      "qtable[0]: [-1.  0.  0. -1.  0.]\n",
      "qtable[1]: [ -1.   0. -10.   0.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬆️\n",
      "⬆️⬅️⏬⬆️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "215.21792104600723\n",
      "qtable[0]: [-1.  0.  0. -1.  0.]\n",
      "qtable[1]: [ -1.   0. -10.   0.   0.]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏫️⬆️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "174.3251045200501\n",
      "qtable[0]: [2.13786691 3.48637956 2.82405631 2.13786691 3.13786691]\n",
      "qtable[1]: [ 2.48654535  3.87375507 -6.86213309  3.13786691  3.48654535]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "1005.1913240876647\n",
      "qtable[0]: [2.13786691 3.48637956 2.82405631 2.13786691 3.13786691]\n",
      "qtable[1]: [ 2.48654535  3.87375507 -6.86213309  3.13786691  3.48654535]\n",
      "➡️➡️➡️⬇️⬇️\n",
      "⬆️⏫️⏩️⬇️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n",
      "7.473914205728017\n",
      "qtable[0]: [2.13786691 3.48637956 2.82405631 2.13786691 3.13786691]\n",
      "qtable[1]: [ 2.48654535  3.87375507 -6.86213309  3.13786691  3.48654535]\n",
      "➡️➡️➡️➡️⬇️\n",
      "⬆️⏫️⏩️➡️⬇️\n",
      "⬆️⬅️⏬➡️⬇️\n",
      "⬆️⏩️✅⏪⬇️\n",
      "⬆️⏩️⬆️⬅️⬅️\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "id": "610b6845-1a7a-485a-a5b1-788fd9adf37b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:10.820328Z",
     "start_time": "2025-08-15T08:01:10.817329Z"
    }
   },
   "source": [],
   "outputs": [],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "id": "69941fef-966e-4a9a-9e0c-f9c47e83fc71",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-15T08:01:10.823356Z",
     "start_time": "2025-08-15T08:01:10.820837Z"
    }
   },
   "source": [],
   "outputs": [],
   "execution_count": 4
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
