{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afa56065",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(3, 4)\n",
      "[0, 1, 2, 3]\n",
      "(0, 0)\n",
      "(0, 1)\n",
      "(0, 2)\n",
      "(0, 3)\n",
      "(1, 0)\n",
      "(1, 1)\n",
      "(1, 2)\n",
      "(1, 3)\n",
      "(2, 0)\n",
      "(2, 1)\n",
      "(2, 2)\n",
      "(2, 3)\n",
      "<generator object GridWorld.states at 0x0000022DFF44B680>\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'GridWorld' object has no attribute 'render_v'",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mAttributeError\u001b[39m                            Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[14]\u001b[39m\u001b[32m, line 65\u001b[39m\n\u001b[32m     63\u001b[39m     \u001b[38;5;28mprint\u001b[39m(state)\n\u001b[32m     64\u001b[39m \u001b[38;5;28mprint\u001b[39m(env.states)\n\u001b[32m---> \u001b[39m\u001b[32m65\u001b[39m \u001b[43menv\u001b[49m\u001b[43m.\u001b[49m\u001b[43mrender_v\u001b[49m()\n",
      "\u001b[31mAttributeError\u001b[39m: 'GridWorld' object has no attribute 'render_v'"
     ]
    }
   ],
   "source": [
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "import random\n",
     "from collections import defaultdict\n",
     "\n",
     "class GridWorld:\n",
     "    \"\"\"A 3x4 grid-world MDP used by the Monte Carlo code below.\n",
     "\n",
     "    States are (row, col) tuples with (0, 0) at the top-left.  The layout\n",
     "    is fixed: goal at (0, 3) with reward +1.0, trap at (1, 3) with reward\n",
     "    -1.0, a wall at (1, 1), and the agent starting at (2, 0).\n",
     "    \"\"\"\n",
     "    def __init__(self):\n",
     "        # Integer action codes; action_meaning maps them to direction names.\n",
     "        self.action_space = [0, 1, 2, 3]\n",
     "        self.action_meaning = {0: 'up', 1: 'right', 2: 'down', 3: 'left'}\n",
     "        # Reward received on entering each cell; None marks the wall cell\n",
     "        # (the None also makes this an object-dtype array).\n",
     "        self.reward_map=np.array(\n",
     "            [[0,0,0,1.0],\n",
     "             [0,None,0,-1.0],\n",
     "             [0,0,0,0]]\n",
     "        )\n",
     "        self.goal_state=(0,3)\n",
     "        self.wall_state=(1,1)\n",
     "        self.start_state=(2,0)\n",
     "        self.agent_state=self.start_state\n",
     "    \n",
     "    @property\n",
     "    def height(self):\n",
     "        \"\"\"Number of rows in the grid.\"\"\"\n",
     "        return self.reward_map.shape[0]\n",
     "    \n",
     "    @property\n",
     "    def width(self):\n",
     "        \"\"\"Number of columns in the grid.\"\"\"\n",
     "        return self.reward_map.shape[1]\n",
     "\n",
     "    @property\n",
     "    def shape(self):\n",
     "        \"\"\"(height, width) of the grid.\"\"\"\n",
     "        return self.reward_map.shape\n",
     "\n",
     "    @property\n",
     "    def actions(self):\n",
     "        \"\"\"List of valid action codes.\"\"\"\n",
     "        return self.action_space\n",
     "\n",
     "    @property\n",
     "    def states(self):\n",
     "        \"\"\"Yield every non-wall (row, col) state in row-major order.\"\"\"\n",
     "        for i in range(self.height):\n",
     "            for j in range(self.width):\n",
     "                if (i, j) != self.wall_state:  # exclude the wall state\n",
     "                    yield (i, j)\n",
     "\n",
     "    def next_state(self, state, action):\n",
     "        \"\"\"Deterministic transition: the state reached from `state` by\n",
     "        `action`; moves off the grid or into the wall leave the state\n",
     "        unchanged.\"\"\"\n",
     "        # Row/col deltas indexed by action code (up, right, down, left).\n",
     "        action_move_map = [(-1, 0), (0, 1), (1, 0), (0, -1)]\n",
     "        move=action_move_map[action]\n",
     "        next_state=(state[0]+move[0],state[1]+move[1])\n",
     "        ny,nx=next_state\n",
     "        if ny<0 or ny>=self.height or nx<0 or nx>=self.width:\n",
     "            next_state=state\n",
     "        elif (ny,nx)==self.wall_state:\n",
     "            next_state=state\n",
     "        return next_state\n",
     "    \n",
     "    def reward(self, state, action, next_state):\n",
     "        \"\"\"Reward for entering `next_state` (state and action are unused).\"\"\"\n",
     "        return self.reward_map[next_state]\n",
     "    \n",
     "    def is_terminal(self, state):\n",
     "        # NOTE(review): (1, 3) hard-codes the trap cell; keep it in sync\n",
     "        # with the -1.0 entry in reward_map.\n",
     "        return state == self.goal_state or state == (1, 3)  # goal state or penalty (trap) state\n",
     "    \n",
     "    def reset(self):\n",
     "        \"\"\"Move the agent back to the start state and return it.\"\"\"\n",
     "        self.agent_state = self.start_state\n",
     "        return self.agent_state\n",
     "    \n",
     "    def step(self, action):\n",
     "        \"\"\"Apply `action` to the agent; return (next_state, reward, done).\"\"\"\n",
     "        current_state = self.agent_state\n",
     "        next_state = self.next_state(current_state, action)\n",
     "        reward = self.reward(current_state, action, next_state)\n",
     "        self.agent_state = next_state\n",
     "        done = self.is_terminal(next_state)\n",
     "        return next_state, reward, done\n",
     "    \n",
     "    def render_v(self, value_function=None):\n",
     "        \"\"\"Render the grid with matplotlib, annotating each cell with its\n",
     "        entry in `value_function` (dict state -> float; missing states\n",
     "        show as 0).\"\"\"\n",
     "        if value_function is None:\n",
     "            value_function = {}\n",
     "        \n",
     "        fig, ax = plt.subplots(figsize=(8, 6))\n",
     "        \n",
     "        # Draw cell by cell; matplotlib's y axis points up, so row i is\n",
     "        # drawn at y = height-1-i to keep (0, 0) at the top-left.\n",
     "        for i in range(self.height):\n",
     "            for j in range(self.width):\n",
     "                if (i, j) == self.wall_state:\n",
     "                    # Wall cell: black\n",
     "                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, \n",
     "                                             facecolor='black', edgecolor='white'))\n",
     "                    ax.text(j+0.5, self.height-1-i+0.5, 'WALL', \n",
     "                           ha='center', va='center', color='white', fontsize=8)\n",
     "                elif (i, j) == self.goal_state:\n",
     "                    # Goal state: green\n",
     "                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, \n",
     "                                             facecolor='lightgreen', edgecolor='black'))\n",
     "                    value = value_function.get((i, j), 0)\n",
     "                    ax.text(j+0.5, self.height-1-i+0.5, f'GOAL\\n{value:.2f}', \n",
     "                           ha='center', va='center', fontsize=8)\n",
     "                elif (i, j) == (1, 3):  # penalty (trap) state\n",
     "                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, \n",
     "                                             facecolor='lightcoral', edgecolor='black'))\n",
     "                    value = value_function.get((i, j), 0)\n",
     "                    ax.text(j+0.5, self.height-1-i+0.5, f'TRAP\\n{value:.2f}', \n",
     "                           ha='center', va='center', fontsize=8)\n",
     "                elif (i, j) == self.start_state:\n",
     "                    # Start state: blue\n",
     "                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, \n",
     "                                             facecolor='lightblue', edgecolor='black'))\n",
     "                    value = value_function.get((i, j), 0)\n",
     "                    ax.text(j+0.5, self.height-1-i+0.5, f'START\\n{value:.2f}', \n",
     "                           ha='center', va='center', fontsize=8)\n",
     "                else:\n",
     "                    # Ordinary state: white\n",
     "                    ax.add_patch(plt.Rectangle((j, self.height-1-i), 1, 1, \n",
     "                                             facecolor='white', edgecolor='black'))\n",
     "                    value = value_function.get((i, j), 0)\n",
     "                    ax.text(j+0.5, self.height-1-i+0.5, f'{value:.2f}', \n",
     "                           ha='center', va='center', fontsize=10)\n",
     "        \n",
     "        ax.set_xlim(0, self.width)\n",
     "        ax.set_ylim(0, self.height)\n",
     "        ax.set_aspect('equal')\n",
     "        ax.set_title('Grid World Value Function')\n",
     "        ax.set_xticks(range(self.width+1))\n",
     "        ax.set_yticks(range(self.height+1))\n",
     "        plt.grid(True)\n",
     "        plt.show()\n",
     "\n",
     "# 蒙特卡洛方法实现\n",
     "def random_policy(env, state):\n",
     "    \"\"\"随机策略\"\"\"\n",
     "    return random.choice(env.actions)\n",
     "\n",
     "def epsilon_greedy_policy(env, state, Q, epsilon=0.1):\n",
     "    \"\"\"ε-贪婪策略\"\"\"\n",
     "    if random.random() < epsilon:\n",
     "        return random.choice(env.actions)\n",
     "    else:\n",
     "        # 选择Q值最大的动作\n",
     "        q_values = [Q.get((state, action), 0) for action in env.actions]\n",
     "        max_q = max(q_values)\n",
     "        best_actions = [action for action, q in zip(env.actions, q_values) if q == max_q]\n",
     "        return random.choice(best_actions)\n",
     "\n",
     "def generate_episode(env, policy, Q=None, epsilon=0.1):\n",
     "    \"\"\"生成一个episode\"\"\"\n",
     "    episode = []\n",
     "    state = env.reset()\n",
     "    \n",
     "    while True:\n",
     "        if Q is not None:\n",
     "            action = epsilon_greedy_policy(env, state, Q, epsilon)\n",
     "        else:\n",
     "            action = policy(env, state)\n",
     "        \n",
     "        next_state, reward, done = env.step(action)\n",
     "        episode.append((state, action, reward))\n",
     "        \n",
     "        if done:\n",
     "            break\n",
     "        state = next_state\n",
     "    \n",
     "    return episode\n",
     "\n",
     "def monte_carlo_first_visit(env, num_episodes=10000, gamma=0.9):\n",
     "    \"\"\"First-visit Monte Carlo prediction of V(s) under the random policy.\n",
     "\n",
     "    Args:\n",
     "        env: environment providing reset()/step() (see GridWorld).\n",
     "        num_episodes: number of episodes to sample.\n",
     "        gamma: discount factor.\n",
     "\n",
     "    Returns:\n",
     "        dict mapping state -> estimated value.\n",
     "    \"\"\"\n",
     "    # Running sum/count per state: O(1) per update instead of re-averaging\n",
     "    # an ever-growing list of returns with np.mean (which was O(n)).\n",
     "    return_sum = defaultdict(float)\n",
     "    return_count = defaultdict(int)\n",
     "    value_function = defaultdict(float)\n",
     "    \n",
     "    for episode_num in range(num_episodes):\n",
     "        # Sample one episode under the uniform random policy.\n",
     "        episode = generate_episode(env, random_policy)\n",
     "        \n",
     "        # Backward pass accumulating the discounted return G.  Overwriting\n",
     "        # first_return leaves, for each state, the G of its EARLIEST visit.\n",
     "        # (The previous visited-set version recorded the LAST visit instead,\n",
     "        # because the first occurrence seen walking backwards is the last\n",
     "        # forward-time visit — not first-visit MC.)\n",
     "        G = 0\n",
     "        first_return = {}\n",
     "        for t in reversed(range(len(episode))):\n",
     "            state, _action, reward = episode[t]\n",
     "            G = gamma * G + reward\n",
     "            first_return[state] = G\n",
     "        \n",
     "        for state, ret in first_return.items():\n",
     "            return_sum[state] += ret\n",
     "            return_count[state] += 1\n",
     "            value_function[state] = return_sum[state] / return_count[state]\n",
     "        \n",
     "        # Print progress every 1000 episodes.\n",
     "        if (episode_num + 1) % 1000 == 0:\n",
     "            print(f\"Episode {episode_num + 1}/{num_episodes} completed\")\n",
     "    \n",
     "    return dict(value_function)\n",
     "\n",
     "def monte_carlo_control(env, num_episodes=10000, gamma=0.9, epsilon=0.1):\n",
     "    \"\"\"On-policy first-visit Monte Carlo control with an ε-greedy policy.\n",
     "\n",
     "    Args:\n",
     "        env: environment providing reset()/step() (see GridWorld).\n",
     "        num_episodes: number of episodes to sample.\n",
     "        gamma: discount factor.\n",
     "        epsilon: exploration rate of the ε-greedy behaviour policy.\n",
     "\n",
     "    Returns:\n",
     "        (value_function, Q): greedy state values and the action-value dict.\n",
     "    \"\"\"\n",
     "    Q = defaultdict(float)\n",
     "    # Running sum/count per (state, action): O(1) per update instead of\n",
     "    # re-averaging a growing list of returns with np.mean.\n",
     "    return_sum = defaultdict(float)\n",
     "    return_count = defaultdict(int)\n",
     "    \n",
     "    for episode_num in range(num_episodes):\n",
     "        # Sample one episode under the current ε-greedy policy.\n",
     "        episode = generate_episode(env, None, Q, epsilon)\n",
     "        \n",
     "        # Backward pass accumulating the discounted return G.  Overwriting\n",
     "        # first_return leaves, for each (state, action) pair, the G of its\n",
     "        # EARLIEST visit.  (The previous visited-set version recorded the\n",
     "        # LAST forward-time visit — not first-visit MC.)\n",
     "        G = 0\n",
     "        first_return = {}\n",
     "        for t in reversed(range(len(episode))):\n",
     "            state, action, reward = episode[t]\n",
     "            G = gamma * G + reward\n",
     "            first_return[(state, action)] = G\n",
     "        \n",
     "        for state_action, ret in first_return.items():\n",
     "            return_sum[state_action] += ret\n",
     "            return_count[state_action] += 1\n",
     "            Q[state_action] = return_sum[state_action] / return_count[state_action]\n",
     "        \n",
     "        # Print progress every 1000 episodes.\n",
     "        if (episode_num + 1) % 1000 == 0:\n",
     "            print(f\"Episode {episode_num + 1}/{num_episodes} completed\")\n",
     "    \n",
     "    # Derive V(s) = max_a Q(s, a) for every reachable state.\n",
     "    value_function = {}\n",
     "    for state in env.states:\n",
     "        q_values = [Q.get((state, action), 0) for action in env.actions]\n",
     "        value_function[state] = max(q_values) if q_values else 0\n",
     "    \n",
     "    return dict(value_function), dict(Q)\n",
     "\n",
     "# Demo: build the environment, run Monte Carlo prediction, plot V.\n",
     "env = GridWorld()\n",
     "print(\"Grid World Environment:\")\n",
     "for label, value in [\n",
     "    (\"Shape\", env.shape),\n",
     "    (\"Actions\", env.actions),\n",
     "    (\"Start state\", env.start_state),\n",
     "    (\"Goal state\", env.goal_state),\n",
     "    (\"Wall state\", env.wall_state),\n",
     "]:\n",
     "    print(f\"{label}: {value}\")\n",
     "\n",
     "# Estimate the state-value function of the random policy.\n",
     "print(\"\\nRunning Monte Carlo First Visit...\")\n",
     "value_function = monte_carlo_first_visit(env, num_episodes=5000)\n",
     "\n",
     "print(\"\\nValue Function:\")\n",
     "for state, value in sorted(value_function.items()):\n",
     "    print(f\"State {state}: {value:.3f}\")\n",
     "\n",
     "# Draw the grid annotated with the estimated values.\n",
     "env.render_v(value_function)\n"
    ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
