{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "c46aafca",
   "metadata": {},
   "source": [
    "# 强化学习"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2560cd18",
   "metadata": {},
   "source": [
    "## 01 极简的强化学习训练示例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b88c3924",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from enum import Enum, auto\n",
    "from typing import NamedTuple, Tuple\n",
    "import random\n",
    "\n",
    "# ======================\n",
    "# 配置参数类（集中管理参数）\n",
    "# ======================\n",
    "class Config:\n",
    "    \"\"\"Central hyperparameters/settings for the grid-world Q-learning demo.\"\"\"\n",
    "    GRID_SIZE = 3               # 3x3 grid world\n",
    "    GOAL_STATE = (2, 2)         # goal cell coordinates (row, col)\n",
    "    LEARNING_RATE = 0.1         # learning rate alpha\n",
    "    DISCOUNT_FACTOR = 0.95      # discount factor gamma\n",
    "    EPISODES = 1000             # number of training episodes\n",
    "    EPSILON = 0.1               # exploration rate for the epsilon-greedy policy\n",
    "    DEBUG_MODE = True           # toggle for debug printouts\n",
    "\n",
    "# ======================\n",
    "# 动作枚举类型（替代魔法数字）\n",
    "# ======================\n",
    "class Action(Enum):\n",
    "    \"\"\"Grid movement actions; the integer values index the Q-table's action axis.\"\"\"\n",
    "    UP = 0\n",
    "    DOWN = auto()             # auto() continues from 1, so values run 0..3\n",
    "    LEFT = auto()\n",
    "    RIGHT = auto()\n",
    "\n",
    "    @property\n",
    "    def vector(self) -> Tuple[int, int]:\n",
    "        \"\"\"Return the (d_row, d_col) displacement for this action.\"\"\"\n",
    "        return {\n",
    "            Action.UP: (-1, 0),\n",
    "            Action.DOWN: (1, 0),\n",
    "            Action.LEFT: (0, -1),\n",
    "            Action.RIGHT: (0, 1)\n",
    "        }[self]\n",
    "\n",
    "# ======================\n",
    "# 环境交互结果类型\n",
    "# ======================\n",
    "class StepResult(NamedTuple):\n",
    "    \"\"\"One environment transition: resulting state, step reward, and terminal flag.\"\"\"\n",
    "    state: Tuple[int, int]\n",
    "    reward: float\n",
    "    done: bool\n",
    "\n",
    "# ======================\n",
    "# 网格世界环境类\n",
    "# ======================\n",
    "class GridWorld:\n",
    "    \"\"\"Deterministic grid environment: agent starts at (0, 0), episode ends at Config.GOAL_STATE.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.state = (0, 0)  # current (row, col) position\n",
    "\n",
    "    def reset(self) -> Tuple[int, int]:\n",
    "        \"\"\"Reset the environment to the start state and return it.\"\"\"\n",
    "        self.state = (0, 0)\n",
    "        return self.state\n",
    "\n",
    "    def step(self, action: Action) -> StepResult:\n",
    "        \"\"\"Apply `action`, clamping to the grid, and return the resulting transition.\"\"\"\n",
    "        x, y = self.state\n",
    "        dx, dy = action.vector\n",
    "\n",
    "        # Clamp the move so the agent cannot leave the grid (bumping a wall is a no-op)\n",
    "        new_x = min(max(x + dx, 0), Config.GRID_SIZE-1)\n",
    "        new_y = min(max(y + dy, 0), Config.GRID_SIZE-1)\n",
    "        if Config.DEBUG_MODE:\n",
    "            print(f\"Move from {self.state}: {action.name} to {new_x, new_y}\")\n",
    "\n",
    "        self.state = (new_x, new_y)\n",
    "        done = (self.state == Config.GOAL_STATE)\n",
    "        reward = 10.0 if done else -0.1  # goal reward vs. per-step movement cost\n",
    "        return StepResult(self.state, reward, done)\n",
    "\n",
    "# ======================\n",
    "# Q-learning智能体\n",
    "# ======================\n",
    "class QAgent:\n",
    "    \"\"\"Tabular Q-learning agent with an epsilon-greedy behavior policy.\"\"\"\n",
    "    def __init__(self):\n",
    "        # Q-table indexed as [row, col, action]: rows x cols x number of actions\n",
    "        self.q_table = np.zeros((\n",
    "            Config.GRID_SIZE,\n",
    "            Config.GRID_SIZE,\n",
    "            len(Action)\n",
    "        ))\n",
    "\n",
    "    def choose_action(self, state: Tuple[int, int]) -> Action:\n",
    "        \"\"\"Select an action for `state` with an epsilon-greedy policy.\"\"\"\n",
    "        if random.random() < Config.EPSILON:\n",
    "            return random.choice(list(Action))  # explore: uniform random action\n",
    "        if Config.DEBUG_MODE:\n",
    "            print(f\"Current Q-Table: {self.q_table[state]}\", np.argmax(self.q_table[state]))\n",
    "        return Action(np.argmax(self.q_table[state]))  # exploit: greedy action\n",
    "\n",
    "    def update_q(self,\n",
    "                state: Tuple[int, int],\n",
    "                action: Action,\n",
    "                reward: float,\n",
    "                next_state: Tuple[int, int]):\n",
    "        \"\"\"Apply one Q-learning update for the observed transition.\"\"\"\n",
    "        current_q = self.q_table[state][action.value]\n",
    "        max_future_q = np.max(self.q_table[next_state])\n",
    "\n",
    "        # Q-learning update rule:\n",
    "        # old value + alpha * (reward + gamma * max future value - old value)\n",
    "        new_q = current_q + Config.LEARNING_RATE * (\n",
    "            reward +\n",
    "            Config.DISCOUNT_FACTOR * max_future_q -\n",
    "            current_q\n",
    "        )\n",
    "\n",
    "        # Debug trace of each Q-value change\n",
    "        if Config.DEBUG_MODE:\n",
    "            print(f\"Update Q[{state}|{action.name}]: \"\n",
    "                  f\"{current_q:.2f} → {new_q:.2f} \"\n",
    "                  f\"(Reward: {reward:.1f}, FutureQ: {max_future_q:.2f})\")\n",
    "\n",
    "        self.q_table[state][action.value] = new_q\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3bb4caf8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ======================\n",
    "# Training loop\n",
    "# ======================\n",
    "env = GridWorld()\n",
    "agent = QAgent()\n",
    "Config.DEBUG_MODE = False  # silence per-step traces during bulk training\n",
    "\n",
    "print(\"=== 开始Q-learning训练 ===\")\n",
    "total_reward = 0   # reward accumulated over the current 100-episode window\n",
    "step_count = 0     # steps accumulated over the current 100-episode window\n",
    "for episode in range(1, Config.EPISODES+1):\n",
    "    state = env.reset()\n",
    "    done = False\n",
    "    path = [state]\n",
    "    if episode < 3:\n",
    "        print(\"*\"*50)\n",
    "\n",
    "    while not done:\n",
    "        action = agent.choose_action(state)\n",
    "        next_state, reward, done = env.step(action)\n",
    "        agent.update_q(state, action, reward, next_state)\n",
    "\n",
    "        if episode < 3:\n",
    "            print(f\"\\n{episode}: {state} → {action.name} → {next_state}\\n\")\n",
    "            print(np.round(agent.q_table, 2))\n",
    "\n",
    "        state = next_state\n",
    "        path.append(state)\n",
    "        total_reward += reward\n",
    "        step_count += 1\n",
    "\n",
    "    if episode < 3:\n",
    "        print(\"\\n路径序列:\", path)\n",
    "        print(\"*\"*50)\n",
    "\n",
    "    # Report progress every 100 episodes. The values are per-episode AVERAGES\n",
    "    # over the window (the old labels said \"Total\", which was misleading).\n",
    "    if episode % 100 == 0:\n",
    "        avg_reward = total_reward / 100\n",
    "        avg_steps = step_count / 100\n",
    "        print(f\"Episode {episode:04d} | \"\n",
    "              f\"Avg Steps: {avg_steps:.1f} | \"\n",
    "              f\"Avg Reward: {avg_reward:.1f}\")\n",
    "        total_reward = 0\n",
    "        step_count = 0\n",
    "\n",
    "# ======================\n",
    "# Results\n",
    "# ======================\n",
    "print(\"\\n=== 最终Q表 ===\")\n",
    "print(np.round(agent.q_table, 2))\n",
    "\n",
    "print(\"\\n=== 最优路径 ===\")\n",
    "state = env.reset()  # use the returned start state instead of hard-coding (0, 0)\n",
    "path = [state]\n",
    "# Cap the rollout length: if the greedy policy has a cycle (e.g. argmax ties at an\n",
    "# under-trained state pick UP, which bounces off the wall), the original\n",
    "# `while state != GOAL` loop would never terminate.\n",
    "max_steps = Config.GRID_SIZE * Config.GRID_SIZE\n",
    "while state != Config.GOAL_STATE and len(path) <= max_steps:\n",
    "    action = Action(np.argmax(agent.q_table[state]))\n",
    "    next_state, _, _ = env.step(action)\n",
    "    print(f\"{state} → {action.name} → {next_state}\", agent.q_table[state])\n",
    "    state = next_state\n",
    "    path.append(state)\n",
    "print(\"路径坐标序列:\", path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
