{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a38863e6-6c15-4e60-9e35-6795b505c41e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pygame 2.2.0 (SDL 2.32.50, Python 3.8.20)\n",
      "Hello from the pygame community. https://www.pygame.org/contribute.html\n"
     ]
    }
   ],
   "source": [
    "import random\n",
    "import pygame"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4cc4e015-f592-46c8-abb3-e3a5a1514753",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Game constants\n",
     "SCREEN_WIDTH = 800   # playfield width in pixels\n",
     "SCREEN_HEIGHT = 600  # playfield height in pixels\n",
     "POP_SIZE = 50  # number of training episodes\n",
     "BLOCK_SIZE = 20  # grid cell size in pixels; all positions are multiples of this\n",
     "\n",
     "# RGB colors\n",
     "BLACK = (0, 0, 0)\n",
     "WHITE = (255, 255, 255)\n",
     "GREEN = (0, 255, 0)\n",
     "RED = (255, 0, 0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8a4ecdbc-7838-4da3-950f-bc093e30f375",
   "metadata": {},
   "source": [
    "### 实现 Snake 类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "c2502651-799b-4511-984c-f899247d1e1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Snake:\n",
    "    def __init__(self):\n",
    "        \"\"\"初始化蛇\"\"\"\n",
    "        self.length = 3\n",
    "        # 初始化蛇身，从中心开始向左延伸\n",
    "        self.positions = [\n",
    "            (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2),\n",
    "            (SCREEN_WIDTH / 2 - BLOCK_SIZE, SCREEN_HEIGHT / 2),\n",
    "            (SCREEN_WIDTH / 2 - 2 * BLOCK_SIZE, SCREEN_HEIGHT / 2)\n",
    "        ]\n",
    "        self.direction = random.choice([(0, 1), (0, -1), (1, 0), (-1, 0)])\n",
    "        self.color = GREEN\n",
    "        self.is_alive = True\n",
    "\n",
    "    def get_head_position(self):\n",
    "        \"\"\"获得蛇头的坐标\"\"\"\n",
    "        return self.positions[0]\n",
    "\n",
    "    def turn(self, point):\n",
    "        \"\"\"\n",
    "        改变蛇移动方向\n",
    "        防止180度反向移动\n",
    "        \"\"\"\n",
    "        # 如果新方向与当前方向相反，则不改变\n",
    "        if (point[0] * -1, point[1] * -1) == self.direction:\n",
    "            return\n",
    "        self.direction = point\n",
    "\n",
    "    def move(self):\n",
    "        \"\"\"移动蛇身\"\"\"\n",
    "        cur = self.get_head_position()\n",
    "        x, y = self.direction\n",
    "        new = (cur[0] + (x * BLOCK_SIZE), cur[1] + (y * BLOCK_SIZE))\n",
    "        \n",
    "        # 检查是否撞墙\n",
    "        if (new[0] < 0 or new[0] >= SCREEN_WIDTH or \n",
    "            new[1] < 0 or new[1] >= SCREEN_HEIGHT):\n",
    "            self.is_alive = False\n",
    "            return\n",
    "\n",
    "        \"\"\"检查是否撞到自己\"\"\"\n",
    "        head = self.get_head_position()\n",
    "        if head in self.positions[1:]:\n",
    "            self.is_alive = False\n",
    "            return\n",
    "        \n",
    "        self.positions.insert(0, new)\n",
    "        if len(self.positions) > self.length:\n",
    "            self.positions.pop()\n",
    "\n",
    "    def reset(self):\n",
    "        \"\"\"重新开始\"\"\"\n",
    "        self.__init__()\n",
    "\n",
    "    def draw(self, surface):\n",
    "        \"\"\"在画布上绘制蛇身\"\"\"\n",
    "        for p in self.positions:\n",
    "            r = pygame.Rect((p[0], p[1]), (BLOCK_SIZE, BLOCK_SIZE))\n",
    "            pygame.draw.rect(surface, self.color, r)\n",
    "            pygame.draw.rect(surface, BLACK, r, 1)  # 绘制边框\n",
    "\n",
    "    def grow(self):\n",
    "        \"\"\"蛇身增长\"\"\"\n",
    "        self.length += 1\n",
    "\n",
    "    def check_is_alive(self):\n",
    "        return self.is_alive"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a5345aa1-ee17-4948-929c-1944936c5b70",
   "metadata": {},
   "source": [
    "### 实现 Food 类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6ccaf002-a454-4b08-879c-f4a0e45141f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Food:\n",
    "    def __init__(self, snake_positions=None):\n",
    "        \"\"\"\n",
    "        初始化食物\n",
    "        snake_positions: 可选参数，蛇身位置列表，用于避免食物生成在蛇身上\n",
    "        \"\"\"\n",
    "        self.color = RED\n",
    "        self._generate_position(snake_positions)\n",
    "\n",
    "    def _generate_position(self, snake_positions):\n",
    "        \"\"\"生成食物位置，确保不与蛇身重叠\"\"\"\n",
    "        while True:\n",
    "            x = random.randrange(0, SCREEN_WIDTH, BLOCK_SIZE)\n",
    "            y = random.randrange(0, SCREEN_HEIGHT, BLOCK_SIZE)\n",
    "            self.position = (x, y)\n",
    "            # 如果没有传入蛇身位置，或位置不在蛇身上，则跳出循环\n",
    "            if snake_positions is None or self.position not in snake_positions:\n",
    "                break\n",
    "\n",
    "    def get_position(self):\n",
    "        \"\"\"获得食物坐标\"\"\"\n",
    "        return self.position\n",
    "\n",
    "    def draw(self, surface):\n",
    "        \"\"\"在画布上绘制食物\"\"\"\n",
    "        r = pygame.Rect((self.position[0], self.position[1]), (BLOCK_SIZE, BLOCK_SIZE))\n",
    "        pygame.draw.rect(surface, self.color, r)\n",
    "        pygame.draw.rect(surface, BLACK, r, 1)  # 绘制边框\n",
    "\n",
    "    def respawn(self, snake_positions=None):\n",
    "        \"\"\"重新生成食物位置\"\"\"\n",
    "        self._generate_position(snake_positions)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f1835258-8a2e-4687-b213-a60011d432df",
   "metadata": {},
   "source": [
    "### 构建训练蛇的神经网络"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7bd6e615-bb2c-4aa3-9be3-8a9686325ae5",
   "metadata": {},
   "source": [
    "#### Q 值是什么？\n",
    "Q 值和 return 是强化学习中的两个重要概念，但它们并不相同。\n",
    "- Q 值表示在某个状态下采取某个动作后，未来可能获得的总奖励的期望值（模型预测得到）。\n",
    "- Return 是从当前时刻开始，未来所有奖励的总和（历史经验数据得到）。\n",
    "- Q 值用于指导策略（选择动作），而 return 用于评估策略的性能。\n",
    "\n",
    "\n",
    "#### 目标策略网络与当前策略模型的区别与联系，为什么要用两个模型？\n",
    "在Deep Q Learning（DQN）中，使用两个神经网络模型是为了解决训练不稳定的问题。  \n",
    "- 当前策略网络 (self.model)：用于预测当前状态的 Q 值，并通过优化器更新权重。\n",
    "- 目标策略网络 (self.target_model)：用于计算下一个状态的 Q 值。\n",
    "- 目标策略网络的权重是从当前策略网络定期同步的，因此目标 Q 值在一段时间内是稳定的，这有助于模型更好地收敛。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f8b44783-c1b2-49da-873f-cfcfe49744b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from collections import deque\n",
    "import os\n",
    "\n",
    "class SnakeAI:\n",
    "    def __init__(self, buffer_size=1000, batch_size=32):\n",
    "        \"\"\"\n",
    "        初始化 SnakeAI 类。\n",
    "        参数:\n",
    "            buffer_size: 经验回放缓冲区的大小。\n",
    "            batch_size: 每次训练时从缓冲区中采样的批次大小。\n",
    "        \"\"\"\n",
    "        # 设置参数\n",
    "        self.gamma = 0.99  # 折扣因子，用于计算未来奖励的重要性\n",
    "        self.input_size = 12  # 输入状态的维度(Game类get_state()中定义)\n",
    "        self.output_size = 4  # 输出动作的维度（上下左右四个方向）\n",
    "        self.hidden_size = 100  # 神经网络隐藏层的大小\n",
    "        self.batch_size = batch_size  # 训练批次大小\n",
    "        self.update_freq = 1000  # 目标网络更新频率\n",
    "        self.train_steps = 0  # 训练步数计数器\n",
    "        self.epsilon = 1.0  # 初始 epsilon epsilon表示蛇移动的随机性\n",
    "        self.epsilon_min = 0.01  # 最小 epsilon\n",
    "        self.epsilon_decay = 0.995  # 衰减率\n",
    "\n",
    "        # 创建神经网络模型\n",
    "        self.model = self.build_model()  # 当前策略网络，用于预测动作\n",
    "        self.target_model = self.build_model()  # 目标策略网络，用于计算训练目标\n",
    "        # 检查预训练权重文件是否存在\n",
    "        weight_path = '../models/5_RL_Snake/best_weights.pth'\n",
    "        if os.path.exists(weight_path):\n",
    "            self.model.load_state_dict(torch.load(weight_path))\n",
    "            self.target_model.load_state_dict(torch.load(weight_path))\n",
    "            print(f\"Loaded pre-trained weights from {weight_path}\")\n",
    "            \n",
    "        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)  # 优化器，用于更新模型参数\n",
    "        self.criterion = nn.MSELoss()  # 损失函数，用于计算预测值与目标值的差距\n",
    "\n",
    "        # 经验回放缓冲区，用于存储游戏经验（状态、动作、奖励等）\n",
    "        self.buffer = deque(maxlen=buffer_size)\n",
    "\n",
    "        # 同步目标网络的权重，确保初始时目标网络与当前网络一致\n",
    "        self.update_target_model()\n",
    "\n",
    "    def build_model(self):\n",
    "        \"\"\"\n",
    "        构建神经网络模型。\n",
    "        返回:\n",
    "            一个包含输入层、隐藏层和输出层的神经网络模型。\n",
    "        \"\"\"\n",
    "        model = nn.Sequential(\n",
    "            nn.Linear(self.input_size, self.hidden_size),  # 输入层到隐藏层\n",
    "            nn.ReLU(),  # 激活函数\n",
    "            nn.Linear(self.hidden_size, self.hidden_size),  # 隐藏层到隐藏层\n",
    "            nn.ReLU(),  # 激活函数\n",
    "            nn.Linear(self.hidden_size, self.hidden_size),  # 隐藏层到隐藏层\n",
    "            nn.ReLU(),  # 激活函数\n",
    "            nn.Linear(self.hidden_size, self.output_size),  # 隐藏层到输出层\n",
    "        )\n",
    "        return model\n",
    "\n",
    "    def update_target_model(self):\n",
    "        \"\"\"\n",
    "        更新目标策略网络的权重。\n",
    "        将当前策略网络的权重复制到目标策略网络中。\n",
    "        \"\"\"\n",
    "        self.target_model.load_state_dict(self.model.state_dict())\n",
    "\n",
    "    def get_action(self, state):\n",
    "        \"\"\"\n",
    "        根据当前状态选择动作。\n",
    "        使用 epsilon-greedy 策略，平衡探索与利用\n",
    "        参数:\n",
    "            state: 当前游戏状态。\n",
    "            epsilon: 探索概率，用于控制随机探索与利用的平衡。\n",
    "        返回:\n",
    "            选择的动作。\n",
    "        \"\"\"\n",
    "        if random.random() < self.epsilon:\n",
    "            # 随机选择一个动作（探索）\n",
    "            return random.randint(0, self.output_size - 1)\n",
    "        else:\n",
    "            # 使用模型预测动作（利用）\n",
    "            state = torch.FloatTensor(state).unsqueeze(0)  # 将状态转换为张量\n",
    "            with torch.no_grad():\n",
    "                q_values = self.model(state)  # 获取 Q 值\n",
    "            return torch.argmax(q_values).item()  # 选择 Q 值最大的动作\n",
    "\n",
    "    def train_model(self):\n",
    "        \"\"\"\n",
    "        使用经验回放进行模型训练。\n",
    "        从缓冲区中随机采样一个批次的数据，计算损失并更新模型。\n",
    "        \"\"\"\n",
    "        if len(self.buffer) < self.batch_size:\n",
    "            return  # 如果缓冲区中的数据不足，则跳过训练\n",
    "\n",
    "        # 从缓冲区中随机采样一个批次的数据\n",
    "        batch = random.sample(self.buffer, self.batch_size)\n",
    "\n",
    "        # 解析批次数据\n",
    "        states = torch.FloatTensor([sample[0] for sample in batch])  # 当前状态\n",
    "        actions = torch.LongTensor([sample[1] for sample in batch])  # 执行的动作\n",
    "        rewards = torch.FloatTensor([sample[2] for sample in batch])  # 获得的奖励\n",
    "        next_states = torch.FloatTensor([sample[3] for sample in batch])  # 下一个状态\n",
    "        dones = torch.FloatTensor([sample[4] for sample in batch])  # 是否结束\n",
    "\n",
    "        # 计算当前 Q 值\n",
    "        current_q_values = self.model(states).gather(1, actions.unsqueeze(1))\n",
    "\n",
    "        # 计算目标 Q 值\n",
    "        with torch.no_grad():\n",
    "            next_q_values = self.target_model(next_states).max(1)[0]\n",
    "        target_q_values = rewards + self.gamma * next_q_values * (1 - dones)\n",
    "\n",
    "        # 计算损失并更新模型\n",
    "        loss = self.criterion(current_q_values.squeeze(), target_q_values)\n",
    "        self.optimizer.zero_grad()  # 清空梯度\n",
    "        loss.backward()  # 反向传播\n",
    "        self.optimizer.step()  # 更新模型参数\n",
    "\n",
    "        # 更新目标网络\n",
    "        self.train_steps += 1\n",
    "        if self.train_steps % self.update_freq == 0:\n",
    "            self.update_target_model()\n",
    "        # 更新epsilon\n",
    "        if self.epsilon > self.epsilon_min:\n",
    "            self.epsilon *= self.epsilon_decay\n",
    "\n",
    "    def add_experience(self, state, action, reward, next_state, done):\n",
    "        \"\"\"\n",
    "        将经验添加到经验回放缓冲区中。\n",
    "        参数:\n",
    "            state: 当前状态。\n",
    "            action: 执行的动作。\n",
    "            reward: 获得的奖励。\n",
    "            next_state: 下一个状态。\n",
    "            done: 是否结束。\n",
    "        \"\"\"\n",
    "        self.buffer.append((state, action, reward, next_state, done))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5c4707d2-d15d-41c0-97f4-f843912ab8e3",
   "metadata": {},
   "source": [
    "### 主游戏逻辑"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c67db4bc-c56c-45c5-87b5-b17340e10f30",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\PC\\AppData\\Local\\Temp\\ipykernel_3160\\2476721932.py:34: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  self.model.load_state_dict(torch.load(weight_path))\n",
      "C:\\Users\\PC\\AppData\\Local\\Temp\\ipykernel_3160\\2476721932.py:35: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
      "  self.target_model.load_state_dict(torch.load(weight_path))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded pre-trained weights from ../models/5_RL_Snake/best_weights.pth\n",
      "Episode 1/50, Score: 0, Best: 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\PC\\AppData\\Local\\Temp\\ipykernel_3160\\2476721932.py:103: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at C:\\cb\\pytorch_1000000000000\\work\\torch\\csrc\\utils\\tensor_new.cpp:281.)\n",
      "  states = torch.FloatTensor([sample[0] for sample in batch])  # 当前状态\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Episode 2/50, Score: 1, Best: 1\n",
      "Episode 3/50, Score: 1, Best: 1\n",
      "Episode 4/50, Score: 1, Best: 1\n",
      "Episode 5/50, Score: 1, Best: 1\n",
      "Episode 6/50, Score: 35, Best: 35\n",
      "Episode 7/50, Score: 17, Best: 35\n",
      "Episode 8/50, Score: 11, Best: 35\n",
      "Episode 9/50, Score: 1, Best: 35\n",
      "Episode 10/50, Score: 22, Best: 35\n",
      "Episode 11/50, Score: 42, Best: 42\n",
      "Episode 12/50, Score: 20, Best: 42\n",
      "Episode 13/50, Score: 6, Best: 42\n",
      "Episode 14/50, Score: 13, Best: 42\n",
      "Episode 15/50, Score: 40, Best: 42\n",
      "Episode 16/50, Score: 31, Best: 42\n"
     ]
    }
   ],
   "source": [
    "class Game:\n",
    "    def __init__(self, buffer_size=10000, batch_size=64):\n",
    "        \"\"\"初始化游戏\"\"\"\n",
    "        pygame.init()\n",
    "        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n",
    "        pygame.display.set_caption(\"Snake AI Training\")\n",
    "        self.clock = pygame.time.Clock()\n",
    "        self.snake = Snake()\n",
    "        self.food = Food(self.snake.positions)  # 确保食物不生成在蛇身上\n",
    "        self.ai_player = SnakeAI(buffer_size=10000, batch_size=64)\n",
    "        self.score = 0\n",
    "        self.best_score = 0\n",
    "        self.scores = []\n",
    "        self.steps = 0 # 未吃到食物的累计步数\n",
    "        # 初始化字体\n",
    "        self.font = pygame.font.SysFont(\"Arial\", 24)  # 使用 Arial 字体，大小 24\n",
    "\n",
    "    def get_direction(self, action):\n",
    "        \"\"\"将动作索引转换为方向\"\"\"\n",
    "        directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]  # 上、下、左、右\n",
    "        return directions[action]\n",
    "\n",
    "    def get_state(self):\n",
    "        \"\"\"获取当前游戏状态\"\"\"\n",
    "        head = self.snake.get_head_position()\n",
    "        food = self.food.position\n",
    "\n",
    "        left = (head[0] - BLOCK_SIZE, head[1])\n",
    "        right = (head[0] + BLOCK_SIZE, head[1])\n",
    "        up = (head[0], head[1] - BLOCK_SIZE)\n",
    "        down = (head[0], head[1] + BLOCK_SIZE)\n",
    "\n",
    "        # 检查边界\n",
    "        danger_left = left[0] < 0 or left in self.snake.positions[1:]\n",
    "        danger_right = right[0] >= SCREEN_WIDTH or right in self.snake.positions[1:]\n",
    "        danger_up = up[1] < 0 or up in self.snake.positions[1:]\n",
    "        danger_down = down[1] >= SCREEN_HEIGHT or down in self.snake.positions[1:]\n",
    "\n",
    "        state = [\n",
    "            danger_left, danger_right, danger_up, danger_down,  # 四个方向的危险\n",
    "            food[0] < head[0], food[0] > head[0],              # 食物相对位置（左右）\n",
    "            food[1] < head[1], food[1] > head[1],              # 食物相对位置（上下）\n",
    "            self.snake.direction == (0, -1),                   # 当前方向（上）\n",
    "            self.snake.direction == (0, 1),                    # 当前方向（下）\n",
    "            self.snake.direction == (-1, 0),                   # 当前方向（左）\n",
    "            self.snake.direction == (1, 0)                     # 当前方向（右）\n",
    "        ]\n",
    "        return np.array(state, dtype=np.float32)\n",
    "\n",
    "    def update(self):\n",
    "        \"\"\"更新游戏状态和AI训练\"\"\"\n",
    "        state = self.get_state()\n",
    "        action = self.ai_player.get_action(state)\n",
    "        old_direction = self.snake.direction\n",
    "        new_direction = self.get_direction(action)\n",
    "        \n",
    "        # 更新蛇的方向\n",
    "        self.snake.turn(new_direction)\n",
    "\n",
    "        # 与食物的距离\n",
    "        old_distance = np.sqrt(np.sum((np.array(self.snake.get_head_position()) - np.array(self.food.position)) ** 2))\n",
    "        self.snake.move()\n",
    "        \n",
    "        # 检查游戏结束条件\n",
    "        done = False\n",
    "        reward = 0\n",
    "        \n",
    "        # 吃到食物\n",
    "        if self.snake.get_head_position() == self.food.position:\n",
    "            self.steps = 0 # 重置步数计数器\n",
    "            self.score += 1\n",
    "            self.snake.grow()\n",
    "            self.food.respawn(self.snake.positions)  # 重新生成食物\n",
    "            reward += 10\n",
    "        # 撞墙或撞自己\n",
    "        elif not self.snake.check_is_alive():\n",
    "            self.scores.append(self.snake.length)\n",
    "            done = True\n",
    "            reward -= 20\n",
    "        # 计算距离变化的奖励\n",
    "        else:\n",
    "            new_distance = np.sqrt(np.sum((np.array(self.snake.get_head_position()) - np.array(self.food.position)) ** 2))\n",
    "            reward += 0.2 if new_distance < old_distance else -0.1\n",
    "            if self.steps > 10:  # 长时间未吃到食物\n",
    "                reward -= 0.1\n",
    "\n",
    "        next_state = self.get_state()\n",
    "        self.ai_player.add_experience(state, action, reward, next_state, done)\n",
    "        self.ai_player.train_model() \n",
    "        \n",
    "        return done\n",
    "\n",
    "    def run(self):\n",
    "        \"\"\"主游戏循环\"\"\"\n",
    "        for episode in range(POP_SIZE):\n",
    "            self.snake.reset()\n",
    "            self.food = Food(self.snake.positions)\n",
    "            self.score = 0\n",
    "            self.steps = 0\n",
    "            done = False\n",
    "\n",
    "            while not done:\n",
    "                for event in pygame.event.get():\n",
    "                    if event.type == pygame.QUIT:\n",
    "                        pygame.quit()\n",
    "                        return\n",
    "\n",
    "                self.steps += 1\n",
    "                done = self.update()\n",
    "                \n",
    "                # 渲染画面\n",
    "                self.screen.fill(BLACK)\n",
    "                self.snake.draw(self.screen)\n",
    "                self.food.draw(self.screen)\n",
    "                pygame.display.flip()\n",
    "                # 渲染分数\n",
    "                score_text = self.font.render(f\"Score: {self.score}\", True, WHITE)  # 白色文字\n",
    "                self.screen.blit(score_text, (10, 10))  # 显示在左上角 (10, 10)\n",
    "                pygame.display.flip()\n",
    "                \n",
    "                # 控制帧率（训练时可以更快，观看时调慢）\n",
    "                # self.clock.tick(100 if episode < POP_SIZE - 1 else 10)\n",
    "                self.clock.tick(80)\n",
    "\n",
    "            # 更新最佳分数并保存模型\n",
    "            if self.score > self.best_score:\n",
    "                self.best_score = self.score\n",
    "                torch.save(self.ai_player.model.state_dict(), '../models/5_RL_Snake/best_weights.pth')\n",
    "            print(f\"Episode {episode + 1}/{POP_SIZE}, Score: {self.score}, Best: {self.best_score}\")\n",
    "\n",
    "        pygame.quit()\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    game = Game(buffer_size=10000, batch_size=64)\n",
    "    game.run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3d4ec8e1-b7c4-41e2-b63d-63b2420de371",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
