import numpy as np
import random
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import messagebox
import time
from enum import Enum
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class CellType(Enum):
    """Contents of a single square in the Wumpus-world grid."""
    EMPTY = 0   # nothing on this square
    WUMPUS = 1  # the monster's square
    PIT = 2     # bottomless pit (lethal)
    GOLD = 3    # the treasure
    START = 4   # agent's entry/exit square

class Action(Enum):
    """Discrete actions available to the agent.

    The four SHOOT_* members are laid out in the same UP/RIGHT/DOWN/LEFT
    order as the movement actions, so `action - SHOOT_UP.value` yields a
    direction index 0..3.
    """
    UP = 0
    RIGHT = 1
    DOWN = 2
    LEFT = 3
    GRAB = 4         # pick up gold on the current square
    SHOOT_UP = 5     # fire the single arrow upwards
    SHOOT_RIGHT = 6
    SHOOT_DOWN = 7
    SHOOT_LEFT = 8
    CLIMB = 9        # climb out of the cave (start square only)

class WumpusWorld:
    """Grid-based Wumpus World environment with a gym-like reset/step API.

    Coordinates are (row, col) with row 0 at the top; the agent starts in
    the bottom-left corner. The goal is to grab the gold, return to the
    start square and climb out, avoiding pits and the (living) Wumpus.
    """
    
    def __init__(self, size=4, pit_prob=0.2, random_positions=True):
        """
        Create the world.
        
        Parameters:
        - size: side length of the square grid (default 4x4)
        - pit_prob: per-cell probability of a pit on otherwise-free cells
        - random_positions: place Wumpus/gold randomly if True; otherwise
          use the fixed cells (0, 2) and (0, 3)
          (NOTE(review): fixed placement assumes size >= 4 — confirm)
        """
        self.size = size
        self.pit_prob = pit_prob
        self.random_positions = random_positions
        self.reset()
    
    def reset(self):
        """Start a new episode and return the initial state tuple."""
        # Blank grid of CellType codes
        self.grid = np.zeros((self.size, self.size), dtype=int)
        
        # Start square: bottom-left corner
        self.start_pos = (self.size-1, 0)
        self.grid[self.start_pos] = CellType.START.value
        
        # Place the Wumpus anywhere except the start square
        if self.random_positions:
            while True:
                wumpus_pos = (random.randint(0, self.size-1), random.randint(0, self.size-1))
                if wumpus_pos != self.start_pos:
                    break
        else:
            wumpus_pos = (0, 2)  # fixed position
            
        self.wumpus_pos = wumpus_pos
        self.grid[wumpus_pos] = CellType.WUMPUS.value
        self.wumpus_alive = True
        
        # Place the gold (never on the start square or the Wumpus)
        if self.random_positions:
            while True:
                gold_pos = (random.randint(0, self.size-1), random.randint(0, self.size-1))
                if gold_pos != self.start_pos and gold_pos != wumpus_pos:
                    break
        else:
            gold_pos = (0, 3)  # fixed position
            
        self.gold_pos = gold_pos
        self.grid[gold_pos] = CellType.GOLD.value
        
        # Scatter pits over the remaining free cells
        self.pit_positions = []
        for i in range(self.size):
            for j in range(self.size):
                pos = (i, j)
                if pos != self.start_pos and pos != wumpus_pos and pos != gold_pos:
                    if random.random() < self.pit_prob:
                        self.grid[pos] = CellType.PIT.value
                        self.pit_positions.append(pos)
        
        # Agent bookkeeping
        self.agent_pos = self.start_pos
        self.has_gold = False
        self.has_arrow = True
        self.game_over = False
        self.won = False
        self.score = 0
        
        return self._get_state()
    
    def step(self, action):
        """
        Execute one action.
        
        Parameters:
        - action: integer action code (see the Action enum)
        
        Returns:
        - next_state: state tuple after the action
        - reward: reward earned by this action
        - done: whether the episode has ended
        - info: dict with a human-readable "message"
        """
        if self.game_over:
            return self._get_state(), 0, True, {"message": "游戏已经结束"}
        
        reward = -1  # living penalty applied to every step
        info = {"message": ""}
        
        if action in [Action.UP.value, Action.RIGHT.value, Action.DOWN.value, Action.LEFT.value]:
            # Movement
            next_pos = self._get_next_position(action)
            if self._is_valid_position(next_pos):
                self.agent_pos = next_pos
                
                # Lethal squares: a pit, or the Wumpus while it is alive
                if self.grid[next_pos] == CellType.PIT.value:
                    reward = -1000  # fell into a pit
                    self.game_over = True
                    info["message"] = "代理掉入了陷阱!"
                elif self.grid[next_pos] == CellType.WUMPUS.value and self.wumpus_alive:
                    reward = -1000  # eaten by the Wumpus
                    self.game_over = True
                    info["message"] = "代理被Wumpus杀死!"
            else:
                reward = -10  # bumped into a wall
                info["message"] = "撞墙了!"
                
        elif action == Action.GRAB.value:
            # Pick up the gold (only on the gold square, only once)
            if self.agent_pos == self.gold_pos and not self.has_gold:
                self.has_gold = True
                reward = 1000  # reward for grabbing the gold
                info["message"] = "代理获取了金子!"
            else:
                reward = -10  # grabbed at a square with no gold
                info["message"] = "这里没有金子!"
                
        elif action in [Action.SHOOT_UP.value, Action.SHOOT_RIGHT.value, 
                       Action.SHOOT_DOWN.value, Action.SHOOT_LEFT.value]:
            # Fire the (single) arrow
            if self.has_arrow:
                self.has_arrow = False
                shoot_dir = action - Action.SHOOT_UP.value  # 0=up, 1=right, 2=down, 3=left
                if self._shoot_arrow(shoot_dir):
                    self.wumpus_alive = False
                    reward = 500  # reward for killing the Wumpus
                    info["message"] = "代理击杀了Wumpus!"
                else:
                    reward = -10  # arrow missed
                    info["message"] = "箭未命中目标!"
            else:
                reward = -10  # no arrow left
                info["message"] = "代理没有箭了!"
                
        elif action == Action.CLIMB.value:
            # Climb out of the cave (only possible at the start square)
            if self.agent_pos == self.start_pos:
                if self.has_gold:
                    reward = 1000  # mission accomplished
                    self.game_over = True
                    self.won = True
                    info["message"] = "代理成功带着金子离开洞穴!"
                else:
                    reward = -100  # left without the gold
                    # BUGFIX: climbing out always ends the episode; previously
                    # the game continued even though the message said the
                    # agent had already left the cave.
                    self.game_over = True
                    info["message"] = "代理未带金子就离开了洞穴!"
            else:
                reward = -10  # can only climb at the start square
                info["message"] = "只能在起点位置爬出洞穴!"
        
        # Accumulate the running score
        self.score += reward
        
        return self._get_state(), reward, self.game_over, info
    
    def _get_state(self):
        """Return the state tuple (row, col, has_gold, has_arrow, wumpus_alive)."""
        return (self.agent_pos[0], self.agent_pos[1], 
                int(self.has_gold), int(self.has_arrow), int(self.wumpus_alive))
    
    def _get_next_position(self, action):
        """Map a movement action to the would-be next (row, col); may be off-grid."""
        x, y = self.agent_pos
        if action == Action.UP.value:
            return (x-1, y)
        elif action == Action.RIGHT.value:
            return (x, y+1)
        elif action == Action.DOWN.value:
            return (x+1, y)
        elif action == Action.LEFT.value:
            return (x, y-1)
        return self.agent_pos
    
    def _is_valid_position(self, pos):
        """True if pos lies inside the grid."""
        x, y = pos
        return 0 <= x < self.size and 0 <= y < self.size
    
    def _shoot_arrow(self, direction):
        """Return True if an arrow fired in *direction* hits the Wumpus.
        
        direction is the offset `action - Action.SHOOT_UP.value`, i.e.
        0=up, 1=right, 2=down, 3=left; the arrow travels the whole
        row/column from the agent's square.
        
        BUGFIX: the original compared direction against values such as
        Action.UP.value - Action.SHOOT_UP.value (negative numbers -5..-2),
        so no branch could ever match and the arrow never hit anything.
        """
        x, y = self.agent_pos
        wumpus_x, wumpus_y = self.wumpus_pos
        
        if direction == 0:  # up: same column, Wumpus at a smaller row index
            return y == wumpus_y and x > wumpus_x
        elif direction == 1:  # right: same row, Wumpus at a larger column
            return x == wumpus_x and y < wumpus_y
        elif direction == 2:  # down: same column, Wumpus at a larger row index
            return y == wumpus_y and x < wumpus_x
        elif direction == 3:  # left: same row, Wumpus at a smaller column
            return x == wumpus_x and y > wumpus_y
        
        return False
    
    def get_percepts(self):
        """Return the percepts available at the agent's current square."""
        x, y = self.agent_pos
        
        # Breeze: a pit in an orthogonally adjacent cell
        breeze = False
        for dx, dy in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.size and 0 <= ny < self.size:
                if self.grid[(nx, ny)] == CellType.PIT.value:
                    breeze = True
                    break
        
        # Stench: the living Wumpus is in an orthogonally adjacent cell
        stench = False
        if self.wumpus_alive:
            wx, wy = self.wumpus_pos
            for dx, dy in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
                nx, ny = wx + dx, wy + dy
                if 0 <= nx < self.size and 0 <= ny < self.size:
                    if (nx, ny) == (x, y):
                        stench = True
                        break
        
        # Glitter: standing on the (not yet taken) gold
        glitter = (x, y) == self.gold_pos and not self.has_gold
        
        return {
            "breeze": breeze,
            "stench": stench,
            "glitter": glitter,
            "bump": False,  # wall bumps are reported through step(), not here
            "scream": not self.wumpus_alive  # heard once the Wumpus is dead
        }
    
    def render(self, mode='human'):
        """Print an ASCII view of the world plus agent status (mode='human' only)."""
        if mode == 'human':
            grid_repr = np.full((self.size, self.size), '.')
            
            # Pits
            for pit_pos in self.pit_positions:
                grid_repr[pit_pos] = 'P'
            
            # Wumpus (lowercase 'w' once dead)
            if self.wumpus_alive:
                grid_repr[self.wumpus_pos] = 'W'
            else:
                grid_repr[self.wumpus_pos] = 'w'
            
            # Gold (only while it has not been taken)
            if not self.has_gold:
                grid_repr[self.gold_pos] = 'G'
            
            # Start square
            grid_repr[self.start_pos] = 'S'
            
            # Agent is drawn last so it is always visible
            grid_repr[self.agent_pos] = 'A'
            
            for i in range(self.size):
                print(' '.join(grid_repr[i]))
                
            # Status lines
            percepts = self.get_percepts()
            percept_str = []
            if percepts["breeze"]:
                percept_str.append("微风")
            if percepts["stench"]:
                percept_str.append("臭气")
            if percepts["glitter"]:
                percept_str.append("金光")
            if percepts["scream"]:
                percept_str.append("尖叫")
                
            print(f"感知: {', '.join(percept_str) if percept_str else '无'}")
            print(f"状态: 拥有金子: {self.has_gold}, 拥有箭: {self.has_arrow}, Wumpus存活: {self.wumpus_alive}")
            print(f"分数: {self.score}")
            print()
class QLearningAgent:
    """Tabular Q-learning agent with epsilon-greedy exploration."""

    def __init__(self, state_size, action_size, learning_rate=0.1, discount_factor=0.95, 
                 exploration_rate=1.0, exploration_decay=0.995, min_exploration_rate=0.01):
        """
        Set up the agent.

        Parameters:
        - state_size: size of the state space (informational only)
        - action_size: number of discrete actions
        - learning_rate: step size of the Q-value update
        - discount_factor: gamma, weight of future rewards
        - exploration_rate: initial epsilon for epsilon-greedy choice
        - exploration_decay: multiplicative epsilon decay per update
        - min_exploration_rate: floor below which epsilon stops decaying
        """
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        self.discount_factor = discount_factor
        self.exploration_rate = exploration_rate
        self.exploration_decay = exploration_decay
        self.min_exploration_rate = min_exploration_rate

        # Lazily grown Q-table: state key -> per-action value array
        self.q_table = {}

    def get_action(self, state, training=True):
        """
        Choose an action for *state*.

        Epsilon-greedy while training; purely greedy with respect to the
        current Q-values otherwise.
        """
        # Ensure the state has a Q-value row (created on first visit)
        q_row = self.q_table.setdefault(self._state_to_key(state),
                                        np.zeros(self.action_size))

        # Explore with probability epsilon during training
        if training and random.random() < self.exploration_rate:
            return random.randint(0, self.action_size - 1)

        # Exploit: pick the highest-valued action
        return np.argmax(q_row)

    def learn(self, state, action, reward, next_state, done):
        """
        Apply one Q-learning update for the transition
        (state, action) -> (reward, next_state), then decay epsilon.
        """
        q_row = self.q_table.setdefault(self._state_to_key(state),
                                        np.zeros(self.action_size))
        next_row = self.q_table.setdefault(self._state_to_key(next_state),
                                           np.zeros(self.action_size))

        # Bootstrap target: terminal transitions carry no future value
        target = reward if done else reward + self.discount_factor * np.max(next_row)

        # Move the estimate a fraction of the way towards the target
        q_row[action] += self.learning_rate * (target - q_row[action])

        # Epsilon decays once per update (per step, not per episode)
        if self.exploration_rate > self.min_exploration_rate:
            self.exploration_rate *= self.exploration_decay

    def _state_to_key(self, state):
        """Convert a state into a hashable dictionary key."""
        return tuple(state)

    def save_q_table(self, filepath):
        """Persist the Q-table dict to *filepath* via numpy."""
        try:
            np.save(filepath, self.q_table)
            print(f"Q表已保存到 {filepath}")
        except Exception as e:
            print(f"保存Q表时出错: {e}")

    def load_q_table(self, filepath):
        """Load a Q-table previously written by save_q_table."""
        try:
            self.q_table = np.load(filepath, allow_pickle=True).item()
            print(f"从 {filepath} 加载Q表成功")
        except Exception as e:
            print(f"加载Q表时出错: {e}")

class WumpusWorldGUI:
    """Tkinter GUI that visualizes a trained agent acting in the Wumpus world.

    The agent is always queried greedily (training=False); the GUI offers
    single-step and timer-driven auto-play modes.
    """
    
    def __init__(self, env, agent):
        """
        Build the GUI.
        
        Parameters:
        - env: WumpusWorld environment instance
        - agent: RL agent providing get_action(state, training=False)
        """
        self.env = env
        self.agent = agent
        self.cell_size = 80  # pixel size of one grid square on the canvas
        self.setup_gui()
    
    def setup_gui(self):
        """Create the window, info labels, canvas and control buttons."""
        self.window = tk.Tk()
        self.window.title("Wumpus世界 - 强化学习")
        
        size = self.env.size
        
        # Top info bar: score and game status
        frame_info = tk.Frame(self.window)
        frame_info.pack(pady=10)
        
        self.label_score = tk.Label(frame_info, text="分数: 0", font=("微软雅黑", 12))
        self.label_score.pack(side=tk.LEFT, padx=20)
        
        self.label_status = tk.Label(frame_info, text="状态: 游戏开始", font=("微软雅黑", 12))
        self.label_status.pack(side=tk.LEFT, padx=20)
        
        # Drawing canvas sized to the grid
        canvas_width = self.cell_size * size
        canvas_height = self.cell_size * size
        self.canvas = tk.Canvas(self.window, width=canvas_width, height=canvas_height, bg="white")
        self.canvas.pack(pady=10)
        
        # Control panel: reset / single step / auto-play / quit
        frame_control = tk.Frame(self.window)
        frame_control.pack(pady=10)
        
        self.btn_reset = tk.Button(frame_control, text="重置", font=("微软雅黑", 12),
                                  command=self.reset_game)
        self.btn_reset.pack(side=tk.LEFT, padx=10)
        
        self.btn_step = tk.Button(frame_control, text="单步执行", font=("微软雅黑", 12),
                                 command=self.step_game)
        self.btn_step.pack(side=tk.LEFT, padx=10)
        
        self.btn_auto = tk.Button(frame_control, text="自动执行", font=("微软雅黑", 12),
                                 command=self.auto_play)
        self.btn_auto.pack(side=tk.LEFT, padx=10)
        
        self.btn_quit = tk.Button(frame_control, text="退出", font=("微软雅黑", 12),
                                 command=self.window.quit)
        self.btn_quit.pack(side=tk.LEFT, padx=10)
        
        # Agent inventory label (arrow / gold)
        frame_agent = tk.Frame(self.window)
        frame_agent.pack(pady=10)
        
        self.label_agent = tk.Label(frame_agent, text="代理状态: 拥有箭", font=("微软雅黑", 12))
        self.label_agent.pack()
        
        # Percepts label (breeze / stench / glitter / scream)
        frame_percepts = tk.Frame(self.window)
        frame_percepts.pack(pady=10)
        
        self.label_percepts = tk.Label(frame_percepts, text="感知: 无", font=("微软雅黑", 12))
        self.label_percepts.pack()
        
        # Initialize the game (also sets auto_playing to False)
        self.reset_game()
        
        # Auto-play flag; toggled by the auto button
        self.auto_playing = False
    
    def reset_game(self):
        """Reset the environment, redraw, and stop any auto-play."""
        self.env.reset()
        self.update_display()
        self.auto_playing = False
    
    def step_game(self):
        """Let the agent take one greedy action and update the display."""
        if self.env.game_over:
            messagebox.showinfo("游戏结束", "游戏已结束，请重置游戏")
            return
        
        # Ask the agent for its greedy action at the current state
        state = self.env._get_state()
        action = self.agent.get_action(state, training=False)
        
        # Apply the action to the environment
        next_state, reward, done, info = self.env.step(action)
        
        # Redraw the board and labels
        self.update_display()
        
        # Report the outcome if the episode just ended
        if done:
            if self.env.won:
                messagebox.showinfo("游戏结束", f"恭喜! 代理成功带着金子离开洞穴!\n最终分数: {self.env.score}")
            else:
                messagebox.showinfo("游戏结束", f"游戏结束!\n最终分数: {self.env.score}")
    
    def auto_play(self):
        """Toggle timer-driven auto-play on/off."""
        if self.env.game_over:
            messagebox.showinfo("游戏结束", "游戏已结束，请重置游戏")
            return
        
        self.auto_playing = not self.auto_playing
        
        if self.auto_playing:
            self.btn_auto.config(text="停止自动")
            self.auto_step()
        else:
            self.btn_auto.config(text="自动执行")
    
    def auto_step(self):
        """One auto-play tick: step, then reschedule via window.after."""
        if not self.auto_playing or self.env.game_over:
            self.auto_playing = False
            self.btn_auto.config(text="自动执行")
            return
        
        # Take a single step
        self.step_game()
        
        # Reschedule (500 ms) while the episode is still running
        if not self.env.game_over:
            self.window.after(500, self.auto_step)
        else:
            self.auto_playing = False
            self.btn_auto.config(text="自动执行")
    
    def update_display(self):
        """Redraw the whole board and refresh all status labels."""
        self.canvas.delete("all")
        
        size = self.env.size
        cell_size = self.cell_size
        
        # Grid lines (canvas x = column, canvas y = row)
        for i in range(size+1):
            # horizontal line
            self.canvas.create_line(0, i*cell_size, size*cell_size, i*cell_size, width=2)
            # vertical line
            self.canvas.create_line(i*cell_size, 0, i*cell_size, size*cell_size, width=2)
        
        # Pits
        for pit_pos in self.env.pit_positions:
            x, y = pit_pos
            self.canvas.create_text((y+0.5)*cell_size, (x+0.5)*cell_size, 
                                   text="陷阱", font=("微软雅黑", 14), fill="black")
        
        # Wumpus (gray label when dead)
        wx, wy = self.env.wumpus_pos
        if self.env.wumpus_alive:
            self.canvas.create_text((wy+0.5)*cell_size, (wx+0.5)*cell_size, 
                                   text="Wumpus", font=("微软雅黑", 14), fill="red")
        else:
            self.canvas.create_text((wy+0.5)*cell_size, (wx+0.5)*cell_size, 
                                   text="死亡Wumpus", font=("微软雅黑", 12), fill="gray")
        
        # Gold, only while it has not been taken
        if not self.env.has_gold:
            gx, gy = self.env.gold_pos
            self.canvas.create_text((gy+0.5)*cell_size, (gx+0.5)*cell_size, 
                                   text="金子", font=("微软雅黑", 14), fill="gold")
        
        # Start square
        sx, sy = self.env.start_pos
        self.canvas.create_text((sy+0.5)*cell_size, (sx+0.5)*cell_size, 
                               text="起点", font=("微软雅黑", 14), fill="blue")
        
        # Agent: green disc, drawn last so it sits on top
        ax, ay = self.env.agent_pos
        agent_text = "代理"
        if self.env.has_gold:
            agent_text += "🔶"  # gold marker
        self.canvas.create_oval((ay+0.2)*cell_size, (ax+0.2)*cell_size, 
                               (ay+0.8)*cell_size, (ax+0.8)*cell_size, 
                               fill="green", outline="black")
        self.canvas.create_text((ay+0.5)*cell_size, (ax+0.5)*cell_size, 
                               text=agent_text, font=("微软雅黑", 12), fill="white")
        
        # Score and game-status labels
        self.label_score.config(text=f"分数: {self.env.score}")
        
        if self.env.game_over:
            if self.env.won:
                self.label_status.config(text="状态: 胜利!")
            else:
                self.label_status.config(text="状态: 失败!")
        else:
            self.label_status.config(text="状态: 游戏进行中")
        
        # Agent inventory label
        agent_info = []
        if self.env.has_arrow:
            agent_info.append("拥有箭")
        if self.env.has_gold:
            agent_info.append("拥有金子")
        self.label_agent.config(text=f"代理状态: {', '.join(agent_info) if agent_info else '无'}")
        
        # Percepts label
        percepts = self.env.get_percepts()
        percept_str = []
        if percepts["breeze"]:
            percept_str.append("微风")
        if percepts["stench"]:
            percept_str.append("臭气")
        if percepts["glitter"]:
            percept_str.append("金光")
        if percepts["scream"]:
            percept_str.append("尖叫")
        self.label_percepts.config(text=f"感知: {', '.join(percept_str) if percept_str else '无'}")
    
    def run(self):
        """Enter the tkinter main loop (blocks until the window closes)."""
        self.window.mainloop()

def train_agent(env, agent, num_episodes=10000, max_steps=100, save_interval=1000):
    """
    Train the agent on the environment.

    Parameters:
    - env: Wumpus environment (reset/step API, exposes .won)
    - agent: Q-learning agent (get_action/learn/save_q_table)
    - num_episodes: number of training episodes
    - max_steps: step cap per episode
    - save_interval: episodes between periodic Q-table snapshots

    Returns:
    - scores: total reward per episode
    - wins: per-episode win flag
    """
    scores, wins = [], []

    for episode in range(1, num_episodes + 1):
        state = env.reset()
        episode_reward = 0

        # Roll out one episode, learning online at every step
        for _ in range(max_steps):
            action = agent.get_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.learn(state, action, reward, next_state, done)

            episode_reward += reward
            state = next_state

            if done:
                break

        scores.append(episode_reward)
        wins.append(env.won)

        # Progress report over the last 100 episodes
        if episode % 100 == 0:
            avg_score = np.mean(scores[-100:])
            win_rate = np.mean(wins[-100:])
            print(f"回合: {episode}/{num_episodes}, 平均得分: {avg_score:.2f}, 胜率: {win_rate:.2f}, 探索率: {agent.exploration_rate:.4f}")

        # Periodic snapshot of the Q-table
        if episode % save_interval == 0:
            agent.save_q_table(f"wumpus_q_table_ep{episode}.npy")

    # Final snapshot after training completes
    agent.save_q_table("wumpus_q_table_final.npy")

    return scores, wins

def plot_training_results(scores, wins, window_size=100):
    """Plot per-episode scores (raw + moving average) and the cumulative
    win rate, save the figure to wumpus_training_results.png, and show it."""
    total = len(scores)
    episode_axis = range(1, total + 1)

    plt.figure(figsize=(12, 10))

    # Top panel: raw scores, plus a moving average once enough data exists
    plt.subplot(2, 1, 1)
    plt.plot(episode_axis, scores, 'b-', alpha=0.3)
    if total >= window_size:
        kernel = np.ones(window_size) / window_size
        smoothed = np.convolve(scores, kernel, mode='valid')
        plt.plot(range(window_size, total + 1), smoothed, 'r-')
    plt.title('训练过程中的得分')
    plt.xlabel('回合')
    plt.ylabel('得分')
    plt.grid(True)

    # Bottom panel: cumulative win rate (wins so far / episodes so far)
    plt.subplot(2, 1, 2)
    running_wins = 0
    win_rate = []
    for idx, won in enumerate(wins, start=1):
        running_wins += won
        win_rate.append(running_wins / idx)
    plt.plot(episode_axis, win_rate, 'g-')
    plt.title('训练过程中的胜率')
    plt.xlabel('回合')
    plt.ylabel('胜率')
    plt.grid(True)

    plt.tight_layout()
    plt.savefig('wumpus_training_results.png')
    plt.show()

def main():
    """Entry point: build environment and agent, optionally train, run GUI."""
    # Create the Wumpus environment
    env = WumpusWorld(size=4, pit_prob=0.15)
    
    # State space: (row, col, has_gold, has_arrow, wumpus_alive)
    state_size = env.size * env.size * 2 * 2 * 2
    action_size = len(Action)
    
    # Create the Q-learning agent
    agent = QLearningAgent(
        state_size=state_size,
        action_size=action_size,
        learning_rate=0.1,
        discount_factor=0.95,
        exploration_rate=1.0,
        exploration_decay=0.9995,
        min_exploration_rate=0.01
    )
    
    # Ask whether to train from scratch
    train = input("是否进行训练? (y/n): ").lower() == 'y'
    
    if train:
        print("开始训练...")
        # Robustness: fall back to the suggested default on non-numeric input
        # (previously a bad entry crashed with an unhandled ValueError)
        try:
            num_episodes = int(input("训练回合数 (建议10000): "))
        except ValueError:
            print("输入无效，使用默认值10000")
            num_episodes = 10000
        scores, wins = train_agent(env, agent, num_episodes=num_episodes)
        plot_training_results(scores, wins)
    else:
        # Load a previously trained Q-table if one exists.
        # BUGFIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            agent.load_q_table("wumpus_q_table_final.npy")
        except Exception:
            print("未找到已训练的Q表，将使用初始Q表")
    
    # Create and run the GUI
    gui = WumpusWorldGUI(env, agent)
    gui.run()

if __name__ == "__main__":
    main()