import numpy as np
import matplotlib.pyplot as plt
import time

# 1. Define the maze environment
class MazeEnv:
    """A small grid-world maze.

    Cells: 'S' start, 'G' goal, '#' wall, ' ' open floor.
    The agent moves one cell per step; moves that would leave the grid or
    enter a wall leave the position unchanged (but still cost a step).
    Rewards: +10 on reaching the goal (episode ends), -0.1 otherwise.
    """

    def __init__(self):
        self.grid = np.array([
            ['S', ' ', ' ', ' ', ' '],
            [' ', '#', '#', ' ', '#'],
            [' ', '#', ' ', ' ', ' '],
            [' ', '#', '#', '#', ' '],
            [' ', ' ', ' ', ' ', 'G']
        ])
        self.start_pos = (0, 0)
        self.goal_pos = (4, 4)
        self.current_pos = self.start_pos
        self.actions = ['up', 'down', 'left', 'right']

    def reset(self):
        """Put the agent back at the start cell and return that position."""
        self.current_pos = self.start_pos
        return self.current_pos

    def step(self, action):
        """Apply one action; return (next_position, reward, done)."""
        x, y = self.current_pos
        if action == 'up': x -= 1
        elif action == 'down': x += 1
        elif action == 'left': y -= 1
        elif action == 'right': y += 1

        # Reject moves that leave the grid or hit a wall (position unchanged).
        # Bounds come from the grid itself, so resizing the maze needs no
        # change here.
        rows, cols = self.grid.shape
        if (0 <= x < rows) and (0 <= y < cols) and (self.grid[x, y] != '#'):
            self.current_pos = (x, y)

        # Reward: large positive on the goal, small step penalty otherwise
        # (encourages short paths).
        if self.current_pos == self.goal_pos:
            reward = 10
            done = True
        else:
            reward = -0.1
            done = False

        return self.current_pos, reward, done

# 2. Q-learning algorithm
def q_learning(env, episodes=1000, alpha=0.1, gamma=0.9, epsilon=0.1):
    """Train a tabular Q-learning agent on ``env``.

    Args:
        env: environment exposing ``grid`` (2-D array), ``actions`` (list),
            ``reset()`` -> state, and ``step(action)`` -> (state, reward, done).
            States are (row, col) tuples indexing the grid.
        episodes: number of training episodes.
        alpha: learning rate.
        gamma: discount factor.
        epsilon: exploration probability for the epsilon-greedy policy.

    Returns:
        Q-table of shape (rows, cols, n_actions).
    """
    # Size the table from the environment instead of hard-coding 5x5x4,
    # so the same trainer works on any grid/action set.
    n_rows, n_cols = env.grid.shape
    n_actions = len(env.actions)
    q_table = np.zeros((n_rows, n_cols, n_actions))

    for episode in range(episodes):
        state = env.reset()
        done = False
        while not done:
            # Epsilon-greedy action selection.
            if np.random.random() < epsilon:
                action_idx = np.random.randint(n_actions)  # explore
            else:
                action_idx = np.argmax(q_table[state[0], state[1], :])  # exploit

            next_state, reward, done = env.step(env.actions[action_idx])

            # Standard Q-learning update:
            # Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))
            # Bootstrapping past the terminal state is harmless here because
            # the goal row of the table is never updated and stays zero.
            old_q = q_table[state[0], state[1], action_idx]
            next_max = np.max(q_table[next_state[0], next_state[1], :])
            q_table[state[0], state[1], action_idx] = old_q + alpha * (
                reward + gamma * next_max - old_q
            )

            state = next_state

        # Progress report every 50 episodes (reward is the episode's last reward).
        if episode % 50 == 0:
            print(f"Episode: {episode}, Latest Reward: {reward}")

    return q_table

# 3. Train the agent (module-level `env` and `q_table` are reused below
# by the visualization step).
env = MazeEnv()
q_table = q_learning(env)

# 4. Visualize the optimal path
def visualize_path(env, q_table, max_steps=None):
    """Greedily follow ``q_table`` from the start and print the maze with
    the taken path marked as 'o'.

    Args:
        env: maze environment (see MazeEnv) providing reset/step/actions/grid.
        q_table: Q-table of shape (rows, cols, n_actions).
        max_steps: safety cap on rollout length; defaults to the number of
            grid cells, since an optimal (loop-free) path never revisits one.
            Prevents an infinite loop when the Q-table encodes a cycle
            (e.g. an untrained or partially trained table).
    """
    if max_steps is None:
        max_steps = env.grid.size

    state = env.reset()
    path = [state]
    done = False
    steps = 0

    while not done and steps < max_steps:
        # Always take the greedy (highest-Q) action.
        action_idx = np.argmax(q_table[state[0], state[1], :])
        state, _, done = env.step(env.actions[action_idx])
        path.append(state)
        steps += 1

    # Overlay the intermediate cells (skip start and final cell) on a copy
    # of the maze and print it.
    grid = env.grid.copy()
    for (x, y) in path[1:-1]:
        grid[x, y] = 'o'

    print("Optimal Path:")
    for row in grid:
        print(' '.join(row))

# Print the greedy path produced by the trained Q-table.
visualize_path(env, q_table)