import gymnasium as gym
import numpy as np
import time

# 1. Create the FrozenLake environment.
# is_slippery=False makes transitions deterministic (no sliding), which is
# what the original comment intended; note the gymnasium default is
# is_slippery=True (stochastic), so the flag must be passed explicitly.
env = gym.make('FrozenLake-v1', is_slippery=False)

# 2. Initialize the Q-table (num_states x num_actions).
state_size = env.observation_space.n  # 16 states (4x4 grid)
action_size = env.action_space.n      # 4 actions (left/down/right/up)
Q = np.zeros((state_size, action_size))  # all Q-values start at 0

# 3. Hyperparameters.
episodes = 10000       # number of training episodes
alpha = 0.1            # learning rate
gamma = 0.99           # discount factor
epsilon = 1.0          # exploration rate (start fully random)
epsilon_decay = 0.999  # multiplicative decay applied per episode
min_epsilon = 0.01     # floor so some exploration always remains

# 4. Q-Learning training loop.
for episode in range(episodes):
    state = env.reset()[0]  # reset environment, take the initial observation
    done = False
    truncated = False
    total_reward = 0

    # Run until the episode terminates (goal or hole reached) OR is
    # truncated by the TimeLimit wrapper. The original `while not done`
    # ignored `truncated`, so a time-limited episode (truncated=True,
    # terminated=False) would keep stepping a finished episode forever.
    while not (done or truncated):
        # epsilon-greedy policy (explore vs. exploit)
        if np.random.rand() < epsilon:
            action = env.action_space.sample()  # explore: random action
        else:
            action = np.argmax(Q[state, :])     # exploit: best known action

        # Take the action; observe next state, reward, and episode status.
        next_state, reward, done, truncated, _ = env.step(action)
        total_reward += reward

        # Temporal-difference Q-value update.
        Q[state, action] = Q[state, action] + alpha * (
            reward + gamma * np.max(Q[next_state, :]) - Q[state, action]
        )

        state = next_state  # advance to the new state

    # Decay the exploration rate (less random exploration over time).
    epsilon = max(min_epsilon, epsilon * epsilon_decay)

    # Print progress every 1000 episodes.
    if (episode + 1) % 1000 == 0:
        print(f"Episode: {episode + 1}, Total Reward: {total_reward}, Epsilon: {epsilon:.3f}")

# 5. Evaluate the trained Q-table (greedy policy, no exploration).
print("\n=== 测试训练结果 ===")
state = env.reset()[0]
done = False
truncated = False
steps = 0
reward = 0  # ensure the final print is defined even if the loop exits immediately

# Also stop on truncation: a greedy policy that never reaches a terminal
# state would otherwise loop forever (the original checked only `done`).
while not (done or truncated):
    action = np.argmax(Q[state, :])  # always pick the highest-Q action
    next_state, reward, done, truncated, _ = env.step(action)
    # NOTE(review): env.render() has no visible effect unless the env was
    # created with render_mode="human" — confirm desired rendering setup.
    env.render()
    state = next_state
    steps += 1
    time.sleep(0.5)  # slow down so a human can follow the rollout

print(f"测试完成，共走了 {steps} 步，奖励: {reward}")
env.close()