import random
from pprint import pprint

# Environment parameters for the grid world
GRID_SIZE = 10   # side length of the square board
START = (0, 0)  # starting cell
GOAL = (9,9)   # goal cell (large positive reward)
TRAP = {(1,1),(6,6)}   # trap cells (penalized on entry)
# TRAP.update({(x, 8) for x in range(2, 10)})
ACTIONS = ['up', 'down', 'left', 'right']  # action space

def get_reward(state, goal=None, traps=None) -> int:
    """Return the reward for entering *state*.

    Reaching the goal yields a large reward, stepping on a trap is
    penalized, and every other step costs a little so the agent is
    pushed toward the shortest path.

    Args:
        state: (x, y) cell the agent just moved into.
        goal:  goal cell; defaults to the module-level GOAL.
        traps: set of trap cells; defaults to the module-level TRAP.
    """
    goal = GOAL if goal is None else goal
    traps = TRAP if traps is None else traps
    if state == goal:
        return 100
    if state in traps:
        return -5
    return -1  # small per-step penalty encourages short paths

' —— —— 移动函数：根据当前状态和动作，计算下一个状态'
def move(current_state, action) -> tuple:
    x, y = current_state
    # 定义动作效果
    match action:
        case 'up':      y +=1
        case 'right':   x +=1     
        case 'down':    y -=1
        case 'left':    x -=1
        case _:         return current_state
    
    next_state =(x, y)
    # 边界检查
    if (0<= x < GRID_SIZE and 0<= y < GRID_SIZE): 
        return next_state
    return current_state

def create_Q(grid_size=None, actions=None) -> dict:
    """Build a zero-initialized Q-table for every cell of the board.

    Structure of the returned dict:
        {(0, 0): {'up': 0.0, 'down': 0.0, 'left': 0.0, 'right': 0.0},
         (0, 1): {...},
         ...}

    Args:
        grid_size: board side length; defaults to module GRID_SIZE.
        actions:   iterable of action names; defaults to module ACTIONS.
    """
    size = GRID_SIZE if grid_size is None else grid_size
    acts = ACTIONS if actions is None else actions
    return {
        (x, y): {action: 0.0 for action in acts}
        for x in range(size)
        for y in range(size)
    }

def q_learning_train(
        episodes =1000,  # number of training episodes
        interval = 100,  # progress-report interval (in episodes)
        alpha    =0.1,  # learning rate
        gamma    =0.9,  # discount factor
        epsilon  =0.1   # exploration rate (epsilon-greedy policy)
    ) -> dict:
    """Train a Q-learning agent on the grid world and return its Q-table.

    Each episode starts at START and runs until GOAL is reached.
    epsilon decays multiplicatively after every episode (floored at
    0.01), shifting the agent from exploration toward exploitation.

    Returns:
        dict mapping each (x, y) state to {action: Q-value}.
    """
    Q = create_Q()   # Q-table: value of every state-action pair

    for episode in range(episodes):
        state = START  # every episode restarts from the start cell
        sum_reward = 0    # total reward collected this episode

        while state != GOAL:  # keep going until the goal is reached

            # 1. epsilon-greedy action selection:
            #    explore with probability epsilon, otherwise exploit.
            if random.uniform(0, 1) < epsilon:
                action = random.choice(ACTIONS)  # random exploration
            else:
                # greedy: action with the highest Q-value in this state
                action = max(Q[state], key=Q[state].get)

            # 2. take the action, observe next state and reward
            next_state = move(state, action)
            reward = get_reward(next_state)
            sum_reward += reward

            # 3. Bellman update of the Q-value
            current_q = Q[state][action]
            max_next_q = max(Q[next_state].values())  # best value of next state

            # Core update rule: Q <- Q + alpha * (TD error)
            new_q = current_q + alpha * (reward + gamma * max_next_q - current_q)
            Q[state][action] = new_q

            state = next_state  # state transition
            # (the while-condition already terminates the loop at GOAL)

        # Exploration-rate decay: explore less as training progresses.
        epsilon = max(0.01, epsilon * 0.995)

        if (episode + 1) % interval == 0:
            print(f"轮次：{episode+1}, 总得分：{sum_reward}")
    return Q

def test_optimal_policy(Q):
    """Greedily follow the learned Q-table from START and print the path.

    Always picks the highest-valued action in the current state; stops
    at GOAL or after max_steps moves (guards against a looping policy).

    Args:
        Q: Q-table as produced by q_learning_train / create_Q.

    Returns:
        list of visited states, starting with START.
    """
    state = START
    path = [state]
    steps = 0
    max_steps = 100  # safety cap: avoid an infinite loop on a bad policy

    print("\n--- 学习到的最优路径 ---")

    while state != GOAL and steps < max_steps:
        action = max(Q[state], key=Q[state].get)  # pure exploitation
        state = move(state, action)
        path.append(state)
        steps += 1
        print(f"Step_{steps}\t{path[-2]} -> {action}\t-> {path[-1]}")

    return path


if __name__ == "__main__":
    print("开始训练...")
    trained_q = q_learning_train(episodes=1000, interval=100)
    print("训练完成！")
    print("Q-Table:")
    pprint(trained_q)

    # Walk the greedy policy once to show the learned route.
    best_path = test_optimal_policy(trained_q)
    print(f"\n路径总步数: {len(best_path) - 1}")