from tqdm import tqdm
import numpy as np

# State and action spaces of the 5x5 gridworld.
# States are 0..24 (row-major); actions: 0=up, 1=down, 2=left, 3=right, 4=stay.
states = range(25)
actions = range(5)
gamma = 0.9

# Deterministic transition table: transitions[s, a] is the successor state.
# A move that would leave the grid keeps the agent in place.
transitions = np.zeros((25, 5), dtype=int)
for s in range(25):
    row, col = divmod(s, 5)
    transitions[s][0] = s - 5 if row > 0 else s  # up
    transitions[s][1] = s + 5 if row < 4 else s  # down
    transitions[s][2] = s - 1 if col > 0 else s  # left
    transitions[s][3] = s + 1 if col < 4 else s  # right
    transitions[s][4] = s                        # stay


# Reward function of the environment (state transitions come from the
# `transitions` table defined above).
def reward(s, a):
    """Return the reward for taking action `a` in state `s`.

    The reward is the value of the landing cell on a fixed 5x5 map
    (forbidden cells -10, the goal cell +1, everything else 0), with an
    extra -1 penalty whenever the move bumps into the grid boundary.
    """
    # Flattened per-cell reward map (row-major, matching the state layout).
    cell_rewards = np.array(
        [
            [0, 0, 0, 0, 0],
            [0, -10, -10, 0, 0],
            [0, 0, -10, 0, 0],
            [0, -10, 1, -10, 0],
            [0, -10, 0, 0, 0],
        ],
        dtype=float,
    ).ravel()

    row, col = divmod(s, 5)
    # True when the chosen action points off the edge of the grid.
    hits_wall = (
        (a == 0 and row == 0)
        or (a == 1 and row == 4)
        or (a == 2 and col == 0)
        or (a == 3 and col == 4)
    )
    r = cell_rewards[transitions[s, a]]
    return r - 1 if hits_wall else r


# Visualize a policy as a 5x5 grid of glyphs.
def visual_policy(policy):
    """Print a (5, 5) policy as arrows: 0=up 1=down 2=left 3=right 4=stay(circle)."""
    signs = np.array(["\u2191", "\u2193", "\u2190", "\u2192", "\u25CB"])
    # Fancy indexing maps every action index to its glyph in one step;
    # the result is the same '<U1' array the element-wise loop produced.
    print(signs[np.asarray(policy)])


# Q-table: one row per state, one column per action, initialized to zero.
Q = np.zeros((25, 5))

# SARSA hyper-parameters.
alpha = 0.1    # learning rate
gamma = 0.9    # discount factor
episodes = 100_000
# Decaying exploration rate: epsilon for episode i is 1 / (i + 1).
epsilons = [1 / n for n in range(1, episodes + 1)]

# SARSA training loop.
# Fixes over the original version:
#  * the terminal (goal) state no longer bootstraps: the TD target adds
#    gamma * Q[next] only for non-terminal successors, so the value of
#    the terminal state stays 0 as SARSA requires;
#  * removed the leftover per-episode debug print of the step count,
#    which emitted 100 000 lines and broke the tqdm progress bar.
TERMINAL_STATE = 17  # the +1 goal cell (row 3, col 2)

for episode in tqdm(range(episodes)):
    # Each episode starts from a uniformly random state/action pair.
    state = np.random.choice(25)
    action = np.random.choice(5)
    while True:
        next_state = transitions[state][action]
        # Epsilon-greedy, on-policy choice of the next action.
        if np.random.uniform() < epsilons[episode]:
            next_action = np.random.choice(5)
        else:
            next_action = np.argmax(Q[next_state])

        # TD target; a terminal successor contributes no future value.
        if next_state == TERMINAL_STATE:
            bootstrap = 0.0
        else:
            bootstrap = gamma * Q[next_state][next_action]
        td_target = reward(state, action) + bootstrap
        Q[state][action] += alpha * (td_target - Q[state][action])

        state = next_state
        action = next_action

        if state == TERMINAL_STATE:  # reached the goal: episode ends
            break

# Report the greedy policy derived from the learned Q-table.
policy = Q.argmax(axis=1).reshape(5, 5)
print("Optimal Policy:")
visual_policy(policy)
print(Q.round(4))