from tqdm import tqdm
import numpy as np

# Environment definition: 25 states laid out on a 5x5 grid.
states = range(25)
# Action indices used throughout the file: 0=up, 1=down, 2=left, 3=right, 4=stay.
# The original list omitted the fifth "stay" action even though the transition
# table, the Q/N tables and the policy glyphs all use five actions.
actions = ["up", "down", "left", "right", "stay"]
gamma = 0.9  # discount factor


# Deterministic transition table: transitions[s, a] is the successor state on
# the 5x5 grid. Actions: 0=up, 1=down, 2=left, 3=right, 4=stay; an action that
# would leave the grid keeps the agent in place.
transitions = np.zeros((25, 5), dtype=int)
for s in range(25):
    row, col = divmod(s, 5)
    transitions[s, 0] = s - 5 if row > 0 else s
    transitions[s, 1] = s + 5 if row < 4 else s
    transitions[s, 2] = s - 1 if col > 0 else s
    transitions[s, 3] = s + 1 if col < 4 else s
    transitions[s, 4] = s


# Reward function of the environment (uses the module-level transition table).
def reward(s, a):
    """Return the immediate reward for taking action ``a`` in state ``s``.

    The base reward is the value of the landing cell in a fixed 5x5 map
    (target cell 17 gives +1, forbidden cells give -10, the rest 0).
    Trying to step off the grid boundary costs an extra -1.
    """
    # Per-cell reward of the landing state, flattened to match state ids.
    cell_reward = np.array(
        [
            [0, 0, 0, 0, 0],
            [0, -10, -10, 0, 0],
            [0, 0, -10, 0, 0],
            [0, -10, 1, -10, 0],
            [0, -10, 0, 0, 0],
        ],
        dtype=float,
    ).flatten()
    base = cell_reward[transitions[s, a]]
    # A wall bump: moving up on the top row, down on the bottom row, left in
    # the first column, or right in the last column. The "stay" action (4)
    # never bumps a wall.
    row, col = s // 5, s % 5
    hit_wall = (
        (row == 0 and a == 0)
        or (row == 4 and a == 1)
        or (col == 0 and a == 2)
        or (col == 4 and a == 3)
    )
    return base - 1 if hit_wall else base

def visual_policy(policy):
    signs = np.array(["\u2191", "\u2193", "\u2190", "\u2192", "\u25CB"])
    graph = np.zeros((5, 5), dtype=str)
    for i in range(5):
        for j in range(5):
            graph[i][j] = signs[policy[i][j]]
    print(graph)


# Monte Carlo control state.
Q = np.zeros((25, 5))  # Q[s, a]: running estimate of the action value
N = np.zeros((25, 5))  # N[s, a]: number of returns averaged into Q[s, a]

# Hyperparameters for Monte Carlo training. (The discount factor gamma is
# already defined at the top of the file; the duplicate assignment was removed.)
episodes = 1000
# GLIE-style schedule: exploration probability decays as 1 / (episode + 1).
epsilons = [1 / (i + 1) for i in range(episodes)]
# Monte Carlo training: generate an epsilon-greedy episode, then update Q with
# an incremental every-visit sample average of the discounted returns.
for episode in tqdm(range(episodes)):
    state = np.random.choice(range(25))  # random start state
    action = np.random.choice(range(5))  # random start action
    episode_data = []  # list of (state, action, reward) steps
    # NOTE(review): if the random start state happens to be the terminal
    # state 17, one step is still recorded before the check below fires.
    while True:
        episode_data.append((state, action, reward(state, action)))
        state = transitions[state][action]
        # Epsilon-greedy behavior policy with decaying epsilon; ties in
        # argmax resolve to the lowest action index.
        action = (
            np.random.choice(range(5), p=[1 / 5] * 5)
            if np.random.uniform() < epsilons[episode]
            else np.argmax(Q[state])
        )
        if state == 17:  # reached the terminal state
            break

    # Walk the episode backwards, accumulating the discounted return G and
    # nudging Q toward it with the incremental mean update Q += (G - Q) / N.
    G = 0
    for state, action, r in reversed(episode_data):
        G = gamma * G + r
        N[state][action] += 1
        Q[state][action] -= (1 / N[state][action]) * (Q[state][action] - G)



# Report the learned greedy policy and compare Q against a stored reference.
policy = Q.argmax(axis=1).reshape(5, 5)
print("Optimal Policy:")
visual_policy(policy)
print(Q.round(4))
# Best-effort comparison with precomputed optimal action values; do not crash
# the whole run if the reference file is missing.
try:
    print(np.load("./optimal_qv.npy"))
except FileNotFoundError:
    print("Reference file ./optimal_qv.npy not found; skipping comparison.")
