import math
import numpy as np

# State and action spaces of the 5x5 gridworld environment.
states = range(25)
# Action names by index. Index 4 ("stay") corresponds to the fifth column
# used consistently by the transition table, the Q-table and the policy
# below (transitions[i][4] = i); it was missing from the original list.
actions = ["up", "down", "left", "right", "stay"]
gamma = 0.9  # discount factor


# Deterministic transition table: transitions[s, a] is the successor state
# on the 5x5 grid. Action columns: 0=up, 1=down, 2=left, 3=right, 4=stay.
# A move that would leave the grid keeps the state unchanged.
transitions = np.zeros((25, 5), dtype=int)
for s in range(25):
    row, col = divmod(s, 5)
    transitions[s, 0] = s - 5 if row > 0 else s
    transitions[s, 1] = s + 5 if row < 4 else s
    transitions[s, 2] = s - 1 if col > 0 else s
    transitions[s, 3] = s + 1 if col < 4 else s
    transitions[s, 4] = s


# Reward map for the 5x5 grid, flattened row-major to 25 entries.
# Hoisted to module level: the original rebuilt this array on every call
# even though reward() runs inside the value-iteration hot loops.
# -10 cells appear to be obstacles and the single +1 cell the goal
# (inferred from the values; confirm against the environment spec).
_AREAS_REWARD = np.array(
    [
        [0, 0, 0, 0, 0],
        [0, -10, -10, 0, 0],
        [0, 0, -10, 0, 0],
        [0, -10, 1, -10, 0],
        [0, -10, 0, 0, 0],
    ],
    dtype=float,
).flatten()


def reward(s, a):
    """Return the immediate reward for taking action ``a`` in state ``s``.

    The reward is the value of the resulting cell (looked up via the
    module-level ``transitions`` table); a move that bumps into the grid
    boundary (and thus leaves the state unchanged) incurs an extra -1.
    """
    hits_edge = (
        (s // 5 == 0 and a == 0)  # up from the top row
        or (s // 5 == 4 and a == 1)  # down from the bottom row
        or (s % 5 == 0 and a == 2)  # left from the first column
        or (s % 5 == 4 and a == 3)  # right from the last column
    )
    r = _AREAS_REWARD[transitions[s, a]]
    return r - 1 if hits_edge else r


def next_state(s, a):
    """Return the successor state of taking action ``a`` in state ``s``."""
    return transitions[s, a]


# Visualize a policy as a grid of glyphs.
def visual_policy(policy_table):
    """Print the greedy policy as a square grid of direction symbols.

    One glyph per state, chosen by argmax over the action axis:
    arrows for actions 0-3 (up/down/left/right) and a circle for
    action 4 (stay).
    """
    signs = np.array(["\u2191", "\u2193", "\u2190", "\u2192", "\u25CB"])
    best = np.argmax(policy_table, axis=1)
    side = int(math.sqrt(policy_table.shape[0]))
    print(signs[best].reshape((side, -1)))


# Build the action-value table from a state-value function.
def get_Q_table(V):
    """Return the 25x5 table Q(s, a) = r(s, a) + gamma * V(s')."""
    Q = np.empty((25, 5), dtype=float)
    for row, s in enumerate(states):
        for a in range(5):
            Q[row, a] = reward(s, a) + gamma * V[next_state(s, a)]
    return Q


# Value iteration: sweep over all states until the total change of V
# between consecutive sweeps falls below the tolerance.
# (The original had a dead `delta = 0` at the top of the loop that was
# always overwritten before use; removed.)
V = np.zeros(25)
V_bkp = np.zeros(25)  # snapshot of V from the previous sweep
iter_num = 0  # sweep counter, kept for inspection/debugging
while True:
    iter_num += 1
    # In-place (Gauss-Seidel style) update: states later in the sweep
    # already see the values updated earlier in the same sweep.
    for state in states:
        action_values = [
            reward(state, action) + gamma * V[next_state(state, action)]
            for action in range(5)
        ]
        V[state] = np.max(action_values)
    # L1 distance between this sweep and the previous one.
    delta = np.abs(V_bkp - V).sum()
    V_bkp = np.copy(V)
    if delta < 1e-3:
        break


# Extract the greedy policy (one-hot over actions per state) from the
# converged state values.
policy = np.zeros((25, 5))
for s in states:
    q_vals = [reward(s, a) + gamma * V[next_state(s, a)] for a in range(5)]
    policy[s, int(np.argmax(q_vals))] = 1


print("Optimal Policy:")
visual_policy(policy)
print("Optimal State Values:")
print(V.reshape(5, 5).round(4))
Q = get_Q_table(V).round(4)
print("Optimal Q-table:")
print(Q)
np.save("optimal_sv.npy", V)
np.save("optimal_qv.npy", Q)
