# VI (Value Iteration) on the FrozenLake environment
import gym
import numpy as np


def greedy_policy(env, value_table, gamma=1):
    """Derive the greedy policy implied by a state-value table.

    For every state, compute Q(s, a) for the four actions via a one-step
    Bellman backup over the environment's transition model, then pick the
    action with the largest Q-value.

    Args:
        env: environment exposing ``P[state][action]`` as a list of
            ``(prob, next_state, reward, done)`` transition tuples
            (FrozenLake-style model).
        value_table: array of state values V(s), one entry per state.
        gamma: discount factor; defaults to 1 to match the original code.

    Returns:
        np.ndarray mapping each state index to its greedy action index.
    """
    n_states = len(value_table)
    policy = np.zeros(n_states)
    for state in range(n_states):
        # Q(s, a) = sum over transitions of p * (r + gamma * V(s'))
        q_values = np.zeros(4)  # actions 0-3 are the four move directions
        for action in range(4):
            for prob, next_state, reward, done in env.P[state][action]:
                q_values[action] += prob * (reward + gamma * value_table[next_state])
        # np.argmax breaks ties toward the lowest index, same as
        # list.index(max(...)) in the original implementation.
        policy[state] = np.argmax(q_values)
    return policy


def value_iteration(env, iteration_times=400, gamma=1, threshold=1e-10):
    """Run value iteration and return the resulting greedy policy.

    Unlike policy iteration's separate evaluate/improve phases, this
    interleaves a single in-place Bellman sweep with a greedy policy
    update each iteration.

    Args:
        env: Gym-style environment with ``observation_space.n`` states and
            a transition model ``P[state][action]`` of
            ``(prob, next_state, reward, done)`` tuples.
        iteration_times: maximum number of sweeps (default 400, as before).
        gamma: discount factor (default 1, as before).
        threshold: stop early once the value table changes by less than
            this total amount in one sweep.

    Returns:
        np.ndarray with the greedy action index for each state.
    """
    n_states = env.observation_space.n
    policy = np.zeros(n_states)       # initial policy: action 0 everywhere
    value_table = np.zeros(n_states)  # initial value estimates
    for i in range(iteration_times):
        # Snapshot BEFORE the sweep; the original aliased the array
        # (new_value_table = value_table), which never made a copy.
        previous = np.copy(value_table)
        for state in range(n_states):
            # policy holds floats; cast so the dict lookup uses a real int
            action = int(policy[state])
            v = 0.0
            for prob, next_s, reward, done in env.P[state][action]:  # Bellman backup
                v += prob * (reward + gamma * value_table[next_s])
            value_table[state] = v
        policy = greedy_policy(env, value_table)  # greedy improvement step

        if i % 50 == 0:  # periodic training progress output
            print('iteration:' + str(i))
            print('policy:\n' + str(policy))
            print('value_table:\n' + str(value_table))

        if np.sum(np.abs(value_table - previous)) < threshold:
            break  # converged: further sweeps would not change anything
    return policy


def test_visualization(policy):
    # 测试过程可视化模块
    observation = env.reset()
    while True:
        env.render()  # 环境可视化
        action = int(policy[observation])
        # 分别为：当前状态;价值；是否完成;转移概率
        observation, r, done, prob = env.step(action)
        if done:
            break


if __name__ == '__main__':
    # Build the (slippery) FrozenLake environment; test_visualization reads
    # this module-level `env` as a global, so it must stay at top level.
    env = gym.make('FrozenLake-v1')
    env.reset()  # reset the environment to its initial state

    policy = value_iteration(env)  # VI module: learn the greedy policy
    test_visualization(policy)  # render one episode following the learned policy
