# Policy Iteration (PI) on the FrozenLake environment
import gym
import numpy as np


def Policy_evaluation(env, policy):
    """Evaluate a fixed policy by iterative Bellman expectation backups.

    Args:
        env: environment exposing ``observation_space.n`` and a transition
            model ``env.P[state][action]`` that yields
            ``(prob, next_state, reward, done)`` tuples (old-style gym
            FrozenLake layout).
        policy: array-like mapping each state index to the action taken there.

    Returns:
        numpy.ndarray: estimated value of every state under ``policy``.
    """
    threshold = 1e-5  # convergence tolerance on the L1 change per sweep
    max_sweeps = 100  # safety cap on the number of evaluation sweeps
    evaluation_terms = 0  # sweeps performed so far
    gamma = 1  # discount factor (undiscounted episodic task)
    value_table = np.zeros(env.observation_space.n)  # V(s) initialised to 0
    while True:
        evaluation_terms += 1
        # BUG FIX: slicing a numpy array (`value_table[:]`) returns a *view*,
        # so the old code compared the table with itself and the measured
        # change was always 0.  Take a real copy for a meaningful test.
        value_table_last = value_table.copy()
        for state in range(len(value_table)):
            action = policy[state]  # action prescribed by the current policy
            v = 0  # expected return from this state
            # FrozenLake is stochastic: sum over all possible outcomes.
            for p, next_state, r, done in env.P[state][action]:
                v += p * (r + gamma * value_table[next_state])
            value_table[state] = v

        # BUG FIX: exit when converged OR the sweep cap is exceeded.
        # The original used `and`, which (with the view bug above) forced
        # exactly 101 sweeps on every call regardless of convergence.
        if (np.sum(np.abs(value_table_last - value_table)) < threshold
                or evaluation_terms > max_sweeps):
            return value_table


def greedy_policy(env, value_table):
    """Return the policy that acts greedily w.r.t. ``value_table``.

    For every state, performs a one-step lookahead over the 4 moves
    (0..3 = the four directions) and picks the action with the highest
    expected return; ties go to the lowest action index.
    """
    gamma = 1  # undiscounted, matching the evaluation step
    n_states = len(value_table)
    policy = np.zeros(n_states)
    for s in range(n_states):
        q_values = np.zeros(4)  # expected return of each action from s
        for a in range(4):
            # Accumulate over the stochastic transition outcomes.
            for prob, nxt, reward, _ in env.P[s][a]:
                q_values[a] += prob * (reward + gamma * value_table[nxt])
        policy[s] = np.argmax(q_values)  # first maximiser on ties
    return policy


def policy_improvement(env):
    """Run policy iteration: alternate policy evaluation and greedy improvement.

    Args:
        env: environment with ``observation_space.n`` and transition model
            ``env.P`` (see ``Policy_evaluation``).

    Returns:
        numpy.ndarray: the final policy, one action index per state.
    """
    policy = np.zeros(env.observation_space.n)  # start with "always action 0"
    iteration_terms = 10  # cap on outer improvement rounds
    for i in range(iteration_terms):
        new_value_table = Policy_evaluation(env, policy)  # evaluate current policy
        new_policy = greedy_policy(env, new_value_table)  # greedy improvement
        # IMPROVEMENT: policy iteration has converged once the greedy policy
        # stops changing; further rounds cannot alter it, so stop early.
        # (The original `if i % 1 == 0` was always true and is removed.)
        converged = np.array_equal(policy, new_policy)
        policy = new_policy  # adopt the improved policy

        print('iteration:' + str(i))
        print('policy:\n' + str(policy))
        print('value_table:\n'+str(new_value_table))
        if converged:
            break
    return policy


def test_visualization(policy):
    # Visualize one rollout of the learned policy until the episode ends.
    # NOTE(review): reads the module-level global `env` created in the
    # __main__ block instead of taking the environment as a parameter.
    observation = env.reset()
    while True:
        env.render()  # draw the current grid state
        action = int(policy[observation])  # policy entries are floats; cast to int action
        # step returns: next observation, reward, done flag, transition info
        # NOTE(review): the 4-tuple unpack matches the pre-0.26 gym API —
        # newer gym/gymnasium returns 5 values; confirm the installed version.
        observation, r, done, prob = env.step(action)
        if done:
            break


if __name__ == '__main__':
    # Build the FrozenLake environment; `env` is also read as a global
    # by test_visualization above.
    # NOTE(review): the reset/step unpacking in this file assumes the
    # pre-0.26 gym API — confirm against the installed gym version.
    env = gym.make('FrozenLake-v1')
    env.reset()  # reset the environment before training

    policy = policy_improvement(env)  # run policy iteration (PI)
    test_visualization(policy)  # visualize a rollout with the final policy
