import numpy as np
from envs.my_grid import MyGridEnv
from envs.my_grid import GAMMA
import gym
import time
# Taken from Policy Evaluation Exercise!


def policy_eval(policy, env, discount_factor=1.0, theta=0.1):
    """Iteratively evaluate `policy` and return its state-value vector.

    Args:
        policy: (nS, nA) array-like; policy[s][a] is the probability of
            taking action index `a` in state index `s`.
        env: environment exposing `nS`, `action_space` (index -> action
            label) and `P[state][action] = (prob, next_state, reward, done)`.
            NOTE: states in `P` (and `next_state`) are 1-indexed, while
            `V` and `policy` are 0-indexed — hence the +1/-1 conversions.
        discount_factor: gamma applied to the successor state's value.
        theta: convergence threshold — stop when no state's value moved
            by more than this in a full sweep.

    Returns:
        np.ndarray of length nS with the estimated state values.
    """
    V = np.zeros(env.nS)
    while True:
        biggest_change = 0.0  # largest per-state update seen this sweep
        for s in range(env.nS):
            # Full backup: expectation over actions (and their single
            # deterministic transition — prob is always 1 in this env).
            backed_up = 0.0
            for a, action_prob in enumerate(policy[s]):
                prob, nxt, reward, _ = env.P[s + 1][env.action_space[a]]
                backed_up += action_prob * prob * (reward + discount_factor * V[nxt - 1])
            biggest_change = max(biggest_change, abs(backed_up - V[s]))
            V[s] = backed_up  # in-place update: later states see new values
        # Converged once even the largest change is below the threshold.
        if biggest_change < theta:
            break
    return np.array(V)


def policy_improvement(env, policy_eval_fn=policy_eval, discount_factor=1.0):
    """Policy iteration: alternate evaluation and greedy improvement.

    Args:
        env: environment exposing `nS`, `nA`, `action_space` and the
            1-indexed transition table `P` (see `policy_eval`).
        policy_eval_fn: callable (policy, env, discount_factor) -> V.
        discount_factor: gamma for both evaluation and lookahead.

    Returns:
        (policy, V): a deterministic one-hot (nS, nA) policy and the
        value function of that policy.
    """

    def one_step_lookahead(state, V):
        """Q-values for every action in `state` given the estimate V."""
        q = np.zeros(env.nA)
        for a in range(env.nA):
            # P uses 1-based states and action labels; convert both ways.
            prob, nxt, reward, _ = env.P[state + 1][env.action_space[a]]
            q[a] = prob * (reward + discount_factor * V[nxt - 1])
        return q

    # Begin with the uniform random policy.
    policy = np.full((env.nS, env.nA), 1.0 / env.nA)

    while True:
        # Evaluate the current policy, then improve it greedily.
        V = policy_eval_fn(policy, env, discount_factor)
        stable = True  # flipped if any state's greedy action changes
        for s in range(env.nS):
            current_choice = np.argmax(policy[s])
            # Greedy action under a one-step lookahead (ties -> lowest index).
            greedy_choice = np.argmax(one_step_lookahead(s, V))
            if current_choice != greedy_choice:
                stable = False
            policy[s] = np.eye(env.nA)[greedy_choice]
        # A fixed point of improvement is an optimal policy.
        if stable:
            return policy, V


if __name__ == '__main__':

    env = gym.make('my_grid-v0')

    # Solve the grid with policy iteration, then roll out the greedy policy.
    policy, V = policy_improvement(env)
    for i_episode in range(10):
        state = env.reset()
        for t in range(100):
            env.render()
            print(state)
            # States from the env are 1-indexed, policy rows 0-indexed;
            # argmax picks the action whose one-hot entry is 1, and
            # action_space maps that index to the env's action label.
            action = env.action_space[np.argmax(policy[state - 1])]
            next_state, r, is_terminal, _ = env.step(action)
            state = next_state
            time.sleep(1)
            if is_terminal:
                env.render()
                print(state)
                time.sleep(1)
                # Fix: t is 0-based and we break after the step at index t,
                # so t + 1 timesteps were actually taken.
                print('Episode #%d finished after %d timesteps' % (i_episode, t + 1))
                break