import numpy as np
from envs.my_grid import MyGridEnv
from envs.my_grid import GAMMA
import gym
import time

def value_iteration(env, theta=0.0001, discount_factor=1.0):
    def one_step_lookahead(state, V):
        env_s = state + 1
        A = np.zeros(env.nA)
        for a in range(env.nA):
            env_a = env.action_space[a]
            prob, next_state, reward, done = env.P[env_s][env_a]
            A[a] += prob * (reward + discount_factor * V[next_state - 1])
        return A

    V = np.zeros(env.nS)
    while True:
        # Stopping condition
        delta = 0
        # Update each state...
        for s in range(env.nS):
            # Do a one-step lookahead to find the best action
            A = one_step_lookahead(s, V)
            best_action_value = np.max(A)
            # Calculate delta across all states seen so far
            delta = max(delta, np.abs(best_action_value - V[s]))
            # Update the value function. Ref: Sutton book eq. 4.10.
            V[s] = best_action_value
            # Check if we can stop
        if delta < theta:
            break

    # Create a deterministic policy using the optimal value function
    policy = np.zeros([env.nS, env.nA])
    for s in range(env.nS):
        # One step lookahead to find the best action for this state
        A = one_step_lookahead(s, V)
        best_action = np.argmax(A)
        # Always take the best action
        policy[s, best_action] = 1.0

    return policy, V



def main():
    """Solve my_grid-v0 with value iteration, then roll out the greedy policy.

    Renders each step with a 1-second pause so the trajectory is watchable.
    """
    env = gym.make('my_grid-v0')
    try:
        policy, V = value_iteration(env)
        for i_episode in range(10):
            state = env.reset()
            for t in range(100):
                env.render()
                print(state)
                # Pick the action whose policy entry is 1 (the greedy action);
                # states are 1-indexed, the policy array is 0-indexed.
                action = env.action_space[np.argmax(policy[state - 1])]
                next_state, r, is_terminal, _ = env.step(action)
                state = next_state
                time.sleep(1)
                if is_terminal:
                    env.render()
                    print(state)
                    time.sleep(1)
                    print('Episode #%d finished after %d timesteps' % (i_episode, t))
                    break
    finally:
        # Release the environment's resources (e.g. render window) even on error.
        env.close()


if __name__ == '__main__':
    main()