# import gym
#
# env = gym.make('CarRacing-v0')
# env.reset()
#
# for _ in range(1000):
#     env.render()
#     env.step(env.action_space.sample())

import tensorflow as tf
# hello = tf.constant("Hello World")
# sess = tf.Session()
# print(sess.run(hello))
# # Variables
# weights = tf.Variable(tf.random_normal([3, 2], stddev=0.1), name="weights")
#
# c = tf.constant(13)
#
# x = tf.placeholder("float", shape=None)
# with tf.name_scope("Computation"):
#
#     with tf.name_scope("Part1"):
#         a = tf.constant(5)
#         b = tf.constant(4)
#         c = tf.multiply(a, b)
#
#     with tf.name_scope("Part2"):
#         d = tf.constant(2)
#         e = tf.constant(3)
#         f = tf.multiply(d, e)
    # g = tf.add(c,f)

# with tf.name_scope("Result"):
#     g = tf.add(c,f)
#
#
#
# with tf.Session() as sess:
#     writer = tf.summary.FileWriter("output", sess.graph)
#     print(sess.run(g))
#     writer.close()


import gym
import numpy as np

# Module-level environment shared by extract_policy / policy_evaluate /
# policy_iteration below (they read the global `env` directly).
# NOTE(review): the 'FrozenLake-v0' id was removed in newer gym releases
# (gym>=0.21 uses 'FrozenLake-v1') — confirm against the installed version.
env = gym.make('FrozenLake-v0')
# Sanity check: number of discrete states (16 for the 4x4 FrozenLake map).
print(env.observation_space.n)


def value_iteration(env, gamma=1.0):
    """Compute the optimal state-value function by value iteration.

    Args:
        env: environment exposing ``env.P[state][action]`` as a list of
            ``(trans_prob, next_state, reward, done)`` tuples, plus
            ``observation_space.n`` and ``action_space.n``.
        gamma: discount factor applied to future rewards.

    Returns:
        numpy array of shape (n_states,) with the converged state values.
    """
    value_table = np.zeros(env.observation_space.n)
    no_of_iterations = 100000
    threshold = 0.1
    for i in range(no_of_iterations):
        updated_value_table = np.copy(value_table)
        for state in range(env.observation_space.n):
            Q_value = []
            for action in range(env.action_space.n):
                # Expected return of taking `action` in `state`, summed over
                # every possible stochastic transition.
                # Bug fix: the original appended to Q_value *inside* this
                # transition loop, recording partial sums per transition
                # instead of one Q-value per action.
                expected_return = 0.0
                for trans_prob, next_state, reward_prob, _ in env.P[state][action]:
                    expected_return += trans_prob * (
                        reward_prob + gamma * updated_value_table[next_state])
                Q_value.append(expected_return)
            value_table[state] = max(Q_value)
        # Bug fix: the original `break` was outside the `if`, so the loop
        # always stopped after a single sweep; only stop once converged.
        if np.sum(np.fabs(updated_value_table - value_table)) <= threshold:
            print('Value-iteration converged at iteration# %d.' % (i + 1))
            break

    return value_table

def extract_policy(value_table, gamma=1.0):
    """Return the greedy policy w.r.t. ``value_table`` on the global ``env``.

    For every state, evaluate the one-step lookahead value of each action
    under the environment's transition model and pick the argmax action.

    Args:
        value_table: per-state value estimates, shape (n_states,).
        gamma: discount factor applied to future rewards.

    Returns:
        numpy array of shape (n_states,) holding one action index per state.
    """
    n_states = env.observation_space.n
    n_actions = env.action_space.n
    policy = np.zeros(n_states)
    for state in range(n_states):
        action_values = np.zeros(n_actions)
        for action in range(n_actions):
            # Expected return of `action`, summed over stochastic outcomes.
            for trans_prob, next_state, reward_prob, _ in env.P[state][action]:
                action_values[action] += trans_prob * (
                    reward_prob + gamma * value_table[next_state])
        policy[state] = np.argmax(action_values)
    return policy


def policy_evaluate(policy, gamma=1.0):
    """Iteratively evaluate ``policy`` on the global ``env``.

    Runs iterative policy evaluation (Bellman expectation backups) until
    the value table changes by less than a small threshold, or 1000 sweeps.

    Args:
        policy: per-state action indices, shape (n_states,).
        gamma: discount factor applied to future rewards.

    Returns:
        numpy array of shape (n_states,) with the value of each state
        under ``policy``.
    """
    value_table = np.zeros(env.observation_space.n)
    threshold = 0.01
    for i in range(1000):
        update_value_table = np.copy(value_table)
        for state in range(env.observation_space.n):
            # Bug fix: `policy` is a float ndarray, so policy[state] is a
            # numpy float; cast to int so it indexes the env.P[state] dict
            # (keyed by int actions) reliably.
            act = int(policy[state])
            Q_value = 0
            for trans_prob, next_state, reward_prob, _ in env.P[state][act]:
                Q_value += trans_prob * (reward_prob + gamma * update_value_table[next_state])
            value_table[state] = Q_value
        if np.sum(np.fabs(update_value_table - value_table)) < threshold:
            print("finished")
            break
    return value_table

def policy_iteration(gamma=1.0):
    """Find a (near-)optimal policy for the global ``env`` by policy iteration.

    Alternates policy evaluation and greedy policy improvement, starting
    from the all-zeros policy, until the policy stops changing (or 100
    rounds elapse).

    Args:
        gamma: discount factor applied to future rewards.

    Returns:
        numpy array of shape (n_states,) with one action index per state.
    """
    random_policy = np.zeros(env.observation_space.n)

    for i in range(100):
        # Bug fix: forward gamma to the helpers — it was silently dropped,
        # so any non-default gamma had no effect.
        value_table = policy_evaluate(random_policy, gamma)
        new_policy = extract_policy(value_table, gamma)

        # Bug fix: np.sum(new - old) == 0 can hold for two *different*
        # policies (a +1 and a -1 change cancel out); compare element-wise.
        if np.array_equal(new_policy, random_policy):
            break
        random_policy = new_policy
    return random_policy

# optimal_value_function = value_iteration(env=env,gamma=1.0)
# Solve FrozenLake via policy iteration and print the per-state actions.
print(policy_iteration())