import sys
import gymnasium as gym
import numpy as np

# Tabular sizes for the default 4x4 FrozenLake map: 16 grid cells (states)
# and 4 discrete moves per cell. NOTE(review): hard-coded — these must match
# the env built below; an 8x8 map would need 64/4.
obsN: int = 16
# presumably 0=left, 1=down, 2=right, 3=up — confirm against the FrozenLake docs
actN: int = 4


def value_iteration(env, gamma) -> np.ndarray:
    """Solve a tabular Gymnasium env by value iteration and return the greedy policy.

    Sizes are read from the environment itself (``observation_space.n`` /
    ``action_space.n``) instead of module-level constants, so any discrete
    map size works, and the transition table ``env.unwrapped.P`` — a dict
    ``P[s][a] -> list of (prob, next_state, reward, done)`` — is hoisted out
    of the loops.

    Parameters
    ----------
    env : gymnasium.Env
        Environment exposing ``observation_space.n``, ``action_space.n`` and
        the tabular model ``env.unwrapped.P``.
    gamma : float
        Discount factor.

    Returns
    -------
    np.ndarray
        Array of shape ``(n_states,)`` mapping each state to its greedy action.
    """
    n_states = env.observation_space.n
    n_actions = env.action_space.n
    transitions = env.unwrapped.P  # hoisted: constant across all iterations

    # Bellman optimality sweeps until the values stop changing.
    v = np.zeros(n_states)
    tolerance = 1e-10
    while True:
        prev_v = v.copy()
        for s in range(n_states):
            q_sa = np.zeros(n_actions)
            for a in range(n_actions):
                for p, s_next, r, done in transitions[s][a]:
                    # Don't bootstrap past a terminal transition. (On FrozenLake
                    # terminals are zero-reward self-loops, so this matches the
                    # unmasked backup; it is the correct form for general MDPs.)
                    q_sa[a] += p * (r + gamma * prev_v[s_next] * (not done))
            v[s] = np.max(q_sa)
        if np.sum(np.fabs(prev_v - v)) <= tolerance:
            print("Value-iteration Stable")
            break

    # Extract the greedy (argmax-Q) policy from the converged values.
    policy = np.zeros(n_states, dtype=np.int8)
    for s in range(n_states):
        q_sa = np.zeros(n_actions)
        for a in range(n_actions):
            q_sa[a] = sum(
                p * (r + gamma * v[s_next] * (not done))
                for p, s_next, r, done in transitions[s][a]
            )
        policy[s] = np.argmax(q_sa)
    return policy


# Build the slippery FrozenLake environment with on-screen rendering and
# seed the first episode for reproducibility.
env = gym.make("FrozenLake-v1", render_mode="human", is_slippery=True)
observation, info = env.reset(seed=42)

# Solve for the optimal (undiscounted) policy and display it.
policy = value_iteration(env, 1)
print(policy)

# Roll the greedy policy out for 100 steps, restarting whenever an episode
# terminates or is truncated.
steps_taken = 0
while steps_taken < 100:
    action = policy[observation]
    print(action, end=" : ")
    observation, reward, terminated, truncated, info = env.step(action)
    print(observation)

    if terminated or truncated:
        observation, info = env.reset()
    steps_taken += 1
env.close()
