import gymnasium as gym
import numpy as np


class MonteCarlo:
    """Tabular every-visit Monte Carlo control with an epsilon-greedy policy."""

    def __init__(self, state_len: int, action_len: int, gamma: float, epsilon: float):
        self.state_len = state_len
        self.action_len = action_len
        # Q[s, a]: running mean of observed returns per state-action pair.
        self.QTable = np.zeros((state_len, action_len))
        # N[s, a]: visit counts, denominator of the incremental mean in learn().
        self.NTable = np.zeros((state_len, action_len))
        self.gamma = gamma
        self.epsilon = epsilon

    def choose_action(self, state: int) -> int:
        """Return an epsilon-greedy action for `state`.

        With probability `epsilon` a uniformly random action is drawn;
        otherwise the greedy action argmax_a Q[state, a] is returned
        (ties break toward the lowest action index, per np.argmax).
        """
        if np.random.uniform() < self.epsilon:
            return np.random.randint(0, self.action_len)
        # Index the Q row directly instead of rebuilding it as a Python list.
        return int(np.argmax(self.QTable[state]))

    def learn(self, reward_chain: list[tuple[int, int, float]]) -> None:
        """Update Q from one complete episode.

        `reward_chain` holds the episode as (state, action, reward) triples
        in chronological order. Returns are accumulated backwards
        (G_t = r_t + gamma * G_{t+1}) and each visited Q[s, a] moves toward
        its new sample mean (every-visit Monte Carlo).
        """
        g = 0.0  # return accumulated from the end of the episode
        for s, a, r in reversed(reward_chain):
            g = r + self.gamma * g
            self.NTable[s][a] += 1
            # Incremental mean update: Q += (G - Q) / N
            self.QTable[s][a] += (g - self.QTable[s][a]) / self.NTable[s][a]


is_slippery = False
# for training: no rendering, deterministic (non-slippery) dynamics
env = gym.make("FrozenLake-v1", render_mode=None, is_slippery=is_slippery)
observation, info = env.reset(seed=42)

# Agent tables are sized from the env spaces, so both must be Discrete.
if not isinstance(env.observation_space, gym.spaces.Discrete):
    raise TypeError("Observation space is not of type 'Discrete'.")
if not isinstance(env.action_space, gym.spaces.Discrete):
    raise TypeError("Action space is not of type 'Discrete'.")

agent = MonteCarlo(
    state_len=int(env.observation_space.n),
    action_len=int(env.action_space.n),
    gamma=1,
    epsilon=0.5,
)

reward_chain = []  # (state, action, reward) triples of the current episode
for i in range(1000000):
    action = agent.choose_action(observation)
    next_observation, reward, terminated, truncated, info = env.step(action)
    reward_chain.append((observation, action, reward))
    observation = next_observation

    if terminated or truncated:
        # Episode finished: Monte Carlo learns from the full trajectory,
        # then a fresh episode starts.
        agent.learn(reward_chain)
        reward_chain = []
        observation, info = env.reset()

    if i % 1000 == 0:
        print(f"loop {i}")
env.close()
print("QTable:")
print(agent.QTable)
print("finish training")

# for testing: run one greedy episode with human rendering
env = gym.make("FrozenLake-v1", render_mode="human", is_slippery=is_slippery)
observation, info = env.reset(seed=42)
agent.epsilon = 0  # act fully greedily; hoisted out of the loop (was reset every step)
for _ in range(100):
    action = agent.choose_action(observation)
    observation, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        break
env.close()

# State values V(s) = max_a Q(s, a), displayed on the 4x4 FrozenLake grid.
maxQsa = np.max(agent.QTable, axis=1)
print(maxQsa.reshape((4, 4)))
