import gymnasium as gym
import numpy as np


class QLearning:
    """Tabular Q-learning agent with an epsilon-greedy behavior policy.

    Maintains a dense Q-table of shape (state_len, action_len) and updates it
    with the standard off-policy TD(0) rule:
        Q[s, a] += lr * (r + gamma * max_a' Q[s', a'] - Q[s, a])
    """

    def __init__(
        self, state_len: int, action_len: int, gamma: float, epsilon: float, lr: float
    ):
        """Create a zero-initialized Q-table agent.

        Args:
            state_len: number of discrete states.
            action_len: number of discrete actions.
            gamma: discount factor in [0, 1].
            epsilon: probability of taking a uniformly random action.
            lr: learning rate (TD step size).
        """
        self.state_len = state_len
        self.action_len = action_len
        # Q[s, a]: estimated return of taking action a in state s.
        self.QTable = np.zeros((state_len, action_len))
        self.gamma = gamma
        self.epsilon = epsilon
        self.lr = lr

    def sample_action(self, state: int) -> int:
        """Epsilon-greedy: explore with probability epsilon, else act greedily."""
        if np.random.uniform() < self.epsilon:
            return np.random.randint(0, self.action_len)
        return self.best_action(state)

    def best_action(self, state: int) -> int:
        """Return argmax_a Q[state, a] (ties resolved to the lowest index)."""
        # Vectorized argmax over the row; equivalent to (but faster than)
        # building a Python list of the row's entries first.
        return int(np.argmax(self.QTable[state]))

    def learn(self, s: int, a: int, r: float, ns: int, finished: bool) -> None:
        """Apply one Q-learning update for transition (s, a, r, ns).

        Args:
            s: state the action was taken in.
            a: action taken.
            r: reward received.
            ns: next state reached.
            finished: True iff ns is terminal — then no bootstrapping occurs.
        """
        if finished:
            target = r  # no value beyond a terminal state
        else:
            # Off-policy target: bootstrap from the greedy value of ns.
            target = r + self.gamma * float(np.max(self.QTable[ns]))
        self.QTable[s][a] += self.lr * (target - self.QTable[s][a])


is_slippery = False
# Training environment: no rendering, for speed.
env = gym.make("FrozenLake-v1", render_mode=None, is_slippery=is_slippery)
observation, info = env.reset(seed=42)

# The agent's dense Q-table requires discrete state and action spaces.
if not isinstance(env.observation_space, gym.spaces.Discrete):
    raise TypeError("Observation space is not of type 'Discrete'.")
if not isinstance(env.action_space, gym.spaces.Discrete):
    raise TypeError("Action space is not of type 'Discrete'.")

agent = QLearning(
    state_len=int(env.observation_space.n),
    action_len=int(env.action_space.n),
    gamma=1,
    epsilon=0.5,
    lr=0.01,
)


# Train for a fixed number of environment steps (episodes reset inline).
for i in range(1000000):
    action = agent.sample_action(observation)
    next_observation, reward, terminated, truncated, info = env.step(action)
    # Bootstrap is suppressed only on true termination; truncation is a
    # time-limit artifact, not a terminal state of the MDP.
    agent.learn(
        observation, action, float(reward), next_observation, terminated
    )
    observation = next_observation

    if terminated or truncated:
        # Start a fresh episode; the next iteration samples its own action,
        # so nothing else needs to be computed here.
        observation, info = env.reset()

    if i % 1000 == 0:
        print(f"loop {i}")
env.close()
print("QTable:")
print(agent.QTable)
print("finish training")

# for testing: greedy rollout with on-screen rendering to inspect the policy.
env = gym.make("FrozenLake-v1", render_mode="human", is_slippery=is_slippery)
observation, info = env.reset(seed=42)
for _ in range(100):
    greedy_action = agent.best_action(observation)
    observation, reward, terminated, truncated, info = env.step(greedy_action)
    done = terminated or truncated
    if done:
        break
env.close()

# State values V(s) = max_a Q(s, a), laid out on the 4x4 lake grid.
maxQsa = np.max(agent.QTable, axis=1)
print(maxQsa.reshape((4, 4)))
