import math
import random
import sys
from collections import deque

import gym
import numpy as np


class Agent:
    """Tabular Q-learning agent with an epsilon-greedy behavior policy.

    Parameters
    ----------
    env : gym.Env-like
        Environment exposing discrete ``observation_space.n`` and
        ``action_space.n`` (and ``action_space.sample()`` for exploration).
    alpha : float
        Learning rate for the Q-table update.
    gamma : float
        Discount factor for future rewards.
    start_epsilon : float
        Initial exploration rate.
    epsilon_decay : float
        Multiplicative decay applied to epsilon at the end of each episode.
    epsilon_cut : float or None
        Lower bound (floor) for epsilon after decay; ``None`` disables it.
    """

    def __init__(self, env, alpha=0.1, gamma=1, start_epsilon=1, epsilon_decay=0.9, epsilon_cut=0.1):
        self.env = env
        # One row per discrete state, one column per discrete action.
        self.q_table = np.zeros([env.observation_space.n, env.action_space.n])
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon, self.epsilon_decay, self.epsilon_cut = start_epsilon, epsilon_decay, epsilon_cut

    def epsilon_greedy_policy(self, state):
        """Return a random action with probability epsilon, else the greedy action."""
        if random.uniform(0, 1) < self.epsilon:
            return self.env.action_space.sample()
        return np.argmax(self.q_table[state])

    def update_q_table(self, state, action, reward, next_state, done):
        """Apply one Q-learning update; at episode end, also decay epsilon.

        Bug fix: the original always bootstrapped ``gamma * max Q(next_state)``,
        even on terminal transitions. A terminal state has no future return, so
        the TD target must be the bare reward when ``done`` is True.
        """
        old_value = self.q_table[state, action]
        if done:
            target = reward  # no future value beyond a terminal transition
        else:
            target = reward + self.gamma * np.max(self.q_table[next_state])
        self.q_table[state, action] = (1 - self.alpha) * old_value + self.alpha * target

        if done:
            # Anneal exploration once per episode, optionally floored at epsilon_cut.
            self.epsilon = self.epsilon * self.epsilon_decay
            if self.epsilon_cut is not None:
                self.epsilon = max(self.epsilon, self.epsilon_cut)


def interact(env, agent, num_episodes=20000, window=100, print_logs=True):
    """Run ``num_episodes`` episodes of agent-environment interaction.

    Parameters
    ----------
    env : environment with ``reset()`` and ``step(action)`` returning the
        old-gym 4-tuple ``(next_state, reward, done, info)``.
    agent : object exposing ``epsilon_greedy_policy(state)`` and
        ``update_q_table(state, action, reward, next_state, done)``.
    num_episodes : int
        Number of episodes to run.
    window : int
        Size of the trailing window used to compute the average reward.
    print_logs : bool
        Whether to print a progress line after each windowed episode.

    Returns
    -------
    (avg_rewards, best_avg_reward)
        Deque of trailing-window average rewards and the best average seen.
    """
    avg_rewards = deque(maxlen=num_episodes)
    best_avg_reward = -math.inf
    samp_rewards = deque(maxlen=window)  # trailing window of episode returns

    for i_episode in range(1, num_episodes + 1):
        state = env.reset()
        samp_reward = 0

        while True:
            action = agent.epsilon_greedy_policy(state)
            next_state, reward, done, _ = env.step(action)
            agent.update_q_table(state, action, reward, next_state, done)
            state = next_state
            samp_reward += reward
            if done:
                samp_rewards.append(samp_reward)
                break

        # Bug fix: this threshold was hard-coded to 100, silently ignoring
        # the `window` parameter for any non-default value.
        if i_episode >= window:
            avg_reward = np.mean(samp_rewards)
            avg_rewards.append(avg_reward)
            best_avg_reward = max(best_avg_reward, avg_reward)

            if print_logs:
                sys.stdout.flush()
                template = "Episode {}/{} | Average reward {} | Best average reward {}"
                print(template.format(i_episode, num_episodes, avg_reward, best_avg_reward))

    return avg_rewards, best_avg_reward


def main():
    """Train a Q-learning agent on Taxi-v3 and print the best average reward."""
    # Hyper-parameters found by the second random search.
    # (An earlier hand-picked set and a first-search set were kept here as
    # dead/commented-out code; they have been removed.)
    param = {
        'alpha': 0.12502338306305688,
        'gamma': 0.8239571609757927,
        'start_epsilon': 0.8502004766640675,
        'epsilon_decay': 0.9367728013968069,
        'epsilon_cut': 0,
    }

    env = gym.make('Taxi-v3')
    agent = Agent(env, **param)
    # The per-episode average-reward history is not used here.
    _, best_avg_reward = interact(env, agent, num_episodes=50000)
    print(best_avg_reward)


# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()