import gym
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors


class Agent(object):
    """Tabular TD-learning agent for FrozenLake-v1.

    Maintains a state-by-action Q table and supports two update rules,
    selected by ``learning_method``: 'sarsa' (on-policy) and
    'q-learning' (off-policy).
    """

    def __init__(self):
        self.env = gym.make('FrozenLake-v1')
        n_states = self.env.observation_space.n
        n_actions = self.env.action_space.n
        self.Q = np.zeros((n_states, n_actions))
        # Placeholder hyper-parameters; learn() assigns the real values.
        self.alpha = 0
        self.gamma = 0
        self.epsilon = 0
        self.learning_method = 'sarsa'

    def greedy_predict(self, state):
        """Return a Q-maximal action for ``state``, breaking ties at random."""
        row = self.Q[state, :]
        best_actions = np.where(row == row.max())[0]
        return np.random.choice(best_actions)

    def epsilon_greedy_predict(self, state):
        """Explore with probability ``epsilon``; otherwise act greedily."""
        if np.random.uniform(0, 1) > self.epsilon:
            return self.greedy_predict(state)
        return self.env.action_space.sample()

    # learn for ONE episode
    def learn_single_episode(self, max_steps):
        """Play one episode (at most ``max_steps`` steps), updating Q in place."""
        state = self.env.reset()
        if self.learning_method == 'sarsa':
            action = self.epsilon_greedy_predict(state)
            for _ in range(max_steps):
                next_state, reward, done, _ = self.env.step(action)
                next_action = self.epsilon_greedy_predict(next_state)
                # On-policy target: bootstrap from the action actually taken next.
                target = reward + self.gamma * self.Q[next_state, next_action]
                self.Q[state, action] += self.alpha * (target - self.Q[state, action])
                state, action = next_state, next_action
                if done:
                    break
        elif self.learning_method == 'q-learning':
            for _ in range(max_steps):
                action = self.epsilon_greedy_predict(state)
                next_state, reward, done, _ = self.env.step(action)
                # Off-policy target: bootstrap from the best next action.
                target = reward + self.gamma * np.max(self.Q[next_state, :])
                self.Q[state, action] += self.alpha * (target - self.Q[state, action])
                state = next_state
                if done:
                    break

    def learn(self, episodes=100, max_steps=1000, alpha=0.01, gamma=0.9, epsilon=0.3, learning_method='sarsa'):
        """Reset the Q table and train it for ``episodes`` episodes."""
        table_shape = (self.env.observation_space.n, self.env.action_space.n)
        self.Q = np.zeros(table_shape)
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon = epsilon
        self.learning_method = learning_method
        for _ in range(episodes):
            self.learn_single_episode(max_steps)
        # OUTPUT: finely trained self.Q

    def test(self, episodes=500, max_steps=1000):
        """Return the mean episode reward of the greedy policy over ``episodes`` runs."""
        total_reward = 0.0
        for _ in range(episodes):
            state = self.env.reset()
            episode_reward = 0
            for _ in range(max_steps):
                state, reward, done, _ = self.env.step(self.greedy_predict(state))
                episode_reward += reward
                if done:
                    break
            total_reward += episode_reward
        return total_reward / episodes


def evaluation():
    """Sweep an (alpha, gamma) grid, training a Sarsa and a Q-learning agent
    at each point, and save the resulting win-rate matrices.

    Writes ``result_sarsa.npy`` and ``result_q_learning.npy`` where entry
    [i, j] is the greedy-policy win rate for alpha_list[i], gamma_list[j].
    """
    agent = Agent()
    alpha_list = np.arange(0.01, 1, 0.01)
    gamma_list = np.arange(0.01, 1, 0.01)
    result_sarsa = np.zeros((alpha_list.size, gamma_list.size))
    result_q_learning = np.zeros((alpha_list.size, gamma_list.size))
    # Derive grid dimensions instead of hard-coding 99/99*99: the progress
    # report stays correct if the sweep resolution is ever changed.
    total_cells = alpha_list.size * gamma_list.size
    for i, alp in enumerate(alpha_list):
        for j, gam in enumerate(gamma_list):
            agent.learn(episodes=2000, alpha=alp, gamma=gam, epsilon=0.3, learning_method='sarsa')
            result_sarsa[i, j] = agent.test()
            agent.learn(episodes=2000, alpha=alp, gamma=gam, epsilon=0.3, learning_method='q-learning')
            result_q_learning[i, j] = agent.test()
            progress = (i * gamma_list.size + j) / total_cells
            print('Evaluated on alpha=%.2f, gamma=%.2f, current progress %.4f, sarsa acc=%.2f, q-learning acc=%.2f'
                  % (alp, gam, progress, result_sarsa[i, j], result_q_learning[i, j]))
    np.save('result_sarsa', result_sarsa)
    np.save('result_q_learning', result_q_learning)


def draw_image():
    """Render the saved Sarsa and Q-learning win-rate grids side by side
    with a shared colour scale and write the figure to ``res.pdf``.
    """
    result_sarsa = np.load('result_sarsa.npy')
    result_q_learning = np.load('result_q_learning.npy')
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8.5, 4), dpi=250)
    # Shared normalisation so both panels use the same colour scale.
    norm = colors.Normalize(vmin=0, vmax=0.8)
    panels = []
    for ax, data, title in zip(axes, (result_sarsa, result_q_learning), ('Sarsa', 'Q-learning')):
        panels.append(ax.imshow(data, norm=norm, cmap='rainbow', extent=[0, 1, 0, 1]))
        ax.set_title(title)
        ax.set_xlabel(r'$\alpha$')
        ax.set_ylabel(r'$\gamma$')
    # Make room on the right for a single colour bar serving both panels.
    fig.subplots_adjust(right=0.85)
    cb = fig.colorbar(panels[-1], cax=fig.add_axes([0.9, 0.1, 0.02, 0.78]))
    cb.ax.set_title('Win Rate')
    plt.savefig('res.pdf')


def draw_diff():
    """Plot the Q-learning-minus-Sarsa win-rate difference and save it
    to ``diff.pdf``.
    """
    sarsa_grid = np.load('result_sarsa.npy')
    q_learning_grid = np.load('result_q_learning.npy')
    difference = q_learning_grid - sarsa_grid
    plt.imshow(difference, cmap='rainbow', extent=[0, 1, 0, 1])
    bar = plt.colorbar()
    bar.ax.set_title('Difference')
    plt.xlabel(r'$\alpha$')
    plt.ylabel(r'$\gamma$')
    plt.title('Q-learning minus Sarsa')
    plt.savefig('diff.pdf')


if __name__ == '__main__':
    # Pipeline stages: evaluation() runs the slow hyper-parameter sweep and
    # saves the .npy results; the draw_* helpers plot from those files.
    # Only the diff plot is produced on this run.
    # evaluation()
    # draw_image()
    draw_diff()
