'''Blackjack,
also called 21, is a popular card game played in casinos. The goal of the game is
to have a sum of all your cards close to 21 and not exceeding 21. The value of
cards J, K, and Q is 10. The value of ace can be 1 or 11; this depends on player
choice. The value of the rest of the cards (1 to 10) is the same as the numbers
they show.'''

'''The steps involved in the on-policy Monte Carlo method are very simple:
1. First, we initialize a random policy and a random Q function.
2. Then we initialize a list called returns for storing the returns.
3. We generate an episode using the random policy π.
4. We store the return of every state-action pair occurring in the episode in
the returns list.
5. Then we take the average of the returns in the returns list and assign that
value to the Q function.
6. The probability of selecting an action a in state s is then governed by
epsilon.
7. With probability 1-epsilon we pick the action that has the maximal
Q value.
8. With probability epsilon, we explore by picking a random action.'''

# on-policy

import gym
import numpy as np
import pandas as pd
from matplotlib import pyplot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from collections import defaultdict
from functools import partial
# %matplotlib inline
plt.style.use('ggplot')

# Gym's classic Blackjack environment. The code below indexes observations as
# a 3-tuple (obs[0], obs[1], obs[2]) and samples from 2 actions.
env = gym.make('Blackjack-v0')

# NOTE(review): earlier pandas-based Q-table/policy scratch work, superseded by
# the MonteCarloControl class below — kept commented out by the author.
# env.action_space.n
# env.observation_space.n
# Q_table = pd.DataFrame(columns=env.action_space.n,dtype=float)
# policy = defaultdict(int)
# epsilon = 0.1

class MonteCarloControl:
    """On-policy first-visit Monte Carlo control with an epsilon-greedy policy.

    Learns action values Q(s, a) by sampling complete episodes with the current
    stochastic policy, averaging first-visit returns, and improving the policy
    epsilon-greedily. Epsilon is annealed from 1.0 towards ``epsilon_min`` so
    the agent shifts from exploration to exploitation over training.
    """

    def __init__(self, env):
        # Start fully exploratory; decayed per episode in comput_Q_value.
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.env = env
        self.act_n = env.action_space.n

        # Observations are tuples; one discrete dimension per component
        # (for Blackjack: player sum, dealer showing card, usable ace).
        obs_space = [dim.n for dim in self.env.observation_space]
        policy_space = obs_space + [self.act_n]

        # Uniform random initial policy: policy[s0, s1, s2, a] = P(a | s).
        self.policy = np.ones(policy_space) / self.act_n
        # Q_table maps (state, action) -> running average of first-visit returns.
        self.Q_table = defaultdict(float)

    def __use_policy(self, obs):
        """Sample an action from the current stochastic policy for ``obs``."""
        # obs[2] (usable ace) may be a bool; int() makes it a valid index.
        probs = self.policy[obs[0], obs[1], int(obs[2])]
        # Generalized from the hard-coded 2 to self.act_n.
        return np.random.choice(self.act_n, p=probs)

    def __generate_episode(self):
        """Play one episode with the current policy.

        Returns:
            (states, actions, rewards): parallel lists, one entry per step.
        """
        states, actions, rewards = [], [], []
        # Bug fix: use the injected self.env, not the module-global `env`.
        observation = self.env.reset()
        done = False
        while not done:
            states.append(observation)
            action = self.__use_policy(observation)
            actions.append(action)
            observation, reward, done, info = self.env.step(action)
            rewards.append(reward)
        return states, actions, rewards

    def comput_Q_value(self, n_episodes, gamma=0.1):
        """Run first-visit MC control for ``n_episodes`` episodes.

        Updates self.Q_table (incremental-mean returns) and self.policy
        (epsilon-greedy improvement) in place.

        Args:
            n_episodes: number of episodes to sample.
            gamma: discount factor (default 0.1 preserves the original
                hard-coded value).
        """
        N = defaultdict(int)  # visit counts per (state, action) for the incremental mean
        for _ in range(n_episodes):
            states, actions, rewards = self.__generate_episode()
            state_actions = list(zip(states, actions))

            G = 0.0
            # Walk the episode backwards so G accumulates the discounted return.
            for t in range(len(states) - 1, -1, -1):
                S, A, R = states[t], actions[t], rewards[t]
                G = gamma * G + R
                # First-visit check: skip if (S, A) occurs earlier in the episode.
                if (S, A) not in state_actions[:t]:
                    N[(S, A)] += 1
                    # Incremental mean: Q += (G - Q) / N; defaultdict starts at 0,
                    # so no explicit key-existence check is needed.
                    self.Q_table[(S, A)] += (G - self.Q_table[(S, A)]) / N[(S, A)]

                    # Epsilon-greedy policy improvement for state S
                    # (generalized over all actions instead of hard-coded 0/1).
                    best_action = np.argmax(
                        [self.Q_table[(S, a)] for a in range(self.act_n)]
                    )
                    for a in range(self.act_n):
                        if a == best_action:
                            self.policy[S[0], S[1], int(S[2])][a] = \
                                1 - self.epsilon + self.epsilon / self.act_n
                        else:
                            self.policy[S[0], S[1], int(S[2])][a] = \
                                self.epsilon / self.act_n
            # Anneal exploration slowly, never below epsilon_min.
            self.epsilon = max(self.epsilon * 0.99995, self.epsilon_min)

    def plotpolicy(self):
        """Plot the greedy policy as two heatmaps: no usable ace / usable ace.

        Saves the figure to 'test.png' and shows it.
        """
        p_no_ace = self.policy[:, :, 0]
        p_have_ace = self.policy[:, :, 1]

        # Greedy action per state (argmax over the action axis).
        best_policy_no_ace = np.argmax(p_no_ace, axis=2)
        best_policy_have_ace = np.argmax(p_have_ace, axis=2)

        fig, ax = plt.subplots(ncols=2, figsize=(20, 20))
        ax1, ax2 = ax

        m1 = ax1.matshow(best_policy_no_ace)
        m2 = ax2.matshow(best_policy_have_ace)

        # Restrict ticks to the interesting region of the state space.
        xticks = np.arange(11, 22)
        yticks = np.arange(1, 11)
        ax1.set_yticks(xticks)
        ax1.set_xticks(yticks)
        ax2.set_yticks(xticks)
        ax2.set_xticks(yticks)

        ax1.set_ylabel('Player sum', fontsize=16)
        ax1.set_xlabel('Dealer showing card', fontsize=16)
        ax2.set_ylabel('Player sum', fontsize=16)
        ax2.set_xlabel('Dealer showing card', fontsize=16)

        ax1.set_title('Policy, no usable ace', fontsize=22)
        ax2.set_title('Policy, with usable ace', fontsize=22)

        fig.colorbar(m1, ax=ax1)
        fig.colorbar(m2, ax=ax2)

        plt.savefig('test.png')
        plt.show()

# Train the agent with on-policy Monte Carlo control, then visualize the
# learned greedy policy for both ace configurations.
backjack = MonteCarloControl(env)

n_training_episodes = 200000
backjack.comput_Q_value(n_training_episodes)

backjack.plotpolicy()


# def update_policy():
#     pass
# import gym, collections, matplotlib
# import numpy as np
# import matplotlib.pyplot as plt
# from matplotlib import cm
# from mpl_toolkits.mplot3d import Axes3D
#
#
# def on_policy_mc_control(env, policy, gamma=0.1, epsilon=1.0, iterations=1000):
#     Q = collections.defaultdict(float)
#     state_actions_count = collections.defaultdict(int)
#     returns = collections.defaultdict(float)
#
#     for itr in range(iterations):
#
#         if itr % 1000 == 0:
#             print('Playing episode {} out of {}.'.format(itr, iterations))
#
#         episode = play_episode(env, policy)
#         G = 0
#
#         # encourage exploration at start
#         epsilon = max(epsilon * 0.99995, 0.01)
#
#         state_actions_in_episode = [(s, a) for (s, a, r) in episode]
#
#         for t, (state, action, reward) in enumerate(episode):
#             G = gamma * G + reward
#
#             if not (state, action) in state_actions_in_episode[0:t]:
#                 returns[(state, action)] += G
#                 state_actions_count[(state, action)] += 1
#                 Q[(state, action)] = returns[(state, action)] / state_actions_count[(state, action)]
#
#                 best_action = np.argmax([Q[(state, 0)], Q[(state, 1)]])
#
#                 for a in range(env.action_space.n):  # enumerate action space
#                     if a == best_action:
#                         policy[state[0], state[1], int(state[2])][a] = 1 - epsilon + epsilon / env.action_space.n
#                     else:
#                         policy[state[0], state[1], int(state[2])][a] = epsilon / env.action_space.n
#     return Q, policy
#
#
# def play_episode(env, policy):
#     episode = []
#     state = env.reset()
#     while True:
#         action = np.random.choice(2, p=policy[state[0], state[1], int(state[2])])
#         next_state, reward, done, info = env.step(action)
#         episode.append((state, action, reward))
#         state = next_state
#         if done:
#             break
#     return episode
#
#
# def plot_policy(p):
#     p_no_ace = p[:, :, 0]
#     p_have_ace = p[:, :, 1]
#
#     best_policy_no_ace = np.argmax(p_no_ace, axis=2)
#     best_policy_have_ace = np.argmax(p_have_ace, axis=2)
#
#     fig, ax = plt.subplots(ncols=2, figsize=(20, 20))
#
#     ax1, ax2 = ax
#
#     m1 = ax1.matshow(best_policy_no_ace)
#     m2 = ax2.matshow(best_policy_have_ace)
#
#     xticks = np.arange(11, 22)
#     yticks = np.arange(1, 11)
#     # Show all ticks, remove what rows and columns to not to show
#     ax1.set_yticks(xticks)
#     ax1.set_xticks(yticks)
#     ax2.set_yticks(xticks)
#     ax2.set_xticks(yticks)
#
#     ax1.set_ylabel('Player sum', fontsize=16)
#     ax1.set_xlabel('Dealer showing card', fontsize=16)
#     ax2.set_ylabel('Player sum', fontsize=16)
#     ax2.set_xlabel('Dealer showing card', fontsize=16)
#
#     ax1.set_title('Policy, no usable ace', fontsize=22)
#     ax2.set_title('Policy, with usable ace', fontsize=22)
#
#     fig.colorbar(m1, ax=ax1)
#     fig.colorbar(m2, ax=ax2)
#
#     plt.show()
#
#
# if __name__ == "__main__":
#     env = gym.make('Blackjack-v0')
#
#     nA = env.action_space.n
#     obs_space = [dim.n for dim in env.observation_space]
#     policy_space = [obs_space[0], obs_space[1], obs_space[2], nA]
#
#     policy = np.ones(policy_space) / nA  # 50/50 policy
#
#     Q, p = on_policy_mc_control(env, policy, iterations=10000000)
#
#     plot_policy(p)