# On-policy Monte Carlo control with Q-table storage
import gym
import matplotlib.pyplot as plt
import numpy as np
import random
from collections import defaultdict


def epsilon_greedy_policy(Q, epsilon, n):
    """Build an epsilon-greedy policy from an action-value function Q.

    Args:
        Q: mapping state -> numpy array of length ``n`` with Q(s, a) values.
        epsilon: exploration probability in [0, 1].
        n: number of discrete actions.

    Returns:
        ``policy(observation)`` -> numpy array of length ``n`` giving the
        probability of taking each action; the array always sums to 1.

    Note: the original version gated the greedy bonus behind a second
    ``random.random() > epsilon`` draw and returned an unnormalized vector
    (summing to ``epsilon`` or ``1 + epsilon``), which applied epsilon twice
    and forced the caller to renormalize. It also "decayed" epsilon once at
    factory time, which has no per-step effect. Both are fixed here: the
    canonical epsilon-greedy distribution is returned directly.
    """
    def policy(observation):
        # Every action receives an equal share of the exploration mass.
        A = np.ones(n, dtype=float) * epsilon / n
        # The greedy action gets the remaining (1 - epsilon) mass, so the
        # result is a proper distribution — no post-hoc normalization needed.
        best_action = np.argmax(Q[observation])
        A[best_action] += 1.0 - epsilon
        return A

    return policy


def MC(env, num_episodes, discount_factor=0.99, epsilon=0.1):
    """On-policy first-visit Monte Carlo control with epsilon-greedy exploration.

    Args:
        env: gym environment using the legacy 4-tuple ``step`` API.
        num_episodes: number of episodes to sample.
        discount_factor: per-step reward discount gamma.
        epsilon: exploration probability passed to the behavior policy.

    Returns:
        (Q, policy, episode_all) where Q maps state -> numpy array of
        action values, policy is the epsilon-greedy policy function, and
        episode_all collects the terminal (state, action, reward) triple
        of every episode that reached ``done``.
    """
    # Running sums and visit counts per (state, action) pair, used to
    # maintain Q(s, a) as the empirical mean of observed returns.
    cumulative_return = defaultdict(float)
    visit_count = defaultdict(float)

    n_actions = env.action_space.n
    # Q: state -> numpy array of length n_actions with Q(s, a) values.
    Q = defaultdict(lambda: np.zeros(n_actions))

    # Behavior policy derived from Q (updates to Q are seen immediately).
    policy = epsilon_greedy_policy(Q, epsilon, n_actions)
    episode_all = []  # terminal step of each finished episode

    for _ in range(num_episodes):
        # Roll out one episode: a list of (state, action, reward) triples.
        trajectory = []
        obs = env.reset()
        for _step in range(200):  # hard cap on episode length
            action_probs = policy(obs)
            action_probs /= action_probs.sum()  # normalize before sampling
            act = np.random.choice(np.arange(len(action_probs)), p=action_probs)
            new_obs, reward, done, _info = env.step(act)
            trajectory.append((obs, act, reward))

            if done:
                episode_all.append((obs, act, reward))
                break
            obs = new_obs

        # First-visit MC update: one return per distinct (s, a) pair,
        # measured from the pair's first occurrence in the trajectory.
        # States are made tuples so they can serve as dict keys.
        for s, a in {(tuple(step[0]), step[1]) for step in trajectory}:
            key = (s, a)
            first_idx = next(idx for idx, step in enumerate(trajectory)
                             if step[0] == s and step[1] == a)
            # Discounted return from the first visit to episode end.
            g_return = sum(step[2] * discount_factor ** k
                           for k, step in enumerate(trajectory[first_idx:]))
            cumulative_return[key] += g_return
            visit_count[key] += 1.0
            # Q(s, a) = mean of all first-visit returns seen so far.
            Q[s][a] = cumulative_return[key] / visit_count[key]

    return Q, policy, episode_all


if __name__ == '__main__':
    env = gym.make('Blackjack-v0')
    env.reset()  # reset the environment before training

    # Train; episode_all holds the terminal (state, action, reward) of
    # every finished episode.
    Q, policy, episode_all = MC(env, num_episodes=5000, epsilon=0.1)
    V = defaultdict(float)

    # Win rate: in Blackjack the terminal reward is +1 on a win.
    win_num = sum(1 for _, _, reward_temp in episode_all if reward_temp == 1)
    all_num = len(episode_all)
    # Guard against division by zero if no episode ever terminated.
    print("win rate=", win_num / all_num if all_num else 0.0)

    # Plot 1: action value vs. usable-ace flag.
    x1_axis = []
    y1_axis = []
    # Plot 2: squared spread of Q-values per state.
    y2_axis = []
    for state, actions in Q.items():
        action_value = np.max(actions)
        V[state] = action_value  # state value = value of the greedy action
        x1_axis.append(action_value)
        # state[2] is the usable-ace flag of the Blackjack observation tuple.
        y1_axis.append(1 if state[2] else -1)
        # Squared gap between the best and worst action values; max >= min,
        # so no abs() is needed.
        y2_axis.append((np.max(actions) - np.min(actions)) ** 2)

    y2_axis.sort(reverse=True)
    # Plot: usable ace vs. action value.
    plt.plot(x1_axis, y1_axis, 'c.', alpha=0.7, label='ACE')
    plt.legend()
    plt.xlabel('Action_Value')
    plt.ylabel('Ace')
    plt.show()

    # Plot: Q-value spread, sorted descending.
    x2_axis = range(len(y2_axis))
    plt.plot(x2_axis, y2_axis, 'c.--', alpha=0.7, label='Q_loss')
    plt.legend()
    plt.xlabel('Episodes')
    plt.ylabel('Q_loss')
    plt.xticks([])
    plt.yticks([])
    plt.show()