# TD(1) forward off-policy
import gym
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import defaultdict


def epsilon_greedy_policy(Action, epsilon, n):
    """Build an epsilon-greedy policy over the Q-table ``Action``.

    Args:
        Action: mapping from observation to a length-``n`` array of action values
            (e.g. a ``defaultdict`` of ``np.zeros(n)``).
        epsilon: exploration rate in [0, 1].
        n: number of discrete actions.

    Returns:
        A function ``policy(observation) -> np.ndarray`` giving a proper
        probability distribution over the ``n`` actions: every action gets
        ``epsilon / n`` probability, and the greedy action gets an extra
        ``1 - epsilon``, so the probabilities sum to exactly 1.

    Note: the original version subtracted 0.0001 from epsilon once at factory
    time (the "decay" never recurred) and rolled its own coin flip inside
    ``policy``, returning vectors that summed to either epsilon or 1+epsilon/n.
    Sampling from the distribution is the caller's job (``np.random.choice``).
    """
    def policy(observation):
        # Uniform exploration mass on every action...
        A = np.ones(n, dtype=float) * epsilon / n
        # ...plus the remaining mass on the currently greedy action.
        best_action = np.argmax(Action[observation])
        A[best_action] += 1.0 - epsilon
        return A

    return policy


def TD_forward(env, num_episodes, learning_rate, lambd, epsilon):
    """Forward-view TD(lambda) value estimation with an epsilon-greedy behaviour policy.

    Args:
        env: gym-style environment (``reset() -> state``,
            ``step(a) -> (next_state, reward, done, info)``,
            ``action_space.n`` discrete actions).
        num_episodes: number of episodes to sample.
        learning_rate: step size (alpha) for the value update.
        lambd: trace-decay parameter lambda; with lambd=1 every partial return
            is weighted equally (Monte-Carlo-like).
        epsilon: exploration rate of the behaviour policy.

    Returns:
        (V, episode_all): the learned state-value table (defaultdict) and the
        final (state, action, reward) transition of every finished episode.
    """
    V = defaultdict(float)
    Action = defaultdict(lambda: np.zeros(env.action_space.n))
    # Epsilon-greedy behaviour policy over the (here never-updated) Q-table.
    policy = epsilon_greedy_policy(Action, epsilon, env.action_space.n)
    episode_all = []  # terminal transition of each finished episode

    for i_episode in range(1, num_episodes + 1):
        # Sample one episode: a list of (state, action, reward) triples.
        episode = []
        state = env.reset()
        for t in range(200):  # hard cap to avoid non-terminating episodes
            probs = policy(state)
            probs /= probs.sum()  # defensive renormalisation for np.random.choice
            action = np.random.choice(np.arange(len(probs)), p=probs)
            next_state, reward, done, _ = env.step(action)
            episode.append((state, action, reward))
            if done:
                episode_all.append((state, action, reward))
                break
            state = next_state

        states, _, rewards = zip(*episode)

        # lambda**k weights for the k-step partial reward sums.
        lambda_returns_factor = np.array([lambd ** i for i in range(len(rewards) + 1)])
        # BUG FIX: the original wrote ``enumerate(state)`` (the last state seen),
        # iterating the components of a single state tuple instead of the
        # episode's visited states, so V was keyed on state fragments.
        for i, s in enumerate(states):
            # Partial reward sums from step i onward.
            # NOTE(review): true forward-view TD(lambda) weights n-step returns
            # by (1-lambda) and bootstraps on V(s_{t+n}); this weighted sum of
            # partial Monte Carlo returns is kept as the author wrote it.
            returns = [sum(rewards[i:i + k + 1]) for k in range(len(states) - i)]
            V[s] += learning_rate * (sum(returns * lambda_returns_factor[:len(returns)]) - V[s])
    return V, episode_all


if __name__ == "__main__":
    # Note: 'Blackjack-v0' targets the classic gym API (4-tuple step returns).
    env = gym.make('Blackjack-v0')

    # Run forward-view TD(lambda=1) evaluation. TD_forward resets the
    # environment itself, so no separate env.reset() is needed here.
    V, episode_all = TD_forward(env, num_episodes=5000, learning_rate=0.1, lambd=1, epsilon=0.1)

    # Win rate: a terminal reward of +1 counts as a win.
    all_num = len(episode_all)
    win_num = sum(1 for _, _, reward_temp in episode_all if reward_temp == 1)
    if all_num:  # guard against division by zero when nothing terminated
        print("win percent=", win_num / all_num)
    else:
        print("no finished episodes")

    # Plot squared state values as a rough "loss" curve, ordered by state key.
    sorted_values = sorted(V.items(), key=lambda kv: kv[0], reverse=True)
    y2_axis = [value ** 2 for _, value in sorted_values]
    x2_axis = range(len(y2_axis))
    plt.plot(x2_axis, y2_axis, 'c--', alpha=0.7, label='loss')
    plt.legend()
    plt.xlabel('Episodes')
    plt.ylabel('loss')
    plt.xticks([])
    plt.yticks([])
    plt.show()