import matplotlib
import numpy as np
from matplotlib import pyplot as plt

from RL.多臂老虎机.DecayingEpsilonGreedy import DecayingEpsilonGreedy
from RL.多臂老虎机.ThompsonSampling import ThompsonSampling
from RL.多臂老虎机.UCB import UCB

matplotlib.use("TkAgg")
from RL.多臂老虎机.BernoulliBandit import BernoulliBandit
from RL.多臂老虎机.EpsilonGreedy import EpsilonGreedy


def plot_results(solvers, solver_names):
    """Plot cumulative regret over time for each bandit-solving strategy.

    Args:
        solvers: list of solver instances; each must expose a ``regrets``
            sequence (cumulative regret at every time step) and a ``bandit``
            attribute whose ``K`` is the number of arms.
        solver_names: list of legend labels, parallel to ``solvers``.

    Side effects:
        Opens a matplotlib window (``plt.show()``) with one curve per solver.
    """
    # zip keeps each solver paired with its label without index bookkeeping
    for solver, name in zip(solvers, solver_names):
        plt.plot(range(len(solver.regrets)), solver.regrets, label=name)
    plt.xlabel('Time steps')
    plt.ylabel('Cumulative regrets')
    # f-string for consistency with the rest of the file (was %-formatting)
    plt.title(f'{solvers[0].bandit.K}-armed bandit')
    plt.legend()
    plt.show()

np.random.seed(1)  # Fix the random seed so the experiment is reproducible
np.set_printoptions(suppress=True, precision=4)  # print arrays with 4 decimals, no scientific notation
K = 10  # number of arms of the bandit
# BernoulliBandit presumably draws K random win probabilities — defined in the project module
bandit_10_arm = BernoulliBandit(K)
print(f"随机生成了一个{K}臂伯努利老虎机")
print(f"每个老虎机的获奖概率{np.array2string(bandit_10_arm.probs, precision=4)}")
print(f"获奖概率最大的拉杆为{bandit_10_arm.best_idx}号,其获奖概率为{bandit_10_arm.best_prob}")


"""
===============================================================================================
ϵ-贪心算法
===============================================================================================
"""

"""
ϵ-贪心算法: ϵ=0.01
"""
# np.random.seed(1)
# epsilon_greedy_solver = EpsilonGreedy(bandit_10_arm, epsilon=0.01)
# epsilon_greedy_solver.run(5000)
# print(f"每台老虎机的使用次数列表{epsilon_greedy_solver.counts.astype(int)}")
# print(f"每台老虎机的期望值{np.array2string(epsilon_greedy_solver.estimates, precision=4)}")
# print('epsilon-贪婪算法的累积懊悔为：', epsilon_greedy_solver.regret)
# plot_results([epsilon_greedy_solver], ["EpsilonGreedy"])

"""
ϵ-贪心算法: ϵ=[1e-4, 0.01, 0.1, 0.25, 0.5]
"""
# np.random.seed(0)
# epsilons = [1e-4, 0.01, 0.1, 0.25, 0.5]
# epsilon_greedy_solver_list = [
#     EpsilonGreedy(bandit_10_arm, epsilon=e) for e in epsilons
# ]
# epsilon_greedy_solver_names = ["epsilon={}".format(e) for e in epsilons]
# for i, solver in enumerate(epsilon_greedy_solver_list):
#     solver.run(5000)
#     print(f'epsilon={epsilons[i]}的贪婪算法的累积懊悔为：{solver.regret}')
# plot_results(epsilon_greedy_solver_list, epsilon_greedy_solver_names)


"""
动态ϵ-贪心算法: ϵ=根据时间进行变化
优点: 前期会广泛的探索多个老虎机, 后期随着总次数增加减少探索, 明显优于固定ϵ-贪心算法
"""
# np.random.seed(1)
# decaying_epsilon_greedy_solver = DecayingEpsilonGreedy(bandit_10_arm)
# decaying_epsilon_greedy_solver.run(5000)
# print('epsilon值衰减的贪婪算法的累积懊悔为：', decaying_epsilon_greedy_solver.regret)
# plot_results([decaying_epsilon_greedy_solver], ["DecayingEpsilonGreedy"])


"""
这里我们发现一个问题
ϵ-贪心算法的ϵ=1e-4的累积懊悔为：5.886492256558774
动态ϵ-贪心算法累积懊悔为： 10.114334931260183
那么是不是我取更小的ϵ那么这个策略就是最好的呢?

答: 并不是, 固定ϵ-贪心算法的ϵ=1e-4只是在这次的随机数为np.random.seed(0)的情况为好, 但是换一个随机数就不是了, 但是动态ϵ-贪心算法是泛化更强的
"""


"""
===============================================================================================
上置信界算法
===============================================================================================
"""
# np.random.seed(1)
# coef = 1  # 控制不确定性比重的系数
# UCB_solver = UCB(bandit_10_arm, coef)
# UCB_solver.run(5000)
# print(f"每台老虎机的使用次数列表{UCB_solver.counts.astype(int)}")
# print(f"每台老虎机的期望值{np.array2string(UCB_solver.estimates, precision=4)}")
# print('上置信界算法的累积懊悔为：', UCB_solver.regret)
# plot_results([UCB_solver], ["UCB"])


"""
===============================================================================================
汤普森采样算法
===============================================================================================
"""
np.random.seed(1)  # reset the seed so this run is comparable with the experiments above
thompson_sampling_solver = ThompsonSampling(bandit_10_arm)
thompson_sampling_solver.run(5000)  # 5000 pulls — same horizon as the other algorithms in this file
print(f"每台老虎机的使用次数列表{thompson_sampling_solver.counts.astype(int)}")
print('汤普森采样算法的累积懊悔为：', thompson_sampling_solver.regret)
plot_results([thompson_sampling_solver], ["ThompsonSampling"])
