import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from RL.多臂老虎机.BernoulliBandit import BernoulliBandit
from RL.多臂老虎机.EpsilonGreedy import EpsilonGreedy
from RL.多臂老虎机.DecayingEpsilonGreedy import DecayingEpsilonGreedy
matplotlib.use("TkAgg")  # interactive backend so plt.show() opens a window

# Experiment configuration.
T = 10000     # steps per single experiment
runs = 10000  # number of repeated experiments
K = 10        # number of bandit arms

# Per-run cumulative regret for each strategy.
regrets_eps_fixed = []
regrets_eps_decay = []

for run in range(runs):
    # Seed per run: each run gets a reproducible (but distinct) random
    # environment, and both solvers within a run face the identical
    # bandit instance, so the within-run comparison is fair.
    np.random.seed(run)

    # Randomly generate a K-armed Bernoulli bandit environment.
    bandit = BernoulliBandit(K)

    # Fixed exploration rate ε = 1e-4.
    solver_fixed = EpsilonGreedy(bandit, epsilon=1e-4)
    solver_fixed.run(T)
    regrets_eps_fixed.append(solver_fixed.regret)

    # Decaying exploration rate ε = 1/t.
    solver_decay = DecayingEpsilonGreedy(bandit)
    solver_decay.run(T)
    regrets_eps_decay.append(solver_decay.regret)

# Compute the mean regrets once and reuse them below (previously these
# variables were assigned but unused, and the prints recomputed np.mean).
avg_regret_fixed = np.mean(regrets_eps_fixed)
avg_regret_decay = np.mean(regrets_eps_decay)

print("固定 ε=1e-4 平均懊悔:", avg_regret_fixed,
      "方差:", np.var(regrets_eps_fixed))

print("动态 ε=1/t 平均懊悔:", avg_regret_decay,
      "方差:", np.var(regrets_eps_decay))

# Optional: plot the per-run cumulative regret of both strategies.
plt.figure(figsize=(10, 5))
plt.plot(regrets_eps_fixed, label="Fixed ε=1e-4")
plt.plot(regrets_eps_decay, label="Decaying ε=1/t")
plt.xlabel("Experiment runs")
plt.ylabel("Cumulative Regret")
plt.title("Comparison of Fixed vs Decaying ε-greedy")
plt.legend()
plt.show()
