# Slot machine (multi-armed bandit) game
import numpy as np

from Agent.EpsilonGreedy.ucb import UCBAgent
from Game.ArmedBandit.env import MultiArmedBanditEnv
from Game.ArmedBandit.agent import TdAgent
from Agent.EpsilonGreedy.epsion_greedy import EpsilonGreedyAgent
import matplotlib.pyplot as plt


def train_agent(env, agent, steps=100):
    """Run *agent* against the bandit *env* for a fixed number of pulls.

    On each step the agent picks an arm, the environment returns a reward,
    and the agent's estimates are updated with that (arm, reward) pair.

    Args:
        env: bandit environment exposing ``step(arm)`` and ``best_arm``.
        agent: policy exposing ``choose_action()`` and ``update(arm, reward)``.
        steps: number of pulls to simulate.

    Returns:
        Tuple of numpy arrays ``(rewards, best_arm_hits)`` of length
        ``steps``; the second holds 1 where the chosen arm was ``env.best_arm``
        and 0 otherwise.
    """
    reward_log = []
    hit_log = []

    for _ in range(steps):
        action = agent.choose_action()
        payoff = env.step(action)
        agent.update(action, payoff)

        reward_log.append(payoff)
        # int(True) == 1, int(False) == 0 — same values as the original 1/0.
        hit_log.append(int(action == env.best_arm))

    return np.array(reward_log), np.array(hit_log)


def plot_results(algorithm_name, rewards, best_arm_rates):
    """Show two side-by-side panels: running-average reward and best-arm rate.

    Args:
        algorithm_name: legend label for both curves.
        rewards: per-step rewards (array-like).
        best_arm_rates: per-step 0/1 best-arm indicators (array-like).
    """
    plt.figure(figsize=(12, 5))

    # Each panel plots the running mean of its series: cumsum(x) / step index.
    panels = (
        (1, "Average Reward", rewards),
        (2, "Best Arm Rate", best_arm_rates),
    )
    for position, ylabel, series in panels:
        plt.subplot(1, 2, position)
        running_mean = np.cumsum(series) / np.arange(1, len(series) + 1)
        plt.plot(running_mean, label=algorithm_name)
        plt.xlabel("Steps")
        plt.ylabel(ylabel)
        plt.legend()

    plt.show()


# Create the environment (2-armed bandit) shared by all agent experiments.
env = MultiArmedBanditEnv(k_arms=2)

# # ε-Greedy experiment (kept commented out for comparison runs)
# epsilon_agent = EpsilonGreedyAgent(k_arms=env.k, epsilon=0.1)
# epsilon_rewards, epsilon_best = train_agent(env, epsilon_agent, steps=200)
# plot_results("ε-Greedy (ε=0.1)", epsilon_rewards, epsilon_best)

# UCB experiment (kept commented out for comparison runs)
# ucb_agent = epsilon_agent = UCBAgent(k_arms=env.k, c=2)
# ucb_rewards, ucb_best = train_agent(env, ucb_agent, steps=200)
# plot_results("UCB (c=2)", ucb_rewards, ucb_best)

# TD-style agent: train for 200 steps, then plot running averages.
# NOTE(review): env is constructed with k_arms=2 but read back as env.k —
# assumes MultiArmedBanditEnv exposes the arm count as `k`; confirm in env.py.
td_agent = TdAgent(k_arms=env.k)
td_rewards, td_best = train_agent(env, td_agent, steps=200)
plot_results("td", td_rewards, td_best)