{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "bd1a5ea3",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "np.set_printoptions(precision=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6b506e79",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "随机生成了一个10臂伯努利老虎机\n",
      "获奖概率最大的拉杆为1号,其获奖概率为0.72\n"
     ]
    }
   ],
   "source": [
    "class BernoulliBandit:\n",
    "    \"\"\"Bernoulli multi-armed bandit; K is the number of arms.\"\"\"\n",
    "\n",
    "    def __init__(self, K):\n",
    "        # Draw K win probabilities uniformly from [0, 1), rounded to 2 decimals.\n",
    "        self.probs = np.round(np.random.uniform(size=K), 2)\n",
    "        # Index of the arm with the highest win probability, and that probability.\n",
    "        self.best_idx = np.argmax(self.probs)\n",
    "        self.best_prob = self.probs[self.best_idx]\n",
    "        self.K = K\n",
    "\n",
    "    def step(self, k):\n",
    "        \"\"\"Pull arm k: return 1 (reward) with probability probs[k], else 0.\"\"\"\n",
    "        return 1 if np.random.rand() < self.probs[k] else 0\n",
    "\n",
    "# Fix the random seed so the experiment is reproducible.\n",
    "np.random.seed(1)\n",
    "K = 10\n",
    "bandit_10_arm = BernoulliBandit(K)\n",
    "print(\"随机生成了一个%d臂伯努利老虎机\" % K)\n",
    "print(\"获奖概率最大的拉杆为%d号,其获奖概率为%.2f\" %\n",
    "      (bandit_10_arm.best_idx, bandit_10_arm.best_prob))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ad3cf3d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Solver:\n",
    "    \"\"\"Basic framework for multi-armed bandit algorithms.\n",
    "\n",
    "    Subclasses implement run_one_step() to pick an arm each step.\n",
    "    \"\"\"\n",
    "    def __init__(self, bandit):\n",
    "        self.bandit = bandit\n",
    "        # Number of times each arm has been pulled\n",
    "        self.counts = np.zeros(self.bandit.K)\n",
    "        # Cumulative regret accrued so far\n",
    "        self.regret = 0\n",
    "        # Arm chosen at each step\n",
    "        self.actions = []\n",
    "        # Cumulative regret recorded after each step\n",
    "        self.regrets = []\n",
    "\n",
    "    def update_regret(self, k):\n",
    "        # One step's expected regret is best_prob - probs[k]; k is the pulled arm.\n",
    "        self.regret += self.bandit.best_prob - self.bandit.probs[k]\n",
    "        # Bug fix: append the scalar cumulative regret, not the list itself.\n",
    "        # The original `self.regrets.append(self.regrets)` built a\n",
    "        # self-referential list, so the recorded regret curve was garbage.\n",
    "        self.regrets.append(self.regret)\n",
    "\n",
    "    def run_one_step(self):\n",
    "        # Return the arm to pull this step; implemented by each concrete policy.\n",
    "        raise NotImplementedError\n",
    "\n",
    "    def run(self, num_steps):\n",
    "        # Run the policy for num_steps total steps, recording actions and regret.\n",
    "        for _ in range(num_steps):\n",
    "            k = self.run_one_step()\n",
    "            self.counts[k] += 1\n",
    "            self.actions.append(k)\n",
    "            self.update_regret(k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "54116131",
   "metadata": {},
   "outputs": [],
   "source": [
    "class EpsilonGreedy(Solver):\n",
    "    \"\"\"Epsilon-greedy policy, built on the Solver framework.\"\"\"\n",
    "    def __init__(self, bandit, epsilon=0.01, init_prob=1.0):\n",
    "        super().__init__(bandit)\n",
    "        self.epsilon = epsilon\n",
    "        # Optimistic initial estimate of every arm's expected reward\n",
    "        self.estimates = np.array([init_prob] * self.bandit.K)\n",
    "\n",
    "    def run_one_step(self):\n",
    "        # Explore with probability epsilon, otherwise exploit the best estimate.\n",
    "        explore = np.random.random() < self.epsilon\n",
    "        if explore:\n",
    "            k = np.random.randint(0, self.bandit.K)  # uniformly random arm\n",
    "        else:\n",
    "            k = np.argmax(self.estimates)  # arm with the highest estimate\n",
    "        r = self.bandit.step(k)\n",
    "        # Incremental running-mean update: est_k += (r - est_k) / n_k\n",
    "        self.estimates[k] += 1 / (self.counts[k] + 1) * (r - self.estimates[k])\n",
    "        return k"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8514bd12",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epsilon-贪婪算法的累积懊悔为： 25.400000000000006\n"
     ]
    }
   ],
   "source": [
    "def plot_results(solvers, solver_names):\n",
    "    \"\"\"Plot cumulative regret over time.\n",
    "\n",
    "    solvers holds one bandit policy per curve; solver_names holds the\n",
    "    matching legend labels.\n",
    "    \"\"\"\n",
    "    for idx, solver in enumerate(solvers):\n",
    "        steps = range(len(solver.regrets))\n",
    "        plt.plot(steps, solver.regrets, label=solver_names[idx])\n",
    "    plt.xlabel('Time steps')\n",
    "    plt.ylabel('Cumulative regrets')\n",
    "    plt.title('%d-armed bandit' % solvers[0].bandit.K)\n",
    "    plt.legend()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "np.random.seed(1)\n",
    "epsilon_greedy_solver = EpsilonGreedy(bandit_10_arm, epsilon=0.01)\n",
    "epsilon_greedy_solver.run(5000)\n",
    "print('epsilon-贪婪算法的累积懊悔为：', epsilon_greedy_solver.regret)\n",
    "# plot_results([epsilon_greedy_solver], [\"EpsilonGreedy\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "6dfb528f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epsilon=0.0001贪婪算法的累计懊悔为: 5.87\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epsilon=0.01贪婪算法的累计懊悔为: 28.71000000000002\n",
      "epsilon=0.1贪婪算法的累计懊悔为: 222.0999999999997\n",
      "epsilon=0.25贪婪算法的累计懊悔为: 528.3300000000004\n",
      "epsilon=0.5贪婪算法的累计懊悔为: 982.9999999999972\n"
     ]
    }
   ],
   "source": [
    "np.random.seed(0)\n",
    "epsilons = [1e-4, 0.01, 0.1, 0.25, 0.5]\n",
    "# One epsilon-greedy solver per epsilon value, all sharing the same bandit\n",
    "epsilon_greedy_solver_list = [EpsilonGreedy(bandit_10_arm, epsilon=e)\n",
    "                              for e in epsilons]\n",
    "epsilon_greedy_solver_name = [\"epsilon={}\".format(e) for e in epsilons]\n",
    "# Run each solver for the same horizon and report its cumulative regret\n",
    "for name, solver in zip(epsilon_greedy_solver_name, epsilon_greedy_solver_list):\n",
    "    solver.run(5000)\n",
    "    print(f'{name}贪婪算法的累计懊悔为:', solver.regret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1bc300d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DecayingEpsilonGreedy(Solver):\n",
    "    \"\"\"Epsilon-greedy with epsilon decaying over time (epsilon_t = 1/t).\"\"\"\n",
    "    def __init__(self, bandit, init_prob=1.0):\n",
    "        super().__init__(bandit)\n",
    "        # Optimistic initial estimate of every arm's expected reward\n",
    "        self.estimates = np.array([init_prob] * self.bandit.K)\n",
    "        # Total number of steps taken so far\n",
    "        self.total_count = 0\n",
    "\n",
    "    def run_one_step(self):\n",
    "        self.total_count += 1\n",
    "        # Explore with probability 1/t, otherwise exploit the best estimate\n",
    "        explore = np.random.random() < 1 / self.total_count\n",
    "        k = np.random.randint(0, self.bandit.K) if explore else np.argmax(self.estimates)\n",
    "        r = self.bandit.step(k)\n",
    "        # Incremental running-mean update of the pulled arm's estimate\n",
    "        self.estimates[k] += 1 / (self.counts[k] + 1) * (r - self.estimates[k])\n",
    "        return k"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "5e70e160",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epsilon值衰减的贪婪算法的累积懊悔为： 10.07\n"
     ]
    }
   ],
   "source": [
    "# Run the decaying-epsilon policy on the same bandit with a fixed seed.\n",
    "np.random.seed(1)\n",
    "decaying_epsilon_greedy_solver = DecayingEpsilonGreedy(bandit_10_arm)\n",
    "decaying_epsilon_greedy_solver.run(5000)\n",
    "print('epsilon值衰减的贪婪算法的累积懊悔为：', decaying_epsilon_greedy_solver.regret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e3c7c483",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "上置信界算法的累计懊悔为： 79.64000000000003\n"
     ]
    }
   ],
   "source": [
    "class UCB(Solver):\n",
    "    \"\"\"Upper-confidence-bound (UCB) policy, built on the Solver framework.\"\"\"\n",
    "    def __init__(self, bandit, coef, init_prob=1.0):\n",
    "        super().__init__(bandit)\n",
    "        # Total number of steps taken so far\n",
    "        self.total_count = 0\n",
    "        # Optimistic initial estimate of every arm's expected reward\n",
    "        self.estimates = np.array([init_prob] * self.bandit.K)\n",
    "        # Weight of the exploration (uncertainty) bonus\n",
    "        self.coef = coef\n",
    "\n",
    "    def run_one_step(self):\n",
    "        self.total_count += 1\n",
    "        # Upper confidence bound: estimate + coef * sqrt(log t / (2 * (n_k + 1)))\n",
    "        bonus = np.sqrt(np.log(self.total_count) / (2 * (self.counts + 1)))\n",
    "        k = np.argmax(self.estimates + self.coef * bonus)\n",
    "        r = self.bandit.step(k)\n",
    "        # Incremental running-mean update of the pulled arm's estimate\n",
    "        self.estimates[k] += 1 / (self.counts[k] + 1) * (r - self.estimates[k])\n",
    "        return k\n",
    "\n",
    "np.random.seed(1)\n",
    "# Coefficient weighting the uncertainty term\n",
    "coef = 1\n",
    "UCB_solver = UCB(bandit_10_arm, coef)\n",
    "UCB_solver.run(5000)\n",
    "print(\"上置信界算法的累计懊悔为：\", UCB_solver.regret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "c3fd0cd3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "汤普森采样算法的累积懊悔为： 47.86\n"
     ]
    }
   ],
   "source": [
    "class ThompsonSampling(Solver):\n",
    "    \"\"\"Thompson sampling, built on the Solver framework.\n",
    "\n",
    "    Maintains a Beta(a, b) posterior over each arm's win probability.\n",
    "    \"\"\"\n",
    "    def __init__(self, bandit):\n",
    "        super().__init__(bandit)\n",
    "        # Beta posterior parameters per arm: _a counts observed rewards of 1,\n",
    "        # _b counts observed rewards of 0; both start at 1 (uniform prior).\n",
    "        self._a = np.ones(self.bandit.K)\n",
    "        self._b = np.ones(self.bandit.K)\n",
    "\n",
    "    def run_one_step(self):\n",
    "        # Sample one win-probability estimate per arm from its Beta posterior\n",
    "        # and pull the arm with the largest sample.\n",
    "        samples = np.random.beta(self._a, self._b)\n",
    "        k = np.argmax(samples)\n",
    "        r = self.bandit.step(k)\n",
    "        # Bayesian update of the pulled arm's posterior:\n",
    "        # reward 1 increments _a[k], reward 0 increments _b[k].\n",
    "        self._a[k] += r\n",
    "        self._b[k] += (1 - r)\n",
    "        return k\n",
    "\n",
    "# Backward-compatible alias for the original misspelled class name.\n",
    "ThompsonSampline = ThompsonSampling\n",
    "\n",
    "np.random.seed(1)\n",
    "thompson_sampling_solver = ThompsonSampling(bandit_10_arm)\n",
    "thompson_sampling_solver.run(5000)\n",
    "print('汤普森采样算法的累积懊悔为：', thompson_sampling_solver.regret)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
