{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "220141a1",
   "metadata": {},
   "source": [
    "# Play BernoulliMABEnv-v0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "de214dc0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import logging\n",
    "\n",
    "import numpy as np\n",
    "np.random.seed(0)\n",
    "import scipy.stats as stats\n",
    "import gym\n",
    "import gym.spaces as spaces\n",
    "import gym.utils.seeding as seeding\n",
    "\n",
    "logging.basicConfig(level=logging.DEBUG,\n",
    "        format='%(asctime)s [%(levelname)s] %(message)s',\n",
    "        stream=sys.stdout, datefmt='%H:%M:%S')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2ec533c5",
   "metadata": {},
   "source": [
    "### Environment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "46bf842b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BernoulliMABEnv(gym.Env):\n",
    "    \"\"\" Multi-Armed Bandit (MAB) with Bernoulli rewards.\n",
    "\n",
    "    Stateless environment: the observation space is empty and every\n",
    "    step terminates the episode immediately (one pull per episode).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, n=10, means=None):\n",
    "        super(BernoulliMABEnv, self).__init__()\n",
    "        # empty observation box: bandits carry no state between pulls\n",
    "        # (np.float was removed in NumPy 1.24; use the builtin float)\n",
    "        self.observation_space = spaces.Box(low=0, high=0, shape=(0,), dtype=float)\n",
    "        self.action_space = spaces.Discrete(n)\n",
    "        self.seed(0)\n",
    "        # `means or ...` would raise 'truth value is ambiguous' for a\n",
    "        # non-empty ndarray and would discard falsy-but-valid inputs,\n",
    "        # so test explicitly against None instead\n",
    "        self.means = np.asarray(means) if means is not None else self.np_random.rand(n)\n",
    "\n",
    "    def seed(self, seed=None):\n",
    "        self.np_random, seed = seeding.np_random(seed)\n",
    "        return [seed,]\n",
    "\n",
    "    def reset(self):\n",
    "        # empty observation: there is nothing to observe\n",
    "        return np.empty(0, dtype=float)\n",
    "\n",
    "    def step(self, action):\n",
    "        # reward ~ Bernoulli(mean of the pulled arm); episode ends at once\n",
    "        mean = self.means[action]\n",
    "        reward = self.np_random.binomial(1, mean)\n",
    "        observation = np.empty(0, dtype=float)\n",
    "        return observation, reward, True, {}\n",
    "\n",
    "\n",
    "from gym.envs.registration import register\n",
    "register(\n",
    "        id='BernoulliMABEnv-v0',\n",
    "        entry_point=BernoulliMABEnv,\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fec9df7c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "00:00:04 [INFO] action_space: Discrete(10)\n",
      "00:00:04 [INFO] np_random: RandomState(MT19937)\n",
      "00:00:04 [INFO] means: [0.05436006 0.96539094 0.63269095 0.29001734 0.10248426 0.67307635\n",
      " 0.39257674 0.66984607 0.05983897 0.52698724]\n",
      "00:00:04 [INFO] spec: EnvSpec(BernoulliMABEnv-v0)\n",
      "00:00:04 [INFO] id: BernoulliMABEnv-v0\n",
      "00:00:04 [INFO] entry_point: <class '__main__.BernoulliMABEnv'>\n",
      "00:00:04 [INFO] reward_threshold: None\n",
      "00:00:04 [INFO] nondeterministic: False\n",
      "00:00:04 [INFO] max_episode_steps: None\n",
      "00:00:04 [INFO] _kwargs: {}\n",
      "00:00:04 [INFO] _env_name: BernoulliMABEnv\n"
     ]
    }
   ],
   "source": [
    "env = gym.make('BernoulliMABEnv-v0')\n",
    "env.seed(0)\n",
    "# log every attribute of the environment (except the noisy\n",
    "# observation_space) and of its registry spec\n",
    "for key, value in vars(env).items():\n",
    "    if key == \"observation_space\":\n",
    "        continue\n",
    "    logging.info('%s: %s', key, value)\n",
    "for key, value in vars(env.spec).items():\n",
    "    logging.info('%s: %s', key, value)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2135c5dc",
   "metadata": {},
   "source": [
    "### Agent"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5883b966",
   "metadata": {},
   "source": [
    "$\\epsilon$-greedy Agent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "93bdd23c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class EpsilonGreedyAgent:\n",
    "    \"\"\" Bandit agent that explores uniformly with probability epsilon. \"\"\"\n",
    "\n",
    "    def __init__(self, env, epsilon=0.1):\n",
    "        self.epsilon = epsilon # exploration probability\n",
    "        self.action_n = env.action_space.n\n",
    "        # per-arm pull counts and incremental action-value estimates\n",
    "        # (np.float was removed in NumPy 1.24; use the builtin float)\n",
    "        self.counts = np.zeros(self.action_n, dtype=float)\n",
    "        self.qs = np.zeros(self.action_n, dtype=float)\n",
    "\n",
    "    def reset(self, mode=None):\n",
    "        self.mode = mode\n",
    "\n",
    "    def step(self, observation, reward, done):\n",
    "        if np.random.rand() < self.epsilon:\n",
    "            action = np.random.randint(self.action_n) # explore\n",
    "        else:\n",
    "            action = self.qs.argmax() # exploit\n",
    "        if self.mode == 'train':\n",
    "            if done:\n",
    "                self.reward = reward # save reward\n",
    "            else:\n",
    "                self.action = action # save action\n",
    "        return action\n",
    "\n",
    "    def close(self):\n",
    "        # incremental sample-average update of the pulled arm's value\n",
    "        if self.mode == 'train':\n",
    "            self.counts[self.action] += 1\n",
    "            self.qs[self.action] += (self.reward - self.qs[self.action]) / self.counts[self.action]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a66bd73d",
   "metadata": {},
   "source": [
    "UCB1 Agent"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "ae202f52",
   "metadata": {},
   "outputs": [],
   "source": [
    "class UCB1Agent:\n",
    "    \"\"\" UCB1 bandit agent: pick the arm maximizing q + sqrt(2 ln t / n). \"\"\"\n",
    "\n",
    "    def __init__(self, env):\n",
    "        self.action_n = env.action_space.n\n",
    "        # per-arm pull counts and incremental action-value estimates\n",
    "        # (np.float was removed in NumPy 1.24; use the builtin float)\n",
    "        self.counts = np.zeros(self.action_n, dtype=float)\n",
    "        self.qs = np.zeros(self.action_n, dtype=float)\n",
    "\n",
    "    def reset(self, mode=None):\n",
    "        self.mode = mode\n",
    "\n",
    "    def step(self, observation, reward, done):\n",
    "        total_count = max(self.counts.sum(), 1) # lower bounded by 1\n",
    "        # clip counts away from zero so unpulled arms get a very large\n",
    "        # exploration bonus instead of a division-by-zero warning\n",
    "        sqrts = np.sqrt(2 * np.log(total_count) / self.counts.clip(min=0.01))\n",
    "        ucbs = self.qs + sqrts\n",
    "        action = ucbs.argmax()\n",
    "        if self.mode == 'train':\n",
    "            if done:\n",
    "                self.reward = reward # save reward\n",
    "            else:\n",
    "                self.action = action # save action\n",
    "        return action\n",
    "\n",
    "    def close(self):\n",
    "        # incremental sample-average update of the pulled arm's value\n",
    "        if self.mode == 'train':\n",
    "            self.counts[self.action] += 1\n",
    "            self.qs[self.action] += (self.reward - self.qs[self.action]) / \\\n",
    "                    self.counts[self.action]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3e2a2f30",
   "metadata": {},
   "source": [
    "Bayesian UCB Agent\n",
    "\n",
    "(Use Beta distribution)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f54b380c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BayesianUCBAgent:\n",
    "    \"\"\" Bayesian UCB agent with a Beta posterior per arm. \"\"\"\n",
    "\n",
    "    def __init__(self, env, coef=3.):\n",
    "        self.action_n = env.action_space.n\n",
    "        self.coef = coef # posterior std-devs added to the posterior mean\n",
    "        # Beta(1, 1) = uniform prior over each arm's success probability\n",
    "        # (np.float was removed in NumPy 1.24; use the builtin float)\n",
    "        self.alphas = np.ones(self.action_n, dtype=float)\n",
    "        self.betas = np.ones(self.action_n, dtype=float)\n",
    "\n",
    "    def reset(self, mode=None):\n",
    "        self.mode = mode\n",
    "\n",
    "    def step(self, observation, reward, done):\n",
    "        means = stats.beta.mean(self.alphas, self.betas)\n",
    "        stds = stats.beta.std(self.alphas, self.betas)\n",
    "        ucbs = means + self.coef * stds\n",
    "        action = ucbs.argmax()\n",
    "        if self.mode == 'train':\n",
    "            if done:\n",
    "                self.reward = reward # save reward\n",
    "            else:\n",
    "                self.action = action # save action\n",
    "        return action\n",
    "\n",
    "    def close(self):\n",
    "        # conjugate Beta-Bernoulli posterior update\n",
    "        if self.mode == 'train':\n",
    "            self.alphas[self.action] += self.reward\n",
    "            self.betas[self.action] += (1. - self.reward)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8f7a4c98",
   "metadata": {},
   "source": [
    "Thompson Sampling Agent\n",
    "\n",
    "(Use Beta distribution)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e4acb1d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ThompsonSamplingAgent:\n",
    "    \"\"\" Thompson sampling agent with a Beta posterior per arm. \"\"\"\n",
    "\n",
    "    def __init__(self, env):\n",
    "        self.action_n = env.action_space.n\n",
    "        # Beta(1, 1) = uniform prior over each arm's success probability\n",
    "        # (np.float was removed in NumPy 1.24; use the builtin float)\n",
    "        self.alphas = np.ones(self.action_n, dtype=float)\n",
    "        self.betas = np.ones(self.action_n, dtype=float)\n",
    "\n",
    "    def reset(self, mode=None):\n",
    "        self.mode = mode\n",
    "\n",
    "    def step(self, observation, reward, done):\n",
    "        # vectorized draw: one posterior sample per arm; parameters start\n",
    "        # at 1 and only ever increase, so no clamping is needed\n",
    "        samples = np.random.beta(self.alphas, self.betas)\n",
    "        action = np.argmax(samples)\n",
    "        if self.mode == 'train':\n",
    "            if done:\n",
    "                self.reward = reward # save reward\n",
    "            else:\n",
    "                self.action = action # save action\n",
    "        return action\n",
    "\n",
    "    def close(self):\n",
    "        # conjugate Beta-Bernoulli posterior update\n",
    "        if self.mode == 'train':\n",
    "            self.alphas[self.action] += self.reward\n",
    "            self.betas[self.action] += (1. - self.reward)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "99948a5d",
   "metadata": {},
   "source": [
    "### Online Interaction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "82f79e37",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "00:00:04 [INFO] trial 0: average episode reward = 0.96 ± 0.20, regret = 27.39\n",
      "00:00:04 [INFO] trial 1: average episode reward = 0.97 ± 0.17, regret = 12.39\n",
      "00:00:04 [INFO] trial 2: average episode reward = 0.99 ± 0.10, regret = 17.39\n",
      "00:00:04 [INFO] trial 3: average episode reward = 0.92 ± 0.27, regret = 1.39\n",
      "00:00:04 [INFO] trial 4: average episode reward = 0.96 ± 0.20, regret = 33.39\n",
      "00:00:04 [INFO] trial 5: average episode reward = 0.96 ± 0.20, regret = 9.39\n",
      "00:00:05 [INFO] trial 6: average episode reward = 0.99 ± 0.10, regret = 21.39\n",
      "00:00:05 [INFO] trial 7: average episode reward = 0.97 ± 0.17, regret = 16.39\n",
      "00:00:05 [INFO] trial 8: average episode reward = 0.97 ± 0.17, regret = 24.39\n",
      "00:00:05 [INFO] trial 9: average episode reward = 0.98 ± 0.14, regret = 19.39\n",
      "00:00:05 [INFO] trial 10: average episode reward = 0.98 ± 0.14, regret = 9.39\n",
      "00:00:05 [INFO] trial 11: average episode reward = 0.97 ± 0.17, regret = 18.39\n",
      "00:00:05 [INFO] trial 12: average episode reward = 0.94 ± 0.24, regret = 23.39\n",
      "00:00:05 [INFO] trial 13: average episode reward = 0.94 ± 0.24, regret = 22.39\n",
      "00:00:05 [INFO] trial 14: average episode reward = 0.94 ± 0.24, regret = 16.39\n",
      "00:00:05 [INFO] trial 15: average episode reward = 0.94 ± 0.24, regret = 14.39\n",
      "00:00:05 [INFO] trial 16: average episode reward = 0.94 ± 0.24, regret = 20.39\n",
      "00:00:05 [INFO] trial 17: average episode reward = 0.97 ± 0.17, regret = 16.39\n",
      "00:00:06 [INFO] trial 18: average episode reward = 0.95 ± 0.22, regret = 5.39\n",
      "00:00:06 [INFO] trial 19: average episode reward = 0.96 ± 0.20, regret = 16.39\n",
      "00:00:06 [INFO] trial 20: average episode reward = 0.94 ± 0.24, regret = 22.39\n",
      "00:00:06 [INFO] trial 21: average episode reward = 0.99 ± 0.10, regret = 28.39\n",
      "00:00:06 [INFO] trial 22: average episode reward = 0.97 ± 0.17, regret = 20.39\n",
      "00:00:06 [INFO] trial 23: average episode reward = 0.97 ± 0.17, regret = 27.39\n",
      "00:00:06 [INFO] trial 24: average episode reward = 0.95 ± 0.22, regret = 31.39\n",
      "00:00:06 [INFO] trial 25: average episode reward = 0.92 ± 0.27, regret = 19.39\n",
      "00:00:06 [INFO] trial 26: average episode reward = 0.99 ± 0.10, regret = 17.39\n",
      "00:00:06 [INFO] trial 27: average episode reward = 0.97 ± 0.17, regret = 27.39\n",
      "00:00:06 [INFO] trial 28: average episode reward = 0.96 ± 0.20, regret = 21.39\n",
      "00:00:06 [INFO] trial 29: average episode reward = 0.97 ± 0.17, regret = 29.39\n",
      "00:00:07 [INFO] trial 30: average episode reward = 0.98 ± 0.14, regret = 11.39\n",
      "00:00:07 [INFO] trial 31: average episode reward = 0.96 ± 0.20, regret = 12.39\n",
      "00:00:07 [INFO] trial 32: average episode reward = 0.96 ± 0.20, regret = 11.39\n",
      "00:00:07 [INFO] trial 33: average episode reward = 0.96 ± 0.20, regret = 13.39\n",
      "00:00:07 [INFO] trial 34: average episode reward = 0.96 ± 0.20, regret = 23.39\n",
      "00:00:07 [INFO] trial 35: average episode reward = 0.93 ± 0.26, regret = 20.39\n",
      "00:00:07 [INFO] trial 36: average episode reward = 0.96 ± 0.20, regret = 16.39\n",
      "00:00:07 [INFO] trial 37: average episode reward = 0.98 ± 0.14, regret = 17.39\n",
      "00:00:07 [INFO] trial 38: average episode reward = 0.96 ± 0.20, regret = 20.39\n",
      "00:00:07 [INFO] trial 39: average episode reward = 0.96 ± 0.20, regret = 21.39\n",
      "00:00:07 [INFO] trial 40: average episode reward = 0.97 ± 0.17, regret = 22.39\n",
      "00:00:08 [INFO] trial 41: average episode reward = 0.99 ± 0.10, regret = 9.39\n",
      "00:00:08 [INFO] trial 42: average episode reward = 0.91 ± 0.29, regret = 7.39\n",
      "00:00:08 [INFO] trial 43: average episode reward = 0.93 ± 0.26, regret = 3.39\n",
      "00:00:08 [INFO] trial 44: average episode reward = 0.96 ± 0.20, regret = 35.39\n",
      "00:00:08 [INFO] trial 45: average episode reward = 0.97 ± 0.17, regret = 23.39\n",
      "00:00:08 [INFO] trial 46: average episode reward = 0.93 ± 0.26, regret = 24.39\n",
      "00:00:08 [INFO] trial 47: average episode reward = 0.98 ± 0.14, regret = 29.39\n",
      "00:00:08 [INFO] trial 48: average episode reward = 0.95 ± 0.22, regret = 14.39\n",
      "00:00:08 [INFO] trial 49: average episode reward = 0.98 ± 0.14, regret = 21.39\n",
      "00:00:08 [INFO] trial 50: average episode reward = 0.96 ± 0.20, regret = 13.39\n",
      "00:00:08 [INFO] trial 51: average episode reward = 0.96 ± 0.20, regret = 20.39\n",
      "00:00:09 [INFO] trial 52: average episode reward = 0.97 ± 0.17, regret = 1.39\n",
      "00:00:09 [INFO] trial 53: average episode reward = 0.95 ± 0.22, regret = 13.39\n",
      "00:00:09 [INFO] trial 54: average episode reward = 0.99 ± 0.10, regret = 12.39\n",
      "00:00:09 [INFO] trial 55: average episode reward = 0.94 ± 0.24, regret = 9.39\n",
      "00:00:09 [INFO] trial 56: average episode reward = 0.94 ± 0.24, regret = 19.39\n",
      "00:00:09 [INFO] trial 57: average episode reward = 0.93 ± 0.26, regret = 18.39\n",
      "00:00:09 [INFO] trial 58: average episode reward = 0.95 ± 0.22, regret = 15.39\n",
      "00:00:09 [INFO] trial 59: average episode reward = 0.97 ± 0.17, regret = 21.39\n",
      "00:00:09 [INFO] trial 60: average episode reward = 0.95 ± 0.22, regret = 7.39\n",
      "00:00:09 [INFO] trial 61: average episode reward = 0.97 ± 0.17, regret = 21.39\n",
      "00:00:09 [INFO] trial 62: average episode reward = 0.97 ± 0.17, regret = 21.39\n",
      "00:00:10 [INFO] trial 63: average episode reward = 0.97 ± 0.17, regret = 11.39\n",
      "00:00:10 [INFO] trial 64: average episode reward = 0.93 ± 0.26, regret = 19.39\n",
      "00:00:10 [INFO] trial 65: average episode reward = 0.97 ± 0.17, regret = 32.39\n",
      "00:00:10 [INFO] trial 66: average episode reward = 0.97 ± 0.17, regret = 29.39\n",
      "00:00:10 [INFO] trial 67: average episode reward = 0.96 ± 0.20, regret = 15.39\n",
      "00:00:10 [INFO] trial 68: average episode reward = 0.93 ± 0.26, regret = 15.39\n",
      "00:00:10 [INFO] trial 69: average episode reward = 0.96 ± 0.20, regret = 12.39\n",
      "00:00:10 [INFO] trial 70: average episode reward = 0.93 ± 0.26, regret = 12.39\n",
      "00:00:10 [INFO] trial 71: average episode reward = 0.98 ± 0.14, regret = 2.39\n",
      "00:00:10 [INFO] trial 72: average episode reward = 0.98 ± 0.14, regret = 20.39\n",
      "00:00:10 [INFO] trial 73: average episode reward = 0.95 ± 0.22, regret = 15.39\n",
      "00:00:11 [INFO] trial 74: average episode reward = 0.97 ± 0.17, regret = 17.39\n",
      "00:00:11 [INFO] trial 75: average episode reward = 0.93 ± 0.26, regret = 16.39\n",
      "00:00:11 [INFO] trial 76: average episode reward = 0.94 ± 0.24, regret = 25.39\n",
      "00:00:11 [INFO] trial 77: average episode reward = 0.96 ± 0.20, regret = 4.39\n",
      "00:00:11 [INFO] trial 78: average episode reward = 0.98 ± 0.14, regret = 9.39\n",
      "00:00:11 [INFO] trial 79: average episode reward = 0.95 ± 0.22, regret = 28.39\n",
      "00:00:11 [INFO] trial 80: average episode reward = 0.95 ± 0.22, regret = 26.39\n",
      "00:00:11 [INFO] trial 81: average episode reward = 0.98 ± 0.14, regret = 22.39\n",
      "00:00:11 [INFO] trial 82: average episode reward = 0.98 ± 0.14, regret = 9.39\n",
      "00:00:11 [INFO] trial 83: average episode reward = 0.97 ± 0.17, regret = 20.39\n",
      "00:00:11 [INFO] trial 84: average episode reward = 0.98 ± 0.14, regret = 12.39\n",
      "00:00:11 [INFO] trial 85: average episode reward = 0.97 ± 0.17, regret = 21.39\n",
      "00:00:12 [INFO] trial 86: average episode reward = 0.99 ± 0.10, regret = 23.39\n",
      "00:00:12 [INFO] trial 87: average episode reward = 0.98 ± 0.14, regret = 26.39\n",
      "00:00:12 [INFO] trial 88: average episode reward = 0.95 ± 0.22, regret = 17.39\n",
      "00:00:12 [INFO] trial 89: average episode reward = 0.99 ± 0.10, regret = 8.39\n",
      "00:00:12 [INFO] trial 90: average episode reward = 0.95 ± 0.22, regret = 21.39\n",
      "00:00:12 [INFO] trial 91: average episode reward = 0.94 ± 0.24, regret = 14.39\n",
      "00:00:12 [INFO] trial 92: average episode reward = 1.00 ± 0.00, regret = 8.39\n",
      "00:00:12 [INFO] trial 93: average episode reward = 0.94 ± 0.24, regret = 15.39\n",
      "00:00:12 [INFO] trial 94: average episode reward = 0.96 ± 0.20, regret = 16.39\n",
      "00:00:12 [INFO] trial 95: average episode reward = 0.90 ± 0.30, regret = 17.39\n",
      "00:00:12 [INFO] trial 96: average episode reward = 0.96 ± 0.20, regret = 11.39\n",
      "00:00:12 [INFO] trial 97: average episode reward = 0.97 ± 0.17, regret = 32.39\n",
      "00:00:13 [INFO] trial 98: average episode reward = 0.97 ± 0.17, regret = 24.39\n",
      "00:00:13 [INFO] trial 99: average episode reward = 0.97 ± 0.17, regret = 17.39\n",
      "00:00:13 [INFO] average regret = 17.99 ± 7.34\n"
     ]
    }
   ],
   "source": [
    "def play_episode(env, agent, max_episode_steps=None, mode=None, render=False):\n",
    "    \"\"\" Run one agent-environment episode.\n",
    "\n",
    "    The agent sees the terminal transition (done=True) before the loop\n",
    "    breaks, so it can save the final reward when training.\n",
    "\n",
    "    Returns (episode_reward, elapsed_steps).\n",
    "    \"\"\"\n",
    "    observation, reward, done = env.reset(), 0., False\n",
    "    agent.reset(mode=mode)\n",
    "    episode_reward, elapsed_steps = 0., 0\n",
    "    while True:\n",
    "        action = agent.step(observation, reward, done)\n",
    "        if render:\n",
    "            env.render()\n",
    "        if done:\n",
    "            break\n",
    "        observation, reward, done, _ = env.step(action)\n",
    "        episode_reward += reward\n",
    "        elapsed_steps += 1\n",
    "        if max_episode_steps and elapsed_steps >= max_episode_steps:\n",
    "            break\n",
    "    agent.close()\n",
    "    return episode_reward, elapsed_steps\n",
    "\n",
    "\n",
    "trial_regrets = []\n",
    "for trial in range(100):\n",
    "    # create a fresh agent for each trial - swap in EpsilonGreedyAgent,\n",
    "    # UCB1Agent or BayesianUCBAgent here to compare algorithms\n",
    "    agent = ThompsonSamplingAgent(env)\n",
    "\n",
    "    # train\n",
    "    episode_rewards = []\n",
    "    for episode in range(1000):\n",
    "        episode_reward, elapsed_steps = play_episode(env.unwrapped, agent,\n",
    "                max_episode_steps=env.spec.max_episode_steps, mode='train')\n",
    "        episode_rewards.append(episode_reward)\n",
    "    # per-episode regret relative to always pulling the best arm\n",
    "    regrets = env.means.max() - np.array(episode_rewards)\n",
    "    trial_regret = regrets.sum()\n",
    "    trial_regrets.append(trial_regret)\n",
    "\n",
    "    # test\n",
    "    episode_rewards = []\n",
    "    for episode in range(100):\n",
    "        episode_reward, elapsed_steps = play_episode(env, agent)\n",
    "        episode_rewards.append(episode_reward)\n",
    "    logging.info('trial %d: average episode reward = %.2f ± %.2f, regret = %.2f',\n",
    "            trial, np.mean(episode_rewards), np.std(episode_rewards),\n",
    "            trial_regret)\n",
    "\n",
    "logging.info('average regret = %.2f ± %.2f',\n",
    "        np.mean(trial_regrets), np.std(trial_regrets))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a0bff45b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# release any resources held by the environment\n",
    "env.close()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
