{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "Copyright 2022 Google LLC\n",
        "\n",
        "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
        "you may not use this file except in compliance with the License.\n",
        "You may obtain a copy of the License at\n",
        "\n",
        "    http://www.apache.org/licenses/LICENSE-2.0\n",
        "\n",
        "Unless required by applicable law or agreed to in writing, software\n",
        "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
        "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
        "See the License for the specific language governing permissions and\n",
        "limitations under the License."
      ],
      "metadata": {
        "id": "w8LaKNHHyGIB"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "tCso6AsdwbVN"
      },
      "outputs": [],
      "source": [
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "import scipy as sp\n",
        "from scipy import special"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "def KL(mu, nu, dist='Gaussian'):\n",
        "  \"\"\"Pointwise KL divergence KL(mu || nu) for one-parameter exponential families.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  mu, nu:\n",
        "    array of size (K, n_instances), where K is the number of arms,\n",
        "    and n_instances is the number of problem instances.\n",
        "    values denote the expected reward of arms.\n",
        "  dist:\n",
        "    one of ['Gaussian', 'Bernoulli', 'Exponential'].\n",
        "    Gaussian assumes unit variance; Bernoulli assumes values in (0, 1).\n",
        "    Returns None for an unrecognized dist.\n",
        "  \"\"\"\n",
        "  if dist == 'Gaussian':\n",
        "    return (mu-nu)**2/2\n",
        "  elif dist == 'Bernoulli':\n",
        "    return mu*np.log(mu/nu) + (1-mu)*np.log((1-mu)/(1-nu))\n",
        "  elif dist == 'Exponential':\n",
        "    return np.log(mu/nu) + nu/mu - 1\n",
        "\n",
        "def KLRatio(mu, w, dist='Gaussian'):\n",
        "  \"\"\"Ratio of the best arm's KL term to each arm's KL term.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  mu:\n",
        "    array of size (K, n_instances) of expected rewards.\n",
        "  w:\n",
        "    array of size (K, n_instances) of arm weights.\n",
        "\n",
        "  Output\n",
        "  -------\n",
        "  KL(mu1, (w1*mu1+wa*mua)/(w1+wa))/KL(mua, (w1*mu1+wa*mua)/(w1+wa)) for every arm a.\n",
        "  NOTE: the best arm's own row evaluates to 0/0 (nan); callers overwrite it.\n",
        "  \"\"\"\n",
        "  n = mu.shape[1]\n",
        "  best_arm_idx = np.argmax(mu, axis=0)\n",
        "  w_best = w[best_arm_idx, np.arange(n)]\n",
        "  mu_best = mu[best_arm_idx, np.arange(n)]\n",
        "  # weight-averaged mean of each arm with the best arm\n",
        "  mu_avg = (w*mu+(w_best*mu_best)[None,:])/(w+w_best[None,:])\n",
        "\n",
        "  return KL(mu_best[None,:], mu_avg, dist)/KL(mu, mu_avg, dist)\n",
        "\n",
        "def KLObjective(mu, w, dist='Gaussian'):\n",
        "  \"\"\"Weighted transportation cost between the best arm and every other arm.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  mu:\n",
        "    array of size (K, n_instances) of expected rewards.\n",
        "  w:\n",
        "    array of size (K, n_instances) of arm weights.\n",
        "\n",
        "  Output\n",
        "  -------\n",
        "  w1*KL(mu1, (w1*mu1+wa*mua)/(w1+wa)) + wa*KL(mua, (w1*mu1+wa*mua)/(w1+wa)) for every arm a\n",
        "  \"\"\"\n",
        "  n = mu.shape[1]\n",
        "  best_arm_idx = np.argmax(mu, axis=0)\n",
        "  w_best = w[best_arm_idx, np.arange(n)]\n",
        "  mu_best = mu[best_arm_idx, np.arange(n)]\n",
        "  mu_avg = (w*mu+(w_best*mu_best)[None,:])/(w+w_best[None,:])\n",
        "\n",
        "  return w_best[None,:]*KL(mu_best[None,:], mu_avg, dist) + w*KL(mu, mu_avg, dist)\n",
        "\n",
        "def KLObjectiveGrad(mu, w, dist='Gaussian'):\n",
        "  \"\"\"Sub-gradient of the min-over-arms KL objective w.r.t. the weights.\n",
        "\n",
        "  Parameters\n",
        "  ----------\n",
        "  mu:\n",
        "    array of size (K, n_instances) of expected rewards.\n",
        "  w:\n",
        "    array of size (K, n_instances) of arm weights.\n",
        "\n",
        "  Output\n",
        "  -------\n",
        "  [KL(mu1, (w1*mu1+wa*mua)/(w1+wa)), KL(mua, (w1*mu1+wa*mua)/(w1+wa))]\n",
        "  where a is the competitor arm attaining the smallest objective value\n",
        "  (not, as the old docstring said, the arm with the smallest index).\n",
        "  \"\"\"\n",
        "  n = mu.shape[1]\n",
        "  best_arm_idx = np.argmax(mu, axis=0)\n",
        "  w_best = w[best_arm_idx, np.arange(n)]\n",
        "  mu_best = mu[best_arm_idx, np.arange(n)]\n",
        "  mu_avg = (w*mu+(w_best*mu_best)[None,:])/(w+w_best[None,:])\n",
        "\n",
        "  t1 = KL(mu_best[None,:], mu_avg, dist)\n",
        "  t2 = KL(mu, mu_avg, dist)\n",
        "  kl_obj = w_best[None,:]*t1 + w*t2\n",
        "  # exclude the best arm before the argmin (np.inf: np.Inf was removed in NumPy 2.0)\n",
        "  kl_obj[best_arm_idx, np.arange(n)] = np.inf\n",
        "  competitors = np.argmin(kl_obj, axis=0)\n",
        "  result = np.zeros_like(w)\n",
        "  result[best_arm_idx, np.arange(n)] = t1[competitors, np.arange(n)]\n",
        "  result[competitors, np.arange(n)] = t2[competitors, np.arange(n)]\n",
        "  return result"
      ],
      "metadata": {
        "id": "CbVuiCDuw3Bv"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class OfflineOnlineMaxMinSolver(object):\n",
        "    def __init__(self, mu, delta, n_offline, dist='Gaussian'):\n",
        "        \"\"\"\n",
        "        Parameters\n",
        "        ----------\n",
        "        mu:\n",
        "            array of size (K, n_instances), where K is the number of arms,\n",
        "            and n_instances is the number of problem instances.\n",
        "            values denote the expected reward of arms.\n",
        "        delta:\n",
        "            confidence parameter (scalar)\n",
        "        n_offline:\n",
        "            array of size (K, n_instances).\n",
        "            number of offline samples available for each arm\n",
        "        dist:\n",
        "            distribution of the arms (string)\n",
        "            options: [Gaussian, Bernoulli, Exponential]\n",
        "        \"\"\"\n",
        "        self.mu = mu\n",
        "\n",
        "        self.K = mu.shape[0]\n",
        "        self.n_instances = mu.shape[1]\n",
        "        self.best_arm_idx = np.argmax(self.mu, axis=0)\n",
        "\n",
        "        self.delta = delta\n",
        "        # stopping threshold: beta = log(1/delta) + log log(1/delta)\n",
        "        self.beta = -np.log(delta) + np.log(-np.log(delta))\n",
        "\n",
        "        self.n_offline = n_offline\n",
        "        self.dist = dist\n",
        "\n",
        "        # finite cap used in place of infinity during bisection\n",
        "        self.n_upper_bound = 1e10\n",
        "\n",
        "    def _na_star(self, n1_online, tol=1e-6):\n",
        "        \"\"\"Bisection search for Na*(N1): total samples each suboptimal arm\n",
        "        needs so its transportation cost reaches beta, given n1_online\n",
        "        online samples of the best arm.\n",
        "\n",
        "        Returns (online samples per arm, residual constraint error).\n",
        "        \"\"\"\n",
        "        n_total_lb = np.zeros_like(self.mu)\n",
        "        n_total_ub = self.n_upper_bound*np.ones_like(self.mu)\n",
        "        constraint_error = np.zeros_like(self.mu) # store the error in the constraint\n",
        "\n",
        "        n1_offline = self.n_offline[self.best_arm_idx, np.arange(self.n_instances)]\n",
        "        n1_total = n1_offline + n1_online\n",
        "        # the best arm's total count is fixed; bisection only moves the others\n",
        "        n_total_lb[self.best_arm_idx, np.arange(self.n_instances)] = n1_total\n",
        "        n_total_ub[self.best_arm_idx, np.arange(self.n_instances)] = n1_total\n",
        "\n",
        "        while np.any(n_total_ub - n_total_lb > tol):\n",
        "          # determine the next point for bisection search\n",
        "          n_total_next = 0.5*(n_total_lb+n_total_ub)\n",
        "          # NOTE(review): upper bounds start at the finite n_upper_bound, so\n",
        "          # this doubling branch never fires; kept for safety.\n",
        "          # (np.inf: the np.Inf alias was removed in NumPy 2.0)\n",
        "          idx_inf = np.where(n_total_ub == np.inf)\n",
        "          n_total_next[idx_inf] = np.maximum(2*n_total_lb[idx_inf], 1)\n",
        "          n_total_next[idx_inf] = np.minimum(n_total_next[idx_inf], self.n_upper_bound)\n",
        "\n",
        "          # do bisection search based on the constraint value\n",
        "          constraint = KLObjective(self.mu, n_total_next, self.dist)\n",
        "          constraint_error = np.abs(constraint - self.beta)\n",
        "          constraint_error[self.best_arm_idx, np.arange(self.n_instances)] = 0\n",
        "\n",
        "          idx_neg = np.where((constraint <= self.beta))\n",
        "          idx_pos = np.where((constraint > self.beta))\n",
        "          n_total_lb[idx_neg] = n_total_next[idx_neg]\n",
        "          n_total_ub[idx_pos] = n_total_next[idx_pos]\n",
        "\n",
        "        # BUG FIX: the original subtracted the bare name `n_offline`, which is\n",
        "        # undefined in this scope and silently resolved to a notebook global;\n",
        "        # the instance attribute is the intended operand.\n",
        "        return 0.5*(n_total_lb+n_total_ub) - self.n_offline, constraint_error\n",
        "\n",
        "    def _n1_sub_grad(self, n1_online, tol=1e-6):\n",
        "        \"\"\"Gradient of N1 + sum_{a>1} Na*(N1) with respect to N1.\"\"\"\n",
        "        n_online, constraint_error = self._na_star(n1_online, tol)\n",
        "        kl_ratio = KLRatio(self.mu, self.n_offline + n_online, self.dist)\n",
        "\n",
        "        # arms whose online budget is already zero contribute nothing\n",
        "        inactive_arms = np.where(n_online <= 0)\n",
        "        kl_ratio[inactive_arms] = 0\n",
        "        kl_ratio[self.best_arm_idx, np.arange(self.n_instances)] = -1\n",
        "\n",
        "        sub_grad = -np.sum(kl_ratio, axis=0)\n",
        "\n",
        "        # if there exists any constraint that isn't satisfied, increase n1\n",
        "        idx_bad = np.where(np.any(constraint_error > 1e-3, axis = 0))\n",
        "        sub_grad[idx_bad] = -1\n",
        "        return sub_grad\n",
        "\n",
        "    def compute_optimal_proportions(self, tol=1e-2, algo='bisection'):\n",
        "        \"\"\"Bisection on N1 (online samples of the best arm) to minimize the\n",
        "        total online sample count. Returns online samples per arm, or None\n",
        "        for an unrecognized algo (only 'bisection' is implemented).\n",
        "        \"\"\"\n",
        "        if algo == 'bisection':\n",
        "          # lower and upper bounds for n1 (number of online samples of the best arm)\n",
        "          n1_lb = np.zeros(self.n_instances)\n",
        "          n1_ub = self.n_upper_bound*np.ones(self.n_instances)\n",
        "          while np.any(n1_ub - n1_lb > tol):\n",
        "            # determine the next point for bisection search\n",
        "            n1_next = 0.5*(n1_lb+n1_ub)\n",
        "            # dead in practice (n1_ub is finite); kept for safety\n",
        "            idx_inf = np.where(n1_ub == np.inf)\n",
        "            n1_next[idx_inf] = np.maximum(2*n1_lb[idx_inf], 1)\n",
        "\n",
        "            # do bisection search based on the sign of the gradients\n",
        "            n1_grad = self._n1_sub_grad(n1_next)\n",
        "            idx_neg_grad = np.where((n1_grad <= 0))\n",
        "            idx_pos_grad = np.where((n1_grad > 0))\n",
        "            n1_lb[idx_neg_grad] = n1_next[idx_neg_grad]\n",
        "            n1_ub[idx_pos_grad] = n1_next[idx_pos_grad]\n",
        "\n",
        "          optimal_proportions, _ = self._na_star(0.5*(n1_lb+n1_ub))\n",
        "          return optimal_proportions"
      ],
      "metadata": {
        "id": "IPXZcgxaw7nw"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "class OnlineMaxMinSolver(object):\n",
        "    def __init__(self, mu, delta, dist='Gaussian'):\n",
        "        \"\"\"\n",
        "        Parameters\n",
        "        ----------\n",
        "        mu:\n",
        "            array of size (K, n_instances), where K is the number of arms,\n",
        "            and n_instances is the number of problem instances.\n",
        "            values denote the expected reward of arms.\n",
        "        delta:\n",
        "            confidence parameter (scalar)\n",
        "        dist:\n",
        "            distribution of the arms (string)\n",
        "            options: [Gaussian, Bernoulli, Exponential]\n",
        "        \"\"\"\n",
        "        self.mu = mu\n",
        "\n",
        "        self.K = mu.shape[0]\n",
        "        self.n_instances = mu.shape[1]\n",
        "        self.best_arm_idx = np.argmax(self.mu, axis=0)\n",
        "\n",
        "        self.delta = delta\n",
        "        # stopping threshold: beta = log(1/delta) + log log(1/delta)\n",
        "        self.beta = -np.log(delta) + np.log(-np.log(delta))\n",
        "\n",
        "        self.dist = dist\n",
        "\n",
        "\n",
        "    def compute_optimal_proportions(self, iters=10000, algo='Top2'):\n",
        "        \"\"\"Iteratively approximate the optimal sampling proportions.\n",
        "\n",
        "        algo: 'Top2' (default), 'FW' (known not to converge), or 'EWA'.\n",
        "        \"\"\"\n",
        "        if algo == 'FW': # FW doesn't work both theoretically and practically\n",
        "          w = np.ones_like(self.mu)/self.K\n",
        "          for i in range(1, iters):\n",
        "            kl_ratio = KLRatio(self.mu, w, self.dist)\n",
        "            kl_ratio[self.best_arm_idx, np.arange(self.n_instances)] = 0\n",
        "            kl_objective = KLObjective(self.mu, w, self.dist)\n",
        "            kl_objective[self.best_arm_idx, np.arange(self.n_instances)] = np.inf\n",
        "\n",
        "            idx = np.argmin(kl_objective, axis=0)\n",
        "            # instances where the ratio constraint holds: pull the best arm.\n",
        "            # plain index arrays instead of np.where tuples: nesting a tuple\n",
        "            # inside a fancy index is rejected by modern NumPy\n",
        "            cols1 = np.where(kl_ratio[idx, np.arange(self.n_instances)] >= 1)[0]\n",
        "            w_next = i*w\n",
        "            w_next[self.best_arm_idx[cols1], cols1] += 1\n",
        "\n",
        "            # remaining instances: pull the closest competitor\n",
        "            cols2 = np.where(kl_ratio[idx, np.arange(self.n_instances)] < 1)[0]\n",
        "            w_next[idx[cols2], cols2] += 1\n",
        "            w = w_next/(i+1)\n",
        "\n",
        "          return w\n",
        "        elif algo == 'Top2':\n",
        "          w = np.ones_like(self.mu)/self.K\n",
        "          for i in range(1, iters):\n",
        "            kl_ratio = KLRatio(self.mu, w, self.dist)\n",
        "            kl_ratio[self.best_arm_idx, np.arange(self.n_instances)] = 0\n",
        "            sum_kl_ratio = np.sum(kl_ratio, axis=0)\n",
        "            kl_objective = KLObjective(self.mu, w, self.dist)\n",
        "            kl_objective[self.best_arm_idx, np.arange(self.n_instances)] = np.inf\n",
        "\n",
        "            # pull arm 1 if the sum_kl_ratio is greater than 1\n",
        "            cols1 = np.where(sum_kl_ratio > 1)[0]\n",
        "            w_next = i*w\n",
        "            w_next[self.best_arm_idx[cols1], cols1] += 1\n",
        "            w_next[:, cols1] /= (i+1)\n",
        "\n",
        "            # otherwise pull a competitor\n",
        "            cols2 = np.where(sum_kl_ratio <= 1)[0]\n",
        "            if cols2.size == 0:\n",
        "              w = w_next\n",
        "              continue\n",
        "            for j in cols2:\n",
        "              arm_pull = np.argmin(kl_objective[:, j])\n",
        "              w_next[arm_pull, j] += 1\n",
        "            w_next[:, cols2] /= (i+1)\n",
        "            w = w_next\n",
        "\n",
        "          return w\n",
        "        elif algo == 'EWA': # exponential weights algorithm\n",
        "          w = np.ones_like(self.mu)/self.K\n",
        "          g = np.zeros_like(self.mu)\n",
        "          for i in range(1, iters):\n",
        "            # anytime learning rate eta_t = sqrt(log(K)/t); the original cell\n",
        "            # had a bare 'eta =' here, which is a SyntaxError\n",
        "            eta = np.sqrt(np.log(self.K)/i)\n",
        "            g = g + KLObjectiveGrad(self.mu, w, self.dist)\n",
        "            w = sp.special.softmax(eta*g, axis=0)\n",
        "          return w\n"
      ],
      "metadata": {
        "id": "1a5EgwTcw9qS"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Generate Problem Instances"
      ],
      "metadata": {
        "id": "HdIWc9dEw_xa"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# generate the problem instance\n",
        "K = 10 # number of arms\n",
        "delta = 0.02 # gap between arms (NOTE: distinct from the confidence parameter delta used by the solvers)\n",
        "mu = np.random.uniform(0.0+delta, 1.0 - delta, K) # expected reward of the arms\n",
        "mu = -np.sort(-mu) # sort in decreasing order so arm 0 is the best\n",
        "mu[0] = 1-delta/2 # force a known gap between the best arm and the boundary"
      ],
      "metadata": {
        "id": "Am4mPZCcxASn"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# run bisection search\n",
        "# stack two identical copies of the instance to exercise the vectorized\n",
        "# (n_instances = 2) code path; both columns should give the same answer\n",
        "mu_rep = np.hstack((mu[:,None], mu[:, None]))\n",
        "n_offline = np.zeros_like(mu_rep) # purely online setting: no offline samples\n",
        "bisection_solver = OfflineOnlineMaxMinSolver(mu = mu_rep, delta = 1e-6, n_offline=n_offline, dist='Gaussian')\n",
        "opt_props = bisection_solver.compute_optimal_proportions()\n",
        "# normalize each column so it sums to 1 (sampling proportions)\n",
        "print(opt_props/np.sum(opt_props, axis=0)[None,:])"
      ],
      "metadata": {
        "id": "wdBRHN0jxB2y"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# run top2\n",
        "Top2_solver = OnlineMaxMinSolver(mu = mu_rep, delta = 1e-6, dist='Gaussian')\n",
        "Top2_opt_props = Top2_solver.compute_optimal_proportions(algo='Top2')\n",
        "# normalize each column so it sums to 1; compare with the bisection output\n",
        "print(Top2_opt_props/np.sum(Top2_opt_props, axis=0)[None,:])"
      ],
      "metadata": {
        "id": "pARag8MCxDY8"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# run EWA (exponential weights)\n",
        "EWA_solver = OnlineMaxMinSolver(mu = mu_rep, delta = 1e-6, dist='Gaussian')\n",
        "EWA_opt_props = EWA_solver.compute_optimal_proportions(algo='EWA')\n",
        "# normalize each column so it sums to 1; compare with the bisection output\n",
        "print(EWA_opt_props/np.sum(EWA_opt_props, axis=0)[None,:])"
      ],
      "metadata": {
        "id": "Skg-as6oxFPG"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# run FW (Frank-Wolfe; flagged inside the solver as not converging)\n",
        "FW_solver = OnlineMaxMinSolver(mu = mu_rep, delta = 1e-6, dist='Gaussian')\n",
        "FW_opt_props = FW_solver.compute_optimal_proportions(algo='FW')\n",
        "# normalize each column so it sums to 1; compare with the bisection output\n",
        "print(FW_opt_props/np.sum(FW_opt_props, axis=0)[None,:])"
      ],
      "metadata": {
        "id": "aHviLt71xHcE"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Bandit Algorithms"
      ],
      "metadata": {
        "id": "ItqxG6RZxLP1"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "def beta(N, delta):\n",
        "  \"\"\"Stopping threshold beta(t, delta) used by the GLRT stopping rule.\"\"\"\n",
        "  K = len(N)\n",
        "  t = np.sum(N)\n",
        "  # tighter alternative threshold kept for reference:\n",
        "  # beta = np.log(K-1) -np.log(delta) + 6*np.log(1 + np.log(t/2)) + 8*np.log(1 + np.log((K-1)/delta))\n",
        "  beta = np.log(K-1) - np.log(delta) + np.log(1 + np.log(t))\n",
        "  return beta\n",
        "\n",
        "# Define a Stopping condition function.\n",
        "def Stop(mu, N, delta, dist):\n",
        "  \"\"\"\n",
        "  mu : empirical means, array of shape (K, n_instances)\n",
        "  N : number of samples to each arm\n",
        "  delta : bound on error probability\n",
        "  dist : SPEF family name\n",
        "\n",
        "  Returns True once the smallest GLRT statistic over the challenger\n",
        "  arms exceeds the threshold beta(N, delta).\n",
        "  \"\"\"\n",
        "\n",
        "  # np.sum/np.min give scalars even for (K, 1) inputs, unlike the\n",
        "  # builtin sum/min the original used on 2-D arrays\n",
        "  t = np.sum(N)\n",
        "  w = N/t\n",
        "  glrt = t*KLObjective(mu, w, dist)\n",
        "  best_idx = np.argmax(mu)\n",
        "  # index for best arm will always be minimum (~ 0). Exclude that.\n",
        "  glrt[best_idx] = float('inf')\n",
        "  m = np.min(glrt)\n",
        "  threshold = beta(N, delta)\n",
        "\n",
        "  return m >= threshold\n",
        "\n",
        "def track(w, N):\n",
        "  \"\"\"\n",
        "  w : weight to track\n",
        "  N : number of samples to each arm\n",
        "  returns the arm to pull: the one whose sample count lags its\n",
        "  target weight the most (largest w/N)\n",
        "  \"\"\"\n",
        "  return np.argmax(w/N)\n",
        "\n",
        "def sample(mu, dist, idx):\n",
        "  \"\"\"\n",
        "  mu : mean vector\n",
        "  dist : class of SPEF\n",
        "  idx : index to sample from\n",
        "  \"\"\"\n",
        "  # Generate a sample from distribution dist with mean mu[idx]\n",
        "  if dist == 'Gaussian':\n",
        "    return np.random.normal(mu[idx], 1)\n",
        "  elif dist == 'Bernoulli':\n",
        "    return np.random.binomial(1, mu[idx])\n",
        "  elif dist == 'Exponential':\n",
        "    # Exponential(scale) has mean scale; matches KL's 'Exponential' branch\n",
        "    return np.random.exponential(mu[idx])\n",
        "  # the original silently returned None here, which crashed later with a\n",
        "  # confusing TypeError; fail fast instead\n",
        "  raise ValueError('unsupported distribution: ' + str(dist))"
      ],
      "metadata": {
        "id": "Pe31HTzWxM39"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def batched_tas(mu, K, mu_hat, n_offline, dist, delta):\n",
        "  \"\"\"\n",
        "  mu : mean vector\n",
        "  mu_hat : emp. mean from offline samples (updated in place)\n",
        "  K : number of arms\n",
        "  n_offline : number of offline samples from each arm (not modified)\n",
        "  dist : SPEF\n",
        "  delta : error bound\n",
        "\n",
        "  Returns (estimated best arm, total round count t).\n",
        "  \"\"\"\n",
        "\n",
        "  # BUG FIX: the original aliased N to n_offline, so every online pull also\n",
        "  # inflated the caller's offline counts -- and the solver below was handed\n",
        "  # total counts instead of the offline counts it expects\n",
        "  N = n_offline.copy() # array to store number of samples for each arm\n",
        "  w = np.zeros_like(mu) # store the average w (to be tracked)\n",
        "  wt = np.zeros_like(mu) # store the current w\n",
        "\n",
        "  # initialization: pull every arm once\n",
        "  for at in range(0,K):\n",
        "    X = sample(mu, dist, at)\n",
        "    mu_hat[at] = (mu_hat[at] * N[at] + X)/(N[at] + 1)\n",
        "    N[at] += 1\n",
        "\n",
        "  w = 1/K * np.ones_like(mu)\n",
        "  wt = w\n",
        "  t = K\n",
        "\n",
        "  while (not Stop(mu_hat, N, delta, dist)):\n",
        "    # forced exploration on rounds where sqrt(floor(t/K)) is an integer\n",
        "    if float(np.sqrt(np.floor(t/K))).is_integer():\n",
        "      print(t, \"~~~~~~~ Entring Forced Exploration ~~~~~~~\")\n",
        "      t_ = 0\n",
        "      while (t_ < K):\n",
        "        # Forced Exploration: track the uniform weight vector\n",
        "        wt = 1/K * np.ones_like(mu)\n",
        "\n",
        "        w = (1-1/(t+t_))*w + 1/(t+t_)*wt\n",
        "        at = track(w, N)\n",
        "        X = sample(mu, dist, at)\n",
        "        mu_hat[at] = (mu_hat[at] * N[at] + X)/(N[at] + 1)\n",
        "        N[at] += 1\n",
        "        t_ += 1\n",
        "      t += K\n",
        "      print(t, \"~~~~~~~ Completed FE, computing oracle weights ~~~~~~~\")\n",
        "\n",
        "      # NOTE(review): the solver gets a fixed confidence of 1e-6 rather than\n",
        "      # this function's delta -- confirm whether that is intentional\n",
        "      bisection_solver = OfflineOnlineMaxMinSolver(mu_hat, 1e-6, n_offline, dist)\n",
        "      wt = bisection_solver.compute_optimal_proportions()\n",
        "    else:\n",
        "      # track the running average of the oracle weights\n",
        "      w = (1-1/t)*w + (1/t)*wt\n",
        "      at = track(w,N)\n",
        "      X = sample(mu, dist, at)\n",
        "      mu_hat[at] = (mu_hat[at] * N[at] + X)/(N[at] + 1)\n",
        "      N[at] += 1\n",
        "      t += 1\n",
        "\n",
        "  return np.argmax(mu_hat, axis=0), t\n",
        "\n",
        "# Implement top2\n",
        "def online_top2(mu, K, dist, delta):\n",
        "  \"\"\"\n",
        "  mu : mean vector, array of shape (K, 1)\n",
        "  K : number of arms\n",
        "  dist : SPEF (Gaussian or Bernoulli)\n",
        "  delta : error bound\n",
        "\n",
        "  Returns (estimated best arm, number of samples t).\n",
        "  NOTE(review): the original cell referenced undefined names (w,\n",
        "  n_instances) and never sampled inside the loop; this version derives\n",
        "  the weights from the empirical counts and pulls one arm per round.\n",
        "  \"\"\"\n",
        "\n",
        "  N = np.zeros_like(mu) # array to store number of samples for each arm\n",
        "  mu_hat = np.zeros_like(mu) # store estimated means\n",
        "\n",
        "  # Sample each arm once\n",
        "  for at in range(0,K):\n",
        "    X = sample(mu, dist, at)\n",
        "    mu_hat[at] = (mu_hat[at] * N[at] + X)/(N[at] + 1)\n",
        "    N[at] += 1\n",
        "\n",
        "  t = K\n",
        "  n_instances = mu.shape[1]\n",
        "\n",
        "  while (not Stop(mu_hat, N, delta, dist)):\n",
        "    best_arm_idx = np.argmax(mu_hat, axis=0)\n",
        "    w = N/np.sum(N, axis=0)[None,:] # empirical sampling proportions\n",
        "    # Check ratio constraint\n",
        "    kl_ratio = KLRatio(mu_hat, w, dist)\n",
        "    kl_ratio[best_arm_idx, np.arange(n_instances)] = 0\n",
        "    sum_kl_ratio = np.sum(kl_ratio, axis=0)\n",
        "    kl_objective = KLObjective(mu_hat, w, dist)\n",
        "    kl_objective[best_arm_idx, np.arange(n_instances)] = np.inf\n",
        "\n",
        "    # single-instance decision: pull the best arm when the summed ratio\n",
        "    # constraint exceeds 1, otherwise pull the closest competitor\n",
        "    if sum_kl_ratio[0] > 1:\n",
        "      at = int(best_arm_idx[0])\n",
        "    else:\n",
        "      at = int(np.argmin(kl_objective[:, 0]))\n",
        "\n",
        "    X = sample(mu, dist, at)\n",
        "    mu_hat[at] = (mu_hat[at] * N[at] + X)/(N[at] + 1)\n",
        "    N[at] += 1\n",
        "    t += 1\n",
        "\n",
        "  return np.argmax(mu_hat, axis=0), t"
      ],
      "metadata": {
        "id": "7XCtUFmIxNBy"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# generate the problem instance\n",
        "\n",
        "K = 3 # number of arms\n",
        "delta = 0.1 # confidence parameter for the stopping rule\n",
        "gap = 0.1 # gap between arms\n",
        "mu = np.random.uniform(0.0+gap, 1.0 - gap, K) # expected reward of the arms\n",
        "mu = -np.sort(-mu) # decreasing order: arm 0 is the best\n",
        "#mu[0] = 1-gap/2\n",
        "\n",
        "mu = mu[:,None] # shape (K, 1): a single problem instance\n",
        "\n",
        "dist = 'Gaussian'\n",
        "\n",
        "# generate offline data: one sample per arm whose empirical mean equals mu\n",
        "# (the zeros_like placeholders the original created here were dead code;\n",
        "# they were immediately overwritten below)\n",
        "n_offline = np.ones(mu.shape)*1\n",
        "s_hat = mu * n_offline\n",
        "mu_hat = s_hat / n_offline"
      ],
      "metadata": {
        "id": "dMNrzuVpxQoN"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "print(mu)\n",
        "# run batched track-and-stop on the single instance generated above;\n",
        "# mutates mu_hat in place (and, via the N = n_offline alias inside\n",
        "# batched_tas, the n_offline counts as well)\n",
        "tas = batched_tas(mu, K, mu_hat, n_offline, dist, delta)\n",
        "print(tas) # (estimated best arm, stopping time)"
      ],
      "metadata": {
        "id": "GLpTu858xUK7"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}