from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

# @kArm: number of arms
# @epsilon: probability for exploration in the epsilon-greedy algorithm
# @initial: initial estimation for each action
# @stepSize: constant step size for updating estimation
# @sampleAverage: if true, use sample averages to update estimations instead of constant step size
# @UCB: if not None, use UCB algorithm to select action
# @gradient: if True, use gradient based bandit algorithm
# @gradientBaseline: if True, use average reward as baseline for gradient based bandit algorithm

class Bandit:
	def __init__(self, kArm=10, epsilon=0, initial=0, stepSize=0.1, sampleAverages=False, UCBParam=None, gradient=False,
	             gradientBaseline=False, trueReward=0):
		self.k = kArm
		self.stepSize = stepSize
		self.sampleAverages = sampleAverages
		self.indices = np.arange(self.k)
		self.time = 0
		self.gradient = gradient
		self.UCBParam = UCBParam
		self.gradientBaseline = gradientBaselie
		self.averageReward = 0
		self.trueReward = trueReward
		
		self.qTrue = []
		self.qEst = np.zeros(self.k)
		self.actionCount = []
		self.epsilon = epsilon
		
		# initialize real rewards with N(0,1) distribution and estimations with desired initial value
		for i in range(0, self.k):
			self.qTrue.append(np.random.randn() + trueReward )
			self.qEst[i] = initial
			self.actionCount.append(0)
		
		self.bestAction = np.argmax(self.qTrue)
		
	def getAction(self):
		# explore
		if self.epsilon > 0:
			if np.random.binomial(1, self.epsilon) == 1:
				np.random.shuffle(self.indices)
				return self.indices[0]
		
		# exploit
		if self.UCBParam is not None:
			UCBEst = self.qEst + self.UCBParam * np.sqrt(np.log(self.time + 1 ) / (np.asarray(self.actionCount)+1))
			return np.argmax(UCBEst)
		
		if self.gradient:
			expEst = np.exp( self.qEst )
			self.actionProb=expEst/np.sum(expEst)
			