'''
Reproduction of the multi-arm problem from RL Sutton book, page 29.

sample-average method: each estimate is an average of the sample of relevant rewards

Qt(a) = (sum of rewards when a taken prior to t) / (number of times a was taken prior to t)

'''
import numpy as np

# Draws an action index by sampling from a discrete probability distribution (used by the softmax selector)

def get_action(probs):
	'''Sample an index from the discrete distribution `probs`.

	probs -- sequence of non-negative weights that (approximately) sum to 1.
	Returns the sampled index.  Floating-point round-off can leave the
	cumulative sum fractionally below the drawn uniform value; in that
	case the last index is returned instead of falling through to None.
	'''
	p = np.random.rand()
	cum_prob = 0.0
	for i, prob in enumerate(probs):
		cum_prob += prob
		if cum_prob > p:
			return i
	# Round-off fallback: the weights summed to slightly less than 1.
	return len(probs) - 1

class episongreddy:
	'''Sample-average multi-armed bandit agent (Sutton & Barto, p. 29).

	Supports epsilon-greedy action selection and an annealing softmax
	(Boltzmann) selection whose inverse temperature grows as more
	samples are collected.
	'''

	def __init__(self, epsilon, rewards):
		self.epsilon = epsilon
		self.counts = np.zeros(len(rewards))    # times each arm has been pulled
		self.q_values = np.zeros(len(rewards))  # sample-average value estimate per arm
		self.rewards = rewards                  # true mean reward behind each arm

	# this function picks the epsilon-greedy action
	def pick_action(self):
		'''Return an arm index: explore uniformly with probability
		`epsilon`, otherwise exploit the current best estimate.'''
		if np.random.rand() > (1 - self.epsilon):
			return np.random.randint(len(self.q_values))
		return np.argmax(self.q_values)

	# this function lets us pick an action via an annealing softmax
	def annealing_action(self, const):
		'''Return an arm index sampled from a softmax over the value
		estimates.  The inverse temperature `t*4/const` grows with the
		total pull count, so the policy becomes greedier over time.'''
		t = sum(self.counts) + 0.00001
		logits = np.asarray([v * t * 4 / const for v in self.q_values])
		# Subtract the max logit before exponentiating: softmax is
		# shift-invariant, and this prevents np.exp overflow once t grows.
		exps = np.exp(logits - logits.max())
		probs = list(exps / exps.sum())
		return get_action(probs)

	def calc_rewards(self, runs, a):
		'''Run the bandit for `runs` steps and return the reward per step.

		runs -- number of steps (also used as the softmax schedule constant)
		a    -- 'greedy' for epsilon-greedy selection, anything else for
		        the annealing softmax.
		'''
		rewards = np.zeros(runs)
		for s in range(runs):
			# pick epsilon-greedy action or softmax action
			action = self.pick_action() if a == 'greedy' else self.annealing_action(runs)
			# Observed reward: true arm mean plus unit Gaussian noise.
			# Draw a scalar (not a length-1 array): assigning an ndim>0
			# array into a scalar slot is deprecated/removed in modern NumPy.
			reward = np.random.normal(0, 1) + self.rewards[action]
			# Incremental sample-average update: new_mean = (n*old + r)/(n+1),
			# performed before the count is bumped.
			n = self.counts[action]
			self.q_values[action] = (n * self.q_values[action] + reward) / (n + 1)
			self.counts[action] += 1
			rewards[s] = reward
		return rewards
	
