#Author: Salvatore Gionfriddo
from random import randint
from Agent import Agent
from Memory import Memory
class SolusIpse(Agent):
    """
    A model-based reinforcement-learning agent.

    Q-values are estimated by recursive lookahead through a learned
    transition model ``T`` and reward model ``R``; recent
    (state, available_actions, action) events are kept in a bounded
    ``Memory`` and used to back-propagate rewards.

    NOTE(review): ``self.R`` and ``self.T`` are used throughout but never
    assigned in this file — presumably the ``Agent`` base class provides
    them; confirm before relying on this class standalone.
    """

    def __init__(self, initial_state=None, memory_length=6, change_temp=2,
                 calc_temp_depth=5, calc_temp_weight=5):
        """Initialize the agent.

        :param initial_state: starting state, or None until Initialize().
        :param memory_length: how many recent events Memory retains.
        :param change_temp: temperature parameter (stored, unused here).
        :param calc_temp_depth: starting recursion depth for value lookahead.
        :param calc_temp_weight: divisor discounting the lookahead value.
        """
        self.current_state = initial_state
        # Remember the capacity so Initialize() can build a fresh Memory.
        self.memory_length = memory_length
        self.memory = Memory(memory_length)  # entries: (state, actions, action)
        self.change_temp = change_temp
        self.calc_temp_depth = calc_temp_depth
        self.calc_temp_weight = calc_temp_weight
        self.debug = False

    def add_Memory(self, memory):
        """Append one (state, available_actions, action) event to memory."""
        self.memory.add(memory)

    def select_HighestQAction(self, state):
        """Return the action with the highest stored Q-value for `state`."""
        return self.memory.maxQ(state)

    def get_ProbabilityOfReward(self, state, action):
        """Return R(s, a): the learned probability of reward."""
        return self.memory.r(state, action)

    def select_HighestTState(self, state, action):
        """Return the most likely successor state under transition model T."""
        return self.T.run(state + [action])

    def calculate_Value(self, current_state, action, n=2, temperature=11, history=None):
        """
        Estimate the value of taking `action` in `current_state` by the
        recursion Q(s,a) = R(s,a) + (1/w) * Q(s',a'), deepening until `n`
        reaches `temperature`.

        `history` is reserved for memoization (currently unused).
        """
        if history is None:
            history = {}
        # Q(s,a) = R(s,a) + (1/w)Q(s',a')
        value = self.get_ProbabilityOfReward(current_state, action)  # R(s,a)
        if n < temperature:
            n_state = self.select_HighestTState(current_state, action)
            n_action = self.select_HighestQAction(n_state)
            lookahead = self.calculate_Value(n_state, n_action, n + 1)
            # BUG FIX: `1/self.calc_temp_weight` was Python 2 integer
            # division — always 0 for weight > 1, so the whole lookahead
            # term was silently discarded. Use explicit float division.
            value += (1.0 / self.calc_temp_weight) * lookahead
        return value

    def change_ProbabilityOfActions(self, state, aa, action, reward, temperature=11):
        """
        Update the reward model R for all action/state pairs at `state`.

        The taken `action` receives Delta-R while each of the other eight
        actions receives -Delta-R/8; results are clamped into (0.01, 0.99)
        so R never saturates.
        """
        # No reward means no learning signal: leave R untouched.
        if reward == 0:
            return
        # Delta-R = (1/n)R(s,a)
        run1 = self.R.run(state)
        run = self.R.crossproduct(run1, aa)
        delta = (1.0 / (temperature / 2.0) * -reward) / 8
        adjust = [delta] * 9  # assumes exactly 9 actions — TODO confirm
        adjust[action] = -delta * 8  # taken action moves opposite the rest
        r = self.R.crossproduct(run, adjust)
        for i in range(len(r)):
            r[i] += run[i]
        if self.debug:
            for d in r:
                print('r:', d)
        # Clamp into the open interval so probabilities stay trainable.
        for i in range(len(r)):
            if r[i] >= 1.0:
                r[i] = 0.99
            elif r[i] <= 0.0:
                r[i] = 0.01
        # Apply Delta-R to the action that was taken.
        # For each action not taken apply -Delta-R / Count(actions)
        for _ in range(10):
            self.R.train(state, r, 0.1, 0.9)
        print('---')

    def select_Action(self, state):
        """
        Stochastically select an available action, biased toward actions
        with higher estimated value.

        NOTE(review): the rejection loop below only terminates once
        randint() lands exactly on one of the candidate values; with all
        values >= 0 this terminates, but it is not a proportional sampler —
        flagged for careful testing, as the original comment warned.
        """
        values = []
        indices = []
        for i, available in enumerate(self.available_actions):
            if available == 1:
                v = int(self.calculate_Value(state, i, self.calc_temp_depth) * 100)
                indices.append(i)
                values.append(v)
        best = max(values)  # raises ValueError if no action is available
        if self.debug:
            print('Here are the actions I thought about:')
            for idx, val in zip(indices, values):
                print(idx, str(val) + '%')
        while True:
            p = randint(0, int(best))
            if p in values:
                return indices[values.index(p)]

    def Initialize(self, state, available_actions):
        """Reset the agent for a new episode."""
        self.current_state = state
        # BUG FIX: this previously did `self.memory = []`, replacing the
        # Memory object with a plain list and breaking add_Memory()
        # (list has no .add) on the next SelectNextAction() call.
        self.memory = Memory(self.memory_length)
        self.available_actions = available_actions

    def SelectNextAction(self):
        """Choose an action for the current state and record the event."""
        action = self.select_Action(self.current_state)
        # Remember what we did so ApplyResult() can train T against it.
        self.last_action = action
        self.add_Memory((self.current_state, self.available_actions, action))
        return action

    def ApplyResult(self, state, reward, available_actions):
        """Consume the environment's response to the last action: train the
        transition model, advance the state, and back-propagate the reward
        through remembered events (recent events weighted most heavily)."""
        self.available_actions = available_actions
        if self.debug:
            print('training T')
        self.T.train(self.current_state + [self.last_action], state, 10, 0.1)
        if self.debug:
            print('done training T')
        # Change the current state
        self.current_state = state
        # Back-propagate the reward.
        # Assumes Memory supports len() and iteration — TODO confirm.
        n = float(len(self.memory))
        for mem_state, mem_aa, mem_action in self.memory:
            self.change_ProbabilityOfActions(mem_state, mem_aa, mem_action, reward, n)
            n -= 1

            
