'''
Created on 19/11/2012

@author: greis
'''
class Iteraction:
    """
    Iteraction holds the parameters needed by the iteration methods:
    the discount/learning rates, the MDP model dictionaries and the
    node/action sets shared by the concrete iteration strategies.
    """

    gama = 0.0  # the discount rate (gamma)
    alpha = 0.0  # the learning rate
    kInterations = 0  # the number of iterations to run

    # NOTE: class-level defaults are kept for backward compatibility with
    # any code that reads them off the class itself.  __init__ below gives
    # each instance its own fresh containers, fixing the shared-mutable-
    # class-attribute pitfall (all instances previously mutated the SAME
    # dicts/lists).
    transitions = {}  # transition value for (node, action, nodePrime)
    rewards = {}  # reward value for (node, action, nodePrime)
    samples = {}  # sample value for (node, action, nodePrime)
    values = {}  # the value for a given node
    valuesStar = {}  # the best value for a given node
    qValues = {}  # the qValue for (node, action)
    qValuesStar = {}  # the best qValue for (node, action)

    actions = []  # available actions
    nodes = []  # available nodes
    policies = []  # policies that define an action for a given node

    def __init__(self):
        # Per-instance containers so instances (and subclass instances)
        # no longer share state through the class object.
        self.transitions = {}
        self.rewards = {}
        self.samples = {}
        self.values = {}
        self.valuesStar = {}
        self.qValues = {}
        self.qValuesStar = {}
        self.actions = []
        self.nodes = []
        self.policies = []

    def getRewardValue(self, action, node):
        """
        Return the expected discounted value of taking *action* in *node*:
        sum over nodePrime of T(n,a,n') * (R(n,a,n') + gama * V(n')).
        Only (node, action, nodePrime) keys present in self.transitions
        contribute; an unknown action therefore yields 0.
        """
        expected = []  # renamed from 'rewards' to avoid shadowing self.rewards
        for nodePrime in self.nodes:
            key = (node, action, nodePrime)
            if key in self.transitions:
                transition = self.transitions[key]
                reward = self.rewards[key]
                value = self.values[nodePrime]
                expected.append(transition * (reward + (self.gama * value)))
        return sum(expected)

class ValueIteration(Iteraction):
    """
    ValueIteration: scores every available action for a node and picks
    the best (value, action) pair.
    """

    def getValueIteration(self, node):
        # max over (value, action) tuples compares by value first
        return max(self.getActionValue(node))

    def getActionValue(self, node):
        # one (expected-value, action) pair per available action
        return [(self.getRewardValue(act, node), act) for act in self.actions]
    
class PolicyIteration(Iteraction):
    """
    PolicyIteration: evaluates the action the current policy prescribes
    for a node.
    """

    def getPolicyIteration(self, node):
        return self.getPolicyValue(node)

    def getPolicyValue(self, node):
        # Returns [(expected-value, action)] for the policy's action,
        # or [] when the policy entry is not a known action.
        chosen = self.policies[node]
        if chosen not in self.actions:
            return []
        return [(self.getRewardValue(chosen, node), chosen)]
    
class QValueItaration(Iteraction):
    """
    QValueItaration: Q-value iteration, plus helpers that estimate the
    model (transition probabilities and rewards) from observed samples
    and a model-free running-average update.

    Fixed for Python 3: the removed `<>` operator is replaced by `!=`
    (identical semantics on Python 2 as well).
    """

    def getSampleAverages(self):
        """
        For model-based learning: from the collected samples, infer the
        transition-function values, the per-node values and the rewards.
        """
        for node in self.nodes:
            sampleSum = 0.0
            sampleNum = 0.0
            for action in self.actions:
                difNodePrime = 0.0
                transFunc = []
                for nodePrime in self.nodes:
                    trans = (node, action, nodePrime)
                    if trans in self.samples:
                        sample = self.samples[trans]
                        sampleSum += sum(sample)
                        sampleNum += len(sample)
                        transFunc.append((nodePrime, len(sample)))
                        difNodePrime += len(sample)
                # empirical transition probability: share of this action's
                # samples that ended in nodePrime
                for nodePrime in self.nodes:
                    for transFuncNode in transFunc:
                        # difNodePrime guard: all-empty sample lists
                        # previously raised ZeroDivisionError here
                        if nodePrime == transFuncNode[0] and difNodePrime > 0:
                            self.transitions[node, action, nodePrime] = \
                                transFuncNode[1] / float(difNodePrime)
            # guard: a node with no samples at all previously raised
            # ZeroDivisionError; its old value is now left untouched
            if sampleNum > 0:
                self.values[node] = sampleSum / sampleNum
        for node in self.nodes:
            for action in self.actions:
                for nodePrime in self.nodes:
                    trans = (node, action, nodePrime)
                    if trans in self.samples:
                        sample = self.samples[trans]
                        # float() keeps true division on Python 2 too;
                        # empty lists are skipped instead of crashing
                        if len(sample) > 0:
                            self.rewards[trans] = \
                                sum(sample) / float(len(sample))

    def getQValueItaration(self):
        """
        The Q-value iteration driver: initialise missing q-values to
        zero, then apply kInterations synchronous backups.
        """
        for node in self.nodes:
            for action in self.actions:
                if (node, action) not in self.qValues:
                    self.qValues[(node, action)] = 0
        while self.kInterations > 0:
            self.getQValue()
            self.kInterations -= 1

    def getQValue(self):
        """
        One synchronous backup: every q-value is recomputed from the old
        table (tempQ), then the table is swapped in at once.
        """
        tempQ = {}
        for node in self.nodes:
            for action in self.actions:
                if (node, action) not in self.qValues:
                    self.qValues[(node, action)] = 0.0
                tempQ[(node, action)] = self.getRewardQValue(node, action)
        self.qValues = tempQ

    def getRewardQValue(self, node, action):
        """
        Expected q-value of (node, action):
        sum over nodePrime of T * (R + gama * max over a' of Q(n', a')).

        NOTE(review): self-transitions (nodePrime == node) are skipped
        here, unlike Iteraction.getRewardValue — confirm that is intended.
        """
        rewards = []
        for nodePrime in self.nodes:
            key = (node, action, nodePrime)
            if nodePrime != node and key in self.transitions \
                    and key in self.rewards:
                transition = self.transitions[key]
                reward = self.rewards[key]
                # x != x is true only for NaN: treat NaN model entries as 0
                if float(reward) != float(reward):
                    reward = 0.0
                if float(transition) != float(transition):
                    transition = 0.0
                qValue = [self.qValues[(nodePrime, actionPrime)]
                          for actionPrime in self.actions]
                rewards.append(transition * (reward + (self.gama * max(qValue))))
        return sum(rewards)

    def getRunningAverage(self):
        """
        Model-free running-average (TD-style) update of the q-values
        directly from samples.

        NOTE(review): self.samples values are lists elsewhere in this
        class but are used as scalars here, and sum(sampleReward)
        accumulates across nodePrime inside the loop — both look
        suspicious; confirm against the caller's data shape before
        changing. Behavior is preserved as-is.
        """
        for node in self.nodes:
            for action in self.actions:
                sampleReward = []
                for nodePrime in self.nodes:
                    if node != nodePrime and \
                            (node, action, nodePrime) in self.samples:
                        reward = self.samples[(node, action, nodePrime)]
                        qValue = [self.qValues[(nodePrime, actionPrime)]
                                  for actionPrime in self.actions
                                  if (nodePrime, actionPrime) in self.qValues]
                        sampleReward.append(reward + self.gama * max(qValue))
                        self.qValues[node, action] = \
                            (1 - self.alpha) * self.qValues[node, action] \
                            + self.alpha * sum(sampleReward)
