'''
Created on 02/11/2012

@author: GabrielBacelar
'''

class QValueIteration(object):
    """Tabular Q-value iteration over an MDP estimated from reward samples.

    Typical use: populate ``nodes``, ``actions``, ``gama`` and the sample
    store ``S``; call :meth:`getSampleAverages` to infer the model
    (``T``, ``R``, ``V``); set ``k`` and call :meth:`getQValueItaration`
    to run ``k`` sweeps of Q-value iteration.  A model-free
    running-average update is also available via :meth:`getRunningAverage`.
    """

    def __init__(self):
        """Initialize an empty model; attributes are populated by callers.

        Note: the original class also declared these as class-level
        (shared, mutable) attributes; they were removed because every
        instance shadows them here and shared mutable class state is a
        well-known pitfall.
        """
        # Instrumentation counters (how often each helper has run).
        self.rewardruns = 0
        self.policyruns = 0
        self.actionruns = 0

        self.actions = []  # action labels
        self.nodes = []    # state labels
        self.gama = 0.0    # discount factor ("gamma"; original spelling kept)
        self.alpha = 0.0   # learning rate used by getRunningAverage
        self.T = {}        # (s, a, s') -> estimated transition probability
        self.R = {}        # (s, a, s') -> estimated mean reward
        self.S = {}        # (s, a, s') -> list of sampled rewards
        self.V = {}        # s -> mean sampled reward observed from s
        self.Q = {}        # (s, a) -> current Q-value estimate
        self.Qstar = {}    # reserved for optimal Q-values (never written here)
        self.k = 0         # number of value-iteration sweeps left to run

    def getSampleAverages(self):
        """Infer T, R and V from the samples in S (model-based step).

        For each (s, a): ``T[s, a, s']`` becomes the fraction of samples
        from (s, a) that led to s'; ``R[s, a, s']`` the mean sampled
        reward; ``V[s]`` the mean of every sample observed from s over
        all actions.  Raises ZeroDivisionError for a state in ``nodes``
        with no samples at all (original behavior, preserved).
        """
        for node in self.nodes:
            sampleSum = 0.0
            sampleNum = 0.0
            for action in self.actions:
                difNodePrime = 0.0  # total sample count for this (s, a)
                transFunc = []      # (s', count) pairs observed from (s, a)
                for nodePrime in self.nodes:
                    trans = (node, action, nodePrime)
                    if trans in self.S:
                        sample = self.S[trans]
                        sampleSum += sum(sample)
                        sampleNum += len(sample)
                        transFunc.append((nodePrime, len(sample)))
                        difNodePrime += len(sample)
                # Empirical transition probability: per-s' count / total.
                # (Direct iteration over transFunc replaces the original
                # O(nodes x transFunc) matching loop; same assignments.)
                for nodePrime, count in transFunc:
                    self.T[node, action, nodePrime] = count / float(difNodePrime)
            self.V[node] = sampleSum / sampleNum
        # Mean sampled reward per observed transition.
        for node in self.nodes:
            for action in self.actions:
                for nodePrime in self.nodes:
                    trans = (node, action, nodePrime)
                    if trans in self.S:
                        # float() guards against accidental integer division
                        # when the samples happen to be ints.
                        self.R[trans] = sum(self.S[trans]) / float(len(self.S[trans]))

    def getQValueItaration(self):
        """Run ``self.k`` sweeps of Q-value iteration.

        (Method name typo "Itaration" kept for backward compatibility
        with existing callers.)  Missing (s, a) entries of ``Q`` are
        first initialized to 0; each sweep replaces ``Q`` wholesale via
        :meth:`getQValue` and decrements ``self.k`` down to 0.
        """
        for node in self.nodes:
            for action in self.actions:
                if (node, action) not in self.Q:
                    self.Q[(node, action)] = 0
        while self.k > 0:
            print("QValue for k  : ", self.k, self.Q)
            self.getQValue()
            print("QValue for k+1: ", self.k, self.Q)
            self.k -= 1

    def getQValue(self):
        """Perform one synchronous sweep: recompute every Q(s, a)."""
        tempQ = {}
        for node in self.nodes:
            for action in self.actions:
                if (node, action) not in self.Q:
                    self.Q[(node, action)] = 0.0
                tempQ[(node, action)] = self.getRewardQValue(node, action)
        # Swap in the new table atomically so every entry of this sweep
        # was computed from the previous sweep's values.
        self.Q = tempQ

    def getRewardQValue(self, node, action):
        """Return sum over s' != s of T * (R + gama * max_a' Q(s', a')).

        Self-loop transitions (s' == s) are deliberately skipped, and
        NaN entries in T or R are treated as 0.
        """
        rewards = []
        for nodePrime in self.nodes:
            trans = (node, action, nodePrime)
            if nodePrime != node and trans in self.T and trans in self.R:
                transition = self.T[trans]
                reward = self.R[trans]
                # x != x is the classic NaN test (NaN is never equal to
                # itself); replace NaN model entries with 0.0.
                if float(reward) != float(reward):
                    reward = 0.0
                if float(transition) != float(transition):
                    transition = 0.0
                qValue = [self.Q[(nodePrime, actionPrime)]
                          for actionPrime in self.actions]
                rewards.append(transition * (reward + (self.gama * max(qValue))))
        return sum(rewards)

    def getRunningAverage(self):
        """Model-free running-average Q update from the raw samples in S.

        NOTE(review): elsewhere in this class ``S`` maps to *lists* of
        samples, but ``reward + self.gama * max(qValue)`` below only
        works if ``S`` holds scalar rewards here — confirm against the
        caller.  Also the update uses the *cumulative* sum(sampleReward)
        and fires once per observed s', which looks suspicious but is
        preserved exactly as originally written.
        """
        for node in self.nodes:
            for action in self.actions:
                sampleReward = []
                for nodePrime in self.nodes:
                    if node != nodePrime and (node, action, nodePrime) in self.S:
                        reward = self.S[(node, action, nodePrime)]
                        qValue = []
                        for actionPrime in self.actions:
                            if (nodePrime, actionPrime) in self.Q:
                                qValue.append(self.Q[(nodePrime, actionPrime)])
                        sampleReward.append(reward + self.gama * max(qValue))
                        self.Q[node, action] = ((1 - self.alpha) * self.Q[node, action]
                                                + self.alpha * sum(sampleReward))

    def getValueIteration(self, node):
        """Return the best (expected value, action) pair for ``node``."""
        return max(self.getActionValue(node))

    def getActionValue(self, node):
        """Return [(expected value, action)] for every available action."""
        actionValues = []
        for action in self.actions:
            self.actionruns += 1
            actionValues.append((self.getRewardValue(action, node), action))
        return actionValues

    def getPolicyIteration(self, node):
        """Return the (value, action) pairs allowed by the current policy."""
        return self.getPolicyValue(node)

    def getPolicyValue(self, node):
        """Evaluate the policy's chosen action for ``node``.

        Requires ``self.policies`` (node -> action) to have been set by
        the caller; it is NOT initialized in __init__, so calling this
        without setting it raises AttributeError (original behavior).
        """
        actionValues = []
        if self.policies[node] in self.actions:
            self.policyruns += 1
            action = self.policies[node]
            actionValues.append((self.getRewardValue(action, node), action))
        return actionValues

    def getRewardValue(self, action, node):
        """Return sum over s' of T[s,a,s'] * (R[s,a,s'] + gama * V[s'])."""
        rewards = []
        for nodePrime in self.nodes:
            self.rewardruns += 1
            if (node, action, nodePrime) in self.T:
                transition = self.T[(node, action, nodePrime)]
                reward = self.R[(node, action, nodePrime)]
                value = self.V[nodePrime]
                rewards.append(transition * (reward + (self.gama * value)))
        return sum(rewards)
    
    

