'''
Created on Jul 22, 2009

@author: sternron
'''
import cliques.mdp.heuristicMdp
from cliques.mdp.potentialBasedState import potentialBasedState
from cliques.potentialCliques import potentialCliques
import graph
class dfsState(potentialBasedState):
    '''A potential-based search state that becomes terminal as soon as
    its clique database can no longer generate anything.'''

    def __init__(self, pcdb, g):
        '''pcdb -- potential-cliques database; g -- accumulated cost so far.'''
        potentialBasedState.__init__(self, pcdb, g)
        # Dead end when the database has nothing left to generate.
        self.terminal = not self.pcdb.has_generated()

    def is_terminal(self):
        '''Report whether this state is a dead end.'''
        return self.terminal

class dfsMdp(cliques.mdp.heuristicMdp.heuristicMdp):
    '''
    Depth-first-search MDP solver.

    Expands the state space exhaustively via mutual recursion between
    choose_action (min over actions) and find_value (expectation over
    successors), returning the value and policy for the initial state only.
    '''

    def __init__(self, k, edge_prob, num_of_nodes):
        '''
        Constructor.

        k -- target clique size
        edge_prob -- edge probability of the random graph
        num_of_nodes -- number of nodes in the graph
        '''
        cliques.mdp.heuristicMdp.heuristicMdp.__init__(self, k, edge_prob, num_of_nodes)
        self.real_graph = graph.graph()

    def create_state_0(self, ckg, generated):
        '''Build the initial state: a fresh potential-cliques view with
        g = number of clique-graph nodes not yet generated.'''
        return dfsState(potentialCliques(self.k, self.search.pcdb),
                        len(ckg) - len(generated))

    def is_terminal(self, state):
        '''A state is terminal when its clique database is exhausted.
        Delegates to the state's own accessor rather than poking the
        attribute directly.'''
        return state.is_terminal()

    def run(self, ckg, generated):
        '''Solve from the initial state.

        Returns (v, policy): dicts mapping the initial state to its
        optimal value and optimal action, respectively.
        '''
        self.initial = self.create_state_0(ckg, generated)
        self.leafs = 0
        (action, value) = self.choose_action(self.initial)
        v = {self.initial: value}
        policy = {self.initial: action}
        return (v, policy)

    def create_state(self, pcdb, g):
        '''Factory for successor states (used during expansion).'''
        return dfsState(pcdb, g)

    def _report_leaf(self, g):
        '''Count a reached leaf and print progress every 1000 leaves.'''
        self.leafs = self.leafs + 1
        if self.leafs % 1000 == 0:
            # Single-argument print() is valid on both Python 2 and 3.
            print("G=%d   Leafs=%d" % (g, self.leafs))

    def find_value(self, state, action):
        '''Calculate Q[state, action] = sum_{s'} pr(s', a, s) * v(s').'''
        value = float(0)
        for (next_state, prob) in self.generate_next_states(state, action):
            if self.is_goal(next_state):
                # Goal leaf: value is the accumulated cost g.
                child_value = next_state.g
                self._report_leaf(next_state.g)
            elif next_state.is_terminal():
                # Dead end: no cliques left; pay the remaining k-1 penalty.
                child_value = next_state.g + self.k - 1
                self._report_leaf(next_state.g)
            else:
                # Internal node: its value is the best Q over its actions.
                (_, child_value) = self.choose_action(next_state)
            value = value + prob * child_value
        return value

    def choose_action(self, state):
        '''Return (best_action, min Q-value) over the state's actions.

        Uses float('inf') as the initial minimum: the previous sentinel
        of 10000 could be exceeded by legitimate Q-values, in which case
        best_action was silently left as None.
        '''
        min_value = float('inf')
        best_action = None
        for action in state.actions():
            value = self.find_value(state, action)
            print("action %d, value %f" % (action, value))
            if value < min_value:
                best_action = action
                min_value = value
        return (best_action, min_value)
            
class dfsLookaheadMdp(dfsMdp):
    '''
    Depth-limited variant of dfsMdp: the recursive expansion stops
    lookahead_depth levels below the initial state, where the heuristic
    h(state) estimates the remaining cost instead of a full expansion.
    '''

    def __init__(self, k, edge_prob, num_of_nodes):
        dfsMdp.__init__(self, k, edge_prob, num_of_nodes)
        # Default lookahead horizon (levels below the initial state).
        self.lookahead_depth = 2

    def set_lookahead_depth(self, new_depth):
        '''Set the lookahead horizon and recompute an upper bound on the
        number of states reachable within it.'''
        self.lookahead_depth = new_depth
        self.upper_bound_states = 1
        nodes = int(self.nodes)
        # range (not xrange) keeps this working on Python 3 as well;
        # semantics in a for-loop are identical. Each level contributes
        # a factor of i * 2^i for the i remaining nodes.
        for i in range(nodes - 1, max(nodes - 1 - new_depth, 0), -1):
            self.upper_bound_states = self.upper_bound_states * i * pow(2, i)
        # print("O(#states)=%d" % self.upper_bound_states)

    def choose_action(self, state):
        '''Return (best_action, value).

        Within the lookahead horizon, actions are evaluated recursively
        as in dfsMdp; at the horizon the value is estimated as
        g + h(state) and no action is returned (None).
        '''
        if state.g - self.initial.g >= self.lookahead_depth:
            # Reached the lookahead depth: fall back to the heuristic.
            return (None, state.g + self.h(state))
        # float('inf') instead of the 10000 sentinel, which legitimate
        # values could exceed, silently yielding best_action=None.
        min_value = float('inf')
        best_action = None
        for action in state.actions():
            value = self.find_value(state, action)
            if value < min_value:
                best_action = action
                min_value = value
        return (best_action, min_value)





     