'''
Created on Jul 13, 2009

@author: user
'''

from cliques.mdp.mdpLookahead import mdpLookahead

class limitedStatesMdp(mdpLookahead):
    """
    MDP lookahead variant that bounds the size of the search.

    Expansion stops once the state graph holds more than ``max_states``
    nodes, and each action contributes at most ``max_states_per_action``
    successors.  Probability mass that was never enumerated is backed up
    with a heuristic estimate in ``q``.
    """


    def __init__(self, k, edge_prob, num_of_nodes):
        """
        Initialize the base lookahead MDP and install the expansion caps.
        """
        mdpLookahead.__init__(self, k, edge_prob, num_of_nodes)
        # Hard caps on the search effort; tune as needed.
        self.max_states_per_action = 200
        self.max_states = 10000

    def q(self, state, action, v):
        """
        Return the Q-value of taking *action* in *state* under the value
        table *v*.  Successor probabilities may not sum to 1 (truncated
        enumeration); the residual mass is valued at g(state) + h(state).
        """
        expected = 0.0
        covered = 0.0
        for (succ, prob) in self.children(state, action):
            expected += prob * v[succ]
            covered += prob
        # Back up the un-enumerated probability mass with the heuristic.
        expected += (1 - covered) * (state.g + self.h(state))
        return expected


    def expand(self, state, v, policy, openlist):
        """
        Expand *state*: either close it as terminal, or generate its
        successors (subject to the state caps) into the state graph,
        seeding their values in *v* and queueing them on *openlist*.
        """
        base_cost = state.g
        if self.is_goal(state) or len(state.generated) == 0:
            # Terminal: value equals accumulated cost, policy self-loops.
            v[state] = base_cost
            policy[state] = state
            self.goals.add(state)
            return
        if len(self.state_graph) > self.max_states:
            # Global cap already exceeded -- do not expand further.
            return
        for action in state.actions():
            emitted = 0
            # Intermediate (state, action) node links state to successors.
            self.state_graph.add_node((state, action))
            self.state_graph.add_edge(state, (state, action))
            for (succ, succ_prob) in self.generate_next_states(state, action):
                # NOTE: duplicate detection is deliberately disabled here,
                # so every generated successor is treated as a new node.
                self.states.add(succ)
                self.state_graph.add_node(succ)

                # Seed the successor's value with the parent's heuristic
                # (uses h(state), not h(succ) -- preserved as written).
                v[succ] = base_cost + self.h(state)
                if base_cost - self.state_0.g < self.lookahead_depth:
                    openlist.append(succ)
                self.state_graph.add_edge((state, action), succ, succ_prob)
                emitted += 1
                # Stop enumerating once either cap is exceeded.
                if emitted > self.max_states_per_action:
                    break
                if len(self.state_graph) > self.max_states:
                    break