'''
Created on Jul 8, 2009

@author: sternron
'''
from cliques.mdp.potentialBasedState import potentialBasedState

class AoStar(heuristicMdp):
    """AO*-style search over an MDP state graph.

    Repeatedly expands the tip states of the current best partial policy
    and backs values up to predecessors, until the policy graph has no
    unexpanded tip state left.

    NOTE(review): this module references names that are not imported in
    this file (``heuristicMdp``, ``graph``, ``potentialCliques``) -- they
    must come from elsewhere in the package; verify before use.
    """

    def __init__(self, k, edge_prob, num_of_nodes):
        """Initialize the search.

        BUG FIX: the original signature was ``def __init__(selfparams):``
        (missing commas), which left ``self``, ``k``, ``edge_prob`` and
        ``num_of_nodes`` undefined inside the body.
        """
        heuristicMdp.__init__(self, k, edge_prob, num_of_nodes)
        # Kept for interface compatibility; the algorithm below finds tip
        # states by walking the state graph rather than via this list.
        self.openlist = []

    def run(self, initial_state):
        ''' Runs a value iteration algorithm and return a 2-tuple, containing:
         1. A dictionary of state to optimal value 
         2. A dictionary of state to optimal action (policy)
         '''
        # Setup
        self.goals = set()
        self.states = set()
        v = dict()       # state -> current value estimate
        policy = dict()  # state -> chosen action (goal states map to themselves)
        state_0 = potentialBasedState(initial_state.ckg, initial_state.generated,
                                      potentialCliques(self.k, self.search.pcdb))
        self.state_graph = graph.digraph()
        self.state_graph.add_node(state_0)
        self.states.add(state_0)
        state = state_0
        counter = 0
        while state is not None:
            counter = counter + 1
            if counter % 100 == 0:
                # Progress trace (parenthesized form works on Python 2 and 3).
                print("States=%d" % len(self.state_graph))

            # If state is goal - set the real value and the policy to a loop
            if self.is_goal(state) or len(state.generated) == 0:
                # NOTE(review): expand() uses self.g(state) for the same case
                # while this uses state.g -- confirm which is intended.
                v[state] = state.g
                policy[state] = state
                self.goals.add(state)
            else:
                self.expand(state, v, policy)

            self.update_values(state, v, policy)
            state = self.find_tip_state(state_0, policy, 1)

        # BUG FIX: the docstring promises a 2-tuple but the original body
        # returned None implicitly.
        return (v, policy)

    def expand(self, state, v, policy):
        """Expand ``state``: add its action nodes and successor states to the
        state graph, then set v[state] / policy[state] from the best action."""
        if self.is_goal(state) or len(state.generated) == 0:
            v[state] = self.g(state)
            policy[state] = state
            self.goals.add(state)
        else:
            # NOTE(review): self.nodes acts as an upper bound on any action
            # value; if every action exceeds it, best_action stays None --
            # confirm the bound always holds.
            min_action_value = self.nodes
            best_action = None
            for action in state.actions():
                # (state, action) pairs are explicit AND-nodes of the graph.
                self.state_graph.add_node((state, action))
                self.state_graph.add_edge(state, (state, action))
                for (next_state, next_state_prob) in self.generate_next_states(state, action):
                    same_state = self.find_same_state(next_state)
                    if same_state is None:
                        self.states.add(next_state)
                        self.state_graph.add_node(next_state)
                        # Initial estimate: cost so far + heuristic.
                        v[next_state] = next_state.g + next_state.pcdb.h
                        same_state = next_state
                    self.state_graph.add_edge((state, action), same_state, next_state_prob)

                # Check action value
                action_value = self.q(state, action, v)
                if action_value < min_action_value:
                    min_action_value = action_value
                    best_action = action
            # Set v and policy
            policy[state] = best_action
            v[state] = min_action_value

    def find_tip_state(self, state_0, policy, num_of_states):
        ''' Performs a DFS on the state_graph according to the current policy.

        Returns the first unexpanded state reachable under ``policy`` from
        ``state_0``, or None when the policy graph is fully expanded.
        (``num_of_states`` is currently unused; kept for interface
        compatibility.)
        '''
        possible_tip_states = [state_0]
        while len(possible_tip_states) > 0:
            state = possible_tip_states.pop()  # Depth First Search
            if not state.expanded:
                return state
            else:
                action_state = (state, policy[state])
                # BUG FIX: ``neighbors`` is a method of pygraph's digraph,
                # not a mapping -- the original subscripted it
                # (``neighbors[action_state]``), which raises TypeError.
                possible_tip_states.extend(self.state_graph.neighbors(action_state))
        return None

    def update_values(self, state, v, policy):
        ''' Update value of state predecessors (transitively).

        NOTE(review): walks ``incidents`` with no visited set, so a cyclic
        state graph would loop forever -- confirm the graph is acyclic.
        '''
        states_to_update = list(self.state_graph.incidents(state))

        while len(states_to_update) > 0:
            state = states_to_update.pop()
            (new_value, new_policy) = self.iterate_state(state, v)
            v[state] = new_value
            policy[state] = new_policy
            states_to_update.extend(self.state_graph.incidents(state))