'''
Created on May 26, 2009

@author: user
'''
from graph import *

from cliques.cliqueFinder import cliqueFinder
from cliques.utils import powerset
from cliques.utils import graphs_equal
from cliques.utils import list_set_equals
from cliques.potentialBasedSearch import potentialBasedSearch
from cliques.mdp.edgeProbModel import edgeProbModel
import sys

class cliqueMdpSolver(object):
    def __init__(self,k,edge_prob, num_of_nodes):
        self.nodes= num_of_nodes
        self.no_goal_value = self.nodes
        self.p = edge_prob
        self.k = k
        self.clique_finder = cliqueFinder()
        self.edge_model = edgeProbModel(self.p)

    def add_state(self, state, v, openlist, moves, action, next_state, next_state_prob):
        ''' Add a new state as a predecessor to the state_graph '''
        same_state = self.find_same_state(next_state)
        #                    same_state=None
        if same_state is None:
            self.states.add(next_state)
            self.state_graph.add_node(next_state)
            #print "Added %s" % next_state
            v[next_state] = moves + self.h(state)
            openlist.append(next_state)
            same_state = next_state
        
        self.state_graph.add_edge((state, action), same_state, next_state_prob)


    def expand(self, state, v, policy, openlist):
        moves = state.g
        if self.is_goal(state):
            v[state] = moves
            policy[state] = state
            self.goals.add(state)
        else:
            actions = state.actions()
            if len(actions)==0: # No relevant actions
                v[state]=moves+self.k-1
                policy[state]=None
            else:
                for action in state.actions():
                    self.state_graph.add_node((state, action))
                    self.state_graph.add_edge(state, (state, action))                
                    for (next_state, next_state_prob) in self.generate_next_states(state, action):
                        self.add_state(state, v, openlist, moves, action, next_state, next_state_prob)
        
   
    def h(self,state):
        ''' Return an admissible heuristic estimate for the expected number of 
        moves to the goal '''
        return 0
    
    def q(self, state, action, v):
        action_value = 0
        for (next_state, prob) in self.children(state, action):
            action_value = action_value + prob * v[next_state]
        return action_value


    def iterate_state(self, state,v):
        ''' Return a 2-tuple of the new value of a state 
            and the new best policy according to v  '''
        min_value = self.nodes+1
        new_policy = None 
        for action in state.actions():
            action_value = self.q(state, action,v)
            if action_value < min_value:
                min_value = action_value
                new_policy = action

        if new_policy==None: # If actions are limited
            new_policy = self.search.generated[0]
        return (min_value,new_policy)

    def generate_next_states(self,state,action):
        ''' Generate the next state possible by applying action to state '''
        ''' Return a generator of 2-tuples containing the state and transition probability '''
        # Create state for every possible neighbors of action
        generated = list(state.generated)
        generated.remove(action)
        #for possible_neighbors in powerset(unexplored_nodes): #self.find_relevant_neighbors(state, action):
        for possible_neighbors in self.find_relevant_neighbors(state, action):
            new_ckg = graph()
            new_ckg.add_graph(state.ckg)
            new_generated = list(generated)
            for new_neighbor in possible_neighbors:
                if new_ckg.has_node(new_neighbor)==False:                   
                    new_ckg.add_node(new_neighbor)
                    new_generated.append(new_neighbor)
                
                new_ckg.add_edge(action,new_neighbor)

            new_state = cliqueSearchState(new_ckg,new_generated)
            yield (new_state, self.tr(state,action,new_state))        
    
    def children(self,state,action):
        ''' 
        Returns all the possible states that may result of applying action to state
        Each result is a 2-tuple of the state and the transition probability 
        '''
        for new_state in self.state_graph.neighbors((state,action)):
            yield (new_state, self.state_graph.get_edge_weight((state,action),new_state))

    def find_relevant_neighbors(self,state, action):
        ''' Find all the possible relevant neighbors of the action given the current state '''
        all_relevant_nodes = self.relevant_possible_neighbors(state, action)
        for relevant_neighbors in powerset(all_relevant_nodes):
            yield relevant_neighbors
            
    def relevant_possible_neighbors(self,state,action):
        ''' Return all the relevant nodes that my become neighbors of the node action '''
        unexplored_nodes = list(state.generated)
        for new_node in xrange(int(self.nodes-len(state.ckg))):
            unexplored_nodes.append('new'+str(new_node))
        unexplored_nodes.remove(action)
        return unexplored_nodes
            
    def tr(self,state,action,new_state):
        ''' Calculate the probabilty to reach new_state by applying action to state '''
        new_node_degree = len(new_state.ckg.neighbors(action))        
        old_node_degree = len(state.ckg.neighbors(action))
        # POSSIBLE EFFICIENCY IMPROVEMENT: relevant_possible_neighbors() may be heavy
        max_new_node_degree = len(self.relevant_possible_neighbors(state, action))
        new_edges = new_node_degree-old_node_degree
        new_non_edges = max_new_node_degree-new_edges
        return pow(self.p,new_edges)*pow(1-self.p,new_non_edges)
      
    def reward(self,state,action):
        ''' Calculate the immediate reward of performing action when in node state '''
        if state.is_goal():
            return len(state.ckg)-len(generated)+1 # Count number of steps
        else:
            return 0 # Should change to very large number (probably)
       
    def is_goal(self,state):
        for action in state.actions():
            self.clique_finder.setup(state.ckg)
            if self.clique_finder.has_clique(self.k-1, state.ckg[action]):
                return True
        return False       
        
    def create_state_0(self,ckg,generated):
        ''' Create a state 0 with a copy of ckg and generated,since can be mutable'''
        copy_ckg = graph()
        copy_ckg.add_graph(ckg)
        copy_generated = list(generated)
        return cliqueSearchState(copy_ckg,copy_generated)

    def setup(self,ckg,generated):
        initial_state = self.create_state_0(ckg, generated)
        self.initial= initial_state
        
    def run(self, ckg,generated):
        self.setup(ckg, generated)
        ''' Runs a value iteration algorithm and return a 2-tuple, containing:
         1. A dictionary of state to optimal value 
         2. A dictionary of state to optimal action (policy)
         '''
        (v_new, policy) = self.build_state_graph(self.initial)

        # Store state graph (for future usages)
        # self.state_graph = state_graph
        
        # Start iterating
        v_old = dict()        
        iteration = 0
        while True:
            print "Value iteration %d" % iteration
            v_old = v_new
            v_new = dict()
            bellman_error = 0
            for state in v_old.keys():
                if state in self.goals:
                    v_new[state]=v_old[state]
                else:
                    (min_value, state_policy) = self.iterate_state(state, v_old)
                            
                    v_new[state]= min_value
                    policy[state]=state_policy
                    error=abs(v_new[state]-v_old[state])
                    if bellman_error<error:
                        bellman_error=error
            
            iteration=iteration+1
            if bellman_error==0:
                break;

        return (v_new,policy)
        
        
    def build_state_graph(self,initial_state):
        ''' Builds self.state_graph starting from the given initial state '''
        self.state_graph = digraph()
        self.state_0=initial_state
        v_new = dict()
        policy = dict()
                
        # initialization
        # Span state tree
        print "Building the state graph..."
        openlist = [initial_state]
        closedlist = set()
        self.state_graph.add_node(initial_state)
        v_new[initial_state]=self.no_goal_value
        self.goals = set()
        self.states = set()
        self.states.add(initial_state)
        
        iteration=0
        
        while len(openlist)>0:
            state=openlist.pop()         
            iteration=iteration+1
            if iteration % 100 ==0:
                print "Building state graph [%d] len(open)=%d, len(state graph)=%d, goals=%s"\
                     % (iteration, len(openlist),len(self.states),len(self.goals))
            self.expand(state, v_new, policy, openlist)
        
        # Add policy to first state
        (new_value, state_policy) = self.iterate_state(initial_state, v_new)
        if state_policy==None:
            print "DEBUG"
        v_new[initial_state]=new_value
        policy[initial_state]=state_policy
        return (v_new,policy)        
    
    def find_same_state(self,searched_state):
        ''' Return a state from states that is the same as the given state '''
        g_searched_state = searched_state.g
        generated_searched_state = searched_state.generated
        gen_size = len(generated_searched_state)
        for state in self.states:
            if state.g==g_searched_state:
                if len(state.generated)==gen_size:
                    if state.same(searched_state):
                        return state
        return None
                
class smartCliqueMdpSolver(cliqueMdpSolver):
    '''
    MDP solver that restricts candidate neighbors to nodes that have
    already been generated (no placeholder "new" nodes).
    '''
    def __init__(self, k, edge_prob, num_of_nodes):
        cliqueMdpSolver.__init__(self, k, edge_prob, num_of_nodes)

    def relevant_possible_neighbors(self, state, action):
        ''' Find all the relevant possible neighbors of the action given the current state '''
        candidates = list(state.generated)
        # Drop the node being explored; it cannot be its own neighbor.
        del candidates[candidates.index(action)]
        return candidates

class smarterCliqueMdpSolver(cliqueMdpSolver):
    '''
    MDP solver that utilizes data on the potential cliques (kPc) gathered
    by an external search object in order to prune irrelevant neighbors.
    '''
    def __init__(self, k, edge_prob, num_of_nodes):
        cliqueMdpSolver.__init__(self, k, edge_prob, num_of_nodes)

    def set_search(self, search):
        ''' Attach the search object that provides generated_to_kPc. '''
        self.search = search

    def relevant_possible_neighbors(self, state, action):
        ''' Find all the relevant possible neighbors of the action given
        the current state: frontier nodes that still participate in at
        least one potential k-clique. '''
        unexplored_nodes = list(state.generated)
        unexplored_nodes.remove(action)
        generated_to_kPc = self.search.generated_to_kPc
        # BUGFIX: removed leftover debug code ("if action in ...: print 1");
        # action was already removed above, so the check was dead anyway.
        return [node for node in unexplored_nodes if len(generated_to_kPc[node]) > 0]

class cliqueSearchState(object):
    '''
    A state in the k-clique search state space: the currently known graph
    (ckg) together with the frontier of generated-but-unexplored nodes.
    '''

    def __init__(self, ckg, generated):
        ''' Store the known graph and the frontier list; g is the number
        of already-explored nodes (known minus frontier). '''
        self.ckg = ckg
        self.generated = generated
        self.g = len(ckg) - len(generated)

    def same(self, other):
        ''' Structural equality: identical known graph and identical
        frontier (as a set). '''
        if not graphs_equal(self.ckg, other.ckg):
            return False
        return list_set_equals(self.generated, other.generated)

    def __hash__(self):
        # Cheap size-based hash; true structural equality goes via same().
        return 13 * len(self.ckg) + 7 * len(self.generated)

    def __str__(self):
        return "Gen[%s],Ckg[%s]" % (self.generated, self.ckg)

    def copy(self):
        ''' Return a copy of this state with its own graph and frontier
        list, safe to mutate independently. '''
        ckg_clone = graph()
        ckg_clone.add_graph(self.ckg)
        return cliqueSearchState(ckg_clone, list(self.generated))

    def actions(self):
        ''' Iterate over the frontier nodes that may be explored next. '''
        return iter(self.generated)
    

def main():
    test_graph = graph()
    nodes = 5
    edges = nodes*4/2-2
    p=edges/(float(nodes)*(nodes-1)/2)
    test_graph.generate(nodes,edges)    
    k=4
    solver = cliqueMdpSolver(k,p, nodes)
    
    # Perform first 2 steps
    ckg = graph()
    initial_node = test_graph.nodes()[0]
    openlist = [initial_node]
    closedlist = set()
    ckg.add_node(initial_node)
    initial_iterations=2
    for i in xrange(initial_iterations):
        print "Initial iteration %d" % i
        node = openlist.pop()
        closedlist.add(node)
        for neighbor in test_graph[node]:
            if neighbor not in closedlist:
                if neighbor not in openlist:
                    openlist.append(neighbor)
                    ckg.add_node(neighbor)
                ckg.add_edge(node,neighbor)
                
    
    initial_state = cliqueSearchState(ckg, openlist)
    (v, policy) = solver.run(initial_state)

    hist = dict()
    for value in v.values():
        if hist.has_key(value):
            hist[value]=hist[value]+1
        else:
            hist[value]=1
    print hist
       
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    try:
        main()
    except:      
        # Log the failure type before re-raising, so the full traceback
        # is still printed by the interpreter after our diagnostics.
        print "Error"
        print "Unexpected error:", sys.exc_info()[0]
        print "After"
        raise

     