'''
Created on Jul 14, 2009

@author: user
'''


import cliques.mdp.heuristicMdp
import cliques.mdp.dfsMdp
import math
import random
import logging
class rtdp(cliques.mdp.dfsMdp.dfsLookaheadMdp):
    '''
    RTDP-style (Real-Time Dynamic Programming) action chooser for the
    clique-search MDP.

    For every applicable action in a state it runs a fixed number of random
    rollout trials guided by a greedy heuristic policy, and selects the
    action whose average rollout cost is minimal.
    '''

    def __init__(self, k, edge_prob, num_of_nodes):
        '''
        Constructor.

        k            -- clique-size parameter, forwarded to the base class
        edge_prob    -- edge probability, forwarded to the base class
        num_of_nodes -- graph size, forwarded to the base class
        '''
        cliques.mdp.dfsMdp.dfsLookaheadMdp.__init__(self, k, edge_prob, num_of_nodes)
        # Number of random rollout trials averaged per candidate action.
        self.num_of_trials = 10

    def choose_action(self, state):
        '''
        Evaluate every applicable action by averaged rollout cost and
        return the tuple (best_action, best_value), minimizing value.
        Returns (None, inf) when the state has no applicable actions.
        '''
        best_action = None
        # float('inf') replaces the old magic sentinel 10000, which would
        # silently misbehave if every trial value exceeded it.
        min_value = float('inf')

        # Reset the global expansion counter before evaluating this state.
        # NOTE(review): relies on cliques.potentialCliques being importable
        # via the existing cliques.* imports — confirm against the package.
        cliques.potentialCliques.potentialCliques.instance_counter = 0
        for action in state.actions():
            value = self.run_trials(state, action)
            if value < min_value:
                best_action = action
                min_value = value
        return (best_action, min_value)

    def run_trials(self, state, action):
        '''
        Average the simulated rollout value of taking `action` in `state`
        over self.num_of_trials independent random trials.
        '''
        total = 0.0  # renamed from `sum`, which shadowed the builtin
        # `range` (not Py2-only `xrange`) — identical here and Py3-safe.
        for _ in range(self.num_of_trials):
            total += self.find_dfs_trial_value(state, action)
        return total / self.num_of_trials

    def find_dfs_trial_value(self, state, action):
        '''
        Simulate one random rollout: take `action` in `state`, then follow
        the greedy heuristic policy until a goal or terminal state is
        reached. Returns the accumulated depth (state.g plus the number of
        steps taken).

        A rollout that ends in a terminal (dead-end) state is instead
        penalized with the start depth plus the start state's branching
        factor.
        '''
        original_value = state.g
        original_state = state
        value = state.g
        while not self.is_goal(state) and not self.is_terminal(state):
            state = self.generate_random_state(state, action)
            action = self.choose_heuristic_action(state)
            value += 1
        if self.is_terminal(state):
            # Dead end: charge the start depth plus its branching factor.
            return original_value + len(list(original_state.actions()))
        return value

    def generate_random_state(self, state, action):
        '''
        Sample a random successor of `state` under `action`: each relevant
        possible neighbor is kept independently with probability
        self.pr(action, neighbor), and the pcdb is expanded accordingly.
        '''
        neighbors = [neighbor
                     for neighbor in self.relevant_possible_neighbors(state, action)
                     if random.random() < self.pr(action, neighbor)]
        new_pcdb = state.pcdb.expand(action, neighbors)
        return self.create_state(new_pcdb, state.g + 1)

    def choose_heuristic_action(self, state):
        '''
        Greedy heuristic policy: among the potential cliques whose core
        size equals the database's max_core, pick one uniformly at random
        and return a random node from its gcn list. Returns None when no
        potential clique has a maximal core.
        '''
        biggests = [kPc for kPc in state.pcdb
                    if state.pcdb.max_core == len(kPc.core)]
        if not biggests:
            return None
        # random.choice replaces the hand-rolled randint indexing; same
        # uniform distribution (and same IndexError on an empty gcn).
        kPc = random.choice(biggests)
        return random.choice(kPc.gcn)

    def pr(self, node, neighbor):
        '''
        Probability that `neighbor` is adjacent to `node`; constant self.p
        (the graph's edge probability) for every pair.
        '''
        return self.p