import planningproblem
import astar_fibheap as astar
from action import *
from plannersupport import flatten
from operator import itemgetter

class DiscreteActionFlags:
    """Per-action override flags used to tweak the discrete search.

    disabled: if set to True, the action is skipped during successor
        expansion; None leaves it enabled.
    cost: optional additive adjustment to the action's base cost;
        None means no adjustment.
    """
    def __init__(self):
        # Both flags start out as "no modification".
        self.disabled = self.cost = None

class DiscretePlanner:
    """Implements a planner for a discrete-only planning problem, which
    will then be used to help solve the hybrid planning problem.

    Solutions are memoized per start state -- and per intermediate state
    along any found path -- so repeated queries avoid redundant searches.
    """

    def __init__(self,problem):
        #TODO: double-check that it's a discrete problem?
        self.problem = problem
        self.astar = DiscretePlannerAstar(problem)
        #cache: canonical state tuple -> solution path (or None if unsolvable)
        self.solutions = {}
        #statistics: number of A* searches run, total cached path length
        self.numSearches = 0
        self.solutionDepth = 0

    def reset(self):
        """Clears the solution cache."""
        self.solutions = {}

    def solve(self,state=None):
        """Returns a solution path (list of actions) starting from state.
        If state is not provided, it starts at the initial state specified
        in the problem.  Returns None if no solution was found."""
        if state is None:
            state = self.problem.initialState
        ls = self.to_tuple(state)
        try:
            return self.solutions[ls]
        except KeyError:
            #No prior solution was computed
            self.numSearches += 1
            soln = self.astar.search_from(state)
            #Store prior solution (None results are cached too, so failed
            #searches are not repeated)
            self.solutions[ls] = soln
            if soln:
                #Store prior sub-solutions: every suffix of the path is
                #itself a solution from that suffix's start state
                for n in range(1,len(soln)):
                    self.solutions[self.to_tuple(soln[n].state)] = soln[n:]
                self.solutionDepth += len(soln)
            return soln

    def to_tuple(self,state):
        """Converts a state dict into a hashable, canonically-ordered
        tuple of its (flattened) values, sorted by key."""
        if len(state)==0: return tuple()
        #sort on keys only; values may not be mutually comparable
        _,vals=zip(*sorted(state.items(),key=itemgetter(0)))
        return tuple(flatten(vals))

    def transitionFlags(self,state,boundAction):
        """Returns a DiscreteActionFlags object that can be modified to
        change the behavior of boundAction at state."""
        key = (self.to_tuple(state),boundAction.action.name)
        pkey = self.to_tuple(boundAction.params)
        return self.astar.modifiedBoundActions.setdefault(key,dict()).setdefault(pkey,DiscreteActionFlags())

    def actionAtStateFlags(self,state,action):
        """Returns a DiscreteActionFlags object that controls action (for
        every parameter binding) at the given state."""
        key = (self.to_tuple(state),action.name)
        return self.astar.modifiedActions.setdefault(key,DiscreteActionFlags())

    def boundActionFlags(self,boundAction):
        """Returns a DiscreteActionFlags object that controls the given
        parameter binding of an action regardless of state."""
        key = boundAction.action.name
        pkey = self.to_tuple(boundAction.params)
        return self.astar.modifiedStatelessActions.setdefault(key,dict()).setdefault(pkey,DiscreteActionFlags())

    
class DiscretePlannerAstar(astar.AStar):
    """A* search specialized to the discrete planning problem, with
    support for externally supplied per-action modifications
    (see DiscreteActionFlags)."""

    def __init__(self,problem):
        self.problem = problem
        #visited state detection, keyed on canonical state tuples
        self.visited = {}
        #successor function modifications given by DiscreteActionFlags
        #-modifiedActions is a map from (state,action_name) pairs to modifications
        #-modifiedBoundActions is a map from (state,action_name) pairs to a map from params to modifications
        #-modifiedStatelessActions is a map from action_names to a map from params to modifications
        self.modifiedActions = {}
        self.modifiedBoundActions = {}
        self.modifiedStatelessActions = {}
        astar.AStar.__init__(self,problem.initialState)

    def search_from(self,state):
        """Performs a search from state, returning None if no solution exists
        or returns a list of actions that reaches the goal
        """
        self.set_start(state)
        if self.search():
            return self.result_path()
        return None

    def clear_visited(self):
        """Resets the visited-state table."""
        self.visited.clear()

    def to_tuple(self,state):
        """Converts a state dict into a hashable, canonically-ordered
        tuple of its (flattened) values, sorted by key."""
        if len(state)==0: return tuple()
        #sort on keys only; values may not be mutually comparable
        _,vals=zip(*sorted(state.items(),key=itemgetter(0)))
        return tuple(flatten(vals))

    def visit(self, state, node):
        """Records node as the search node reached for state."""
        self.visited[self.to_tuple(state)] = node

    def visited_state_node(self, state):
        """Returns the search node previously recorded for state, or None."""
        return self.visited.get(self.to_tuple(state),None)

    def successors(self,state):
        """Enumerates successor states of state.

        Returns (succ,costs,actions): parallel lists of successor states,
        transition costs, and the BoundActions producing them.  Actions
        disabled via the modification tables are skipped, and per-binding
        cost adjustments are added to the base action cost.  Successors
        the state space reports as infeasible are dropped."""
        succ = []
        costs = []
        actions = []
        skey = self.to_tuple(state)
        for samp in self.problem.actionSamplers:
            a = samp.action
            #whole-action (unbound) modification at this state, if any
            amod = self.modifiedActions.get((skey,a.name))
            if amod is not None:
                if amod.disabled == True:
                    #just ignore the action
                    continue
                if amod.cost != None:
                    raise RuntimeError("Modified unbound action costs not supported yet")

            #per-parameter-binding modification dictionaries
            modified = self.modifiedBoundActions.get((skey,a.name),dict())
            modified2 = self.modifiedStatelessActions.get(a.name,dict())

            samp.bindState(state)
            for params in samp.enumerate_discrete_domains():
                pkey = self.to_tuple(params)
                cost = a.cost(self.problem.env,params,state)
                #state-specific binding modification
                mod = modified.get(pkey)
                if mod is not None:
                    if mod.disabled==True: continue
                    if mod.cost != None: cost += mod.cost
                #stateless binding modification
                mod = modified2.get(pkey)
                if mod is not None:
                    if mod.disabled==True: continue
                    if mod.cost != None: cost += mod.cost

                newstate = a.apply(self.problem.env,params,state)
                if self.problem.stateSpace.infeasible(newstate,self.problem.env):
                    continue

                succ.append(newstate)
                costs.append(cost)
                actions.append(BoundAction(a,params))
        return succ,costs,actions

    def is_goal(self,state):
        """Returns True if state satisfies the problem's goal test."""
        return self.problem.goalTest(state,self.problem.env)

    def heuristic(self,state):
        """Heuristic estimate: the goal test's distance measure for state."""
        return self.problem.goalTest.distance(state,self.problem.env)
