import random
import math
import types
import sys
import copy

import planningproblem
import discreteplanner
import diffusion
import plannersupport
import output

from statcollector import *
from iterative_subproblem import *
from action import *

# Global switch for the ISRR-experiment behavior of DiscretePriorityPlanner:
# when True, discrete plans are used to restrict which successors may be
# sampled (via allowedSuccessors) instead of being lifted directly into
# continuous plans.
DISCRETE_PLANNER_ISRR = False

class PlanSettings:
    """Configuration options for the PTR planner family.

    Every attribute set in __init__ is a default; any keyword argument
    passed to the constructor overrides the corresponding attribute
    (unknown keys simply become new attributes on the settings object).
    """
    def __init__(self,**opts):
        self.planner = 'normal'
        self.lazyMaxDepth = 100   #for lazy planning
        self.oneShot = True       #for discrete planning
        # parameters for diffusion encouragement
        self.diffusionDimensions = 2
        self.diffusionTechnique = 'grid'
        #self.diffusionTechnique = 'kernel'
        #self.diffusionTechnique = 'voronoi'
        #self.diffusionTechnique = 'random'
        # scaling denotes a relative size of the state space.  It can be a
        # dict
        self.scaling = 1.0
        self.bandwidth = 0.1
        # parameters for heuristic selection
        #self.heuristicTechnique = 'none'
        self.heuristicTechnique = 'goal'
        #self.heuristicTechnique = 'discrete'
        self.heuristicWeighting = 5
        
        # states that are irrelevant to outer-level planning
        self.irrelevant = set()
        
        # how many successors to sample per outer planning iteration
        self.numSuccessors = 1
        # how often to print data about the outer planning tree
        self.printFrequency = 100
        
        # how many iterations to take for sampling a valid action parameter
        self.actionParameterSamples = 100

        # how many newton root solving iterations when solving for equality
        # preconditions
        self.actionBindSolveIters = 100

        # how much time to give to subproblems
        # this can be an expression regarding N
        self.subplanTime = None

        # other options: apply caller overrides last so they win over the
        # defaults above.  items() rather than the Python2-only iteritems()
        # keeps this working under both Python 2 and 3.
        for (k,v) in opts.items():
            setattr(self,k,v)

class PlanTreeNode:
    """A node of the outer planning tree, wrapping a single planner state.

    Edges do the bookkeeping: a PlanTreeEdge appends itself to the
    appropriate in_edges / out_edges lists when it is constructed.
    """
    def __init__(self, state):
        self.state = state    # the (hybrid) state stored at this node
        self.out_edges = []   # edges leaving this node
        self.in_edges = []    # edges entering this node (normally exactly one)

class PlanTreeEdge:
    """An edge of the planning tree from node v1 to node v2 via action 'a'.

    An edge may carry an IterativeSubproblem 'plan' (e.g., a motion plan)
    that must be solved before the edge counts as complete.  The tree root
    is represented by a self-loop edge (v1 is v2).
    """
    def __init__(self, v1, action, v2, plan=None):
        # bugfix: the original raised RuntimeException, which is not a
        # Python builtin and would itself raise a NameError
        if (not isinstance(v1, PlanTreeNode)) or \
                (not isinstance(v2, PlanTreeNode)):
            raise RuntimeError("Edge head, tail must be tree-node type")
        self.v1 = v1
        self.v2 = v2
        v1.out_edges.append(self)
        v2.in_edges.append(self)
        self.a = action
        self.plan = plan
        self.planTotalIters = 0   # total subproblem iterations spent so far

    def get_head(self):
        return self.v1

    def get_tail(self):
        return self.v2

    def get_action(self):
        return self.a

    def get_plan(self):
        return self.plan

    def update_plan(self, v):
        """Runs up to v more iterations of the edge's subproblem, stopping
        early if it finishes."""
        for iters in xrange(v):
            self.planTotalIters += 1
            self.plan.plan_more()
            if self.plan.done():
                break

    def plan_iters(self):
        """Total subproblem iterations spent on this edge so far."""
        return self.planTotalIters

    def done_planning(self):
        """True if the subproblem has terminated (successfully or not).
        Edges with no subproblem are trivially done."""
        if not self.plan: return True
        return self.plan.done()

    def complete(self):
        """True if the subproblem terminated successfully (or none exists)."""
        if not self.plan: return True
        return self.plan.done() and not self.plan.failed()

    def failed(self):
        """True if the subproblem terminated unsuccessfully."""
        if not self.plan: return False
        return self.plan.done() and self.plan.failed()

class PlanTree:
    """The outer planning tree.  The root node carries a dummy self-loop
    edge so that every node has exactly one in-edge, which simplifies
    path extraction."""

    def __init__(self, init_state, settings=None):
        """init_state: the root planner state.
        settings: a PlanSettings instance.  A fresh instance is created if
        not given (the original mutable default argument shared a single
        PlanSettings object across every tree constructed without one)."""
        if settings is None:
            settings = PlanSettings()
        self.head = PlanTreeNode(init_state)
        self.activeEdges = set()
        self.completeEdges = list()
        self.failedEdges = list()
        
        self.settings = settings
        if hasattr(settings,"seed"):
            random.seed(settings.seed)

        #initialize root of tree with a dummy self-loop edge
        eInitial = PlanTreeEdge(self.head, BoundAction(None,None),self.head)
        self.add_edge(eInitial)
        self.complete(eInitial)

    def add_edge(self, edge):
        """Called when a new edge is added to the tree"""
        if not isinstance(edge, PlanTreeEdge):
            # bugfix: RuntimeException is not a Python builtin
            raise RuntimeError("New edge must be tree-edge type")
        self.activeEdges.add(edge)

    def complete(self, edge):
        """Marks an edge as complete"""
        self.activeEdges.remove(edge)
        self.completeEdges.append(edge)

    def fail(self,edge):
        """Marks an edge as failed"""
        self.activeEdges.remove(edge)
        self.failedEdges.append(edge)

    def active_edges(self):
        return self.activeEdges

    def complete_edges(self):
        return self.completeEdges

    def failed_edges(self):
        return self.failedEdges

    def size(self):
        """Number of active plus completed edges (includes the root dummy)."""
        return len(self.completeEdges) + len(self.activeEdges)

    def active_count(self):
        return len(self.activeEdges)

    def complete_count(self):
        """Completed edge count, excluding the dummy root edge."""
        return len(self.completeEdges)-1

    def failed_count(self):
        return len(self.failedEdges)

    def path_to(self,e):
        """Returns the path from the root to the edge e"""
        path = []
        while e.get_head()!=e.get_tail():
            path.append(e)
            assert len(e.get_head().in_edges)==1
            e = e.get_head().in_edges[0]
        path.reverse()
        return path

    def pick_nodes_random(self,k):
        """Picks k nodes uniformly at random from the tree. An O(k) operation"""
        return [random.choice(self.completeEdges).get_tail() for i in xrange(k)]

    def enumerate(self,n=None,nodes=None,edges=None):
        """Assigns integer ids to every node reachable from n (the whole
        tree when n is None) and (head-id, tail-id) pairs to every edge.
        Returns (nodes,edges) from the top-level call.

        nodes/edges default to fresh dicts; the original mutable default
        arguments accumulated stale entries across calls (e.g. repeated
        dump() calls)."""
        if nodes is None: nodes = {}
        if edges is None: edges = {}
        if n==None:
            nodes[self.head] = 0
            #first edge is a dummy
            assert self.head.out_edges[0].get_tail() == self.head
            for e in self.head.out_edges[1:]:
                self.enumerate(e.get_tail(),nodes,edges)
            return nodes,edges
        else:
            nodes[n] = len(nodes)
            for e in n.in_edges:
                edges[e] = (nodes[e.get_head()],nodes[e.get_tail()])
            for e in n.out_edges:
                self.enumerate(e.get_tail(),nodes,edges)

    def dump(self,fn):
        """Writes the tree into a TGF file"""
        nodes,edges = self.enumerate()
        f = open(fn,'w')
        for (id,n) in sorted((id,n) for (n,id) in nodes.items()):
            f.write(str(id+1)+"    "+str(n.state)+'\n')
        # now write edges
        f.write('#\n')
        for (id1,id2,e) in sorted((id1,id2,e) for (e,(id1,id2)) in edges.items()):
            status = "active"
            if e.failed(): status = "failed"
            if e.complete():
                if not e.get_plan() or e.get_plan().solution() == None:
                    status = "complete"
                else:
                    status = str(e.get_plan().solution())
            f.write(str(id1+1)+" "+str(id2+1)+"    "+str(e.a)+"; "+status+'\n')
        f.close()


class PTRPlanner:
    """Implements the PTR planner.  To use, construct the Planner given a
    PlanningProblem instance, and run planMore() repeatedly, or plan() for
    some number of iterations."""

    def __init__(self, problem, settings):
        self.problem = problem
        self.settings = settings
        self.tree = PlanTree(problem.initialState,settings)
        self.diffusion = diffusion.Diffusion(settings,problem.initialState)
        self.iters = 0
        self.numSuccessorsSampled = 0
        self.numApplicable = dict((a.name,0) for a in problem.actions)
        self.numParameterSamples = dict((a.name,0) for a in problem.actions)
        self.numParameterSuccesses = dict((a.name,0) for a in problem.actions)
        self.numFeasibleSuccessors = 0
        self.numInfeasibleSuccessors = [0]*len(problem.stateSpace.constraints)
        if settings.heuristicTechnique == 'discrete':
            self.discretePlanner = discreteplanner.DiscretePlanner(problem.constructDiscrete())
            print "Discrete Problem:"
            self.discretePlanner.problem.print_properties()
        self.diffusion.add_node(self.tree.head.state,self.tree.head.in_edges[0])

    def planMore(self):
        """Plans a single outer iteration of PTR.  Returns the solution path
        if found."""
        self.iters += 1
        if self.iters % self.settings.printFrequency == 0:
            print
            print "Tree size:", self.tree.size()
            print "Edges active:", self.tree.active_count(), \
                    ", complete:",self.tree.complete_count(), \
                    ", failed:", self.tree.failed_count()
            sys.stdout.flush()
        # Add new nodes/edges to tree
        n,newEdges = self.extend_tree()
        if newEdges == None:  #indicates failure
            return False
        for e in list(self.tree.active_edges()):
            # Run planner for a while on active edge
            if e.complete() or self.plan_more(e, self.time(e)):
                #mark the edge complete
                self.tree.complete(e)
                #add state to the density estimator
                self.diffusion.add_node(e.get_tail().state,e)
                #call the callback
                self.completed_edge(e)

                # if planning was successful and the end state is a goal,
                #  return the path
                if self.problem.goalTest(e.get_tail().state,self.problem.env):
                    return self.tree.path_to(e)
            if e.failed():
                self.tree.fail(e)
        return None
    
    def plan(self, iters):
        for i in xrange(iters):
            r = self.planMore()
            if r != None:
                return r
        # if the planner is unsuccessful in "iters" iterations,
        #  return None
        return None

    def addEdge(self,node,action,newState):
        """Adds an edge obtained by applying action to the state of node
        'node'.  newState is the resulting state."""
        #parse out the motion planning precondition, if any
        moplan = None
        mps = action.action.planningPreconds()
        if len(mps)==0:
            moplan = None
        elif len(mps) > 1:
            raise RuntimeError("TODO: multiple motion planning preconditions per action")
        else:
            #parse out the configuration space that the plan must use
            localvars = copy.deepcopy(node.state)
            localvars.update(copy.deepcopy(action.params))
            moplan = eval(mps[0].code,self.problem.env,localvars)
            if not isinstance(moplan,IterativeSubproblem):
                raise RuntimeError("Error, motion planning precondition didnt evaluate to an IterativeSubproblem object")

        #make add the edge
        e = PlanTreeEdge(node,action,PlanTreeNode(newState),moplan)
        self.tree.add_edge(e)
        return e

    def completed_edge(self,e):
        """Called upon completion of an edge"""
        pass

    def pick_nodes(self,k):
        return self.diffusion.pick_nodes(k)

    def path_to(self,e):
        """Returns the path from the root to the edge e"""
        return self.tree.path_to(e)

    def cost_to(self,e):
        """Returns the sum of edge costs leading to and including this edge"""
        sumcost = 0
        while e.get_head()!=e.get_tail():
            sumcost += e.a.cost(self.problem.env,e.get_head().state)
            e = e.get_head().in_edges[0]
            assert len(e.get_head().in_edges)==1
        return sumcost

    def heuristic_value(self,node):
        """Returns the heuristic value of this node, as given by the
        heuristicTechnique setting"""
        if self.settings.heuristicTechnique=='goal':
            return self.problem.goalTest.distance(node.state,self.problem.env)
        elif self.settings.heuristicTechnique=='discrete':
            #extract discrete state
            dstate = dict([(s,node.state[s]) for s in self.discretePlanner.problem.stateSpace.names])
            plan = self.discretePlanner.solve(dstate)
            if plan == None: return float('inf')
            else: return len(plan)
        elif self.settings.heuristicTechnique=='none':
            return 0
        else:
            raise RuntimeError('Invalid heuristic technique'+self.settings.heuristicTechnique)

    def pick_extend_node(self):
        if self.settings.heuristicTechnique=='none':
            #default implementation picks a node just using diffusion
            return self.pick_nodes(1)[0]
        else:
        #this implementation picks k nodes using the given diffusionTechique
        #and uses the one that is closest to the goal.  The larger k is, the
        #greedier the search will be.
            k = self.settings.heuristicWeighting
            nodes = self.pick_nodes(k)
            d,n = min((self.heuristic_value(n),n) for n in nodes)
            return n

    def extend_tree(self):
        """Extends the search tree by sampling valid successors from a
        sampled node in the tree.  Returns a list of edges leading from
        the node to its new children, or None if it believes the problem is
        infeasible.

        settings.heuristicTechnique = 'none': sample random node
        settings.heuristicTechnique = 'goal': sample k nodes, pick one
          closest to the goal.
        settings.heuristicTechnique = 'discrete': produce a discrete plan,
          then attempt to instantiate continuous parameters for the plan.
        """
        n = self.pick_extend_node()
        if n == None: return None,[]
        cands = self.sample_successors(n)
        res = []
        for a,c in cands:
            res.append(self.addEdge(n,a,c))
        return n,res
        
    def sample_successors(self, node):
        self.numSuccessorsSampled += 1
        r = []
        #Select all applicable actions
        applicable = []
        for i,a in enumerate(self.problem.actions):
            if a.applicable(env=self.problem.env,state=node.state):
                self.numApplicable[a.name] += 1
                asamp = self.problem.actionSamplers[i]
                asamp.bindState(node.state)
                applicable.append(i)
        if len(applicable)==0:
            return []
        #Sample candidate successors
        for s in xrange(self.settings.numSuccessors):
            i = random.choice(applicable)
            asamp=self.problem.actionSamplers[i]
            asamp.rootSolveIters = self.settings.actionBindSolveIters
            self.numParameterSamples[asamp.action.name] += 1
            params = asamp.sample_applicable(self.settings.actionParameterSamples)
            if params != None:
                self.numParameterSuccesses[asamp.action.name] += 1
                a = BoundAction (self.problem.actions[i],params)
                s2 = a.apply(self.problem.env,node.state)
                failed = self.problem.stateSpace.infeasible(s2,self.problem.env)
                if failed == None:
                    self.numFeasibleSuccessors += 1
                    r.append((a,s2))
                else:
                    self.numInfeasibleSuccessors[failed[0]] += 1
        return r
        
    def plan_more(self, e, t):
        """Contributes up to t more iterations to planning edge e.
        Returns true if the plan is successful."""
        if e.done_planning(): return False
        e.update_plan(t)
        return e.get_plan().done() and not e.get_plan().failed()
    
    def time(self, e):
        """This method can be modified to change how much time should be
        given to iterative subproblems at the current iteration.  Harder
        subproblems should be given more time."""
        
        if self.settings.subplanTime != None:
            if isinstance(self.settings.subplanTime,(int,float)):
                return self.settings.subplanTime-e.plan_iters()
            elif isinstance(self.settings.subplanTime,types.FunctionType):
                return self.settings.subplanTime(self.iters)-e.plan_iters()
            elif isinstance(self.settings.subplanTime,string):
                local = {'n':self.iters,'N':self.iters}
                return eval(self.settings.subplanTime,self.problem.env,local)
            else:
                raise RuntimeError("Unknown type for subplanTime")

        #this implementation gives each subproblem 10 sqrt(N) where N = outer
        #planner iterations
        #return 10*int(math.sqrt(self.iters))-e.plan_iters()

        #this gives each subproblem 20 + sqrt(N) iters
        return 20+int(math.sqrt(self.iters))-e.plan_iters()

        #this implementation gives each subproblem 2* the number of outer
        #planner iterations
        return self.iters*2-e.plan_iters()
        
        #this implementation gives each subproblem a constant number (100)
        #of iterations.
        if e.plan_iters()==0:
            return 100
        else:
            return 0


    def print_stats(self):
        print
        print "* Outer level search stats *"
        print "Completed %d outer planning iterations."%(self.iters)
        print "%d successor sampling iters (%d per iteration)"%(self.numSuccessorsSampled,self.settings.numSuccessors)
        print "applicable:",self.numApplicable
        print "parameter samples:",self.numParameterSamples
        print "parameter successes:",self.numParameterSuccesses
        print "feasible successors: %d"%(self.numFeasibleSuccessors)
        print "infeasible successors:"
        for i in xrange(len(self.numInfeasibleSuccessors)):
            print self.problem.stateSpace.constraints[i],':',self.numInfeasibleSuccessors[i]
        tsum = dict((a.name,0.0) for a in self.problem.actions)
        snum = dict((a.name,0) for a in self.problem.actions)
        fnum = dict((a.name,0) for a in self.problem.actions)
        anum = dict((a.name,0) for a in self.problem.actions)
        for e in self.tree.completeEdges:
            if e.a.action != None:  #root
                tsum[e.a.action.name] += e.plan_iters()
                snum[e.a.action.name] += 1
        for e in self.tree.activeEdges:
            if not e.failed():
                anum[e.a.action.name] += 1
            else:
                fnum[e.a.action.name] += 1
            if e.a.action != None:  #root
                tsum[e.a.action.name] += e.plan_iters()
        for e in self.tree.failedEdges:
            if e.a.action != None:  #root
                tsum[e.a.action.name] += e.plan_iters()
                fnum[e.a.action.name] += 1
        print
        print "* Motion planning precondition stats *"
        print "action\t#successful\t#failed\t#active\tavg time"
        for a in self.problem.actions:
            num = snum[a.name]+anum[a.name]+fnum[a.name]
            print '%s\t%d\t%d\t%d\t%f'%(a.name,snum[a.name],fnum[a.name],anum[a.name],tsum[a.name]/num if num>0 else 0.0)
        print
        print "* Equality precondition solver stats *"
        print "action\t% resolve successful"
        for a in self.problem.actionSamplers:
            frac = float(a.numResolved)/(a.numResolved+a.numResolveFailed) if a.numResolved+a.numResolveFailed > 0 else 1.0
            print '%s\t%f'%(a.action.name,frac*100.0)
        print
        print "* Global search stats *"
        print "Max # of achieved goals: ",max(sum(self.problem.goalTest.results(e.get_tail().state,self.problem.env)) for e in self.tree.completeEdges)
        print "Minimum distance: ",min(self.problem.goalTest.distance(e.get_tail().state,self.problem.env) for e in self.tree.completeEdges)
        numAchieved = [0]*len(self.problem.goalTest.tests)
        for e in self.tree.completeEdges:
            for i,v in enumerate(self.problem.goalTest.results(e.get_tail().state,self.problem.env)):
                if v: numAchieved[i]+=1
        print "Number of nodes achieving each goal condition:"
        for g,n in zip(self.problem.goalTest.tests,numAchieved):
            print "\t'",g,"':",n
        print 
        
        if hasattr(self,'discretePlanner'):
            print "* Discrete planner stats *"
            print "Num searches:",self.discretePlanner.numSearches
            print "Avg solution depth:",float(self.discretePlanner.solutionDepth)/float(self.discretePlanner.numSearches)

        #printing this information is often annoying, no harm in commenting
        #it out
        #if hasattr(self.diffusion,'densityEstimators'):
        #    print "* Density estimator stats *"
        #    for i,est in enumerate(self.diffusion.densityEstimators):
        #        print "Estimator",i
        #        print sorted(est.points.keys())




class LazyPTRPlanner(PTRPlanner):
    """Implements a lazy version of the PTR planner."""

    def __init__(self, problem, settings = PlanSettings()):
        PTRPlanner.__init__(self,problem,settings)
        self.planningEdges = []
        self.tree.head.lazy_depth = 0
        self.tree.head.depth = 0
        self.tree.head.weight = 1
        self.tree.head.numExpands = 0
        self.tree.head.reached = True
        self.tree.head.out_edges[0].lazyPlanning = True
        self.numPlanningEdges = 0

    def pick_nodes(self,k):
        #weight=lambda(e):e.get_tail().weight*pow(0.95,e.get_tail().numExpands) if (e.get_tail().lazy_depth < self.settings.lazyMaxDepth and not e.get_tail().goal) else 0.0
        #cond=lambda(e):e.get_tail().lazy_depth < self.settings.lazyMaxDepth
        #return self.diffusion.pick_nodes(k,weight=weight)
        #return self.diffusion.pick_nodes(k,cond=cond)
        return self.diffusion.pick_nodes(k)

    def planMore(self):
        self.iters += 1
        if self.iters % self.settings.printFrequency == 0:
            print
            print "Tree size:", self.tree.size()
            print "Edges active:", self.tree.active_count(), \
                    ", complete:",self.tree.complete_count(), \
                    ", failed:", self.tree.failed_count()
            print "Currently lazy planning: ",len(self.planningEdges)
            sys.stdout.flush()
        # Add new nodes/edges to tree
        n,newEdges = self.extend_tree()
        #print "Extend node at depth",n.depth,"weight",n.weight
        if n != None:
            n.numExpands += 1

        numChanges = 0
        for e in newEdges:
            e.get_tail().lazy_depth = e.get_head().lazy_depth+1
            e.get_tail().depth = e.get_head().depth+1
            e.get_tail().weight = e.get_head().weight
            e.get_tail().reached = False
            e.get_tail().numExpands = 0
            self.diffusion.add_node(e.get_tail().state,e)
            if self.problem.goalTest(e.get_tail().state,self.problem.env):
                #mark e as a goal edge
                e.get_tail().goal = True
                #print "Adding lazy plan to goal"
                p = e
                while p.get_head() != p.get_tail():
                    if not hasattr(p,'lazyPlanning'):
                        p.lazyPlanning = True
                        numChanges += 1
                        #freeze expanding any children of p
                        self.dfs_lazy_depth(p.get_tail(),self.settings.lazyMaxDepth)
                        self.dfs_change_diffusion(p,remove=True)
                        #start planning the head
                        if p.get_head().reached:
                            self.planningEdges.append(p)
                    else:
                        break
                    p = p.get_head().in_edges[0]
                print "Added lazy plan from depth",p.get_tail().depth,"to",e.get_tail().depth
                        
            #don't go lazy too far out
            lazydepth = 1
            p = e
            while p.get_head() != p.get_tail():
                if lazydepth >= self.settings.lazyMaxDepth:
                    if not hasattr(p,'lazyPlanning'):
                        print "Adding lazy plan by depth, depth",p.get_tail().depth,"descendent depth",e.get_tail().depth
                        p.lazyPlanning = True
                        numChanges += 1
                        #freeze expanding any children of p
                        self.dfs_lazy_depth(p.get_tail(),self.settings.lazyMaxDepth)
                        self.dfs_change_diffusion(p,remove=True)
                        #start planning the head
                        if p.get_head().reached:
                            self.planningEdges.append(p)
                    else:
                        break
                lazydepth += 1
                p = p.get_head().in_edges[0]

        if numChanges > 0 or (self.iters % 100 == 0):
            self.numPlanningEdges += numChanges
            olditers = self.iters
            self.iters = self.numPlanningEdges + self.iters/100
            print "Planning iteration...",self.iters
            eindex = 0
            while eindex < len(self.planningEdges):
                e = self.planningEdges[eindex]
                assert e.get_head().reached
                if e.complete() or self.plan_more(e, self.time(e)):
                    #mark the edge complete and stop planning it
                    #print "Completed edge to depth",e.get_tail().depth
                    e.get_tail().reached = True
                    self.tree.complete(e)
                    self.planningEdges.remove(e)
                    self.completed_edge(e)
                    #revise childrens' lazy_depth
                    self.dfs_lazy_depth(e.get_tail(),0)
                    self.dfs_change_diffusion(p,remove=False)
                    
                    #wake up all lazy planning child edges
                    for ce in e.get_tail().out_edges:
                        if hasattr(ce,'lazyPlanning'):
                            self.planningEdges.append(ce)

                    #test to see if we've reached a goal
                    if hasattr(e.get_tail(),'goal'):
                        self.iters = olditers
                        return self.tree.path_to(e)
                else:
                    #print "Reducing weight to",0.8*e.get_tail().weight,"depth",e.get_tail().depth
                    #self.dfs_weight(e.get_tail(),0.8*e.get_tail().weight)
                    eindex+=1
            self.iters = olditers
        return None

    def dfs_lazy_depth(self,node,depth):
        node.lazy_depth = depth
        for e in node.out_edges:
            if not hasattr(e,'lazyPlanning'):
                self.dfs_lazy_depth(e.get_tail(),depth+1)

    def dfs_change_diffusion(self,edge,remove):
        if remove:
            self.diffusion.remove_node(edge.get_tail().state,edge)
        else:
            self.diffusion.add_node(edge.get_tail().state,edge)
        for e in edge.get_tail().out_edges:
            if not hasattr(e,'lazyPlanning'):
                assert e != edge
                self.dfs_change_diffusion(e,remove)

    def dfs_weight(self,node,weight):
        node.weight = weight
        for e in node.out_edges:
            self.dfs_weight(e.get_tail(),weight)

class DiscretePriorityPlanner(LazyPTRPlanner):
    def __init__(self,problem,settings):
        """problem: a PlanningProblem instance (must support
        constructDiscrete()).  settings: a PlanSettings instance."""
        LazyPTRPlanner.__init__(self,problem,settings)
        # the base class only builds a discrete planner when
        # settings.heuristicTechnique == 'discrete'; build one here otherwise
        if not hasattr(self,'discretePlanner'):
            self.discretePlanner = discreteplanner.DiscretePlanner(problem.constructDiscrete())
            print "Discrete Problem:"
            self.discretePlanner.problem.print_properties()
            # NOTE(review): raw_input() blocks until the user presses enter;
            # looks like a leftover debugging pause -- confirm before running
            # unattended
            raw_input()
            if len(self.discretePlanner.problem.goalTest.tests)==0:
                raise RuntimeError("No discrete goal condition")
        # oneShot: on a failed lift, disable the discrete transition outright
        # instead of penalizing its cost (see extend_tree); defaults to True,
        # overridable via settings.oneShot
        self.oneShot = True
        if hasattr(settings,'oneShot'):
            self.oneShot = settings.oneShot
        # maps discrete-state tuple -> set of discrete actions permitted when
        # expanding that state (used in DISCRETE_PLANNER_ISRR mode)
        self.allowedSuccessors = {}

    def lift(self,ad,state):
        """Attempts to instantiate the discrete action ad at the hybrid
        state 'state'.  Returns (a,s) where a is a BoundAction with
        continuous parameters bound and s the resulting feasible state,
        or (None,None) if no feasible binding was found."""
        #locate the continuous action that matches the discrete one by name
        aindex = next((i for (i,act) in enumerate(self.problem.actions)
                       if ad.action.name == act.name), -1)
        assert aindex >= 0, "Invalid action returned by discrete plan?"
        #found the corresponding action; prepare its parameter sampler
        sampler = self.problem.actionSamplers[aindex]
        sampler.bindState(state)
        sampler.rootSolveIters = self.settings.actionBindSolveIters

        #sample continuous parameters, retrying a bounded number of times
        self.numApplicable[sampler.action.name] += 1
        outerIterations = 10
        budget = self.settings.actionParameterSamples/outerIterations
        for attempt in xrange(outerIterations):
            params = sampler.sample_applicable_partial(ad.params,budget)
            self.numParameterSamples[sampler.action.name] += 1
            if params == None:
                continue
            bound = BoundAction(self.problem.actions[aindex],params)
            nextState = bound.apply(self.problem.env,state)
            if self.problem.stateSpace.infeasible(nextState,self.problem.env):
                #infeasible result; go back and do more binding
                continue
            self.numParameterSuccesses[sampler.action.name] += 1
            return (bound,nextState)
        return (None,None)


    def extend_tree(self):
        """Extends the tree by computing a discrete plan and lifting it into
        a continuous trajectory.  Returns (node,newEdges) like
        PTRPlanner.extend_tree; returns (None,[]) when no discrete path
        exists.  Failed lifts penalize or disable (depending on oneShot)
        the offending discrete transition so later searches avoid it.

        NOTE(review): mark_lifted / mark_failed / transitionFlags are not
        defined in the visible portion of this file -- presumably provided
        elsewhere; confirm."""
        #find a discrete path (a list of AStarNodes)
        if DISCRETE_PLANNER_ISRR:
            # ISRR mode: only replan the discrete problem every 10th outer
            # iteration; otherwise extend the tree normally
            if (self.iters - 1)%10 != 0:
                #print "Now extending tree..."
                return LazyPTRPlanner.extend_tree(self)
        print "Planning discrete plan..."
        self.discretePlanner.reset()
        path = self.discretePlanner.solve()
        if path == None:
            print "Discrete planner was unable to find path"
            #debugging info
            print "  Tree size:",self.discretePlanner.astar.tree_size()
            bestf = self.discretePlanner.astar.best_node('f')
            bestg = self.discretePlanner.astar.best_node('g')
            besth = self.discretePlanner.astar.best_node('h')
            print "  Best f node:",bestf.state
            print "    f",bestf.f,"g",bestf.g,"h",bestf.h
            print "  Deepest node:",bestg.state
            print "    f",bestg.f,"g",bestg.g,"h",bestg.h
            print "  Cloesest node:",besth.state
            print "    f",besth.f,"g",besth.g,"h",besth.h
            return None,[]

        print "Found path of length",len(path)
        #now lift the discrete-only plan into the hybrid state, transition
        #by transition, starting from the initial state
        lifted = True
        liftedstates = [self.problem.initialState]
        liftedactions = []
        for index,n in enumerate(path[1:]):
            #sd/ad: the discrete source state and discrete action (a
            #BoundAction) of this transition
            sd = n.parent.state
            ad = n.parentedge
            if DISCRETE_PLANNER_ISRR:
                # ISRR mode: just record the transition as allowed so that
                # sample_successors can draw from it later; no lifting here
                self.allowedSuccessors.setdefault(self.discretePlanner.to_tuple(sd),set()).add(ad)
                continue
            
            #bind the continuous variables of the action a
            lifted = False
            (a,s) = self.lift(ad,liftedstates[-1])
            if a != None:
                lifted = True
                liftedactions.append(a)
                liftedstates.append(s)
                self.mark_lifted(sd,ad)
            else:
                #lift failed: penalize or disable the discrete transition
                print "Unable to lift transition",str(index)+"/"+str(len(path)-1)
                print "State",sd
                print "Action",ad
                #print "Prior lifted state"
                #output.pretty_print_state(liftedstates[-1])
                if self.oneShot:
                    #mark as disabled
                    self.mark_failed(sd,ad,disable=True)
                else:
                    flags = self.discretePlanner.transitionFlags(sd,ad)
                    if flags.cost != None:
                        if flags.cost < 0:
                            #already lifted, don't penalize ths too much
                            print "Penalizing existing lifted transition by 0.5"
                            flags.cost += 0.5
                        else:
                            #later transitions get larger penalties
                            self.mark_failed(sd,ad,cost=(index*0.5+1.0))
                    else:
                        self.mark_failed(sd,ad,cost=(index*0.5+1.0))
                break
            #otherwise, we're good to go to the next action

        if not lifted:
            #lift aborted partway through; retry from the root next time
            return self.tree.head,[]
        else:
            #modify the underlying search so we are less likely to return
            #the same path again (small per-transition penalty)
            for n in path[1:]:
                sd = n.parent.state
                ad = n.parentedge
                self.mark_failed(sd,ad,cost=1.0/len(path))

        if DISCRETE_PLANNER_ISRR:
            # ISRR mode only records allowed transitions; tree growth
            # happens through the normal lazy extension
            print "Now extending tree..."
            return LazyPTRPlanner.extend_tree(self)

        print "Found a continuous binding for the path"

        #add the lifted path to the planning tree as a chain of edges
        res = []
        n = self.tree.head
        for i in xrange(len(liftedactions)):
            e = self.addEdge(n,liftedactions[i],liftedstates[i+1])
            res.append(e)
            n = e.get_tail()
        print "Increased to",len(self.tree.active_edges()),"active edges"
        if self.oneShot:
            earliestFailure = len(res)
            #do the planning now, edge by edge
            for i,e in enumerate(res):
                if not e.complete() and not self.plan_more(e, self.time(e)):
                    #perform disconnection in discrete space
                    sd = path[i+1].parent.state
                    ad = path[i+1].parentedge
                    print "Unable to plan transition ",sd,":"
                    print ad
                    self.mark_failed(sd,ad,cost=1)

                    #mark this failure
                    if i < earliestFailure:
                        earliestFailure = i
            if earliestFailure < len(res):
                #drop every edge at or after the first failure
                for e in res:
                    self.tree.fail(e)
                res = res[:earliestFailure]
            #otherwise, we are done!
        return self.tree.head,res

    def sample_successors(self, node):
        if not DISCRETE_PLANNER_ISRR:
            return LazyPTRPlanner.sample_successors(self,node)

        dstate = dict([(s,node.state[s]) for s in self.discretePlanner.problem.stateSpace.names])
        #print "Sample successors...",dstate
        dindex = self.discretePlanner.to_tuple(dstate)
        if dindex not in self.allowedSuccessors:
            #no allowed successors
            print "Warning, expanded to an unallowed discrete state"
            return []

        allowedActions = list(self.allowedSuccessors[dindex])
        self.numSuccessorsSampled += 1
        r = []
        #Select all applicable actions
        for s in xrange(self.settings.numSuccessors):
            ad = random.choice(allowedActions)
            #ad gives a possible discrete component of an action
            (a,s) = self.lift(ad,node.state)
            if a:
                print "lifted",ad
                self.mark_lifted(dstate,ad)
                self.numFeasibleSuccessors += 1
                r.append((a,s))
            else:
                #print "Couldn't lift",ad
                self.mark_failed(dstate,ad,cost=1.0)
                pass
        return r

    def completed_edge(self,e):
        """Callback invoked when edge e finishes planning.

        Defers to the base class.  The discrete-cost-adjustment logic that
        used to follow was disabled by an early return; it is preserved
        below as comments for reference rather than as unreachable code.
        """
        LazyPTRPlanner.completed_edge(self,e)
        #Disabled: adjust the discrete cost to favor this proven-feasible
        #transition.  Re-enable by uncommenting.
        ##adjust discrete cost
        #dstate = dict([(s,e.get_head().state[s]) for s in self.discretePlanner.problem.stateSpace.names])
        #
        ##find the discrete action
        #adsamp = [a for a in self.discretePlanner.problem.actionSamplers if a.action.name == e.get_action().action.name][0]
        #ad = adsamp.action
        #
        #adparams = []
        #for p in adsamp.normalParams:
        #    if hasattr(ad.params[p],'__len__') or isinstance(ad.params[p],DelayedDomain):
        #        adparams.append(p)
        #a = BoundAction(ad,dict((p,e.get_action().params[p]) for p in adparams))
        #self.mark_feasible(dstate,a)

    def mark_failed(self,sd,ad,disable=None,cost=None):
        """Marks a failure at discrete state sd, and discrete action ad"""
        if disable != None:
            print "Disabling discrete transition"
            self.discretePlanner.transitionFlags(sd,ad).disabled=disable
            #print "Disabling discrete (bound) action"
            #self.discretePlanner.boundActionFlags(ad).disabled=True
        if cost != None:
            flags = self.discretePlanner.transitionFlags(sd,ad)
            if flags.cost == None: flags.cost=0
            flags.cost += cost
            print "Increasing",ad,"cost mod to",flags.cost

    def mark_lifted(self,sd,ad):
        """Marks that discrete action ad at discrete state sd corresponds to
        a lifted transition"""
        print "Reducing",ad,"cost mod to",-0.5
        flags = self.discretePlanner.transitionFlags(sd,ad)
        flags.cost = -0.5

    def mark_feasible(self,sd,ad):
        """Marks that discrete action ad at discrete state sd corresponds to
        a feasible motion"""
        print "Reducing",ad,"cost mod to",-0.9
        flags = self.discretePlanner.transitionFlags(sd,ad)
        flags.cost = -0.9

def Planner(problem, settings = None):
    """Factory that constructs a planner for the given problem.

    Dispatches on settings.planner: 'lazy' -> LazyPTRPlanner,
    'discrete' -> DiscretePriorityPlanner, anything else -> PTRPlanner.

    If settings is None a fresh PlanSettings() is used.  (The old default
    of `settings = PlanSettings()` was a mutable default argument: a single
    instance was created once at import time and shared across every call,
    so mutating it in one call would leak into all later calls.)
    """
    if settings is None:
        settings = PlanSettings()
    if settings.planner == 'lazy':
        return LazyPTRPlanner(problem,settings)
    elif settings.planner == 'discrete':
        return DiscretePriorityPlanner(problem,settings)
    else:
        return PTRPlanner(problem,settings)


if __name__ == '__main__':
    import state
    import actionparser
    import goalparser

    problem = planningproblem.PlanningProblem()
    problem.setEnv(globals())
    statespace = state.StateSpace(set(['q','index']))
    initial = { 'q':[0.0,0.0], 'index':0 }
    problem.setInitial(statespace,initial)

    parser = actionparser.ActionParser(problem.env,statespace.names)

    code = \
"""PARAMS:
    #dx is the amount to move in the x direction
    dx in (-0.1,0.1)
PRECOND:
    MP FakeIterativeSubproblem(50,1000)
    q[0]+dx >= 0.0
    q[0]+dx <= 1.0
EFFECTS:
    q[0] = q[0]+dx
"""
    problem.addAction(parser.parse('movex',code.split('\n'),0))
    code = \
"""PARAMS:
    #dx is the amount to move in the y direction
    dy in (-0.1,0.1)
PRECOND:
    MP FakeIterativeSubproblem(50,1000)
    q[1]+dy >= 0.0
    q[1]+dy <= 1.0
EFFECTS:
    q[1] = q[1]+dy
"""
    problem.addAction(parser.parse('movey',code.split('\n'),0))
    code = \
"""PARAMS:
    #di is the amount to move in the index direction
    di in {-1,1}
PRECOND:
    index+di >= 0
    index+di <= 10
EFFECTS:
    index = index+di
"""
    problem.addAction(parser.parse('moveindex',code,0))

    goal = goalparser.GoalTest(['q[0]>0.95','q[1]>0.95','index==10'])
    problem.setGoalTest(goal)

    """
    #test the discrete planner
    dproblem = problem.constructDiscrete()
    print dproblem.initialState
    print dproblem.goalTest.tests
    print dproblem.actions
    dplanner = Planner(dproblem)
    for i in xrange(1000):
        res=dplanner.planMore()
        if res != None:
            print res
            break
    for est in dplanner.tree.densityEstimators:
        print "Density estimator cells"
        print est.points.keys()
    print "Dumping tree to plan.tgf..."
    dplanner.tree.dump('plan.tgf')
    exit()
    """

    settings = PlanSettings()
    settings.scaling = {'q':[1.0,1.0],'index':10.0}
    planner = Planner(problem,settings)

    res = planner.plan(10000)
    planner.print_stats()
    print "Dumping tree to plan.tgf..."
    planner.tree.dump('plan.tgf')
    if res != None:
        path = res[0].path_to(res[1])
        print "Solution path:"
        print path[0].get_head().state
        for e in path:
            print e.get_tail().state
