"""Below we reuse some code that was used in the CMSC 421 course"""
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

"""
In search.py, you will implement generic search algorithms which are called 
by Pacman agents (in searchAgents.py).
"""

import util
import time
import copy
import random 

class DeterminSearch:
    """
    Deterministic forward-search planner.

    This class outlines the structure of deterministic search algorithms,
    defines how the 'wrapper' class of planning algorithms interacts with
    them, and implements the specific search algorithms as methods.

    Every search method returns a pair (elapsedSeconds, plan) where plan is
    either a list of actions reaching the goal, or a one-element sentinel
    list such as ['FAIL'] or ['Timeout reached'].  The disjunctive-goal
    variants return a triple (elapsedSeconds, plan, lastState).

    NOTE(review): this module targets Python 2 (time.clock(), dict.iteritems()).
    time.clock() was removed in Python 3.8 -- switch to time.monotonic()
    when porting to Python 3.
    """

    def __init__(self, initialState, heuristic='nullHeuristic', depth=40, timeOut=300):
        self.initialState = initialState  # root state of the search
        self.heuristic = heuristic        # heuristic name; initializes A* with something
        self.depthLimit = depth           # maximum plan length explored
        self.timeOut = timeOut            # wall-clock budget, in seconds

    def getInitialState(self):
        """
        Returns the initial state for the planning problem.
        """
        return self.initialState

    def isGoalState(self, state):
        """
         state: Search state

        Returns True if and only if the state is a valid goal state.
        """
        return state.isGoal()

    def getActions(self, state):
        """
        Returns the applicable actions of *state*, shuffled so that
        repeated runs break ties in a different (random) order.
        """
        actions = state.getApplicableActions()
        random.shuffle(actions)
        return actions

    def getSuccessorState(self, state, action):
        """Returns the state reached by applying *action* in *state*."""
        return state.getNextState(action)

    def depthFirstSearch(self, dlimit):
        """
        Depth-first search, bounded to plans of fewer than *dlimit* actions
        when dlimit is truthy.

        Returns (elapsed, plan); plan is ['Timeout reached'] or ['FAIL'] on
        failure.
        """
        startTime = time.clock()
        # Map state key -> length of the plan at which the state was expanded,
        # so a state is only re-expanded when reached via a strictly shorter plan.
        exploredSet = {}
        stack = util.Stack()
        # Seed the frontier with (state, plan-so-far).
        stack.push((self.getInitialState(), []))
        while not stack.isEmpty():
            newState, newActionList = stack.pop()
            if self.isGoalState(newState): return time.clock()-startTime, newActionList
            if newState.key() in exploredSet and len(newActionList) >= exploredSet[newState.key()]:
                continue
            if dlimit and len(newActionList) >= dlimit:
                continue
            actions = self.getActions(newState)
            nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
            for action in actions:
                nextState = nextStates[action]
                actionList = newActionList + [action]
                # Push tuple of state and plan to the stack.
                stack.push((nextState, actionList))
            if time.clock() > startTime+self.timeOut: return time.clock()-startTime, ['Timeout reached']
            exploredSet[newState.key()] = len(newActionList)
        return time.clock()-startTime, ['FAIL']

    def iterativeDeeping(self):
        """
        Iterative-deepening depth-first search up to self.depthLimit.
        (Name kept as-is for backward compatibility with callers;
        'iterative deepening' is what is meant.)
        """
        startTime = time.clock()
        for depth in range(1, self.depthLimit + 1):
            returnTime, solution = self.depthFirstSearch(depth)
            # BUGFIX: an empty plan (the initial state is already a goal)
            # used to raise IndexError on solution[0]; treat it as success,
            # mirroring the guard in disjGoalIterativeDeeping.
            if not solution or solution[0] != 'FAIL':
                return time.clock()-startTime, solution
            if time.clock() > startTime+self.timeOut:
                return time.clock()-startTime, ['Timeout reached']
        return time.clock()-startTime, ['FAIL']

    def aStar(self):
        """
        A* search using the state's relaxed-planning-graph heuristic
        (state.getHrpg()); f = g + h with g = plan length so far.
        Successors whose heuristic is -1 (goal unreachable per the
        heuristic) are pruned.
        """
        startTime = time.clock()
        # state key -> shortest plan length at which the state was expanded.
        exploredSet = {}
        # q.push(item, priority); lowest priority wins.
        q = util.PriorityQueue()
        # Seed the frontier with (state, plan-so-far) at priority 0.
        q.push((self.getInitialState(), []), 0)
        while not q.isEmpty():
            newState, newActionList = q.pop()
            if self.isGoalState(newState): return time.clock()-startTime, newActionList
            if newState.key() in exploredSet and len(newActionList) >= exploredSet[newState.key()]:
                continue
            if len(newActionList) >= self.depthLimit:
                continue
            actions = self.getActions(newState)
            nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
            for action in actions:
                nextState = nextStates[action]
                actionList = newActionList + [action]
                h = nextState.getHrpg()
                if h == -1: continue  # heuristic says the goal is unreachable
                f = h + len(actionList)  # A*: f = h + g
                q.push((nextState, actionList), f)
                if time.clock() > startTime+self.timeOut: return time.clock()-startTime, ['Timeout reached']
            exploredSet[newState.key()] = len(newActionList)
        return time.clock()-startTime, ['FAIL']

    def bestFirstSearch(self):
        """
        Greedy best-first search: identical to aStar() except the priority
        is the heuristic value alone (f = h).
        """
        startTime = time.clock()
        # state key -> shortest plan length at which the state was expanded.
        exploredSet = {}
        # q.push(item, priority); lowest priority wins.
        q = util.PriorityQueue()
        # Seed the frontier with (state, plan-so-far) at priority 0.
        q.push((self.getInitialState(), []), 0)
        while not q.isEmpty():
            newState, newActionList = q.pop()
            if self.isGoalState(newState): return time.clock()-startTime, newActionList
            if newState.key() in exploredSet and len(newActionList) >= exploredSet[newState.key()]:
                continue
            if len(newActionList) >= self.depthLimit:
                continue
            actions = self.getActions(newState)
            nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
            for action in actions:
                nextState = nextStates[action]
                actionList = newActionList + [action]
                h = nextState.getHrpg()
                if h == -1: continue  # heuristic says the goal is unreachable
                f = h  # greedy: priority is the heuristic alone
                q.push((nextState, actionList), f)
                if time.clock() > startTime+self.timeOut: return time.clock()-startTime, ['Timeout reached']
            exploredSet[newState.key()] = len(newActionList)
        return time.clock()-startTime, ['FAIL']

    def fastForwardSearch(self):
        """
        FF-style enforced hill-climbing: repeatedly move to a successor with
        a strictly better getHrpg() value; when no successor improves,
        breadth-first search outward from the current state until a strictly
        better state (or the goal) is found.  Fails with a sentinel plan on
        depth limit, heuristic dead-end, or BFS local minimum.
        """
        startTime = time.clock()
        ffState = self.getInitialState()  # The state that FastForward is at at each moment.
        ffActionList = []  # List of actions FastForward chooses to reach the goal.
        while not self.isGoalState(ffState):
            if len(ffActionList) >= self.depthLimit:
                return time.clock()-startTime, ['FAIL FF depth limit']

            actions = self.getActions(ffState)
            nextStates = {action: self.getSuccessorState(ffState, action) for action in actions}

            minH = ffState.getHrpg()
            if minH == -1:  # Can't reach the goal from the current FF state.
                return time.clock()-startTime, ['FAIL FF stuck']

            # Hill-climbing step: take any successor strictly better than here.
            bestAction = None
            for action, nextState in nextStates.items():
                h = nextState.getHrpg()
                if h == -1: continue
                if h < minH:
                    minH = h
                    bestAction = action
            if bestAction:
                ffActionList.append(bestAction)
                ffState = nextStates[bestAction]
                continue

            # Otherwise, no next state has a better heuristic! BFS to find one...
            q = util.Queue()
            exploredSet = set()
            # Seed the BFS frontier from the current FF state.
            q.push((ffState, []))
            betterStateFound = False
            while not q.isEmpty() and not betterStateFound:
                newState, newActionList = q.pop()
                if len(ffActionList + newActionList) >= self.depthLimit:
                    return time.clock()-startTime, ['FAIL BFS maxDepth']
                actions = self.getActions(newState)
                nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
                for action in actions:
                    nextState = nextStates[action]
                    if nextState.key() in exploredSet:
                        continue
                    if self.isGoalState(nextState):
                        return time.clock()-startTime, ffActionList + newActionList + [action]
                    h = nextState.getHrpg()
                    if h == -1: continue  # dead end per the heuristic
                    if h < minH:
                        # Strictly better state found: commit the BFS path and
                        # resume hill-climbing from it.
                        ffState = nextState
                        ffActionList.extend(newActionList)
                        ffActionList.append(action)
                        betterStateFound = True
                        break
                    q.push((nextState, newActionList + [action]))
                    if time.clock() > startTime+self.timeOut: return time.clock()-startTime, ['Timeout reached']
                    exploredSet.add(nextState.key())
            if not betterStateFound:
                return time.clock()-startTime, ['Fail FF BFS local minimum']
        return time.clock()-startTime, ffActionList

    def breadthFirstSearch(self):
        """
        Breadth-first search bounded by self.depthLimit.
        Returns (elapsed, plan); plan is ['Timeout reached'] or ['FAIL'] on
        failure.
        """
        startTime = time.clock()
        # state key -> shortest plan length at which the state was expanded.
        exploredSet = {}
        q = util.Queue()
        # Seed the frontier with (state, plan-so-far).
        q.push((self.getInitialState(), []))
        while not q.isEmpty():
            newState, newActionList = q.pop()
            if self.isGoalState(newState): return time.clock()-startTime, newActionList
            if newState.key() in exploredSet and len(newActionList) >= exploredSet[newState.key()]:
                continue
            if len(newActionList) >= self.depthLimit:
                continue
            actions = self.getActions(newState)
            nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
            for action in actions:
                nextState = nextStates[action]
                actionList = newActionList + [action]
                q.push((nextState, actionList))
                if time.clock() > startTime+self.timeOut: return time.clock()-startTime, ['Timeout reached']
            exploredSet[newState.key()] = len(newActionList)
        return time.clock()-startTime, ['FAIL']

    def disjDepthFirstSearch(self, dlimit, dGoals):
        """
        Depth-first search like depthFirstSearch(), but the target is any
        state satisfying one of the disjunctive subgoals *dGoals*
        (see isStateSubGoal).  Returns (elapsed, plan, lastPoppedState).
        """
        startTime = time.clock()
        # state key -> shortest plan length at which the state was expanded.
        exploredSet = {}
        stack = util.Stack()
        # Seed the frontier with (state, plan-so-far).
        stack.push((self.getInitialState(), []))
        while not stack.isEmpty():
            newState, newActionList = stack.pop()
            if self.isStateSubGoal(dGoals, newState):
                return time.clock()-startTime, newActionList, newState
            if newState.key() in exploredSet and len(newActionList) >= exploredSet[newState.key()]:
                continue
            if dlimit and len(newActionList) >= dlimit:
                continue
            actions = self.getActions(newState)
            nextStates = {action: self.getSuccessorState(newState, action) for action in actions}
            for action in actions:
                nextState = nextStates[action]
                actionList = newActionList + [action]
                # Push tuple of state and plan to the stack.
                stack.push((nextState, actionList))
            if time.clock() > startTime+self.timeOut:
                return time.clock()-startTime, ["Timeout reached"], newState
            exploredSet[newState.key()] = len(newActionList)
        # NOTE: on exhaustion, newState is the last state popped above.
        return time.clock()-startTime, ["FAIL"], newState

    def disjGoalIterativeDeeping(self, dGoals):
        """
        Iterative-deepening wrapper around disjDepthFirstSearch().
        Depths start at 10 rather than 1 (kept as-is; presumably a tuning
        choice -- TODO confirm with original author).
        """
        startTime = time.clock()
        for depth in range(10, self.depthLimit + 10):
            rdTime, solution, state = self.disjDepthFirstSearch(depth, dGoals)
            if len(solution) == 0:
                # This bit of code handles when one of the subgoals is goop
                # that is in the initial state.  The initial state is a
                # trivial landmark.
                return rdTime, solution, state
            if solution[0] != 'FAIL':
                return time.clock()-startTime, solution, state
            if time.clock() > startTime+self.timeOut:
                return time.clock()-startTime, ['Timeout reached'], state
        return time.clock()-startTime, ["FAIL"], state

    def isStateSubGoal(self, dGoals, state):
        """
        This function handles disjunctive goals: a state qualifies if it is
        a full goal state, or if any of its goops sits at a location listed
        in *dGoals* (a collection of (goop, location) pairs).
        """
        if state.isGoal():
            return True
        # Python 2 iteration; use .items() when porting to Python 3.
        for loc, goop in state.goops.iteritems():
            if (goop, loc) in dGoals:
                return True
        return False