import copy
import math
import random
import StringIO
from xml.dom.minidom import Document,Node

from action import Action,ActionSet
from pwl import *
from probability import Distribution
from agent import Agent
from mcts2 import Node

class mctsAgent(Agent):
    """
    A PsychSim agent whose action values are estimated from an attached
    Monte Carlo Tree Search (MCTS) node (see L{mcts2}) when one has been
    supplied via L{setNodeAndWorld}; L{value2} retains the original
    exhaustive forward-projection computation.
    @ivar name: agent name
    @type name: str
    @ivar world: the environment that this agent inhabits
    @type world: L{World<psychsim.world.World>}
    @ivar actions: the set of possible actions that the agent can choose from
    @type actions: {L{Action}}
    @ivar legal: a set of conditions under which certain action choices are allowed (default is that all actions are allowed at all times)
    @type legal: L{ActionSet}S{->}L{KeyedPlane}
    @ivar omega: the set of possible observations this agent may receive
    @type omega: {str}
    @ivar O: the observation function; default is C{True}, which means perfect observations of actions
    @type O: L{KeyedTree}
    @ivar node: current MCTS search node, or C{None} if no tree is attached
    """

    def __init__(self,name):
        """
        @param name: agent name
        @type name: str
        """
        Agent.__init__(self, name)
        # No search tree attached yet; value() then reduces to the immediate
        # reward (plus projection bookkeeping) until setNodeAndWorld() is called.
        self.node = None

    def setNodeAndWorld(self, node, world):
        """
        Attach the MCTS search node (and the world it was built for) that
        L{value} should consult when estimating returns.
        @param node: MCTS tree node corresponding to the current state
        @param world: the environment this agent inhabits
        """
        self.node = node
        self.world = world


    def value(self,vector,action=None,horizon=None,others=None,model=None):
        """
        Computes the expected value of a state vector (and optional action choice) to this agent,
        using visit-weighted child values from the attached MCTS node when available.
        @param vector: the state vector (not distribution) representing the possible world under consideration
        @type vector: L{KeyedVector}
        @param action: prescribed action choice for the agent to evaluate; if C{None}, then use agent's own action choice (default is C{None})
        @type action: L{ActionSet}
        @param horizon: the number of time steps to project into the future (default is agent's horizon)
        @type horizon: int
        @param others: optional table of actions being performed by other agents in this time step (default is no other actions)
        @type others: strS{->}L{ActionSet}
        @param model: the model of this agent to use (default is C{True})
        """
        # Resolve the agent model, preferring the MCTS node's world when present.
        if model is None and self.node:
            model = self.node.world.getModel(self.name,vector)
        elif model is None:
            model = self.world.getModel(self.name,vector)
        # Determine horizon
        if horizon is None:
            horizon = self.getAttribute('horizon',model)
        # Determine discount factor
        # NOTE(review): discount is unused on this code path; the lookup is
        # kept in case getAttribute has side effects -- confirm before removal.
        discount = self.getAttribute('discount',model)
        # Compute immediate reward
        R = self.reward(vector,model)
        result = {'R': R,
                  'agent': self.name,
                  'state': vector,
                  'horizon': horizon,
                  'projection': []}

        result['V'] = R

        # One-step lookahead that records projection entries for the caller.
        # NOTE(review): the 'future' distribution built here is never consumed
        # (result['V'] is reset to R below); only the entries appended to
        # result['projection'] escape this block -- confirm this is intended.
        if horizon > 0 and not self.world.terminated(vector):
            if others is None:
                turn = {}
            else:
                turn = copy.copy(others)
            outcome = self.world.stepFromState(vector,turn,horizon-1)
            if 'new' not in outcome:
                # No consistent outcome
                pass
            elif isinstance(outcome['new'],Distribution):
                # Uncertain outcomes
                future = Distribution()
                for newVector in outcome['new'].domain():
                    entry = copy.copy(outcome)
                    entry['probability'] = outcome['new'][newVector]
                    Vrest = self.value(newVector,None,horizon-1,None,model) #trevor this is potentially the model update call
                    entry.update(Vrest)
                    try:
                        future[entry['V']] += entry['probability']
                    except KeyError:
                        future[entry['V']] = entry['probability']
                    result['projection'].append(entry)

        result['V'] = R
        if self.node:
            # MCTS backup: given the current node and the candidate action,
            #     for every child of the node whose move matches the action,
            #         record the child and its visit count
            #     estimate P(child) from relative visit counts
            #     V = R + sumOverChildren(P(child) * childValueForModel)
            childList = {}
            totalVisits = 0
            for child in self.node.childNodes:
                if child.move == action:
                    childList[child] = child.visits
                    totalVisits += child.visits
            if totalVisits == 0:
                # Children have not been visited; assume a uniform distribution.
                for child in childList:
                    childList[child] = 1./len(childList)
            else:
                for child in childList:
                    childList[child] = float(childList[child])/float(totalVisits)

            for child in childList:
                result['V'] += childList[child] * child.avgValue[model]

        return result

    def value2(self,vector,action=None,horizon=None,others=None,model=None):
        """
        Computes the expected value of a state vector (and optional action choice) to this agent
        via full recursive forward projection (the original, non-MCTS computation),
        caching computed values in the agent's value function.
        @param vector: the state vector (not distribution) representing the possible world under consideration
        @type vector: L{KeyedVector}
        @param action: prescribed action choice for the agent to evaluate; if C{None}, then use agent's own action choice (default is C{None})
        @type action: L{ActionSet}
        @param horizon: the number of time steps to project into the future (default is agent's horizon)
        @type horizon: int
        @param others: optional table of actions being performed by other agents in this time step (default is no other actions)
        @type others: strS{->}L{ActionSet}
        @param model: the model of this agent to use (default is C{True})
        """
        if model is None:
            model = self.world.getModel(self.name,vector)
        # Determine horizon
        if horizon is None:
            horizon = self.getAttribute('horizon',model)
        # Determine discount factor
        discount = self.getAttribute('discount',model)
        # Compute immediate reward
        R = self.reward(vector,model)
        result = {'R': R,
                  'agent': self.name,
                  'state': vector,
                  'horizon': horizon,
                  'projection': []}
        # Check for pre-computed value function
        V = self.getAttribute('V',model).get(self.name,vector,action,horizon,
                                             self.getAttribute('ignore',model))
        if V is not None:
            result['V'] = V
        else:
            result['V'] = R
            if horizon > 0 and not self.world.terminated(vector):
                # Perform action(s)
                if others is None:
                    turn = {}
                else:
                    turn = copy.copy(others)
                if action:
                    turn[self.name] = action
                outcome = self.world.stepFromState(vector,turn,horizon)
                if 'new' not in outcome:
                    # No consistent outcome
                    pass
                elif isinstance(outcome['new'],Distribution):
                    # Uncertain outcomes
                    future = Distribution()
                    for newVector in outcome['new'].domain():
                        entry = copy.copy(outcome)
                        entry['probability'] = outcome['new'][newVector]
                        # NOTE(review): recursion goes through value(), not
                        # value2(), so the MCTS path is used for sub-values
                        # when a node is attached -- confirm this is intended.
                        Vrest = self.value(newVector,None,horizon-1,None,model) #trevor this is potentially the model update call
                        entry.update(Vrest)
                        try:
                            future[entry['V']] += entry['probability']
                        except KeyError:
                            future[entry['V']] = entry['probability']
                        result['projection'].append(entry)
                    # The following is typically "expectation", but might be "max" or "min", too
                    op = self.getAttribute('projector',model)
                    if discount < -1e-6:
                        # Only final value matters
                        result['V'] = op(future)
                    else:
                        # Accumulate value
                        result['V'] += discount*op(future)
                else:
                    # Deterministic outcome
                    outcome['probability'] = 1.
                    Vrest = self.value(outcome['new'],None,horizon-1,None,model)
                    outcome.update(Vrest)
                    if discount < -1e-6:
                        # Only final value matters
                        result['V'] = Vrest['V']
                    else:
                        # Accumulate value
                        result['V'] += discount*Vrest['V']
                    result['projection'].append(outcome)
            # Do some caching
            self.getAttribute('V',model).set(self.name,vector,action,horizon,result['V'])
        return result
    
