from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.valuebased import ActionValueNetwork
from pybrain.rl.learners import NFQ, SARSA
from pybrain.rl.explorers import BoltzmannExplorer
from pybrain.rl.explorers.continuous.sde import StateDependentExplorer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.learners.directsearch.enac import ENAC

class _PublicGoodAgent(LearningAgent):
    """A LearningAgent preconfigured as one of three predefined learner breeds."""

    def __init__(self, task, breed, name):
        # Dispatch on the breed index: 0 -> NFQ, 1 -> ENAC, 2 -> SARSA.
        # Each builder calls LearningAgent.__init__ itself with the right
        # module/learner pair for the given task.
        builders = (self._nfq, self._enac, self._sarsa)
        builders[breed](task)
        self.name = name

    def __str__(self):
        return self.name

    def _nfq(self, task):
        # Neural fitted Q-iteration learner with Boltzmann exploration.
        learner = NFQ()
        learner.explorer = BoltzmannExplorer()
        module = ActionValueNetwork(task.outdim, task.indim)
        LearningAgent.__init__(self, module=module, learner=learner)

    def _enac(self, task):
        # Episodic natural actor-critic on a bias-free feed-forward network.
        module = buildNetwork(task.outdim, task.indim, bias=False)
        policy = ENAC()
        policy.gd.rprop = False
        # step-size bounds -- only used when rprop is enabled
        policy.gd.deltamin = 0.1
        policy.gd.deltamax = 1
        # learning rate / momentum -- only used by plain backprop
        policy.gd.alpha = 0.005
        policy.gd.momentum = 0.9
        LearningAgent.__init__(self, module, policy)

    def _sarsa(self, task):
        # SARSA over an action-value network with state-dependent exploration.
        learner = SARSA()
        learner.explorer = StateDependentExplorer(task.outdim, task.indim)
        LearningAgent.__init__(self, ActionValueNetwork(task.outdim, task.indim), learner)
        


class PublicGoodAgent:
    """
    Composite agent made of (up to) two learning sub-agents that train at the
    same time: an allocator and an optional punisher.  The experiment drives
    both through this object's learn() / newEpisode() / reset(); anything else
    must go through self.allocator and self.punisher directly.
    """
    # Class-level counter used to hand out unique id numbers to instances.
    index = 0

    def __init__(self):
        self.idnumber = PublicGoodAgent.index
        PublicGoodAgent.index += 1

    def __str__(self):
        return "Agent " + str(self.idnumber)

    def setSubAgents(self, task, allocator_type, punisher_type=None):
        """Create the sub-agents for `task`.

        punisher_type=None disables the punisher.  The check is an explicit
        `is not None` because breed index 0 is a valid (but falsy) type --
        the old truthiness test silently skipped a punisher of breed 0.
        """
        self.allocator = _PublicGoodAgent(
            task.contribute, allocator_type, "Allocator " + str(self.idnumber))
        if punisher_type is not None and hasattr(task, 'punish'):
            self.punisher = _PublicGoodAgent(
                task.punish, punisher_type, "Punisher " + str(self.idnumber))
        else:
            self.punisher = None

    def _subagents(self):
        """Yield the sub-agents that exist (the punisher may be None)."""
        for agent in (self.allocator, self.punisher):
            if agent is not None:
                yield agent

    def newEpisode(self):  # is this even necessary?
        for agent in self._subagents():
            agent.newEpisode()

    def learn(self):
        for agent in self._subagents():
            agent.learn()

    def reset(self):
        for agent in self._subagents():
            agent.reset()

    def setLearning(self):
        # NOTE(review): despite the name, this *toggles* each sub-agent's
        # learning flag (preserved from the original behaviour).
        for agent in self._subagents():
            agent.learning = not agent.learning
            
