from pybrain.rl.environments import Environment
import numpy

class World(Environment):
    """
    Base environment wrapper exposing a configurable sensor vector built
    from the shared public-goods environment ``env``.

    The sensor vector has ``outdim`` entries, labelled in ``outparams``,
    and is the concatenation of, in order: the per-player allocations,
    the flattened nplayers x nplayers punishment matrix, and the output
    of ``return_rule`` — each part included only when the corresponding
    constructor flag is set.
    """

    def __init__(self, env, return_allocations=False, return_punishments=False, return_rule=None):
        """
        env                -- shared game state; must expose ``nplayers``,
                              ``allocations``, ``punishments`` and ``epsilon``
        return_allocations -- include each player's allocation in the sensors
        return_punishments -- include the flattened punishment matrix
        return_rule        -- optional callable with a ``dim`` attribute;
                              its output is appended to the sensors

        Raises ValueError when no sensor source is enabled.
        """
        self.env = env

        nplayers = env.nplayers
        states = 0
        sensors = []
        if return_allocations:
            states += nplayers
            sensors += ["return_allocations"] * nplayers
        if return_punishments:
            # One sensor per (punisher, punished) pair.
            npairs = nplayers * nplayers
            states += npairs
            sensors += ["return_punishments"] * npairs
        if return_rule:
            states += return_rule.dim
            sensors += ["return_rule"] * return_rule.dim
        if states <= 0:
            # Was a bare assert; raise explicitly so the check survives -O.
            raise ValueError("at least one sensor source must be enabled")

        self.outdim = states
        self.outparams = sensors
        self.return_allocations = return_allocations
        self.return_punishments = return_punishments
        self.return_rule = return_rule

    def getSensors(self):
        """
        Return the current sensor vector as a 1-D numpy array, assembled
        according to the return_* flags given to the constructor.
        """
        output = numpy.array([])
        if self.return_allocations:
            output = numpy.hstack((output, self.env.allocations))
        if self.return_punishments:
            output = numpy.hstack((output, self.env.punishments.ravel()))
        if self.return_rule:
            if self.env.epsilon == 0:
                # Punishment stage is disabled, so env.punishments may not
                # exist — hand the rule an empty list instead.
                punishments = []
            else:
                punishments = self.env.punishments
            output = numpy.hstack((output, self.return_rule(self.env.allocations, punishments)))

        return output


class GroupAccount(World):
    """
    Environment view for the contribution stage: the agent supplies a
    single action value (``indim`` is always 1), and the sensors are
    whatever combination of allocations, punishments, or rule output
    the World base class was configured to expose.
    """

    def __init__(self, env, return_allocations=False, return_punishments=False, return_rule=None):
        self.indim = 1
        World.__init__(self, env, return_allocations, return_punishments, return_rule)

    def performAction(self, action, idnumber):
        """Store player ``idnumber``'s contribution in the shared environment."""
        # action arrives as an array; only its first entry is meaningful here.
        contribution = action[0]
        self.env.allocations[idnumber] = contribution
      
    
class PunishmentEnvironment(World):
    """
    Environment view for the punishment stage. The action is an array of
    ``nplayers`` punishment amounts whose entry at the acting player's own
    index must be zero (no self-punishment). Sensors are inherited from
    World: previous allocations, punishments, and/or rule output.
    """

    def __init__(self, env, return_allocations=False, return_punishments=False, return_rule=None):
        # One action component per *other* player.
        self.indim = env.nplayers - 1
        World.__init__(self, env, return_allocations, return_punishments, return_rule)

    def performAction(self, hitlist, idnumber):
        """
        Record row ``idnumber`` of the punishment matrix: ``hitlist[j]`` is
        the punishment player ``idnumber`` deals to player ``j``.
        """
        assert hitlist[idnumber] == 0  # players may not punish themselves
        assert len(hitlist) == self.env.nplayers, str(hitlist)

        # Overwrites (not accumulates) this player's punishment row.
        self.env.punishments[idnumber] = hitlist

class PublicGoodEnvironment(Environment):
    """
    Top-level public-goods game state shared by the GroupAccount and
    PunishmentEnvironment sub-environments. Note that it does not
    implement performAction or getSensors itself.
    """

    def __init__(self, nplayers, y=20.0, alpha=0.4, epsilon=3.0, grp_acc=None, punish_env=None):
        """
        nplayers -- number of players
        y        -- per-round endowment for each player
        alpha    -- fraction of the pooled contributions returned to everyone
        epsilon  -- effectiveness multiplier of received punishment
                    (0 disables the punishment stage entirely)

        Sub-environments may be supplied here or later via
        ``set_sub_environments``.
        """
        self.nplayers = nplayers
        self.y = y
        self.alpha = alpha
        self.epsilon = epsilon
        # Fixed: these two were previously assigned twice in __init__.
        self.group_account = grp_acc
        self.punish_environment = punish_env
        self.reset()

    def reset(self):
        """Zero the per-round state; punishments exist only when enabled."""
        self.reset_allocations()
        if self.epsilon != 0:
            self.reset_punishments()

    def getAllocations(self):
        return self.allocations

    def getPunishments(self):
        return self.punishments

    def reset_allocations(self):
        # allocations[i] == contribution of player i this round
        self.allocations = numpy.zeros((self.nplayers))

    def reset_punishments(self):
        # punishments[i, j] == punishment dealt by player i to player j
        self.punishments = numpy.zeros((self.nplayers, self.nplayers))

    def set_sub_environments(self, rules_grp, rules_punish):
        """Build the stage environments; the punishment stage only if epsilon != 0."""
        self.group_account = GroupAccount(self, *rules_grp)
        if self.epsilon != 0:
            self.punish_environment = PunishmentEnvironment(self, *rules_punish)

    def giveReward(self, idnumber, calc_punishments=True):
        """
        Return player ``idnumber``'s payoff for the round:
        endowment - own contribution + alpha * total contributions,
        minus epsilon * punishment received and 1 * punishment dealt
        (only when punishment is enabled and requested).
        """
        earnings = self.y - self.allocations[idnumber] + self.allocations.sum() * self.alpha
        if calc_punishments and self.epsilon != 0:
            # Column idnumber: punishment received; row idnumber: punishment dealt.
            received = self.punishments[..., idnumber].sum() * self.epsilon
            dealt = self.punishments[idnumber, ...].sum()
            return earnings - received - dealt
        return earnings
