"""Optimizer of weights of mixture components"""

import numpy as np
from copy import deepcopy

class OptimizerExp(object):
    """Optimizer of mixture component weights with exponential forgetting.

    Each step flattens the current weights toward the ``alternative``
    value (exponential forgetting) and then reweights the components by
    the log-likelihood gain they achieve on the new data.
    """

    def __init__(self, forg_factor=0.99, alternative=1.):
        """
        Parameters
        ----------
        forg_factor : float
            Exponential forgetting factor in (0, 1]; 1 disables forgetting.
        alternative : float
            Stabilizing alternative weight the forgetting pulls toward.
        """
        self.forg_factor = forg_factor
        self.alternative = alternative

    def optimize(self, Mix, data):
        """Compute updated component weights for ``Mix`` given ``data``.

        ``Mix`` itself is left untouched: it is deep-copied before the
        component pdfs are updated with ``data``.  The normalized result
        is stored in ``self.weights`` (it is NOT written back to ``Mix``).
        """
        opt_mix = deepcopy(Mix)
        self.weights = np.array(opt_mix.get_weights())
        # Exponential forgetting: w_i <- w_i^phi * alternative^(1 - phi).
        self.weights **= self.forg_factor
        self.weights *= self.alternative ** (1 - self.forg_factor)
        self._normalize_weights()

        logliks = np.zeros(len(self.weights))
        for i in range(len(opt_mix._mix)):
            # Inactive component: its zero weight stays zero regardless.
            if opt_mix._mix[i].weight == 0.:
                continue
            prior_loglik = opt_mix._mix[i].pdf.log_likelihood
            opt_mix._mix[i].pdf.update(data)
            # Log-likelihood gain contributed by the new data.
            logliks[i] = opt_mix._mix[i].pdf.log_likelihood - prior_loglik
        # Shift by the maximum before exponentiating for numerical
        # stability; the shift cancels in the normalization below.
        logliks -= logliks.max()

        self.weights *= np.exp(logliks)
        self._normalize_weights()

    def _normalize_weights(self):
        """Rescale ``self.weights`` in place so that they sum to one."""
        self.weights /= np.sum(self.weights)


class OptimizerExp2(OptimizerExp):
    """Exponential-forgetting optimizer using Giw.predictive_loglikelihood.

    Unlike ``OptimizerExp`` it never mutates the component pdfs, so no
    defensive copy of the mixture is needed.
    """

    def __init__(self, forg_factor=0.99, alternative=1.):
        super(OptimizerExp2, self).__init__(forg_factor, alternative)

    def optimize(self, Mix, data):
        """Compute updated weights from predictive log-likelihoods of ``data``.

        The normalized result is stored in ``self.weights``; ``Mix`` is
        not modified.
        """
        self.weights = np.array(Mix.get_weights())
        # Exponential forgetting: w_i <- w_i^phi * alternative^(1 - phi).
        self.weights **= self.forg_factor
        self.weights *= self.alternative ** (1 - self.forg_factor)
        self._normalize_weights()

        logliks = np.zeros(len(self.weights))
        for i in range(len(Mix._mix)):
            # Inactive component: its zero weight stays zero regardless.
            if Mix._mix[i].weight == 0.:
                continue
            logliks[i] = Mix._mix[i].pdf.predictive_loglikelihood(data)

        # Shift by the maximum before exponentiating for numerical
        # stability; the shift cancels in the normalization below.
        logliks -= logliks.max()
        self.weights *= np.exp(logliks)
        self._normalize_weights()

class OptimizerLinearScalar(OptimizerExp):
    """Optimizer with linear forgetting and a scalar stabilizing term."""

    def __init__(self, forg_factor=0.99, alternative=0.):
        super(OptimizerLinearScalar, self).__init__(forg_factor, alternative)

    def optimize(self, Mix, data):
        """Compute updated weights using linear forgetting.

        Active weights follow w_i <- phi * w_i + alternative; they are
        then reweighted by the predictive log-likelihood of ``data``.
        The normalized result is stored in ``self.weights``; ``Mix`` is
        not modified.
        """
        self.weights = np.array(Mix.get_weights())
        self.weights *= self.forg_factor
        for i in range(len(Mix._mix)):
            # Only active components receive the stabilizing term.
            if Mix._mix[i].weight != 0.:
                self.weights[i] += self.alternative
        self._normalize_weights()

        logliks = np.zeros(len(self.weights))
        for i in range(len(Mix._mix)):
            # Inactive component: its zero weight stays zero regardless.
            if Mix._mix[i].weight == 0.:
                continue
            logliks[i] = Mix._mix[i].pdf.predictive_loglikelihood(data)

        # Shift by the maximum before exponentiating to avoid overflow;
        # the shift cancels in the normalization (same scheme as the
        # exponential-forgetting optimizers above).
        logliks -= logliks.max()
        self.weights *= np.exp(logliks)
        self._normalize_weights()



#######################################
if __name__ == "__main__":
    # Smoke test; requires the project-local pybamo package (Giw, GiwMixture).
    from pybamo import *
    TestMat = np.array([
                   [ 7., 4., 3. ],
                   [ 0., 5., 2. ],
                   [ 0., 0., 6. ]])
    TestMat += TestMat.T  # symmetrize the upper-triangular seed
    GiwTest = Giw(TestMat, 10)
    GiwTest2 = Giw(TestMat * 0.4, 10)

    MixTest = GiwMixture()
    MixTest.add(GiwTest, 0.1)
    MixTest.add(GiwTest2, 0.9)
    print(MixTest.get_weights())

    data = np.array([.1, .1, .1])

    optim = OptimizerExp()
    optim.optimize(MixTest, data)
    # optimize() stores the result on the optimizer; the mixture's own
    # weights are expected to be unchanged.
    print(optim.weights)
    print(MixTest.get_weights())
    print(MixTest._mix[0].pdf.v.v)

