"""Regression model with partial forgetting"""

import numpy as np
from giw import *
from giwmix import *
from regmodel import *
from itertools import *
from copy import deepcopy

def perm_matrix(size):
    """
    Generate the permutation (hypothesis) matrix for `size` parameters.

    Each row represents one forgetting hypothesis: an entry of 1 means
    the corresponding parameter varies (is forgotten), 0 means it is
    kept. All 2**size combinations are enumerated; column i alternates
    in blocks of length 2**i.

    Parameters
    ----------
    size : int
        Number of parameters (columns).

    Returns
    -------
    numpy.ndarray
        Integer 0/1 matrix of shape (2**size, size).
    """
    ncols = size
    nrows = 2 ** size
    # `np.int` is a removed deprecated alias (NumPy >= 1.24); the
    # builtin `int` dtype is equivalent and portable.
    matrix = np.zeros((nrows, ncols), dtype=int)
    for i in range(ncols):
        period = 2 ** i
        pattern = np.hstack((np.ones(period), np.zeros(period)))
        # Floor division keeps the tile count an int under Python 3
        # (true division would yield a float and break np.tile).
        count = nrows // (2 * period)
        matrix[:, i] = np.tile(pattern, count)
    return matrix


def allcombinations(iterable, r_seq):
    """
    Chain together all r-combinations of `iterable` for every r in `r_seq`.

    Parameters
    ----------
    iterable : iterable
        Source items; materialized once so they can be reused for each r.
    r_seq : iterable of int
        Combination sizes to generate, in order.

    Returns
    -------
    iterator of tuple
        All combinations of the first size, then the second, etc.
    """
    items = list(iterable)
    # Generator expression replaces the Python-2-only itertools.imap,
    # staying lazy while remaining portable across Python versions.
    return chain.from_iterable(combinations(items, r) for r in r_seq)

class RegmodelPF(Regmodel, Giw):
    """
    Bayesian regression model with partial forgetting

    Attributes
    ----------
    _model_log_likelihood : float
        Log-likelihood of the model
    _size : int
        Size of the inf. matrix
    factor : numpy.array
        Forgetting factors (weights of the forgetting hypotheses)
    v_alt : numpy.array
        Alternative inf. matrix
    nu_alt : numpy.array
        Alternative degrees of freedom

    Methods
    -------
    __init__(mat_v=None, nu=1, desc=None, factor=None,
             mixtools_compat=False)
        Constructor
    alternative_info(method='exponential', parameters={'fact': .99})
        Set alternative information
    alternative_info_absterm(method='exponential', parameters={'fact': .99})
        Set alternative information used for the absolute term only
    model_update(data, factor=None)
        Update of the regression model by data
    """
    def __init__(self, mat_v=None, nu=1, desc=None, factor=None,
                 mixtools_compat=False):
        """Constructor

        Parameters
        ----------
        mat_v : Ldmat or np.array
            Information matrix.
        nu : float
            Counter (degrees of freedom).
        desc : str
            Description.
        factor : np.array
            Forgetting weights, one per hypothesis. Defaults to
            [1, 0, ..., 0], i.e. full weight on "no forgetting".
        mixtools_compat : bool
            Mixtools compatibility mode for likelihoods.
        """
        super(RegmodelPF, self).__init__(mat_v, nu, desc, mixtools_compat)
        self._model_log_likelihood = 0
        self.__last_log_likelihood = self.log_likelihood
        self._size = self.v.shape[0]
        # Prior statistics are kept for the 'prior' alternative method.
        self._prior_v = deepcopy(mat_v)
        self._prior_nu = nu
        self._log['weights_hist'] = []
        # Default alternative-information settings (see alternative_info).
        self.alter = {'method': 'exponential',
                      'parameters': {'fact': 0.99}}
        self.alter_absterm_flag = False
        self.alter_absterm = {'method': 'exponential',
                              'parameters': {'fact': 0.99}}
        self.optimizer = None
        # `is None`, not `== None`: comparing a numpy array with `==`
        # is elementwise and raises in boolean context.
        if factor is None:
            # There are 2**(size-1) hypotheses (one per parameter subset).
            self.factor = np.zeros(2 ** (self._size - 1))
            self.factor[0] = 1
        else:
            self.factor = factor

    def alternative_info(self, method=None, parameters=None):
        """
        Setter of alternative info

        Supported methods: 'exponential' (flattening of the posterior),
        'prior' (reset to prior statistics) and 'flat_same_mean'
        (flat pdf centered on the current point estimate).
        """
        if method is not None:
            self.alter['method'] = method
        if parameters is not None:
            self.alter['parameters'] = parameters

        self.v_alt = deepcopy(self._v)
        self.nu_alt = self._nu

        if self.alter['method'] == 'exponential':
            # Exponential forgetting: scale the statistics down.
            self.v_alt._d *= self.alter['parameters']['fact']
            self.nu_alt *= self.alter['parameters']['fact']
        elif self.alter['method'] == 'prior':
            self.v_alt = Ldmat(self._prior_v)
            self.nu_alt = self._prior_nu
        elif self.alter['method'] == 'flat_same_mean':
            # Keep the point estimate but inflate the parameter
            # covariance so that the alternative pdf is flat.
            est_theta = self.est_theta
            d_lsr = self.expected_noise_cov * self._nu

            est_theta_var_max = np.max(np.diag(self.est_theta_cov))
            expected_noise_cov = self.expected_noise_cov
            fact = est_theta_var_max * 1.5 / expected_noise_cov
            est_theta_cov = np.eye(self._size - 1) * fact

            # Rebuild the L'DL information matrix from the inflated
            # covariance while preserving the mean.
            cov_l, cov_d = ltdl(np.linalg.inv(est_theta_cov))
            d = np.r_[d_lsr, cov_d]
            l = np.eye(len(d))
            l_tmp = np.dot(cov_l, est_theta)
            l_tmp = np.column_stack((l_tmp, cov_l))
            l[1:] = l_tmp
            ld = l + np.diag(d) - np.eye(len(d))
            self.v_alt = Ldmat(ld, from_ld=True)
            self.nu_alt = self._nu

    def alternative_info_absterm(self, method=None, parameters=None):
        """
        Setter of alternative info different for abs. term

        Same methods as alternative_info(); calling this activates the
        dedicated treatment of the absolute term in model_update().
        """
        self.alter_absterm_flag = True
        if method is not None:
            self.alter_absterm['method'] = method
        if parameters is not None:
            self.alter_absterm['parameters'] = parameters

        self.v_alt_absterm = deepcopy(self._v)
        self.nu_alt_absterm = self._nu

        if self.alter_absterm['method'] == 'exponential':
            self.v_alt_absterm._d *= self.alter_absterm['parameters']['fact']
            self.nu_alt_absterm *= self.alter_absterm['parameters']['fact']
        elif self.alter_absterm['method'] == 'prior':
            self.v_alt_absterm = deepcopy(Ldmat(self._prior_v))
            self.nu_alt_absterm = self._prior_nu
        elif self.alter_absterm['method'] == 'flat_same_mean':
            # Same construction as in alternative_info(); see there.
            est_theta = self.est_theta
            d_lsr = self.expected_noise_cov * self._nu

            est_theta_var_max = np.max(np.diag(self.est_theta_cov))
            expected_noise_cov = self.expected_noise_cov
            fact = est_theta_var_max * 1.5 / expected_noise_cov
            est_theta_cov = np.eye(self._size - 1) * fact

            cov_l, cov_d = ltdl(np.linalg.inv(est_theta_cov))
            d = np.r_[d_lsr, cov_d]
            l = np.eye(len(d))
            l_tmp = np.dot(cov_l, est_theta)
            l_tmp = np.column_stack((l_tmp, cov_l))
            l[1:] = l_tmp
            ld = l + np.diag(d) - np.eye(len(d))
            self.v_alt_absterm = Ldmat(ld, from_ld=True)
            self.nu_alt_absterm = self._nu

    def model_update(self, data, factor=None):
        """
        Updates model statistics

        Builds a mixture of Giw pdfs — one per forgetting hypothesis —
        weighted by self.factor, approximates it by a single Giw pdf
        and performs the data update on the result.

        Parameters
        ----------
        data : np.array
            Data vector
        factor : np.array
            Forgetting factors (weights of hypotheses). If None,
            self.factor is used
        """
        # `is not None`: `factor != None` is an elementwise comparison
        # for numpy arrays and raises "truth value of an array is
        # ambiguous" when used in a condition.
        if factor is not None:
            self.factor = factor.T

        prior_log_likelihood = self.log_likelihood
        self.alternative_info()

        if self.alter_absterm_flag:
            self.alternative_info_absterm()

        Mix = GiwMixture()
        iperm_mat = iter(perm_matrix(self._size - 1))

        factor_no = -1
        for vec_alt in iperm_mat:
            factor_no += 1
            if vec_alt.sum() == 0:
                # Filtered pdf: nothing forgotten.
                Mix.add(Giw(self.v, self.nu, desc='Filtered'),
                        self.factor[factor_no])
            elif vec_alt.sum() == self._size - 1:
                # Completely forgotten pdf.
                Mix.add(Giw(self.v_alt, self.nu_alt, desc="Full alternative"),
                        self.factor[factor_no])
            else:
                # Partial forgetting: build a permutation placing the
                # forgotten parameters first (index 0 — the data row —
                # always stays in front).
                vec_perm = []
                for j in range(self._size - 1):
                    position = self._size - j - 2
                    if vec_alt[position] == 0:
                        vec_perm.insert(j, position + 1)
                    else:
                        vec_perm.insert(0, position + 1)
                vec_perm.insert(0, 0)
                # `np.int` removed in NumPy >= 1.24; builtin int dtype.
                vec_perm = np.array(vec_perm, dtype=int)

                if (self.alter_absterm_flag and vec_alt.sum() == 1.
                        and vec_alt[-1] == 1.):
                    # Only the absolute term is forgotten — use its
                    # dedicated alternative statistics.
                    alt_v_tmp = deepcopy(self.v_alt_absterm)
                    alt_nu_tmp = self.nu_alt_absterm
                else:
                    alt_v_tmp = deepcopy(self.v_alt)
                    alt_nu_tmp = self.nu_alt

                alt_v_tmp.perm(vec_perm)

                filt_v_tmp = deepcopy(self._v)
                filt_v_tmp.perm(vec_perm)

                # Combine rows of the alternative (forgotten part) and
                # the filtered (kept part) L'DL factors.
                D = np.hstack((alt_v_tmp.d[: vec_alt.sum() + 1],
                               filt_v_tmp.d[vec_alt.sum() + 1:]))
                L = np.vstack((alt_v_tmp.l[: vec_alt.sum() + 1],
                               filt_v_tmp.l[vec_alt.sum() + 1:]))
                ld_tmp = Ldmat(L - np.eye(D.shape[0]) + np.diag(D),
                               from_ld=True)

                # Undo the permutation.
                vec_perm = np.argsort(vec_perm)
                ld_tmp.perm(vec_perm)

                Mix.add(Giw(ld_tmp, alt_nu_tmp, desc=vec_alt),
                        self.factor[factor_no])

        if self.optimizer:
            self.optimizer.optimize(Mix, data)
            self.factor = self.optimizer.weights
            Mix.set_weights(self.factor)

        # Approximate the mixture by a single Giw pdf.
        Giw_apr = Mix.approx()
        self._v, self._nu = Giw_apr._v, Giw_apr._nu
        # TODO: decide how the likelihoods should be computed; when no
        # weight vector is given in advance, it might be preferable to
        # create it as [1, 0, ..., 0].

        self.update(data)
        self._log['est_theta_hist'].append(self.est_theta)
        self._log['weights_hist'].append(self.factor)

        posterior_log_likelihood = self.log_likelihood
        self.delta_log_likelihood = posterior_log_likelihood - prior_log_likelihood
        self._model_log_likelihood += self.delta_log_likelihood

class weights_plotter(object):
    """
    Stacked area plot of the evolution of hypothesis weights.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Target axes to draw into.
    weights_log : list of np.array
        History of weight vectors, one entry per time step.
    rows : sequence of int
        Indices of the weights to plot.
    """

    def __init__(self, ax, weights_log, rows):
        # Transpose so each row holds one weight's full history.
        weights = np.array(weights_log).T
        weights = np.array(weights[rows])
        ax.grid(True)
        ax.set_title("Weights")
        nweights, ndata = np.shape(weights)
        x = np.arange(ndata) + 1
        colors = ('CornflowerBlue', 'burlywood', 'gold',
                  'lawngreen', 'coral', 'b',
                  'm', 'k', 'c', 'black', 'w')
        color = iter(colors)
        sum_weights = np.zeros(ndata)
        for i in range(nweights):
            # next(color) rather than color.next(): the .next() method
            # is Python-2-only; the builtin works on both versions.
            ax.fill_between(x, weights[i] + sum_weights, sum_weights,
                            color=next(color), label='w' + str(i))
            sum_weights += weights[i]

        # Proxy rectangles for the legend: fill_between() produces
        # PolyCollections, which legend() does not label directly.
        from matplotlib.patches import Rectangle
        color = iter(colors)
        r = (Rectangle((0, 0), 1, 1, color=next(color))
             for i in range(nweights))
        ax.legend(r, ('M' + str(i) for i in range(nweights)))




#--------------------------------------------

# Self-test / regression scenarios (Python 2 script); select one via
# the `regtest` variable below.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from pybamo.optimizer import *
    np.set_printoptions(precision=6, suppress=True)

    # Scenario selector:
    #   'NoOptimFalse' - fixed weights, single update, comparison with a
    #                    reference BDM result
    #   'OptimFalse'   - weights optimized by OptimizerExp over 100 updates
    #   'NoOptimTrue'  - simulated AR(1) data, mixtools compatibility mode
    regtest = 'OptimFalse'

    if regtest == 'NoOptimFalse':
        # Upper-triangular seed; symmetrized below to form the
        # information matrix.
        V = np.array([[5., 4., 3., 2.],
                      [0., 2., 1., 2.],
                      [0., 0., 7., 2.],
                      [0., 0., 0., 8.]])
        V = V + V.T

        # Reference increment produced by the BDM implementation,
        # used for comparison only.
        V_bdm = np.array([[0.230941, 0.760766, 0.864478, 0.924887],
                          [0.760766, 0.75685, 0.969704, 0.917055],
                          [0.864478, 0.969704, 0.272345, 0.939409],
                          [0.924887, 0.917055, 0.939409, 0.0274006]])

        dt = np.array([1., 1., 1., 1.])

        # One weight per forgetting hypothesis (2**3 = 8 of them).
        weights = np.array([.2, .1, .2, .1, .1, .1, .1, .1])

        model = RegmodelPF(V, 10, factor=weights)
        model.alternative_info(method='exponential', parameters={'fact': .9})
        for i in xrange(1):
            model.model_update(dt)

        print "\n"
        print model.v.v
        print "\nKontrola"
        # "Kontrola" = check: difference against the initial matrix.
        print model.v.v - V
        print "\nRozdil s BDM"
        # "Rozdil s BDM" = difference from the BDM reference.
        print model.v.v - V - V_bdm


    elif regtest == 'OptimFalse':
        np.set_printoptions(precision=5, suppress=True)
        V = np.array([[5., 4., 3., 2.],
                      [0., 2., 1., 2.],
                      [0., 0., 7., 2.],
                      [0., 0., 0., 8.]])
        V = V + V.T

        dt = np.array([1., 1., 1., 1.])
        weights = np.array([.1, .1, .1, .1, .1, .1, .3, .1])

        optim = OptimizerExp()

        model = RegmodelPF(V, 10, factor=weights)
        model.alternative_info(method='exponential', parameters={'fact': .99})
        # Dedicated (stronger) forgetting for the absolute term.
        model.alternative_info_absterm(method='exponential', parameters={'fact': .9})
        model.optimizer = optim


        for i in xrange(100):
            model.model_update(dt)
            print optim.weights

        print model.model_log_likelihood



    elif regtest == 'NoOptimTrue':
        # Simulate an AR(1) process with drift: y_t = 0.9*y_{t-1} - 0.5
        # + noise + t.
        data_real = [7.]
        theta = (0.9, -0.5)
        for i in xrange(1, 500):
            data_real.append(theta[0] * data_real[i-1] \
                             + theta[1] + np.random.randn(1) * 0.1 + i)
        datamatrix = data_matrix(data_real, 1)
        data = iter(datamatrix)

        model = RegmodelPF(np.eye(3) * 0.01,\
                           factor=np.array([0.1,0.1,0.1,0.7]),\
                           mixtools_compat=True)

        ll_prior = model.log_likelihood

        for dt in data:
            model.model_update(dt)
        ll_post = model.log_likelihood


        print model.v.ld

        # "kumulativne" = cumulative, "standardne" = standard: the two
        # ways of accumulating the log-likelihood should agree.
        print "kumulativne", model._model_log_likelihood
        print "standardne", ll_post - ll_prior

        model.plot_est_theta_hist()
