"""Dynamic Bayesian model averaging interface"""

import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from itertools import izip
import matplotlib.path as path
import matplotlib.patches as patches


class Dma(object):
    """
    Dynamic model averaging (DMA) over a set of registered models.

    Attributes
    ----------
    weights : numpy.ndarray
        Current weight of each model (kept normalized to sum to 1).
    models : list
        Registered model objects.  Each model is expected to provide
        ``model_update(data)``, ``est_theta`` and
        ``delta_log_likelihood`` (contract inferred from usage below).
    weights_flattening_factor : float
        Forgetting factor applied when predicting the weights.
    weights_update_method : str
        Strategy used by ``_predict_weights``.
    _log : dict
        History of model predictions, averaged predictions, real
        outputs, residuals and weights.
    """

    def __init__(self, models_count, weights_flattening_factor=0.99):
        """
        Parameters
        ----------
        models_count : int
            Number of models that will be averaged.
        weights_flattening_factor : float, optional
            Forgetting factor for the models' weights (default 0.99).
        """
        # Start from a uniform prior over the models.
        self.weights = np.ones(models_count) / models_count
        self.models = []
        self.weights_flattening_factor = weights_flattening_factor
        self.weights_update_method = 'linear_uniform'
        self._log = {'y_models': [],
                     'y_avg': [],
                     'y_real': [],
                     'y_residual': [],
                     'weights': []}

    def register(self, model):
        """
        Register a new model.

        Raises
        ------
        ValueError
            If more models are registered than ``models_count`` allows.
        """
        if len(self.models) >= len(self.weights):
            # Raise instead of sys.exit(1): library code should not
            # kill the interpreter on a recoverable usage error.
            raise ValueError("Error: more models than preset")
        self.models.append(model)

    def unregister(self, model_no):
        """
        Unregister model number ``model_no``.

        The model and its weight are removed and the remaining
        weights are re-normalized.
        """
        self.models.pop(model_no)
        self.weights = np.delete(self.weights, model_no)
        self.normalize_weights()

    def disable_model(self, model_no):
        """
        Disable a model by setting its weight to 0.

        The model object itself remains registered in the DMA.
        """
        self.weights[model_no] = 0

    def normalize_weights(self):
        """Normalize the weights to sum to 1 (no-op if all are zero)."""
        total = self.weights.sum()
        # Guard against division by zero when every model is disabled.
        if total > 0:
            self.weights = self.weights / total

    def update(self, data_zip):
        """
        Update every model with new data and refresh the weights.

        Parameters
        ----------
        data_zip : tuple
            Tuple of numpy arrays with data, one entry per model, in
            the same order the models were registered.

        Raises
        ------
        ValueError
            If the number of data vectors differs from the number of
            registered models.
        """
        if len(data_zip) != len(self.models):
            raise ValueError("Error: the number of data vectors doesn't match!")

        for model, data in zip(self.models, data_zip):
            model.model_update(data)

        self._predict_weights()
        # Append a copy: later in-place operations ('**=', '[ind] *=')
        # would otherwise mutate the logged arrays retroactively.
        self._log['weights'].append(self.weights.copy())
        self._update_weights()

    def regress(self, regression_vec, y_real=None, delay=0):
        """
        Averaged model regression from a regression vector.

        Parameters
        ----------
        regression_vec : sequence
            One regression vector per registered model.
        y_real : array_like, optional
            Measured output; when given, the residual is logged and
            returned together with the prediction.
        delay : int, optional
            Number of calls the coefficient estimates are delayed by.
            Only honored on the first call, which creates the buffer.

        Returns
        -------
        y_avg or (y_avg, residual)
            The weight-averaged prediction; the residual is included
            when ``y_real`` is supplied.
        """
        # Lazily create the delay buffer on the first call.
        if not hasattr(self, 'stack'):
            from collections import deque
            self.stack = deque(maxlen=delay + 1)

        models_no = len(self.models)
        # Push the current coefficient estimates into the delay buffer.
        self.stack.append([model.est_theta for model in self.models])

        if len(self.stack) == self.stack.maxlen:
            # Buffer full: regress with the oldest (delayed) estimates.
            regression_coefs = self.stack.popleft()
            y = [np.dot(regression_vec[i], regression_coefs[i])
                 for i in range(models_no)]
        else:
            # Not enough history accumulated yet -- predict zeros.
            y = np.zeros(models_no)

        self._log['y_models'].append(y)

        y_avg = np.dot(y, self.weights)
        self._log['y_avg'].append(y_avg)

        # 'is not None' avoids numpy's elementwise '!=' comparison.
        if y_real is not None:
            residual = y_avg - y_real
            self._log['y_real'].append(y_real)
            self._log['y_residual'].append(residual)
            return y_avg, residual
        return y_avg

    def _predict_weights(self):
        """
        Predict (flatten) the weights before the likelihood update.

        weights_update_method :
            stabil_exp_trivial
                Flattening and stabilization with a small constant.
            stabil_exp
                Stabilized exponential forgetting (not implemented).
            linear_uniform
                Linear forgetting with a uniform alternative.
        """
        if self.weights_update_method == 'stabil_exp_trivial':
            self.weights **= self.weights_flattening_factor
            self.weights[self.weights.nonzero()] += 0.01
        elif self.weights_update_method == 'stabil_exp':
            pass  # TODO: not implemented yet
        elif self.weights_update_method == 'linear_uniform':
            # Mix only the still-enabled (non-zero) weights with the
            # uniform alternative so disabled models stay at 0.
            ind = self.weights.nonzero()[0]
            alter_uniform = 1. / ind.size
            self.weights[ind] *= self.weights_flattening_factor
            self.weights[ind] += (1. - self.weights_flattening_factor) * alter_uniform
        self.normalize_weights()

    def _update_weights(self):
        """Update the weights with the models' likelihoods (Bayes step)."""
        likelihoods = np.exp([model.delta_log_likelihood
                              for model in self.models])
        likelihoods = [self.fix_likelihood(lik) for lik in likelihoods]

        self.weights = self.weights * likelihoods
        self.normalize_weights()

    def fix_likelihood(self, likelihood):
        """
        Fix a negative likelihood to a small positive value.

        This method does nasty (statistically unsound) things and
        shouldn't be used!
        """
        if likelihood < 0:
            return -1. / likelihood
        return likelihood

    @property
    def log(self):
        """Return the log structure with the modelling history."""
        return self._log

 
class Dma_plotter(object):
    """
    Plotter for DMA results.

    Builds a 2x2 figure from a ``Dma`` log: real data & predictions,
    prediction errors, stacked model weights, and a histogram of the
    prediction errors.  The figure is shown immediately on construction.
    """
    def __init__(self, dma_log, bars=10):
        """
        Parameters
        ----------
        dma_log : dict
            Log structure produced by :class:`Dma` (``Dma.log``).
        bars : int, optional
            Number of bins for the residual histogram (default 10).
        """
        self.dma_log = dma_log
        self.bars = bars

        fig = plt.figure()
        fig.subplots_adjust(hspace=0.3, top=0.95, bottom=0.1)
        self.fig = fig

        self.plot_predictions(dma_log)
        self.plot_residues(dma_log)
        self.plot_residues_hist(dma_log)
        self.plot_weights(dma_log)
        plt.show()

    def plot_predictions(self, dma_log):
        """Plot the real data together with the averaged predictions."""
        ax = self.fig.add_subplot(221)
        ax.grid(True)
        ax.set_title("Real data & predictions")
        ax.plot(self.dma_log['y_real'])
        ax.plot(self.dma_log['y_avg'], '+r')

    def plot_residues(self, dma_log):
        """Plot the prediction errors with +/-1 and +/-2 sigma bands."""
        ax = self.fig.add_subplot(222)
        ax.grid(True)
        ax.set_title("Prediction errors")
        ax.fill_between(1 + np.arange(len(dma_log['y_residual'])),
                        0,
                        dma_log['y_residual'],
                        color='orange')
        std = np.array(dma_log['y_residual']).std()
        count = len(dma_log['y_residual'])
        std_line = np.ones(count) * std
        # Dashed lines: +/- 1 std, solid lines: +/- 2 std.
        ax.plot(std_line, '--', color='b')
        ax.plot(2 * std_line, '-', color='b')
        ax.plot(-std_line, '--', color='b')
        ax.plot(-2 * std_line, '-', color='b')

    def plot_residues_hist(self, dma_log):
        """Plot a histogram of the residues with ``self.bars`` bins."""
        ax = self.fig.add_subplot(224)
        ax.grid(True)
        ax.set_title("Histogram of prediction errors")
        ax.hist(dma_log['y_residual'], self.bars, facecolor='CornflowerBlue')

    def plot_weights(self, dma_log):
        """Stacked area plot of the model weights over time."""
        weights = np.array(dma_log['weights']).T
        ax = self.fig.add_subplot(223)
        ax.grid(True)
        ax.set_title("Models weights")
        models_no, data_no = np.shape(weights)
        x = np.arange(data_no) + 1
        colors = ('CornflowerBlue', 'burlywood', 'gold',
                  'lawngreen', 'coral', 'b',
                  'm', 'k', 'c', 'black', 'w')
        sum_weights = np.zeros(data_no)
        for i in range(models_no):
            # Stack each model's weight band on top of the previous
            # ones; cycle colors so many models don't exhaust them.
            ax.fill_between(x, weights[i] + sum_weights, sum_weights,
                            color=colors[i % len(colors)],
                            label='M' + str(i))
            sum_weights += weights[i]

        from matplotlib.patches import Rectangle
        # fill_between produces no legend handles, so build proxy
        # rectangles in the matching colors.
        proxies = [Rectangle((0, 0), 1, 1, color=colors[i % len(colors)])
                   for i in range(models_no)]
        ax.legend(proxies, ['M' + str(i) for i in range(models_no)])


#-----------------------------------------------
if __name__ == "__main__":
    # Smoke test: build a DMA with three dummy "models", bump one
    # weight and re-normalize.
    dma_model = Dma(3)
    dma_model.register('model')
    dma_model.register('model')
    dma_model.register('model')
    dma_model.weights[1] = 0.8
    dma_model.normalize_weights()
    print(dma_model.weights)
