"""GiW module"""

import numpy as np
from ldmat import Ldmat, ltdl
from scipy.special import gamma, gammaln
from scipy.constants import pi

class Giw(object):
    """
    Gauss-inverse-Wishart (GiW) pdf.

    Attributes
    ----------
    _nu : float
        degrees of freedom
    _no_of_coefs : int
        dimension of the extended information matrix
    _v : Ldmat
        information matrix in L'DL decomposition
    desc : str
        description (optional)
    mixtools_compat : bool
        switch for log-likelihood compatibility with mixtools

    Methods
    -------
    __init__(mat_v=None, nu=1, desc=None)
        constructor
    __str__()
        instance description
    batch_update(datamatrix)
        update GiW by a batch of data
    set_params(mat_v, nu=1)
        set parameters of GiW pdf
    update(data)
        rank-one update
    predictive_loglikelihood(regressor)
        predictive log-likelihood

    Properties
    ----------
    nu : float
        degrees of freedom
    v : Ldmat
        information matrix in L'DL decomposition
    d11 : float
        LS remainder
    est_theta : np.array
        LS estimates of regression coefficients
    est_theta_cov : np.array
        covariance of est_theta
    expected_noise_cov : float
        expected noise covariance (here scalar)
    log_likelihood : float
        absolute log-likelihood
    """

    def __init__(self, mat_v=None, nu=1, desc=None):
        """
        Constructor. Initializes private attributes.

        Parameters
        ----------
        mat_v : np.array or Ldmat or dict, optional
            Information matrix. A plain np.array is factorized, an
            Ldmat instance is taken as-is, and a dict with keys
            'est_theta', 'est_theta_cov' and 'd_lsr' rebuilds the
            matrix from least-squares parameters. Defaults to a weak
            prior for a scalar regression.
        nu : float
            Degrees of freedom.
        desc : str
            Description.

        Raises
        ------
        TypeError
            If mat_v is of an unsupported type.
        """
        self._nu = nu
        self.desc = desc
        self.mixtools_compat = False

        if mat_v is None:
            # Default weakly informative prior (2x2 extended matrix)
            mat_v = np.eye(2) * 0.01
            mat_v[0, 0] = 0.1

        if isinstance(mat_v, np.ndarray):
            # Ordinary nonfactorized matrix
            self._no_of_coefs = mat_v.shape[1]
            self._v = Ldmat(mat_v)
        elif isinstance(mat_v, Ldmat):
            # L'DL factorized matrix, taken as-is
            self._v = mat_v
            # BUGFIX: _no_of_coefs used to be left unset on this path
            self._no_of_coefs = len(mat_v.d)
        elif isinstance(mat_v, dict):
            # Rebuild V from LS parameters est_theta, est_theta_cov
            # and the LS remainder d_lsr (est_theta_cov nonfactorized)
            est_theta = mat_v['est_theta']
            est_theta_cov = mat_v['est_theta_cov']
            d_lsr = mat_v['d_lsr']

            cov_l, cov_d = ltdl(np.linalg.inv(est_theta_cov))
            d = np.r_[d_lsr, cov_d]
            l = np.eye(len(d))
            l_tmp = np.dot(cov_l, est_theta)
            l_tmp = np.column_stack((l_tmp, cov_l))
            l[1:] = l_tmp
            # Merge L and D into the single L+D-I representation
            ld = l + np.diag(d) - np.eye(len(d))
            self._v = Ldmat(ld, from_ld=True)
            # BUGFIX: _no_of_coefs used to be left unset on this path
            self._no_of_coefs = len(d)
        else:
            # BUGFIX: unsupported types used to pass silently, leaving
            # self._v unset and causing an AttributeError much later
            raise TypeError("mat_v must be np.ndarray, Ldmat or dict")

    def __str__(self):
        """Return a human-readable summary of the pdf."""
        rep = "\n" + "=" * 30
        rep += "\nGiw pdf: (" + str(self.desc) + ")\n" + "-" * 30
        rep += "\nInf. matrix: \n"
        rep += "L = \n" + str(self._v.l)
        rep += "\nD = " + str(self._v.d)
        rep += "\nDegrees of freedom: " + str(self._nu)
        rep += "\n" + "=" * 30 + "\n\n"
        return rep

    def set_params(self, mat_v, nu=1):
        """Set parameters V (nonfactorized np.array) and nu of the GiW pdf."""
        self._no_of_coefs = mat_v.shape[1]
        self._v = Ldmat(mat_v)
        self._nu = nu

    @property
    def nu(self):
        """Degrees of freedom."""
        return self._nu

    @property
    def v(self):
        """Information matrix (Ldmat, L'DL decomposition)."""
        return self._v

    @property
    def est_theta(self):
        """LS estimates (mean) of the regression coefficients."""
        est_th = np.linalg.inv(self.v.l[1:, 1:])
        est_th = np.dot(est_th, self.v.l[1:, 0])
        return est_th

    @property
    def est_theta_cov(self):
        """Covariance of the regression-coefficient estimates."""
        l2 = self.v.l[1:, 1:]
        d2 = np.diag(self.v.d[1:])
        # inv(L2' * D2 * L2)
        est_th_cov = np.dot(l2.T, d2)
        est_th_cov = np.dot(est_th_cov, l2)
        est_th_cov = np.linalg.inv(est_th_cov)
        return est_th_cov

    @property
    def d11(self):
        """LS remainder (first diagonal element of D)."""
        return self.v.d[0]

    @property
    def expected_noise_cov(self):
        """Expected noise covariance (here scalar)."""
        return self.v.d[0] / self._nu

    def update(self, data):
        """Rank-one update of the pdf by a single data vector."""
        self._nu += 1
        self._v.update(data)

    def batch_update(self, datamatrix):
        """Update the pdf by each row of datamatrix via rank-one updates."""
        # range (not the Py2-only xrange) — identical behavior, Py3-safe
        for i in range(np.size(datamatrix, 0)):
            self._nu += 1
            self._v.update(datamatrix[i])

    @property
    def log_likelihood(self):
        """
        Absolute log-likelihood of the pdf.

        Equivalent to the logarithm of the normalizing constant of the
        pdf. When the instance attribute ``mixtools_compat`` is True,
        the Mixtools-compatible form (without ln(2) and other constant
        terms) is returned; otherwise a full normalization is used.
        """
        # Mixtools-compatible form
        log_likelihood = gammaln(0.5 * (self._nu + 2))
        log_likelihood -= 0.5 * (self._nu + 2) * np.log(self.v.d[0])
        log_likelihood -= 0.5 * (np.sum(np.log(self.v.d[1:])))

        if not self.mixtools_compat:
            # Full normalization (after V. Smidl). NOTE: this branch
            # replaces the value computed above rather than adding the
            # missing constants to it.
            nPsi = len(self._v.d)
            m = self._nu - nPsi - 1
            nkG = 0.5 * (-nPsi * np.log(2 * pi)
                         + np.sum(np.log(self.v.d[1:])))
            lg = gammaln(0.5 * m)
            nkW = 0.5 * (m * np.log(self.v.d[0])) \
                - 0.5 * (m * np.log(2)) - lg
            log_likelihood = -nkG - nkW

        return log_likelihood

    def predictive_loglikelihood(self, regressor):
        """
        Evaluate the predictive log-likelihood of an extended regressor.

        Parameters
        ----------
        regressor : np.array
            Extended data vector; the first element is the modelled
            value, the rest is the regression vector.
        """
        predicted_value = np.dot(self.est_theta, regressor[1:])
        err = regressor[0] - predicted_value
        npsi = self._v.d.shape[0] - 1
        C1 = gammaln(0.5 * (self._nu - npsi - 1))
        C1 -= gammaln(0.5 * (self._nu - npsi - 2))
        zeta = np.dot(regressor[1:], self.est_theta_cov)
        zeta = np.dot(zeta, regressor[1:])
        C2 = self._v.d[0] * (1 + zeta)
        pred_ll = C1 - 0.5 * (np.log(C2) + np.log(pi))
        pred_ll -= 0.5 * (self._nu - npsi - 1) * np.log(1 + (err ** 2) / C2)
        return pred_ll



def data_matrix(data, ar_order, constant=True):
    """
    Construct a matrix containing AR-model data in rows.

    Each row has the form [y_t, y_{t-1}, ..., y_{t-ar_order}(, 1)],
    i.e. the modelled value followed by the lagged regressor (and an
    optional constant).

    Parameters
    ----------
    data : array_like
        Vector of data.
    ar_order : int
        Order of the AR model; must be >= 1.
    constant : bool
        True if the AR model uses an absolute (constant) term.

    Returns
    -------
    np.array
        Matrix whose rows are data vectors for the AR model.

    Raises
    ------
    ValueError
        If ar_order < 1 or data is not longer than ar_order.
    """
    data = np.array(data)
    # BUGFIX: ar_order=0 used to produce empty/garbage slices and short
    # data crashed inside vstack with an opaque shape error
    if ar_order < 1:
        raise ValueError("ar_order must be >= 1")
    if data.size <= ar_order:
        raise ValueError("data must contain more than ar_order samples")

    dm = np.array(data[0: -ar_order])
    # Stack progressively less-lagged slices on top of the older ones;
    # range (not the Py2-only xrange) keeps Py3 compatibility
    for i in range(1, ar_order):
        dm = np.vstack((data[i: -ar_order + i], dm))
    dm = np.vstack((data[ar_order:], dm))

    if constant:
        # Row of ones models the absolute term
        dm = np.vstack((dm, np.ones(np.size(dm, 1))))
    return dm.T
            

#===========================================
if __name__ == "__main__":
    TestMat = np.array([
                   [ 0.7 ,  0.4 ,  0.  ],
                   [ 0.4 ,  0.5 ,  0.15],
                   [ 0.  ,  0.15,  0.6 ]])
    GiwTest = Giw(TestMat, 5)
    print GiwTest
    GiwTest.desc = 'Hokus pokus'
    print GiwTest
    GiwTest.desc = None 
    print GiwTest.expected_noise_cov
    print GiwTest
    print '======'
    print "Degr. of freedom: ", GiwTest.nu
    likelihood = GiwTest.log_likelihood

    pred_loglik = GiwTest.predictive_loglikelihood(np.array([1.,2.,3.]))
    print "Predictive loglikelihood: ", pred_loglik

    GiwTest.update(np.array([1., 2., 3.]))
    print "Likelihood (abs): ", GiwTest.log_likelihood - likelihood

    print "\n" * 3
    print "-*" * 30
    print "Test: GiW from LS parameters"
    Giw_fullrank = Giw(TestMat, 5)
    mat_v = {}
    mat_v['est_theta'] = Giw_fullrank.est_theta
    mat_v['est_theta_cov'] = Giw_fullrank.est_theta_cov
    mat_v['d_lsr'] = Giw_fullrank.d11
    Giw_ls = Giw(mat_v, 5)
    print "Should be zero matrix:"
    print Giw_ls.v.v - Giw_fullrank.v.v
    print "Should be zero:"
    print Giw_ls.nu - Giw_fullrank.nu
    


