import numpy as np
import sys
from view.endSrc.MySqlConn import MySqlConn
from sklearn import mixture
from view.endSrc.tGaussianClustersEstimatorBy1D import tGaussianClustersEstimatorBy1D
from view.endSrc.GaussianClusters import GaussianClusters
import random

class GaussianClustersEstimatorBy1D:
    '''
    Estimates a mixture of k 1-D Gaussian components (cluster memberships)
    from 1-dimensional info-data (distances) computed for a high-dimensional
    dataset.  Configuration and results are persisted through the
    tGaussianClustersEstimatorBy1D table wrapper.
    '''

    def __init__(self):
        '''
            Lazy initialization: this constructor must never throw an
            exception or fail an assert; all collaborators are wired in
            later via setConfig().

            We want robust and stateful usecases of this class.
        '''
        self.dbconn = None   # MySqlConn, injected by setConfig()
        self.dbId = None     # row id in tGaussianClustersEstimatorBy1D
        self.logger = None   # taken from the SQL connection in setConfig()
        self.t = tGaussianClustersEstimatorBy1D(None)  # later will pass in SQL connector

        # initial and final Gaussian mixture parameters
        self.paiVec = None      # np.ndarray of component weights
        self.varVec = None      # np.ndarray of component variances
        self.meanVec = None     # np.ndarray of component means
        self.k = None           # number of Gaussian components

        # EM regularization / iteration cap (kept for a manual EM path)
        self.reg_covar = 1e-06
        self.maxIter = 100

    def __str__(self):
        '''
            for simple test
        :return: string form of the underlying table row wrapper
        '''
        return str(self.t)

    def getDBId(self):
        '''
            To be used by Observers
        :return: the database row id set by setConfig(), or None
        '''
        return self.dbId

    def setConfig(self, sqlConn: 'MySqlConn', dbId: np.int64):
        '''
            Load the optimization configuration (initial mixture guess,
            number of components) from the DB row identified by dbId.
        :param sqlConn: open MySqlConn; its logger is adopted by this object
        :param dbId: positive row id in tGaussianClustersEstimatorBy1D
        :return: True on success, False if the row could not be read
        '''
        assert sqlConn is not None
        assert dbId > 0

        self.logger = sqlConn.logger

        # load optimization configures, such as epsilon, maxIter,
        # and initial mixture guess parameters.
        self.t.setSqlConn(sqlConn)
        if not self.t.readRow(dbId):
            self.logger.write('GammaClusterEstimatorBy1D.setConfig..failed to read the row by id=' + str(dbId))
            return False

        params = self.t.estMixedParams
        if params is not None:

            # .get() instead of [] so a row with a partial JSON dict does
            # not raise KeyError; the None-guards below already expect that.
            vec = params.get('meanList')
            if vec is not None:
                self.meanVec = np.asarray(vec)  # 1D np.ndarray
                assert isinstance(self.meanVec[0], np.float64)

            # canonical key is 'varMatList'; fall back to the legacy
            # 'varList' key once emitted by getMixedParamsJSONDict().
            vec = params.get('varMatList', params.get('varList'))
            if vec is not None:
                self.varVec = np.asarray(vec)  # 1D np.ndarray
                assert isinstance(self.varVec[0], np.float64) and (self.varVec > 0).all()

            vec = params.get('paiList')
            if vec is not None:
                self.paiVec = np.asarray(vec)  # 1D np.ndarray
                # BUGFIX: 'and' binds tighter than 'or', so the original
                # skipped the positivity check for float64 weights.
                assert (isinstance(self.paiVec[0], np.float64) or
                        isinstance(self.paiVec[0], np.int64)) and (self.paiVec > 0).all()
                # renormalize weights so they sum to 1
                s = sum(self.paiVec)
                if s != 1.0:
                    self.paiVec = self.paiVec / s

        # copy DB fields (all NOT NULL) for performance
        self.k = self.t.nComponents
        self.dbconn = sqlConn
        self.dbId = dbId

        return True

    def getMixedParamsJSONDict(self, meanVec, varVec, paiVec):
        '''
            provide initial/result mixture parameters to be saved to DB
        :param meanVec: list or np.ndarray of component means
        :param varVec: list or np.ndarray of component variances
        :param paiVec: list or np.ndarray of component weights
        :return: JSON-serializable dict with keys meanList/varMatList/paiList
        '''
        assert isinstance(meanVec, (np.ndarray, list))
        assert isinstance(varVec, (np.ndarray, list))
        assert isinstance(paiVec, (np.ndarray, list))

        estMixedParams = {}
        if isinstance(meanVec, np.ndarray):
            meanVec = meanVec.tolist()
        estMixedParams['meanList'] = meanVec

        if isinstance(varVec, np.ndarray):
            varVec = varVec.tolist()
        # BUGFIX: was 'varList', which setConfig() could never read back
        # ('varMatList' is the key the reader expects).
        estMixedParams['varMatList'] = varVec

        if isinstance(paiVec, np.ndarray):
            paiVec = paiVec.tolist()
        estMixedParams['paiList'] = paiVec

        return estMixedParams

    def __reduction(self, data):
        '''
            Uniformly subsample data down to at most rSize points, so that
            fitting stays tractable on very large datasets.
        :param data: np.ndarray (indexable by an integer list)
        :return: the original array, or a random subset of rSize points
        '''
        rSize = 50000
        n = len(data)
        if n <= rSize:
            # nothing to reduce; avoid the O(n) shuffle the old code did
            return data
        print('data reduction(from {0} to {1}) from GaussianClustersEstimatorBy1D'.format(n, rSize))
        # sample without replacement, same distribution as shuffle + slice
        idxs = random.sample(range(n), rSize)
        return data[idxs]

    def __sortByDistance(self):
        '''
            Reorder all mixture state (means, variances, weights and both
            responsibility buffers) by ascending component mean.
        '''
        idx = np.argsort(self.meanVec, axis=0).reshape(-1)
        self.meanVec = self.meanVec[idx, :]
        self.varVec = self.varVec[idx, :]
        self.paiVec = self.paiVec[idx]
        self.zBuffer = self.zBuffer[:, idx]
        self.zBufferTrain = self.zBufferTrain[:, idx]

    def estimate(self, xVec: np.ndarray):
        """
        input the dataset(distances),
        a mixed Gaussian model is estimated and the results(negLogLikelihood, regItem) are obtained and written into db

        Parameters
        ----------
        xVec: dataset(distances), 1-D np.ndarray

        Returns
        -------
        the instance of GaussianClusters, or None when the DB update fails
        """
        # assert (xVec > 0).all() and np.isfinite(xVec).all(), 'GammCluEstBy1D.estimate..sample points must be positive and finite'
        assert self.dbconn is not None, 'please call setConfig first'

        self.xVec = xVec
        # 1-dimension data supposed to follow a mixture of k Gaussian distributions
        self.xVecTrain = self.__reduction(xVec)
        self.nTrain = len(self.xVecTrain)
        # sklearn expects column vectors: (n, 1)
        self.xVecTrain = self.xVecTrain[:, np.newaxis]
        self.xVec = self.xVec[:, np.newaxis]

        gmm = mixture.GaussianMixture(n_components=self.k)
        gmm.fit(self.xVecTrain)
        self.meanVec = gmm.means_           # (k, 1)
        self.varVec = gmm.covariances_
        self.paiVec = gmm.weights_
        self.zBuffer = gmm.predict_proba(self.xVec)        # (n, k) responsibilities
        self.zBufferTrain = gmm.predict_proba(self.xVecTrain)
        self.aic = gmm.aic(self.xVecTrain)
        self.__sortByDistance()

        # q: free parameters per component model (mean, var, weight).
        # NOTE(review): sklearn counts 3k-1 parameters for 1-D mixtures
        # (weights sum to 1); confirm 3k is the intended count here.
        q = 3 * self.k
        negLogLik = self.aic - 2 * q                    # recover -2*lnL from AIC
        regItem = 2 * q * self.nTrain / (self.nTrain - q - 1)  # AICc-style small-sample correction

        loss = negLogLik + regItem

        # update the records in the database
        ok = self.t.updateEstimation(
            self.getMixedParamsJSONDict(self.meanVec.flatten(), self.varVec.flatten(), self.paiVec.flatten()),
            self.zBufferTrain,
            negLogLik,
            regItem
        )

        if not ok:
            self.logger.write('GauCluEstBy1D.estimate..ERROR: updating table tGaussianClustersEstimatorBy1D is failed')
            return None

        # return a GaussianClusters
        gc = GaussianClusters(list(self.meanVec), list(self.varVec), list(self.paiVec),
                              probMatrix=self.zBuffer, weight=loss)

        return gc
