import numpy as np
import sys

from view.endSrc.nlmMinimizer import nlmMinimizer
from view.endSrc.GammaPdf import GammaPdf
from view.endSrc.MySqlConn import MySqlConn

from view.endSrc.tGammaClustersEstimatorBy1D import tGammaClustersEstimatorBy1D
from view.endSrc.GammaClusters import GammaClusters


class GammaClustersEstimatorBy1D:
    '''
    This class estimates Gamma cluster memberships
    from 1-dim info-data (distances) for a high-dimensional dataset
    '''
    def __init__(self):
        '''
            Lazy initialization: this constructor must never throw an
            exception or fail an assert, so all real setup is deferred
            to setConfig().

            This keeps usage of the class robust and stateful.
        '''
        # DB access state, filled in later by setConfig()
        self.dbconn = None
        self.dbId = None
        self.logger = None

        # table-row wrapper; the SQL connector will be injected later
        self.t = tGammaClustersEstimatorBy1D(None)

        # nlmMinimizer-based configuration (the minimizer loads its DLL only once)
        self.minimizer = nlmMinimizer()
        self.gammaPdf = GammaPdf()

        # initial and final Gamma mixture parameters
        self.shapeVec = None
        self.scaleVec = None
        self.paiVec = None
        self.k = None      # number of Gamma components

    def __str__(self):
        '''
            string form for simple tests: delegate to the table-row wrapper
        :return: str(self.t)
        '''
        return '{}'.format(self.t)

    def getDBId(self):
        '''
            Accessor used by Observers.
        :return: the configured database row id (None before setConfig succeeds)
        '''
        rowId = self.dbId
        return rowId

    def setConfig(self, sqlConn: MySqlConn, dbId: np.int64):
        '''
            Load optimization configuration (epsilon, maxIter, maxRestarts,
            verbosity) and any stored initial Gamma-mixture guess from the
            DB row identified by dbId.

        :param sqlConn: open MySqlConn whose logger is also adopted
        :param dbId:    primary key (> 0) of the configuration row
        :return: True on success, False if the row could not be read
        '''
        assert sqlConn is not None
        assert dbId > 0

        self.logger = sqlConn.logger

        # load optimization configures, such as epsilon, maxIter,
        # and gamma initial guess parameters.

        self.t.setSqlConn(sqlConn)
        if not self.t.readRow(dbId):
            self.logger.write('GammaClusterEstimatorBy1D.setConfig..failed to read the row by id=' + str(dbId))
            return False

        if self.t.estMixedParams is not None:

            vec = self.t.estMixedParams['shapeList']
            if vec is not None:
                self.shapeVec = np.asarray(vec)  # 1D np.ndarray
                assert isinstance(self.shapeVec[0], np.float64) and (self.shapeVec > 0).all()

            vec = self.t.estMixedParams['scaleList']
            if vec is not None:
                self.scaleVec = np.asarray(vec)  # 1D np.ndarray
                assert isinstance(self.scaleVec[0], np.float64) and (self.scaleVec > 0).all()

            vec = self.t.estMixedParams['paiList']
            if vec is not None:
                self.paiVec = np.asarray(vec)  # 1D np.ndarray
                # bug fix: the isinstance alternatives must be parenthesized;
                # previously 'A or B and C' parsed as 'A or (B and C)', so the
                # positivity check (C) was skipped whenever dtype was float64
                assert (isinstance(self.paiVec[0], np.float64) or
                        isinstance(self.paiVec[0], np.int64)) and (self.paiVec > 0).all()
                s = np.sum(self.paiVec)
                if s != 1.0:
                    # normalize the mixing ratios so they sum to 1
                    self.paiVec = self.paiVec / s

        # copy DB fields (all NOT NULL) for performance
        self.k = self.t.nComponents
        self.epsilon = self.t.p_epsilon
        self.maxIter = self.t.p_maxIter
        self.maxRestarts = self.t.p_maxRestarts
        self.verb = self.t.p_verb

        self.dbconn = sqlConn
        self.dbId = dbId

        return True

    def getMixedParamsJSONDict(self, shapeVec, scaleVec, paiVec):
        '''
            Pack initial/result Gamma parameters into a JSON-serializable dict
            to be saved to the DB.
        :param shapeVec: list or np.ndarray
        :param scaleVec: list or np.ndarray
        :param paiVec:   list or np.ndarray
        :return: dict with 'shapeList', 'scaleList' and 'paiList' as plain lists
        '''
        assert isinstance(shapeVec, (np.ndarray, list))
        assert isinstance(scaleVec, (np.ndarray, list))
        assert isinstance(paiVec, (np.ndarray, list))

        def toPlainList(vec):
            # np.ndarray -> plain Python list; a list passes through unchanged
            return vec.tolist() if isinstance(vec, np.ndarray) else vec

        return {
            'shapeList': toPlainList(shapeVec),
            'scaleList': toPlainList(scaleVec),
            'paiList': toPlainList(paiVec),
        }

    def _estMoment1_2(self):
        '''
                Sort self.xVec ascending, split the sorted data into self.k
                components according to self.paiVec, and compute the first and
                second sample moments of each component.

        :return: (Ex, Ex2) — per-component sample means and mean squares,
                 each a length-k np.ndarray
        '''
        if self.k == 1:
            # single component: moments of the whole sample
            # fix bug by Chen Hongjie 2020.08.07
            mean1 = np.array([np.mean(self.xVec)])
            mean2 = np.array([np.mean(self.xVec ** 2)])
            return mean1, mean2

        # k >= 2
        # cumsum(pai) is like 0.3333333 0.6666667 1.0000000
        # cut points are like 200 400 600
        cuts = np.floor(self.n * np.cumsum(self.paiVec)).astype(int)
        xSorted = np.sort(self.xVec)

        # NOTE(review): neighbouring parts intentionally share boundary
        # elements, reproducing the original splitting scheme exactly
        parts = [xSorted[0: cuts[0] + 1]]                  # indices [0, cuts[0]], cuts[0]+1 elements
        parts += [xSorted[cuts[j - 1] - 1: cuts[j]]        # e.g. [199, 399], [399, 599]
                  for j in range(1, len(cuts))]

        Ex = np.zeros(shape=(self.k,))
        Ex2 = np.zeros(shape=(self.k,))
        for i, part in enumerate(parts):
            Ex[i] = np.mean(part)           # e.g. 1.948985  30.556443  325.343651
            Ex2[i] = np.mean(part ** 2)

        return Ex, Ex2

    def _estInitParameters(self):
        '''
            Build the initial parameter guess (paiVec, shapeVec, scaleVec)
            via the method of moments, then refresh self.pdfBuffer.

            TODO: can we use kernel density estimation or spectral clustering
            to compute the paiVec (the ratios of sorted data) ?
        :return:
        '''
        # make paiVec and k agree with each other
        if self.paiVec is None:
            # TODO: better ratios estimation needed.
            pai0 = np.ones(self.k, dtype=np.float64)
            self.paiVec = pai0 / np.sum(pai0)      # uniform ratios 1/k
        else:
            self.k = len(self.paiVec)

        # method-of-moments estimates of shape and scale
        m1, m2 = self._estMoment1_2()
        variance = m2 - m1 ** 2

        assert (variance > 0).all(), 'Varx must > 0'

        if self.shapeVec is None:
            self.shapeVec = m1 ** 2 / variance     # e.g. 0.3571404  6.1872099 26.8053743

        if self.scaleVec is None:
            self.scaleVec = variance / m1          # e.g. 5.457196  4.938647 12.137255

        # xSS is the solution variable to be solved by the non-linear optimizer
        self.xSS = np.concatenate((self.shapeVec, self.scaleVec))

        # update self.pdfBuffer
        self._positiveInference(self.paiVec, self.shapeVec, self.scaleVec, self.pdfBuffer)

    def _positiveInference(self, paiVec, shapeVec, scaleVec, outBuffer):
        '''
            Production rule of the un-normalized Bayesian posterior:

            outBuffer[i, j] = p(x_i | z_i = j) p(z_i = j)
                            = gamma.pdf(x_i, shape_j, scale_j) * pai_j

            Step 1 (likelihood): fill column j with the Gamma density of
                    every sample in self.xVec under component j.
            Step 2 (product rule): scale each column by its prior paiVec[j].

        :param paiVec:     prior probability (ratio) of k Gamma components
        :param shapeVec:   vector of shape parameters of k Gamma components
        :param scaleVec:   vector of scale parameters of k Gamma components
        :param outBuffer:  n-by-k buffer receiving the un-normalized posterior
        :return: the outBuffer (modified in place)
        '''
        for col in range(self.k):
            # densities of all samples under the col-th Gamma component
            outBuffer[:, col] = self.gammaPdf(self.xVec, shapeVec[col], scaleVec[col])

        # remove zeros
        # outBuffer[outBuffer< sys.float_info.min] = sys.float_info.min

        # product rule of probability: un-normalized posterior probability
        outBuffer *= paiVec


    def _update_zBuffer_pai(self):
        '''
            E-step: refresh the n-by-k membership matrix self.zBuffer
            (posterior probability that each sample belongs to each of the
            k components) and re-estimate the mixing ratios self.paiVec as
            the per-component average membership weight.
        :return:
        '''
        # un-normalized posterior densities into zBuffer (n-by-k)
        self._positiveInference(self.paiVec, self.shapeVec, self.scaleVec, self.zBuffer)

        # normalize each of the n rows so memberships sum to 1
        self.zBuffer.sum(axis=1, out=self.nVecBuffer)
        self.zBuffer /= self.nVecBuffer[:, np.newaxis]

        # pai_hat_j = (1/n) * sum_i p(Z_i = j | x_i; theta)
        self.paiVec = self.zBuffer.mean(axis=0)

    def _lossFun(self, xSS):
        '''
            Loss for the optimizer: negative sum over samples/components of
            zBuffer-weighted log of (likelihood * prior), using the fixed
            self.paiVec and self.zBuffer.

        :param xSS: concatenated (shapeVec, scaleVec), length 2k
        :return: negative log (likelihood * prior); float max for invalid xSS
        '''
        valid = np.isfinite(xSS).all() and (xSS > 0).all()
        if not valid:
            print('GammaClustersEst._lossFun...WARNING..invalid xSS found : ', xSS)
            return sys.float_info.max

        shapePart = xSS[0: self.k]
        scalePart = xSS[self.k: self.k + self.k]

        # loss of the candidate point xSS under the current memberships
        self._positiveInference(self.paiVec, shapePart, scalePart, self.pdfBuffer)
        np.log(self.pdfBuffer, out=self.pdfBuffer)
        self.pdfBuffer *= self.zBuffer

        return -np.sum(self.pdfBuffer)

    def _sumLogLik(self):
        '''
            Observed-data log-likelihood (a scalar): sum over samples of
            log(sum over components of the un-normalized posterior density).

            R analogue: old.obs.ll <- sum(log(apply(dens(lambda, theta, k), 1, sum)))
        :return: scalar log-likelihood
        '''
        # TODO: already calculated in self._lossFun, self._estInitParameters
        # but self.pdfBuffer is changed by self.pdfBuffer *= self.zBuffer in
        # _lossFun
        self._positiveInference(self.paiVec, self.shapeVec, self.scaleVec, self.pdfBuffer)

        # row sums marginalize over the k components, then take logs
        self.pdfBuffer.sum(axis=1, out=self.nVecBuffer)
        np.log(self.nVecBuffer, out=self.nVecBuffer)        # nVecBuffer is (n, )

        return self.nVecBuffer.sum()

    # def _checkInitCall(self, xVec, paiVec, shapeVec, scaleVec, k):
    #
    #     assert (xVec > 0).all() and np.isfinite(xVec).all(), 'sample points must be positive and finite'
    #     assert paiVec is None or (paiVec > 0).all(), ''
    #     assert shapeVec is None or (shapeVec > 0).all()
    #     assert scaleVec is None or (scaleVec > 0).all()
    #     assert k > 0

    def _update_shape_scale(self, xSS):
        '''
            Accept a candidate solution from the optimizer: split it into
            shapeVec/scaleVec only when every entry is finite and positive.

        :param xSS: concatenated (shapeVec, scaleVec), length 2k
        :return: True when the solution was accepted, False otherwise
        '''
        if not (np.isfinite(xSS).all() and (xSS > 0).all()):
            return False

        self.shapeVec = xSS[0: self.k]
        self.scaleVec = xSS[self.k: self.k + self.k]
        # DONT SET self.ssErrorFree = True,
        # because the nlm_simple may return valid solution,
        # even it accesses bad solution during the single solving process.
        self.xSS = xSS
        return True

    def estimate(self, xVec: np.ndarray):
        """
        Fit a k-component Gamma mixture to the 1-D dataset (distances) by an
        EM-style loop: the E-step updates memberships/ratios, the M-step
        solves shape/scale with the non-linear minimizer. The results
        (negLogLikelihood, regItem, memberships) are written into the DB.

        Parameters
        ----------
        xVec: dataset(distances); all entries must be positive and finite

        Returns
        -------
        the instance of GammaClusters, or None on failure
        """
        assert (xVec > 0).all() and np.isfinite(xVec).all(), 'GammCluEstBy1D.estimate..sample points must be positive and finite'
        assert self.dbconn is not None, 'please call setConfig first'

        self.xVec = xVec    # 1-dimension data supposed to follow a mixture of k gamma distributions
        self.n = len(self.xVec)
        self.xSS = None  # np.concatenate((self.shape, self.scale))
        # working buffers, re-used every iteration to avoid re-allocation
        self.pdfBuffer = np.zeros(shape=(self.n, self.k), dtype=np.float64)  # pdfBuffer is n-by-k
        self.zBuffer = np.zeros(shape=(self.n, self.k), dtype=np.float64)    # zBuffer for computing z
        self.nVecBuffer = np.zeros(shape=(self.n,), dtype=np.float64)  # used in computing z, sumLogLik

        # initialize (lambda = lambda, alpha = alpha, beta = beta, k = k)
        self._estInitParameters()
        iter = 0
        mr = 0              # number of restarts already used
        diff = self.epsilon + 1     # force at least one iteration

        prevLogLik = self._sumLogLik()
        logLikList = [prevLogLik]
        curLogLik = None

        while diff > self.epsilon and iter < self.maxIter:

            # E-step: update membership (z and pai(hat)) to obtain a new self._lossFun
            # self.paiVec is pai_hat
            self._update_zBuffer_pai()

            # M-step: minimize negative expected log-likelihood over (shape, scale)
            retValue = self.minimizer(self.xSS, self._lossFun)

            xSS = self.minimizer.getXSolution()
            xOK = self._update_shape_scale(xSS)

            if retValue is not None and xOK:

                curLogLik = self._sumLogLik()  # self.pdfBuffer is right
                diff = curLogLik - prevLogLik

                prevLogLik = curLogLik
                logLikList.append(curLogLik)  # add new likelihood into list

                iter += 1
                if self.verb:
                    tmp = 'iteration = ' + str(iter)
                    tmp += 'log-lik diff = ' + str(diff)
                    tmp += 'log-lik = ' + str(curLogLik)
                    self.logger.write(tmp)
                continue

            # !xOK or retValue is None: restart from fresh initial parameters
            self.logger.write('GammClusEst.estimate..choosing new starting values for bad retValue')
            if mr >= self.maxRestarts:
                self.logger.write('GammClusEst.estimate..FAILED, since maxRestarts= ' + str(mr) + ' is reached')
                return None

            mr += 1         # number of max restarts
            iter = 0
            diff = self.epsilon + 1

            self._estInitParameters()
            prevLogLik = self._sumLogLik()  # must be called after _estInitParameters
            logLikList = [prevLogLik]

        # end of while
        if (iter >= self.maxIter):
            self.logger.write('GammClusEst.estimate..WARNING..not covergent, since iter >= maxIter=' + str(self.maxIter))

        # bug fix: if the loop never completed a successful iteration
        # (e.g. maxIter <= 0), curLogLik is still None and '-2 * curLogLik'
        # below would raise a TypeError; fall back to the initial likelihood
        if curLogLik is None:
            curLogLik = prevLogLik

        # calculate negLogLikelihood and the regularization item
        N = len(xVec)                   # the number of records in the dataset
        # 2020.08.15 fix by Chen Hongjie; not negLogLik = -2 * np.log(-curLogLik)
        negLogLik = -2 * curLogLik
        q = 3 * self.k                  # free parameters: k shapes + k scales + k ratios
        regItem = 2 * q * N / (N - q - 1)   # AICc-style penalty 2qN/(N-q-1)

        loss = negLogLik + regItem

        # update the records in the database
        logLikListDic = {'logLikList': logLikList}
        ok = self.t.updateEstimation(
            self.getMixedParamsJSONDict(self.shapeVec, self.scaleVec, self.paiVec),
            self.zBuffer,
            negLogLik,
            regItem,
            iter,
            logLikListDic
        )

        if not ok:
            self.logger.write('GamCluEstBy1D.estimate..ERROR: updating table tGammaClustersEstimatorBy1D is failed')
            return None

        # return a GammaClusters
        gc = GammaClusters(list(self.shapeVec), list(self.scaleVec), list(self.paiVec),
                           probMatrix=self.zBuffer, weight=loss)
        return gc
