from scipy import stats
import numpy as np
from sklearn.neighbors import KernelDensity

from view.endSrc.MySqlConn import MySqlConn
from view.endSrc.MyTools import MyTools

from view.endSrc.tObserver import tObserver
from view.endSrc.tGammaClustersEstimatorBy1D import tGammaClustersEstimatorBy1D
from view.endSrc.tHighDensityFilter import tHighDensityFilter

from view.endSrc.KdeFilter import KdeFilter
from view.endSrc.DensityPeakFilter import DensityPeakFilter

from view.endSrc.Sampling import Sampling


class Observer:
    """Watch a dataset from a fixed view point and estimate cluster centers.

    Pipeline (driven by :meth:`estimateCenters`):
      1. compute the distance of every data point to ``viewPoint``;
      2. fit 1-D Gamma mixtures with varying component counts and keep the
         one with the minimum loss (negLogLik + regularItem);
      3. run KdeFilter then DensityPeakFilter on each mixture component to
         pick real data points as centers.

    All configuration (view point, estimator/distance class names, filter
    parameters) is loaded from the DB row identified by ``dbId``.
    """

    def __init__(self):

        self.dbconn = None              # MySqlConn, set in setConfig
        self.dbId = None                # DB row id of this observer
        self.logger = None              # taken from sqlConn in setConfig

        self.viewPoint = None           # np.ndarray loaded from the DB row
        self.dataset = None             # instance of Dataset to watch
        self.p2pFun = None              # p2pFun is an instance of One2OneDistance
        self.n2nFun = None              # n2nFun is an instance of N2NDistances

        self.distances = []                 # distances of all data watched from the viewpoint
        self.nComponents_guess = 0          # initial guessed number of mixture components
        self.idsOfEachComponent = None      # data-point ids belonging to each component

        self.t = tObserver(None)            # set SqlConn in setConfig

        self.cluEst_Class = None        # class to New instances of the cluster estimator
        self.cluEst_Tab_Class = None    # table class for the cluster estimator yjf.add.8.21
        self.cluEst_objList = []        # objects of cluster estimators
        self.cluEst_minLoss_dbId = None  # the DB id of the cluster estimator that has the minimum loss = negLogLik + regularItem

        self.KdeFilter_topPer = None
        self.DensityPeakFilter_cutoffPer = None
        self.mixedParams = None         # JSON dict

        self.componentLabels = None

        # bounds on the searched number of mixture components; assigned in
        # estimateCenters and read by _getBestMixtureEstimator (bug fix:
        # previously these attributes were created outside __init__)
        self.maxob = None
        self.minob = None

    def setConfig(self, dataset, sqlConn: MySqlConn, dbId: np.int64):
        """Load this observer's configuration from the DB and keep the inputs.

        :param dataset: non-empty sequence of data points, all of one shape
        :param sqlConn: open DB connection (also supplies the logger)
        :param dbId: id (> 0) of the tObserver row to read
        :return: True if ok, False on any failure (already logged)
        """

        assert len(dataset) > 0
        shp = np.shape(dataset[0])
        MyTools.checkMemberShape(dataset, shp, 'Observer.setConfig.dataset')

        assert sqlConn is not None
        assert dbId > 0
        self.logger = sqlConn.logger

        # load initial parameters from DB
        self.t.setSqlConn(sqlConn)
        if not self.t.readRow(dbId):
            self.logger.write('Observer.setConfig..failed to read the row by id=' + str(dbId))
            return False

        # 1) load the view point from DB
        self.viewPoint = self.t.viewPoint  # must be np.ndarray, since we pass the np.ndarray in tObserver.createRow

        # 2) dynamically load a cluster estimator from the string of its
        # class name.  Each name below is pre-initialized so the log line in
        # the except branch can never raise a NameError itself (bug fix: the
        # originals referenced a possibly-unbound local in the handler).
        cluEstClsName = '<unknown>'
        try:
            cluEstClsName = self.t.clusterEstimatorClassName
            self.cluEst_Class = MyTools.getClass(cluEstClsName + '.' + cluEstClsName)
            tabClassName = 't' + self.t.clusterEstimatorClassName
            self.cluEst_Tab_Class = MyTools.getClass(tabClassName + '.' + tabClassName)
        except Exception:
            self.logger.write('Observer.setConfig..failed to new an object of the cluster estimator: ' + cluEstClsName)
            return False

        # 3) dynamically load distance functions
        p2pName = '<unknown>'
        try:
            p2pName = self.t.One2OneDistClassName
            cls = MyTools.getClass(p2pName + '.' + p2pName)
            self.p2pFun = cls()
        except Exception:
            self.logger.write('Observer.setConfig..failed to new an object of One2One distance class: ' + p2pName)
            return False

        n2nName = '<unknown>'
        try:
            n2nName = self.t.N2N2DistClassName
            cls = MyTools.getClass(n2nName + '.' + n2nName)
            self.n2nFun = cls()
        except Exception:
            self.logger.write('Observer.setConfig..failed to new an object of N2N distances class: ' + n2nName)
            return False

        # 4) ----------- keep inputs --------------
        self.dataset = dataset
        self.dbId = dbId
        self.dbconn = sqlConn

        self.KdeFilter_topPer = self.t.filtersCfg['KdeFilter']['topPer']
        self.DensityPeakFilter_cutoffPer = self.t.filtersCfg['DensityPeakFilter']['cutoffPer']
        self.mixedParams = self.t.mixedParamsCfg  # JSON dict

        return True

    def _compDistances(self):
        # Distance from the view point to every data point, in dataset order.
        self.distances = [self.p2pFun(self.viewPoint, point) for point in self.dataset]

    def _estNumOfComponents(self):
        '''
            Estimate the initial num of Gamma components by counting the
            local maxima (peaks) of a Gaussian KDE of the distances.
            Result is stored in self.nComponents_guess.
        :return: None
        '''
        n = self.dataset.shape[0]

        kde = stats.gaussian_kde(self.distances)
        # fix bug by Chen Hongjie 2020.08.07
        density = kde(np.sort(self.distances))

        # bug fix: reset before counting so repeated calls do not accumulate
        self.nComponents_guess = 0
        for i in range(1, n - 1):
            if density[i - 1] < density[i] and density[i] > density[i + 1]:
                self.nComponents_guess += 1

    def _createMixtureEstimator(self, nComponents):
        '''
        Create one cluster estimator fixed to nComponents, back it with a DB
        row, configure it, and append it to self.cluEst_objList.

        :param nComponents: the prior knowledge of number of mixture components
        :return: True if ok, False on any DB / instantiation failure (logged)
        '''

        # 1 create a DB record for the cluster estimator fixed by nComponents
        tab = self.cluEst_Tab_Class(self.dbconn)

        # TODO: optimize initial Gamma mixture parameters,
        # specifically paiList base on 1D distances
        # based on the prior knowledge of nComponents

        # 2020.8.6 fix the bug found by Yang Hongguang: only reuse the
        # configured mixture params when their component count matches.
        mpDict = None
        if self.mixedParams is not None and \
                self.mixedParams['paiList'] is not None and \
                nComponents == len(self.mixedParams['paiList']):
            mpDict = self.mixedParams

        # renamed from `id` to avoid shadowing the builtin
        rowId = tab.createRow(self.dbId, nComponents, estMixedParamsJSONDict=mpDict)
        if rowId is None:
            tmp = 'Observer._createClusterEstimator..failed to create a t..CluEstBy1D for nComponents='
            tmp += str(nComponents)
            self.logger.write(tmp)
            return False

        # 2 create the cluster estimator fixed by nComponent=m
        obj = None
        try:
            obj = self.cluEst_Class()
        except Exception:
            tmp = 'Observer._createClusterEstimator..failed to create an instance of the cluster estimator class: '
            tmp += self.t.clusterEstimatorClassName
            self.logger.write(tmp)
            return False

        # 3 configure the new cluster estimator
        if not obj.setConfig(self.dbconn, rowId):
            tmp = 'Observer._createClusterEstimator..failed to call setConfig method of the cluster estimator class: '
            tmp += self.t.clusterEstimatorClassName
            self.logger.write(tmp)
            return False

        # add to buffer
        self.cluEst_objList.append(obj)
        return True

    # set the self.clusters
    def _getBestMixtureEstimator(self):
        '''
        Find the number of components m minimizing the loss (pc.weight) in
        two stages:
          1. coarse search: m doubles (1, 2, 4, ...) until the loss has not
             improved twice in a row;
          2. fine-grained search: linear scan with a coarse step inside the
             valley located by stage 1.
        On success stores the best estimator's DB id, the per-component
        datapoint ids and the component labels on self.
        :return: True if ok
        '''
        self.cluEst_objList = []  # clean cluster estimator objects

        est_obj = np.asarray(self.distances)
        # est_obj = Sampling.randomPartition(est_obj, 4)[0]

        compLst = []    # numbers of components tried in the coarse stage
        lossLst = []    # losses, parallel to compLst
        pcLst = []      # prob-cluster results, parallel to compLst
        objLst = []     # estimator objects, parallel to compLst

        # each component needs at least 3 points, hence this upper bound.
        # NOTE(review): upperComp (and therefore maxob) is computed but never
        # enforced by the coarse loop below — confirm the intended behavior.
        upperComp = np.ceil((est_obj.shape[0] - 1) / 3) - 1
        m = 1
        if self.maxob:
            upperComp = self.maxob
        if self.minob:
            m = self.minob
        last_loss = np.inf

        # coarse (doubling) search
        flag = 2  # tolerance: stop after 2 consecutive non-improvements
        while True:
            if not self._createMixtureEstimator(m):
                return False  # bug fix: don't index a stale/missing estimator
            obj = self.cluEst_objList[-1]
            pc = obj.estimate(est_obj)

            pcLst.append(pc)
            objLst.append(obj)
            compLst.append(m)
            lossLst.append(pc.weight)

            m = m * 2

            if pc.weight >= last_loss:
                last_loss = pc.weight
                flag -= 1
                if flag == 0:
                    break
                continue
            flag = 2
            last_loss = pc.weight

        if flag == 0:
            # loss rose twice in a row: the valley sits just before the rise
            if len(compLst) < 4:
                best_fit_idx = 0
                valley_comp_begin = compLst[0]
            else:
                best_fit_idx = len(compLst) - 3 - 1
                valley_comp_begin = compLst[-4]
            valley_comp_end = compLst[-2]
        else:
            # NOTE(review): unreachable with the current loop (it only exits
            # when flag == 0); kept in case an upper-bound break is added.
            best_fit_idx = np.argmin(lossLst)
            valley_comp_begin = compLst[best_fit_idx - 1]
            valley_comp_end = compLst[best_fit_idx + 1]

        # fine-grained search inside the valley
        compLst_fine_grained = []
        lossLst_fine_grained = []
        pcLst_fine_grained = []
        objLst_fine_grained = []
        grain = int(np.ceil((valley_comp_begin + valley_comp_end) / 2 / 6))
        flag_fine_grained = 2
        last_loss = np.inf
        for m in range(valley_comp_begin + 1, valley_comp_end, grain):
            if not self._createMixtureEstimator(m):
                return False  # bug fix: don't index a stale/missing estimator
            obj = self.cluEst_objList[-1]
            pc = obj.estimate(est_obj)

            pcLst_fine_grained.append(pc)
            objLst_fine_grained.append(obj)
            compLst_fine_grained.append(m)
            lossLst_fine_grained.append(pc.weight)

            if pc.weight >= last_loss:
                last_loss = pc.weight
                flag_fine_grained -= 1
                if flag_fine_grained == 0:
                    break
                continue
            flag_fine_grained = 2
            last_loss = pc.weight

        if len(lossLst_fine_grained) == 0:
            # fine stage tried nothing: fall back to the coarse results
            # (best_fit_idx from the coarse stage still applies)
            compLst_fine_grained = compLst
            pcLst_fine_grained = pcLst
            objLst_fine_grained = objLst
        else:
            if flag_fine_grained == 0:
                best_fit_idx = -3  # the point just before the loss rose twice
            else:
                best_fit_idx = np.argmin(lossLst_fine_grained)

        best_fit_comp = compLst_fine_grained[best_fit_idx]
        print('best_fit_comp', best_fit_comp)

        # bug fix: pick the estimator from the list that best_fit_idx indexes.
        # The original indexed self.cluEst_objList (coarse + fine estimators
        # concatenated) with an index into the fine-grained lists, selecting
        # the wrong estimator whenever the fine stage produced results.
        obj = objLst_fine_grained[best_fit_idx]
        minDBId = obj.getDBId()
        minProbClu = pcLst_fine_grained[best_fit_idx]

        if minDBId is None:
            return False

        self.idsOfEachComponent = minProbClu.getIdsOfEachCluster()
        self.componentLabels = minProbClu.getLabels()
        self.cluEst_minLoss_dbId = minDBId
        return True

    def _createTableForFilter(self, nameOfFilter, whichComponent, fromFilterId):
        '''
        Create one tHighDensityFilter DB row tying a filter run to the best
        mixture estimator and (optionally) to the filter that feeds it.

        :param nameOfFilter: filter class name, used in the log message only
        :param whichComponent: index of the Gamma component being filtered
        :param fromFilterId: id of the upstream filter row, or None if top-level
        :return: (dbId, table object), or (None, None) on DB failure (logged)
        '''
        ftable = tHighDensityFilter(self.dbconn)
        dbId = ftable.createRow(self.t.clusterEstimatorClassName, self.cluEst_minLoss_dbId, whichComponent, fromFilterId)
        if dbId is None:
            tmp = 'Observer.estimateCenters..failed of '
            tmp += 'creating a row of tHighDensityFilter for the filter: ' + nameOfFilter
            self.logger.write(tmp)
            return None, None
        return dbId, ftable

    def _filterCentersFromBestMixture(self):
        '''
        For every component of the best mixture, run KdeFilter then
        DensityPeakFilter and collect the surviving datapoint ids as centers.

        :return: (clst, cidlst, componentCenterNum) where clst are the center
                 points, cidlst their dataset ids, and componentCenterNum the
                 number of centers found per component; None on DB failure.
        '''
        cidlst = []
        componentCenterNum = []  # the num of centers found by every component
        for i, ids in enumerate(self.idsOfEachComponent):
            componentCenterNum.append(0)
            if len(ids) < 2:
                tmp = 'Observer.estimateCenters..idsOfComponent have ids less than 2'
                tmp += ', stop filtering, try next component'
                self.logger.write(tmp)
                continue

            # KdeFilter
            kdId, ftable = self._createTableForFilter('KdeFilter', i, fromFilterId=None)  # top-level
            if kdId is None:
                return None  # ----- failed to access DB, already log it in _createTableForFilter

            kf = KdeFilter(self.distances, self.KdeFilter_topPer, ftable)
            outputIds = kf(ids)
            if outputIds is None or len(outputIds) < 2:
                tmp = 'Observer.estimateCenters..KdeFilter output empty or output only one'
                tmp += ', stop further filtering, try next component'
                self.logger.write(tmp)
                continue

            # DensityPeakFilter
            dpId, ftable = self._createTableForFilter('DensityPeakFilter', i, fromFilterId=kdId)
            if dpId is None:
                return None   # ----- failed to access DB, already log it in _createTableForFilter

            df = DensityPeakFilter(self.dataset, self.DensityPeakFilter_cutoffPer, self.n2nFun, ftable)
            outputIds = df(outputIds)
            if outputIds is None or len(outputIds) == 0:
                tmp = 'Observer.estimateCenters..DensityPeakFilter output empty'
                tmp += ', stop further filtering, try next component'
                self.logger.write(tmp)
                continue

            # collect the final output for i-th Gamma component
            cidlst.extend(outputIds)
            componentCenterNum[-1] = len(outputIds)

        # 2020.08.12 fix bug by Chen Hongjie
        clst = self.dataset[cidlst, :].tolist()
        return clst, cidlst, componentCenterNum

    def estimateCenters(self, maxob=None, minob=None):
        '''
                Estimate centers under the context of the Gamma mixture,
                which has the minimum loss = negLogLik + regularItem.

                The context is the partition of the dataset datapoints,
                which is expressed in self.idsOfEachComponent.

                The context is DB searchable by foreign key: self.cluEst_minLoss_dbId,
                by which we can find the final centers connecting
                which Gamma Estimation with the info: the number of component, paiList, etc.
        :param maxob: optional upper bound for the number of components searched
        :param minob: optional (> 0) starting number of components searched
        :return: list < id_data points_in_dataset > and componentCenterNum if ok, None if failed

                ######################################################
                Note the centers are real data points in the dataset,
                not mean computed virtual centers.
                #####################################################
        '''

        assert self.dbconn is not None, 'please call setConfig first'

        assert minob is None or minob > 0
        self.maxob = maxob
        self.minob = minob

        # 1. compute distances, guess the num of Gamma components
        self._compDistances()
        # self._estNumOfComponents()
        if not self.t.updateDistances(self.distances, self.nComponents_guess):
            self.logger.write('Observer.estimateCenters..failed to update distances and nComponents_guess in DB')
            return None

        # 2. get the 'best' Gamma mixture
        if not self._getBestMixtureEstimator():     # saved in self.cluEst_minLoss_dbId
            self.logger.write('Observer.estimateCenters..failed to get best mixture estimator')
            return None

        # 3. filtering centers from each component of the 'best' Gamma mixture
        # bug fix: _filterCentersFromBestMixture returns a bare None on DB
        # failure; the original unpacked it into three names -> TypeError.
        result = self._filterCentersFromBestMixture()
        if result is None:
            return None
        clst, cidlst, componentCenterNum = result

        if not self.t.updateCenters(clst, self.cluEst_minLoss_dbId):
            self.logger.write('Observer.estimateCenters..failed to update centers and bestClusterEstimatorId in DB')
            return None

        return cidlst, componentCenterNum

    def getComponentLabels(self):
        # component label per datapoint, set by _getBestMixtureEstimator
        return self.componentLabels