# NOTE: the commented-out block below is an earlier, superseded version of this
# module, kept for reference (it merged centers with MergeCenters; the live
# code below uses MergeCentersByMultiOb and tracks per-observer center counts).
# import numpy as np
# import time
# from view.endSrc.MySqlConn import MySqlConn
# from view.endSrc.MyTools import MyTools
# from view.endSrc.DBConfig import DBConfig
#
# from view.endSrc.tIniceRunner import tIniceRunner
# from view.endSrc.tDataset import tDataset
# from view.endSrc.MergeCenters import MergeCenters
#
# # evaluate ARI, purity, NMI
# from sklearn.cluster import KMeans
# from sklearn.metrics.cluster import adjusted_rand_score
# from sklearn.metrics.cluster import normalized_mutual_info_score
#
#
# class INiceRunner:
#
#     def __init__(self):
#         '''
#             A lazy initialization is used for this class,
#             since we do not want this constructor to throw exception,
#             or produce failure of assert.
#
#             We want robust and stateful usecases of this class.
#         '''
#
#         self.dbconn = None
#         self.dbId = None
#         self.logger = None
#
#         # the ObserverGenrator and
#         # its params of ObserverGenrator.setConfig
#         self.observerGen = None
#         self.dataset = None
#
#         self.centerList = None
#
#     def setConfig(self, sqlConn: MySqlConn, dsId: np.int64, name, observerGenClassName, nObservers,
#                   clusterEstimatorClassName, one2oneClasName, n2nClassName, filterCfgJSONDict,
#                   maxob, minob, mixedParamsCfgJSONDict=None):
#         '''
#                 The params, such as clusterEstimatorClassName, one2oneClasName,
#                 are copied from those ObserverGenerator.setConfig.
#
#                 Since these copied parameters are used by ObserverGenerator.setConfig and
#                 really used by Observer, GammaClustersEstimator, so
#                 these params need not be saved in tIniceRunner table.
#
#                 By this way, we will reduce field duplication in DB tables.
#         :param sqlConn:
#         :param dsId:                        use to create new record in tIniceRunner table.
#         :param name:                        use to create new record in tIniceRunner table.
#         :param observerGenClassName:        use to create new record in tIniceRunner table.
#         :param nObservers:                  use to create new record in tIniceRunner table.
#         :param clusterEstimatorClassName:   copy from the params of ObserverGenerator.setConfig
#         :param one2oneClasName:             copy
#         :param n2nClassName:                copy
#         :param filterCfgJSONDict:           copy
#         :param mixedParamsCfgJSONDict:      copy
#         :return: True if ok.
#
#         # ObserverGenerator
#         #     def setConfig(self, sqlConn: MySqlConn, sessionId, datasetDBId,
#         #                   clusterEstimatorClassName,
#         #                   one2oneClasName, n2nClassName,
#         #                   filterCfgJSONDict, mixedParamsCfgJSONDict=None):
#
#         '''
#
#         assert sqlConn is not None
#         self.maxob = maxob
#         self.minob = minob
#
#         self.logger = sqlConn.logger
#
#         # load tIniceRunner table
#         t = tIniceRunner(sqlConn)
#         print('create a record in tIniceRunner to Run')
#         dbId = t.createRow(dsId, name, observerGenClassName, nObservers)
#
#
#         # make sure some fields are valid
#         # assert t.observerGenClassName  needn't, see the try/except
#         assert t.nObservers > 0, 'iNiceRunner.setConfig..number of observer must be positive'
#
#         # config ObserverGenerator,
#         # i.e., passing params down to the link of params flowing.
#         try:
#             clsPath = t.observerGenClassName + '.' + t.observerGenClassName
#             cls = MyTools.getClass(clsPath)
#             self.observerGen = cls()
#         except Exception as e:
#             self.logger.write('iNiceRunner.setConfig..failed to new an object of ObserverGenerator')
#             return False
#
#         if not self.observerGen.setConfig(sqlConn,
#                                    dbId,
#                                    t.datasetId,
#                                    clusterEstimatorClassName,
#                                    one2oneClasName,
#                                    n2nClassName,
#                                    filterCfgJSONDict,
#                                    mixedParamsCfgJSONDict
#                                    ):
#             self.logger.write('INiceRunnter.setConfig..failed to pass params down to ObserverGenerator')
#             return False
#
#         # load dataset to test centers on Kmeans
#         ds = tDataset(sqlConn)
#         if not ds.readDataset(t.datasetId):
#             self.logger.write('INiceRunner.setConfig..failed to load dataset for dataset id = ' + str(t.datasetId))
#             return False
#
#         self.dataset = ds.m_dataset
#         self.labels = ds.m_labels
#
#         # set table member
#         self.t = t
#         self.dbconn = sqlConn
#         self.dbId = dbId
#         return True
#
#     def run(self):
#
#         self.t.updateStartTimeByCurrent()
#
#         lst = self.observerGen.createObservers(self.t.nObservers)
#
#         self.centerList = []
#         for ob in lst:
#             # center ids, number of center points found by each component of this observer
#             cidlst, componentCenterNum = ob.estimateCenters(self.maxob, self.minob)
#             if cidlst is None:
#                 continue
#             for c in cidlst:
#                 self.centerList.append(self.dataset[c,:])
#
#         mc = MergeCenters()
#         self.centerList = mc.mergeCen(self.centerList)
#
#
#         # write nCenters, centerList to DB
#         if not self.t.updateCenters(self.centerList):
#             self.logger.write('iNiceRunner.runKmeans..OMIT..failed to write indices ARI, purity, etc to DB')
#
#         # get ARI, purity, NMI
#         return self.runKmeans()
#
#     def runKmeans(self):
#
#         #  If an ndarray is passed, it should be of shape (n_clusters, n_features)
#         #  and gives the initial centers.
#
#         nClusters = len(self.centerList)
#         k = KMeans(n_clusters=nClusters, init=np.asarray(self.centerList), n_init=1).fit(self.dataset)
#         assert list(np.unique(k.labels_)) == list(range(nClusters))
#         ARI = adjusted_rand_score(self.labels, k.labels_)
#         purity = MyTools.purity_score(self.labels, k.labels_)
#         NMI = normalized_mutual_info_score(self.labels, k.labels_)
#
#         # write to DB
#         ok = self.t.updateIndices(ARI, purity, NMI)
#         if not ok:
#             self.logger.write('iNiceRunner.runKmeans..failed to write indices ARI, purity, etc to DB')
#             return False
#
#         self.t.updateEndTimeByCurrent()
#         np.savetxt('../data/kmeansLabels_runnerid' + str(self.dbId) + time.asctime().replace(' ', '-')+'.csv',
#                    k.labels_, delimiter=',')
#
#         np.savetxt('../data/kmeansCenters_runnerid' + str(self.dbId) + time.asctime().replace(' ', '-') + '.csv',
#                    k.cluster_centers_, delimiter=',')
#
#         labelList = list(k.labels_)
#         compnumList = [labelList.count(i) for i in range(nClusters)]
#         paiList = [compnum*1.0/sum(compnumList) for compnum in compnumList]
#         return True, paiList
#
#
# if __name__ == '__main__':
#     nOb = 5
#     dsId = 100
#     name = 'experiments'
#     observerGenClassName = 'EdgeObserverGenerator'
#     # clusterEstClassName = 'GaussianClustersEstimatorBy1D'
#     clusterEstClassName = 'GammaClustersEstimatorBy1D'
#     p2pClassName = 'One2OneDistance'
#     n2nClassName = 'N2NDistances'
#     filterCfgJSONDict = {'KdeFilter': {'topPer': 0.5}, 'DensityPeakFilter': {'cutoffPer': 0.1}}
#     sqlconn = MySqlConn(DBConfig(name))
#
#     inR = INiceRunner()
#     inR.setConfig(sqlConn=sqlconn, dsId=dsId, name=name, observerGenClassName=observerGenClassName,
#                   nObservers=nOb, clusterEstimatorClassName=clusterEstClassName, one2oneClasName=p2pClassName,
#                   n2nClassName=n2nClassName, filterCfgJSONDict=filterCfgJSONDict, mixedParamsCfgJSONDict=None)
#     inR.run()


# --------------------------------------------------------------------------------------------------------------------


import numpy as np
import time
from view.endSrc.MySqlConn import MySqlConn
from view.endSrc.MyTools import MyTools
from view.endSrc.DBConfig import DBConfig

from view.endSrc.tIniceRunner import tIniceRunner
from view.endSrc.tDataset import tDataset
from view.endSrc.MergeCentersByMultiOb import MergeCentersByMultiOb

# evaluate ARI, purity, NMI
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import normalized_mutual_info_score


class INiceRunner:
    """
    Drive one i-nice clustering experiment end to end:

      1. setConfig() creates a row in the tIniceRunner table, instantiates the
         configured ObserverGenerator by class name, and loads the dataset.
      2. run() collects candidate cluster centers from every observer, merges
         them with MergeCentersByMultiOb, and persists the merged centers.
      3. runKmeans() seeds KMeans with those centers, evaluates ARI / purity /
         NMI against the dataset labels, writes the indices to the DB and the
         KMeans labels/centers to CSV files.
    """

    def __init__(self):
        '''
            A lazy initialization is used for this class,
            since we do not want this constructor to throw exception,
            or produce failure of assert.

            We want robust and stateful usecases of this class.
        '''

        self.dbconn = None   # MySqlConn, assigned by setConfig
        self.dbId = None     # id of this runner's row in tIniceRunner
        self.logger = None   # logger borrowed from the MySqlConn

        # the ObserverGenerator and
        # its params of ObserverGenerator.setConfig
        self.observerGen = None
        self.dataset = None  # sample matrix loaded via tDataset in setConfig

        self.centerList = None  # merged cluster centers, filled by run()

    def setConfig(self, sqlConn: MySqlConn, dsId: np.int64, name, observerGenClassName, nObservers,
                  clusterEstimatorClassName, one2oneClasName, n2nClassName, filterCfgJSONDict,
                  maxob, minob, mixedParamsCfgJSONDict=None):
        '''
                The params, such as clusterEstimatorClassName, one2oneClasName,
                are copied from those ObserverGenerator.setConfig.

                Since these copied parameters are used by ObserverGenerator.setConfig and
                really used by Observer, GammaClustersEstimator, so
                these params need not be saved in tIniceRunner table.

                By this way, we will reduce field duplication in DB tables.
        :param sqlConn:
        :param dsId:                        use to create new record in tIniceRunner table.
        :param name:                        use to create new record in tIniceRunner table.
        :param observerGenClassName:        use to create new record in tIniceRunner table.
        :param nObservers:                  use to create new record in tIniceRunner table.
        :param clusterEstimatorClassName:   copy from the params of ObserverGenerator.setConfig
        :param one2oneClasName:             copy
        :param n2nClassName:                copy
        :param filterCfgJSONDict:           copy
        :param maxob:                       passed to Observer.estimateCenters in run()
        :param minob:                       passed to Observer.estimateCenters in run()
        :param mixedParamsCfgJSONDict:      copy
        :return: True if ok, False on any failure (the reason is logged).

        # ObserverGenerator
        #     def setConfig(self, sqlConn: MySqlConn, sessionId, datasetDBId,
        #                   clusterEstimatorClassName,
        #                   one2oneClasName, n2nClassName,
        #                   filterCfgJSONDict, mixedParamsCfgJSONDict=None):

        '''

        assert sqlConn is not None
        self.maxob = maxob
        self.minob = minob

        self.logger = sqlConn.logger

        # load tIniceRunner table
        t = tIniceRunner(sqlConn)
        print('create a record in tIniceRunner to Run')
        dbId = t.createRow(dsId, name, observerGenClassName, nObservers)

        # make sure some fields are valid
        # assert t.observerGenClassName  needn't, see the try/except
        assert t.nObservers > 0, 'iNiceRunner.setConfig..number of observer must be positive'

        # config ObserverGenerator,
        # i.e., passing params down to the link of params flowing.
        # Convention: the generator class lives in a module of the same name.
        try:
            clsPath = t.observerGenClassName + '.' + t.observerGenClassName
            cls = MyTools.getClass(clsPath)
            self.observerGen = cls()
        except Exception as e:
            # FIX: include the exception text instead of silently discarding it
            self.logger.write('iNiceRunner.setConfig..failed to new an object of ObserverGenerator: ' + str(e))
            return False

        if not self.observerGen.setConfig(sqlConn,
                                          dbId,
                                          t.datasetId,
                                          clusterEstimatorClassName,
                                          one2oneClasName,
                                          n2nClassName,
                                          filterCfgJSONDict,
                                          mixedParamsCfgJSONDict
                                          ):
            # FIX: corrected 'INiceRunnter' typo in the log message
            self.logger.write('INiceRunner.setConfig..failed to pass params down to ObserverGenerator')
            return False

        # load dataset to test centers on Kmeans
        ds = tDataset(sqlConn)
        if not ds.readDataset(t.datasetId):
            self.logger.write('INiceRunner.setConfig..failed to load dataset for dataset id = ' + str(t.datasetId))
            return False

        self.dataset = ds.m_dataset
        self.labels = ds.m_labels

        # set table member
        self.t = t
        self.dbconn = sqlConn
        self.dbId = dbId
        return True

    def run(self):
        '''
        Collect candidate centers from every observer, merge them, persist the
        merged centers, then evaluate them with KMeans.
        :return: whatever runKmeans() returns: (True, paiList) on success,
                 False if writing the evaluation indices to the DB fails.
        '''
        self.t.updateStartTimeByCurrent()

        lst = self.observerGen.createObservers(self.t.nObservers)

        self.centerList = []
        centersFromObs = []  # which observer does each center come from
        componentCenterNum_ob = []  # number of center points found by each component of each observer
        for i, ob in enumerate(lst):
            # center ids, number of center points found by each component of this observer
            cidlst, componentCenterNum = ob.estimateCenters(self.maxob, self.minob)
            if cidlst is None:
                # keep the per-observer list aligned even when nothing is found
                componentCenterNum_ob.append([])
                continue
            componentCenterNum_ob.append(componentCenterNum)
            for c in cidlst:
                self.centerList.append(c)
                centersFromObs.append(i)

        mc = MergeCentersByMultiOb(self.dataset, lst, isPlot=True)
        self.centerList = mc.merge(self.centerList, centersFromObs, componentCenterNum_ob)

        # write nCenters, centerList to DB
        if not self.t.updateCenters(self.centerList):
            # FIX: message previously claimed runKmeans/indices; this call writes centers
            self.logger.write('iNiceRunner.run..OMIT..failed to write centers to DB')

        # get ARI, purity, NMI
        return self.runKmeans()

    def runKmeans(self):
        '''
        Seed KMeans with self.centerList, compute ARI / purity / NMI against
        self.labels, write the indices to the DB and dump labels/centers to CSV.
        :return: (True, paiList) where paiList holds the fraction of samples
                 per cluster, or False if the DB write of the indices fails.
        '''

        #  If an ndarray is passed, it should be of shape (n_clusters, n_features)
        #  and gives the initial centers.

        nClusters = len(self.centerList)
        k = KMeans(n_clusters=nClusters, init=np.asarray(self.centerList), n_init=1).fit(self.dataset)
        # every seeded cluster must survive the fit (no cluster may end up empty)
        assert list(np.unique(k.labels_)) == list(range(nClusters))
        ARI = adjusted_rand_score(self.labels, k.labels_)
        purity = MyTools.purity_score(self.labels, k.labels_)
        NMI = normalized_mutual_info_score(self.labels, k.labels_)

        # write to DB
        ok = self.t.updateIndices(ARI, purity, NMI)
        if not ok:
            self.logger.write('iNiceRunner.runKmeans..failed to write indices ARI, purity, etc to DB')
            return False

        self.t.updateEndTimeByCurrent()

        # FIX: take one timestamp so both CSV files share the same suffix
        # (two separate asctime() calls could straddle a second boundary)
        stamp = time.asctime().replace(' ', '-')
        np.savetxt('../data/kmeansLabels_runnerid' + str(self.dbId) + stamp + '.csv',
                   k.labels_, delimiter=',')

        np.savetxt('../data/kmeansCenters_runnerid' + str(self.dbId) + stamp + '.csv',
                   k.cluster_centers_, delimiter=',')

        # mixture weights: fraction of samples assigned to each cluster
        labelList = list(k.labels_)
        compnumList = [labelList.count(i) for i in range(nClusters)]
        total = sum(compnumList)  # FIX: hoisted out of the comprehension (was re-summed per element)
        paiList = [compnum * 1.0 / total for compnum in compnumList]
        return True, paiList


if __name__ == '__main__':
    # Experiment configuration; parameter meanings are documented in
    # INiceRunner.setConfig.
    config = dict(
        dsId=789,  # earlier runs used 785 and 779
        name='test',
        observerGenClassName='EdgeObserverGenerator',
        nObservers=5,
        clusterEstimatorClassName='GaussianClustersEstimatorBy1D',
        one2oneClasName='One2OneDistance',
        n2nClassName='N2NDistances',
        filterCfgJSONDict={'KdeFilter': {'topPer': 1}, 'DensityPeakFilter': {'cutoffPer': 0.1}},
        maxob=None,
        minob=None,
        mixedParamsCfgJSONDict=None,
    )

    conn = MySqlConn(DBConfig(config['name']))

    runner = INiceRunner()
    runner.setConfig(sqlConn=conn, **config)
    runner.run()
