from view.endSrc.ModeFilter import ModeFilter
from view.endSrc.Dataset import Dataset
from view.endSrc.N2NDistances import N2NDistances
from view.endSrc.tHighDensityFilter import tHighDensityFilter
import numpy as np


class DensityPeakFilter(ModeFilter):
    '''
    Pick out cluster-center candidates by looking for density peaks.

    Implements the "clustering by density peaks" heuristic: a point is a
    center candidate when it has both a high local density and a large
    distance to every point of higher density.
    '''
    def __init__(self, dataset: Dataset, cutoffPercent: float, n2nFun: N2NDistances, filterTable: tHighDensityFilter):
        '''
        init the instance of DensityPeakFilter

        Parameters
        ----------
        dataset: original dataset for clustering operations
        cutoffPercent: the cutoff distance is taken at this quantile of the
            sorted pairwise-distance list; must satisfy 0 < cutoffPercent < 1
        n2nFun: callable, used to calculate the pairwise distance matrix for
            the selected data
        filterTable: DB table descriptor forwarded to the ModeFilter base class

        Examples
        --------
        filter = DensityPeakFilter(dataset, cutoffPercent, n2nFun, filterTable)
        '''

        # The annotation above was `np.float`, an alias removed in NumPy 1.24
        # (it raised AttributeError at import time); builtin `float` is the
        # exact equivalent.
        # Also reject non-positive values: a negative cutoffPercent would
        # silently index the sorted distance list from the wrong end.
        assert 0.0 < cutoffPercent < 1.0, 'cutoffPercent must be strictly between 0 and 1'

        ModeFilter.__init__(self, filterTable)

        self.dataset = dataset
        self.cutoffPercent = cutoffPercent
        self.n2nFun = n2nFun

    def __call__(self, ids: list):
        '''
        pick up the central points (density peaks) among the given points

        Parameters
        ----------
        ids: indices of the points in the dataset to be processed

        Returns
        -------
        centerIds: list, dataset indices corresponding to the center points

        Examples
        --------
        filter = DensityPeakFilter(dataset, cutoffPercent, n2nFun, filterTable)
        centerIds = filter(ids)
        '''

        n = len(ids)

        # fetch the selected rows from the dataset and compute their
        # pairwise distance matrix
        data = self.dataset[ids, :]
        dMat = self.n2nFun(data)

        # flatten the strict lower triangle of the distance matrix
        # (skips the diagonal, i.e. each point's zero self-distance)
        dMatFlatten = [dMat[i, j] for i in range(1, n) for j in range(i)]
        m = len(dMatFlatten)

        # sort the distances; the cutoff distance is the
        # cutoffPercent-quantile of all pairwise distances
        dMatFlatten.sort()
        cutoffDis = dMatFlatten[round((m - 1) * self.cutoffPercent)]  # fix bug by ChenHongjie
        # NOTE(review): if cutoffDis is 0 (many duplicate points), the
        # division below produces inf/nan — confirm n2nFun cannot yield an
        # all-zero lower quantile

        # local density p_i of every point (gaussian kernel), and the point
        # order idxDesc by descending density
        p = []
        for i in range(n):
            pi = 0
            for j in range(n):
                if i != j:
                    pi += np.exp(-((dMat[i, j] / cutoffDis) ** 2))  # gaussian kernel
            p.append(pi)
        p = np.array(p)
        idxDesc = np.argsort(-p)

        # sigma_i: minimum distance from point i to any point of higher
        # density; h_i: index of that nearest higher-density point
        # (h is currently unused downstream, but kept — it is the standard
        # companion output used to assign non-center points to clusters)
        sigma = [max(dMatFlatten) + 0.001] * n  # sentinel strictly larger than any real distance
        h = [0] * n  # TODO set initial value, set as -1 will be better? (commented by ChenHongjie)
        for i in range(1, n):  # the densest point has no higher-density neighbour, so skip it
            for j in range(i):
                if dMat[idxDesc[i], idxDesc[j]] < sigma[idxDesc[i]]:
                    sigma[idxDesc[i]] = dMat[idxDesc[i], idxDesc[j]]
                    h[idxDesc[i]] = idxDesc[j]
        sigma[idxDesc[1]] = min(sigma[1:])  # TODO is it resonable? (commented by ChenHongjie)

        # estimate the number of centers (compNum) from the largest jump in
        # the sorted sigma sequence — added by ChenHongjie, 2020.08.24
        # TODO: need further test
        sigmasort = np.sort(sigma)
        sigmaDiff = [sigmasort[i + 1] - sigmasort[i] for i in range(n - 1)]

        meanSigmaDiff = np.mean(sigmaDiff)
        changeRate = sigmaDiff / meanSigmaDiff
        # a gap more than 20x the mean gap marks the start of the "center"
        # tail of the sigma distribution
        centerPoints = np.where(changeRate > 20)[0]
        if len(centerPoints) == 0:
            compNum = 1
        else:
            fluctPoint = centerPoints[0]
            compNum = len(changeRate) - fluctPoint

        # take the compNum points with the largest sigma, then drop any whose
        # density is not above the 5th-percentile density (low-density
        # outliers also have large sigma but are not centers)
        centerIdxRelative_sigma = np.argsort(-np.array(sigma))[:compNum]
        p_threshold = np.sort(p)[round(n * 0.05)]
        centerIdxRelative = [i for i in centerIdxRelative_sigma if p[i] > p_threshold]

        # map the relative indices back to dataset indices
        centerIds = [ids[i] for i in centerIdxRelative]

        # write the result to DB; build the fully-qualified class name
        # directly instead of string-parsing repr(type(self))
        clsName = type(self).__module__ + '.' + type(self).__qualname__
        # NOTE(review): the "n2nFun" field records *this filter's* class name,
        # not self.n2nFun's — looks like a slip, confirm before changing
        self.writeOutputIdsToDB(centerIds, 'DensityPeakFilter', {"cutoffPer": self.cutoffPercent, "n2nFun" : clsName})

        return centerIds
