import numpy as np
import time
from numpy import (
    arange,
    argsort,
    argwhere,
    empty,
    full,
    inf,
    intersect1d,
    max,
    ndarray,
    sort,
    sum,
    zeros,
)

from scipy.spatial.distance import pdist, squareform


class SNN_DPC:
    """Shared-Nearest-Neighbor Density Peaks Clustering (SNN-DPC).

    Combines shared-nearest-neighbor similarity with the density-peaks
    center-selection / assignment strategy.  Supply either ``dataSet``
    (raw samples; Euclidean distances are computed) or ``dis_matrix``
    (a precomputed (n, n) distance matrix, which takes precedence).

    Parameters:
        k: number of nearest neighbors (each sample counts itself).
            NOTE: ``run`` may increase ``self.k`` while resolving
            otherwise-unassignable samples in the second assignment step.
        center_num: number of cluster centers; when None or "" the count
            is derived automatically from the decision-value curve.
        dataSet: (n, d) sample array, optional.
        dis_matrix: (n, n) distance matrix, optional.
        discret_ratio: percentage of samples to flag as outliers
            (labelled -1); 0 disables outlier handling.
    """

    def __init__(
        self, k, center_num=None, dataSet=None, dis_matrix=None, discret_ratio=0
    ):
        self.k = k
        self.center_num = center_num
        self.dataSet = dataSet
        self.dis_matrix = dis_matrix
        self.discret_ratio = discret_ratio

    def _numSamples(self):
        """Return the sample count n, preferring the distance matrix.

        Raises ValueError when neither input was supplied (the previous
        code silently fell through and hit a NameError on ``n``).
        """
        if self.dis_matrix is not None:
            return self.dis_matrix.shape[0]
        if self.dataSet is not None:
            return self.dataSet.shape[0]
        raise ValueError("either dataSet or dis_matrix must be provided")

    def getDistanceMatrix(self):
        """Compute the pairwise Euclidean distance matrix of ``dataSet``."""
        return squareform(pdist(self.dataSet))

    def getSharedNeighbors(self, distanceMatrix, k):
        """Compute k-nearest neighbors and pairwise shared neighbors.

        Returns:
            indexDistanceAsc: (n, n) each row holds sample indices sorted
                by ascending distance to that row's sample.
            indexNeighbor: (n, k) the k nearest samples of each sample
                (the sample itself is included).
            indexSharedNeighbor: (n, n, k) entry [i, j, :m] holds the m
                shared neighbors of i and j; the tail is padded with -1.
            numSharedNeighbor: (n, n) shared-neighbor count m per pair.
        """
        n = self._numSamples()
        indexDistanceAsc = argsort(distanceMatrix)
        indexNeighbor = indexDistanceAsc[:, :k]
        # Pad with -1 (an impossible sample index) instead of `empty`:
        # `empty` leaves arbitrary garbage in the unused tail, which the
        # membership tests in getsimilarity (`i in indexSharedNeighbor[i, j]`)
        # could match by accident, making the similarity nondeterministic.
        indexSharedNeighbor = full([n, n, k], -1, int)
        numSharedNeighbor = empty([n, n], int)
        for i in range(n):
            numSharedNeighbor[i, i] = 0
            for j in range(i):
                # Samples appearing in both neighborhoods.
                shared = intersect1d(
                    indexNeighbor[i], indexNeighbor[j], assume_unique=True
                )
                numSharedNeighbor[j, i] = numSharedNeighbor[i, j] = shared.size
                indexSharedNeighbor[j, i, : shared.size] = indexSharedNeighbor[
                    i, j, : shared.size
                ] = shared
        return indexDistanceAsc, indexNeighbor, indexSharedNeighbor, numSharedNeighbor

    def getsimilarity(self, indexSharedNeighbor, numSharedNeighbor, distanceMatrix):
        """Build the SNN similarity matrix.

        similarity[i, j] = |SNN(i, j)|^2 / (sum of distances from i and j
        to every shared neighbor), but only when i and j lie in each
        other's k-neighborhood (equivalently: both appear in their own
        shared-neighbor set); otherwise 0.
        """
        n = self._numSamples()
        similarity = zeros([n, n])  # diagonal and non-mutual pairs stay 0
        for i in range(n):
            for j in range(i):
                # Mutual-neighborhood test; safe because the shared-neighbor
                # buffer is padded with -1, never garbage.
                if i in indexSharedNeighbor[i, j] and j in indexSharedNeighbor[i, j]:
                    indexShared = indexSharedNeighbor[i, j, : numSharedNeighbor[i, j]]
                    distanceSum = sum(
                        distanceMatrix[i, indexShared] + distanceMatrix[j, indexShared]
                    )
                    similarity[i, j] = similarity[j, i] = (
                        numSharedNeighbor[i, j] ** 2 / distanceSum
                    )
        return similarity

    def getLocalDensity(self, similarity):
        """Local density rho: sum of each sample's k largest similarities."""
        return np.sum(np.sort(similarity)[:, -self.k :], axis=1)

    def getRelativeDistance(self, rho, indexNeighbor, distanceMatrix):
        """Relative distance delta of every sample.

        For each sample a, delta is the minimum, over all denser samples
        b, of d(a, b) weighted by the combined k-neighbor distance sums
        of a and b.  The densest sample receives the maximum delta of all
        other samples.
        """
        n = self._numSamples()
        # Sum of distances from each sample to its k nearest neighbors.
        distanceNeighborSum = empty(n)
        for i in range(n):
            distanceNeighborSum[i] = sum(distanceMatrix[i, indexNeighbor[i]])
        # Sample indices ordered by decreasing density.
        indexRhoDesc = argsort(rho)[::-1]
        delta = full(n, inf)
        for i, a in enumerate(indexRhoDesc[1:], 1):
            for b in indexRhoDesc[:i]:  # every sample denser than a
                delta[a] = min(
                    delta[a],
                    distanceMatrix[a, b]
                    * (distanceNeighborSum[a] + distanceNeighborSum[b]),
                )
        # Exclude the density peak itself, then give it the global maximum.
        delta[indexRhoDesc[0]] = -inf
        delta[indexRhoDesc[0]] = max(delta)
        return delta

    def defineClusterCenters(self, rho, delta):
        """Pick cluster centers and initialize the label array.

        Centers are the samples with the largest decision value
        gamma = normalized(rho) * normalized(delta).  The number of
        centers is ``center_num`` when given, otherwise the elbow of the
        descending gamma curve (largest weighted slope).  Despite the
        historical naming, rho is NOT squared before normalization.
        """
        n = self._numSamples()
        # Min-max normalize both criteria.
        normalRho = (rho - np.min(rho)) / (np.max(rho) - np.min(rho))
        normalDelta = (delta - np.min(delta)) / (np.max(delta) - np.min(delta))
        gamma = normalRho * normalDelta
        targetIndex = None
        # Only the DN largest gamma values take part in center selection.
        DN = int(n / 2)
        gammaDesc = sort(gamma)[::-1][:DN]
        gammaDescIndex = argsort(gamma)[::-1][:DN]
        if self.center_num is None or self.center_num == "":
            # Automatic center count: position of the largest weighted
            # slope on the descending gamma curve.  NOTE(review): if every
            # slope is <= 0 (degenerate gamma), targetIndex stays None and
            # the slicing below fails — same as the original behavior.
            slope = 0
            for index in range(1, len(gammaDesc) - 1):
                currentSlope = (
                    index
                    * (gammaDesc[index] - gammaDesc[index + 1])
                    / (np.max(gammaDesc) - np.min(gammaDesc))
                )
                if currentSlope > slope:
                    targetIndex = index
                    slope = currentSlope
        else:
            targetIndex = self.center_num - 1
        # Every sample starts unassigned (-1); centers get labels
        # 0..c-1 in ascending sample-index order.
        indexAssignment = full(n, -1)
        indexCentroid = sort(gammaDescIndex[: targetIndex + 1])
        indexAssignment[indexCentroid] = arange(len(indexCentroid))
        return indexCentroid, indexAssignment

    def firstStepAssign(
        self, indexCentroid, indexNeighbor, indexAssignment, numSharedNeighbor
    ):
        """First assignment pass: BFS outward from every cluster center.

        A neighbor b of an assigned sample a inherits a's label when they
        share at least k/2 neighbors.  Mutates ``indexAssignment`` in place.
        """
        queue = indexCentroid.tolist()
        head = 0  # read cursor: avoids O(n) list.pop(0) per dequeue
        while head < len(queue):
            a = queue[head]
            head += 1
            for b in indexNeighbor[a]:
                if indexAssignment[b] == -1 and numSharedNeighbor[a, b] >= self.k / 2:
                    indexAssignment[b] = indexAssignment[a]
                    queue.append(b)

    def secondStepAssign(
        self, indexAssignment, indexDistanceAsc, indexCentroid, indexUnassigned
    ):
        """Second assignment pass for samples the BFS did not reach.

        Repeatedly gives unassigned samples the majority label among
        their k nearest samples; when no unassigned sample has any
        labelled neighbor, k is enlarged (side effect on ``self.k``) and
        the scan repeats.  Mutates ``indexAssignment`` in place.  The
        ``indexUnassigned`` argument is kept for interface compatibility
        but is recomputed internally.
        """
        indexUnassigned = argwhere(indexAssignment == -1).flatten()
        while indexUnassigned.size:
            # Vote matrix: rows = unassigned samples, columns = labels.
            numNeighborAssignment = zeros(
                [indexUnassigned.size, len(indexCentroid)], int
            )
            for i, a in enumerate(indexUnassigned):
                for b in indexDistanceAsc[a, : self.k]:
                    if indexAssignment[b] != -1:
                        numNeighborAssignment[i, indexAssignment[b]] += 1
            most = max(numNeighborAssignment)
            if most > 0:
                # Assign every (sample, label) pair that reached the top vote.
                temp = argwhere(numNeighborAssignment == most)
                indexAssignment[indexUnassigned[temp[:, 0]]] = temp[:, 1]
                indexUnassigned = argwhere(indexAssignment == -1).flatten()
            else:
                # Nobody has a labelled neighbor yet: widen the neighborhood.
                self.k += 1

    def getDavgs(self, centroids, clusterLabels, distance):
        """Average centripetal distance of each cluster.

        For cluster i: mean distance of its members to the center (the
        center's own zero distance is excluded via the n-1 divisor).
        Singleton clusters get a tiny epsilon to avoid division by zero.
        """
        davgs = np.zeros(shape=len(centroids))
        for i, center in enumerate(centroids):
            memberDistances = distance[clusterLabels == i, center]
            if memberDistances.shape[0] < 2:
                davgs[i] = 1e-100
                continue
            davgs[i] = np.sum(memberDistances) / (memberDistances.shape[0] - 1)
        return davgs

    def getDiscretePoints(self, rho, delta, clusterLabels, davgs):
        """Rank samples by an outlier factor and return the top indices.

        fof(i) = normalized(delta) / davgs[cluster(i)] / normalized(rho);
        a sample with zero normalized density falls back to the smallest
        non-zero density within its own cluster.  Returns the indices of
        the ``discret_ratio`` percent (plus one) largest factors.
        """
        fof_list = np.zeros(shape=len(rho))
        # Min-max normalize (rho is used directly, not squared).
        normalRho = (rho - np.min(rho)) / (np.max(rho) - np.min(rho))
        normalDelta = (delta - np.min(delta)) / (np.max(delta) - np.min(delta))
        for i, label in enumerate(clusterLabels):
            if normalRho[i] == 0:
                # Substitute the smallest non-zero density of the same cluster.
                labelRho = normalRho[clusterLabels == label]
                filterRho = list(filter(lambda item: item != 0, labelRho))
                fof_list[i] = normalDelta[i] / davgs[label] / np.min(filterRho)
            else:
                fof_list[i] = normalDelta[i] / davgs[label] / normalRho[i]
        # Keep only the leading fraction of factors; the +1 means at least
        # one sample is always flagged when this method is called.
        num = int(len(fof_list) * self.discret_ratio / 100)
        return np.argsort(-fof_list)[: num + 1]

    def run(self):
        """Execute the full SNN-DPC pipeline.

        Returns:
            indexCentroid: indices of the cluster centers.
            indexAssignment: per-sample cluster label (-1 for outliers).
        """
        startTime = time.time()
        # Prefer an explicitly supplied distance matrix over raw samples.
        if self.dis_matrix is not None:
            distanceMatrix = self.dis_matrix
        elif self.dataSet is not None:
            distanceMatrix = self.getDistanceMatrix()
        else:
            raise ValueError("either dataSet or dis_matrix must be provided")
        # Neighborhoods and shared-neighbor statistics.
        (
            indexDistanceAsc,
            indexNeighbor,
            indexSharedNeighbor,
            numSharedNeighbor,
        ) = self.getSharedNeighbors(distanceMatrix, self.k)
        # SNN similarity, local density and relative distance.
        similarity = self.getsimilarity(
            indexSharedNeighbor, numSharedNeighbor, distanceMatrix
        )
        rho = self.getLocalDensity(similarity)
        delta = self.getRelativeDistance(rho, indexNeighbor, distanceMatrix)
        # Center selection.
        indexCentroid, indexAssignment = self.defineClusterCenters(rho, delta)
        # Two-step label propagation.
        self.firstStepAssign(
            indexCentroid, indexNeighbor, indexAssignment, numSharedNeighbor
        )
        indexUnassigned = argwhere(indexAssignment == -1).flatten()
        # Outlier handling only applies when step one left samples behind.
        exitDiscret = indexUnassigned.size != 0
        self.secondStepAssign(
            indexAssignment, indexDistanceAsc, indexCentroid, indexUnassigned
        )
        if exitDiscret and self.discret_ratio != 0 and self.discret_ratio is not None:
            # Average centripetal distance per cluster, then flag outliers.
            davgs = self.getDavgs(indexCentroid, indexAssignment, distanceMatrix)
            discretIndex = self.getDiscretePoints(rho, delta, indexAssignment, davgs)
            indexAssignment[discretIndex] = -1
        endTime = time.time()
        print("Cluster total execution time:", str(endTime - startTime))
        return indexCentroid, indexAssignment


if __name__ == "__main__":
    # Demo: cluster a precomputed spatial distance matrix from disk.
    distance_matrix = np.load("data/spatial_dis_matrix.npy")
    model = SNN_DPC(k=13, dis_matrix=distance_matrix, discret_ratio=2)
    centroid, assignment = model.run()
    print(centroid, assignment)
