# -*- coding: utf-8 -*-
import numpy as np
import logging
from api.serial import e2LSH

logger = logging.getLogger('django')


class NegativeAnalyzer():
    """DBSCAN clustering over an LSH-bucketed sample set, followed by a
    P-Rank structural-similarity computation and a compactness score.

    Instance attributes (all set in ``__init__``):
        D: sample set, ``{sample_id: (feature_vector, hash_bucket_index)}``.
        In: link structure, ``{sample_id: [ids whose Out-links point here]}``.
        Out: link structure, ``{sample_id: [neighbor ids]}`` for core points.
        labels: ``{sample_id: cluster_id}``; -1 = noise, 0 = unvisited,
            clusters are numbered from 1.
        cluster: ``{cluster_id: [sample_ids]}`` (filled by updateCluster).
        isMultipleCluster: True once two or more clusters were found.
        hash_table: list of buckets; ``hash_table[i]`` is an iterable of the
            sample ids that fall in bucket ``i`` (speeds up neighbor queries).
    """
    # NOTE(review): the original version also declared D/In/Out/labels/cluster/
    # hash_table as *class* attributes with mutable defaults ({} / []), which
    # are shared between all instances in Python.  They were always shadowed
    # by the instance attributes assigned in __init__, so they have been
    # removed to eliminate the shared-state hazard.

    def __init__(self, D, hash_table):
        self.D = D  # {id: ([feature], hashIndex)}
        self.In = {}
        self.Out = {}
        self.labels = {}
        self.cluster = {}
        self.isMultipleCluster = False
        self.hash_table = hash_table

    def updateIn(self):
        """Invert the ``Out`` link structure into ``In``.

        Example: Out = {1:[1,2,3], 2:[1,2,3], 3:[1,3]}
              -> In  = {1:[1,2,3], 2:[1,2], 3:[1,2,3]}

        ``In`` must already contain an (empty) list for every sample id;
        ``MyDBSCAN`` initializes it that way.
        """
        for P, out_links in self.Out.items():
            for Pin in out_links:
                self.In[Pin].append(P)

    def updateCluster(self):
        """Build the cluster -> [sample_id] map from ``labels``,
        skipping noise (-1) and unvisited (0) points."""
        for P, label in self.labels.items():
            if label in (-1, 0):
                continue
            self.cluster.setdefault(label, []).append(P)

    def getIn(self):
        return self.In

    def getOut(self):
        return self.Out

    def MyDBSCAN(self, eps, MinPts):
        """Cluster the dataset ``self.D`` using the DBSCAN algorithm.

        Labels are written into ``self.labels``: -1 means noise, 0 means the
        point has not been visited, and clusters are numbered starting from 1.
        ``self.In`` / ``self.Out`` are initialized here as well; ``self.Out``
        is filled by ``regionQuery`` for core points.

        Parameters:
          `eps`    - Threshold (Euclidean) distance.
          `MinPts` - Minimum required number of neighbors for a core point.

        Returns:
            bool: True if more than one cluster was found.  (Note: despite
            what an earlier docstring claimed, this does NOT return the label
            list — read ``self.labels`` after the call.)
        """
        # Initialize labels and the In/Out link structures for every sample.
        for pid in self.D:
            self.labels[pid] = 0
            self.In[pid] = []
            self.Out[pid] = []

        C = 0  # current cluster id

        for P in self.D:
            if self.labels[P] != 0:  # already visited
                continue

            NeighborPts = self.regionQuery(P, eps, MinPts)

            if len(NeighborPts) < MinPts:
                # Provisionally noise; growCluster may later re-label it as a
                # border point of some cluster.
                self.labels[P] = -1
            else:
                C += 1
                self.growCluster(P, NeighborPts, C, eps, MinPts)

            if C > 1:
                self.isMultipleCluster = True

        return self.isMultipleCluster

    def growCluster(self, P, NeighborPts, C, eps, MinPts):
        """Grow a new cluster with label `C` from the seed point `P`.

        Parameters:
          `P`           - Id of the seed point for this new cluster.
          `NeighborPts` - All of the neighbors of `P`.
          `C`           - The label for this new cluster.
          `eps`         - Threshold distance.
          `MinPts`      - Minimum required number of neighbors.
        """
        self.labels[P] = C

        # NeighborPts is used as a FIFO work list; it may grow while we scan.
        i = 0
        while i < len(NeighborPts):
            Pn = NeighborPts[i]

            if self.labels[Pn] == -1:
                # Previously marked noise: claim it as a border point of C.
                self.labels[Pn] = C

            elif self.labels[Pn] == 0:
                self.labels[Pn] = C

                PnNeighborPts = self.regionQuery(Pn, eps, MinPts)

                if len(PnNeighborPts) >= MinPts:
                    # IMPORTANT: rebind rather than extend in place.
                    # NeighborPts can be the very list object regionQuery
                    # stored in self.Out, and Out must keep only the point's
                    # own neighbors.
                    NeighborPts = NeighborPts + PnNeighborPts

            i += 1

    def regionQuery(self, P, eps, MinPts):
        """Return all samples in P's hash bucket strictly within `eps`
        (Euclidean distance) of P.  P itself is included (distance 0).

        Side effect: if P turns out to be a core point (>= MinPts neighbors),
        its neighbor list is also recorded in ``self.Out[P]``.
        """
        neighbors = []
        # asarray lets feature vectors be plain lists as well as ndarrays;
        # the original list - list subtraction raised TypeError for lists.
        feature_P = np.asarray(self.D[P][0])
        bucket = self.hash_table[self.D[P][1]]
        for Pn in bucket:
            distance = np.linalg.norm(feature_P - np.asarray(self.D[Pn][0]))
            if distance < eps:
                neighbors.append(Pn)

        if len(neighbors) >= MinPts:
            self.Out[P] = neighbors

        return neighbors

    def PRank(self, lamda, C, k):
        """Compute the P-Rank structural similarity matrix.

        Parameters:
          `lamda` - weight between In-link and Out-link evidence (λ in the paper).
          `C`     - decay factor (C in the paper).
          `k`     - number of iterations (k in the paper).

        Returns:
            R: ``{id_a: {id_b: similarity}}`` with ``R[a][a] == 1``
               (the paper's R).
        """
        ids = list(self.D.keys())
        # R holds the current estimate; Rstar (the paper's R*) the next one.
        # Both start as the identity: 1 on the diagonal, 0 elsewhere.
        R = {a: {b: (1 if b == a else 0) for b in ids} for a in ids}
        Rstar = {a: {b: (1 if b == a else 0) for b in ids} for a in ids}

        # Iteration
        for _ in range(k):
            for a in ids:
                for b in ids:
                    in_a, in_b = self.In[a], self.In[b]
                    # If a or b has no In-links, the "in" contribution is 0.
                    if len(in_a) == 0 or len(in_b) == 0:
                        Rstar[a][b] = 0
                    else:
                        tempIn = sum(R[ia][ib] for ia in in_a for ib in in_b)
                        Rstar[a][b] = lamda * C * tempIn / (len(in_a) * len(in_b))

                    out_a, out_b = self.Out[a], self.Out[b]
                    # If a or b has no Out-links, the "out" contribution is 0.
                    if len(out_a) != 0 and len(out_b) != 0:
                        tempOut = sum(R[oa][ob] for oa in out_a for ob in out_b)
                        Rstar[a][b] = Rstar[a][b] + (1 - lamda) * C * tempOut / (len(out_a) * len(out_b))

            # Update: copy the new estimate back, pinning the diagonal to 1.
            for a in ids:
                for b in ids:
                    R[a][b] = Rstar[a][b] if a != b else 1
        return R

    def evaluate(self, R):
        """Compactness score Cf of the clustering under similarity matrix R.

        For each cluster the representative ("real centroid") is the actual
        member closest (Euclidean) to the mean feature vector.  Cf is the
        total dissimilarity (1 - R) of members to their representative,
        divided by the total pairwise dissimilarity between representatives
        of distinct clusters.

        Returns 0 when fewer than two clusters exist (denominator is 0).
        """
        # {cluster_id: [sample_ids]} — built earlier by updateCluster().
        cluster2Point_dict = self.cluster

        # Virtual centroid of each cluster: mean of its members' features.
        virtualCentroid_dict = {}
        for cl, members in cluster2Point_dict.items():
            feature_list = [self.D[P][0] for P in members]
            virtualCentroid_dict[cl] = np.mean(feature_list, axis=0)

        # Real centroid: the member closest to the virtual centroid.
        realCentroid_dict = {}
        for cl, members in cluster2Point_dict.items():
            realCentroid = -1
            # float('inf') instead of the old magic 9999, which silently
            # failed whenever all distances were >= 9999.
            minDistance = float('inf')
            virtualCentroid = virtualCentroid_dict[cl]
            for P in members:
                distance = np.linalg.norm(np.asarray(self.D[P][0]) - virtualCentroid)
                if distance < minDistance:
                    minDistance = distance
                    realCentroid = P
            realCentroid_dict[cl] = realCentroid

        logger.info("聚类中心点结果：")
        logger.info(str(realCentroid_dict))

        # Numerator: dissimilarity of every member to its cluster's centroid.
        numerator = 0
        for cl, members in cluster2Point_dict.items():
            centerPoint = realCentroid_dict[cl]
            for P in members:
                numerator = numerator + (1 - R[P][centerPoint])

        # Denominator: pairwise dissimilarity between distinct centroids
        # (each unordered pair counted once via cluster_j > cluster_i).
        denominator = 0
        for cluster_j in realCentroid_dict:
            for cluster_i in realCentroid_dict:
                if cluster_j > cluster_i:
                    denominator = denominator + (1 - R[realCentroid_dict[cluster_i]][realCentroid_dict[cluster_j]])

        try:
            return numerator / denominator
        except ZeroDivisionError:
            logger.info("聚类数小于2")
            return 0