import math
from collections import Counter

import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import cdist


class SMOTE_Sparsity:
    """Sparsity-aware SMOTE oversampler.

    Synthetic minority samples are allocated in proportion to each minority
    point's sparsity (the inverse of its absolute density among its minority
    neighbours), so regions where the minority class is thin receive more
    synthetic samples.  Assumes a binary classification problem.
    """

    def relative_density(self, data, t, Minority_data, Minority_label, Majority_data, Majority_label, k):
        """Compute the relative density of each row of ``data``.

        The absolute density of a point w.r.t. a class is
        ``k / sum(distances to its nearest neighbours of that class)``;
        the relative density RD is the homogeneous (same-class) density
        divided by the heterogeneous (other-class) density.

        Parameters
        ----------
        data : ndarray (n, d) -- query points; assumed to come from the
            fitted sets, so each point's nearest neighbour is itself.
        t : ndarray (n,) -- labels aligned with ``data``.
        Minority_data, Minority_label : minority-class samples and labels.
        Majority_data, Majority_label : majority-class samples and labels
            (the majority labels are accepted for interface symmetry only).
        k : int -- number of neighbours requested from each KNN model.

        Returns
        -------
        tuple ``(RD, abs_density_minority, abs_density_majority,
        minority_nn_indices, majority_nn_indices)``.
        """
        minority_knn = NearestNeighbors(n_neighbors=k, algorithm='kd_tree').fit(Minority_data)
        majority_knn = NearestNeighbors(n_neighbors=k, algorithm='kd_tree').fit(Majority_data)

        minority_nn_indices = minority_knn.kneighbors(data)[1]
        majority_nn_indices = majority_knn.kneighbors(data)[1]

        # Strip the first neighbour column (each point's own index).
        # NOTE(review): the models are fitted with n_neighbors=k, so only
        # k-1 true neighbours survive this strip while the densities below
        # still divide by k.  The constant factor cancels in cal_w's
        # normalisation, so numerical behaviour is deliberately unchanged;
        # confirm before using RD in isolation.
        minority_nn_indices = minority_nn_indices[:, 1:]
        majority_nn_indices = majority_nn_indices[:, 1:]

        n = data.shape[0]
        abs_density_minority = np.zeros(n)
        abs_density_majority = np.zeros(n)
        homogeneous_density = np.zeros(n)
        heterogeneous_density = np.zeros(n)
        RD = np.zeros(n)
        # Points whose minority neighbours all coincide with them
        # (distance sum 0): the density would be infinite; patched below.
        zero_indices = []

        for i in range(n):
            pos1 = minority_nn_indices[i]
            # Hoisted: this sum was previously computed twice per iteration.
            minority_dist_sum = np.sum(cdist(Minority_data[pos1], [data[i]]))
            if minority_dist_sum == 0:
                zero_indices.append(i)
            else:
                abs_density_minority[i] = k / minority_dist_sum

            pos2 = majority_nn_indices[i]
            abs_density_majority[i] = k / np.sum(cdist(Majority_data[pos2], [data[i]]))

            # Homogeneous = density within the point's own class;
            # heterogeneous = density within the opposite class.
            if t[i] == Minority_label[0]:
                homogeneous_density[i] = abs_density_minority[i]
                heterogeneous_density[i] = abs_density_majority[i]
            else:
                homogeneous_density[i] = abs_density_majority[i]
                heterogeneous_density[i] = abs_density_minority[i]

            RD[i] = homogeneous_density[i] / heterogeneous_density[i]

        # Duplicated points get zero relative density and infinite absolute
        # minority density (so their 1/density weight in cal_w becomes 0).
        for index in zero_indices:
            RD[index] = 0
            abs_density_minority[index] = math.inf
        return RD, abs_density_minority, abs_density_majority, minority_nn_indices, majority_nn_indices

    def smote_rd_populate(self, N, Minority_data, Minority_label, base, NN_Matrix, k):
        """Create ``N`` synthetic samples around minority point ``base``.

        Each sample interpolates attribute-wise between the base point and
        one uniformly chosen neighbour from ``NN_Matrix[base]``, using an
        independent uniform gap per attribute (samples fall inside the
        axis-aligned box spanned by the two points, not only on the
        connecting segment -- this matches the original behaviour).

        Parameters
        ----------
        N : int -- number of samples to generate (``N <= 0`` yields empties).
        Minority_data, Minority_label : minority samples and labels.
        base : int -- row index of the seed minority point.
        NN_Matrix : ndarray (n_minority, k) -- precomputed neighbour indices.
        k : int -- number of neighbours per row of ``NN_Matrix``.

        Returns
        -------
        ``(samples, labels)`` -- ndarray (N, d) and ndarray (N,).
        """
        numattrs = Minority_data.shape[1]
        # Loop invariants, hoisted out of the generation loop.
        base_data = Minority_data[base, :]
        base_label = Minority_label[base]
        base_NNs = NN_Matrix[base, :]

        synthetic_samples = []
        synthetic_labels = []
        for _ in range(int(N)):
            # Uniform neighbour choice.  (The previous
            # ``math.ceil(rand()*k) - 1`` mapped rand()==0.0 to index -1,
            # wrapping to the last neighbour; randint has no such edge.)
            neighbour = base_NNs[np.random.randint(k)]
            # One independent gap per attribute, vectorised.
            gaps = np.random.rand(numattrs)
            synthetic_samples.append(base_data + gaps * (Minority_data[neighbour, :] - base_data))
            synthetic_labels.append(base_label)

        if not synthetic_samples:
            # Well-shaped empties; label dtype preserved so later hstack
            # does not upcast integer labels to float.
            return np.empty((0, numattrs)), np.array([], dtype=np.asarray(Minority_label).dtype)
        return np.array(synthetic_samples), np.array(synthetic_labels)

    def smote_rd_generation(self, Minority_data, Minority_label, N, k):
        """Generate all synthetic samples given per-point quotas ``N``.

        Builds the minority k-NN table once, then delegates per-point
        generation to :meth:`smote_rd_populate`.

        Parameters
        ----------
        Minority_data, Minority_label : minority samples and labels.
        N : ndarray (n_minority,) -- synthetic-sample quota per point.
        k : int -- neighbours used for interpolation.

        Returns
        -------
        ``(samples, labels)`` stacked over all seed points.
        """
        synthetic_samples = []
        synthetic_labels = []

        # Request k+1 neighbours so each point's own entry can be stripped.
        nn_model = NearestNeighbors(n_neighbors=k + 1, algorithm='auto')
        nn_model.fit(Minority_data)
        NN_Matrix = nn_model.kneighbors(Minority_data, return_distance=False)[:, 1:]

        for i in range(len(Minority_data)):
            if N[i] > 0:
                new_samples, new_labels = self.smote_rd_populate(
                    N[i], Minority_data, Minority_label, i, NN_Matrix, k
                )
                synthetic_samples.append(new_samples)
                synthetic_labels.append(new_labels)

        # Robustness: with every quota zero (classes already balanced)
        # np.vstack([]) would raise ValueError; return empties instead.
        if not synthetic_samples:
            return (np.empty((0, Minority_data.shape[1])),
                    np.array([], dtype=np.asarray(Minority_label).dtype))
        return np.vstack(synthetic_samples), np.hstack(synthetic_labels)

    def create_synthetic_samples(self, Minority_data, Minority_label, Majority_data, Majority_label, k):
        """Oversample the minority class up to the majority class size.

        Returns
        -------
        ``(X_my, y_my)`` -- combined feature matrix and label vector,
        ordered majority first, then original minority, then synthetic
        minority samples.
        """
        # Per-minority-point sparsity weights (non-negative, sum to 1).
        w = self.cal_w(Minority_data, Minority_label, Majority_data, Majority_label)

        # Total synthetic samples needed to balance the two classes.
        total_generation = len(Majority_label) - len(Minority_label)

        # Integer quota per point from flooring ...
        weight = np.floor(total_generation * w).astype(int)

        # ... then distribute the flooring remainder, one extra sample at a
        # time, to the highest-weight points first.
        remaining = total_generation - np.sum(weight)
        indices = np.argsort(w)[::-1]  # descending weight order
        for i in range(remaining):
            weight[indices[i % len(w)]] += 1

        synthetic_samples, synthetic_labels = self.smote_rd_generation(
            Minority_data, Minority_label, weight, k
        )

        minority_augmented = np.vstack((Minority_data, synthetic_samples))
        X_my = np.vstack([Majority_data, minority_augmented])
        y_my = np.hstack([Majority_label, Minority_label, synthetic_labels])
        return X_my, y_my

    def fit_resample(self, X, y, k=5):
        """Resample ``(X, y)`` so both classes have equal size.

        Assumes a binary problem: the most frequent label is treated as
        the majority class, the second most frequent as the minority.

        Parameters
        ----------
        X : ndarray (n, d) -- feature matrix.
        y : ndarray (n,) -- labels.
        k : int -- neighbours used when interpolating synthetic samples.
        """
        counts = Counter(y).most_common()
        majority_class = counts[0][0]
        minority_class = counts[1][0]

        minority_mask = (y == minority_class)
        majority_mask = (y == majority_class)

        return self.create_synthetic_samples(
            X[minority_mask], y[minority_mask],
            X[majority_mask], y[majority_mask], k
        )

    def cal_w(self, Minority_data, Minority_label, Majority_data, Majority_label, k=6):
        """Return per-minority-point generation weights (sum to 1).

        A point's weight is the normalised inverse of its absolute density
        among minority neighbours, i.e. its sparsity.  Duplicated points
        (infinite density, see :meth:`relative_density`) get weight 0.

        NOTE(review): ``k`` defaults to 6 here while ``fit_resample``
        defaults to 5 for the interpolation neighbours; the two values are
        independent in the original code -- confirm this is intended.
        """
        _, abs_density_minority, _, _, _ = self.relative_density(
            Minority_data, Minority_label, Minority_data, Minority_label,
            Majority_data, Majority_label, k
        )

        sparsity = 1.0 / abs_density_minority  # inf density -> 0 sparsity
        return sparsity / np.sum(sparsity)
