import numpy as np
import heapq as hp
import random
from sklearn import decomposition


# Select minority samples whose neighborhood is sufficiently minority-dominated.
# `data` and `minor_data` are np arrays whose last column is the class label.
def choose_samples(data, minor_data, K1, K2):
    """Pick minority samples with at least K1 same-class points among their
    K2 nearest neighbors (Euclidean distance over the feature columns).

    Parameters:
        data: full dataset, shape (N, d+1); last column is the label.
        minor_data: minority-class subset, shape (M, d+1); all rows are
            assumed to share the label of minor_data[0].
        K1: minimum number of same-class neighbors required.
        K2: neighborhood size.

    Returns:
        (weights, suitable_samples) as np arrays: weights are the per-sample
        neighbor fractions normalized to sum to 1 (left untouched when no
        sample qualifies, avoiding a divide-by-zero), and suitable_samples
        holds the selected rows of minor_data.
    """
    suitable_samples = []
    weights = []
    # All minority rows share one label; hoist it out of the loop.
    minor_label = minor_data[0][-1]
    for i in range(minor_data.shape[0]):
        # Euclidean distance from the current minority point to every point.
        dist = [np.sqrt(np.sum(np.power(minor_data[i][:-1] - data[j][:-1], 2)))
                for j in range(data.shape[0])]
        # Indices of the K2 smallest distances. Selecting indices directly
        # (keyed by distance) is correct even when distances are tied,
        # unlike mapping values back through list.index, which repeats the
        # first matching index for duplicates.
        k2_index = hp.nsmallest(K2, range(len(dist)), key=dist.__getitem__)
        num = sum(1 for idx in k2_index if data[idx][-1] == minor_label)
        if num >= K1:
            suitable_samples.append(list(minor_data[i]))
            weights.append(num / K2)
    suitable_samples_np = np.array(suitable_samples)
    weights_np = np.array(weights)
    total = weights_np.sum()
    # Guard against an empty selection: normalizing by 0 would produce NaNs.
    if total > 0:
        weights_np = weights_np / total
    return weights_np, suitable_samples_np


# Synthetic-sample generator: draws points on the Mahalanobis ellipsoid of a
# randomly chosen seed sample in PCA space.
def generate_samples(minor_data_pca, weights, V, Orate):
    """Generate `Orate` synthetic samples in the reduced PCA space.

    Parameters:
        minor_data_pca: selected minority samples projected into PCA space,
            shape (M, k).
        weights: probability distribution over the M rows (must sum to 1).
        V: the k leading singular values (variances along each PCA axis).
        Orate: number of synthetic samples to produce.

    Returns:
        np array of shape (Orate, k). Each row satisfies
        sum_j attrs[j]^2 / alphaV[j] == 1, i.e. it lies on the ellipsoid
        induced by the chosen seed sample.
    """
    num_attrs = minor_data_pca.shape[1]
    new_samples = np.zeros([Orate, num_attrs])
    for row in range(Orate):
        # Pick a seed sample index according to the weight distribution.
        index = np.random.choice(len(weights), p=weights.ravel())
        X = np.square(minor_data_pca[index])
        alpha = np.dot(X, V)
        # Per-axis squared semi-axes of the ellipsoid; non-negative because
        # X >= 0 and singular values V >= 0. NOTE(review): a zero singular
        # value would divide by zero below — assumes V is strictly positive.
        alphaV = alpha * V
        attrs = []
        s = 0.0
        for j in range(num_attrs - 1):
            bound = pow(alphaV[j], 0.5)
            r = random.uniform(-bound, bound)
            attrs.append(r)
            s += pow(r, 2) / alphaV[j]
        # With more than 2 attributes the accumulated s can exceed 1, which
        # previously made (1-s)*alphaV[-1] negative and pow(...) complex,
        # crashing the float-array assignment. Clamp at 0 so the last
        # coordinate degenerates to the ellipsoid boundary instead.
        attrs.append(pow(max(0.0, 1.0 - s) * alphaV[-1], 0.5))
        new_samples[row] = attrs
    return new_samples


# `minor` is indexable by class: minor[i] is the i-th minority-class sample
# set (np array, last column = label); t is the number of minority classes.
def MDO_sampleing(data, minor, n_maj, t):
    """Oversample each minority class up to `n_maj` samples via MDO.

    For every minority class: select suitable seed samples, center them,
    PCA-project (k=2) via SVD of the covariance matrix, generate synthetic
    points in the reduced space, and map them back to the original space.

    Parameters:
        data: full dataset, shape (N, d+1); last column is the label.
        minor: per-class minority sample sets.
        n_maj: majority-class size; each class is topped up to this count.
        t: number of minority classes.

    Returns:
        np array stacking all generated samples (label column included).
    """
    New = []
    for i in range(t):
        n_i = minor[i].shape[0]
        weights, suit_minor_sample_i = choose_samples(data, minor[i], 5, 10)
        mean = suit_minor_sample_i.mean(axis=0)
        # Center the features (drop the label column).
        Z_i = (suit_minor_sample_i - mean)[:, :-1]
        m, n = Z_i.shape
        # Covariance matrix of the centered features.
        Sigma = np.dot(Z_i.T, Z_i) / m
        # SVD of the (symmetric) covariance gives the principal axes.
        U, S, V = np.linalg.svd(Sigma)
        k = 2  # reduced dimensionality (fixed by design)
        U_reduce = U[:, :k].reshape(n, k)
        T_i = np.dot(Z_i, U_reduce)
        Orate = n_maj - n_i
        new_samples = generate_samples(T_i, weights, S[:k], Orate)
        # Project back to the original feature space and re-add the mean
        # (broadcasting replaces the former per-column loop).
        origin_new_samples = np.dot(new_samples, U_reduce.T) + mean[:-1]
        # Attach the class label to every generated row.
        labels = np.full([Orate, 1], minor[i][0][-1])
        new = np.append(origin_new_samples, labels, axis=1)
        New = new if i == 0 else np.append(New, new, axis=0)
    return New
