import pickle
import pywt
from scipy.spatial.distance import pdist, cosine
import numpy as np
import utils
import random
def wpd_plt(signal, n):
    """Decompose `signal` into an n-level wavelet packet tree (db1, symmetric).

    Returns a 4-tuple:
      wp          -- the pywt.WaveletPacket tree
      coeffs      -- dict mapping a running 1-based node id to that node's
                     coefficient array (id 1 starts as the raw signal and is
                     overwritten by the first level-1 node when n >= 1)
      paths       -- node paths ('a', 'd', 'aa', ...) in frequency order
      level_index -- one np.array of node ids per decomposition level
    """
    wp = pywt.WaveletPacket(data=signal, wavelet='db1', mode='symmetric', maxlevel=n)
    coeffs = dict()
    coeffs[1] = signal
    paths = []
    level_index = []
    node_id = 1
    for level in range(1, n + 1):
        ids_at_level = []
        for path in [node.path for node in wp.get_level(level, 'freq')]:
            coeffs[node_id] = wp[path].data
            ids_at_level.append(node_id)
            paths.append(path)
            node_id += 1
        level_index.append(np.array(ids_at_level))
    return wp, coeffs, paths, level_index


def getwpd_plt(sample, n):
    """Decompose each row of `sample` and keep only the coefficient dict
    (element [1] of the wpd_plt tuple) for every sample."""
    return [wpd_plt(row, n)[1] for row in sample]


def getwpd_plt_base(sample, n):
    """Decompose each row of `sample`, keeping the full wpd_plt tuple
    (tree, coefficient dict, paths, level index) per sample."""
    return [wpd_plt(row, n) for row in sample]


def getfaultsample(k, num, fault_sample):
    """Randomly draw `num` distinct rows from `fault_sample` and split them
    into the first k rows and the remaining num - k rows."""
    chosen = np.random.choice(len(fault_sample), num, replace=False)
    picked = fault_sample[chosen]
    return picked[:k, :], picked[k:, :]


def getbase_idx(k_short):
    """Pick one base index uniformly from [0, k_short) and return it
    together with the list of remaining indices."""
    candidates = list(range(k_short))
    chosen = np.random.choice(candidates, 1, replace=False)[0]
    remaining = [i for i in candidates if i != chosen]
    return chosen, remaining


def getmap(idx, lev_list):
    """Pair each node id in `idx` with the level array from `lev_list` that
    contains it.

    Returns a list of [id, level_array] pairs, in the order of `idx`; ids
    found in no level array are silently skipped.

    Fix: the original ended the inner loop body with `continue`, which is a
    no-op as the last statement. The intent is to stop scanning once the id's
    level is found, which is `break` — each id occurs in exactly one level
    array, so this preserves the output while avoiding redundant scans.
    """
    pairs = []
    for i in idx:
        for level_ids in lev_list:
            if i in level_ids.tolist():
                pairs.append([i, level_ids])
                break  # id found; remaining levels cannot contain it again
    return pairs


def cosine_Distance_1(A, B):
    """Cosine distance between vectors A and B, returned as the length-1
    array produced by scipy's pdist."""
    stacked = np.vstack((A, B))
    return pdist(stacked, 'cosine')


def getsimial(IMF_t, IMF_S, lamda):
    """Exponential cosine similarity exp(-dist/lamda) of IMF_t against every
    element of IMF_S, flattened into a 1-D array."""
    sims = [np.exp(-cosine_Distance_1(IMF_t, member) / lamda) for member in IMF_S]
    return np.array(sims).reshape(-1)


def getsimilarity(base, reference, temp_map, lambd):
    """For every (node id, level array) pair in temp_map, score the base
    sample's packet coefficients against `reference` via getsim."""
    scores = []
    for node_id, level_ids in temp_map:
        scores.append(getsim(base[1][node_id], level_ids, reference, lambd))
    return scores


def getechsim(base_pym, idx_list, indivual, lambd):
    """Scan idx_list and return the maximum exp(-cosine/lambd) similarity
    between base_pym and indivual[i], together with the winning index.

    Falls back to (0, 0) when no candidate beats zero similarity.
    """
    best_sim = 0
    best_idx = 0
    for candidate in idx_list:
        sim = np.exp(-cosine_Distance_1(base_pym, indivual[candidate]) / lambd)
        if sim > best_sim:
            best_sim = sim
            best_idx = candidate
    return best_sim, best_idx


def getsim(base_pym, idx_list, reference, lambd):
    """Best similarity and matching node index for base_pym against every
    individual in `reference`; returns an (n, 2) array of
    [similarity, index] rows."""
    sims = []
    indices = []
    for individual in reference:
        best_sim, best_idx = getechsim(base_pym, idx_list, individual, lambd)
        sims.append(best_sim)
        indices.append(best_idx)
    return np.hstack((np.array(sims).reshape(-1, 1), np.array(indices).reshape(-1, 1)))


def getbase_ref(base_idx, base_dec):
    """Collect the coefficient dict (element [1]) of each decomposition in
    base_dec selected by base_idx; returns None when base_idx is empty."""
    if len(base_idx) == 0:
        return None
    return [base_dec[i][1] for i in base_idx]


def getsypyc(temp_map, base_sample, base_ref, reference_dec, ration, factor):
    """Synthesize mutated wavelet-packet coefficients for the base sample.

    For each (node id, level array) pair in temp_map, build a similarity-
    weighted average of the best-matching packets from the normal reference
    set (and, when available, from the other fault samples), then blend:
    ration * base_packet + weighted_average.

    Returns a list of [synthesized, original] coefficient-array pairs.

    Fixes vs. original: `is not None` instead of `!= None`, no shadowing of
    the builtin `sum`, and node ids read out of float arrays are converted
    to int before being used as dict keys.
    """
    sim_1 = getsimilarity(base_sample, reference_dec, temp_map, factor)
    sim_2 = getsimilarity(base_sample, base_ref, temp_map, factor) if base_ref is not None else None
    pyc = []
    for i in range(len(temp_map)):
        pyc_base = base_sample[1][temp_map[i][0]]
        blended = np.zeros(len(pyc_base))
        total_weight = 0.0
        # contributions from the normal reference set
        for j in range(sim_1[i].shape[0]):
            weight = sim_1[i][j, 0]
            node_id = int(sim_1[i][j, 1])
            blended += weight * reference_dec[j][node_id]
            total_weight += weight
        # contributions from the remaining fault samples, if any
        if sim_2 is not None:
            for k in range(sim_2[i].shape[0]):
                weight = sim_2[i][k, 0]
                node_id = int(sim_2[i][k, 1])
                blended += weight * base_ref[k][node_id]
                total_weight += weight
        pyc.append([pyc_base * ration + blended / total_weight, pyc_base])
    return pyc


def getfinalsynSample(temp_map, base_sample, pyc_syc):
    """Write the synthesized coefficients back into the wavelet-packet tree
    and reconstruct the time-domain signal."""
    wp = base_sample[0]
    # Reconstruct once up-front; pywt updates the tree's node data in place.
    raw = pywt.WaveletPacket.reconstruct(wp)
    raw_std = np.std(raw)  # kept for parity with the original (otherwise unused)
    for i in range(len(temp_map)):
        # map the 1-based node id back to its path and overwrite its data
        path = base_sample[2][temp_map[i][0] - 1]
        wp[path].data = pyc_syc[i][0]
    return pywt.WaveletPacket.reconstruct(wp)

#%%
"""
lev: wavelet-packet decomposition level
num: number of normal samples in the reference set
nor_sample: normal samples from the training set
k_short: number of fault samples
max_num: maximum number of mutated wavelet packets in the base sample
ration: proportion kept from the original (mutated) packet coefficients
contal_factor: control factor in the similarity computation
"""
def getsysample(lev, num, nor_sample, k_short, fault_sample, max_num, ration, contal_factor):
    """Generate one synthetic fault sample by mutating randomly chosen
    wavelet-packet coefficients of a randomly chosen base fault sample.

    lev: decomposition level; num: size of the normal reference set;
    nor_sample: normal training samples; k_short: number of fault samples;
    max_num: max number of mutated packets; ration: share kept from the
    original packet; contal_factor: similarity control factor.
    """
    # pick `num` normal samples as the reference set and decompose them
    ref_rows = np.random.choice(len(nor_sample), num, replace=False)
    reference_dec = getwpd_plt(nor_sample[ref_rows], lev)
    k_fault_dc = getwpd_plt_base(fault_sample, lev)
    # choose the base fault sample; the rest become extra references
    b_idx, b_ref = getbase_idx(k_short)
    base_sample = k_fault_dc[b_idx]
    base_ref = getbase_ref(b_ref, k_fault_dc)

    seq_num = len(base_sample[1])  # number of packet nodes after decomposition
    # how many packet coefficient sets to mutate, and which node ids
    idx_n = np.random.choice(range(1, max_num), 1, replace=False)[0]
    idx_f = np.random.choice(range(1, seq_num + 1), idx_n, replace=False)
    # frequency band (level array) of each selected id
    temp_map = getmap(idx_f, base_sample[-1])
    # synthesize the mutated coefficients and rebuild the signal
    pyc_syc = getsypyc(temp_map, base_sample, base_ref, reference_dec, ration, contal_factor)
    return getfinalsynSample(temp_map, base_sample, pyc_syc)

def getkshort_test(sampleset, k_short, test_num):
    """Split `sampleset` into the first k_short rows (train) and the next
    test_num rows (test)."""
    all_rows = list(range(len(sampleset)))
    copied = sampleset[all_rows]  # fancy indexing keeps the original copy semantics
    k_train = copied[0:k_short, :]
    fault_test = copied[k_short:k_short + test_num, :]
    return k_train, fault_test
def getrawtrain_test(dataset, k_short, test_num, train_num, normal_train, normal_test, label_train, label_test):
    """Assemble per-class train/test splits and labels.

    The normal class goes first, followed by one entry per fault class in
    `dataset` (labelled 1, 2, ...). `train_label1` sizes each fault label
    vector as train_num + k_short, anticipating augmentation.
    """
    train_data = [normal_train]
    test_data = [normal_test]
    train_label = [label_train]
    train_label1 = [label_train]
    test_label = [label_test]

    for cls, fault in enumerate(dataset, start=1):
        fault_train, fault_test = getkshort_test(fault, k_short, test_num)
        train_data.append(fault_train)
        test_data.append(fault_test)
        train_label.append(np.ones(len(fault_train)) * cls)
        train_label1.append(np.ones(train_num + k_short) * cls)
        test_label.append(np.ones(test_num) * cls)

    return train_data, test_data, train_label, train_label1, test_label
def dataaugmentation(sample_num, train_data, lev, num, nor_sample, k_short, max_num, ration, contal_factor):
    """Append `sample_num` synthetic samples to every fault class in
    train_data (index 0, the normal class, is left untouched).

    Mutates train_data in place and returns it.
    """
    for cls in range(1, len(train_data)):
        fault_sample = train_data[cls]
        synthesized = [
            getsysample(lev, num, nor_sample, k_short, fault_sample,
                        max_num, ration, contal_factor).reshape(1, -1)
            for _ in range(sample_num)
        ]
        train_data[cls] = np.vstack((fault_sample, np.concatenate(synthesized)))
    return train_data