import json
import os
import numpy as np
import numpy.matlib
import pandas as pd
import scipy.io as scio
from imblearn.over_sampling import SMOTE
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, auc, recall_score, roc_auc_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.semi_supervised import LabelSpreading
from supervised.dataLoad import load_data, train_data_process, test_data_process
from JsonToExecel.deal import save_outcome
# Load the per-dataset hyper-parameter configuration (labelRatio, Gamma, Mu,
# xMaxScaler, yScaler, threshold, ...) once at import time; consumed by
# semi_supervise().  The encoding is pinned to UTF-8 so the platform default
# codec (e.g. cp1252 on Windows) cannot mis-decode the JSON file.
with open("./config.json", 'r', encoding='utf-8') as load_file:
    CONFIG = json.load(load_file)

def benchmark():
    """Run the semi-supervised learning benchmark over every dataset group.

    For each .mat file under ./dataSetMat/<group>/ this:
      1. loads the train/test split and shuffles the training rows in place,
      2. remaps label 2 -> -1 where a dataset uses {1, 2} label coding,
      3. balances the training set with SMOTE (falling back from 3 neighbors
         to 1 when the minority class is too small),
      4. runs semi_supervise() and writes the resulting metrics to
         ./outcome/<group>/<name>.json.
    """
    # NOTE(review): the original listed datasets from the absolute path
    # 'D:/code/test/dataSetMat/' while loading them from './dataSetMat/';
    # the two only agree when cwd is D:/code/test.  Both now use the same
    # relative base — confirm the script is always launched from the
    # project root.
    base_path = './dataSetMat'
    dir_list = ['NASA', 'AEEEM', 'Relink', 'SOFTLAB']

    for directory in dir_list:
        for dataset in os.listdir(f'{base_path}/{directory}'):
            name = dataset.split('.')[0]
            # (fixed 'trian_path' typo; dropped the redundant train_name alias)
            train_path = f'{base_path}/{directory}/{name}.mat'

            data1_train, data1_test = load_data(train_path, name)

            # Shuffle so the labeled/unlabeled split made later in
            # semi_supervise() is random rather than file-ordered.
            np.random.shuffle(data1_train)

            X_train = data1_train[:, :-1]
            y_train = data1_train[:, -1]
            X_test = data1_test[:, :-1]
            y_test = data1_test[:, -1]

            # Some datasets code defects as {1, 2}; the model expects {1, -1}.
            # mean() > 1 detects the {1, 2} coding (a {1, -1} vector averages
            # at most 1).  Vectorized masking replaces the per-element loops.
            if y_train.mean() > 1:
                y_train[y_train == 2] = -1.0
            if y_test.mean() > 1:
                y_test[y_test == 2] = -1.0

            # Dataset expanding (class balancing) by SMOTE; retry with a
            # single neighbor when the minority class has fewer than 4
            # samples and k_neighbors=3 raises ValueError.
            smo_primary = SMOTE(random_state=0, k_neighbors=3)
            smo_fallback = SMOTE(random_state=0, k_neighbors=1)
            try:
                X_train, y_train = smo_primary.fit_resample(X_train, y_train)
            except ValueError:
                X_train, y_train = smo_fallback.fit_resample(X_train, y_train)

            result_save = {
                'method': 'Semi-Supervise_feature',
                'dataSet': directory,
                'subDataSet': name,
                'numberOfSubSet': len(data1_test),
                'result': [],
            }

            result_save['result'].append(
                semi_supervise(X_train, y_train, X_test, y_test, name))

            save_outcome(directory, name)

            # Persist the metrics as ./outcome/<group>/<name>.json.
            # exist_ok avoids the check-then-create race of the original.
            out_dir = "./outcome/" + directory + '/'
            os.makedirs(out_dir, exist_ok=True)
            with open(out_dir + name + ".json", "w") as f:
                json.dump(result_save, f, indent=4)

def semi_supervise(X: np.ndarray, y: np.ndarray, test_X: np.ndarray, test_y: np.ndarray, name: str):
    """Semi-supervised defect predictor with feature and sample weighting.

    Builds a KNN-derived graph Laplacian over the training data, propagates
    labels to the unlabeled tail with LabelSpreading, then alternately
    optimizes a feature-weight vector (Omega) and per-unlabeled-sample
    weights (Lambda_vector) by Newton / iteratively reweighted least squares
    steps (the "Eq. N" comments below refer to the source paper's equations).
    Finally thresholds test_X @ Omega to produce {1, -1} predictions.

    Args:
        X: training features, shape (n, d).  Rebound to a scaled copy inside.
        y: training labels; resized in place to (n, 1) and relabeled via the
           shared-memory view trainData_Y (this function mutates y).
        test_X: test features, shape (testSize, d).
        test_y: test labels in {1, -1}.
        name: dataset name used to look up hyper-parameters (labelRatio,
            Gamma, Mu, xMaxScaler, yScaler, threshold) in CONFIG.

    Returns:
        dict with keys 'precision:', 'recall:', 'pf:', 'F-measure:',
        'accuracy:', 'AUC:' mapping to the corresponding metric values.
    """

    # total number of training samples
    n = len(X)
    # feature dimensionality
    d = len(X[0])

    y.resize((n, 1))
    testSize = len(test_X)
    # labeled sample ratio
    # fraction of the training set treated as labeled (per-dataset config)
    labelRatio = CONFIG[name]['labelRatio']
    # number of labeled samples (the first l rows)
    l = int(n * labelRatio)
    # number of unlabeled samples (the remaining rows)
    u = n - l

    # γ and µ are hyper-parameters
    # γ: graph-regularization strength
    Gamma = CONFIG[name]['Gamma']
    # µ: weight of the unlabeled-sample loss term
    Mu = CONFIG[name]['Mu']
    # β: steepness constant in the sigmoid prior on the λ weights
    Beta = 5
    # ω: feature-weight vector, initialized to 0.5
    Omega = np.zeros((d, 1))
    Omega[:] = 0.5
    # λ: per-unlabeled-sample weight vector, initialized to 0.5
    Lambda_vector = np.zeros((u, 1))
    Lambda_vector[:] = 0.5

    # α: diagonal precision (inverse-covariance) prior matrix for ω
    A = np.zeros((d, d))
    for i in range(d):
        A[i, i] = 0.001
    # diagonal precision prior matrix for λ
    C = np.zeros((u, u))
    for i in range(u):
        C[i, i] = 0.001

    # Capture the local neighborhood structure of the training samples via
    # KNN and build the graph Laplacian L = D - S.
    print('通过KNN   捕捉训练样本的局部邻域关系 并且建立了图拉普拉斯矩阵')
    trainData_X = X
    trainData_Y = y.ravel()     # y and trainData_Y address the same memory
    # replace the original -1 label with 0, because in this method -1 means no label
    for i in range(n):
        trainData_Y[i] = 0 if trainData_Y[i] == -1 else trainData_Y[i]
    trainData_Y[l:] = -1

    # KNN classifier (5 neighbors) fit on the labeled prefix only
    KNN = KNeighborsClassifier(n_neighbors=5)
    KNN.fit(trainData_X[:l], trainData_Y[:l])
    # S: affinity matrix measuring neighborhood relations among training data
    S = np.zeros((n, n))
    # D: diagonal degree matrix
    D = np.zeros((n, n))
    # L: graph Laplacian
    L = np.zeros((n, n))
    # Edge weights: 10 for a pair sharing a known label; 1 when one sample is
    # unlabeled but KNN predicts it matches the other's known label; else 0.
    for i in range(n):
        for j in range(i, n):
            if trainData_Y[i] == trainData_Y[j] and trainData_Y[i] != -1:
                S[i][j] = 10
            elif (trainData_Y[i] == -1 and trainData_Y[j] > -1) and (KNN.predict(trainData_X[i:i+1]) == trainData_Y[j]):
                S[i][j] = 1
            elif (trainData_Y[j] == -1 and trainData_Y[i] > -1) and (KNN.predict(trainData_X[j:j+1]) == trainData_Y[i]):
                S[i][j] = 1
            else:
                S[i][j] = 0
            S[j][i] = S[i][j]
        D[i, i] = sum(S[i, :])

        # progress bar for the O(n^2) graph construction
        percent = 100 * (float((2*n-i)*(i+1)) / ((n+1)*n))
        show_str = ('[%%-%ds]' % 50) % (int(50*percent/100) * "#")
        print('\r%s %d%%' % (show_str, percent), end='')
    L = D - S


    # Obtain the pseudo-label vector yu for the unlabeled tail by label spreading
    print('\n通过标签传播获得伪向量 yu')
    LGC_rbf = LabelSpreading(kernel='knn', gamma=20,
                             n_neighbors=7, max_iter=150)
    LGC_rbf.fit(trainData_X, trainData_Y)
    trainData_Y[l:] = LGC_rbf.predict(trainData_X[l:])
    # change 0 back to the -1
    # NOTE(review): the 0 -> -1 restoration announced above is never actually
    # performed; confirm whether labels of 0 should be mapped back before the
    # optimization loop below consumes y.

    # Min-max normalize the data into [0, xMaxScaler]
    min_max_scaler = preprocessing.MinMaxScaler(
        (0, CONFIG[name]['xMaxScaler']))
    # training features
    X = min_max_scaler.fit_transform(X)
    # test features, scaled with the scaler fit on the training data
    # (the original comment mislabeled this line as "training Y")
    test_X = min_max_scaler.transform(test_X)
    # B = γ XᵀLX   (paper Eq. 2)
    B = Gamma * np.dot(np.dot(X.T, L), X)
    # NOTE: np.matlib.identity returns an np.matrix; once Lambda enters
    # g_omega below, the matrix type propagates into Omega — the [0, i]
    # indexing in the test section relies on this.
    Lambda = np.matlib.identity(n)
    # the paper's σ (per-sample sigmoid outputs)
    Sigma = np.zeros((n, 1))
    E = np.zeros((n, n))
    P = np.zeros((u, u))
    k_lambda = np.zeros((u, 1))
    Eu = np.zeros((u, u))
    O = np.zeros((u, u))
    Omega_old = np.ones((d, 1))
    Lambda_vector_old = np.zeros((u, 1))
    g_omega = np.zeros((d, 1))
    H_omega = np.zeros((d, d))
    Sig_omega = np.zeros((d, d))
    g_lambda = np.zeros((u, 1))
    H_lambda = np.zeros((u, u))
    Sig_lambda = np.zeros((u, u))
    G = np.zeros((d, d))
    cnt = 0
    # Alternate Newton updates of Omega and Lambda_vector until Omega
    # converges in the infinity norm (or 50 iterations elapse).
    while np.linalg.norm(Omega - Omega_old, ord=np.inf) > 0.001:
        print('dataset Name:', name, '--------')

        # Recompute the per-sample quantities the updates below require
        for i in range(n):
            if(i < l):
                # labeled sample: plain logistic output and its variance
                Sigma[i, 0] = 1 / (1 + np.exp(-1 * np.dot(X[i, :], Omega)))
                E[i, i] = Sigma[i, 0] * (1 - Sigma[i, 0])
            else:
                # unlabeled sample: logistic output weighted by λ
                Sigma[i, 0] = 1 / \
                    (1 + np.exp(-1 *
                                Lambda_vector[i-l, 0] * np.dot(X[i, :], Omega)))
                # NOTE(review): '*=' accumulates onto E's previous value
                # rather than assigning — verify against the paper.
                E[i, i] *= Mu * Lambda_vector[i-l, 0] * \
                    Lambda_vector[i-l, 0] * Sigma[i, 0] * (1 - Sigma[i, 0])
                Lambda[i, i] = Mu * Lambda_vector[i-l, 0]
                P[i-l, i-l] = np.dot(X[i, :], Omega)
                k_lambda[i-l, 0] = Beta * \
                    (1 - (1 / (1 + np.exp(-(Beta * Lambda_vector[i-l, 0])))))
                Eu[i-l, i-l] = Sigma[i, 0] * (1 - Sigma[i, 0])
                O[i-l, i-l] = Beta * Beta * (1 / (1 + np.exp(-(Beta * Lambda_vector[i-l, 0])))) * (
                    1 - (1 / (1 + np.exp(-(Beta * Lambda_vector[i-l, 0])))))
        # Only step Omega while its gradient is still small per dimension
        if(np.linalg.norm(g_omega[:, 0], ord=2) / d) < 0.001:
            # --- Eq. 10 --- gradient w.r.t. the feature weights
            g_omega = np.dot(np.dot(X.T, Lambda), (y - Sigma)) - \
                np.dot((A + B), Omega)
            # --- Eq. 11 --- Hessian matrix
            H_omega = -1 * (np.dot(np.dot(X.T, E), X) + A + B)
            Sig_omega = -1 * np.linalg.inv(H_omega)
            Omega_old = Omega.copy()
            # --- Eq. 9 --- iteratively reweighted least-squares (Newton) step
            Omega = Omega - np.dot(np.linalg.inv(H_omega), g_omega)
            print('gw:', np.mean(g_omega[:, 0]), ' gw_judge:', (np.linalg.norm(
                g_omega[:, 0], ord=2) / d), 'w_max', np.max(Omega, axis=0), 'w_min', np.min(Omega, axis=0))
        # Zero out Omega entries with negligible magnitude (feature pruning)
        for i in range(d):
            if(Omega[i, 0] != 0) and (abs(Omega[i, 0]) < 0.001):
                Omega[i, 0] = 0
        if(np.linalg.norm(g_lambda[:, 0], ord=2) / u) < 0.001:
            # --- Eq. 13 --- gradient w.r.t. the unlabeled-sample weights
            g_lambda = Mu * np.dot(P, (y[l:] - Sigma[l:])) - \
                np.dot(C, Lambda_vector) + k_lambda
            # --- Eq. 14 --- Hessian matrix
            H_lambda = -1 * ((Mu * np.dot(np.dot(P.T, Eu), P)) + C + O)
            Sig_lambda = -1 * np.linalg.inv(H_lambda)
            Lambda_vector_old = Lambda_vector.copy()
            # --- Eq. 12 --- Newton update of λ
            Lambda_vector = Lambda_vector - \
                np.dot(np.linalg.inv(H_lambda), g_lambda)
            print('gl:', np.mean(g_lambda[:, 0]), ' gl_judge:', (np.linalg.norm(
                g_lambda[:, 0], ord=2) / u), 'l_max', np.max(Lambda_vector, axis=0), 'l_min', np.min(Lambda_vector, axis=0))
        # Zero out Lambda entries with negligible magnitude (sample pruning)
        for i in range(u):
            if(Lambda_vector[i, 0] != 0) and (abs(Lambda_vector[i, 0]) < 0.001):
                Lambda_vector[i, 0] = 0
        # G = A⁻¹B (I + A⁻¹B)⁻¹ A⁻¹, used in the Eq. 18 hyper-parameter update
        G = np.dot(np.dot(np.dot(np.linalg.inv(A), B), np.linalg.inv(
            np.matlib.identity(d) + np.dot(np.linalg.inv(A), B))), np.linalg.inv(A))

        # Update the precision hyper-parameters for ω and λ
        for i in range(d):
            # --- Eq. 18 ---
            A[i, i] = 1 / (Omega[i, 0] * Omega[i, 0] +
                           G[i, i] + Sig_omega[i, i])
        for i in range(u):
            # --- Eq. 20 ---
            C[i, i] = 1 / (Lambda_vector[i, 0] *
                           Lambda_vector[i, 0] + Sig_lambda[i, i])

        # Convergence diagnostics (max change of λ and ω this iteration)
        print('lambda的最大值更新前后的差：', np.linalg.norm(Lambda_vector - Lambda_vector_old, ord=np.inf))
        print('omega最大值更新前后的差：', np.linalg.norm(Omega - Omega_old, ord=np.inf))
        cnt += 1
        # hard cap on the number of iterations
        if cnt == 50:
            break
    # --- Test ---
    predict_y = np.zeros(testSize)
    # Omega is an np.matrix by this point (see NOTE above), so flatten()
    # yields a (1, testSize) matrix — hence the [0, i] indexing below.
    predict_vector_y = np.dot(test_X, Omega).flatten()
    predict_vector_y *= CONFIG[name]['yScaler']
    threshold = CONFIG[name]['threshold']
    for i in range(testSize):
        if predict_vector_y[0, i] < threshold:
            predict_y[i] = -1
        else:
            predict_y[i] = 1
    # TP / FP / FN / TN counts for the result statistics
    print('predict_y:', predict_vector_y[0, :10])
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    for idx in range(len(test_y)):
        if test_y[idx] == 1 and predict_y[idx] == 1:
            tp += 1
        elif test_y[idx] == 1 and predict_y[idx] == -1:
            fn += 1
        elif test_y[idx] == -1 and predict_y[idx] == 1:
            fp += 1
        elif test_y[idx] == -1 and predict_y[idx] == -1:
            tn += 1
    # NOTE(review): p and pf can raise ZeroDivisionError when fp+tp == 0 or
    # fp+tn == 0; only the pd and pd+p denominators are guarded below.
    p = tp / (fp + tp)
    pf = fp / (fp + tn)
    if tp+fn == 0:
        tp += 1
    pd = tp / (tp + fn)
    if pd + p == 0:
        pd += 1
    F_measure = 2 * pd * p / (pd + p)
    """ print('precision:', 100 * p, '%')
    print('recall:', 100 * recall_score(test_y, predict_y), '%')
    print('pf:', 100 * pf, '%')
    print('F-measure:', 100 * F_measure, '%')
    print('accuracy:', 100 * accuracy_score(test_y, predict_y), '%')
    print('AUC:', 100 * roc_auc_score(test_y, predict_y), '%') """

    return {
        'precision:': p,
        'recall:': recall_score(test_y, predict_y),
        'pf:': pf,
        'F-measure:': F_measure,
        'accuracy:': accuracy_score(test_y, predict_y),
        'AUC:': roc_auc_score(test_y, predict_y)
    }



# Script entry point: run the full benchmark sweep when executed directly.
if __name__ == '__main__':
    benchmark()
