# Plots and evaluation for the Kendall tau (KT) coefficient
import pickle
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import kendalltau
from sklearn import neighbors
import matplotlib.pyplot as plt
import numpy as np
import Toy_experiment as exp
import gc

def calculate_KT(result, y_test, method, show_fig=True):
    """Compute Kendall's tau between predicted and true score rankings.

    Parameters
    ----------
    result : array-like
        Predicted scores, one per candidate.
    y_test : array-like
        Ground-truth scores, same length as ``result``.
    method : str
        Predictor name, used only in log output and plot titles.
    show_fig : bool
        When True, show a score scatter plot and a rank scatter plot
        (the rank plot is also saved to 'mlp.pdf').

    Returns
    -------
    float
        Kendall's tau coefficient. (New: the original returned None;
        callers that ignore the return value are unaffected.)
    """
    result = np.asarray(result)
    y_test = np.asarray(y_test)
    # rank[i] = position of element i in sorted order; argsort of an
    # argsort is the standard vectorized way to obtain ranks and is
    # equivalent to the original inverse-permutation loop.
    result_rank = np.argsort(np.argsort(result))
    y_test_rank = np.argsort(np.argsort(y_test))
    # Kendall's tau is rank-based, so correlating ranks is equivalent to
    # correlating the raw scores.
    KTau, _ = kendalltau(result_rank, y_test_rank)
    print('method: {:}, KTau: {:}'.format(method, KTau))
    print('--------------------try-end---------------------\n')
    if show_fig:
        # Raw predicted score vs. ground truth, with a y = x reference line.
        x = np.arange(0, 1, 0.01)
        y = x
        plt.figure(figsize=(5, 5))
        plt.plot(x, y, 'g', label='y_test = result')
        plt.scatter(result, y_test, s=1)
        plt.xlabel("predict_result")
        plt.ylabel("y_test")
        plt.title(f"method:{method}")
        plt.legend(loc="best")
        plt.show()

        # Predicted rank vs. true rank.
        x = np.arange(0, len(y_test), 0.1)
        y = x
        plt.figure(figsize=(6, 6))
        line_color = '#1F77D0'
        plt.plot(x, y, c=line_color, linewidth=1)
        point_color = '#FF4400'
        plt.scatter(result_rank, y_test_rank, c=point_color, s=2)
        plt.xlabel("predict_result")
        plt.ylabel("y_test")
        plt.title(f"method:{method}---KTau:{KTau}")
        # BUG FIX: the xmax=/xmin= (ymax=/ymin=) keywords were removed in
        # matplotlib 3.0; pass positional (min, max) instead.
        # NOTE(review): the fixed 5000 limit far exceeds the ~208 ranks used
        # in __main__ — confirm it is intended for larger experiments.
        plt.xlim(0, 5000)
        plt.ylim(0, 5000)
        plt.savefig('mlp.pdf')
        plt.show()
    return KTau
def n(x):
    """Return x! (the factorial of x).

    BUG FIX: the original loop multiplied by ``i`` over ``range(x)``,
    which starts at 0, so it returned 0 for every x >= 1; it also
    incremented the loop variable by hand, which is a no-op inside a
    Python ``for`` loop. The product now runs over 2..x.
    """
    num = 1
    for i in range(2, x + 1):
        num *= i
    return num
def get_win_result(test_num, pred_y):
    """Sum pairwise win predictions per candidate.

    Splits ``pred_y`` into ``test_num`` consecutive chunks and returns the
    per-chunk sums as a list of floats: entry i aggregates the pairwise
    comparisons attributed to candidate i.

    Fix: removed the unused local ``ord``, which shadowed the builtin.
    """
    win_result = np.zeros(test_num)
    for i, chunk in enumerate(np.array_split(pred_y, test_num)):
        win_result[i] = np.sum(chunk)
    return list(win_result)
def get_test_result(test_num, test):
    """Aggregate ground-truth pairwise outcomes per candidate.

    Splits ``test`` into ``test_num`` consecutive chunks and returns the
    sum of each chunk as a list of floats (mirrors ``get_win_result``).
    """
    totals = np.zeros(test_num)
    chunks = np.array_split(test, test_num)
    for idx in range(test_num):
        totals[idx] = chunks[idx].sum()
    return list(totals)
# MLP pairwise classifier: two hidden layers of 82 units, Adam optimizer,
# fixed seed for reproducibility; fitted and evaluated in __main__.
model_MLPClassifier = MLPClassifier(solver='adam', activation='relu', hidden_layer_sizes=(82,82,),random_state=123,
                                 batch_size=128,verbose=True)
# Random-forest baseline; currently only referenced from commented-out code.
model_ranforest=RandomForestClassifier(n_estimators=150,criterion='entropy',random_state=42)

if __name__ == '__main__':
    train_num = 210
    integers2one_hot = True
    data_augmentation = False
    method = 'mlp'

    # Build pairwise training data from the toy benchmark.
    train_metrics = exp.get_toy_metrics(train_num, type='train', train_num=train_num)
    x1,y1 = exp.get_upper_triangular_data(train_metrics, integers2one_hot=True, double_upper=False,
                                       additional_metrics=True, normalization=True)
    X1, Y1 = exp.getpair(x1,y1)
    # Re-seed before each shuffle so X1 and Y1 receive the identical
    # permutation and stay aligned.
    np.random.seed(12)
    np.random.shuffle(X1)
    np.random.seed(12)
    np.random.shuffle(Y1)
    print("-------modran.fit------")
    model_MLPClassifier.fit(X1, Y1.ravel())
    # Release the (large) training pair arrays before evaluation.
    del X1, Y1
    gc.collect()

    print("------------mlpssssssssssssss2-----------------------")
    # Evaluate on four fixed test sets of increasing size (205..208).
    for i in range(4):
        print("-------model_MLPClassifier.predict------")
        test_num=(205+i)
        test_metrics = exp.get_toy_metrics(test_num, type='fixed_test', train_num=test_num)
        x2,y1 = exp.get_upper_triangular_data(test_metrics, integers2one_hot=True, double_upper=False,
                                           additional_metrics=True, normalization=True)
        X2, Y2 = exp.getpair(x2,y1)
        print("-------modelran_kt------")
        # BUG FIX: predict on the fitted instance, not on the
        # MLPClassifier class itself (the original raised a TypeError).
        pred_y1 = model_MLPClassifier.predict(X2)
        score = model_MLPClassifier.score(X2,Y2)
        print("score",score)
        print("-------get_win_result------")
        win_result1 = get_win_result(test_num, pred_y1)
        test_result = get_test_result(test_num, Y2)
        calculate_KT(win_result1, test_result, 'mlp', show_fig=True)

    # NOTE(review): the original file ended with a live SVM section that
    # called exp.model_svm.fit(X1, ...) after X1/Y1 were deleted above and
    # then scored rankings against an undefined ``y_test`` — it could only
    # raise a NameError, so it has been removed, together with a large body
    # of commented-out experiments for other classifiers (decision tree,
    # knn, random forest, adaboost, gradient boosting, bagging, logistic
    # regression, GaussianNB, xgboost, QDA, MLP). Recover them from version
    # control if needed.