from openpyxl import Workbook
from sklearn import linear_model
import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

from PerformanceMeasure import PerformanceMeasure
from Processing import Processing
from RankSVM import RankSVM
from rankboostYin import RankalgorithmCreatedata, RankalgorithmTrainandtest

from multiprocessing.dummy import Pool as ThreadPool
import os
import configuration_file as configuration_file
from IG import IG

# Column headers shared by every result sheet: dataset name, 9 ranking-model
# columns, 10 regression models, 6 classifiers — 26 columns in total,
# matching the rows returned by ALLmethods.
header = ["数据集", "Rank0", "Rank1", "Rank2", "Rank3", "Rank4", "Rank5", "RankSVM", "Rank7", "Rank8",
          'LogisticRegression',
          'SGDRegressor',
          'GaussianProcessRegressor',
          'DecisionTreeRegressor',
          'LinearRegression',
          'BayesianRidge',
          'MLPRegressor',
          'SVR',
          'KNeighborsRegressor',
          'GradientBoostingRegressor',
          'GaussianNB',
          'LogisticRegression',
          'DecisionTreeClassifier',
          'BaggingClassifier',
          'RandomForestClassifier',
          'KNeighborsClassifier',
          ]

# ---------------------------------------------------------------------------
# Hyper-parameter grids for the REGRESSION models (used by reg_method).
# NOTE: the original file labelled this section "classification tuning
# parameters" and the next one "regression" — the two labels were swapped.
# ---------------------------------------------------------------------------
dtr_tuned_parameters = [{'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 4, 7, 9]}]
# NOTE(review): 'normalize' was removed from LinearRegression in
# scikit-learn 1.2 — confirm the pinned scikit-learn version still accepts it.
lr_tuned_parameters = [{'normalize': [True, False]}]
bayes_tuned_parameters = [{'tol': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
mlpr_tuned_parameters = [{'hidden_layer_sizes': [56, 128, 256, 512],
                          'batch_size': [8, 16, 32, 128, 256]}]
svr_tuned_parameters = [{'C': [0.01, 0.1, 1, 10, 100]}]
knr_tuned_parameters = [{'n_neighbors': [1, 5, 9, 13, 17]}]
gbr_tuned_parameters = [{'n_estimators': [100, 200, 300, 400, 500],
                         'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 5, 7, 9]}]
gpr_tuned_parameters = [{'n_restarts_optimizer': [0, 1, 2, 3, 4]}]
sgdr_tuned_parameters = [{'alpha': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
logr_tuned_parameters = [{'tol': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]

# ---------------------------------------------------------------------------
# Hyper-parameter grids for the CLASSIFICATION models (ClassificationMethod)
# and for RankSVM (RS_tuned_parameters).
# ---------------------------------------------------------------------------
LR_tuned_parameters = [{'tol': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
DTC_tuned_parameters = [{'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 5, 7, 9]}]
BC_tuned_parameters = [{'n_estimators': [10, 20, 30, 40, 50]}]
RFC_tuned_parameters = [{'n_estimators': [10, 20, 30, 40, 50]}]
KNC_tuned_parameters = [{'n_neighbors': [1, 5, 9, 13, 17]}]
RS_tuned_parameters = [{'C': [0.01, 0.1, 1, 10, 100]}]

# ---------------------------------------------------------------------------
# Other tunable settings.
# ---------------------------------------------------------------------------
cv_times = 3                 # number of cross-validation folds in GridSearchCV
number_of_booststrap = 10    # successful bootstrap rounds per dataset (misspelled name kept — other code references it)
selectedFeatureNumber = 5    # base number of IG-selected features (also used at +5 and +10)


def reg_method(training_data_X, training_data_y, test_data_X, score_func, codeN):
    """Tune and fit 10 regression models, then predict on test_data_X.

    score_func selects the GridSearchCV scoring metric: "OPT" uses
    PerformanceMeasure.OPT (weighted by the per-module code sizes codeN);
    anything else uses PerformanceMeasure.FPA.

    Returns a list of (predictions, model_name) tuples — one per model —
    with predictions rounded to the nearest integer (bug counts). The order
    matches the regression columns of the module-level `header`.
    (The original docstring claimed 7 models; 10 are actually returned.)
    """
    if score_func == "OPT":
        def my_score(realbug, predbug):
            # codeN is captured from the enclosing call.
            return PerformanceMeasure(realbug, predbug).OPT(codeN)
    else:
        def my_score(realbug, predbug):
            return PerformanceMeasure(realbug, predbug).FPA()

    scorer = make_scorer(my_score, greater_is_better=True)

    def _tuned(estimator, param_grid):
        # Grid-search the estimator with the selected scorer, then fit it.
        search = GridSearchCV(estimator, param_grid, cv=cv_times, scoring=scorer)
        search.fit(training_data_X, training_data_y)
        return search

    # Fit in the same order as the original code: estimators without a fixed
    # random_state consume the global RNG, so fit order affects reproducibility.
    fitted = {name: _tuned(estimator, grid) for estimator, grid, name in (
        (SGDRegressor(), sgdr_tuned_parameters, 'SGDRegressor'),
        (SVR(), svr_tuned_parameters, 'SVR'),
        (MLPRegressor(), mlpr_tuned_parameters, 'MLPRegressor'),
        # NOTE(review): LogisticRegression is a classifier used here as a
        # "regressor" — presumably the integer bug counts act as class labels.
        (LogisticRegression(), logr_tuned_parameters, 'LogisticRegression'),
        (GradientBoostingRegressor(), gbr_tuned_parameters, 'GradientBoostingRegressor'),
        (GaussianProcessRegressor(), gpr_tuned_parameters, 'GaussianProcessRegressor'),
        (KNeighborsRegressor(), knr_tuned_parameters, 'KNeighborsRegressor'),
        (DecisionTreeRegressor(), dtr_tuned_parameters, 'DecisionTreeRegressor'),
        (linear_model.LinearRegression(), lr_tuned_parameters, 'LinearRegression'),
        (BayesianRidge(), bayes_tuned_parameters, 'BayesianRidge'),
    )}

    # Output order must match the regression columns of `header`.
    output_order = ('LogisticRegression', 'SGDRegressor', 'GaussianProcessRegressor',
                    'DecisionTreeRegressor', 'LinearRegression', 'BayesianRidge',
                    'MLPRegressor', 'SVR', 'KNeighborsRegressor',
                    'GradientBoostingRegressor')
    return [(np.around(fitted[name].predict(test_data_X)), name) for name in output_order]


def ClassificationMethod(X, Y, testX, score_func, codeN):
    """Tune and fit 6 classifiers and return their positive-class probabilities.

    X / Y are the training features and binarized (0/1) defect labels; testX
    is the test feature matrix. score_func selects the GridSearchCV metric
    ("OPT" -> PerformanceMeasure.OPT weighted by codeN, otherwise FPA).

    Returns a list of (probabilities, model_name) tuples, where probabilities
    is a list of P(label == 1) per test row; order matches the classification
    columns of the module-level `header`.
    """
    if score_func == "OPT":
        def my_score(realbug, predbug):
            return PerformanceMeasure(realbug, predbug).OPT(codeN)
    else:
        def my_score(realbug, predbug):
            return PerformanceMeasure(realbug, predbug).FPA()

    scorer = make_scorer(my_score, greater_is_better=True)

    def _positive_proba(model):
        # Fit and keep only column 1 of predict_proba — assumes labels are
        # exactly {0, 1} so index 1 is the defective class.
        proba = model.fit(X, Y).predict_proba(testX)
        return [p[1] for p in proba]

    # GaussianNB has no tuning grid, so it is fitted directly.
    results = [(_positive_proba(GaussianNB()), 'GaussianNB')]
    for estimator, grid, name in (
            (LogisticRegression(), LR_tuned_parameters, 'LogisticRegression'),
            (DecisionTreeClassifier(), DTC_tuned_parameters, 'DecisionTreeClassifier'),
            (BaggingClassifier(), BC_tuned_parameters, 'BaggingClassifier'),
            (RandomForestClassifier(), RFC_tuned_parameters, 'RandomForestClassifier'),
            (KNeighborsClassifier(), KNC_tuned_parameters, 'KNeighborsClassifier')):
        search = GridSearchCV(estimator, grid, cv=cv_times, scoring=scorer)
        results.append((_positive_proba(search), name))
    return results


def _new_result_book():
    """Create a workbook whose active sheet already contains the shared header row."""
    workbook = Workbook()
    sheet = workbook.active
    sheet.append(header)
    return workbook, sheet


def bootstrap_single_data(dataset, filename):
    """Bootstrap-evaluate one dataset and write FPA/OPT results to .xlsx files.

    Loops until `number_of_booststrap` bootstrap rounds succeed. Each round
    evaluates every model on the full feature set and on IG-selected subsets
    of 5, 10 and 15 features, appending one row per round to eight result
    workbooks (FPA and OPT for each feature configuration). Workbooks are
    saved after every row so partial results survive a crash.
    """
    save_dir = configuration_file.configuration_file().saveResultsPath
    books = {}  # file-name prefix -> (workbook, sheet), created on first use

    def _record(prefix, row):
        # Append one result row under `prefix` and persist immediately.
        if prefix not in books:
            books[prefix] = _new_result_book()
        workbook, sheet = books[prefix]
        sheet.append(row)
        workbook.save(os.path.join(save_dir, prefix + filename + '.xlsx'))

    times = 0    # successful rounds
    attempt = 0  # all attempts, including rejected resamples (printed below)
    while times < number_of_booststrap:
        try:
            print("{}数据集第{}次bootstrap".format(filename, attempt))
            training_data_X, training_data_y, testing_data_X, testing_data_y = Processing().separate_data(dataset)
            Cla_training_data_y = [1 if y > 0 else 0 for y in training_data_y]
            Cla_testing_data_y = [1 if y > 0 else 0 for y in testing_data_y]
            # Column 10 is assumed to hold the per-module code size used by
            # OPT — TODO(review): confirm against the dataset schema.
            codeN = [i[10] for i in testing_data_X]

            # Resample when either split's binarized labels are all 0 or all 1.
            if np.sum(Cla_training_data_y) in (0, len(Cla_training_data_y)) or \
                    np.sum(Cla_testing_data_y) in (0, len(Cla_testing_data_y)):
                continue
            # Resample when X is not a 2-D matrix or y is not a 1-D vector.
            if len(np.shape(np.array(training_data_X))) != 2 or len(np.shape(np.array(training_data_y))) != 1 or len(
                    np.shape(np.array(testing_data_X))) != 2 or len(np.shape(np.array(testing_data_y))) != 1:
                continue

            # --- Full feature set ---
            fpa_row, opt_row = ALLmethods(training_data_X, training_data_y, testing_data_X, testing_data_y,
                                          Cla_training_data_y, Cla_testing_data_y, codeN, filename)
            _record('fpa_', fpa_row)
            _record('OPT_', opt_row)

            # --- IG feature selection at 5 / 10 / 15 features ---
            # Each subset is selected from the ORIGINAL matrices. (The original
            # code overwrote training_data_X etc. after the 5-feature run, so
            # the 10- and 15-feature selections only ever saw 5 columns — bug.)
            for k in (selectedFeatureNumber, selectedFeatureNumber + 5, selectedFeatureNumber + 10):
                sel_train_X, sel_train_y, sel_test_X, sel_test_y = IG(
                    training_data_X, training_data_y,
                    testing_data_X, testing_data_y).getSelectedFeature(k)
                print("特征选择的个数是{}".format(len(sel_train_X[0])))
                fpa_row, opt_row = ALLmethods(sel_train_X, sel_train_y, sel_test_X, sel_test_y,
                                              Cla_training_data_y, Cla_testing_data_y, codeN, filename)
                _record('fpa_featureselected_{}_'.format(k), fpa_row)
                _record('OPT_featureselected_{}_'.format(k), opt_row)

            times += 1
        except Exception as exc:
            # Narrowed from BaseException so Ctrl-C / SystemExit still abort the run.
            print("错误类型", exc)
            print(filename + "报错")
        finally:
            attempt += 1


def ALLmethods(training_data_X, training_data_y, testing_data_X, testing_data_y, Cla_training_data_y,
               Cla_testing_data_y, codeN, filename):
    """Evaluate all regression and classification models on one bootstrap split.

    Returns a pair of rows (fpa_row, opt_row); each row is
    [filename] + 9 ranking scores + 10 regression scores + 6 classification
    scores — 26 columns, matching the module-level `header`.
    """
    # Ranking-algorithm evaluation (RankSVM / rankboost via
    # RankalgorithmCreatedata / RankalgorithmTrainandtest) is currently
    # disabled; its 9 slots stay zero so the row layout still matches `header`.
    rank_fpa = [0] * 9
    rank_OPT = [0] * 9
    reg_fpa = [0] * 10
    reg_OPT = [0] * 10
    cla_fpa = [0] * 6
    cla_OPT = [0] * 6

    # Regression models — tuned with the FPA scorer for FPA and the OPT
    # scorer for OPT, hence two separate fits.
    for i, (pred, _name) in enumerate(
            reg_method(training_data_X, training_data_y, testing_data_X, "FPA", codeN)):
        reg_fpa[i] = PerformanceMeasure(testing_data_y, pred).FPA()
    for i, (pred, _name) in enumerate(
            reg_method(training_data_X, training_data_y, testing_data_X, "OPT", codeN)):
        reg_OPT[i] = PerformanceMeasure(testing_data_y, pred).OPT(codeN)

    # Classification models — evaluated on the binarized (0/1) labels.
    for i, (prob, _name) in enumerate(
            ClassificationMethod(training_data_X, Cla_training_data_y, testing_data_X, "FPA", codeN)):
        cla_fpa[i] = PerformanceMeasure(Cla_testing_data_y, prob).FPA()
    for i, (prob, _name) in enumerate(
            ClassificationMethod(training_data_X, Cla_training_data_y, testing_data_X, "OPT", codeN)):
        cla_OPT[i] = PerformanceMeasure(Cla_testing_data_y, prob).OPT(codeN)

    return [filename] + rank_fpa + reg_fpa + cla_fpa, [filename] + rank_OPT + reg_OPT + cla_OPT


# FPA scoring helper used when tuning hyper-parameters.


def my_fpa_score(realbug, predbug):
    """Return the FPA of the predicted bug counts against the real ones."""
    measure = PerformanceMeasure(realbug, predbug)
    return measure.FPA()


def my_opt_score(realbug, predbug, codeN):
    """Return the OPT score of *predbug* against *realbug*, weighted by *codeN*."""
    measure = PerformanceMeasure(realbug, predbug)
    return measure.OPT(codeN)


# Parallel lists populated in the __main__ block: datasetList[i] holds the
# raw data of the dataset whose file name is filenameList[i].
datasetList = []
filenameList = []


def main(inde):
    """Thread-pool entry point: bootstrap-evaluate the dataset at index *inde*."""
    data = datasetList[inde]
    name = filenameList[inde]
    bootstrap_single_data(data, name)


if __name__ == '__main__':

    # Load every dataset up front so the worker threads can index the
    # shared module-level lists.
    for dataset, filename in Processing().import_single_data():
        datasetList.append(dataset)
        filenameList.append(filename)
    print("测试数据集的个数是：{}".format(len(filenameList)))

    # Thread pool (multiprocessing.dummy), 3 workers, one task per dataset.
    pool = ThreadPool(3)
    try:
        # map() blocks until every dataset is processed; return values are
        # discarded because bootstrap_single_data writes its output to disk.
        pool.map(main, range(len(filenameList)))
    finally:
        # Ensure the pool is shut down even if a worker raises.
        pool.close()
        pool.join()
