import numpy as np
from Processing import Processing
from configuration_file import configuration_file
from sklearn import linear_model
from openpyxl import Workbook
import os
import time
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import SGDRegressor
from rankboostYin import *
import xlrd
import shutil
from PerformanceMeasure import PerformanceMeasure
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from IG import IG
import warnings

warnings.filterwarnings('ignore')

# Excel header row: dataset name ("数据集") followed by the 6 classifiers and
# the 9 regressors evaluated below — 16 columns total, matching the rows built
# in Predictbugs / Predictdensity.
header = ["数据集", "NaiveBayes", "LogisticRegression", "DecisionTreeClassifier", "BaggingClassifier",
          "RandomForestClassifier", "KNeighborsClassifier", "DecisionTreeRegressor", "LinearRegression",
          "BayesianRidge", "MLPRegressor", "SupportVectorRegression", "KNeighborsRegressor",
          "GradientBoostingRegressor",
          "GaussianProcessRegressor", "SGDRegressor"
          ]

'''
分类方法调优参数
'''
# Grid-search parameter grids for the classification methods
# (the section marker above reads "tuning parameters for classifiers").
LR_tuned_parameters = [{'tol': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
DTC_tuned_parameters = [{'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 5, 7, 9]}]
BC_tuned_parameters = [{'n_estimators': [10, 20, 30, 40, 50]}]
RFC_tuned_parameters = [{'n_estimators': [10, 20, 30, 40, 50]}]
KNC_tuned_parameters = [{'n_neighbors': [1, 5, 9, 13, 17]}]
'''
回归方法的调优参数
'''
# Grid-search parameter grids for the regression methods
# (the section marker above reads "tuning parameters for regressors").
dtr_tuned_parameters = [{'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 5, 7, 9]}]
# NOTE(review): 'normalize' was deprecated in scikit-learn 1.0 and removed in
# 1.2 — confirm the pinned scikit-learn version still accepts it.
lr_tuned_parameters = [{'normalize': [True, False]}]
bayes_tuned_parameters = [{'tol': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
mlpr_tuned_parameters = [{'hidden_layer_sizes': [2, 4, 8, 16, 32, 64],
                          'batch_size': [8, 16, 32, 128, 256]}]
svr_tuned_parameters = [{'C': [0.01, 0.1, 1, 10, 100]}]
knr_tuned_parameters = [{'n_neighbors': [1, 5, 9, 13, 17]}]
gbr_tuned_parameters = [{'n_estimators': [100, 200, 300, 400, 500],
                         'min_samples_split': [2, 6, 10, 14, 18],
                         'min_samples_leaf': [1, 3, 5, 7, 9]}]
gpr_tuned_parameters = [{'n_restarts_optimizer': [0, 1, 2, 3, 4]}]
sgdr_tuned_parameters = [{'alpha': [0.1, 0.01, 0.001, 0.0001, 0.00001]}]
'''
其他可以调的参数
'''
# Other experiment-level knobs (the marker above reads "other tunable params").
cv_times = 3                 # cross-validation folds used by every GridSearchCV
number_of_booststrap = 10    # bootstrap rounds (spelling kept; referenced elsewhere?)
# NOTE(review): selectedFeatureNumber appears unused — the __main__ block
# hard-codes getSelectedFeature(10). Confirm which value is intended.
selectedFeatureNumber = 5


def reg_method(training_data_X, training_data_y, test_data_X, score_func):
    '''
    Tune 9 regression models with grid search and return their predictions
    on test_data_X (predictions are NOT rounded).

    When predicting defect density, grid search for "OPT" deliberately
    optimizes FPA, and grid search for "PofBS20" deliberately optimizes
    PofB20 (proxy objectives, as in the original experiment design).

    :param training_data_X: training feature matrix
    :param training_data_y: training targets (bug counts or densities)
    :param test_data_X: test feature matrix
    :param score_func: one of "OPT", "FPA", "CLC", "PofB20", "PofBS20"
    :return: list of (prediction array, model name) tuples, fixed order
    :raises ValueError: if score_func is not a recognized measure name
    '''
    print("into reg_method..........")
    # One place mapping the requested measure name to a scoring callable.
    # Previously this was five near-identical closures, and an unknown
    # score_func crashed later with a NameError on my_score.
    measure_funcs = {
        "OPT": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).FPA(),
        "FPA": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).FPA(),
        "CLC": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).CLC(),
        "PofB20": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).PofB20(0.2),
        "PofBS20": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).PofB20(0.2),
    }
    try:
        my_score = measure_funcs[score_func]
    except KeyError:
        raise ValueError("unknown score_func: {!r}".format(score_func))
    scorer = make_scorer(my_score, greater_is_better=True)

    # (estimator, parameter grid, reported name) for every regressor to tune.
    regressors = [
        (DecisionTreeRegressor(), dtr_tuned_parameters, 'DecisionTreeRegressor'),
        (linear_model.LinearRegression(), lr_tuned_parameters, 'LinearRegression'),
        (BayesianRidge(), bayes_tuned_parameters, 'BayesianRidge'),
        (MLPRegressor(), mlpr_tuned_parameters, 'MLPRegressor'),
        (SVR(), svr_tuned_parameters, 'SVR'),
        (KNeighborsRegressor(), knr_tuned_parameters, 'KNeighborsRegressor'),
        (GradientBoostingRegressor(), gbr_tuned_parameters, 'GradientBoostingRegressor'),
        (GaussianProcessRegressor(), gpr_tuned_parameters, 'GaussianProcessRegressor'),
        (SGDRegressor(), sgdr_tuned_parameters, 'SGDRegressor'),
    ]

    results = []
    for estimator, param_grid, name in regressors:
        grid = GridSearchCV(estimator, param_grid, cv=cv_times, scoring=scorer)
        grid.fit(training_data_X, training_data_y)
        results.append((grid.predict(test_data_X), name))
    return results


def ClassificationMethod(X, Y, testX, score_func):
    '''
    Train 6 classifiers (GaussianNB untuned, the other 5 grid-searched)
    and return each model's predicted probability of the defective class
    (label 1) for every row of testX.

    :param X: training feature matrix
    :param Y: binary training labels (1 = defective, 0 = clean)
    :param testX: test feature matrix
    :param score_func: one of "OPT", "FPA", "CLC", "PofB20", "PofBS20"
    :return: list of (probability list, model name) tuples, fixed order
    :raises ValueError: if score_func is not a recognized measure name
    '''
    print("into ClassificationMethod..........")
    # Same measure dispatch as reg_method: "OPT" optimizes FPA and
    # "PofBS20" optimizes PofB20 by design; unknown names now fail fast
    # instead of raising a NameError later.
    measure_funcs = {
        "OPT": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).FPA(),
        "FPA": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).FPA(),
        "CLC": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).CLC(),
        "PofB20": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).PofB20(0.2),
        "PofBS20": lambda realbug, predbug: PerformanceMeasure(realbug, predbug).PofB20(0.2),
    }
    try:
        my_score = measure_funcs[score_func]
    except KeyError:
        raise ValueError("unknown score_func: {!r}".format(score_func))
    scorer = make_scorer(my_score, greater_is_better=True)

    # GaussianNB has no hyper-parameters tuned here, so it skips GridSearchCV.
    gnb_pred = [p[1] for p in GaussianNB().fit(X, Y).predict_proba(testX)]
    results = [(gnb_pred, 'GaussianNB')]

    # (estimator, parameter grid, reported name) for the tuned classifiers.
    classifiers = [
        (LogisticRegression(), LR_tuned_parameters, 'LogisticRegression'),
        (DecisionTreeClassifier(), DTC_tuned_parameters, 'DecisionTreeClassifier'),
        (BaggingClassifier(), BC_tuned_parameters, 'BaggingClassifier'),
        (RandomForestClassifier(), RFC_tuned_parameters, 'RandomForestClassifier'),
        (KNeighborsClassifier(), KNC_tuned_parameters, 'KNeighborsClassifier'),
    ]
    for estimator, param_grid, name in classifiers:
        grid = GridSearchCV(estimator, param_grid, cv=cv_times, scoring=scorer)
        proba = grid.fit(X, Y).predict_proba(testX)
        # Column 1 of predict_proba is P(label == 1), i.e. P(defective).
        results.append(([p[1] for p in proba], name))
    return results


def Predictbugs(training_data_X, training_data_y, testing_data_X, testing_data_y, Cla_training_data_y, filename):
    '''
    Evaluate defect-count prediction, scored only with FPA, CLC and PofB20.

    Each measure is computed for the 6 classifiers followed by the 9
    regressors, so every returned row lines up with the global ``header``.

    :param training_data_X: training feature matrix
    :param training_data_y: training bug counts (regression targets)
    :param testing_data_X: test feature matrix
    :param testing_data_y: true bug counts of the test modules
    :param Cla_training_data_y: binarized labels (1 if bug count > 0)
    :param filename: dataset name placed in the first column of every row
    :return: (fpa_row, clc_row, pofb20_row), each ``[filename] + 15 scores``
    '''
    print("into Predictbugs................................")

    def _measure(measure_name, predicted):
        # Single mapping from measure name to PerformanceMeasure call.
        pm = PerformanceMeasure(testing_data_y, predicted)
        if measure_name == "FPA":
            return pm.FPA()
        if measure_name == "CLC":
            return pm.CLC()
        return pm.PofB20(0.2)

    def _cla_scores(measure_name):
        # One score per classifier, in ClassificationMethod's fixed order.
        preds = ClassificationMethod(training_data_X, Cla_training_data_y, testing_data_X, measure_name)
        return [_measure(measure_name, pred) for pred, _name in preds]

    def _reg_scores(measure_name):
        # One score per regressor, in reg_method's fixed order.
        preds = reg_method(training_data_X, training_data_y, testing_data_X, measure_name)
        return [_measure(measure_name, pred) for pred, _name in preds]

    # Keep the original evaluation order: all classifier runs first,
    # then all regressor runs.
    fpa1_6 = _cla_scores("FPA")
    clc1_6 = _cla_scores("CLC")
    pofb20_1_6 = _cla_scores("PofB20")
    fpa7_15 = _reg_scores("FPA")
    clc7_15 = _reg_scores("CLC")
    pofb20_7_15 = _reg_scores("PofB20")

    return [filename] + fpa1_6 + fpa7_15, \
           [filename] + clc1_6 + clc7_15, \
           [filename] + pofb20_1_6 + pofb20_7_15


def Predictdensity(training_data_X, training_data_y, testing_data_X, testing_data_y, Cla_training_data_y, testingcodeN,
                   filename):
    '''
    Evaluate defect-density prediction, scored with OPT and PofBS20.

    Here x is the 19-dimensional feature vector (the LOC column removed)
    and y is the defect density; predictions are converted back to bug
    counts by multiplying by each module's LOC before scoring.

    :param training_data_X: 19-dim training feature matrix
    :param training_data_y: training defect densities
    :param testing_data_X: 19-dim test feature matrix
    :param testing_data_y: true defect densities of the test modules
    :param Cla_training_data_y: binarized labels (1 if density > 0)
    :param testingcodeN: LOC of each test module
    :param filename: dataset name placed in the first column of every row
    :return: (opt_row, pofbs20_row), each ``[filename] + 15 scores``
    '''
    codeN = np.array(testingcodeN)
    # True bug counts, reconstructed once instead of inside every loop
    # iteration as before (the value is loop-invariant).
    testing_data_bugs = testing_data_y * codeN

    def _opt_scores(predictions):
        # density -> bug count via LOC, then score with OPT.
        return [PerformanceMeasure(testing_data_bugs, pred * codeN).OPT(testingcodeN)
                for pred, _name in predictions]

    def _pofbs20_scores(predictions):
        return [PerformanceMeasure(testing_data_bugs, pred * codeN).PofBS20(testingcodeN, 0.2)
                for pred, _name in predictions]

    # Keep the original evaluation order: classifiers first, then regressors.
    opt1_6 = _opt_scores(ClassificationMethod(training_data_X, Cla_training_data_y, testing_data_X, "OPT"))
    pofbs20_1_6 = _pofbs20_scores(ClassificationMethod(training_data_X, Cla_training_data_y, testing_data_X, "PofBS20"))
    opt7_15 = _opt_scores(reg_method(training_data_X, training_data_y, testing_data_X, "OPT"))
    pofbs20_7_15 = _pofbs20_scores(reg_method(training_data_X, training_data_y, testing_data_X, "PofBS20"))

    return [filename] + opt1_6 + opt7_15, [filename] + pofbs20_1_6 + pofbs20_7_15


if __name__ == '__main__':
    # Check is_remain_origin_bootstrap_csv in configuration_file.py.
    # If it is False, the bootstrap csv files must be regenerated.
    if not configuration_file().is_remain_origin_bootstrap_csv:
        # First delete the previous bootstrap folder, if it exists.
        if os.path.exists(configuration_file().bootstrap_dir):
            shutil.rmtree(configuration_file().bootstrap_dir)
        # Then redo the bootstrap sampling for every dataset.
        for dataset, filename in Processing().import_single_data():
            print("开始处理文件：{0}".format(filename))
            Processing().split_train_test_csv(dataset, filename)

    # After splitting, the train/test csv files are already in the configured
    # folders, so they can be read back to run the algorithms.
    # Results are accumulated in five lists (one per measure) and written to
    # Excel once all bootstrap rounds of a dataset have finished.

    print("开始时间：", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    starttime = time.time()
    for train_data_x_list, train_data_y_list, test_data_x_list, test_data_y_list, filename in Processing().read_bootstrap_csv():
        # One result table per measure; the first row is the column header.
        fpa_list = []
        opt_list = []
        clc_list = []
        pofb20_list = []
        pofbs20_list = []

        fpa_list.append(header)
        opt_list.append(header)
        clc_list.append(header)
        pofb20_list.append(header)
        pofbs20_list.append(header)

        print(len(train_data_x_list), len(train_data_y_list), len(test_data_x_list), len(test_data_y_list))
        print("================================")
        for i in range(len(train_data_x_list)):
            # Take the data of one bootstrap round.
            training_data_X = np.array(train_data_x_list[i])
            training_data_y = np.array(train_data_y_list[i])
            testing_data_X = np.array(test_data_x_list[i])
            testing_data_y = np.array(test_data_y_list[i])
            # Binary labels for the classifiers: defective iff bug count > 0.
            Cla_training_data_y = [1 if y > 0 else 0 for y in training_data_y]
            # Column 10 is presumably the LOC of each test module
            # -- TODO confirm against the dataset's column layout.
            testingcodeN = [i[10] for i in testing_data_X]

            print("**************20维特征选择前****************")
            print(training_data_X.shape)
            print(training_data_y.shape)
            print(testing_data_X.shape)
            print(testing_data_y.shape)
            print("********************************************")
            
            # Build the 19-dim x (LOC column removed) with defect-density y,
            # kept in new_train_data_x / new_train_data_y / new_test_data_x /
            # new_test_data_y for the density experiments.
            new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y = \
                Processing().change_to_newdata(training_data_X, training_data_y, testing_data_X, testing_data_y)

            # Information-gain feature selection on the 20-dim data.
            # NOTE(review): the feature count is hard-coded to 10 here while
            # the module-level selectedFeatureNumber is 5 — confirm intent.
            training_data_X, training_data_y, testing_data_X, testing_data_y = IG(training_data_X, training_data_y,
                                                                                  testing_data_X,
                                                                                  testing_data_y).getSelectedFeature(10)
            print("特征选择的个数是{}".format(len(training_data_X[0])))
            training_data_X = np.array(training_data_X)
            training_data_y = np.array(training_data_y)
            testing_data_X = np.array(testing_data_X)
            testing_data_y = np.array(testing_data_y)
            print("**************20维特征选择后****************")
            print(training_data_X.shape)
            print(training_data_y.shape)
            print(testing_data_X.shape)
            print(testing_data_y.shape)
            print("********************************************")

            print("************************19维特征选择前**************************************")
            print(new_train_data_x.shape)
            print(new_train_data_y.shape)
            print(new_test_data_x.shape)
            print(new_test_data_y.shape)
            print("***************************************************************************")
            # Binary labels for the density experiments (1 if density > 0).
            new_Cla_training_data_y = [1 if y > 0 else 0 for y in new_train_data_y]

            # Information-gain feature selection on the 19-dim data.
            new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y = IG(new_train_data_x, 
                        new_train_data_y, new_test_data_x, new_test_data_y).getSelectedFeature(10)
            new_train_data_x = np.array(new_train_data_x)
            new_train_data_y = np.array(new_train_data_y)
            new_test_data_x = np.array(new_test_data_x)
            new_test_data_y = np.array(new_test_data_y)
            print("************************19维特征选择后**************************************")
            print(new_train_data_x.shape)
            print(new_train_data_y.shape)
            print(new_test_data_x.shape)
            print(new_test_data_y.shape)
            print("***************************************************************************")
            # Bug-count experiments: FPA / CLC / PofB20 rows.
            FPA, CLC, PofB20 = Predictbugs(training_data_X, training_data_y, testing_data_X,
                                           testing_data_y, Cla_training_data_y, filename)

            # Defect-density experiments: OPT / PofBS20 rows.
            OPT, PofBS20 = Predictdensity(new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y,
                                          new_Cla_training_data_y, testingcodeN, filename)

            print("-------------------------------")
            print("FPA ", FPA)
            print("OPT ", OPT)
            print("CLC ", CLC)
            print("PofB20 ", PofB20)
            print("PofBS20 ", PofBS20)
            print("-------------------------------")
            fpa_list.append(FPA)
            opt_list.append(OPT)
            clc_list.append(CLC)
            pofb20_list.append(PofB20)
            pofbs20_list.append(PofBS20)

        result_path = configuration_file().save_five_measures_dir

        # NOTE(review): "slelect" in the output file names looks like a typo
        # for "select"; kept as-is because downstream tooling may expect it.
        fpa_csv_name = filename + "_slelect_fpa.xlsx"
        fpa_result_path = os.path.join(result_path, fpa_csv_name)
        Processing().write_excel(fpa_result_path, fpa_list)

        opt_csv_name = filename + "_slelect_opt.xlsx"
        opt_result_path = os.path.join(result_path, opt_csv_name)
        Processing().write_excel(opt_result_path, opt_list)

        clc_csv_name = filename + "_slelect_clc.xlsx"
        clc_result_path = os.path.join(result_path, clc_csv_name)
        Processing().write_excel(clc_result_path, clc_list)

        # NOTE(review): clc_result_path is reused for the pofb20/pofbs20
        # paths below; harmless, but separate variables would be clearer.
        pofb20_csv_name = filename + "_slelect_pofb20.xlsx"
        clc_result_path = os.path.join(result_path, pofb20_csv_name)
        Processing().write_excel(clc_result_path, pofb20_list)

        pofbs20_csv_name = filename + "_slelect_pofbs20.xlsx"
        clc_result_path = os.path.join(result_path, pofbs20_csv_name)
        Processing().write_excel(clc_result_path, pofbs20_list)

    print("结束时间：", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    endtime = time.time()
    print('耗用时间:', endtime - starttime, '秒')

    pass
