import numpy as np
from Processing import Processing
from configuration_file import configuration_file
from openpyxl import Workbook
import os
import time
from rankboostYin import *
import xlrd
import shutil
from PerformanceMeasure import PerformanceMeasure
from RankSVM import RankSVM
from LTR import *
from IG import IG

# import warnings
# warnings.filterwarnings('ignore')

# Header row for the result sheets produced by the RankLib-jar experiments.
# First column "数据集" (= dataset name), then the eight rankers in the fixed
# order filled in by Jar_methods / Predictdensity below.
jar_header = ["数据集", "RankingSVM", "RankBoost", "RankNet", "LambdaRank", "ListNet", "AdaRank",
              "CoordinateAscent", "LTR"
              ]
# Grid of C values for tuning RankingSVM and the cross-validation fold count.
# NOTE(review): neither name is referenced anywhere in this file — possibly
# imported by another module or left over from an earlier version; confirm
# before removing.
RS_tuned_parameters = [{'C': [0.01, 0.1, 1, 10, 100]}]
cv_times = 3


def Jar_methods(training_data_X, training_data_y, testing_data_X, testing_data_y, codeN, filename):
    """Evaluate eight learning-to-rank methods on one train/test split.

    Runs RankingSVM (slot 0), six RankLib jar rankers (slots 1-6) and LTR
    (slot 7) and scores each prediction with FPA, CLC and PofB20.

    Parameters
    ----------
    training_data_X, training_data_y : training features / defect counts.
    testing_data_X, testing_data_y   : test features / defect counts.
    codeN    : module sizes (LOC) — unused here, kept for interface
               compatibility with Predictdensity.
    filename : dataset name; used for jar working files and as the first
               cell of each returned row.

    Returns
    -------
    Three rows ``[filename, m0, ..., m7]`` — one each for FPA, CLC and
    PofB20 — in the ranker order of ``jar_header``. A measure that raises
    is logged and recorded as the sentinel 99999 instead of aborting the
    experiment (this now also covers the RankingSVM slot, which the
    original code left unprotected).
    """
    fpa1_8 = [0] * 8
    clc1_8 = [0] * 8
    pofb20_1_8 = [0] * 8

    def _eval(measure_fn):
        # Best-effort evaluation: log the failure and return the sentinel
        # so one broken ranker does not kill the whole bootstrap run.
        try:
            return measure_fn()
        except Exception as e:
            print(e)
            return 99999

    def _three_measures(pred_y):
        # FPA / CLC / PofB20 for one prediction vector.
        return (_eval(lambda: PerformanceMeasure(testing_data_y, pred_y).FPA()),
                _eval(lambda: PerformanceMeasure(testing_data_y, pred_y).CLC()),
                _eval(lambda: PerformanceMeasure(testing_data_y, pred_y).PofB20(0.2)))

    # Slot 0: RankingSVM.
    rs_pred_y = RankSVM().fit(training_data_X, training_data_y).predict(testing_data_X)
    fpa1_8[0], clc1_8[0], pofb20_1_8[0] = _three_measures(rs_pred_y)

    # Slots 1-6: RankLib jar rankers, in our fixed reporting order.
    # 2: RankBoost  1: RankNet  5: LambdaRank  7: ListNet  3: AdaRank  4: Coordinate Ascent
    method_index = [2, 1, 5, 7, 3, 4]
    train_dat_path, test_dat_path = RankalgorithmCreatedata(training_data_X, training_data_y, testing_data_X, testing_data_y, filename)
    for slot, ranker in enumerate(method_index, start=1):
        print("目前的ranker为：", ranker)
        pred_bug = RankalgorithmTrainandtest(ranker, filename, train_dat_path, test_dat_path)
        fpa1_8[slot], clc1_8[slot], pofb20_1_8[slot] = _three_measures(pred_bug)

    # Slot 7 (last): LTR tuned by differential evolution.
    ltr = LTR(NP=100, F_CR=[(1.0, 0.1), (1.0, 0.9), (0.8, 0.2)], generation=100, len_x=len(training_data_X[0]),
              value_up_range=20.0,
              value_down_range=-20.0, X=training_data_X, y=training_data_y)
    ltrmaxpara = ltr.process()
    ltr_pred_y = ltr.predict(testing_data_X, ltrmaxpara)
    fpa1_8[-1], clc1_8[-1], pofb20_1_8[-1] = _three_measures(ltr_pred_y)

    return [filename] + list(fpa1_8), [filename] + list(clc1_8), \
           [filename] + list(pofb20_1_8)


def Predictdensity(training_data_X, training_data_y, testing_data_X, testing_data_y, codeN, filename):
    """Evaluate the eight rankers on the defect-DENSITY prediction task.

    Here X is the 19-dimensional feature vector (LOC column removed) and y
    is defect density; predictions are converted back to bug counts by
    multiplying with the module sizes in ``codeN`` before scoring with OPT
    and PofBS20.

    Parameters
    ----------
    training_data_X, training_data_y : training features / defect densities.
    testing_data_X, testing_data_y   : test features / defect densities.
    codeN    : per-module LOC of the test set, used both to rescale density
               to bug counts and inside OPT / PofBS20.
    filename : dataset name; used for jar working files and as the first
               cell of each returned row.

    Returns
    -------
    Two rows ``[filename, m0, ..., m7]`` — one for OPT and one for
    PofBS20 — in the ranker order of ``jar_header``. A failing measure is
    logged and recorded as the sentinel 99999.
    """
    opt1_8 = [0] * 8
    pofbs20_1_8 = [0] * 8
    loc = np.array(codeN)
    # Ground-truth bug counts are the same for every ranker — compute once.
    testing_data_bugs = testing_data_y * loc

    def _eval(measure_fn):
        # Best-effort evaluation: log and return the sentinel on failure.
        try:
            return measure_fn()
        except Exception as e:
            print(e)
            return 99999

    def _two_measures(pred_density):
        # Rescale predicted density to predicted bug counts, then score.
        pred_bugs = pred_density * loc
        return (_eval(lambda: PerformanceMeasure(testing_data_bugs, pred_bugs).OPT(codeN)),
                _eval(lambda: PerformanceMeasure(testing_data_bugs, pred_bugs).PofBS20(codeN, 0.2)))

    # Slot 0: RankingSVM.
    rs_pred_y = RankSVM().fit(training_data_X, training_data_y).predict(testing_data_X)
    opt1_8[0], pofbs20_1_8[0] = _two_measures(rs_pred_y)

    # Slots 1-6: RankLib jar rankers, in our fixed reporting order.
    # 2: RankBoost  1: RankNet  5: LambdaRank  7: ListNet  3: AdaRank  4: Coordinate Ascent
    method_index = [2, 1, 5, 7, 3, 4]
    train_dat_path, test_dat_path = RankalgorithmCreatedata(training_data_X, training_data_y, testing_data_X, testing_data_y, filename)
    for slot, ranker in enumerate(method_index, start=1):
        print("目前的ranker为：", ranker)
        pred_bug = RankalgorithmTrainandtest(ranker, filename, train_dat_path, test_dat_path)
        opt1_8[slot], pofbs20_1_8[slot] = _two_measures(pred_bug)

    # Slot 7 (last): LTR tuned by differential evolution.
    ltr = LTR(NP=100, F_CR=[(1.0, 0.1), (1.0, 0.9), (0.8, 0.2)], generation=100, len_x=len(training_data_X[0]),
              value_up_range=20.0,
              value_down_range=-20.0, X=training_data_X, y=training_data_y)
    ltrmaxpara = ltr.process()
    ltr_pred_y = ltr.predict(testing_data_X, ltrmaxpara)
    opt1_8[-1], pofbs20_1_8[-1] = _two_measures(ltr_pred_y)

    return [filename] + list(opt1_8), [filename] + list(pofbs20_1_8)


if __name__ == '__main__':
    # If is_remain_origin_bootstrap_csv is False, the bootstrap CSVs must be
    # regenerated: delete the old bootstrap directory and re-split every
    # dataset into train/test CSVs.
    if not configuration_file().is_remain_origin_bootstrap_csv:
        if os.path.exists(configuration_file().bootstrap_dir):
            shutil.rmtree(configuration_file().bootstrap_dir)
        for dataset, filename in Processing().import_single_data():
            print("开始处理文件：{0}".format(filename))
            Processing().split_train_test_csv(dataset, filename)

    # After splitting, the train/test CSVs live in the configured folders.
    # Run the rankers on every bootstrap sample and collect one result table
    # per performance measure, written to Excel after each dataset finishes.
    print("开始时间：", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    starttime = time.time()
    for train_data_x_list, train_data_y_list, test_data_x_list, test_data_y_list, filename in Processing().read_bootstrap_csv():
        # One table per measure, each starting with the shared header row.
        fpa_list = [jar_header]
        opt_list = [jar_header]
        clc_list = [jar_header]
        pofb20_list = [jar_header]
        pofbs20_list = [jar_header]

        print(len(train_data_x_list), len(train_data_y_list), len(test_data_x_list), len(test_data_y_list))
        for i in range(len(train_data_x_list)):
            # Pull out the i-th bootstrap sample.
            training_data_X = np.array(train_data_x_list[i])
            training_data_y = np.array(train_data_y_list[i])
            testing_data_X = np.array(test_data_x_list[i])
            testing_data_y = np.array(test_data_y_list[i])

            # 19-dimensional variant for the density task — presumably drops
            # the LOC column and turns y into defect density (see
            # Processing.change_to_newdata; TODO confirm).
            new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y = \
                Processing().change_to_newdata(training_data_X, training_data_y, testing_data_X, testing_data_y)
            print("******************20维特征选择前**********************")
            print(training_data_X.shape)
            print(training_data_y.shape)
            print(testing_data_X.shape)
            print(testing_data_y.shape)
            print("********************************************")

            # Column 10 is assumed to be the LOC column of the 20-dim data —
            # TODO confirm against the dataset schema.
            testingcodeN = [row[10] for row in testing_data_X]

            # Information-gain feature selection down to 10 features on the
            # 20-dimensional (count) data.
            training_data_X, training_data_y, testing_data_X, testing_data_y = IG(training_data_X, training_data_y,
                                                                                  testing_data_X,
                                                                                  testing_data_y).getSelectedFeature(10)
            print("特征选择的个数是{}".format(len(training_data_X[0])))
            training_data_X = np.array(training_data_X)
            training_data_y = np.array(training_data_y)
            testing_data_X = np.array(testing_data_X)
            testing_data_y = np.array(testing_data_y)
            print("**************20维特征选择后****************")
            print(training_data_X.shape)
            print(training_data_y.shape)
            print(testing_data_X.shape)
            print(testing_data_y.shape)
            print("********************************************")
            print("************************19维特征选择前**************************************")
            print(new_train_data_x.shape)
            print(new_train_data_y.shape)
            print(new_test_data_x.shape)
            print(new_test_data_y.shape)
            print("***************************************************************************")

            # Same feature selection on the 19-dimensional (density) data.
            new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y = IG(new_train_data_x,
                        new_train_data_y, new_test_data_x, new_test_data_y).getSelectedFeature(10)
            new_train_data_x = np.array(new_train_data_x)
            new_train_data_y = np.array(new_train_data_y)
            new_test_data_x = np.array(new_test_data_x)
            new_test_data_y = np.array(new_test_data_y)
            print("************************19维特征选择后**************************************")
            print(new_train_data_x.shape)
            print(new_train_data_y.shape)
            print(new_test_data_x.shape)
            print(new_test_data_y.shape)
            print("***************************************************************************")

            # Count-based measures on the 20-dim data; density-based measures
            # on the 19-dim data.
            FPA, CLC, PofB20 = Jar_methods(training_data_X, training_data_y, testing_data_X,
                                           testing_data_y, testingcodeN, filename)
            OPT, PofBS20 = Predictdensity(new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y, testingcodeN, filename)
            print("-------------------------------")
            print("FPA ", FPA)
            print("OPT ", OPT)
            print("CLC ", CLC)
            print("PofB20 ", PofB20)
            print("PofBS20 ", PofBS20)
            print("-------------------------------")
            fpa_list.append(FPA)
            opt_list.append(OPT)
            clc_list.append(CLC)
            pofb20_list.append(PofB20)
            pofbs20_list.append(PofBS20)

        # One workbook per measure. NOTE: "slelect" is a historical typo kept
        # on purpose so existing consumers of these files keep finding them.
        result_path = configuration_file().save_five_measures_dir
        for measure_name, rows in (("fpa", fpa_list), ("opt", opt_list),
                                   ("clc", clc_list), ("pofb20", pofb20_list),
                                   ("pofbs20", pofbs20_list)):
            out_path = os.path.join(result_path, filename + "_slelect_jar_" + measure_name + ".xlsx")
            Processing().write_excel(out_path, rows)

    print("结束时间：", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    endtime = time.time()
    print('耗用时间:', endtime - starttime, '秒')
