import numpy as np
import pandas as pd
from sklearn.utils import resample
import os
from configuration_file import configuration_file
from openpyxl import Workbook
from rankboostYin import *
# import xlrd


class Processing():
    """Dataset loading, bootstrap splitting and Excel I/O helpers for defect data."""

    def __init__(self):
        # Root directory holding the raw CSV datasets, taken from project config.
        cfg = configuration_file()
        self.folder_name = cfg.datafolderPath

    def import_data(self):
        '''

        读取文件夹中所有文件数据

        folder_name: 文件夹的名字

        return: 文件夹下所有文件的数据

        '''

        dataset = pd.core.frame.DataFrame()

        # In Mac the path use '/' to identify the secondary path
        folder_path = self.folder_name

        for root, dirs, files in os.walk(folder_path):

            for file in files:
                file_path = os.path.join(root, file)

                data1 = pd.read_csv(file_path)

                dataset = dataset.append(data1, ignore_index=True)

        return dataset

    def import_single_data(self):
        '''

        单独读取文件夹中的每一个数据集

        folder_name: 文件夹的名字

        return: 文件夹下单独数据集的数据

        '''

        dataset = pd.core.frame.DataFrame()

        folder_path = self.folder_name

        for root, dirs, files in os.walk(folder_path):

            for file in files:
                file_path = os.path.join(root, file)

                dataset = pd.read_csv(file_path)
                yield dataset, file

    def separate_data(self, original_data):
        '''

        用out-of-sample bootstrap方法产生训练集和测试集,参考论文An Empirical Comparison of Model Validation Techniques for DefectPrediction Models
        A bootstrap sample of size N is randomly drawn with replacement from an original dataset that is also of size N .
        The model is instead tested using the rows that do not appear in the bootstrap sample.
        On average, approximately 36.8 percent of the rows will not appear in the bootstrap sample, since the bootstrap sample is drawn with replacement.
        OriginalData:整个数据集

        return: 划分好的 训练集和测试集

        '''

        original_data = original_data.iloc[:, 3:]

        original_data = np.array(original_data)
        # 在此对原始读出来的数据进行筛选，只有loc这个特征不为0的我们才要
        print("筛选掉0行之前的原始数据shape为:{0}".format(original_data.shape))
        filter_list = []
        for each in original_data:
            if int(each[10]) != 0:
                filter_list.append(each.tolist())
        original_data = np.array(filter_list)
        print("筛选掉0行之后的原始数据shape为:{0}".format(original_data.shape))

        # 从originaldata中有放回的抽样，size(trainingdata)==size(originaldata)
        training_data = resample(original_data)

        k = len(training_data[0])
        # 先转换成list 在进行数据筛选

        original_data = original_data.tolist()

        training_data = training_data.tolist()

        testing_data = []

        for i in original_data:
            if i not in training_data:
                testing_data.append(i)

        testing_data = np.array(testing_data)
        training_data = np.array(training_data)
        training_data_X = training_data[:, 0:k - 1]
        training_data_y = training_data[:, k - 1]
        testing_data_X = testing_data[:, 0:k - 1]
        testing_data_y = testing_data[:, k - 1]

        return training_data_X, training_data_y, testing_data_X, testing_data_y

    def split_train_test_csv(self, dataset, filename):
        '''
        Repeatedly bootstrap-split ``dataset`` and persist every valid split
        as .xlsx workbooks until ``configuration_file().bootstrap_count``
        splits have been written.

        A draw is discarded and retried when either side's binarised labels
        are all 0 or all 1, or when the resulting arrays are not 2-D X / 1-D y.

        :param dataset: raw DataFrame of one original CSV file.
        :param filename: that CSV's file name; used to build the output folder
            and the per-split workbook names.
        '''
        count = 0
        # Effectively "loop until enough valid bootstraps"; the huge range is
        # just an upper bound on the number of retries.
        for _ in range(100000000):
            try:
                print("{}数据集第{}次bootstrap".format(filename, _))
                training_data_X, training_data_y, testing_data_X, testing_data_y = Processing().separate_data(dataset)
                print(training_data_X.shape, training_data_y.shape, testing_data_X.shape, testing_data_y.shape)
                # Binarise bug counts: any bugs -> 1, none -> 0.
                Cla_training_data_y = [1 if y > 0 else 0 for y in training_data_y]
                Cla_testing_data_y = [1 if y > 0 else 0 for y in testing_data_y]
                # NOTE(review): codeN (LOC column of the test rows) is computed
                # but never used below — TODO confirm it can be removed.
                codeN = [i[10] for i in testing_data_X]

                # If the labels are all zeros or all ones, discard this draw
                # and bootstrap again.
                if np.sum(Cla_training_data_y) == 0 or len(Cla_training_data_y) == np.sum(
                        Cla_training_data_y) or np.sum(Cla_testing_data_y) == 0 or \
                                len(Cla_testing_data_y) == np.sum(Cla_testing_data_y):
                    continue
                # Discard the draw when training_data_X is not a matrix or
                # training_data_y is not a vector (same for the test side).
                if len(np.shape(np.array(training_data_X))) != 2 or len(
                        np.shape(np.array(training_data_y))) != 1 or len(
                        np.shape(np.array(testing_data_X))) != 2 or len(np.shape(np.array(testing_data_y))) != 1:
                    continue

                count += 1

                # Write the split to .xlsx: outputs live under a per-dataset
                # folder named after `filename`, holding the train/test files.
                wb = Workbook()
                ws = wb.active
                # Write the training workbook: each row is X features + y.
                for i in range(len(training_data_X)):
                    l = []
                    l.extend(training_data_X[i])
                    l.append(training_data_y[i])
                    ws.append(l)
                dir_name = "".join(filename.split(".")[:-1])
                dir_name = os.path.join(configuration_file().bootstrap_dir, dir_name)
                train_csv_name = "".join(filename.split(".")[:-1]) + "_train_" + str(count) + ".xlsx"
                mkdir(dir_name)
                save_path = os.path.join(dir_name, train_csv_name)
                wb.save(save_path)

                # Write the testing workbook the same way.
                wb_ = Workbook()
                ws_ = wb_.active
                for i in range(len(testing_data_X)):
                    l = []
                    l.extend(testing_data_X[i])
                    l.append(testing_data_y[i])
                    ws_.append(l)
                dir_name = "".join(filename.split(".")[:-1])
                dir_name = os.path.join(configuration_file().bootstrap_dir, dir_name)
                # NOTE(review): variable name says "train" but this holds the
                # test workbook's file name.
                train_csv_name = "".join(filename.split(".")[:-1]) + "_test_" + str(count) + ".xlsx"
                mkdir(dir_name)
                save_path = os.path.join(dir_name, train_csv_name)
                wb_.save(save_path)

            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit — presumably intentional best-effort; confirm.
            except BaseException as BE:
                print("错误类型", BE)
                print(filename + "报错")
            finally:
                if count == configuration_file().bootstrap_count:
                    print("bootstrap数目已够：{}次！".format(configuration_file().bootstrap_count))
                    break
        pass

    def read_bootstrap_csv(self):
        '''
        Load the previously generated bootstrap train/test workbooks.

        All bootstrapped train/test .xlsx files live under the root directory
        ``configuration_file().bootstrap_dir``, one sub-folder per original
        CSV in the data folder.

        For each original CSV (e.g. forrest-0.7.csv bootstrapped
        ``bootstrap_count`` times, producing that many train and test
        workbooks) this yields:
            train_data_x_list: 3-D list — one 2-D feature matrix per train workbook
            train_data_y_list: 2-D list — one label vector per train workbook
            test_data_x_list:  3-D list — one 2-D feature matrix per test workbook
            test_data_y_list:  2-D list — one label vector per test workbook
            csv_file:          the original CSV file name

        NOTE(review): ``xlrd`` is used below but its import is commented out
        at the top of this file — presumably it is re-exported by
        ``from rankboostYin import *``; confirm, otherwise this raises
        NameError at runtime.
        '''
        folder = configuration_file().bootstrap_dir
        datafolderPath = configuration_file().datafolderPath
        csv_list = os.listdir(datafolderPath)
        for csv_file in csv_list:
            # File name without its extension; matches the sub-folder name.
            name = "".join(csv_file.split(".")[:-1])
            train_data_x_list = []
            train_data_y_list = []
            test_data_x_list = []
            test_data_y_list = []
            for i in range(1, configuration_file().bootstrap_count + 1):
                tmp_train_x = []
                tmp_train_y = []
                tmp_test_x = []
                tmp_test_y = []

                bootstrap_train_name = name + "_train_" + str(i) + ".xlsx"
                bootstrap_test_name = name + "_test_" + str(i) + ".xlsx"
                bootstrap_train_ = os.path.join(folder, name)
                bootstrap_train_path = os.path.join(bootstrap_train_, bootstrap_train_name)
                bootstrap_test_path = os.path.join(bootstrap_train_, bootstrap_test_name)

                # Stop at the first missing workbook pair.
                if not os.path.exists(bootstrap_train_path):
                    break
                if not os.path.exists(bootstrap_test_path):
                    break
                # Read the train workbook: last column is y, the rest is X.
                data = xlrd.open_workbook(bootstrap_train_path)

                sheeti = data.sheets()[0]
                nrows = sheeti.nrows  # rows
                ncols = sheeti.ncols  # columns

                for row in range(nrows):
                    row_list = []
                    tmp_train_y.append(sheeti.cell(row, ncols - 1).value)
                    for col in range(ncols - 1):
                        row_list.append(sheeti.cell(row, col).value)
                    tmp_train_x.append(row_list)
                train_data_x_list.append(tmp_train_x)
                train_data_y_list.append(tmp_train_y)

                # Read the test workbook the same way.
                data = xlrd.open_workbook(bootstrap_test_path)

                sheeti = data.sheets()[0]
                nrows = sheeti.nrows  # rows
                ncols = sheeti.ncols  # columns

                for row in range(nrows):
                    row_list = []
                    tmp_test_y.append(sheeti.cell(row, ncols - 1).value)
                    for col in range(ncols - 1):
                        row_list.append(sheeti.cell(row, col).value)
                    tmp_test_x.append(row_list)
                test_data_x_list.append(tmp_test_x)
                test_data_y_list.append(tmp_test_y)
            yield train_data_x_list, train_data_y_list, test_data_x_list, test_data_y_list, csv_file

    def write_excel(self, excel_path, data):
        '''
        Write a 2-D list to an .xlsx workbook, creating the parent directory
        when it does not exist yet.

        :param excel_path: destination path of the workbook.
        :param data: rows to write; a 2-D list whose elements are 1-D lists.
        '''
        # Split the directory off the path and make sure it exists.
        target_dir = str(os.path.split(excel_path)[0])
        print(target_dir)
        mkdir(target_dir)
        workbook = Workbook()
        sheet = workbook.active
        for row in data:
            sheet.append(row)
        workbook.save(excel_path)

    def change_to_newdata(self, training_data_X, training_data_y, testing_data_X, testing_data_y):
        '''
        :param training_data_X: 二维list，代表一对train_test中的train，每个子list下20维特征
        :param training_data_y: 一维list，是training_data_X对应的特征，也就是bug数目
        :param testing_data_X:二维list，代表一对train_test中的test，每个子list下20维特征
        :param testing_data_y:一维list，是testing_data_X对应的特征，也就是bug数目
        :return:新的数据，是19个特征x加缺陷密度y
        '''
        # 先取出我们要的行数据
        want_row = [i for i in range(len(training_data_X))]
        new_train_data_x = training_data_X[want_row]
        want_row = [i for i in range(len(testing_data_X))]
        new_test_data_x = testing_data_X[want_row]
        # 再取出我们要的列
        want_col = [j for j in range(0, 10)] + [j for j in range(11, len(training_data_X[0]))]
        new_train_data_x = new_train_data_x[:, want_col]
        new_test_data_x = new_test_data_x[:, want_col]

        # 取出代码行数那一列
        loc_train = training_data_X[:, [10]].squeeze()
        loc_test = testing_data_X[:, [10]].squeeze()
        # print(loc_test)
        # print(testing_data_y.shape)
        # 计算缺陷密度
        new_train_data_y = training_data_y / loc_train
        new_test_data_y = testing_data_y / loc_test
        return new_train_data_x, new_train_data_y, new_test_data_x, new_test_data_y