
import numpy as np
import pandas as pd

# Visualization 形象化[ˌvɪʒʊəlaɪ'zeɪʃn]
import matplotlib as mpl
import matplotlib.pyplot as plt
import missingno
import seaborn as sns
from pandas.plotting import scatter_matrix  # moved here from pandas.tools.plotting, which was removed in newer pandas
from mpl_toolkits.mplot3d import Axes3D

# Feature Selection and Encoding 编码 ɪn'kəʊdɪŋ
from sklearn.feature_selection import RFE, RFECV
from sklearn.svm import SVR
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
# Machine learning
import sklearn.ensemble as ske
from sklearn import datasets, model_selection, tree, preprocessing, metrics, linear_model
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, SGDClassifier
from sklearn.tree import DecisionTreeClassifier
#import tensorflow as tf  # TensorFlow deep-learning framework; needs a newer Python than 3.4 to install

# Grid and Random Search  grid	[grɪd]格子
import scipy.stats as st
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV

# Metrics
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc

# Managing Warnings 警告管理
import warnings
import sys

warnings.filterwarnings('ignore') #忽略警告错误

####################  了解数据整体情况  ##########################
#返回连续值特征列的lable
def continuous_culomn(data):
    """Return the labels of the continuous-valued (numeric) columns.

    A column counts as continuous when its dtype is one of the integer or
    float dtypes below; the labels come back as an ndarray.
    """
    numeric_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                      'int64', 'uint64', 'float16', 'float32', 'float64']
    # select_dtypes keeps the original column order
    return data.select_dtypes(include=numeric_dtypes).columns.values
#返回离散值特征列的lable
def discretised_culomn(data):
    """Return the labels of the discrete-valued columns.

    A column counts as discrete when its dtype is object or bool; the
    labels come back as an ndarray.
    """
    discrete_dtypes = ['object', 'bool']
    # select_dtypes keeps the original column order
    return data.select_dtypes(include=discrete_dtypes).columns.values
#打印各种类型值占用的内存
def show_types_memory_usage(data):
    """Print the memory footprint (in MB) of each dtype group present in *data*.

    Only dtypes that actually occur in the frame produce a line of output.
    """
    type_list = ['bool','int8','uint8','int16','uint16','int32','uint32','int64','uint64',
                 'float16','float32','float64','complex64','complex128','object','void']
    for dtype_name in type_list:  # renamed from `type`, which shadowed the builtin
        selected = data.select_dtypes(include=[dtype_name])
        if len(selected.columns.values) > 0:
            # deep=True makes object columns report their true string sizes
            usage_mb = selected.memory_usage(deep=True).sum() / 1024 ** 2
            print('{}类型数据内存占用量为 {:.3f} MB'.format(dtype_name, usage_mb))
#返回有缺失的的列名list
def have_missing_culomn(data):
    """Return the names (ndarray) of the columns containing at least one NaN."""
    has_nan = data.isnull().any()            # per-column flag: any missing value?
    return has_nan[has_nan].index.values     # keep only the flagged column labels
#用于展示并保存缺失值图谱
def missingno(data):
    """Render, save ('缺失值图谱.png' in the working directory) and show a
    missing-value matrix plot for *data*.

    NOTE(review): this function's name shadows the module-level
    `import missingno`; the local aliased import below keeps the call working.
    """
    import missingno as msno
    msno.matrix(data, labels=True)
    plt.savefig('缺失值图谱.png',dpi=120,bbox_inches='tight')# save to the current working directory
    plt.show()
#打印有缺失值的行数占比，并返回有缺失值的行的index
def have_missing_row_index(data,print_c = True):
    """Report and return the index of the rows that contain missing values.

    Parameters
    ----------
    data : pd.DataFrame
    print_c : bool, print a summary line (count and share of rows) when True.

    Returns
    -------
    pd.Index of the (unique) rows holding at least one NaN.
    """
    # Select rows with any NaN directly.  The previous implementation
    # boolean-indexed with a 2-D mask, which produced duplicated index
    # entries that then had to be de-duplicated via value_counts().
    missing_data_row = data.index[data.isnull().any(axis=1)]
    # guard against an empty frame (ZeroDivisionError)
    missing_rate = len(missing_data_row) / data.shape[0] if data.shape[0] else 0
    if print_c == True:
        print('当前数据中有缺失的样本量为：{} 占比: {:.2%}'.format(len(missing_data_row), missing_rate))
    return missing_data_row
#返回一个字典，key为存在缺失值的列名，value为该列存在缺失值的行的index，打印每列缺失的数量与占比
def have_missing_column_detail(data, print_c=True):
    """For every column containing NaN, report how many rows are missing.

    Returns a dict mapping each such column name to the Index of the rows
    where that column is NaN; when print_c is True, also prints the number
    of affected columns and the per-column count and missing rate.
    """
    columns_with_nan = have_missing_culomn(data)  # names of the columns holding NaN
    if print_c == True:
        print('共有{}列数据存在缺失'.format(len(columns_with_nan)))
        print(columns_with_nan)
    missing_dict = {}
    total_rows = len(data)
    for col in columns_with_nan:
        nan_index = data[col][data[col].isnull().values == True].index
        count = len(nan_index)
        if print_c == True:
            print('{} 列缺失的行数为:{},缺失率为{:.2%}'.format(col, count, count / total_rows))
        missing_dict[col] = nan_index
    return missing_dict

#去除重复行，去重
def drop_chongfu(data):
    """Drop duplicate rows and report how many were removed.

    Prints the original row count, the number of duplicates and the
    duplicate ratio, then returns the de-duplicated DataFrame.
    """
    start = data.shape[0]
    data = data.drop_duplicates()
    end = data.shape[0]
    # guard against an empty frame (the old code divided by start unconditionally)
    ratio = (start - end) / start if start else 0
    print('去重前样本量：{}，重复样本：{}，重复行占比{:.2%}'.format(start, start - end, ratio))
    return data
# 日期格式转换，加年月日星期，是否周末列
def time_split(df, date_column):
    """Parse *date_column* as datetime and add year/month/day/week/date/
    weekend helper columns to *df*, in place (returns None)."""
    parsed = pd.to_datetime(df[date_column])
    df[date_column] = parsed
    df['year'] = parsed.dt.year
    df['month'] = parsed.dt.month
    df['day'] = parsed.dt.day
    df['week'] = parsed.dt.weekday + 1   # 1 = Monday ... 7 = Sunday
    df['date'] = parsed.dt.date          # date only — keeps spreadsheet exports compact
    df['weekend'] = df['week'].map(lambda w: 1 if w in (6, 7) else 0)

#打印每列中所有的值
def print_values(data, max_show=30):
    """Print every distinct value of each column, at most *max_show* per column.

    For each column prints a header (column name, total distinct count,
    display cap) followed by the values sorted by frequency.
    """
    for column in data.columns:
        value_counts = data[column].value_counts()   # Series: value -> frequency
        value = value_counts.index.values            # the distinct values, most frequent first
        num = len(value)
        if num > max_show:
            # fixed off-by-one: the old slice [:max_show-1] displayed one value fewer
            # than the advertised cap
            value = value[:max_show]
        print('{} 列共有{}个值,展示如下（最大展示数量：{}）：'.format(column, num, max_show))
        print(value)
        print('_' * 60)
#统计每个特征分别有哪些值，各个数值上的分布情况如何
def features_value_counts_show(data,drop_list=[],drop_max_value=[],print_c = False,
                               picture_name = '各特征标签分布情况分析.png'):
    '''
    Plot the value_counts distribution of every column of a DataFrame and
    save the figure.

    data           : pd.DataFrame to analyse.
    drop_list      : column names to skip entirely (no subplot drawn).
    drop_max_value : column names whose single most frequent value is removed
                     before plotting (useful when one dominant value flattens
                     the rest of the curve).
    print_c        : when True also print the head of each column's value_counts.
    picture_name   : figure title and output file name (current directory).

    NOTE(review): drop_list/drop_max_value are mutable defaults; they are
    only read here, never mutated, so the shared-default pitfall does not
    apply — but passing explicit lists is still safer.
    '''

    # decide the subplot grid (shape1 rows x shape2 cols) from the plot count
    import math
    total_plot = data.shape[1]-len(drop_list)
    shape2 = math.ceil(math.sqrt(total_plot))
    shape1 = math.ceil(total_plot / shape2)

    i=0
    fig = plt.figure(figsize=(shape2*2, shape1*1.5))# create the canvas
    myfont = mpl.font_manager.FontProperties(fname='C:/Windows/Fonts/simhei.ttf')  # font able to render the Chinese title (Windows path)
    plt.title(picture_name, fontsize=15, fontproperties=myfont)  # figure title
    plt.axis('off')# hide the outer axes frame

    sort_index_list = continuous_culomn(data)  # labels of the continuous-valued columns

    for column in data.columns:
        value_counts = data[column].value_counts() # Series: value -> frequency
        if print_c == True: # optionally print the value_counts result
            print(value_counts.head(10))
            print('_' * 10 + 'value_counts.head(10)')

        i+= 1
        if column in drop_list: # column excluded from plotting
            i-= 1
            continue  # skip this column
        elif column in sort_index_list: # continuous column: order the x axis by value
            if column in drop_max_value:
                drop_index = value_counts.idxmax()  # the most frequent value
                value_counts.drop(drop_index, inplace=True)  # remove the dominant value
            value_counts = value_counts.sort_index()# sort by value so the x axis is ordered
        elif column in drop_max_value and column not in sort_index_list: # discrete column: just drop the dominant value
            drop_index = value_counts.idxmax()
            value_counts.drop(drop_index, inplace=True)
        else:pass
        ax = fig.add_subplot(shape1, shape2, i) # one subplot per plotted column
        # NOTE(review): Series.data is deprecated/removed in modern pandas;
        # .values would be the safe accessor — confirm the pandas version in use
        ax.plot(value_counts.index,value_counts.data)# x: the values, y: their frequencies
        plt.xticks([])# hide x tick labels (far too many to display legibly)
        plt.xlabel(column)# label each subplot with its column name

    sns.despine()  # remove the top and right spines
    plt.tight_layout() # avoid overlapping subplots
    plt.savefig(picture_name,dpi=120,bbox_inches='tight')# save to the current working directory
    plt.show()
#传入要作图的数据，和不做图的列的list，自动提取连续值的列（int，float类型），打印并保存箱型图，以便观察数据离群值情况
def outlier_show(data,drop_columns=[],picture_name = '数据离群情况分析.png'):
    '''Draw, save and show one box plot per continuous column to inspect outliers.

    data         : DataFrame; the int/float columns are selected automatically.
    drop_columns : continuous columns to exclude from plotting (read-only,
                   so the mutable default is harmless here).
    picture_name : file name the figure is saved under (current directory).
    '''
    # keep only the numeric (continuous-valued) columns
    continuous_value_list = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64','float16', 'float32', 'float64']
    show_data = data.select_dtypes(include = continuous_value_list)
    if len(drop_columns) > 0:
        # NOTE(review): in-place drop on a select_dtypes result may emit a
        # SettingWithCopyWarning; behaviour is otherwise unchanged
        show_data.drop(drop_columns, axis=1, inplace=True)

    myfont = mpl.font_manager.FontProperties(fname='C:/Windows/Fonts/simhei.ttf')  # font able to render the Chinese title (Windows path)
    sns.set_palette("hls", 8)  # seaborn colour palette

    import math
    total_plot = len(show_data.columns.values)
    # at most 5 plots per row
    shape1 = math.ceil(total_plot / 5)
    shape2 = math.ceil(total_plot / shape1)
    fig = plt.figure(figsize=(shape2*1.5, shape1*2))# size the canvas to the grid

    plt.title('数据离群情况分析',fontsize=15,fontproperties = myfont)# figure title
    plt.axis('off')# hide the outer axes frame
    plt.subplots_adjust(wspace=0.9, hspace=0.2)# spacing between subplots
    xlable = show_data.columns.values
    for i in range(len(xlable)):
        ax = fig.add_subplot(shape1,shape2,i+1)
        ax.boxplot(show_data[xlable[i]],sym='+',patch_artist=True)
        ax.set_xticks([])
        ax.set_xlabel(xlable[i],fontproperties = myfont)
    sns.despine()# remove the top and right spines
    plt.savefig(picture_name,dpi=120,bbox_inches='tight')# save to the current working directory
    plt.show()

#将print的内容同时输出到控制台与文件中
class Logger(object):
    """Tee-like stdout replacement: every write goes to the real console AND
    to a log file.

    Assign an instance to sys.stdout to start capturing print output;
    restore the saved original stream afterwards (see print_data_infos).
    """
    def __init__(self, fileN="infomation.txt"):
        self.terminal = sys.stdout                     # keep the real console stream
        # utf-8 so Chinese text is written without mojibake
        self.log = open(fileN, "w", encoding='utf-8')
    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        # fixed: this used to be a no-op `pass`, which could lose buffered
        # output when the interpreter or callers flushed sys.stdout
        self.terminal.flush()
        self.log.flush()

#同时print到控制台与txt中，查看数据shape、数据类型、占用内存、每列有哪些值、describe、缺失图谱、缺失值占比等
#返回"有缺失值的行的索引"、"{有缺失值的列：各列缺失值的行的索引}"
def print_data_infos(data,file_name="数据详情.txt"):
    '''
    Print a full profile of *data* to the console while mirroring the output
    into a txt file (via Logger).

    The report covers: shape, per-column dtypes, memory usage per dtype,
    every column's values (up to 30 shown), describe() statistics, the
    missing-value matrix plot, missing-row/column statistics, per-feature
    value_counts plots, and box plots of the continuous columns.

    Returns:
        1. the index of the rows that contain missing values
        2. a dict {column with missing values: index of the rows missing in it}
    '''

    import time

    output = sys.stdout  # remember the real console stream
    sys.stdout = Logger(file_name)  # tee print output to the file and the console

    start = time.time()
    print('_'*60)
    print('数据共有 {} 行， {} 列'.format(data.shape[0],data.shape[1]))
    print('_' * 60)
    print('各列的数据类型如下')
    print(data.info(memory_usage='deep')) # dtypes of all columns plus total memory footprint
    print('_' * 60)
    show_types_memory_usage(data) # memory used per dtype group
    print('_' * 60)
    print_values(data, max_show=30)# every value of every column (capped at 30)
    print(data.describe()) # min/max/median and other per-column statistics
    print('_' * 60)
    missingno(data)  # draw and save the missing-value matrix
    miss_data_row_index = have_missing_row_index(data)
    have_miss_column_dict = have_missing_column_detail(data)
    print('_' * 60)
    print('以下是每列的value_counts结果：')
    features_value_counts_show(data, print_c=True)
    outlier_show(data)
    end = time.time()

    print('打印完毕，耗时{:.3f} 秒'.format(end-start))
    sys.stdout = output  # restore the console-only stdout
    return miss_data_row_index,have_miss_column_dict


####################  数据预处理  ######################

#提取x与y数据
def X_y_split(data,y_name):
    """Split a DataFrame into feature matrix X and target column y (ndarrays).

    *y_name* names the target column; every other column becomes a feature.
    Returns (X, y) with y shaped (n, 1).
    """
    # .ix was removed from pandas (>= 1.0); .loc with a boolean column mask
    # is the supported equivalent and returns the same selections.
    X = data.loc[:, data.columns != y_name]  # every column except the target
    y = data.loc[:, data.columns == y_name]  # the target column only
    X = X.values
    y = y.values
    return X,y
#返回标准化后的数据
def StandardScaler(X):
    """Standardize *X* column-wise (zero mean, unit variance), writing the
    transformed values back into the original DataFrame and returning it.

    Writing via .iloc keeps the DataFrame structure (index/columns) instead
    of returning a bare ndarray.  Note: intentionally shadows sklearn's
    StandardScaler name — this is a thin convenience wrapper around it.
    """
    from sklearn import preprocessing
    scaler = preprocessing.StandardScaler().fit(X)
    X.iloc[:, :] = scaler.transform(X)
    return X
#返回归一化后的数据
def MinMaxScaler(X):
    """Min-max normalize *X* column-wise (to [0, 1]), writing the transformed
    values back into the original DataFrame and returning it.

    Writing via .iloc keeps the DataFrame structure (index/columns) instead
    of returning a bare ndarray.  Note: intentionally shadows sklearn's
    MinMaxScaler name — this is a thin convenience wrapper around it.
    """
    from sklearn import preprocessing
    scaler = preprocessing.MinMaxScaler().fit(X)
    X.iloc[:, :] = scaler.transform(X)
    return X

##### 删除不符合逻辑的数据#####
#单一条件删除样本(或的关系)
def drop_row_single(data, condition_list):
    '''
    Drop every row matching ANY of the given boolean conditions (OR logic).

    Example: condition_list = [data['age'] > 70, data['age'] < 18, ...]
    Prints how many rows were removed and their share of the data, then
    returns the cleaned DataFrame.
    '''
    num = len(data)
    to_drop = set()                                  # de-duplicates across conditions
    for cond in condition_list:                      # walk the conditions one by one
        to_drop.update(cond[cond.values == True].index)
    to_drop = list(to_drop)
    data_clean = data.drop(to_drop, axis=0)          # remove the union of matches
    print('共删除数据 : {} 行，占数据总量的 : {:.2%}'.format(len(to_drop), len(to_drop) / num))
    return data_clean
#复合条件删除样本（与的关系）
def drop_row_compound(data, condition_list):
    '''
    Drop the rows that satisfy ALL of the given boolean conditions (AND logic).

    Example: [data['hours-per-week'] < 20, data['predclass_changed'] == 1, ...]
    Prints how many rows were removed and their share of the data, then
    returns the cleaned DataFrame.
    '''
    data_num = len(data)
    # Intersect the matching index sets: a row is dropped only when every
    # condition holds.  This replaces the old pd.value_counts trick (counting
    # index occurrences), which relied on the deprecated top-level
    # pd.value_counts API.
    drop_index = None
    for cond in condition_list:
        matched = set(cond[cond.values == True].index)
        drop_index = matched if drop_index is None else drop_index & matched
    drop_index = list(drop_index) if drop_index else []  # empty condition list -> drop nothing
    data_clean = data.drop(drop_index, axis=0)
    print('共删除数据 : {} 行，占数据总量的 ; {:.2%}'.format(len(drop_index), len(drop_index) / data_num))
    return data_clean
#删除有NaN的列
def drop_NaN_rows(data):
    """Return a copy of *data* with every row containing NaN removed,
    printing how many rows were dropped and their share of the total."""
    cleaned = data.copy()
    nan_index = have_missing_row_index(data, print_c=False)  # rows holding any NaN
    cleaned.drop(nan_index, axis=0, inplace=True)
    print('共删除有缺失值的数据 : {} 行，占数据总量的 : {:.2%}'.format(len(nan_index), len(nan_index) / data.shape[0]))
    return cleaned



####################  特征处理  ######################


#用来将离散值转为连续值，传入的数据中不能有Nan，可以一次转换多列
def discretised_to_continuous_NoNaN(data, need_change_columns, change_dict=None):
    '''Label-encode several discrete (object/bool) columns into integer codes.

    The listed columns must not contain NaN.  Each encoded column replaces
    the original under the name "<column>_changed".

    Parameters
    ----------
    data : pd.DataFrame
    need_change_columns : list of column names to encode.
    change_dict : optional dict to accumulate mappings into; a fresh dict is
        created when omitted.

    Returns (encoded DataFrame, {new column name: {code: original label}}).

    Fixed: change_dict previously defaulted to a mutable {} that was mutated
    and returned, so mappings leaked between unrelated calls.
    '''
    if change_dict is None:
        change_dict = {}
    data_changed = data.copy()
    for column in need_change_columns:
        gle = LabelEncoder()                              # encodes labels as 0..n-1
        codes = gle.fit_transform(data_changed[column])
        data_changed[column + '_changed'] = codes
        # remember how each integer code maps back to its original label
        change_dict[column + '_changed'] = {index: label for index, label in enumerate(gle.classes_)}
    data_changed.drop(need_change_columns, axis=1, inplace=True)
    return data_changed, change_dict
#用来将离散值转为连续值，数据中可以有Nan，一次只能转换一列
def discretised_to_continuous_NaNcolumn(data, column, change_dict=None):
    '''Label-encode ONE discrete column that may contain NaN.

    The encoder is fitted on the non-NaN values only; each original label is
    then replaced by its integer code, leaving NaN cells untouched.  The
    column is renamed to "<column>_changed".

    Returns (encoded DataFrame, {new column name: {code: original label}}).

    Fixed: change_dict previously defaulted to a mutable {} that was mutated
    and returned, so mappings leaked between unrelated calls; it now defaults
    to None and a fresh dict is created per call.
    '''
    if change_dict is None:
        change_dict = {}
    data_changed = data.copy()
    non_nan_values = data_changed[column].dropna()  # fit only on the present values

    gle = LabelEncoder()
    gle.fit_transform(non_nan_values)
    genre_mappings = {index: label for index, label in enumerate(gle.classes_)}
    for code, origin_value in genre_mappings.items():
        data_changed[column].replace(origin_value, code, inplace=True)  # label -> integer code
    data_changed.rename(columns={column: column + '_changed'}, inplace=True)
    change_dict[column + '_changed'] = genre_mappings
    return data_changed, change_dict
#将数据中的离散值特征编码为连续值，结合上面两种方法，直接传入数据即可转换
def discretised_to_continuous(data,change_list='auto',notchange_list=[],file_name = 'discretised_to_continuous_map.txt'):
    '''Label-encode the discrete (object/bool) columns of *data* into integers.

    Combines discretised_to_continuous_NoNaN (batch, columns without NaN)
    and discretised_to_continuous_NaNcolumn (one at a time, columns with NaN).

    change_list    : 'auto' to detect the discrete columns automatically, or
                     an explicit list of columns to convert.
    notchange_list : columns to leave untouched (read-only mutable default).
    file_name      : txt file that receives the printed code->label mapping.

    Returns (converted DataFrame, {new column name: {code: original label}}).
    '''
    import time
    start = time.time()

    if change_list == 'auto':need_change_culomns = discretised_culomn(data)# detect the discrete columns
    else: need_change_culomns = change_list

    have_missing_culomns = have_missing_culomn(data)# columns that contain NaN

    have_missing_discretised_culomns = list(set(need_change_culomns) & set(have_missing_culomns))# discrete columns WITH NaN
    have_missing_discretised_culomns = list(set(have_missing_discretised_culomns) - set(notchange_list))  # minus the excluded columns
    no_missing_discretised_culomns = list(set(need_change_culomns) - set(have_missing_culomns))# discrete columns WITHOUT NaN
    no_missing_discretised_culomns = list(set(no_missing_discretised_culomns) - set(notchange_list))   # minus the excluded columns

    data_changed,change_dict = discretised_to_continuous_NoNaN(data,no_missing_discretised_culomns)# batch-convert the NaN-free columns

    for NaN_culomn in have_missing_discretised_culomns:# the NaN-tolerant converter handles one column at a time
        data_changed,change_dict = discretised_to_continuous_NaNcolumn(data_changed,NaN_culomn,change_dict = change_dict)

    end = time.time()
    output = sys.stdout  # remember the real console stream
    sys.stdout = Logger(file_name)  # tee print output to the file and the console

    print('转换用时{:.3f}s , 结果如下:'.format(end-start))
    changed_lables = list(change_dict.keys())
    print('共转换{}个特征，分别为：{}'.format(len(changed_lables),changed_lables))
    for lable in changed_lables:
        print('特征{}：'.format(lable))
        print(change_dict[lable])
        print('-'*40)
    sys.stdout = output  # restore the console-only stdout
    return data_changed,change_dict
#传入DataFrame数据和要编码的列，return编码后的数据和编码转换的字典
def OneHotEncoder_packed(data,change_culomn_list ='auto',not_change_culomn=[],file_name = 'OneHotTransform.txt'):
    '''
    One-hot encode the given columns of a DataFrame.

    change_culomn_list : 'auto' selects every object/bool column; otherwise
        pass the list of columns to encode.
    not_change_culomn  : columns to exclude from encoding.
    file_name          : txt file that receives the printed mapping report.

    All NaN cells are replaced by the string 'NaN' before encoding, because
    OneHotEncoder cannot handle missing values.

    Returns (encoded DataFrame, {original column: [new one-hot column names]}).
    '''
    import time
    start = time.time()

    if change_culomn_list == 'auto':
        change_culomn_list = discretised_culomn(data)# every object/bool column
    change_culomn_list = list(set(change_culomn_list)-set(not_change_culomn))

    data_changed = data.fillna('NaN')# OneHotEncoder rejects NaN, so encode it as a literal string
    change_dict={}# records what every column was converted into


    for culomn in change_culomn_list:# encode the columns one at a time
        enc=OneHotEncoder()# fresh encoder per column
        # OneHotEncoder only accepts array input; reshape(-1,1) turns the 1-D
        # values into a single-feature column vector (otherwise the row would
        # be interpreted as one sample with many features)
        enc.fit(data_changed[culomn].values.reshape(-1,1))
        OneHotEncod = enc.transform(data_changed[culomn].values.reshape(-1,1)).toarray()# dense array; the sparse default is hard to inspect
        OneHot_frame = pd.DataFrame(OneHotEncod)# back to a DataFrame

        feature_dict = {}# maps positional column numbers to readable names
        feature_name_list = list(enc.categories_[0])# the category labels seen by the encoder (array -> list)
        for index in range(len(feature_name_list)):# build the rename mapping
            feature_name = culomn + '_'+ feature_name_list[index]
            feature_dict[index] = feature_name
        OneHot_frame.rename(columns=feature_dict, inplace=True)# rename 0..n-1 to "<column>_<label>"
        # one-hot output is float by default, which wastes memory; int8 is enough
        OneHot_frame[list(feature_dict.values())] = OneHot_frame[list(feature_dict.values())].astype('int8')

        OneHot_frame.index = data_changed.index# realign: the new frame was given a fresh 0..n index
        data_changed = pd.concat([data_changed, OneHot_frame], axis=1)# append the one-hot columns
        change_dict[culomn] = list(feature_dict.values())# record the conversion

    data_changed.drop(change_culomn_list, axis=1, inplace=True)# drop the original (now encoded) columns
    end = time.time()

    output = sys.stdout  # remember the real console stream
    sys.stdout = Logger(file_name)  # tee print output to the file and the console
    print('共用OneHotEncoder转换{}个特征，用时{:.3f}秒\n分别为：{}'.format(len(change_culomn_list),end-start,change_culomn_list))
    print('_'*30)
    for feature in change_culomn_list:
        print('{}列被转换为；'.format(feature))
        print(change_dict[feature])
        print('_' * 30)
    sys.stdout = output  # restore the console-only stdout

    return data_changed,change_dict


#利用sklearn的PCA对维度进行压缩
def PCA_data(data, y_name=None, n_components='auto'):
    '''
    Reduce the feature dimensionality of *data* with PCA.

    :param data: DataFrame to compress; features are min-max scaled first.
    :param y_name: name of the target column, which is excluded from the PCA
        and re-attached to the result; None means the frame has no target.
    :param n_components: number of components to keep, or 'auto' to let
        sklearn keep them all.
    :return: the transformed DataFrame (downcast to float16), with y
        re-attached when y_name is given.  Also prints the cumulative
        explained-variance ratio per component.
    '''
    column_list = list(data.columns.values)
    if y_name is not None:  # fixed: .remove(None) used to raise when y_name was omitted
        column_list.remove(y_name)
    MinMax_data = MinMaxScaler(data[column_list])  # scale the features before PCA

    if n_components == 'auto':
        pca = PCA()  # keep all components when not specified
    else:
        pca = PCA(n_components=n_components)
    pca.fit(MinMax_data)
    data_new = pd.DataFrame(pca.transform(MinMax_data))  # array -> DataFrame

    data_new.index = data.index  # re-align with the original index
    if y_name is not None:
        data_new = pd.concat([data_new, data[y_name]], axis=1)

    data_new[list(data_new.columns.values)] = data_new[list(data_new.columns.values)].astype(
        'float16')  # downcast from float64 to save memory

    # explained_variance_ratio_: per-component share of the variance;
    # the higher the share the more important the component
    ratio_total = 0
    for i, ratio in enumerate(pca.explained_variance_ratio_.round(decimals=5)):
        ratio_total += ratio
        print('累计至第{}个特征方差总和占比为{}'.format(i + 1, ratio_total))

    return data_new
#利用sklearn的LDA对维度进行压缩
def LDA_data(data, y_name, n_components='auto'):
    '''
    Reduce dimensionality with Linear Discriminant Analysis (supervised).

    n_components must satisfy 1 <= n_components <= n_classes - 1; with a
    binary target the output always has exactly one dimension, regardless
    of the value requested.
    '''
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    x, y = X_y_split(data, y_name)

    if n_components == 'auto':
        lda = LinearDiscriminantAnalysis()  # let sklearn pick the component count
    else:
        lda = LinearDiscriminantAnalysis(n_components=n_components)

    lda.fit(x, y)
    transformed = lda.transform(x)  # ndarray of the projected features
    print(type(transformed))
    data_new = pd.DataFrame(transformed)
    data_new.index = data.index  # re-align with the original index
    data_new[list(data_new.columns.values)] = data_new[list(data_new.columns.values)].astype(
        'float16')  # downcast from float64 to save memory

    data_new = pd.concat([data_new, data[y_name]], axis=1)  # re-attach the target

    # cumulative explained-variance ratio per discriminant component
    running_total = 0
    for pos, ratio in enumerate(lda.explained_variance_ratio_.round(decimals=5)):
        running_total += ratio
        print('累计至第{}个特征方差总和占比为{}'.format(pos + 1, running_total))

    return data_new


#可用下采样的各种方法进行下采样
def down_samplers(data_train,y_name,method):
    '''
    Under-sample *data_train* with the chosen imbalanced-learn method and
    return the resampled (X, y).

    method must be one of:
      RandomUnderSampler: random under-sampling.
      NearMiss1/2/3: heuristic nearest-neighbour selection rules (imblearn
          NearMiss with version=1/2/3).
      ClusterCentroids: prototype generation — each class is replaced by
          K-Means centroids; works best when the data clusters well.
      EditedNearestNeighbours: removes samples that disagree with their
          neighbours (kind_sel='mode' or 'all').
      RepeatedEditedNearestNeighbours: ENN applied repeatedly.
      AllKNN: ENN with a growing neighbourhood size at each iteration.
      CondensedNearestNeighbour: iterative 1-NN keep/discard rule.
      OneSidedSelection: TomekLinks-based removal of noisy majority samples.
      NeighbourhoodCleaningRule: cleans rather than selects — union of the
          samples rejected by ENN and by a 3-NN classifier.
      InstanceHardnessThreshold: drops samples whose predicted probability
          (LogisticRegression here) falls below a threshold.

    Raises ValueError when *method* is not in the list.

    NOTE(review): random_state/ version arguments follow older imblearn
    releases — confirm against the installed version.
    '''

    method_list = ['RandomUnderSampler','NearMiss1','NearMiss2','NearMiss3','ClusterCentroids','EditedNearestNeighbours',
                   'RepeatedEditedNearestNeighbours','AllKNN','CondensedNearestNeighbour','OneSidedSelection',
                   'NeighbourhoodCleaningRule','InstanceHardnessThreshold']
    if method not in method_list:
        # fixed: this line previously called the non-existent str.fomat and
        # crashed with AttributeError; an unknown method now fails loudly
        raise ValueError('method参数错误，{}不在方法列表中，方法列表如下：\n{}'.format(method,method_list))
    if method == 'RandomUnderSampler':
        from imblearn.under_sampling import RandomUnderSampler
        method = RandomUnderSampler(random_state=0)
    if method == 'NearMiss1':
        from imblearn.under_sampling import NearMiss
        method = NearMiss(random_state=0, version=1)
    if method == 'NearMiss2':
        from imblearn.under_sampling import NearMiss
        method = NearMiss(random_state=0, version=2)
    if method == 'NearMiss3':
        from imblearn.under_sampling import NearMiss
        method = NearMiss(random_state=0, version=3)
    if method =='ClusterCentroids':
        from imblearn.under_sampling import ClusterCentroids  # prototype-generation under-sampling
        method = ClusterCentroids(random_state=0)
    if method =='EditedNearestNeighbours':
        from imblearn.under_sampling import EditedNearestNeighbours
        method = EditedNearestNeighbours(random_state=0)
    if method == 'RepeatedEditedNearestNeighbours':
        from imblearn.under_sampling import RepeatedEditedNearestNeighbours
        method = RepeatedEditedNearestNeighbours(random_state=0)
    if method == 'AllKNN':
        from imblearn.under_sampling import AllKNN
        method = AllKNN(random_state=0)
    if method == 'CondensedNearestNeighbour':
        from imblearn.under_sampling import CondensedNearestNeighbour
        method = CondensedNearestNeighbour(random_state=0)
    if method == 'OneSidedSelection':
        from imblearn.under_sampling import OneSidedSelection
        method = OneSidedSelection(random_state=0)
    if method == 'NeighbourhoodCleaningRule':
        from imblearn.under_sampling import NeighbourhoodCleaningRule
        method = NeighbourhoodCleaningRule(random_state=0)
    if method == 'InstanceHardnessThreshold':
        from sklearn.linear_model import LogisticRegression
        from imblearn.under_sampling import InstanceHardnessThreshold
        method = InstanceHardnessThreshold(random_state=0,estimator=LogisticRegression())

    X, y = X_y_split(data_train, y_name)
    X_resampled, y_resampled = method.fit_resample(X, y)

    return X_resampled, y_resampled
#可用上采样的各种方法进行上采样
def over_samplers(data_train,y_name,method):
    '''
    Over-sample *data_train* with the chosen imbalanced-learn method and
    return the resampled (X, y).

    method must be one of:
      RandomOverSampler: duplicate random minority samples.
      SMOTE (+ borderline1 / borderline2 / svm variants): synthesize new
          minority samples on the segment between a sample and one of its
          nearest neighbours.
      ADASYN: generate samples near minority points misclassified by k-NN.
      SMOTEENN / SMOTETomek: combine over-sampling with cleaning.

    Raises ValueError when *method* is not in the list.

    NOTE(review): the SMOTE kind= argument was split into BorderlineSMOTE /
    SVMSMOTE in newer imblearn — confirm against the installed version.
    '''

    method_list = ['RandomOverSampler', 'SMOTE', 'SMOTE_borderline1','SMOTE_borderline2','SMOTE_svm','ADASYN', 'SMOTEENN',
                   'SMOTETomek']
    if method not in method_list:
        # fixed: this line previously called the non-existent str.fomat and
        # crashed with AttributeError; an unknown method now fails loudly
        raise ValueError('method参数错误，{}不在方法列表中，方法列表如下：\n{}'.format(method,method_list))
    if method == 'RandomOverSampler':
        from imblearn.over_sampling import RandomOverSampler
        method = RandomOverSampler(random_state=0)
    if method == 'SMOTE':
        from imblearn.over_sampling import SMOTE  # requires the imbalanced-learn package
        method = SMOTE(random_state=0)
    if method == 'SMOTE_borderline1':
        from imblearn.over_sampling import SMOTE
        method = SMOTE(random_state=0,kind='borderline1')
    if method == 'SMOTE_borderline2':
        from imblearn.over_sampling import SMOTE
        method = SMOTE(random_state=0,kind='borderline2')
    if method == 'SMOTE_svm':
        from imblearn.over_sampling import SMOTE
        method = SMOTE(random_state=0,kind='svm')
    if method == 'ADASYN':
        from imblearn.over_sampling import ADASYN
        method = ADASYN(random_state=0)
    if method == 'SMOTEENN':
        from imblearn.combine import SMOTEENN
        method = SMOTEENN(random_state=0)
    if method == 'SMOTETomek':
        from imblearn.combine import SMOTETomek
        method = SMOTETomek(random_state=0)

    X, y = X_y_split(data_train, y_name)
    # fit_sample is the legacy imblearn API (removed in 0.8); fit_resample
    # matches down_samplers and current imblearn releases.
    X_resampled, y_resampled = method.fit_resample(X, y)

    return X_resampled, y_resampled
#返回上采样或下采样数据
def DownOver_sample_datas(data_train,y_name,choice ='dowm',method='all',n=10000):
    '''
    Run one or more under-/over-sampling methods and collect their outputs.

    :param choice: 'dowm' (sic — kept for backward compatibility; 'down' is
        also accepted) for under-sampling, 'over' for over-sampling.
    :param method: 'all' to run every method in the built-in list, a single
        method name, or a list of method names.
    :param n: at most n rows are sampled from data_train before resampling.
    :return: dict {method name: [X_resampled, y_resampled]}.
    '''
    over_list = ['RandomOverSampler', 'SMOTE', 'SMOTE_borderline1', 'SMOTE_borderline2', 'ADASYN',
                   'SMOTEENN','SMOTETomek']
    down_list = ['RandomUnderSampler', 'NearMiss1','NearMiss3','EditedNearestNeighbours', 'AllKNN', 'OneSidedSelection',
                 'NeighbourhoodCleaningRule', 'InstanceHardnessThreshold']
    # excluded: 'SMOTE_svm' (produced negative values);
    # 'NearMiss2','ClusterCentroids','RepeatedEditedNearestNeighbours',
    # 'CondensedNearestNeighbour' (MemoryError or far too slow)
    if choice in ('dowm', 'down'):
        method_list = down_list
        choice = 'dowm'
    elif choice == 'over':
        method_list = over_list
    else:
        # fixed: an invalid choice used to fall through to a NameError later
        raise ValueError("choice must be 'dowm'/'down' or 'over'")

    if method == 'all':
        use_method = method_list
    elif isinstance(method, str):
        use_method = [method]  # fixed: a bare string used to be iterated character by character
    else:
        use_method = method

    # fixed: `data` was left unbound (NameError) when data_train had <= n rows
    if data_train.shape[0] > n:
        data = data_train.sample(n=n, random_state=0)  # cap the sample size
    else:
        data = data_train

    data_dict = {}
    for sampler_name in use_method:
        if choice == 'dowm':
            X_resampled, y_resampled = down_samplers(data, y_name, sampler_name)
        else:
            X_resampled, y_resampled = over_samplers(data, y_name, sampler_name)
        data_dict[sampler_name] = [X_resampled, y_resampled]

    return data_dict
#绘制混淆军阵
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    """Draw a confusion matrix as an annotated heatmap on the current figure.

    cm      : square confusion matrix (ndarray).
    classes : tick labels used on both axes.
    title   : plot title.
    cmap    : matplotlib colormap for the heatmap.
    """
    import itertools
    plt.imshow(cm, interpolation='nearest', cmap=cmap)  # render the matrix as an image
    plt.title(title)
    plt.colorbar()                                      # add the colour scale bar
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=0)
    plt.yticks(ticks, classes)
    # annotate every cell; flip the text colour at half the max for contrast
    threshold = cm.max() / 2
    for row, col in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(col, row, cm[row, col], horizontalalignment="center",
                 color="white" if cm[row, col] > threshold else "black")
    plt.tight_layout()            # keep labels inside the figure area
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


####################  模型选择  ######################

#对指定的模型进行交叉验证，返回一个列表[平均分,[每次的得分]]
def cross_val_score_mean(X, y, modle, scoring=None):
    """5-fold cross-validate *modle* on (X, y).

    *scoring* accepts any scorer name understood by
    sklearn.model_selection.cross_val_score — e.g. classification:
    'accuracy', 'f1', 'roc_auc', 'precision', 'recall';
    regression: 'r2', 'neg_mean_squared_error', 'neg_mean_absolute_error';
    clustering: 'adjusted_rand_score', 'mutual_info_score', ...

    Returns [mean score, [individual fold scores]].
    """
    from sklearn.model_selection import cross_val_score
    fold_scores = cross_val_score(modle, X, y, scoring=scoring, cv=5)
    return [fold_scores.mean(), list(fold_scores)]

#返回9种{回归模型名称：实例化的模型}的字典，用于try_modles
def Regressor_modle_dict():
    """Return a dict of instantiated regression models, keyed by name, for
    use with try_modles.

    SVR is deliberately left out: it tends to hang on larger datasets
    (roughly O(n^2) complexity) and is sensitive to missing data, although
    it can capture non-linear relations well on small samples.
    """
    from sklearn import tree          # decision-tree regression
    from sklearn import linear_model  # linear regression
    from sklearn import neighbors     # KNN regression
    from sklearn import ensemble      # random forest / AdaBoost / GBRT
    from sklearn.ensemble import BaggingRegressor
    from sklearn.tree import ExtraTreeRegressor

    model_dict = {
        'DecisionTreeRegressor': tree.DecisionTreeRegressor(),
        'LinearRegression': linear_model.LinearRegression(),
        'KNeighborsRegressor': neighbors.KNeighborsRegressor(),
        'RandomForestRegressor': ensemble.RandomForestRegressor(n_estimators=20),    # 20 trees
        'AdaBoostRegressor': ensemble.AdaBoostRegressor(n_estimators=50),            # 50 trees
        'GradientBoostingRegressor': ensemble.GradientBoostingRegressor(n_estimators=100),  # 100 trees
        'BaggingRegressor': BaggingRegressor(),   # fixed key typo: was 'BaggingRegresso'
        'ExtraTreeRegressor': ExtraTreeRegressor(),
    }
    return model_dict
#Returns a dict of {classifier name: instantiated model} (9 models), for try_modles
def classifier_modle_dict(mode='all'):
    '''
    Return {classifier name: freshly instantiated (un-fitted) sklearn classifier}.

    mode='time_first'    -> drop the slower SVC and KNN models for speed
    mode='have_negative' -> drop MultinomialNB (training data X must be non-negative)
    '''
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.naive_bayes import GaussianNB
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.ensemble import GradientBoostingClassifier
    from sklearn.svm import SVC  # only effective for binary classification
    from sklearn.linear_model import SGDClassifier

    # Build the mapping directly instead of binding locals that shadow the
    # imported class names.
    model_dict = {
        'KnnClassifier': KNeighborsClassifier(),
        'LogisticRegression': LogisticRegression(),
        'DecisionTreeClassifier': DecisionTreeClassifier(),
        'RandomForestClassifier': RandomForestClassifier(n_estimators=20, max_depth=5),
        'GradientBoostingClassifier': GradientBoostingClassifier(n_estimators=100),
        'BayesGaussianNB': GaussianNB(),
        'BayesMultinomialNB': MultinomialNB(),
        'SVC': SVC(),
        'SGDClassifier': SGDClassifier(),  # defaults: loss="hinge", penalty="l2"
    }

    if mode == 'time_first':
        # SVC and KNN dominate runtime on larger data; drop them when speed matters.
        del model_dict['SVC']
        del model_dict['KnnClassifier']
    if mode == 'have_negative':
        # MultinomialNB cannot train on a feature matrix X containing negatives.
        del model_dict['BayesMultinomialNB']

    return model_dict
#Returns a dict of classification scorers: accuracy, precision, recall, f1, roc_auc, classification_report
def score_dict_classification():
    """Return {score name: sklearn metric function (not called)} for classification."""
    from sklearn.metrics import classification_report
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import precision_score
    from sklearn.metrics import recall_score
    from sklearn.metrics import f1_score
    from sklearn.metrics import roc_auc_score

    # Use a descriptive local name; the original shadowed the builtin `dict`.
    score_dict = {'classification_report': classification_report,
                  'accuracy': accuracy_score,
                  'precision': precision_score,
                  'recall': recall_score,
                  'f1': f1_score,
                  'roc_auc': roc_auc_score}
    return score_dict

#Compare the cross_val_score of every model on every dataset variant
#(e.g. standardized / normalized / unchanged) and save a score report.
def try_datas(data_dict,model_dict,file_name='data_score.txt',scoring=None):
    '''
    :param data_dict: {data_name: [x, y]}  NOTE: entries containing negative
                      values are removed from this dict in place.
    :param model_dict: {model name: instantiated model}
    :param file_name: report file, default 'data_score.txt'
    :param scoring: str, forwarded to cross_val_score
    :return: None; writes the score table to file_name (and the console)
    Scores come from cross_val_score: 5-fold cross validation, averaged.
    Available `scoring` strings:
    # Classification:‘accuracy‘balanced_accuracy’‘average_precision’‘brier_score_loss’‘f1’‘f1_micro’‘f1_macro’
    # ‘f1_weighted’‘f1_samples’‘neg_log_loss’    metrics.log_loss‘precision’‘recall’‘jaccard’‘roc_auc’
    # Clustering:‘adjusted_mutual_info_score’‘adjusted_rand_score’‘completeness_score’‘fowlkes_mallows_score’
    # ‘homogeneity_score’‘mutual_info_score’‘normalized_mutual_info_score’‘v_measure_score’
    # Regression:‘explained_variance’ ‘max_error’ ‘neg_mean_absolute_error’‘neg_mean_squared_error’
    # ‘neg_mean_squared_log_error’‘neg_median_absolute_error’‘r2’
    '''
    import time

    output = sys.stdout  # remember the original console stream
    sys.stdout = Logger(file_name)  # tee print output to both file and console

    start = time.time()

    # Drop any dataset that contains negative values.
    # Iterate over a snapshot of the keys: we may delete entries while looping.
    for key in list(data_dict.keys()):
        data = pd.DataFrame(data_dict[key][0])
        data_mask = data.mask(data < 0)  # negative cells become NaN
        NaN_index = have_missing_row_index(data_mask, print_c=False)
        if len(NaN_index) > 0:
            # bug fix: original was `del dict[key]`, which tried to delete
            # from the *builtin* `dict` instead of from data_dict.
            del data_dict[key]

    data_name_list = list(data_dict.keys())
    index_list = data_name_list.copy()
    index_list.append('mean_score')

    model_name_list = list(model_dict.keys())
    columns_list = model_name_list.copy()
    columns_list.append('best_score')

    score_df = pd.DataFrame(columns=columns_list,index=index_list)
    for data_name in data_name_list:
        X = data_dict[data_name][0]
        y = data_dict[data_name][1]

        for model in model_name_list:
            scores = cross_val_score_mean(X, y, model_dict[model], scoring=scoring)
            score_df.loc[data_name][model] = scores[0]

        score_df.loc[data_name]['best_score'] = score_df.loc[data_name].max()#best model score for this dataset (row max)
        score_df.sort_values(by='best_score', ascending=False, inplace=True) #descending order (default is ascending)

    for column in columns_list:
        score_df.loc['mean_score'][column] = score_df[column].mean()  # column-wise mean over all datasets
    score_df.sort_values(by='mean_score', axis=1, ascending=False, inplace=True)  # order columns by mean score

    end = time.time()
    print('各数据集交叉验证完毕，用时：{}s评分如下：'.format(end-start))
    print(score_df)

    sys.stdout = output  # restore printing to the console only

def modles_score(model_dict,score_dict,data=None,y_name=None,data_list=None,file_name='modles_scores.txt',n=20000,split=5):
    '''
    Cross-validate every model with every scorer and save a report.

    :param model_dict: {model name: instantiated (un-fitted) model}
    :param score_dict: {score name: metric function (not called)}
    :param data_list: pre-split [x, y] in array form; mutually exclusive with data/y_name
    :param data: DataFrame form; y_name (the target column) is then required
    :param file_name: report file, default 'modles_scores.txt'
    :param n: if `data` has more than n rows, a random sample of n rows is used
    :param split: number of StratifiedKFold folds, default 5
    :return: None; prints/saves mean, std, max and min score tables plus
             the averaged classification reports (if requested).
    Reference list of sklearn scoring names:
       # Classification:‘accuracy‘balanced_accuracy’‘average_precision’‘brier_score_loss’‘f1’‘f1_micro’‘f1_macro’
       # ‘f1_weighted’‘f1_samples’‘neg_log_loss’    metrics.log_loss‘precision’‘recall’‘jaccard’‘roc_auc’
       # Clustering:‘adjusted_mutual_info_score’‘adjusted_rand_score’‘completeness_score’‘fowlkes_mallows_score’
       # ‘homogeneity_score’‘mutual_info_score’‘normalized_mutual_info_score’‘v_measure_score’
       # Regression:‘explained_variance’ ‘max_error’ ‘neg_mean_absolute_error’‘neg_mean_squared_error’
       # ‘neg_mean_squared_log_error’‘neg_median_absolute_error’‘r2’
    '''

    from sklearn.model_selection import StratifiedKFold
    import time
    start =time.time()

    if data_list is not None:  # fixed: identity comparison with None (was `!= None`)
        x = pd.DataFrame(data_list[0])
        y = pd.DataFrame(data_list[1])
    else:
        if data.shape[0] > n:
            data = data.sample(n) #randomly sample n rows when the data is larger than n
        x, y = X_y_split(data, y_name)#split into features x and target y

    kf = StratifiedKFold(split, shuffle=False)  #split the data into `split` stratified folds

    model_list = list(model_dict.keys())  # freeze the model order (a dict's keys become a stable list)
    scorer_list = list(score_dict.keys())  # freeze the scorer order
    score_frame_key = scorer_list.copy()
    if 'classification_report'in scorer_list:
        score_frame_key.remove('classification_report')  # reports are handled separately, not as a score column
        report_list = []  # one {model: report} dict per fold
    score_frame_key.append('time_spend')  # extra column: fit+predict wall time

    score_frame_list = []  # one score DataFrame per fold

    for train_index, test_index in kf.split(x,y): #iterate over the folds
        x_train = x.iloc[train_index].values#.values converts to an ndarray
        y_train = y.iloc[train_index].values.ravel()#.ravel() flattens to 1-D
        x_test = x.iloc[test_index].values
        y_test = y.iloc[test_index].values.ravel()

        score_df = pd.DataFrame(columns=score_frame_key,index=model_list)#empty per-fold frame
        if 'classification_report' in scorer_list:report_dict = {}#reports of this fold, keyed by model

        for model in model_list:#evaluate each model on this fold
            time_start = time.time()  # start timing

            modle = model_dict[model]#the instantiated model
            modle.fit(x_train,y_train)#train on the training split
            y_pred = modle.predict(x_test)#predict on the test split

            time_end = time.time()
            time_spend =  time_end - time_start

            scores = []#scores of this model, in scorer order

            for scorer_name in scorer_list:#apply every scorer
                scorer = score_dict[scorer_name]#look up the metric function
                if scorer_name == 'classification_report':
                    score = scorer(y_test,y_pred,output_dict=True)#score the predictions
                    report_dict[model]=score#reports go into the report dict
                else:
                    score = scorer(y_test, y_pred)  # score the predictions
                    scores.append(score)#plain scores go into the score list

            scores.append(time_spend)#append the elapsed time
            score_df.loc[model] = scores#store this model's row
        score_frame_list.append(score_df)#collect this fold's frame
        if 'classification_report' in scorer_list:report_list.append(report_dict)#collect this fold's reports

    # Aggregate the per-fold frames into mean / std / max / min tables.
    score_mean_df = pd.DataFrame(columns=score_frame_key, index=model_list)  # empty frame
    score_std_df = score_mean_df.copy()
    score_max_df = score_mean_df.copy()
    score_min_df = score_mean_df.copy()

    for model in model_list:#for every model
        for key in score_frame_key:#for every score column
            score_list = []#per-fold values of this (model, score) cell
            for i in range(len(score_frame_list)):
                score_list.append(score_frame_list[i].loc[model][key])#gather the value from each fold
            score_array = np.array(score_list)#as ndarray for the statistics below

            score_mean = score_array.mean()#mean over the folds
            score_std = score_array.std()#standard deviation over the folds
            score_max = score_array.max()#maximum over the folds
            score_min = score_array.min()#bug fix: was .mean() — the "min" table showed means

            score_mean_df.loc[model][key] = float('%.3f' %score_mean)#store, 3 decimals
            score_std_df.loc[model][key] = float('%.3f' %score_std)#store the std
            score_max_df.loc[model][key] = float('%.3f' % score_max)  # store the max
            score_min_df.loc[model][key] = float('%.3f' % score_min)  # store the min

    score_std_df.drop(['time_spend'], axis=1, inplace=True)  # drop the time column
    score_max_df.drop(['time_spend'], axis=1, inplace=True)  # drop the time column
    score_min_df.drop(['time_spend'], axis=1, inplace=True)  # drop the time column
    score_mean_df.sort_values(by='roc_auc',ascending=False,inplace=True)#best roc_auc first
    score_std_df.sort_values(by='roc_auc', ascending=True,inplace=True)  # smallest std first (more stable on top)
    score_max_df.sort_values(by='roc_auc', ascending=False, inplace=True)  # best roc_auc first
    score_min_df.sort_values(by='roc_auc', ascending=False, inplace=True)  # best roc_auc first

    end= time.time()

    output = sys.stdout  # remember the original console stream
    sys.stdout = Logger(file_name)  # tee print output to both file and console

    print('各模型{}次交叉验证完毕，共用时{:.2f}s'.format(split,end-start))
    print('各个模型的平均得分如下：\n{}'.format(score_mean_df))
    print('-'*50)
    print('各个模型的得分的标准差如下：\n{}'.format(score_std_df))
    print('-' * 50)
    print('各个模型的得分的最大值如下：\n{}'.format(score_max_df))
    print('-' * 50)
    print('各个模型的得分的最小值如下：\n{}'.format(score_min_df))
    print('-' * 50)

    # Average the per-fold classification reports and print one table per model.
    if 'classification_report' in scorer_list:
        key_model_list = list(score_mean_df.index.values)
        key_score_list = ['precision', 'recall','f1-score', 'support' ]
        # NOTE(review): these avg keys match older sklearn report dicts; newer
        # releases emit 'accuracy' instead of 'micro avg' — confirm against the
        # installed sklearn version.
        avg_list = ['micro avg','macro avg','weighted avg']
        key_lable_list = list(set(y_test))  # class labels, taken from the LAST fold's test split
        key_lable_list = [str(x) for x in key_lable_list]#report dict keys are strings
        key_lable_list += avg_list

        for key_model in key_model_list:
            report_frame = pd.DataFrame(columns=key_score_list, index=key_lable_list)
            for key_label in key_lable_list:
                for key_score in key_score_list:
                    scores = []
                    for i in range(len(report_list)):
                        scores.append(report_list[i][key_model][key_label][key_score])
                    scores_array = np.array(scores)
                    mean_score = scores_array.mean()
                    report_frame.loc[key_label][key_score] = float('%.3f' % mean_score)
            print('模型{}的report如下:\n{}'.format(key_model,report_frame))
            print('-' * 50)
        print('"micro":通过先计算总体的TP，FN和FP的数量，再计算F1\n"macro":分布计算每个类别的F1，然后做平均（各类别F1的权重相同）')

    sys.stdout = output  # restore printing to the console only




########### 建立离散化的数据集 dataset_bin ###########
#features =
#labels =


