# -*- coding: utf-8 -*-

import lightgbm as lgb
import pandas as pd
import numpy as np
import json
import time
import re
import matplotlib.pylab as plt
from sklearn import metrics
from sklearn.metrics import mean_squared_error
import scorecardpy as sc
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from collections import Counter
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import os
import warnings
import datetime as dtt
warnings.filterwarnings("ignore")

def psi_stats_score(data_left, data_right, non_computed=None, plot_image=True):
    """Calculate the average PSI between two scored samples (0-100 score scale).

    Parameters
    ----------
    data_left : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.
    data_right : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.
    non_computed : str or None
        Name of a boolean column flagging rows NOT scored by the model;
        flagged rows are excluded before computing PSI.
    plot_image : bool (default True)
        If True, plot the good/bad ratio distributions of both samples.

    Returns
    -------
    pandas.DataFrame
        Per-bin counts, ratios and PSI components ('psi_bad', 'psi_good')
        for both samples.  The average PSI is printed, not returned.
    """
    # --- Validate inputs -------------------------------------------------
    check_cols = ['score', 'y', 'id']
    if non_computed is not None:
        if not isinstance(non_computed, str):
            raise ValueError('non_computed must be a str.')
        check_cols += [non_computed]
        # Keep only model-scored rows.  NOTE: `~x == True` parses as
        # (~x) == True, i.e. x == False for boolean columns; kept as-is to
        # preserve the original selection semantics.
        data_left = data_left[~data_left[non_computed] == True].copy()
        data_right = data_right[~data_right[non_computed] == True].copy()
    for col in check_cols:
        if col not in data_left.columns or col not in data_right.columns:
            raise ValueError('Please check the columns %s of data' % col)

    # --- Drop rows with missing score ------------------------------------
    data_left = data_left.loc[data_left['score'].notnull(), check_cols]
    data_right = data_right.loc[data_right['score'].notnull(), check_cols]

    # --- Discretize score into right-open bins [0,10) ... [90,100) --------
    break_points = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    data_left.loc[data_left.score < 0, 'score'] = 0
    data_left.loc[data_left.score >= 100, 'score'] = 99  # clip into last bin
    data_right.loc[data_right.score < 0, 'score'] = 0
    data_right.loc[data_right.score >= 100, 'score'] = 99
    data_left['score'] = pd.cut(data_left['score'], break_points, right=False).values
    data_right['score'] = pd.cut(data_right['score'], break_points, right=False).values

    # --- PSI of bad & good samples per bin --------------------------------
    count_left = data_left.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_right = data_right.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_left['bad_ratio'] = count_left[1] / count_left[1].sum()
    count_right['bad_ratio'] = count_right[1] / count_right[1].sum()
    count_left['good_ratio'] = count_left[0] / count_left[0].sum()
    count_right['good_ratio'] = count_right[0] / count_right[0].sum()
    count_final = pd.merge(count_left, count_right, left_index=True,
                           right_index=True, suffixes=['_left', '_right'])
    count_final['psi_bad'] = (count_left['bad_ratio'] - count_right['bad_ratio']) * \
                             np.log(count_left['bad_ratio'] / count_right['bad_ratio'])
    count_final['psi_good'] = (count_left['good_ratio'] - count_right['good_ratio']) * \
                              np.log(count_left['good_ratio'] / count_right['good_ratio'])

    # inf/NaN components (empty bins) are treated as zero contribution
    average_psi = (count_final['psi_bad'].replace([np.inf, np.nan], 0.0).sum() +
                   count_final['psi_good'].replace([np.inf, np.nan], 0.0).sum()) / 2

    # --- Plot -------------------------------------------------------------
    if plot_image:
        plot_cols = ['bad_ratio_left', 'good_ratio_left',
                     'bad_ratio_right', 'good_ratio_right']
        plot_labels = ['Bad Ratio of Test Sample', 'Good Ratio of Test Sample',
                       'Bad Ratio of Train Set', 'Good Ratio of Train Set']
        colors = ['blue', 'red', 'green', 'cyan']
        markers = ['s', 'x', 'o', 'v']
        # loop-invariant tick positions/labels hoisted out of the plot loop;
        # bracket typos fixed so labels match the right-open bins above
        score_range = range(len(count_final.index))
        score_label = ['[0,10)', '[10,20)', '[20,30)', '[30,40)', '[40,50)',
                       '[50,60)', '[60,70)', '[70,80)', '[80,90)', '[90,100)']
        plt.figure()
        for p, l, c, m in zip(plot_cols, plot_labels, colors, markers):
            plt.plot(score_range, count_final[p].values, color=c, marker=m,
                     markersize=1, label=l)
        plt.grid()
        plt.legend(loc='upper left')
        plt.xticks(score_range, score_label, rotation=45)
        plt.title('PSI of Score Card')
        plt.ylabel('Ratio')
        plt.tight_layout()
        plt.show()
    print('Average PSI:%f' % average_psi)
    return count_final

# Define the KS (Kolmogorov-Smirnov) statistic calculation function
from sklearn.metrics import roc_curve
def calKS(y,y_pred):
    """Return the KS (Kolmogorov-Smirnov) statistic: the maximum gap
    between the true-positive rate and the false-positive rate along
    the ROC curve of predictions `y_pred` against labels `y`."""
    false_pos_rate, true_pos_rate, _ = metrics.roc_curve(y, y_pred)
    return (true_pos_rate - false_pos_rate).max()

def psi_stats_score1(data_left, data_right, non_computed=None, plot_image=True):
    """Calculate the average PSI between two scored samples (300-1000 score scale).

    Same logic as psi_stats_score, but bins the score on right-open
    50-point intervals from 300 to 1000.

    Parameters
    ----------
    data_left : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.
    data_right : pandas.DataFrame
        Must contain the columns 'score', 'y' and 'id'.
    non_computed : str or None
        Name of a boolean column flagging rows NOT scored by the model;
        flagged rows are excluded before computing PSI.
    plot_image : bool (default True)
        If True, plot the good/bad ratio distributions of both samples.

    Returns
    -------
    pandas.DataFrame
        Per-bin counts, ratios and PSI components ('psi_bad', 'psi_good')
        for both samples.  The average PSI is printed, not returned.
    """
    # --- Validate inputs -------------------------------------------------
    check_cols = ['score', 'y', 'id']
    if non_computed is not None:
        if not isinstance(non_computed, str):
            raise ValueError('non_computed must be a str.')
        check_cols += [non_computed]
        # Keep only model-scored rows.  NOTE: `~x == True` parses as
        # (~x) == True, i.e. x == False for boolean columns; kept as-is to
        # preserve the original selection semantics.
        data_left = data_left[~data_left[non_computed] == True].copy()
        data_right = data_right[~data_right[non_computed] == True].copy()
    for col in check_cols:
        if col not in data_left.columns or col not in data_right.columns:
            raise ValueError('Please check the columns %s of data' % col)

    # --- Drop rows with missing score ------------------------------------
    data_left = data_left.loc[data_left['score'].notnull(), check_cols]
    data_right = data_right.loc[data_right['score'].notnull(), check_cols]

    # --- Discretize score into right-open bins [300,350) ... [950,1000) ---
    break_points = list(range(300, 1001, 50))
    data_left.loc[data_left.score < 300, 'score'] = 300
    data_left.loc[data_left.score >= 1000, 'score'] = 999  # clip into last bin
    data_right.loc[data_right.score < 300, 'score'] = 300
    data_right.loc[data_right.score >= 1000, 'score'] = 999
    data_left['score'] = pd.cut(data_left['score'], break_points, right=False).values
    data_right['score'] = pd.cut(data_right['score'], break_points, right=False).values

    # --- PSI of bad & good samples per bin --------------------------------
    count_left = data_left.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_right = data_right.groupby(['score', 'y']).count()['id'].unstack().fillna(value=0.0)
    count_left['bad_ratio'] = count_left[1] / count_left[1].sum()
    count_right['bad_ratio'] = count_right[1] / count_right[1].sum()
    count_left['good_ratio'] = count_left[0] / count_left[0].sum()
    count_right['good_ratio'] = count_right[0] / count_right[0].sum()
    count_final = pd.merge(count_left, count_right, left_index=True,
                           right_index=True, suffixes=['_left', '_right'])
    count_final['psi_bad'] = (count_left['bad_ratio'] - count_right['bad_ratio']) * \
                             np.log(count_left['bad_ratio'] / count_right['bad_ratio'])
    count_final['psi_good'] = (count_left['good_ratio'] - count_right['good_ratio']) * \
                              np.log(count_left['good_ratio'] / count_right['good_ratio'])

    # inf/NaN components (empty bins) are treated as zero contribution
    average_psi = (count_final['psi_bad'].replace([np.inf, np.nan], 0.0).sum() +
                   count_final['psi_good'].replace([np.inf, np.nan], 0.0).sum()) / 2

    # --- Plot -------------------------------------------------------------
    if plot_image:
        plot_cols = ['bad_ratio_left', 'good_ratio_left',
                     'bad_ratio_right', 'good_ratio_right']
        plot_labels = ['Bad Ratio of Test Sample', 'Good Ratio of Test Sample',
                       'Bad Ratio of Train Set', 'Good Ratio of Train Set']
        colors = ['blue', 'red', 'green', 'cyan']
        markers = ['s', 'x', 'o', 'v']
        # loop-invariant tick positions/labels hoisted out of the plot loop;
        # bracket typos fixed so labels match the right-open bins above
        score_range = range(len(count_final.index))
        score_label = ['[300,350)', '[350,400)', '[400,450)', '[450,500)',
                       '[500,550)', '[550,600)', '[600,650)', '[650,700)',
                       '[700,750)', '[750,800)', '[800,850)', '[850,900)',
                       '[900,950)', '[950,1000)']
        plt.figure(figsize=(9, 6))
        for p, l, c, m in zip(plot_cols, plot_labels, colors, markers):
            plt.plot(score_range, count_final[p].values, color=c, marker=m,
                     markersize=1, label=l)
        plt.grid()
        plt.legend(loc='upper left')
        plt.xticks(score_range, score_label, rotation=45)
        plt.title('PSI of Score Card')
        plt.ylabel('Ratio')
        plt.tight_layout()
        plt.show()
    print('Average PSI:%f' % average_psi)
    return count_final

# Convert categorical variables to numeric variables
# Convert categorical (object-dtype) variables into numeric codes ordered by bad rate.
def cate_var_transform(X, Y):
    """Encode object-dtype columns of X as numeric ranks ordered by bad rate.

    For each categorical column, every distinct value gets the integer rank
    len(values) .. 1 after sorting values by their bad rate (share of Y == 1),
    so the highest bad rate gets the smallest code.

    NOTE(review): row selection uses positional np.where indices, so X and Y
    are assumed to share a default RangeIndex — confirm at the call site.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix; object-dtype columns are encoded, the rest pass through.
    Y : pandas.Series
        Binary target aligned with X (1 = bad).

    Returns
    -------
    tuple (X_transformed, object_transfer_rule)
        X_transformed : DataFrame of numeric columns followed by encoded ones.
        object_transfer_rule : list of {column_name: rule DataFrame} entries,
        one per encoded column.
    """
    # Split columns by dtype.
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]

    # object_transfer_rule records the encoding rule of every categorical column.
    object_transfer_rule = list(np.zeros([len(object_var.columns)]))

    # object_transform holds the numeric encodings (0 marks "not assigned").
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns)

    for i in range(len(object_var.columns)):

        temp_var = object_var.iloc[:, i]

        # Distinct non-missing values of this column.
        unique_value = np.unique(temp_var.iloc[np.where(~temp_var.isna())[0]])

        transform_rule = pd.concat([pd.DataFrame(unique_value, columns=['raw data']),
                                    pd.DataFrame(np.zeros([len(unique_value), 2]),
                                                 columns=['transform data', 'bad rate'])], axis=1)
        for j in range(len(unique_value)):
            bad_num = len(np.where((Y == 1) & (temp_var == unique_value[j]))[0])
            all_num = len(np.where(temp_var == unique_value[j])[0])

            # Defensive guard against division by zero (values come from the
            # data itself, so all_num should always be >= 1).
            if all_num == 0:
                all_num = 0.5
            transform_rule.iloc[j, 2] = 1.0 * bad_num / all_num

        # Rank values by bad rate: highest bad rate -> smallest code.
        transform_rule = transform_rule.sort_values(by='bad rate')
        transform_rule.iloc[:, 1] = list(range(len(unique_value), 0, -1))

        # Save the rule for this column.
        object_transfer_rule[i] = {object_var.columns[i]: transform_rule}
        # Apply the rule.
        for k in range(len(unique_value)):
            transfer_value = transform_rule.iloc[np.where(transform_rule.iloc[:, 0] == unique_value[k])[0], 1]
            # .iloc[0] extracts the scalar (float(Series) is deprecated in pandas >= 2.1)
            object_transform.iloc[np.where(temp_var == unique_value[k])[0], i] = float(transfer_value.iloc[0])
        # Rows never assigned (missing values) stay 0 -> mark them as NaN.
        object_transform.iloc[np.where(object_transform.iloc[:, i] == 0)[0], i] = np.nan

    X_transformed = pd.concat([num_var, object_transform], axis=1)
    # BUG FIX: previously returned only the LAST column's transform_rule,
    # discarding the accumulated per-column rules.
    return (X_transformed, object_transfer_rule)

# Remove variables with excessively high missing rate or single-value rate
# Remove variables whose missing rate or single-value rate is too high.
def missing_identity_select(data, y='flagy', missing_rate=0.9, identity_rate=0.9, kp_vars=None):
    """Drop columns with too many missing values or a dominant single value.

    Parameters
    ----------
    data : pandas.DataFrame
        Candidate variables plus the target column `y`.
    y : str
        Target column name (default 'flagy').
    missing_rate : float
        Columns with a null ratio above this threshold are dropped.
    identity_rate : float
        Columns whose most frequent value covers more than this share of
        rows are dropped.
    kp_vars : list of str or None
        Columns to keep unconditionally (excluded from the screening).

    Returns
    -------
    pandas.DataFrame
        data restricted to the surviving variables + kp_vars + [y].
    """
    # BUG FIX: kp_vars=None previously crashed at the final selection
    # (list + None); normalise to a list once.
    kp_vars = list(kp_vars) if kp_vars else []
    data1 = data.drop(columns=kp_vars) if kp_vars else data.copy()
    # Screen by missing ratio.
    null_ratio = data1.isnull().sum() / data1.shape[0]
    data1 = data1.iloc[:, np.where(null_ratio <= missing_rate)[0]]
    # Screen by single-value (mode) ratio; BUG FIX: honour the y parameter
    # instead of the hard-coded 'flagy'.
    identity = (
        data1.drop(columns=y)
        .apply(lambda x: x.value_counts().max() / x.size)
        .reset_index(name='identity_rate')
        .rename(columns={'index': 'variable'})
    )
    identity_vars = identity[identity['identity_rate'] <= identity_rate]['variable'].to_list()
    return data[identity_vars + kp_vars + [y]]
# Drop features with too-low correlation to the target or too-high correlation with other features
# Drop features weakly correlated with the target or strongly correlated with each other.
def delete_corelation(data, y='flagy', y_cor=0.1, x_cor=0.7, kp_vars=None):
    """Correlation-based variable screening.

    Keeps variables whose absolute correlation with the target `y` is at
    least `y_cor`, then, scanning columns left to right, drops any later
    variable whose absolute pairwise correlation with an earlier kept one
    is at least `x_cor`.

    Parameters
    ----------
    data : pandas.DataFrame
        Candidate variables plus the target column.
    y : str
        Target column name (default 'flagy').
    y_cor, x_cor : float
        Thresholds for target and pairwise correlation, respectively.
    kp_vars : list of str or None
        Columns to keep unconditionally (excluded from the screening).

    Returns
    -------
    pandas.DataFrame
        data restricted to the surviving variables + kp_vars + [y].
    """
    kp_vars = list(kp_vars) if kp_vars else []
    # BUG FIX: data1 was undefined (NameError) when kp_vars was empty/None.
    data1 = data.drop(columns=kp_vars) if kp_vars else data.copy()
    cor = data1.corr().abs()
    cor_y = cor[y]
    y_select = cor_y[cor_y >= y_cor]
    # (the original called y_select.sort_values(...) without assigning the
    # result — a no-op; removed)
    cor_x = cor.loc[y_select.index.values, y_select.index.values]
    cor_x.drop(labels=y, inplace=True)
    cor_x.drop(columns=y, inplace=True)
    drop_index = []
    for i in range(len(cor_x) - 1):
        # BUG FIX: np.where returns positions relative to the iloc[i+1:]
        # slice; offset by i+1 so the absolute row positions are recorded,
        # otherwise the wrong variables were dropped.
        drop_index.extend(np.where(cor_x.iloc[i + 1:, i] >= x_cor)[0] + i + 1)
    drop_name = cor_x.index[sorted(set(drop_index))].tolist()
    cor_x.drop(index=drop_name, inplace=True)
    final_vars = cor_x.index.tolist() + kp_vars + [y]
    return data[final_vars]
# Variable-level PSI calculation
# Variable-level PSI screening.
def psi_var(data_train, data_test):
    """Return the names of variables whose train/test PSI exceeds 0.1.

    Each numeric column is binned on up to 6 equal-width intervals derived
    from the TRAIN distribution (lower edge min-1 so the minimum falls in
    the first right-closed bin), then the PSI between the train and test
    bin distributions is summed per variable.

    Parameters
    ----------
    data_train, data_test : pandas.DataFrame
        Numeric frames sharing the same columns.

    Returns
    -------
    list of str
        Columns with total PSI > 0.1 (also printed).
    """
    data_train1 = data_train.copy()
    data_test1 = data_test.copy()
    drop = []
    for name in data_train.columns.tolist():
        num = data_train[name].unique().shape[0]
        n_bins = num if num <= 6 else 6
        breaks = np.linspace(data_train[name].min() - 1, data_train[name].max(), n_bins + 1)
        data_train1[name] = pd.cut(data_train[name], bins=breaks, right=True).astype(str)
        data_test1[name] = pd.cut(data_test[name], bins=breaks, right=True).astype(str)
        df1 = data_train1[name].value_counts().rename('train')
        df2 = data_test1[name].value_counts().rename('test')
        df = pd.concat([df1, df2], axis=1)
        df['train_ratio'] = df.train / df.train.sum()
        df['test_ratio'] = df.test / df.test.sum()
        df['psi'] = (df.train_ratio - df.test_ratio) * np.log(df.train_ratio / df.test_ratio)
        # BUG FIX: the replace() result was discarded (no-op); assign it so
        # inf components cannot poison the sum.
        df['psi'] = df.psi.replace(np.inf, 0)
        if df.psi.sum() > 0.1:
            drop.append(name)
    print('PSI偏高的被剔除的变量：\n{}'.format(drop))
    return drop

def psi_var_table(data_train, data_test):
    """Build a per-bin PSI table for every column shared by train and test.

    Bin edges are the observed TRAIN values themselves when a column has at
    most 6 distinct values, otherwise 6 equally spaced cut points over the
    train range; both are padded with +/-inf so every value falls in a bin.

    Parameters
    ----------
    data_train, data_test : pandas.DataFrame
        Numeric frames sharing the same columns.

    Returns
    -------
    pandas.DataFrame
        One row per (variable, bin) with columns
        ['var', 'index', 'train', 'train_ratio', 'test', 'test_ratio',
         'psi', 'psi_all'], where 'psi_all' is the variable's total PSI.
    """
    psi_table = pd.DataFrame()
    data_train1 = data_train.copy()
    data_test1 = data_test.copy()
    for name in data_train.columns.tolist():
        num = data_train[name].unique().shape[0]
        if num <= 6:
            # Few distinct values: use the observed values as bin edges.
            breaks = data_train[name].value_counts().sort_index().index.to_list()
        else:
            breaks = np.linspace(data_train[name].min(), data_train[name].max(), 6)
        breaks = [-np.inf] + list(breaks) + [np.inf]
        data_train1[name] = pd.cut(data_train[name], bins=breaks, right=True).astype(str)
        data_test1[name] = pd.cut(data_test[name], bins=breaks, right=True).astype(str)
        df1 = data_train1[name].value_counts().rename('train')
        df2 = data_test1[name].value_counts().rename('test')
        df = pd.concat([df1, df2], axis=1)
        df['train_ratio'] = df.train / df.train.sum()
        df['test_ratio'] = df.test / df.test.sum()
        df['psi'] = (df.train_ratio - df.test_ratio) * np.log(df.train_ratio / df.test_ratio)
        df['psi_all'] = df.psi.sum()
        df['var'] = name
        # BUG FIX (pandas >= 2.0): value_counts names the index after the
        # column, so reset_index would no longer create an 'index' column;
        # clear the name to keep the historical layout.
        df.index.name = None
        df.reset_index(inplace=True)
        psi_table = pd.concat([psi_table, df], axis=0, sort=False)
    if psi_table.empty:  # no columns -> nothing to select
        return psi_table
    # Column selection hoisted out of the loop (was re-applied every pass).
    return psi_table[['var', 'index', 'train', 'train_ratio', 'test', 'test_ratio', 'psi', 'psi_all']]

def badrate_month(df):
    """Monthly sample count, bad count and bad rate (%) grouped by year/month.

    Requires columns 'user_date' (parseable as datetime) and 'flagy'
    (binary target, 1 = bad).

    BUG FIX: works on a copy — the original converted df['user_date'] in the
    caller's frame and triggered SettingWithCopyWarning on a slice view.

    Returns
    -------
    pandas.DataFrame
        Indexed by (year, month) with columns '样本量', '坏客数量', '坏客率%'.
    """
    df_temp = df[['user_date', 'flagy']].copy()
    df_temp['user_date'] = pd.to_datetime(df_temp['user_date'])
    df_temp['year'] = df_temp['user_date'].apply(lambda x: str(x.year))
    df_temp['month'] = df_temp['user_date'].apply(lambda x: str(x.month) + '月')
    badrate_m = df_temp.groupby(['year', 'month'])['flagy'].agg([len, sum])
    badrate_m['坏客率%'] = badrate_m['sum'] / badrate_m['len'] * 100
    badrate_m.rename(columns={'len': '样本量', 'sum': '坏客数量'}, inplace=True)
    return badrate_m



def report(
    data_total, 
    data_train, 
    data_test,
    data_oot = None,
    model = None,
    y = 'flagy',
    filename='',
    points0 = 55,
    pdo = -10,
    odds0 = 0.1,
    grey = 2,
    score_range = (0,100),
    tick = 10,
    percent = 5,
    top_n = 10,
    **kwargs):
    """
    :param data_total: dataframe 所有训练样本，不含验证集，包含入模变量、标签及申请日期，申请日期最好为日期格式，有灰客户样本
    :param data_train: dataframe 训练集，只包含入模变量及标签，不含灰客户
    :param data_test: dataframe 测试集，只包含入模变量及标签，不含灰客户
    :param data_oot: dataframe 验证集，默认为None，不含灰客户
    :param model: 最终模型,默认为bst
    :param y: str 标签名，默认为'flagy'
    :param filename: str 报告名，默认为'',输出名称自动加'report_生成日期'后缀，如'lgb模型report_2021_07_23'
    :param points0: int 基准分，默认55
    :param pdo: int pd0，默认-10
    :param odds0: float 坏账率比，默认0.1
    :param grey: int, float or str 灰客户的取值标识，默认取2
    :param score_range: tuple 评分的上下限，如 (0, 100)
    :param tick: int or float 评分分布的分数间隔，默认为10
    :param percent: int or float 评分等频分布的分位数间隔，默认为5，即5%分位数
    :param top_n: int 输出重要性为前n的变量分箱图，默认为10，即输出变量重要度为前10的变量分箱图
    :param kwargs: 其他变量,如user_date='user_date'：申请日期名称，出现在data_total中，主要用于报告第2部分样本分析
    """
    # 文件名
    filename = filename + 'report_' + dtt.datetime.now().strftime('%Y_%m_%d')
    # 取出模型中的变量,确认函数中传入的样本包含入模变量和标签
    var_final = model.booster_.feature_name()
    data_train = data_train[var_final + [y]].copy() # 训练集只取入模变量和y
    data_test = data_test[var_final + [y]].copy() # 测试集只取入模变量和y
    try:
        data_oot = data_oot[var_final + [y]].copy() # 验证集只取入模变量和y
    except:
        pass

    # 进行excel报告内容整理
    table = pd.ExcelWriter(filename + '.xlsx',engine = 'xlsxwriter')
    # ------------------------------------------------------------------------------------------------------------------
    # 目录页
    sheet = pd.DataFrame(
        columns=["编号", "中文简称", "英文简称", "内容"],
        data=[
            ["1", "模型使用说明", "Model_Explain", "模型使用说明"],
            ["2", "原始数据统计", "Original_Stat", "原始数据情况、建模数据选取、匹配百融数据说明"],
            ["3", "衍生特征构造", "Var_derivation", "衍生特征构造"],
            ["4", "数据预处理-格式转换", "Data_Pre_Format", "数据预处理-格式转换"],
            ["5", "模型参数", "Model_Params", "模型参数及评分参数设定"],
            ["6", "模型区分度评估", "Model_Disc", "模型区分度评估"],
            ["7", "模型稳定性评估", "Model_Stab", "模型稳定性评估"],
            ["8", "单变量稳定性", "Var_Stab", "单变量稳定性评估"],
            ["9", "变量重要性", "Var_Importance", "单变量重要性排序"],           
            ["10", "变量趋势", "Var_trend", "重要变量趋势"],
            ["11", "样本风险评分分布", "Model_Score", "模型评分及风险表现"],
            ["12", "评分决策表", "Decision_table", "不同评分分段的通过率、违约率提升"],
        ],
    )
    sheet.to_excel(table, sheet_name="目录", startrow=0, startcol=0, index=False)
    # -------------------------------------------------------------------------------------------------------------------
    # 1.模型使用说明页
    head = pd.DataFrame(columns=["返回目录"])
    sheet1 = pd.DataFrame(
        index=["版本名称", "模型类型", "客群种类", "该版本更新时间", "开发人员", "建模样本数据量", "模型变量数量", "核心算法"],
        columns=["内容"],
    )
    head.to_excel(table, sheet_name="1.模型使用说明", startrow=0, index=False)
    sheet1.to_excel(table, sheet_name="1.模型使用说明", startrow=1)    
    # -------------------------------------------------------------------------------------------------------------------
    # 2.原始数据统计页
    head2_1 = pd.DataFrame(columns=["一、数据来源"])
    sheet2_1 = pd.DataFrame(
        index=[
            "机构",
            "产品类型",
            "业务开展时间",
            "引流渠道",
            "额度区间",
            "期数范围",
            "存量客户数量",
            "日进件量",
            "平均通过率",
            "审批流程",
            "审批使用数据",
        ],
        columns=["内容"],
    )
    head2_2 = pd.DataFrame(columns=["二、数据概要"])
    sheet2_2 = pd.DataFrame(
        index=[
            "客群描述",
            "观察期",
            "表现期",
            "原始样本时间",
            "原始样本量",
            "建模样本时间",
            "建模样本量",
            "验证样本时间",
            "验证样本量",
        ],
        columns=["内容"],
    )

    head2_3 = pd.DataFrame(columns=["三、好坏客户定义"])
    sheet2_3 = pd.DataFrame(columns=["客户类型", "定义方式", "样本量", "好坏客户定义描述"])
    sheet2_3["客户类型"] = ["坏客户", "灰客户", "好客户"]

    head2_4 = pd.DataFrame(columns=["四、建模数据统计情况"])
    sheet2_4 = pd.DataFrame(columns=["年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    if "user_date" in kwargs.values():
        data_total['user_date'] = pd.to_datetime(data_total['user_date'], errors="coerce")
        temp = data_total.copy()
        temp["年"] = temp['user_date'].apply(lambda x: str(x.year) + "年")
        temp["月"] = temp['user_date'].apply(lambda x: str(x.month) + "月")
        temp = (
            temp[temp[y] != grey]
            .groupby(["年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        temp["比例"] = temp["数量"] / temp["数量"].sum()
        temp["坏账率"] = temp["坏数量"] / temp["数量"]
        temp["平均坏账率"] = temp["坏数量"].sum() / temp["数量"].sum()
        temp = temp[["年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_4 = temp.copy()

    head2_5 = pd.DataFrame(columns=["五、建模数据选取"])
    sheet2_5 = pd.DataFrame(columns=["类型", "年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    sheet2_5["类型"] = ["训练", "测试", "验证"]
    if "user_date" in kwargs.values():
        data_train["user_date"] = data_total['user_date']
        data_test["user_date"] = data_total['user_date']
        data_train["类型"] = "训练"
        data_test["类型"] = "测试"
        try:
            data_oot["类型"] = "验证"
            data_oot["user_date"] = data_oot['user_date']
        except:
            pass
        data_merge = pd.concat([data_train, data_test, data_oot], axis=0, sort=False)
        data_merge["年"] = data_merge['user_date'].apply(lambda x: str(x.year))
        data_merge["月"] = data_merge['user_date'].apply(lambda x: str(x.month) + "月")
        data_merge = (
            data_merge.groupby(["类型", "年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        data_merge["比例"] = data_merge["数量"] / data_merge["数量"].sum()
        data_merge["坏账率"] = data_merge["坏数量"] / data_merge["数量"]
        data_merge["平均坏账率"] = data_merge["坏数量"].sum() / data_merge["数量"].sum()
        data_merge = data_merge[["类型","年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_5 = data_merge.copy()
        del data_train['user_date'],data_test['user_date'],data_train['类型'],data_test['类型']
        try:
            del data_oot['user_date'],data_oot['类型']
        except:
            pass

    head2_6 = pd.DataFrame(columns=["六、数据集划分"])
    sheet2_6 = pd.DataFrame(columns=["数据量", "坏样本", "坏账率"], index=["训练集", "测试集", "验证集"])
    try:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], data_oot.shape[0]]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), data_oot[y].sum()]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]
    except:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], 0]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), 0]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]

    head.to_excel(table, sheet_name="2.原始数据统计", startrow=0, index=False)
    head2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=1, index=False)
    sheet2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=3, startcol=1)
    head2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=16, index=False)
    sheet2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=18, startcol=1)
    head2_3.to_excel(table, sheet_name="2.原始数据统计", startrow=29, index=False)
    sheet2_3.to_excel(
        table, sheet_name="2.原始数据统计", startrow=31, startcol=1, index= False
    )
    head2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=36, index=False)
    if sheet2_4.shape[0] == 0:
        sheet2_4.to_excel(
            table, sheet_name="2.原始数据统计", startrow=38, startcol=1, index=False
        )
    else:
        sheet2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=38, startcol=1,index=False)
    row_number = sheet2_4.shape[0] + 38 + 17
    head2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number, index=False)
    sheet2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number + 2, startcol=1,index = False)
    row_number1 = row_number + 2 + sheet2_5.shape[0] + 2
    head2_6.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number1, index=False)
    sheet2_6.to_excel(
        table, sheet_name="2.原始数据统计", startrow=row_number1 + 2, startcol=1
    )
    # ---------------------------------------------------------------------------------------------------
    # 3.衍生变量构造
    sheet3 = pd.DataFrame(columns=["序号", "模块", "变量", "中文名", "数据来源", "衍生逻辑"])
    head.to_excel(table, sheet_name="3.衍生变量构造", index=False)
    sheet3.to_excel(table, sheet_name="3.衍生变量构造", startrow=2, index=False)
    # ----------------------------------------------------------------------------------------------------
    # 4.数据预处理
    sheet4 = pd.DataFrame(columns=["序号", "变量", "数据源", "变量类型", "编码(转换)格式", "举例"])
    head.to_excel(table, sheet_name="4.数据预处理", index=False)
    sheet4.to_excel(table, sheet_name="4.数据预处理", startrow=2, index=False)
    # -----------------------------------------------------------------------------------------------------
    # 5.模型参数
    col = list(str(model).split(','))
    model_param = pd.DataFrame(col,columns = ['parms'])
    model_param[['parms','参数值']] = model_param.parms.str.split('=',2,expand=True)
    model_param['parms'] = model_param['parms'].apply(lambda x : x.replace('\n',''))
    model_param['parms'] = model_param['parms'].apply(lambda x :str(x).strip())
    model_param.iloc[0,0] = model_param.iloc[0,0][15:]
    model_param.iloc[13,1] = model_param.iloc[13,1][:-1]
    mapping = pd.DataFrame({'parms':['bagging_fraction','feature_fraction','bagging_freq','importance_type','learning_rate',
                                'max_bin','max_depth','min_data_in_leaf','n_estimators',
                                'num_leaves','objective','random_state','reg_alpha','reg_lambda'],
                            '模型参数':['每次抽取的样本比例','每次抽取的特征比例','装袋频率','重要性计算方式',
                               '学习率','最大分箱数量','最大树深','每个叶子节点的最少样本量',
                                '学习器数量','叶子结点数量','分类方式','随机参数','一阶惩罚项','二阶惩罚项']})
    model_param = pd.merge(model_param,mapping,how = 'left',on = 'parms')
    sheet5 = model_param[['parms','模型参数','参数值']].copy()
    
    head.to_excel(table, sheet_name="5.模型参数", index=False)
    sheet5.to_excel(table, sheet_name="5.模型参数", index=False, startrow=2)
    # -----------------------------------------------------------------------------------------------------
    # 6.模型区分度评估
    title = pd.DataFrame(columns=["模型区分度评估"])
    sheet6 = pd.DataFrame(columns=["评估指标", "训练集", "测试集", "验证集"])
    sheet6["评估指标"] = ["KS", "AUC"]
    # 计算KS、AUC
    # 预测概率
    train_class_pred = model.predict_proba(data_train.drop(columns = [y]))[:,1]
    test_class_pred = model.predict_proba(data_test.drop(columns = [y]))[:,1]
    try:
        oot_class_pred = model.predict_proba(data_oot.drop(columns = [y]))[:,1]
    except:                                            
        pass
    # 评估
    train_perf = sc.perf_eva(data_train[y], train_class_pred, title="train")
    test_perf = sc.perf_eva(data_test[y], test_class_pred, title="test")
    train_perf["pic"].savefig("train_KS_AUC.png", bbox_inches="tight")
    test_perf["pic"].savefig("test_KS_AUC.png", bbox_inches="tight")
    sheet6["训练集"] = [train_perf["KS"], train_perf["AUC"]]
    sheet6["测试集"] = [test_perf["KS"], test_perf["AUC"]]
    try:
        oot_perf = sc.perf_eva(data_oot[y], oot_class_pred, title="oot")
        oot_perf["pic"].savefig("oot_KS_AUC.png", bbox_inches="tight")
        sheet6["验证集"] = [oot_perf["KS"], oot_perf["AUC"]]
    except:
        pass
    title1 = pd.DataFrame(
        columns=[
            "此次建模，训练样本KS={}，AUC={}，模型结果较理想，模型对好坏客户具有很好的区分度，且模型较稳定，达到建模预期目标".format(
                train_perf["KS"], train_perf["AUC"])])
    title2 = pd.DataFrame(columns=["训练集", "KS={}".format(train_perf["KS"])])
    title3 = pd.DataFrame(columns=["测试集", "KS={}".format(test_perf["KS"])])
    title.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=1)                                     
    title1.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=8)
    title2.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=3)
    title3.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=11)
    try:
        title4 = pd.DataFrame(columns=["验证集", "KS={}".format(oot_perf["KS"])])
        title4.to_excel(
            table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=19
        )
    except:
        pass

    head.to_excel(table, sheet_name="6.模型区分度评估", index=False)
    sheet6.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=2)
    # 曲线图
    sheet = table.book.sheetnames["6.模型区分度评估"]
    sheet.insert_image("A12", "train_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    sheet.insert_image("I12", "test_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    try:
        sheet.insert_image("Q12", "oot_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 7.模型稳定性评估
    title1 = pd.DataFrame(columns=["1.训练&测试"])
    title2_1 = pd.DataFrame(columns=["等间距", "模型样本量分布评估"])
    title2_2 = pd.DataFrame(columns=["等频", "模型样本量分布评估"])
    head.to_excel(table, sheet_name="7.模型稳定性评估", index=False)
    title1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=1)
    title2_1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=2)
    title2_2.to_excel(
        table, sheet_name="7.模型稳定性评估", index=False, startrow=2, startcol=11
    )
    B = pdo / np.log(2)
    A = points0 + B * np.log(odds0)
    score_train = np.around(A - B * np.log(train_class_pred/(1 - train_class_pred)))
    score_train = pd.DataFrame(score_train,index = data_train.index)
    score_train = pd.concat([data_train['flagy'],score_train],axis = 1).rename(columns = {0:'score'})
    score_test = np.around(A - B * np.log(test_class_pred/(1 - test_class_pred)))
    score_test = pd.DataFrame(score_test,index = data_test.index)
    score_test = pd.concat([data_test['flagy'],score_test],axis = 1).rename(columns = {0:'score'})
    try:
        score_oot = np.around(A - B * np.log(oot_class_pred/(1 - oot_class_pred)))
        score_oot = pd.DataFrame(score_oot,index = data_oot.index)
        score_oot = pd.concat([data_oot['flagy'],score_oot],axis = 1).rename(columns = {0:'score'})
    except:
        pass
    # Build the PSI tables --------------
    # Train & test -------
    # Equal-width bins -----
    # Training set: clip scores into [score_range[0], score_range[1]) and cut
    # into fixed-width intervals of size `tick` (right-open bins).
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    # NOTE(review): groups by `y` here but by the literal "flagy" below —
    # presumably y == 'flagy'; confirm.
    df = df.groupby(by=["score", y]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df.sort_index(ascending=True, inplace=True)
    # Per-bin counts and proportions (total and bad) for the training set.
    df["训练样本量"] = df[1] + df[0]
    df["训练集占比"] = df["训练样本量"] / df["训练样本量"].sum()
    df["训练坏客户数"] = df[1]
    df["训练坏客户占比"] = df["训练坏客户数"] / df["训练坏客户数"].sum()
    del df[0], df[1]
    # Test set: same binning as the training set.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1["score"] = pd.cut(
        df1.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge train/test per-bin tables and compute PSI per bin:
    # (p_train - p_test) * ln(p_train / p_test), for all samples and for bads.
    sheet7_1 = df.merge(df1, how="outer", on="score")
    sheet7_1 = sheet7_1.fillna(0)
    sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["测试集占比"]) * np.log(
        sheet7_1["训练集占比"] / sheet7_1["测试集占比"]
    )
    sheet7_1["psi_bad"] = (sheet7_1["训练坏客户占比"] - sheet7_1["测试坏客户占比"]) * np.log(
        sheet7_1["训练坏客户占比"] / sheet7_1["测试坏客户占比"]
    )

    # Equal-frequency bins ------
    # Training set: break points are percentiles of the (clipped) training
    # scores; `dt` and `breaks` are reused by the OOT section below.
    dt = score_train[score_train[y] != grey]
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt.sort_values(by="score", ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(dt.score.values, percent_list)
    # Deduplicate (percentiles can repeat on discrete scores), then force the
    # end points to the full score range so every clipped score falls in a bin.
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt.sort_index(ascending=True, inplace=True)
    dt["训练样本量"] = dt[1] + dt[0]
    dt["训练集占比"] = dt["训练样本量"] / dt["训练样本量"].sum()
    dt["训练坏客户数"] = dt[1]
    dt["训练坏客户占比"] = dt["训练坏客户数"] / dt["训练坏客户数"].sum()
    del dt[0], dt[1]
    # Test set: binned with the training-derived breaks so the two
    # distributions are directly comparable.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1.sort_values(by="score", ascending=True, inplace=True)
    df1["score"] = pd.cut(df1.score, bins=breaks, right=False)
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge and compute PSI (overall and bad-only).
    sheet7_2 = dt.merge(df1, how="outer", on="score")
    sheet7_2 = sheet7_2.fillna(0)
    sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["测试集占比"]) * np.log(
        sheet7_2["训练集占比"] / sheet7_2["测试集占比"]
    )
    sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["测试坏客户占比"]) * np.log(
        sheet7_2["训练坏客户占比"] / sheet7_2["测试坏客户占比"]
    )

    # Write both tables side by side, then compute where the OOT section
    # starts: below the taller table plus a fixed 20-row gap.
    sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4)
    sheet7_2.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4, startcol=12)
    row_number = max(sheet7_2.shape[0], sheet7_1.shape[0]) + 4 + 20 + 2

    # OOT (out-of-time) validation section — only runs when score_oot exists.
    try:
        # Equal-width bins: same clipping and fixed-width `tick` intervals as
        # the train table so the two distributions line up bin-for-bin.
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(
            df2.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        # Per-bin counts and proportions for the validation (OOT) set.
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge with the train-side equal-width table (`df`) and compute PSI.
        sheet7_1 = df.merge(df2, how="outer", on="score")
        sheet7_1 = sheet7_1.fillna(0)
        sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["验证集占比"]) * np.log(
            sheet7_1["训练集占比"] / sheet7_1["验证集占比"]
        )
        # BUGFIX: psi_bad previously compared the validation bad-rate column
        # with itself ((验证 - 验证) * log(验证/验证)), which is always 0.
        # It must compare train vs. validation, mirroring the equal-frequency
        # computation below.
        sheet7_1["psi_bad"] = (sheet7_1["训练坏客户占比"] - sheet7_1["验证坏客户占比"]) * np.log(
            sheet7_1["训练坏客户占比"] / sheet7_1["验证坏客户占比"]
        )
        # Equal-frequency bins: reuse the percentile `breaks` computed on the
        # training scores so train and OOT are binned identically.
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(df2.score, bins=breaks, right=False)
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge with the train-side equal-frequency table (`dt`) and compute PSI.
        sheet7_2 = dt.merge(df2, how="outer", on="score")
        sheet7_2 = sheet7_2.fillna(0)
        sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["验证集占比"]) * np.log(
            sheet7_2["训练集占比"] / sheet7_2["验证集占比"]
        )
        sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["验证坏客户占比"]) * np.log(
            sheet7_2["训练坏客户占比"] / sheet7_2["验证坏客户占比"]
        )
        # Titles and tables for the train-vs-validation section, written below
        # the train-vs-test section at `row_number`.
        title1 = pd.DataFrame(columns=["2.训练&验证"])
        title1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number
        )
        title2_1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number + 1
        )
        title2_2.to_excel(
            table,
            sheet_name="7.模型稳定性评估",
            index=False,
            startrow=row_number + 1,
            startcol=12,
        )

        sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=row_number + 3)
        sheet7_2.to_excel(
            table, sheet_name="7.模型稳定性评估", startrow=row_number + 3, startcol=12
        )
    except Exception:
        # Best-effort: no OOT set (score_oot undefined) simply skips this
        # section.  Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 8. Variable importance
    # Rank model features by LightGBM importance, then (best-effort) attach
    # each feature's IV and missing-value ratio computed on data_total.
    feature_imp = pd.Series(model.feature_importances_)
    feature_name = pd.Series(model.booster_.feature_name())
    feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
    feature_df.sort_values('element', ascending=False, inplace=True)
    feature_df.set_index('feature_name', drop=True, inplace=True)
    try:
        iv = sc.iv(data_total[model.booster_.feature_name()+['flagy']],'flagy')
        missing_rate = data_total[model.booster_.feature_name()].apply(lambda x: x.isna().sum() / x.shape[0])
        iv.set_index('variable', drop=True, inplace=True)
        missing_rate = pd.DataFrame(missing_rate, columns=['缺失值占比'])

        # Align importance, IV and missing rate on the feature-name index.
        sheet_8 = pd.concat([feature_df, iv, missing_rate], axis=1)
        sheet_8 = sheet_8.reset_index()
        sheet_8.columns = ['变量','重要性','IV值','缺失值占比']
        sheet_8["序号"] = list(range(1, sheet_8.shape[0] + 1))
        sheet_8["解释"] = ""
        sheet_8 = sheet_8[["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"]]
    except:
        # NOTE(review): bare except — on any failure the sheet is emitted with
        # headers only; consider at least logging the error.
        sheet_8 = pd.DataFrame(columns=["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"])

    head.to_excel(table, sheet_name="8.变量重要性", index=False)
    title.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=1)
    sheet_8.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=2)
    # -------------------------------------------------------------------------------------------------------------------------
    # 9. Variable trends: WOE binning tables + bin plots for the top_n features.
    import matplotlib.pyplot as plt
    title = pd.DataFrame(columns = ["重要变量风险表现"])
    # Top-n features by importance (feature_df is sorted descending above).
    var = feature_df.index.tolist()[:top_n]
    data_bins = sc.woebin(data_total[var + [y]],y)
    sheet_9 = pd.concat(data_bins, ignore_index=True)[["variable", "bin","count", "count_distr", "bad", "badprob"]]
    title1 = pd.DataFrame(columns = ["序号","变量","分箱","区间数量","区间占比","区间坏客数","区间坏客率"])
    head.to_excel(table, sheet_name="9.变量趋势", index=False)
    title.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=1)
    title1.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=2)
    sheet_9.to_excel(
        table, sheet_name="9.变量趋势", index=False, startrow=3, startcol=1, header=False
    )
    # NOTE(review): indexing `book.sheetnames` by name assumes the xlsxwriter
    # engine (where sheetnames is a name->worksheet dict); with openpyxl this
    # would fail — confirm the writer engine.
    sheet = table.book.sheetnames["9.变量趋势"]
    
    # Save each bin plot to a PNG in the working directory and embed it in the
    # sheet, one plot every 12 rows starting at cell J3.
    bin_pict = sc.woebin_plot(data_bins)
    i = 3
    for pict in list(sheet_9.variable.unique()):
        bin_pict[pict].savefig(pict + ".png", bbox_inches="tight")
        sheet.insert_image(
            "J" + str(i), pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 12
    # -------------------------------------------------------------------------------------------------------------------------
    # 10. Sample risk-score distribution: per-bin counts, bad rates and KS,
    # for train+test combined, train alone, and test alone.
    title1 = pd.DataFrame(columns=["1、等高分布"])
    title2 = pd.DataFrame(columns=["分数整体分布情况-训练集"])
    title3 = pd.DataFrame(columns=["分数整体分布情况-测试集"])
    title4 = pd.DataFrame(columns=["分数整体分布情况-验证集"])
    title5 = pd.DataFrame(columns=["分数整体分布情况-训练集+测试集"])
    head.to_excel(table, sheet_name="10.样本风险评分分布", index=False)
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=1)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=19)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2,startcol=11)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55,startcol=11)
    # Equal-width bins ---------
    # Train + test combined.
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(df.score, bins=range(score_range[0], score_range[1] + 1, tick),right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    # Per-bin totals, bad rate, cumulative good/bad shares and KS
    # (KS = cumulative bad share - cumulative good share).
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3,startcol = 11)
    # Training set.
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3)
    # Test set.
    df = score_test[score_test[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=20)
    # Equal-frequency bins ---------
    # BUGFIX: section label was "1、等频分布", duplicating the "1、" of the
    # equal-width section above; renumbered to "2、" to match the
    # "1、等高 / 2、等频" convention used on sheet 11.
    title1 = pd.DataFrame(columns=["2、等频分布"])
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=54)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=77)
    # Train + test combined: percentile breaks are computed here on the
    # combined clipped scores; `breaks` is reused for the train-only,
    # test-only and OOT tables below so all are binned identically.
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    df.sort_values(by="score", ascending=True, inplace=True)
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(df.score.values, percent_list)
    # Deduplicate repeated percentiles, then pin the end points to the full
    # score range so every clipped score falls inside a right-open bin.
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    df["score"] = pd.cut(df.score, bins=breaks, right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    # Per-bin totals, bad rate, cumulative shares and KS.
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56,startcol = 11)
    # Training set, binned with the combined breaks.
    dt = score_train[score_train[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56)
    # Test set, binned with the combined breaks.
    dt = score_test[score_test[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=78)
    
    # 有验证集情况
    try:
        # 等高
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=37)
        df = score_oot[score_oot[y] != grey]
        df.loc[df.score < score_range[0], "score"] = score_range[0]
        df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
        df["score"] = pd.cut(
            df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df = df.fillna(0)
        df["区间人数"] = df[0] + df[1]
        df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
        df["区间坏客户率"] = df[1] / df["区间人数"]
        df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
        df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
        df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
        df.reset_index(inplace=True)
        df.rename(columns={"score": "评分区间"}, inplace=True)
        del df[0], df[1]
        df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=38)
        # 等频
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=104)
        dt = score_oot[score_oot[y] != grey]
        dt.sort_values(by="score", ascending=True, inplace=True)
        dt.loc[dt.score < score_range[0], "score"] = score_range[0]
        dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
        dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
        dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        dt = dt.fillna(0)
        dt["区间人数"] = dt[0] + dt[1]
        dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
        dt["区间坏客户率"] = dt[1] / dt["区间人数"]
        dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
        dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
        dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
        dt.reset_index(inplace=True)
        dt.rename(columns={"score": "评分区间"}, inplace=True)
        del dt[0], dt[1]
        dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=105)
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 11. Score decision table: cut-off analysis (pass rate, pass/reject bad
    # rates) on the full sample, including grey samples in the totals.
    head.to_excel(table, sheet_name="11.评分决策表", index=False)
    title1 = pd.DataFrame(columns=["1、等高"])
    title2 = pd.DataFrame(columns=["2、等频"])
    title3 = pd.DataFrame(columns=["评分决策表"])
    # Equal-width version.
    title1.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=1)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=2)
    
    # Re-score the full dataset with the fitted model and the A/B scaling.
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    # score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1 
    score_total["score"] = pd.cut(
        score_total.score,
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    # Descending score order: cumulative rows then represent "approve above
    # this score" cut-offs.
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        # Grey samples (label == grey) are optional.
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    # Cut-off metrics: pass rate, per-bin / average / approved / rejected
    # default rates, and relative default-rate reduction.
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=3)
    # 等频
    title2.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=25)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=26)
    
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    #score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=27)
    table.save()    
    
    return 1