# -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 19:41:06 2021

@author: chang.lu
"""

import lightgbm as lgb
import pandas as pd
import numpy as np
import re
import matplotlib.pylab as plt
from sklearn import metrics
from sklearn.metrics import mean_squared_error
import scorecardpy as sc
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from collections import Counter
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import os
import warnings
import datetime as dtt
warnings.filterwarnings("ignore")

def report(
    data_total, 
    data_train, 
    data_test,
    data_oot = None,
    model = None,
    y = 'flagy',
    filename='',
    points0 = 55,
    pdo = -10,
    odds0 = 0.1,
    grey = 2,
    score_range = (0,100),
    tick = 10,
    percent = 5,
    top_n = 10,
    **kwargs):
    """
    Build a multi-sheet Excel model report for a fitted LightGBM scorecard model.

    :param data_total: DataFrame, all training-period samples (no OOT), containing the
        model features, the label and ideally the application date (datetime preferred);
        grey customers are included
    :param data_train: DataFrame, training set with model features and label only, no grey customers
    :param data_test: DataFrame, test set with model features and label only, no grey customers
    :param data_oot: DataFrame, out-of-time validation set, default None, no grey customers
    :param model: the final fitted model (sklearn-API LightGBM estimator)
    :param y: str, label column name, default 'flagy'
    :param filename: str, report name prefix; 'report_<generation date>' is appended
        automatically, e.g. 'lgb_model_' -> 'lgb_model_report_2021_07_23'
    :param points0: int, base score, default 55
    :param pdo: int, points to double the odds, default -10
    :param odds0: float, base bad/good odds, default 0.1
    :param grey: int, float or str, value marking grey customers in the label, default 2
    :param score_range: tuple, lower/upper score bounds, e.g. (0, 100)
    :param tick: int or float, score step of the equal-width distribution, default 10
    :param percent: int or float, percentile step of the equal-frequency distribution,
        default 5, i.e. 5% quantiles
    :param top_n: int, number of top-importance variables whose binning plots are
        exported, default 10
    :param kwargs: extra options, e.g. user_date='user_date' -- name of the application
        date column present in data_total, used by report section 2 (sample analysis)
    """
    # Output file name: append 'report_' plus today's date.
    filename = filename + 'report_' + dtt.datetime.now().strftime('%Y_%m_%d')
    # Pull the model's feature list and keep only model features + label in each sample set.
    # NOTE(review): model=None is the declared default, but this line requires a fitted
    # estimator -- passing no model raises AttributeError here.
    var_final = model.booster_.feature_name()
    data_train = data_train[var_final + [y]].copy() # train set: model features + label only
    data_test = data_test[var_final + [y]].copy() # test set: model features + label only
    try:
        data_oot = data_oot[var_final + [y]].copy() # OOT set: model features + label only
    except:
        pass

    # Assemble the Excel report with the xlsxwriter engine (needed for insert_image below).
    table = pd.ExcelWriter(filename + '.xlsx',engine = 'xlsxwriter')
    # ------------------------------------------------------------------------------------------------------------------
    # Table-of-contents sheet: section number, Chinese name, English name, description.
    # NOTE(review): the TOC numbering drifts from the sheets actually written later
    # (e.g. no "单变量稳定性" sheet exists; sheet 8 is written as variable importance) -- confirm intended order.
    sheet = pd.DataFrame(
        columns=["编号", "中文简称", "英文简称", "内容"],
        data=[
            ["1", "模型使用说明", "Model_Explain", "模型使用说明"],
            ["2", "原始数据统计", "Original_Stat", "原始数据情况、建模数据选取、匹配百融数据说明"],
            ["3", "衍生特征构造", "Var_derivation", "衍生特征构造"],
            ["4", "数据预处理-格式转换", "Data_Pre_Format", "数据预处理-格式转换"],
            ["5", "模型参数", "Model_Params", "模型参数及评分参数设定"],
            ["6", "模型区分度评估", "Model_Disc", "模型区分度评估"],
            ["7", "模型稳定性评估", "Model_Stab", "模型稳定性评估"],
            ["8", "单变量稳定性", "Var_Stab", "单变量稳定性评估"],
            ["9", "变量重要性", "Var_Importance", "单变量重要性排序"],
            ["10", "变量趋势", "Var_trend", "重要变量趋势"],
            ["11", "样本风险评分分布", "Model_Score", "模型评分及风险表现"],
            ["12", "评分决策表", "Decision_table", "不同评分分段的通过率、违约率提升"],
        ],
    )
    sheet.to_excel(table, sheet_name="目录", startrow=0, startcol=0, index=False)
    # -------------------------------------------------------------------------------------------------------------------
    # Sheet 1: model usage notes (empty template to be filled in by hand).
    # 'head' ("返回目录" = back-to-TOC header) is reused as the first row of every sheet below.
    head = pd.DataFrame(columns=["返回目录"])
    sheet1 = pd.DataFrame(
        index=["版本名称", "模型类型", "客群种类", "该版本更新时间", "开发人员", "建模样本数据量", "模型变量数量", "核心算法"],
        columns=["内容"],
    )
    head.to_excel(table, sheet_name="1.模型使用说明", startrow=0, index=False)
    sheet1.to_excel(table, sheet_name="1.模型使用说明", startrow=1)
    # -------------------------------------------------------------------------------------------------------------------
    # Sheet 2: raw data statistics (hand-filled templates plus computed sample tables).
    head2_1 = pd.DataFrame(columns=["一、数据来源"])
    sheet2_1 = pd.DataFrame(
        index=[
            "机构",
            "产品类型",
            "业务开展时间",
            "引流渠道",
            "额度区间",
            "期数范围",
            "存量客户数量",
            "日进件量",
            "平均通过率",
            "审批流程",
            "审批使用数据",
        ],
        columns=["内容"],
    )
    head2_2 = pd.DataFrame(columns=["二、数据概要"])
    sheet2_2 = pd.DataFrame(
        index=[
            "客群描述",
            "观察期",
            "表现期",
            "原始样本时间",
            "原始样本量",
            "建模样本时间",
            "建模样本量",
            "验证样本时间",
            "验证样本量",
        ],
        columns=["内容"],
    )

    head2_3 = pd.DataFrame(columns=["三、好坏客户定义"])
    sheet2_3 = pd.DataFrame(columns=["客户类型", "定义方式", "样本量", "好坏客户定义描述"])
    sheet2_3["客户类型"] = ["坏客户", "灰客户", "好客户"]

    # Part 4: monthly sample counts / bad rates of the modeling data, excluding grey customers.
    head2_4 = pd.DataFrame(columns=["四、建模数据统计情况"])
    sheet2_4 = pd.DataFrame(columns=["年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    # NOTE(review): this tests kwargs *values*, so it only works with the documented
    # convention user_date='user_date'; testing "user_date" in kwargs (keys) would be safer.
    if "user_date" in kwargs.values():
        data_total['user_date'] = pd.to_datetime(data_total['user_date'], errors="coerce")
        temp = data_total.copy()
        temp["年"] = temp['user_date'].apply(lambda x: str(x.year) + "年")
        temp["月"] = temp['user_date'].apply(lambda x: str(x.month) + "月")
        temp = (
            temp[temp[y] != grey]
            .groupby(["年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        temp["比例"] = temp["数量"] / temp["数量"].sum()
        temp["坏账率"] = temp["坏数量"] / temp["数量"]
        temp["平均坏账率"] = temp["坏数量"].sum() / temp["数量"].sum()
        temp = temp[["年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_4 = temp.copy()

    # Part 5: the same breakdown split by train / test / OOT.
    head2_5 = pd.DataFrame(columns=["五、建模数据选取"])
    sheet2_5 = pd.DataFrame(columns=["类型", "年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    sheet2_5["类型"] = ["训练", "测试", "验证"]
    if "user_date" in kwargs.values():
        # Dates are pulled from data_total by index alignment -- assumes train/test kept
        # the original data_total index. TODO confirm the upstream split preserves it.
        data_train["user_date"] = data_total['user_date']
        data_test["user_date"] = data_total['user_date']
        data_train["类型"] = "训练"
        data_test["类型"] = "测试"
        try:
            data_oot["类型"] = "验证"
            # NOTE(review): self-assignment -- data_oot was trimmed to features + label
            # above, so 'user_date' is missing here and this read raises a KeyError that
            # the bare except silences (OOT rows then carry NaN dates into data_merge).
            data_oot["user_date"] = data_oot['user_date']
        except:
            pass
        data_merge = pd.concat([data_train, data_test, data_oot], axis=0, sort=False)
        data_merge["年"] = data_merge['user_date'].apply(lambda x: str(x.year))
        data_merge["月"] = data_merge['user_date'].apply(lambda x: str(x.month) + "月")
        data_merge = (
            data_merge.groupby(["类型", "年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        data_merge["比例"] = data_merge["数量"] / data_merge["数量"].sum()
        data_merge["坏账率"] = data_merge["坏数量"] / data_merge["数量"]
        data_merge["平均坏账率"] = data_merge["坏数量"].sum() / data_merge["数量"].sum()
        data_merge = data_merge[["类型","年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_5 = data_merge.copy()
        # Drop the helper columns so data_train/data_test are back to features + label.
        del data_train['user_date'],data_test['user_date'],data_train['类型'],data_test['类型']
        try:
            del data_oot['user_date'],data_oot['类型']
        except:
            pass

    # Part 6: dataset split summary; EAFP -- zero-fill the OOT row when data_oot is None.
    head2_6 = pd.DataFrame(columns=["六、数据集划分"])
    sheet2_6 = pd.DataFrame(columns=["数据量", "坏样本", "坏账率"], index=["训练集", "测试集", "验证集"])
    try:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], data_oot.shape[0]]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), data_oot[y].sum()]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]
    except:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], 0]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), 0]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]

    # Write all part tables at fixed row offsets.
    head.to_excel(table, sheet_name="2.原始数据统计", startrow=0, index=False)
    head2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=1, index=False)
    sheet2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=3, startcol=1)
    head2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=16, index=False)
    sheet2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=18, startcol=1)
    head2_3.to_excel(table, sheet_name="2.原始数据统计", startrow=29, index=False)
    sheet2_3.to_excel(
        table, sheet_name="2.原始数据统计", startrow=31, startcol=1, index= False
    )
    head2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=36, index=False)
    # NOTE(review): both branches of this if/else are identical -- the split is redundant.
    if sheet2_4.shape[0] == 0:
        sheet2_4.to_excel(
            table, sheet_name="2.原始数据统计", startrow=38, startcol=1, index=False
        )
    else:
        sheet2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=38, startcol=1,index=False)
    row_number = sheet2_4.shape[0] + 38 + 17
    head2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number, index=False)
    sheet2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number + 2, startcol=1,index = False)
    row_number1 = row_number + 2 + sheet2_5.shape[0] + 2
    head2_6.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number1, index=False)
    sheet2_6.to_excel(
        table, sheet_name="2.原始数据统计", startrow=row_number1 + 2, startcol=1
    )
    # ---------------------------------------------------------------------------------------------------
    # Sheet 3: derived-feature construction (empty template).
    sheet3 = pd.DataFrame(columns=["序号", "模块", "变量", "中文名", "数据来源", "衍生逻辑"])
    head.to_excel(table, sheet_name="3.衍生变量构造", index=False)
    sheet3.to_excel(table, sheet_name="3.衍生变量构造", startrow=2, index=False)
    # ----------------------------------------------------------------------------------------------------
    # Sheet 4: data preprocessing / format conversion (empty template).
    sheet4 = pd.DataFrame(columns=["序号", "变量", "数据源", "变量类型", "编码(转换)格式", "举例"])
    head.to_excel(table, sheet_name="4.数据预处理", index=False)
    sheet4.to_excel(table, sheet_name="4.数据预处理", startrow=2, index=False)
    # -----------------------------------------------------------------------------------------------------
    # Sheet 5: model parameters, parsed out of the estimator's repr string.
    # BUG(review): 'bst' is not defined in this function -- this only runs if a global
    # 'bst' happens to exist; it should almost certainly be str(model).
    col = list(str(bst).split(','))
    model_param = pd.DataFrame(col,columns = ['parms'])
    # NOTE(review): positional n=2 allows up to three split parts but only two columns are
    # assigned -- this raises if any parameter value itself contains '='. TODO confirm.
    model_param[['parms','参数值']] = model_param.parms.str.split('=',2,expand=True)
    model_param['parms'] = model_param['parms'].apply(lambda x : x.replace('\n',''))
    model_param['parms'] = model_param['parms'].apply(lambda x :str(x).strip())
    # Strip the leading class name ("LGBMClassifier(" is 15 chars) and the trailing ')'.
    # NOTE(review): hard-coded positions (15 chars, row index 13) are brittle against a
    # different estimator class or parameter count.
    model_param.iloc[0,0] = model_param.iloc[0,0][15:]
    model_param.iloc[13,1] = model_param.iloc[13,1][:-1]
    # Chinese descriptions for the known LightGBM parameter names.
    mapping = pd.DataFrame({'parms':['bagging_fraction','feature_fraction','bagging_freq','importance_type','learning_rate',
                                'max_bin','max_depth','min_data_in_leaf','n_estimators',
                                'num_leaves','objective','random_state','reg_alpha','reg_lambda'],
                            '模型参数':['每次抽取的样本比例','每次抽取的特征比例','装袋频率','重要性计算方式',
                               '学习率','最大分箱数量','最大树深','每个叶子节点的最少样本量',
                                '学习器数量','叶子结点数量','分类方式','随机参数','一阶惩罚项','二阶惩罚项']})
    model_param = pd.merge(model_param,mapping,how = 'left',on = 'parms')
    sheet5 = model_param[['parms','模型参数','参数值']].copy()

    head.to_excel(table, sheet_name="5.模型参数", index=False)
    sheet5.to_excel(table, sheet_name="5.模型参数", index=False, startrow=2)
    # -----------------------------------------------------------------------------------------------------
    # Sheet 6: model discrimination -- KS / AUC on train, test and (optionally) OOT.
    title = pd.DataFrame(columns=["模型区分度评估"])
    sheet6 = pd.DataFrame(columns=["评估指标", "训练集", "测试集", "验证集"])
    sheet6["评估指标"] = ["KS", "AUC"]
    # Predicted probability of the positive (bad) class.
    train_class_pred = model.predict_proba(data_train.drop(columns = [y]))[:,1]
    test_class_pred = model.predict_proba(data_test.drop(columns = [y]))[:,1]
    # EAFP: data_oot may be None; the bare except hides any other failure too.
    try:
        oot_class_pred = model.predict_proba(data_oot.drop(columns = [y]))[:,1]
    except:
        pass
    # Evaluate with scorecardpy: perf_eva returns KS, AUC and a matplotlib figure ("pic").
    train_perf = sc.perf_eva(data_train[y], train_class_pred, title="train")
    test_perf = sc.perf_eva(data_test[y], test_class_pred, title="test")
    # Figures are saved into the current working directory and embedded below.
    train_perf["pic"].savefig("train_KS_AUC.png", bbox_inches="tight")
    test_perf["pic"].savefig("test_KS_AUC.png", bbox_inches="tight")
    sheet6["训练集"] = [train_perf["KS"], train_perf["AUC"]]
    sheet6["测试集"] = [test_perf["KS"], test_perf["AUC"]]
    try:
        oot_perf = sc.perf_eva(data_oot[y], oot_class_pred, title="oot")
        oot_perf["pic"].savefig("oot_KS_AUC.png", bbox_inches="tight")
        sheet6["验证集"] = [oot_perf["KS"], oot_perf["AUC"]]
    except:
        pass
    # Headline / caption rows carried as DataFrame column headers.
    title1 = pd.DataFrame(
        columns=[
            "此次建模，训练样本KS={}，AUC={}，模型结果较理想，模型对好坏客户具有很好的区分度，且模型较稳定，达到建模预期目标".format(
                train_perf["KS"], train_perf["AUC"])])
    title2 = pd.DataFrame(columns=["训练集", "KS={}".format(train_perf["KS"])])
    title3 = pd.DataFrame(columns=["测试集", "KS={}".format(test_perf["KS"])])
    title.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=1)
    title1.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=8)
    title2.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=3)
    title3.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=11)
    try:
        title4 = pd.DataFrame(columns=["验证集", "KS={}".format(oot_perf["KS"])])
        title4.to_excel(
            table, sheet_name="6.模型区分度评估", index=False, startrow=10, startcol=19
        )
    except:
        pass

    head.to_excel(table, sheet_name="6.模型区分度评估", index=False)
    sheet6.to_excel(table, sheet_name="6.模型区分度评估", index=False, startrow=2)
    # Embed the KS/AUC curve images through the underlying xlsxwriter worksheet object.
    # NOTE(review): with the xlsxwriter engine, workbook.sheetnames maps name->worksheet;
    # writer.sheets[...] is the more conventional accessor -- confirm against the pandas version in use.
    sheet = table.book.sheetnames["6.模型区分度评估"]
    sheet.insert_image("A12", "train_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    sheet.insert_image("I12", "test_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    try:
        sheet.insert_image("Q12", "oot_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # Sheet 7: model stability -- PSI of the score distribution, train vs test (and vs OOT).
    title1 = pd.DataFrame(columns=["1.训练&测试"])
    title2_1 = pd.DataFrame(columns=["等间距", "模型样本量分布评估"])
    title2_2 = pd.DataFrame(columns=["等频", "模型样本量分布评估"])
    head.to_excel(table, sheet_name="7.模型稳定性评估", index=False)
    title1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=1)
    title2_1.to_excel(table, sheet_name="7.模型稳定性评估", index=False, startrow=2)
    title2_2.to_excel(
        table, sheet_name="7.模型稳定性评估", index=False, startrow=2, startcol=11
    )
    # Points/odds scaling: score = A - B * ln(odds).
    # With the default pdo=-10, B is negative, so a higher score means higher risk -- TODO confirm intended.
    B = pdo / np.log(2)
    A = points0 + B * np.log(odds0)
    score_train = np.around(A - B * np.log(train_class_pred/(1 - train_class_pred)))
    score_train = pd.DataFrame(score_train,index = data_train.index)
    # NOTE(review): label column hard-coded as 'flagy' here (and below) instead of the y
    # parameter -- breaks whenever the caller passes a different label name.
    score_train = pd.concat([data_train['flagy'],score_train],axis = 1).rename(columns = {0:'score'})
    score_test = np.around(A - B * np.log(test_class_pred/(1 - test_class_pred)))
    score_test = pd.DataFrame(score_test,index = data_test.index)
    score_test = pd.concat([data_test['flagy'],score_test],axis = 1).rename(columns = {0:'score'})
    # OOT scores only exist when oot_class_pred was computed above (NameError otherwise).
    try:
        score_oot = np.around(A - B * np.log(oot_class_pred/(1 - oot_class_pred)))
        score_oot = pd.DataFrame(score_oot,index = data_oot.index)
        score_oot = pd.concat([data_oot['flagy'],score_oot],axis = 1).rename(columns = {0:'score'})
    except:
        pass
    # Build the PSI tables --------------
    # Train & test -------
    # Equal-width score bins -----
    # Training set
    df = score_train[score_train[y] != grey]
    # Clamp scores into [score_range[0], score_range[1]) before binning.
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", y]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df.sort_index(ascending=True, inplace=True)
    # Columns 0/1 are the per-bin good/bad counts (assumes both label values occur).
    df["训练样本量"] = df[1] + df[0]
    df["训练集占比"] = df["训练样本量"] / df["训练样本量"].sum()
    df["训练坏客户数"] = df[1]
    df["训练坏客户占比"] = df["训练坏客户数"] / df["训练坏客户数"].sum()
    del df[0], df[1]
    # Test set
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1["score"] = pd.cut(
        df1.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    # NOTE(review): groupby key hard-codes "flagy" here while the train block above uses y.
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge and compute per-bin PSI: (p_train - p_test) * ln(p_train / p_test).
    # NOTE(review): zero-filled bins make the log term inf/NaN -- verify downstream handling.
    sheet7_1 = df.merge(df1, how="outer", on="score")
    sheet7_1 = sheet7_1.fillna(0)
    sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["测试集占比"]) * np.log(
        sheet7_1["训练集占比"] / sheet7_1["测试集占比"]
    )
    sheet7_1["psi_bad"] = (sheet7_1["训练坏客户占比"] - sheet7_1["测试坏客户占比"]) * np.log(
        sheet7_1["训练坏客户占比"] / sheet7_1["测试坏客户占比"]
    )

    # Equal-frequency score bins ------
    # Training set: percentile breakpoints are computed on train and reused for test/OOT.
    dt = score_train[score_train[y] != grey]
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt.sort_values(by="score", ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(dt.score.values, percent_list)
    # De-duplicate breakpoints (tied percentiles collapse bins) and pin both ends to score_range.
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt.sort_index(ascending=True, inplace=True)
    dt["训练样本量"] = dt[1] + dt[0]
    dt["训练集占比"] = dt["训练样本量"] / dt["训练样本量"].sum()
    dt["训练坏客户数"] = dt[1]
    dt["训练坏客户占比"] = dt["训练坏客户数"] / dt["训练坏客户数"].sum()
    del dt[0], dt[1]
    # Test set, binned with the train breakpoints.
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1.sort_values(by="score", ascending=True, inplace=True)
    df1["score"] = pd.cut(df1.score, bins=breaks, right=False)
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # Merge and compute PSI for the equal-frequency binning.
    sheet7_2 = dt.merge(df1, how="outer", on="score")
    sheet7_2 = sheet7_2.fillna(0)
    sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["测试集占比"]) * np.log(
        sheet7_2["训练集占比"] / sheet7_2["测试集占比"]
    )
    sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["测试坏客户占比"]) * np.log(
        sheet7_2["训练坏客户占比"] / sheet7_2["测试坏客户占比"]
    )

    sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4)
    sheet7_2.to_excel(table, sheet_name="7.模型稳定性评估", startrow=4, startcol=12)
    # Row offset for the train-vs-OOT section written below.
    row_number = max(sheet7_2.shape[0], sheet7_1.shape[0]) + 4 + 20 + 2

    # Train vs OOT section, only when a validation set exists (NameError on score_oot otherwise).
    try:
        # Equal-width bins
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(
            df2.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge against the equal-width TRAIN table (df, built earlier) and compute PSI.
        sheet7_1 = df.merge(df2, how="outer", on="score")
        sheet7_1 = sheet7_1.fillna(0)
        sheet7_1["psi"] = (sheet7_1["训练集占比"] - sheet7_1["验证集占比"]) * np.log(
            sheet7_1["训练集占比"] / sheet7_1["验证集占比"]
        )
        # BUG(review): both operands below are the OOT bad-share column, so psi_bad is
        # identically zero ((x - x) * log(x / x)); the first operand should presumably be
        # "训练坏客户占比", as in the equal-frequency computation further down.
        sheet7_1["psi_bad"] = (sheet7_1["验证坏客户占比"] - sheet7_1["验证坏客户占比"]) * np.log(
            sheet7_1["验证坏客户占比"] / sheet7_1["验证坏客户占比"]
        )
        # Equal-frequency bins (train breakpoints reused)
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(df2.score, bins=breaks, right=False)
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # Merge against the equal-frequency TRAIN table (dt) and compute PSI.
        sheet7_2 = dt.merge(df2, how="outer", on="score")
        sheet7_2 = sheet7_2.fillna(0)
        sheet7_2["psi"] = (sheet7_2["训练集占比"] - sheet7_2["验证集占比"]) * np.log(
            sheet7_2["训练集占比"] / sheet7_2["验证集占比"]
        )
        sheet7_2["psi_bad"] = (sheet7_2["训练坏客户占比"] - sheet7_2["验证坏客户占比"]) * np.log(
            sheet7_2["训练坏客户占比"] / sheet7_2["验证坏客户占比"]
        )
        title1 = pd.DataFrame(columns=["2.训练&验证"])
        title1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number
        )
        title2_1.to_excel(
            table, sheet_name="7.模型稳定性评估", index=False, startrow=row_number + 1
        )
        title2_2.to_excel(
            table,
            sheet_name="7.模型稳定性评估",
            index=False,
            startrow=row_number + 1,
            startcol=12,
        )

        sheet7_1.to_excel(table, sheet_name="7.模型稳定性评估", startrow=row_number + 3)
        sheet7_2.to_excel(
            table, sheet_name="7.模型稳定性评估", startrow=row_number + 3, startcol=12
        )
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # Sheet 8: variable importance, IV and missing rate for each model feature.
    feature_imp = pd.Series(model.feature_importances_)
    feature_name = pd.Series(model.booster_.feature_name())
    feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
    feature_df.sort_values('element', ascending=False, inplace=True)
    feature_df.set_index('feature_name', drop=True, inplace=True)
    try:
        # NOTE(review): label hard-coded as 'flagy' instead of the y parameter, and
        # data_total still contains grey-label rows (y == grey), which may distort IV -- confirm.
        iv = sc.iv(data_total[model.booster_.feature_name()+['flagy']],'flagy')
        missing_rate = data_total[model.booster_.feature_name()].apply(lambda x: x.isna().sum() / x.shape[0])
        iv.set_index('variable', drop=True, inplace=True)
        missing_rate = pd.DataFrame(missing_rate, columns=['缺失值占比'])

        sheet_8 = pd.concat([feature_df, iv, missing_rate], axis=1)
        sheet_8 = sheet_8.reset_index()
        sheet_8.columns = ['变量','重要性','IV值','缺失值占比']
        sheet_8["序号"] = list(range(1, sheet_8.shape[0] + 1))
        sheet_8["解释"] = ""
        sheet_8 = sheet_8[["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"]]
    except:
        # Fall back to an empty template if the IV computation fails.
        sheet_8 = pd.DataFrame(columns=["序号", "变量", "解释", "重要性", "IV值", "缺失值占比"])

    head.to_excel(table, sheet_name="8.变量重要性", index=False)
    # NOTE(review): 'title' still holds the sheet-6 caption ("模型区分度评估") at this point --
    # likely the wrong headline for this sheet.
    title.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=1)
    sheet_8.to_excel(table, sheet_name="8.变量重要性", index=False, startrow=2)
    # -------------------------------------------------------------------------------------------------------------------------
    # Sheet 9: binning trend (WOE bins + plots) for the top_n most important variables.
    import matplotlib.pyplot as plt  # NOTE(review): shadows the module-level pylab import and appears unused here
    title = pd.DataFrame(columns = ["重要变量风险表现"])
    var = feature_df.index.tolist()[:top_n]
    # NOTE(review): data_total still contains grey-label rows; sc.woebin expects a binary
    # target -- confirm grey rows should be filtered out first.
    data_bins = sc.woebin(data_total[var + [y]],y)
    sheet_9 = pd.concat(data_bins, ignore_index=True)[["variable", "bin","count", "count_distr", "bad", "badprob"]]
    title1 = pd.DataFrame(columns = ["序号","变量","分箱","区间数量","区间占比","区间坏客数","区间坏客率"])
    head.to_excel(table, sheet_name="9.变量趋势", index=False)
    title.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=1)
    title1.to_excel(table, sheet_name="9.变量趋势", index=False, startrow=2)
    sheet_9.to_excel(
        table, sheet_name="9.变量趋势", index=False, startrow=3, startcol=1, header=False
    )
    sheet = table.book.sheetnames["9.变量趋势"]

    # One binning plot per variable: saved to the working directory, embedded 12 rows apart.
    bin_pict = sc.woebin_plot(data_bins)
    i = 3
    for pict in list(sheet_9.variable.unique()):
        bin_pict[pict].savefig(pict + ".png", bbox_inches="tight")
        sheet.insert_image(
            "J" + str(i), pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 12
    # -------------------------------------------------------------------------------------------------------------------------
    # Sheet 10: score distribution and risk performance (equal-width then equal-frequency).
    # NOTE(review): the train / test / combined / OOT blocks below are near-identical
    # copy-paste -- a shared helper would remove the duplication.
    title1 = pd.DataFrame(columns=["1、等高分布"])
    title2 = pd.DataFrame(columns=["分数整体分布情况-训练集"])
    title3 = pd.DataFrame(columns=["分数整体分布情况-测试集"])
    title4 = pd.DataFrame(columns=["分数整体分布情况-验证集"])
    title5 = pd.DataFrame(columns=["分数整体分布情况-训练集+测试集"])
    head.to_excel(table, sheet_name="10.样本风险评分分布", index=False)
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=1)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=19)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=2,startcol=11)
    title5.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55,startcol=11)
    # Equal-width bins ---------
    # Train + test combined
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(df.score, bins=range(score_range[0], score_range[1] + 1, tick),right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    # Per-bin counts, bad rate, cumulative good/bad shares and the per-bin KS gap.
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3,startcol = 11)
    # Training set
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=3)
    # Test set
    df = score_test[score_test[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=20)
    # Equal-frequency bins ---------
    # NOTE(review): the header string says "1、等频分布" although this is the second
    # sub-section -- likely meant to read "2、等频分布".
    title1 = pd.DataFrame(columns=["1、等频分布"])
    title1.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=54)
    title2.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=55)
    title3.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=77)
    # Train + test combined: breakpoints recomputed on the combined scores (note: this
    # rebinds 'breaks', so the later OOT block uses these combined breakpoints).
    score_total = pd.concat([score_train,score_test],axis=0,sort=False)
    df = score_total[score_total[y] != grey]
    df.sort_values(by="score", ascending=True, inplace=True)
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(df.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    df["score"] = pd.cut(df.score, bins=breaks, right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56,startcol = 11)
    # Training set
    dt = score_train[score_train[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=56)
    # Test set
    dt = score_test[score_test[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=78)

    # OOT section, only when a validation set exists (NameError on score_oot otherwise).
    try:
        # Equal-width bins
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=37)
        df = score_oot[score_oot[y] != grey]
        df.loc[df.score < score_range[0], "score"] = score_range[0]
        df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
        df["score"] = pd.cut(
            df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df = df.fillna(0)
        df["区间人数"] = df[0] + df[1]
        df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
        df["区间坏客户率"] = df[1] / df["区间人数"]
        df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
        df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
        df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
        df.reset_index(inplace=True)
        df.rename(columns={"score": "评分区间"}, inplace=True)
        del df[0], df[1]
        df.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=38)
        # Equal-frequency bins (uses the combined train+test breakpoints computed above)
        title4.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=104)
        dt = score_oot[score_oot[y] != grey]
        dt.sort_values(by="score", ascending=True, inplace=True)
        dt.loc[dt.score < score_range[0], "score"] = score_range[0]
        dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
        dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
        dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        dt = dt.fillna(0)
        dt["区间人数"] = dt[0] + dt[1]
        dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
        dt["区间坏客户率"] = dt[1] / dt["区间人数"]
        dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
        dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
        dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
        dt.reset_index(inplace=True)
        dt.rename(columns={"score": "评分区间"}, inplace=True)
        del dt[0], dt[1]
        dt.to_excel(table, sheet_name="10.样本风险评分分布", index=False, startrow=105)
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 11.评分决策表
    head.to_excel(table, sheet_name="11.评分决策表", index=False)
    title1 = pd.DataFrame(columns=["1、等高"])
    title2 = pd.DataFrame(columns=["2、等频"])
    title3 = pd.DataFrame(columns=["评分决策表"])
    # 等高
    title1.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=1)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=2)
    
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    # score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1 
    score_total["score"] = pd.cut(
        score_total.score,
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=3)
    # 等频
    title2.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=25)
    title3.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=26)
    
    total_class_pred = model.predict_proba(data_total[var_final])[:,1]
    score_total = np.around(A - B * np.log(total_class_pred/(1 - total_class_pred)))
    score_total = pd.DataFrame(score_total,index = data_total.index)
    score_total = pd.concat([data_total['flagy'],score_total],axis = 1).rename(columns = {0:'score'})
    #score_total = pd.concat([score_train, score_test], axis=0, sort=False)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="11.评分决策表", index=False, startrow=27)
    table.save()    
    
    return 1

# Load the data splits and the trained LightGBM model, then generate the report.
if __name__ == "__main__":
    # BUG FIX: joblib was used below but never imported anywhere in this file,
    # which raised NameError at runtime. Imported locally to keep the script
    # self-contained.
    import joblib

    data_total = joblib.load('data_total.pkl')
    data_train = joblib.load('data_train.pkl')
    data_test = joblib.load('data_test.pkl')
    bst = joblib.load('lgb_model.pkl')

    report(data_total,
           data_train,
           data_test,
           data_oot=None,          # no out-of-time validation set
           model=bst,
           y='flagy',
           filename='1',
           points0=55,
           pdo=-10,
           odds0=0.1,
           grey=2,
           score_range=(0, 100),
           tick=10,                # BUG FIX: was 'ticks=10' — silently swallowed by **kwargs
           percent=5,
           top_n=10,
           user_date='user_date')  # BUG FIX: was 'user_data' — report() reads 'user_date'








