# 0.导入包
import pandas as pd
import numpy as np
import os
import scorecardpy as sc
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn import tree
import re 
import seaborn as sns
from matplotlib import pyplot as plt
from joblib import Parallel, delayed
import statsmodels.api as sm
from pylab import *
import joblib
import warnings
warnings.filterwarnings('ignore')

%matplotlib inline
# pd.set_option('display.max_rows', None)
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese (CJK) axis labels correctly
plt.rcParams['axes.unicode_minus']=False # render minus signs correctly with the CJK font

## 0.1 Helper functions
import logging

# DEBUG-level logging with timestamps; note the next lines immediately
# disable all logging output again (the debug call is a smoke test).
logging.basicConfig(
    level=logging.DEBUG, format=" %(asctime)s - %(levelname)s - %(message)s"
)
logging.debug("HHHH")
logging.disable(logging.CRITICAL)  # silences every logging call at or below CRITICAL

def flag_hit(dat):
    """Per-product hit (match) rate over the `flag_*` columns.

    For every column whose name starts with ``flag_``, values 98/99 are
    treated as "no hit" (recoded to 0) and the number / share of rows equal
    to 1 is reported.

    Parameters
    ----------
    dat : pd.DataFrame
        Input data; not mutated (a copy is taken).

    Returns
    -------
    pd.DataFrame indexed by flag column name with columns
    '匹配数量' (hit count) and '匹配率' (hit rate, formatted percent).
    """
    dat = dat.copy()  # FIX: the original recoded 98/99 in the caller's frame
    # re.match('flag_', c) in the original is an anchored prefix match
    flag_cols = [c for c in dat.columns if c.startswith('flag_')]
    for c in flag_cols:
        dat[c] = dat[c].replace([98, 99], 0)
    total = len(dat)
    # FIX: counting via (== 1).sum() yields 0 for columns with no hits,
    # where apply(pd.value_counts).T[1] produced NaN (or raised).
    hits = (dat[flag_cols] == 1).sum()
    df = pd.DataFrame(index=hits.index)
    df['匹配数量'] = hits.values
    df['匹配率'] = ['{:.2%}'.format(r) for r in hits / total]
    return df

def var_ks(df_data, col_var, flagy):
    """Univariate AUC and KS for each candidate variable.

    Parameters
    ----------
    df_data : pd.DataFrame containing the candidate columns and the label.
    col_var : iterable of column names to evaluate as scores.
    flagy : str, name of the binary label column.

    Returns
    -------
    pd.DataFrame with columns ['var', 'auc', 'ks'], sorted by KS descending.
    """
    # FIX: these metrics were never imported anywhere in the file; the bare
    # `except: pass` silently swallowed the NameError, so the result was
    # always empty.
    from sklearn.metrics import roc_auc_score, roc_curve

    rows = []
    for var in col_var:
        try:
            auc = roc_auc_score(y_true=df_data[flagy], y_score=df_data[var])
            fpr, tpr, _ = roc_curve(y_true=df_data[flagy], y_score=df_data[var])
            ks = max(abs(tpr - fpr))
            rows.append([var, auc, ks])
        except (ValueError, TypeError):
            # skip non-numeric / constant columns instead of failing the batch
            continue
    result = pd.DataFrame(rows, columns=['var', 'auc', 'ks']).sort_values(
        by='ks', ascending=False
    )
    return result
    
def cate_var_transform(X, Y):
    """Ordinal-encode every object-dtype column of X by its bad rate.

    For each categorical column the categories are ranked by bad rate
    (share of rows with Y == 1); the riskiest category gets the smallest
    code (1) and the safest the largest (number of categories). Missing
    values stay missing in the output.

    Parameters
    ----------
    X : pd.DataFrame of features (mixed dtypes).
    Y : pd.Series binary label, aligned with X's index.

    Returns
    -------
    (X_transformed, object_transfer_rule):
        X_transformed : numeric columns unchanged + encoded object columns.
        object_transfer_rule : dict {column: rule DataFrame} with columns
            'raw data', 'transform data', 'bad rate'.
    """
    d_type = X.dtypes
    object_var = X.iloc[:, np.where(d_type == "object")[0]]
    num_var = X.iloc[:, np.where(d_type != "object")[0]]

    # object_transfer_rule records the numeric mapping per categorical column
    object_transfer_rule = {}

    # FIX: carry X's index; the original used a default RangeIndex, so the
    # final concat misaligned whenever X had a non-default index.
    object_transform = pd.DataFrame(np.zeros(object_var.shape),
                                    columns=object_var.columns,
                                    index=X.index)

    for i in range(0, len(object_var.columns)):
        temp_var = object_var.iloc[:, i]

        # distinct non-missing category values
        unique_value = np.unique(temp_var.iloc[np.where(~temp_var.isna())[0]])

        transform_rule = pd.concat(
            [pd.DataFrame(unique_value, columns=['raw data']),
             pd.DataFrame(np.zeros([len(unique_value), 2]),
                          columns=['transform data', 'bad rate'])], axis=1)
        for j in range(0, len(unique_value)):
            bad_num = len(np.where((Y == 1) & (temp_var == unique_value[j]))[0])
            all_num = len(np.where(temp_var == unique_value[j])[0])

            # guard against division by zero (kept from the original)
            if all_num == 0:
                all_num = 0.5
            transform_rule.iloc[j, 2] = 1.0000000 * bad_num / all_num

        # sort by bad rate; riskiest category receives the lowest code
        transform_rule = transform_rule.sort_values(by='bad rate')
        transform_rule.iloc[:, 1] = list(range(len(unique_value), 0, -1))

        # save the rule, then apply it to the column
        object_transfer_rule.update({object_var.columns[i]: transform_rule})
        for k in range(0, len(unique_value)):
            transfer_value = transform_rule.iloc[
                np.where(transform_rule.iloc[:, 0] == unique_value[k])[0], 1]
            # FIX: .iloc[0] — float(Series) is deprecated scalar extraction
            object_transform.iloc[
                np.where(temp_var == unique_value[k])[0], i
            ] = float(transfer_value.iloc[0])
        # cells never assigned (i.e. missing in the input) become NaN;
        # safe because assigned codes are always >= 1
        object_transform.iloc[
            np.where(object_transform.iloc[:, i] == 0)[0], i] = np.nan

    X_transformed = pd.concat([num_var, object_transform], axis=1)
    return (X_transformed, object_transfer_rule)

def psi(train, test, psi_comb):
    """Population Stability Index between train and test per column.

    Parameters
    ----------
    train : pd.DataFrame, training set.
    test : pd.DataFrame, test set.
    psi_comb : float, PSI threshold.

    Returns
    -------
    list of column names whose PSI is below `psi_comb` (columns whose PSI
    is NaN — e.g. a category present in only one of the sets — are dropped).
    """
    # FIX: the inner accumulator no longer shadows the function name, and
    # the docstring now matches the single return value.
    psi_values = {}
    for col in train.columns:
        train_prop = train[col].value_counts(normalize=True, dropna=False)
        test_prop = test[col].value_counts(normalize=True, dropna=False)
        stat = np.sum((train_prop - test_prop) * np.log(train_prop / test_prop))
        psi_values[col] = [np.round(stat, decimals=4)]
    psi_df = pd.DataFrame(psi_values, columns=psi_values.keys()).T
    psi_df.columns = ['psi']
    keep_psi = list(psi_df[psi_df.psi < psi_comb].index)
    return keep_psi

def stepwise_selection(X, y,
                       initial_list=None,
                       threshold_in=0.01,
                       threshold_out=0.05,
                       verbose=True):
    """Perform a forward-backward feature selection
    based on p-values of a binomial GLM from statsmodels.api.

    Arguments:
        X - pandas.DataFrame with candidate features
        y - list-like with the binary target
        initial_list - list of features to start with (column names of X);
            default None means start empty. (Was a mutable default `[]`.)
        threshold_in - include a feature if its p-value < threshold_in
        threshold_out - exclude a feature if its p-value > threshold_out
        verbose - whether to print the sequence of inclusions and exclusions
    Returns: list of selected features
    Always set threshold_in < threshold_out to avoid infinite looping.
    See https://en.wikipedia.org/wiki/Stepwise_regression for the details
    """
    included = list(initial_list) if initial_list is not None else []
    while True:
        changed = False
        # forward step: fit a model per excluded feature, add the best one
        excluded = list(set(X.columns) - set(included))
        new_pval = pd.Series(index=excluded, dtype=float)
        for new_column in excluded:
            model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included + [new_column]])),
                           family=sm.families.Binomial()).fit()
            new_pval[new_column] = model.pvalues[new_column]
        best_pval = new_pval.min()  # NaN when `excluded` is empty -> branch skipped
        if best_pval < threshold_in:
            # FIX: idxmin instead of index[argmin] (Series.argmin is positional
            # only in modern pandas and was deprecated as a label lookup)
            best_feature = new_pval.idxmin()
            included.append(best_feature)
            changed = True
            if verbose:
                print('Add  {:30} with p-value {:.6}'.format(best_feature, best_pval))

        # backward step: drop the worst included feature if above threshold_out
        model = sm.GLM(y, sm.add_constant(pd.DataFrame(X[included])),
                       family=sm.families.Binomial()).fit()
        # use all coefs except the intercept
        pvalues = model.pvalues.iloc[1:]
        worst_pval = pvalues.max()  # NaN if pvalues is empty -> branch skipped
        if worst_pval > threshold_out:
            changed = True
            worst_feature = pvalues.idxmax()
            included.remove(worst_feature)
            if verbose:
                print('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval))
        if not changed:
            break
    return included

def myWOEbin(
    Data,
    Y,
    max_leaf_num=6, # maximum number of bins (leaf nodes)
    min_woe_box_percent=0.05, # minimum leaf sample share (of the whole frame)
    min_woe_box_num_min=100, # absolute floor for leaf size; the larger of the two constraints is used
    special_values=None,
    method = 'tree',):
    """Search binning split points for every column of Data.

    For each column, either a depth-limited entropy decision tree
    (method='tree', thresholds parsed from the graphviz dump) or
    equal-frequency percentiles (method='freq') produce the split points.
    Missing values and `special_values` are excluded from the search.

    Returns a pd.Series indexed by Data's columns whose values are sorted
    lists of split points.
    NOTE(review): any method other than 'tree'/'freq' leaves `split_num`
    undefined and raises NameError — callers must pass a valid method.
    """
    try:
        Data = Data.reset_index(drop=1)
        Y = Y.reset_index(drop=1)
    except Exception as e:
        print("输入数据非数据框格式，请调整为数据框！")

    # every bin must hold at least min_woe_box_percent of the rows (tunable),
    # but never fewer than min_woe_box_num_min rows
    min_woe_box_num = int(Data.shape[0] * min_woe_box_percent)
    if min_woe_box_num < min_woe_box_num_min:
        min_woe_box_num = min_woe_box_num_min

    var_num = len(Data.columns)

    # var_splitpoint stores the split points found for each variable
    var_splitpoint = list(np.zeros([var_num, 1]))
    if special_values:
        special_values = list(special_values)
    else:
        special_values = []
    for i in range(0, var_num):
        # only non-missing, non-special values take part in the binning
        temp_var = Data.iloc[:, i]
        NonNan_position = np.where(
            ~(temp_var.isna()) & ~(temp_var.isin(special_values))
        )[0]
        max_leaf = max_leaf_num
        # at most max_leaf leaf nodes
        if method == 'tree':
            groupdt = tree.DecisionTreeClassifier(
                criterion="entropy",
                min_samples_leaf=min_woe_box_num,
                max_leaf_nodes=max_leaf,
            )

            groupdt.fit(
                np.array(Data.iloc[NonNan_position, i]).reshape(-1, 1),
                Y.iloc[NonNan_position],
            )
            dot_data = tree.export_graphviz(
                groupdt,
                out_file=None,
            )
            # parse the "<= x" thresholds out of the graphviz text dump
            pattern = re.compile("<= (.*?)\\\\nentropy", re.S)
            split_num = re.findall(pattern, dot_data)
        elif method == 'freq':
            # equal-frequency split points; the 0th percentile is dropped below
            split_num = np.percentile(Data.iloc[NonNan_position, i], np.linspace(0,100, max_leaf, endpoint=False), 
                                     interpolation='linear')
            split_num = split_num[1:]

        splitpoint = [float(j) for j in split_num]
        final_splitpoint = sorted(list(set(splitpoint)))
        try:
            # drop the first split if no value falls below it (it would
            # create an empty lowest bin); IndexError here means no splits
            if Data.iloc[NonNan_position, i].min() >= final_splitpoint[0]:
                final_splitpoint = final_splitpoint[1:]
        except:
            pass
        var_splitpoint[i] = final_splitpoint

    var_splitpoint = pd.Series(var_splitpoint, index=Data.columns)
    return var_splitpoint

def single_woe_cal(data, y, SingleBreak, special_values=None):
    """Compute the WOE/IV binning table for ONE variable.

    Parameters
    ----------
    data : DataFrame with exactly two columns — the variable and the label.
    y : str, column name of the binary label.
    SingleBreak : list of numeric split points (extended with +/-inf here).
    special_values : value or list of values binned individually
        (missing values always are, under the key 'nan').

    Returns
    -------
    dict {variable_name: binning DataFrame} with columns
    variable/bin/count/count_distr/good/bad/badprob/woe/bin_iv/total_iv/breaks.
    Infinite WOE values (a bin with only goods or only bads) are capped
    at +/-9999.
    """
    print("data变量: {}".format(data.columns), data.shape)
    SingleBreak = sorted(list(set([-np.inf] + SingleBreak + [np.inf])))
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    v = [i for i in data.columns.to_list() if i != y]
    if len(v) != 1:
        raise Exception("Input array must contain only one explanatory variable")
    # basic info
    total_num = data.shape[0]
    total_bad = data[y].sum()
    total_good = total_num - total_bad
    # CUT and group: split special values (incl. NaN) away from regular rows
    if not isinstance(special_values, list):
        special_values = [special_values]
    data1 = data[data[v[0]].isin(list(special_values) + [np.nan, None])]
    data = data[~data[v[0]].isin(list(special_values) + [np.nan, None])]

    tempdata = data.assign(bin=pd.cut(data[v[0]], SingleBreak, right=False))

    temptable = (
        tempdata.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )

    # complete the bin description
    temptable["variable"] = v[0]
    temptable["count_distr"] = temptable["count"] / total_num
    temptable["good"] = temptable["count"] - temptable["bad"]
    temptable["breaks"] = SingleBreak[1:]
    temptable["bad_pct"] = temptable["bad"] / total_bad
    temptable["good_pct"] = (temptable["count"] - temptable["bad"]) / total_good
    temptable["woe"] = np.log(temptable["bad_pct"] / temptable["good_pct"])
    temptable['woe'] = temptable['woe'].replace(np.inf, 9999)    # bin with no goods
    temptable['woe'] = temptable['woe'].replace(-np.inf, -9999)  # bin with no bads
    temptable["bin_iv"] = (temptable["bad_pct"] - temptable["good_pct"]) * temptable[
        "woe"
    ]
    # same computations for the special-value bins (one bin per value)
    #     data1[v[0]] = data1[v[0]].astype(str)
    data1 = data1.fillna("nan")
    data1 = data1.rename(columns={v[0]: "bin"})
    temptable1 = (
        data1.groupby("bin")
        .agg(["count", np.mean, np.sum])[y]
        .rename(columns={"count": "count", "mean": "badprob", "sum": "bad"})
    )
    temptable1["variable"] = v[0]
    temptable1["count_distr"] = temptable1["count"] / total_num
    temptable1["good"] = temptable1["count"] - temptable1["bad"]
    temptable1["breaks"] = temptable1.index
    temptable1["bad_pct"] = temptable1["bad"] / total_bad
    temptable1["good_pct"] = (temptable1["count"] - temptable1["bad"]) / total_good
    temptable1["woe"] = np.log(temptable1["bad_pct"] / temptable1["good_pct"])
    temptable1['woe'] = temptable1['woe'].replace(np.inf, 9999)
    temptable1['woe'] = temptable1['woe'].replace(-np.inf, -9999)
    temptable1["bin_iv"] = (
        temptable1["bad_pct"] - temptable1["good_pct"]
    ) * temptable1["woe"]

    # special-value bins first, then the regular interval bins
    single_bin = pd.concat([temptable1, temptable], axis=0, sort=False)
    single_bin["total_iv"] = single_bin["bin_iv"].sum()
    single_bin.reset_index(inplace=True)
    single_bin = {
        v[0]: single_bin[
            [
                "variable",
                "bin",
                "count",
                "count_distr",
                "good",
                "bad",
                "badprob",
                "woe",
                "bin_iv",
                "total_iv",
                "breaks",
            ]
        ]
    }
    return single_bin

def sdf_woebin(
    data,
    y,
    breaks_list=None,
    max_leaf_num=6,
    min_woe_box_percent=0.01,
    min_woe_box_num_min=100,
    special_values=None,
    method = 'tree'):
    """Bin every feature of `data` and return a dict of WOE binning tables.

    data : DataFrame holding the features plus the label column.
    y : str, name of the label column.
    breaks_list : optional dict {var: [split points]} that overrides the
        automatically found splits for those variables.
    Remaining parameters are forwarded to myWOEbin.
    Returns {variable: binning DataFrame} as produced by single_woe_cal.
    """
    if y not in data.columns:
        raise Exception("label not found in data:", y)
    # automatic split-point search (decision tree or equal frequency)
    breaks = myWOEbin(
        data.drop(columns=y),
        data[y],
        max_leaf_num=max_leaf_num,
        min_woe_box_percent=min_woe_box_percent,
        min_woe_box_num_min=min_woe_box_num_min,
        special_values=special_values,
        method=method,
    ).to_dict()
    # user-supplied breaks take precedence; special values and the "nan"
    # marker must never appear as split points
    if breaks_list:
        if isinstance(special_values, list):
            forbidden = set(special_values) | {"nan"}
        else:
            forbidden = {special_values, "nan"}
        breaks.update(
            {var: list(set(pts) - forbidden) for var, pts in breaks_list.items()}
        )
    # per-variable WOE/IV computation in parallel
    woe_tables = Parallel(n_jobs=-1)(
        delayed(single_woe_cal)(
            data[[y, var]], y, breaks[var], special_values=special_values
        )
        for var in list(breaks)
    )
    # merge the per-variable single-entry dicts into one
    merged = {}
    for tbl in woe_tables:
        merged.update(tbl)
    return merged

def _single_transform(series, trans_dict):
    series.rename(series.name + "_woe", inplace=True)
    normal_dict = {
        i: j for i, j in trans_dict.items() if type(i) == pd._libs.interval.Interval
    }
    special_dict = {
        i: j for i, j in trans_dict.items() if type(i) != pd._libs.interval.Interval
    }
    series1 = series[series.isin(list(special_dict) + [np.nan])]
    series2 = series[~series.isin(list(special_dict) + [np.nan])]
    #     try:
    #         special_dict.update(dict([(i, trans_dict[i]) for i in special_values]))
    #     except:
    #         print("Warning: No special_values get")
    series1 = series1.fillna("nan")
    result = pd.concat([series1.map(special_dict), series2.map(normal_dict)], axis=0)
    return result

def sdf_woebin_ply(data, bins, n_jobs=-1):
    """Replace raw column values with their WOE values.

    Parameters
    ----------
    data : pd.DataFrame, untransformed data.
    bins : dict {variable: binning DataFrame} as returned by sdf_woebin;
        each table must have 'bin' and 'woe' columns.
    n_jobs : number of CPUs for joblib.Parallel.

    Returns
    -------
    pd.DataFrame with the transformed '<var>_woe' columns plus any columns
    of `data` that have no entry in `bins` (passed through unchanged).

    Raises
    ------
    Exception if `bins` references a variable missing from `data`.
    """
    # per-variable {bin -> woe} lookup tables
    Woe_dict = dict(
        [
            (i, dict(j[["bin", "woe"]].to_dict(orient="split")["data"]))
            for i, j in bins.items()
        ]
    )
    keys = list(Woe_dict)
    DiffElement = list(set(keys) - set(data.columns))
    Untransformed = list(set(data.columns) - set(keys))
    # FIX: dropped the unused `datacopy` and the bare try/except around this
    # selection (a plain column subset of existing labels cannot fail)
    data_untrans = data[Untransformed]
    if len(DiffElement) != 0:
        raise Exception("These variables aren't included in data:", DiffElement)
    if len(Untransformed) != 0:
        if len(Untransformed) >= 5:
            print(
                "These variables will not be transformed:",
                Untransformed[:5],
                "list goes on",
            )
        else:
            # FIX: the original printed "list goes on" here too, even though
            # the list is complete
            print("These variables will not be transformed:", Untransformed)
    parallel_res = Parallel(n_jobs=n_jobs)(
        delayed(_single_transform)(data[v], Woe_dict[v]) for v in keys
    )
    data_transformed = pd.concat(parallel_res + [data_untrans], axis=1)
    return data_transformed

def sdf_woebin_plot(bins):
    """Plot each binning table: stacked good/bad share bars plus a bad-rate line.

    bins : dict {variable: binning DataFrame} as produced by sdf_woebin
        (needs columns bin/count/count_distr/good/bad/badprob/total_iv).
    Returns {variable: matplotlib Axes} for the bar axis of each figure.
    """
    plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus']=False # render minus signs correctly
    figure = {}
    for col in bins:
        #         plt.figure(figsize=(100, 70))
        f, ax1 = plt.subplots(figsize=(8, 6))
        ax1.set_ylabel("占比")
        ax1.set_title(col + ", iv={}".format(round(bins[col].iloc[0]["total_iv"], 6)))
        plt.xticks(
            list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8
        )
        
        # stacked bar: good share (green) below, bad share (red) on top
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            label="占比",
            color="green",
        )
        ax1.bar(
            list(range(len(bins[col]))),
            bins[col]["count_distr"] * bins[col]["bad"] / bins[col]["count"],
            bottom=bins[col]["count_distr"] * bins[col]["good"] / bins[col]["count"],
            color="red",
        )

    
        # annotate each bar with its share and row count
        for a, b in zip(range(len(bins[col])), bins[col]["count_distr"]):
            c = str(round(b * 100, 2)) + "%"
            plt.text(a, b * 1.02, c + ' '+ str(bins[col].loc[a, 'count']), ha="center", va="baseline", fontsize=10)
        plt.rcParams["axes.grid"] = False
        
        # secondary axis: bad rate per bin
        ax2 = ax1.twinx()
        ax2.set_ylabel("坏客率")
        ax2.plot(
            range(len(bins[col])),
            bins[col]["badprob"],
            label="坏客率",
            marker="o",
            markerfacecolor="white",
            markersize=8,
            color="blue",
        )
        
        for a, b in zip(range(len(bins[col])), bins[col]["badprob"]):
            c = str(round(b * 100, 2)) + "%"
            ax2.text(a, b, c, ha="center", va="top", fontsize=10, color="blue")
        plt.xticks(
            list(range(len(bins[col]))), bins[col]["bin"], rotation=45, fontsize=8
        )
        ax1.legend(loc="upper right", bbox_to_anchor=(1, 0.9))
        ax2.legend(loc=1)
        figure.update({col: ax1})
    return figure

def mannual_breaks(pr_bins, data, ylabel="flagy", special_values=None):
    """Interactively review and adjust binning break points.

    INPUT:
        pr_bins: bins generated through the auto-binning process
        data: data in original (untransformed) form
        ylabel: name of the label column, in str form
        special_values: forwarded to sdf_woebin when re-binning
    OUTPUT:
        all_breaklist: dict {variable: accepted break points}
        droplist: variables the reviewer chose to drop
    """

    import matplotlib.pyplot as plt

    xname = list(pr_bins)
    i = 0
    all_breaklist = {}
    droplist = []
    while i < (len(pr_bins)):
        b = xname[i]
        p1 = sdf_woebin_plot({b: pr_bins[b]})
        plt.show(p1)
        print(
            i,
            "/",
            len(pr_bins),
            "current splitting points:",
            list(pr_bins[b]["breaks"]),
        )
        print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
        if_adj = input("1:next    2. yes    3.back  4.drop  5.quit\n")
        new_breaks = {}
        # FIX: initialize so a failed adjustment cannot leave breaks_output
        # unbound when the user then chooses "save and next"
        breaks_output = None
        while if_adj == "2":
            print("pls enter the new breaks:")
            try:
                new_breaks_points = input().split(",")
                new_breaks = {b: sorted([float(x) for x in new_breaks_points])}
                bins_adj_temp = sdf_woebin(
                    data.loc[:, [b, ylabel]],
                    y=ylabel,
                    breaks_list=new_breaks,
                    special_values=special_values,
                )
                p2 = sdf_woebin_plot(bins_adj_temp)
                plt.show(p2)
                # string break labels (special values) sort to the front
                breaks_output = sorted(
                    list(bins_adj_temp[b]["breaks"]),
                    key=lambda x: -np.inf if isinstance(x, str) else x,
                )
                print("current splitting points:", breaks_output)
                print(("Adjust breaks for (%s)?\n 1.next\n 2.yes\n 3.back" % b))
                if_adj = input("1:save and next    2. yes    3.back  4.drop")
            except Exception as e:
                print(e)
                print("Error while adjusting", b)
                if_continue = input("to continue? \n 1.yes 2.next 3.quit")
                if if_continue == "1":
                    continue
                elif if_continue == "2":
                    # FIX: was `if_adj == "1"` — a no-op comparison, so
                    # choosing "next" after an error looped forever
                    if_adj = "1"
                elif if_continue == "3":
                    break
        if if_adj == "1":
            if b not in new_breaks.keys() or breaks_output is None:
                all_breaklist[b] = list(pr_bins[b]["breaks"])
            else:
                all_breaklist[b] = breaks_output
            i += 1
            continue

        if if_adj == "3":
            if i == 0:
                print('This is the first plot, "back" option forbidden')
                break
            else:
                i -= 1
        if if_adj == "4":
            droplist.append(b)
            i += 1
        if if_adj == "5":
            break
    print("Mannual adjustment completed")
    return all_breaklist, droplist

def sdf_single_scorecard_ply(dx, cardx, x):
    """Map one column of binned values to its scorecard points.

    dx : pd.Series whose values are the card's 'bin' entries (pd.Interval
        for regular bins, other values — e.g. 'nan' — for special bins).
    cardx : scorecard slice for this variable, with 'bin' and 'points' columns.
    x : variable name (kept for API compatibility; unused here).
    """
    point_map = dict(cardx[["bin", "points"]].to_dict("split")["data"])
    interval_points = {
        k: v for k, v in point_map.items()
        if isinstance(k, pd._libs.interval.Interval)
    }
    special_points = {
        k: v for k, v in point_map.items()
        if not isinstance(k, pd._libs.interval.Interval)
    }
    is_special = dx.isin(list(special_points))
    scored_special = dx[is_special].map(special_points)
    scored_regular = dx[~is_special].map(interval_points)
    return pd.concat([scored_special, scored_regular], axis=0)

def sdf_scorecard_ply(data, card, only_total_score=True, var_kp=None):
    """Apply a scorecard to raw data and return per-row scores.

    Parameters
    ----------
    data : raw pd.DataFrame (untransformed feature values).
    card : collection of per-variable scorecard DataFrames (must include a
        'basepoints' entry); concatenated into one table with columns
        variable/bin/points.
    only_total_score : if True drop the per-variable point columns and keep
        only the total 'score'.
    var_kp : optional column label(s) from `data` carried through unchanged.
    """
    cardx = pd.concat(card, ignore_index=True)
    dt = data.copy(deep=True)
    cols = list(cardx.variable.unique())
    cols.remove("basepoints")
    # missing raw values are matched against the card's 'nan' bin
    dt[cols] = dt[cols].fillna("nan")
    parallel_score = Parallel(n_jobs=-1)(
        delayed(sdf_single_scorecard_ply)(dt[v], cardx[cardx.variable == v], v)
        for v in cols
    )
    # FIX: narrowed the bare `except:`; only an absent/None var_kp should
    # trigger the fallback without the kept columns
    try:
        data_score = pd.concat(parallel_score + [data[var_kp]], axis=1)
    except (KeyError, TypeError):
        data_score = pd.concat(parallel_score, axis=1)
    data_score["score"] = (
        data_score.sum(axis=1)
        + cardx[cardx.variable == "basepoints"]["points"].values[0]
    )
    if only_total_score:
        data_score.drop(columns=cols, inplace=True)
    return data_score

def nan_iden_filter(df_, missing_limit, identical_limit, kp_var=None):
    """Drop columns whose missing rate or top-value concentration is too high.

    Parameters
    ----------
    df_ : pd.DataFrame to screen.
    missing_limit : float, maximum allowed share of missing values.
    identical_limit : float, maximum allowed share of the most frequent value.
    kp_var : optional list of columns exempt from screening and appended to
        the result unchanged.

    Returns
    -------
    pd.DataFrame with only the surviving columns (plus `kp_var`).
    Columns that are entirely missing yield a NaN identical rate and are
    dropped, matching the original `query` behavior.
    """
    df_1 = df_.copy()
    if kp_var:
        df_ = df_.drop(columns=kp_var)
    # share of missing values per column
    missing_rate = df_.isnull().mean()
    # share of the single most frequent (non-missing) value per column
    identical_rate = df_.apply(lambda a: a.value_counts().max() / a.size)
    # FIX: boolean masks instead of string-built `query`; dropped the unused
    # dt_var_del frame
    keep_mask = (missing_rate <= missing_limit) & (identical_rate <= identical_limit)
    var_sel = missing_rate.index[keep_mask].tolist()
    if kp_var:
        return df_1[var_sel + kp_var]
    return df_1[var_sel]

def bin_iv(df, bins_num=10, iv_limit=0.02, kp_var=None, y='flagy'):
    """Equal-frequency bin each feature and keep those with IV >= iv_limit.

    Parameters
    ----------
    df : pd.DataFrame containing the features and the label column.
    bins_num : number of quantile bins per feature (duplicates dropped).
    iv_limit : minimum information value to keep a feature.
    kp_var : optional list of columns exempt from screening and appended
        to the result unchanged.
    y : label column name (generalized; was hard-coded to 'flagy', which
        remains the default for backward compatibility).

    Returns
    -------
    pd.DataFrame with the surviving (original, unbinned) columns,
    `kp_var` (if given) and the label column.
    """
    df1 = df.copy()
    if kp_var:
        df = df.drop(columns=kp_var)
    # quantile-bin every feature, then score IV on the binned values
    df_bin = df.drop(columns=[y]).apply(
        lambda x: pd.qcut(x, bins_num, labels=None, duplicates='drop')
    )
    df_merge_y = pd.concat([df_bin, df[y]], axis=1)
    ivlist = iv(df_merge_y, y=y)
    iv_var = ivlist[ivlist.info_value >= iv_limit].variable.tolist()
    if kp_var:
        return df1[iv_var + kp_var + [y]]
    return df1[iv_var + [y]]

def iv(dt, y, x=None, positive='bad|1', order=True):
    """Information value of each x column against the binary label y.

    Parameters
    ----------
    dt : pd.DataFrame with the label and candidate columns.
    y : str or list with the label column name.
    x : optional str or list restricting which columns to evaluate;
        None means all non-label columns.
    positive : kept for API compatibility with scorecardpy; unused here.
    order : sort the result by info_value descending when True.

    Returns
    -------
    pd.DataFrame with columns ['variable', 'info_value'].

    Example: ivlist = iv(dat, y='creditability')
    """
    dt = dt.copy(deep=True)
    if isinstance(y, str):
        y = [y]
    # FIX: the `and x is not None` guard was redundant (a str is never None)
    if isinstance(x, str):
        x = [x]
    if x is not None:
        dt = dt[y + x]

    # candidate columns = everything except the label
    xs = list(set(dt.columns) - set(y))
    ivlist = pd.DataFrame({
        'variable': xs,
        'info_value': [iv_xy(dt[i], dt[y[0]]) for i in xs]
    }, columns=['variable', 'info_value'])
    if order:
        ivlist = ivlist.sort_values(by='info_value', ascending=False)
    return ivlist
    # ivlist = iv(dat, y='creditability')

def iv_xy(x, y):
    """Information value of a single feature `x` against binary label `y`.

    Values are compared as strings; missing entries become the 'missing'
    category. Zero good/bad counts are smoothed to 0.9 so the log term
    stays finite.
    """
    frame = pd.DataFrame({'x': x.astype('str'), 'y': y}).fillna('missing')
    # per-category good (y == 0) and bad (y == 1) counts
    counts = frame.groupby('x').apply(
        lambda g: pd.Series({'good': (g['y'] == 0).sum(),
                             'bad': (g['y'] == 1).sum()})
    )
    counts = counts.replace(0, 0.9)
    distr_bad = counts['bad'] / counts['bad'].sum()
    distr_good = counts['good'] / counts['good'].sum()
    contributions = (distr_bad - distr_good) * np.log(distr_bad / distr_good)
    return contributions.sum()

def object_var_del(df, num_limit=10):
    """Drop object-dtype columns with more than `num_limit` distinct values.

    Distinct values are counted excluding missing entries (as the original
    value_counts-based count did). Returns a new DataFrame.
    """
    object_columns = df.dtypes[df.dtypes == 'object'].index
    too_many = [col for col in object_columns if df[col].nunique() > num_limit]
    return df.drop(columns=too_many)

def isna(data):
    """Return the column labels of `data` that contain at least one missing value."""
    null_counts = pd.isna(data).sum()
    return null_counts.index[null_counts.values > 0]

def corr_iv(df, corr_limit=0.7):
    """Among highly correlated feature pairs, keep the one with higher IV.

    Parameters
    ----------
    df : pd.DataFrame with the features and a 'flagy' label column.
    corr_limit : absolute correlation above which one of a pair is dropped.

    Returns
    -------
    The pruned DataFrame. NOTE: columns are deleted from `df` in place
    (the caller's frame is mutated), preserving the original behavior.
    """
    corr = abs(df.drop(columns=["flagy"]).corr())
    # collect the upper-triangle pairs above the threshold
    corr_dict = {}
    for i in range(corr.shape[0] - 1):
        for j in range(i + 1, corr.shape[0]):
            if corr.iloc[i, j] > corr_limit:
                corr_dict[(corr.index[i], corr.columns[j])] = corr.iloc[i, j]
    iv_result = iv(df, "flagy")
    for name0, name1 in corr_dict.keys():
        # a member may already have been dropped by an earlier pair
        if name0 in df.columns and name1 in df.columns:
            # FIX: .iloc[0] — float(single-element Series) is deprecated
            name0_iv = iv_result.loc[iv_result['variable'] == name0, 'info_value'].iloc[0]
            name1_iv = iv_result.loc[iv_result['variable'] == name1, 'info_value'].iloc[0]
            if name0_iv > name1_iv:
                del df[name1]
            else:
                del df[name0]
    return df

def grey_score_high(dt_grey,card,score_range=(300, 1000),tick=50):
    """
    Score-distribution table over fixed-width score bands, including greys.

    :param dt_grey: sample including grey customers; must contain 'flagy'
        (0 = good, 1 = bad, 0.5 = grey)
    :param card: scorecard consumed by sdf_scorecard_ply
    :param score_range: (low, high) score interval; scores are clipped into it
    :param tick: width of each score band
    :return: DataFrame with per-band counts and cumulative pass/default stats
    """
    dt_grey = dt_grey.fillna(-99)
    score = sdf_scorecard_ply(dt_grey, card)
    # clip scores into [low, high) so every row falls into a band
    score[score <= score_range[0]] = score_range[0]
    score[score >= score_range[1]] = score_range[1] - 1
    score_total = pd.merge(score,dt_grey['flagy'],left_index = True,right_index = True)

    score_total["score"] = pd.cut(
            score_total.score,
            bins=range(score_range[0], score_range[1] + 1, tick),
            right=False,
        )
    # rows: score band, columns: flagy value (0 / 0.5 / 1)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    # cumulative stats run from the highest band downwards
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["灰"] = score_total[0.5]
    score_total["坏"] = score_total[1]
    score_total["总"] = score_total[0] + score_total[1] + score_total[0.5]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    # drop the raw flagy-valued columns, keep only the named ones
    del score_total[1], score_total[0],score_total[0.5]
    return score_total

def grey_score_freq(dt_grey, card, score_range=(300, 1000), percent=5):
    """
    Score-distribution table over equal-frequency score bands, including greys.

    :param dt_grey: sample including grey customers; must contain 'flagy'
        (0 = good, 1 = bad, 0.5 = grey)
    :param card: scorecard consumed by sdf_scorecard_ply
    :param score_range: (low, high) score interval; scores are clipped into it
    :param percent: percentile step for the equal-frequency bands (e.g. 5 -> 5%)
    :return: DataFrame with per-band counts and cumulative pass/default stats
    """
    dt_grey = dt_grey.fillna(-99)
    score = sdf_scorecard_ply(dt_grey, card)
    score[score <= score_range[0]] = score_range[0]
    score[score >= score_range[1]] = score_range[1] - 1
    score_total = pd.merge(score, dt_grey['flagy'], left_index=True, right_index=True)

    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1]
    # equal-frequency breakpoints every `percent` percentile, deduplicated
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = sorted(set(breaks))
    # FIX: the outer edges were hard-coded to 1000/300, ignoring score_range
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    # rows: score band, columns: flagy value (0 / 0.5 / 1)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    # cumulative stats run from the highest band downwards
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    score_total["灰"] = score_total[0.5]
    score_total["坏"] = score_total[1]

    score_total["总"] = score_total[0] + score_total[1] + score_total[0.5]

    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    # drop the raw flagy-valued columns, keep only the named ones
    del score_total[1], score_total[0], score_total[0.5]
    return score_total

import statsmodels.api as sm
import statsmodels.stats.outliers_influence as oi
import datetime as dtt
from sklearn.linear_model import LogisticRegression

def badrate_month(df):
    """Monthly sample count, bad count and bad rate (%).

    Parameters
    ----------
    df : pd.DataFrame with columns 'user_date' (parseable dates) and
        'flagy' (binary label). Not mutated.

    Returns
    -------
    pd.DataFrame indexed by (year, month) with columns
    'len' (samples), 'sum' (bads) and 'rate%'.
    """
    # FIX: work on a copy — the original overwrote the caller's
    # df['user_date'] and then assigned into a slice (SettingWithCopy)
    df_temp = df[['user_date', 'flagy']].copy()
    df_temp['user_date'] = pd.to_datetime(df_temp['user_date'])
    df_temp['year'] = df_temp['user_date'].apply(lambda d: str(d.year))
    df_temp['month'] = df_temp['user_date'].apply(lambda d: str(d.month) + '月')
    badrate_m = df_temp.groupby(['year', 'month'])['flagy'].agg([len, sum])
    badrate_m['rate%'] = badrate_m['sum'] / badrate_m['len'] * 100
    return badrate_m

def report(
    data_total,
    data_train,
    data_test,
    data_oot=None,
    y="flagy",
    breaks_list=None,
    filename="",
    points0=600,
    pdo=50,
    odds0=1 / 19,
    basepoints_eq0=False,
    special_values=None,
    grey=2,
    score_range=(300, 1000),
    tick=50,
    percent=5,
    **kwargs):
    """
    :param data_total: dataframe 所有训练样本，不含验证集，包含入模变量、标签及申请日期，申请日期最好为日期格式，有灰客户样本
    :param data_train: dataframe 训练集，只包含入模变量及标签,不含灰客户
    :param data_test: dataframe 测试集，只包含入模变量及标签， 不含灰客户
    :param data_oot: dataframe or None 验证集，默认为None，不含灰客户
    :param y: str 标签名，默认为'flagy'
    :param breaks_list: dict or None 入模变量分箱节点，如{'a':[-inf,1,5,10, inf], 'b': [-inf,2,6,9, inf], ...}
    :param filename: str 报告名，默认为''，输出名称自动加'report_报告生成日期'后缀，如'反欺诈_report20201229113158'
    :param points0: int 基准分，默认600
    :param pdo: int pdo，默认50
    :param odds: float 坏账比率， 默认1/19
    :param basepoints_eq0: Bool 是否使得截距项为0，值为True时，截距项评分平分到每个入模变量评分上，为False时不平分，默认为False
    :param special_values: None or list 变量分箱时设置的特殊值，特殊值单独为一箱，默认为None，即缺失值为特殊值
    :param grey: int, float or str 灰客户的取值标识，默认取2
    :param score_range: tuple 评分的上下限，如 (300, 1000)
    :param tick: int or float 评分分布的分数间隔，默认为50
    :param percent: int or float 评分等频分布的分位数间隔，默认为5，即5%分位数
    :param kwargs: 其他变量,如user_date='user_date'：申请日期名称，出现在data_total中，主要用于报告第2部分样本分析
    :return: 返回1
    """
    # 文件名
    filename = filename + "report_" + dtt.datetime.now().strftime("%Y%m%d%H%M%S")
    # 分箱计算woe
    if "user_date" in kwargs:
        var_final = data_train.drop(columns=[y, 'user_date']).columns.to_list()
    else:
        var_final = data_train.drop(columns=[y]).columns.to_list()

    bins = sdf_woebin(
        data_train[var_final + [y]],
        y,
        breaks_list=breaks_list,
        max_leaf_num=6,
        min_woe_box_percent=0.01,
        min_woe_box_num_min=100,
        special_values=special_values,
    )
    bins_test = sdf_woebin(
        data_test[var_final + [y]],
        y,
        breaks_list=breaks_list,
        max_leaf_num=6,
        min_woe_box_percent=0.01,
        min_woe_box_num_min=100,
        special_values=special_values,
    )
    bins_train1 = pd.concat(bins, ignore_index=True)
    bins_test1 = pd.concat(bins_test, ignore_index=True)
    try:
        bins_oot = sdf_woebin(
            data_oot[var_final + [y]],
            y,
            breaks_list=breaks_list,
            max_leaf_num=6,
            min_woe_box_percent=0.01,
            min_woe_box_num_min=100,
            special_values=special_values,
        )
        oot_woe = sdf_woebin_ply(data_oot[var_final + [y]], bins)
        bins_oot1 = pd.concat(bins_oot, ignore_index=True)
    except:
        pass
    train_woe = sdf_woebin_ply(data_train[var_final + [y]], bins)
    test_woe = sdf_woebin_ply(data_test[var_final + [y]], bins)
    lr = LogisticRegression(penalty="none", solver="newton-cg", n_jobs=-1)
    lr.fit(train_woe.drop(columns=[y]), train_woe[y])
    card = sc.scorecard(
        bins,
        lr,
        var_final,
        points0=points0,
        pdo=pdo,
        odds0=odds0,
        basepoints_eq0=basepoints_eq0,
    )
    # 进行excel报告内容整理
    table = pd.ExcelWriter(filename + ".xlsx", engine="xlsxwriter")
    # ------------------------------------------------------------------------------------------------------------------
    # 目录页
    sheet = pd.DataFrame(
        columns=["编号", "中文简称", "英文简称", "内容"],
        data=[
            ["1", "模型使用说明", "Model_Explain", "模型使用说明"],
            ["2", "原始数据统计", "Original_Stat", "原始数据统计"],
            ["3", "衍生特征构造", "Var_derivation", "衍生特征构造"],
            ["4", "数据预处理-格式转换", "Data_Pre_Format", "数据预处理-格式转换"],
            ["5", "候选变量", "Candidate_Vars", "经IV筛选、Lasso筛选、手动筛选后的剩余变量"],
            ["6", "模型参数", "Model_Params", "模型参数"],
            ["7", "变量相关系数", "VarSelect_Corr", "入模变量相关系数"],
            ["8", "模型区分度评估", "Model_Disc", "模型区分度评估"],
            ["9", "变量分箱", "Var_bin", "变量分箱"],
            ["10", "模型评分卡", "Scorecard", "变量分数及评分参数设定"],
            ["11", "单变量稳定性", "Var_Stab", "单变量稳定性评估"],
            ["12", "模型稳定性评估", "Model_Stab", "模型稳定性评估"],
            ["13", "样本风险评分分布", "Model_Score", "模型评分及风险表现"],
            ["14", "评分决策表", "Decision_table", "不同评分分段的通过率、违约率提升"],
        ],
    )
    sheet.to_excel(table, sheet_name="目录", startrow=0, startcol=0, index=False)
    # -------------------------------------------------------------------------------------------------------------------
    # 1.模型使用说明页
    head = pd.DataFrame(columns=["返回目录"])
    sheet1 = pd.DataFrame(
        index=["版本名称", "模型类型", "客群种类", "该版本更新时间", "开发人员", "建模样本数据量", "模型变量数量", "核心算法"],
        columns=["内容"],
    )
    head.to_excel(table, sheet_name="1.模型使用说明", startrow=0, index=False)
    sheet1.to_excel(table, sheet_name="1.模型使用说明", startrow=1)
    # -------------------------------------------------------------------------------------------------------------------
    # 2.原始数据统计页
    head2_1 = pd.DataFrame(columns=["一、数据来源"])
    sheet2_1 = pd.DataFrame(
        index=[
            "机构",
            "产品类型",
            "业务开展时间",
            "引流渠道",
            "额度区间",
            "期数范围",
            "存量客户数量",
            "日进件量",
            "平均通过率",
            "审批流程",
            "审批使用数据",
        ],
        columns=["内容"],
    )
    head2_2 = pd.DataFrame(columns=["二、数据概要"])
    sheet2_2 = pd.DataFrame(
        index=[
            "客群描述",
            "观察期",
            "表现期",
            "原始样本时间",
            "原始样本量",
            "建模样本时间",
            "建模样本量",
            "验证样本时间",
            "验证样本量",
        ],
        columns=["内容"],
    )

    head2_3 = pd.DataFrame(columns=["三、好坏客户定义"])
    sheet2_3 = pd.DataFrame(columns=["客户类型", "定义方式", "样本量", "好坏客户定义描述"])
    sheet2_3["客户类型"] = ["坏客户", "灰客户", "好客户"]

    head2_4 = pd.DataFrame(columns=["四、建模数据统计情况"])
    sheet2_4 = pd.DataFrame(columns=["年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    if "user_date" in kwargs:
        data_total['user_date'] = pd.to_datetime(data_total['user_date'], errors="coerce")
        temp = data_total.copy()
        temp["年"] = temp['user_date'].apply(lambda x: str(x.year))
        temp["月"] = temp['user_date'].apply(lambda x: str(x.month) + "月")
        temp = (
            temp[temp[y] != grey]
            .groupby(["年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        temp["比例"] = temp["数量"] / temp["数量"].sum()
        temp["坏账率"] = temp["坏数量"] / temp["数量"]
        temp["平均坏账率"] = temp["坏数量"].sum() / temp["数量"].sum()
        temp = temp[["年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_4 = temp.copy()

    head2_5 = pd.DataFrame(columns=["五、建模数据选取"])
    sheet2_5 = pd.DataFrame(columns=["类型", "年", "月", "数量", "比例", "坏数量", "坏账率", "平均坏账率"])
    sheet2_5["类型"] = ["训练", "测试", "验证"]
    if "user_date" in kwargs:
        data_train["user_date"] = pd.to_datetime(data_train["user_date"])
        data_test["user_date"] = pd.to_datetime(data_test["user_date"])
        data_train["类型"] = "训练"
        data_test["类型"] = "测试"
        try:
            data_oot["类型"] = "验证"
            data_oot["user_date"] = pd.to_datetime(data_oot["user_date"])
        except:
            pass
        data_merge = pd.concat([data_train, data_test, data_oot], axis=0, sort=False)
        data_merge["年"] = data_merge['user_date'].apply(lambda x: str(x.year))
        data_merge["月"] = data_merge['user_date'].apply(lambda x: str(x.month) + "月")
        data_merge = (
            data_merge.groupby(["类型", "年", "月"])[y]
            .agg([len, sum])
            .rename(columns={"len": "数量", "sum": "坏数量"})
            .reset_index()
        )
        data_merge["比例"] = data_merge["数量"] / data_merge["数量"].sum()
        data_merge["坏账率"] = data_merge["坏数量"] / data_merge["数量"]
        data_merge["平均坏账率"] = data_merge["坏数量"].sum() / data_merge["数量"].sum()
        data_merge = data_merge[["类型","年","月","数量", "比例", "坏数量", "坏账率", "平均坏账率"]]
        sheet2_5 = data_merge.copy()

    head2_6 = pd.DataFrame(columns=["六、数据集划分"])
    sheet2_6 = pd.DataFrame(columns=["数据量", "坏样本", "坏账率"], index=["训练集", "测试集", "验证集"])
    try:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], data_oot.shape[0]]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), data_oot[y].sum()]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]
    except:
        sheet2_6["数据量"] = [data_train.shape[0], data_test.shape[0], 0]
        sheet2_6["坏样本"] = [data_train[y].sum(), data_test[y].sum(), 0]
        sheet2_6["坏账率"] = sheet2_6["坏样本"] / sheet2_6["数据量"]

    head.to_excel(table, sheet_name="2.原始数据统计", startrow=0, index=False)
    head2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=1, index=False)
    sheet2_1.to_excel(table, sheet_name="2.原始数据统计", startrow=3, startcol=1)
    head2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=16, index=False)
    sheet2_2.to_excel(table, sheet_name="2.原始数据统计", startrow=18, startcol=1)
    head2_3.to_excel(table, sheet_name="2.原始数据统计", startrow=29, index=False)
    sheet2_3.to_excel(
        table, sheet_name="2.原始数据统计", startrow=31, startcol=1, index=False
    )
    head2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=36, index=False)
    if sheet2_4.shape[0] == 0:
        sheet2_4.to_excel(
            table, sheet_name="2.原始数据统计", startrow=38, startcol=1, index=False
        )
    else:
        sheet2_4.to_excel(table, sheet_name="2.原始数据统计", startrow=38, startcol=1)
    row_number = sheet2_4.shape[0] + 38 + 17
    head2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number, index=False)
    sheet2_5.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number + 2, startcol=1)
    row_number1 = row_number + 2 + sheet2_5.shape[0] + 2
    head2_6.to_excel(table, sheet_name="2.原始数据统计", startrow=row_number1, index=False)
    sheet2_6.to_excel(
        table, sheet_name="2.原始数据统计", startrow=row_number1 + 2, startcol=1
    )
    # ---------------------------------------------------------------------------------------------------
    # 3.衍生变量构造
    sheet3 = pd.DataFrame(columns=["序号", "模块", "变量", "中文名", "数据来源", "衍生逻辑"])

    head.to_excel(table, sheet_name="3.衍生变量构造", index=False)
    sheet3.to_excel(table, sheet_name="3.衍生变量构造", startrow=2, index=False)
    # ----------------------------------------------------------------------------------------------------
    # 4.数据预处理
    sheet4 = pd.DataFrame(columns=["序号", "变量", "数据源", "变量类型", "编码(转换)格式", "举例"])
    head.to_excel(table, sheet_name="4.数据预处理", index=False)
    sheet4.to_excel(table, sheet_name="4.数据预处理", startrow=2, index=False)
    # -----------------------------------------------------------------------------------------------------
    # 5.候选变量
    title = pd.DataFrame(columns=["候选变量"])
    try:
        sheet5 = pd.read_csv("iv_missing_rate.csv", encoding="utf-8")
        sheet5.columns = ["变量", "IV值", "缺失值占比"]
        sheet5.sort_values(by="IV值", ascending=False, inplace=True)
        sheet5["解释"] = ""
        sheet5["序号"] = list(range(1, sheet5.shape[0] + 1))
        sheet5 = sheet5[["序号", "变量", "解释", "IV值", "缺失值占比"]]
    except:
        sheet5 = pd.DataFrame(columns=["序号", "变量", "解释", "IV值", "缺失值占比"])

    tips = pd.DataFrame(
        columns=["tip"], data=["计算变量分箱IV值，选择IV值>=0.02的变量", "其中重要经验变量单独挑回"]
    )
    head.to_excel(table, sheet_name="5.候选变量", index=False)
    title.to_excel(table, sheet_name="5.候选变量", index=False, startrow=1)
    sheet5.to_excel(table, sheet_name="5.候选变量", index=False, startrow=2)
    tips.to_excel(
        table, sheet_name="5.候选变量", index=False, header=False, startrow=3, startcol=6
    )
    # -------------------------------------------------------------------------------------------------------
    # 6.模型参数
    # 统计检验
    model = sm.GLM(
        train_woe[y],
        exog=sm.add_constant(train_woe.drop(columns=["flagy"])),
        family=sm.families.Binomial(),
    ).fit()
    stats = pd.concat(
        [model.params, model.bse, model.tvalues, model.pvalues], axis=1, sort=False
    )
    stats.reset_index(drop=False, inplace=True)
    stats.columns = ["变量", "估计", "标准误差", "Z-Value", "P-Value"]
    stats["变量"] = stats["变量"].apply(lambda x: x.replace("_woe", ""))
    stats.set_index("变量", inplace=True)
    stats["序号"] = list(range(stats.shape[0]))
    try:
        iv_missing_rate = sheet5[sheet5.变量.isin(var_final)]
        del iv_missing_rate["序号"]
    except:
        iv_missing_rate = pd.DataFrame(columns=["变量", "解释", "IV值", "缺失值占比"])
    iv_missing_rate.set_index("变量", inplace=True)
    # VIF值
    xs = np.array(sm.add_constant(train_woe.drop(columns=[y])), dtype=np.float)
    xs_name = ["const"] + train_woe.drop(columns=[y]).columns.to_list()  # 需要求VIF的变量
    vif = pd.DataFrame(
        [
            {"变量": xs_name[i], "VIF": oi.variance_inflation_factor(xs, i)}
            for i in range(len(xs_name))
        ]
    )
    vif = vif[vif.变量 != "const"]
    vif["变量"] = vif["变量"].apply(lambda x: x.replace("_woe", ""))
    vif.set_index("变量", inplace=True)
    sheet6 = pd.concat([stats, vif, iv_missing_rate], axis=1)
    sheet6.reset_index(inplace=True)
    sheet6.rename(columns={"index": "变量"}, inplace=True)
    sheet6 = sheet6[
        ["序号", "变量", "解释", "估计", "标准误差", "Z-Value", "P-Value", "VIF", "IV值", "缺失值占比"]
    ]
    sheet6.sort_values(by="序号", ascending=True, inplace=True)

    head.to_excel(table, sheet_name="6.模型参数", index=False)
    title = pd.DataFrame(columns=["逻辑回归结果"])
    title.to_excel(table, sheet_name="6.模型参数", index=False, startrow=1)
    sheet6.to_excel(table, sheet_name="6.模型参数", index=False, startrow=2)
    # ------------------------------------------------------------------------------------------------
    # 7.变量相关系数
    sheet7_1 = pd.DataFrame(columns=["序号", "分箱方法"], data=[["1", "最优分箱"], ["2", "手动分箱"]])
    sheet7_2 = pd.DataFrame(
        np.corrcoef(train_woe.drop(columns=[y]), rowvar=False),
        columns=train_woe.drop(columns=[y]).columns,
        index=train_woe.drop(columns=[y]).columns,
    )
    title1 = pd.DataFrame(columns=["建模过程中所使用的分箱方法"])
    title2 = pd.DataFrame(columns=["多变量分析——相关系数"])
    title3 = pd.DataFrame(columns=["Pearson Correlation Coefficient"])
    head.to_excel(table, sheet_name="7.变量相关系数", index=False)
    title1.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=2)
    sheet7_1.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=3)
    title2.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=10)
    title3.to_excel(table, sheet_name="7.变量相关系数", index=False, startrow=11)
    sheet7_2.to_excel(table, sheet_name="7.变量相关系数", startrow=12)
    # ---------------------------------------------------------------------------------------------------
    # 8.模型区分度评估
    title = pd.DataFrame(columns=["模型区分度评估"])
    sheet8 = pd.DataFrame(columns=["评估指标", "训练集", "测试集", "验证集"])
    sheet8["评估指标"] = ["KS", "AUC"]
    # 计算KS、AUC
    # 预测概率
    train_pred = lr.predict_proba(train_woe.drop(columns=[y]))[:, 1]
    test_pred = lr.predict_proba(test_woe.drop(columns=[y]))[:, 1]
    try:
        oot_pred = lr.predict_proba(oot_woe.drop(columns=[y]))[:, 1]
    except:
        pass
    # 评估
    train_perf = sc.perf_eva(train_woe.flagy, train_pred, title="train")
    test_perf = sc.perf_eva(test_woe.flagy, test_pred, title="test")
    train_perf["pic"].savefig("train_KS_AUC.png", bbox_inches="tight")
    test_perf["pic"].savefig("test_KS_AUC.png", bbox_inches="tight")
    sheet8["训练集"] = [train_perf["KS"], train_perf["AUC"]]
    sheet8["测试集"] = [test_perf["KS"], test_perf["AUC"]]
    try:
        oot_perf = sc.perf_eva(oot_woe.flagy, oot_pred, title="oot")
        oot_perf["pic"].savefig("oot_KS_AUC.png", bbox_inches="tight")
        sheet8["验证集"] = [oot_perf["KS"], oot_perf["AUC"]]
    except:
        pass
    title1 = pd.DataFrame(
        columns=[
            "此次建模，训练样本KS={}，AUC={}，模型结果较理想，模型对好坏客户具有很好的区分度，且模型较稳定，达到建模预期目标".format(
                train_perf["KS"], train_perf["AUC"]
            )
        ]
    )
    title2 = pd.DataFrame(columns=["训练集", "KS={}".format(train_perf["KS"])])
    title3 = pd.DataFrame(columns=["测试集", "KS={}".format(test_perf["KS"])])
    title1.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=8)
    title2.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=3)
    title3.to_excel(
        table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=11
    )
    try:
        title4 = pd.DataFrame(columns=["验证集", "KS={}".format(oot_perf["KS"])])
        title4.to_excel(
            table, sheet_name="8.模型区分度评估", index=False, startrow=10, startcol=19
        )
    except:
        pass

    head.to_excel(table, sheet_name="8.模型区分度评估", index=False)
    sheet8.to_excel(table, sheet_name="8.模型区分度评估", index=False, startrow=2)
    # 曲线图
    sheet = table.book.sheetnames["8.模型区分度评估"]
    sheet.insert_image("A12", "train_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    sheet.insert_image("I12", "test_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    try:
        sheet.insert_image("Q12", "oot_KS_AUC.png", {"x_scale": 0.9, "y_scale": 0.9})
    except:
        pass
    # -----------------------------------------------------------------------------------------------------
    # 9.变量分箱
    title = pd.DataFrame(columns=["模型变量分箱及打分"])
    sheet9 = pd.merge(
        bins_train1[["variable", "bin", "count_distr", "badprob"]],
        bins_test1[["variable", "bin", "count_distr", "badprob"]],
        how="outer",
        on=["variable", "bin"],
    )
    sheet9["解释"] = ""
    try:
        sheet9 = pd.merge(
            sheet9,
            bins_oot1[["variable", "bin", "count_distr", "badprob"]],
            how="outer",
            on=["variable", "bin"],
        )
        sheet9.rename(
            columns={
                "count_distr_x": "训练集区间占比",
                "count_distr_y": "测试集区间占比",
                "count_distr": "验证集区间占比",
                "badprob_x": "训练集区间坏客率",
                "badprob_y": "测试集区间坏客率",
                "badprob": "验证集区间坏客率",
            },
            inplace=True,
        )
        title1 = pd.DataFrame(
            columns=[
                "序号",
                "名称",
                "解释",
                "分箱",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
                "验证集区间占比",
                "验证集区间坏客率",
            ]
        )
        sheet9 = sheet9[
            [
                "variable",
                "解释",
                "bin",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
                "验证集区间占比",
                "验证集区间坏客率",
            ]
        ]
        oot_pict = sdf_woebin_plot(bins_oot)
    except:
        sheet9.rename(
            columns={
                "count_distr_x": "训练集区间占比",
                "count_distr_y": "测试集区间占比",
                "badprob_x": "训练集区间坏客率",
                "badprob_y": "测试集区间坏客率",
            },
            inplace=True,
        )
        title1 = pd.DataFrame(
            columns=[
                "序号",
                "名称",
                "解释",
                "分箱",
                "训练集区间占比",
                "训练集区间坏客率",
                "测试集区间占比",
                "测试集区间坏客率",
            ]
        )
        sheet9 = sheet9[
            ["variable", "解释", "bin", "训练集区间占比", "训练集区间坏客率", "测试集区间占比", "测试集区间坏客率"]
        ]

    head.to_excel(table, sheet_name="9.变量分箱", index=False)
    title.to_excel(table, sheet_name="9.变量分箱", index=False, startrow=1)
    title1.to_excel(table, sheet_name="9.变量分箱", index=False, startrow=2)
    sheet9.to_excel(
        table, sheet_name="9.变量分箱", index=False, startrow=5, startcol=1, header=False
    )
    sheet = table.book.sheetnames["9.变量分箱"]
    train_pict = sdf_woebin_plot(bins)
    test_pict = sdf_woebin_plot(bins_test)

    i = 3
    for pict in list(sheet9.variable.unique()):
        train_pict[pict].get_figure().savefig(
            "train_" + pict + ".png", bbox_inches="tight"
        )
        sheet.insert_image(
            "L" + str(i), "train_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 20
    i = 3
    for pict in list(sheet9.variable.unique()):
        test_pict[pict].get_figure().savefig(
            "test_" + pict + ".png", bbox_inches="tight"
        )
        sheet.insert_image(
            "T" + str(i), "test_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
        )
        i = i + 20
    try:
        i = 3
        for pict in list(sheet9.variable.unique()):
            oot_pict[pict].get_figure().savefig(
                "oot_" + pict + ".png", bbox_inches="tight"
            )
            sheet.insert_image(
                "AB" + str(i), "oot_" + pict + ".png", {"x_scale": 0.6, "y_scale": 0.6}
            )
            i = i + 20
    except:
        pass
    # ----------------------------------------------------------------------------------------------------------
    # 10.模型评分卡
    title1 = pd.DataFrame(columns=["模型变量分箱及打分"])
    title2 = pd.DataFrame(columns=["序号", "名称", "变量解释", "分箱", "系数", "woe", "评分"])
    title2["序号"] = ["0"]
    title2["名称"] = ["Intercept"]
    title2["变量解释"] = ["截距"]
    sheet10 = bins_train1[["variable", "bin", "woe"]]
    card_plain = pd.concat(card, ignore_index=True)
    sheet10 = pd.merge(
        sheet10, sheet6[["变量", "估计"]], how="inner", left_on="variable", right_on="变量"
    )
    sheet10 = pd.merge(sheet10, card_plain, how="right", on=["variable", "bin"])
    sheet10["解释"] = ""
    sheet10 = sheet10[["variable", "解释", "bin", "估计", "woe", "points"]]
    sheet10.columns = ["变量", "解释", "分箱", "系数", "woe", "评分"]
    title3 = pd.DataFrame(columns=["评分卡变量贡献度"])
    sheet10_1 = (
        card_plain.groupby("variable")["points"]
        .agg([max, min])
        .rename(columns={"max": "最大分值", "min": "最小分值"})
    )
    sheet10_1["贡献度"] = (sheet10_1["最大分值"] - sheet10_1["最小分值"]) / (
        sheet10_1["最大分值"] - sheet10_1["最小分值"]
    ).sum()
    sheet10_1["中文名称"] = ""
    sheet10_1.reset_index(inplace=True)
    sheet10_1 = sheet10_1[["variable", "中文名称", "最大分值", "最小分值", "贡献度"]]
    title4 = pd.DataFrame(columns=["评分卡刻度计算方法"])
    sheet10_2 = pd.DataFrame(
        {"a": ["odds0", "basepoints", "pdo"], "b": [odds0, points0, pdo]}
    )
    head.to_excel(table, sheet_name="10.模型评分卡", index=False)
    title1.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1)
    title2.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=2)
    sheet10.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=4, startcol=1, header=False
    )
    title3.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1, startcol=8)
    sheet10_1.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=2, startcol=8
    )
    title4.to_excel(table, sheet_name="10.模型评分卡", index=False, startrow=1, startcol=14)
    sheet10_2.to_excel(
        table, sheet_name="10.模型评分卡", index=False, startrow=2, startcol=14
    )
    # -------------------------------------------------------------------------------------------------------------------------
    # 11.单变量稳定性评估
    head.to_excel(table, sheet_name="11.单变量稳定性评估", index=False)
    title1 = pd.DataFrame(columns=["单变量稳定性评估"])
    title1.to_excel(table, sheet_name="11.单变量稳定性评估", index=False, startrow=1)
    title1.to_excel(
        table, sheet_name="11.单变量稳定性评估", index=False, startrow=1, startcol=9
    )
    bins_traintest = pd.merge(
        bins_train1[["variable", "bin", "count", "count_distr"]],
        bins_test1[["variable", "bin", "count", "count_distr"]],
        how="outer",
        on=["variable", "bin"],
    )
    bins_traintest["PSIvar"] = (
        bins_traintest["count_distr_x"] - bins_traintest["count_distr_y"]
    ) * np.log(bins_traintest["count_distr_x"] / bins_traintest["count_distr_y"])
    bins_traintest = bins_traintest.assign(
        PSI_ALL=bins_traintest.groupby("variable")["PSIvar"].transform(sum)
    )
    bins_traintest = bins_traintest[
        [
            "variable",
            "bin",
            "count_x",
            "count_distr_x",
            "count_y",
            "count_distr_y",
            "PSIvar",
            "PSI_ALL",
        ]
    ]
    bins_traintest.rename(
        columns={
            "variable": "变量名称",
            "bin": "分箱",
            "count_x": "样本量(训练)",
            "count_distr_x": "占比(训练)",
            "count_y": "样本量(测试)",
            "count_distr_y": "占比(测试)",
        },
        inplace=True,
    )
    bins_traintest.to_excel(table, sheet_name="11.单变量稳定性评估", index=False, startrow=3)
    try:
        bins_trainoot = pd.merge(
            bins_train1[["variable", "bin", "count", "count_distr"]],
            bins_oot1[["variable", "bin", "count", "count_distr"]],
            how="outer",
            on=["variable", "bin"],
        )
        bins_trainoot["PSIvar"] = (
            bins_trainoot["count_distr_x"] - bins_trainoot["count_distr_y"]
        ) * np.log(bins_trainoot["count_distr_x"] / bins_trainoot["count_distr_y"])
        bins_trainoot = bins_trainoot.assign(
            PSI_ALL=bins_trainoot.groupby("variable")["PSIvar"].transform(sum)
        )
        bins_trainoot = bins_trainoot[
            [
                "variable",
                "bin",
                "count_x",
                "count_distr_x",
                "count_y",
                "count_distr_y",
                "PSIvar",
                "PSI_ALL",
            ]
        ]
        bins_trainoot.rename(
            columns={
                "variable": "变量名称",
                "bin": "分箱",
                "count_x": "样本量(训练)",
                "count_distr_x": "占比(训练)",
                "count_y": "样本量(验证)",
                "count_distr_y": "占比(验证)",
            },
            inplace=True,
        )
        bins_trainoot.to_excel(
            table, sheet_name="11.单变量稳定性评估", index=False, startrow=3, startcol=11
        )
    except:
        pass
    # -------------------------------------------------------------------------------------------------------------------------
    # 12.模型稳定性评估
    title1 = pd.DataFrame(columns=["1.训练&测试"])
    title2_1 = pd.DataFrame(columns=["等间距", "模型样本量分布评估"])
    title2_2 = pd.DataFrame(columns=["等频", "模型样本量分布评估"])
    head.to_excel(table, sheet_name="12.模型稳定性评估", index=False)
    title1.to_excel(table, sheet_name="12.模型稳定性评估", index=False, startrow=1)
    title2_1.to_excel(table, sheet_name="12.模型稳定性评估", index=False, startrow=2)
    title2_2.to_excel(
        table, sheet_name="12.模型稳定性评估", index=False, startrow=2, startcol=11
    )
    score_train = sdf_scorecard_ply(data_train, card, var_kp=[y])
    score_test = sdf_scorecard_ply(data_test, card, var_kp=[y])
    try:
        score_oot = sdf_scorecard_ply(data_oot, card, var_kp=[y])
    except:
        pass
    # 生成表格--------------
    # 训练集&测试集-------
    # 等高分布-----
    # 训练集
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", y]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df.sort_index(ascending=True, inplace=True)
    df["训练样本量"] = df[1] + df[0]
    df["训练集占比"] = df["训练样本量"] / df["训练样本量"].sum()
    df["训练坏客户数"] = df[1]
    df["训练坏客户占比"] = df["训练坏客户数"] / df["训练坏客户数"].sum()
    del df[0], df[1]
    # 测试集
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1["score"] = pd.cut(
        df1.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # 并表并计算psi
    sheet12_1 = df.merge(df1, how="outer", on="score")
    sheet12_1 = sheet12_1.fillna(0)
    sheet12_1["psi"] = (sheet12_1["训练集占比"] - sheet12_1["测试集占比"]) * np.log(
        sheet12_1["训练集占比"] / sheet12_1["测试集占比"]
    )
    sheet12_1["psi_bad"] = (sheet12_1["训练坏客户占比"] - sheet12_1["测试坏客户占比"]) * np.log(
        sheet12_1["训练坏客户占比"] / sheet12_1["测试坏客户占比"]
    )

    # 等频分布------
    # 训练集
    dt = score_train[score_train[y] != grey]
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt.sort_values(by="score", ascending=True, inplace=True)
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(dt.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt.sort_index(ascending=True, inplace=True)
    dt["训练样本量"] = dt[1] + dt[0]
    dt["训练集占比"] = dt["训练样本量"] / dt["训练样本量"].sum()
    dt["训练坏客户数"] = dt[1]
    dt["训练坏客户占比"] = dt["训练坏客户数"] / dt["训练坏客户数"].sum()
    del dt[0], dt[1]
    # 测试集
    df1 = score_test[score_test[y] != grey]
    df1.loc[df1.score < score_range[0], "score"] = score_range[0]
    df1.loc[df1.score >= score_range[1], "score"] = score_range[1] - 1
    df1.sort_values(by="score", ascending=True, inplace=True)
    df1["score"] = pd.cut(df1.score, bins=breaks, right=False)
    df1 = df1.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df1 = df1.fillna(0)
    df1.sort_index(ascending=True, inplace=True)
    df1["测试样本量"] = df1[1] + df1[0]
    df1["测试集占比"] = df1["测试样本量"] / df1["测试样本量"].sum()
    df1["测试坏客户数"] = df1[1]
    df1["测试坏客户占比"] = df1["测试坏客户数"] / df1["测试坏客户数"].sum()
    del df1[0], df1[1]
    # 并表并计算psi
    sheet12_2 = dt.merge(df1, how="outer", on="score")
    sheet12_2 = sheet12_2.fillna(0)
    sheet12_2["psi"] = (sheet12_2["训练集占比"] - sheet12_2["测试集占比"]) * np.log(
        sheet12_2["训练集占比"] / sheet12_2["测试集占比"]
    )
    sheet12_2["psi_bad"] = (sheet12_2["训练坏客户占比"] - sheet12_2["测试坏客户占比"]) * np.log(
        sheet12_2["训练坏客户占比"] / sheet12_2["测试坏客户占比"]
    )

    sheet12_1.to_excel(table, sheet_name="12.模型稳定性评估", startrow=4)
    sheet12_2.to_excel(table, sheet_name="12.模型稳定性评估", startrow=4, startcol=12)
    row_number = max(sheet12_2.shape[0], sheet12_1.shape[0]) + 4 + 20 + 2

    # 有验证集情况
    try:
        # 等高
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(
            df2.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # 并表并计算psi
        sheet12_1 = df.merge(df2, how="outer", on="score")
        sheet12_1 = sheet12_1.fillna(0)
        sheet12_1["psi"] = (sheet12_1["训练集占比"] - sheet12_1["验证集占比"]) * np.log(
            sheet12_1["训练集占比"] / sheet12_1["验证集占比"]
        )
        sheet12_1["psi_bad"] = (sheet12_1["验证坏客户占比"] - sheet12_1["验证坏客户占比"]) * np.log(
            sheet12_1["验证坏客户占比"] / sheet12_1["验证坏客户占比"]
        )
        # 等频
        df2 = score_oot[score_oot[y] != grey]
        df2.loc[df2.score < score_range[0], "score"] = score_range[0]
        df2.loc[df2.score >= score_range[1], "score"] = score_range[1] - 1
        df2["score"] = pd.cut(df2.score, bins=breaks, right=False)
        df2 = df2.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df2 = df2.fillna(0)
        df2.sort_index(ascending=True, inplace=True)
        df2["验证样本量"] = df2[1] + df2[0]
        df2["验证集占比"] = df2["验证样本量"] / df2["验证样本量"].sum()
        df2["验证坏客户数"] = df2[1]
        df2["验证坏客户占比"] = df2["验证坏客户数"] / df2["验证坏客户数"].sum()
        del df2[0], df2[1]
        # 并表并计算psi
        sheet12_2 = dt.merge(df2, how="outer", on="score")
        sheet12_2 = sheet12_2.fillna(0)
        sheet12_2["psi"] = (sheet12_2["训练集占比"] - sheet12_2["验证集占比"]) * np.log(
            sheet12_2["训练集占比"] / sheet12_2["验证集占比"]
        )
        sheet12_2["psi_bad"] = (sheet12_2["训练坏客户占比"] - sheet12_2["验证坏客户占比"]) * np.log(
            sheet12_2["训练坏客户占比"] / sheet12_2["验证坏客户占比"]
        )
        title1 = pd.DataFrame(columns=["2.训练&验证"])
        title1.to_excel(
            table, sheet_name="12.模型稳定性评估", index=False, startrow=row_number
        )
        title2_1.to_excel(
            table, sheet_name="12.模型稳定性评估", index=False, startrow=row_number + 1
        )
        title2_2.to_excel(
            table,
            sheet_name="12.模型稳定性评估",
            index=False,
            startrow=row_number + 1,
            startcol=12,
        )

        sheet12_1.to_excel(table, sheet_name="12.模型稳定性评估", startrow=row_number + 3)
        sheet12_2.to_excel(
            table, sheet_name="12.模型稳定性评估", startrow=row_number + 3, startcol=12
        )
    except:
        pass
    # -----------------------------------------------------------------------------------------------------------------
    # 13.样本风险评分分布
    title1 = pd.DataFrame(columns=["1、等高分布"])
    title2 = pd.DataFrame(columns=["分数整体分布情况-训练集"])
    title3 = pd.DataFrame(columns=["分数整体分布情况-测试集"])
    title4 = pd.DataFrame(columns=["分数整体分布情况-验证集"])
    head.to_excel(table, sheet_name="13.样本风险评分分布", index=False)
    title1.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=1)
    title2.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=2)
    title3.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=19)

    # 等高---------
    # 训练+测试
    score_total = pd.concat([score_train,score_test],axis = 0,sort = False)
    df = score_total[score_total[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=3,startcol=11)

    # 训练集
    df = score_train[score_train[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=3)
    # 测试集
    df = score_test[score_test[y] != grey]
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    df["score"] = pd.cut(
        df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
    )
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=20)
    # 等频
    title1 = pd.DataFrame(columns=["1、等频分布"])
    title1.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=54)
    title2.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=55)
    # 训练+测试
    score_total = pd.concat([score_train,score_test],axis = 0,sort = False)
    df = score_total[score_total[y] != grey]
    df.sort_values(by="score", ascending=True, inplace=True)
    df.loc[df.score < score_range[0], "score"] = score_range[0]
    df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(df.score.values, percent_list)
    breaks = list(set(breaks))
    breaks = sorted(breaks)
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    df["score"] = pd.cut(df.score, bins=breaks, right=False)
    df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    df = df.fillna(0)
    df["区间人数"] = df[0] + df[1]
    df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
    df["区间坏客户率"] = df[1] / df["区间人数"]
    df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
    df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
    df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
    df.reset_index(inplace=True)
    df.rename(columns={"score": "评分区间"}, inplace=True)
    del df[0], df[1]
    df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=56, startcol=11)
    # 训练集
    dt = score_train[score_train[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=56)
    # 测试集
    dt = score_test[score_test[y] != grey]
    dt.sort_values(by="score", ascending=True, inplace=True)
    dt.loc[dt.score < score_range[0], "score"] = score_range[0]
    dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
    dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
    dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
    dt = dt.fillna(0)
    dt["区间人数"] = dt[0] + dt[1]
    dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
    dt["区间坏客户率"] = dt[1] / dt["区间人数"]
    dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
    dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
    dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
    dt.reset_index(inplace=True)
    dt.rename(columns={"score": "评分区间"}, inplace=True)
    del dt[0], dt[1]
    dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=78)

    # 有验证集情况
    try:
        # 等高
        title4.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=37)
        df = score_oot[score_oot[y] != grey]
        df.loc[df.score < score_range[0], "score"] = score_range[0]
        df.loc[df.score >= score_range[1], "score"] = score_range[1] - 1
        df["score"] = pd.cut(
            df.score, bins=range(score_range[0], score_range[1] + 1, tick), right=False
        )
        df = df.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        df = df.fillna(0)
        df["区间人数"] = df[0] + df[1]
        df["区间占比"] = df["区间人数"] / df["区间人数"].sum()
        df["区间坏客户率"] = df[1] / df["区间人数"]
        df["累计坏客户占比"] = df[1].cumsum() / df[1].sum()
        df["累计好客户占比"] = df[0].cumsum() / df[0].sum()
        df["好坏区分程度(ks)"] = df["累计坏客户占比"] - df["累计好客户占比"]
        df.reset_index(inplace=True)
        df.rename(columns={"score": "评分区间"}, inplace=True)
        del df[0], df[1]
        df.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=38)
        # 等频
        title4.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=104)
        dt = score_oot[score_oot[y] != grey]
        dt.sort_values(by="score", ascending=True, inplace=True)
        dt.loc[dt.score < score_range[0], "score"] = score_range[0]
        dt.loc[dt.score >= score_range[1], "score"] = score_range[1] - 1
        dt["score"] = pd.cut(dt.score, bins=breaks, right=False)
        dt = dt.groupby(by=["score", "flagy"]).size().unstack(level=[1], fill_value=0)
        dt = dt.fillna(0)
        dt["区间人数"] = dt[0] + dt[1]
        dt["区间占比"] = dt["区间人数"] / dt["区间人数"].sum()
        dt["区间坏客户率"] = dt[1] / dt["区间人数"]
        dt["累计坏客户占比"] = dt[1].cumsum() / dt[1].sum()
        dt["累计好客户占比"] = dt[0].cumsum() / dt[0].sum()
        dt["好坏区分程度(ks)"] = dt["累计坏客户占比"] - dt["累计好客户占比"]
        dt.reset_index(inplace=True)
        dt.rename(columns={"score": "评分区间"}, inplace=True)
        del dt[0], dt[1]
        dt.to_excel(table, sheet_name="13.样本风险评分分布", index=False, startrow=105)
    except:
        pass
    # ----------------------------------------------------------------------------------------------------------------
    # 14.评分决策表
    head.to_excel(table, sheet_name="14.评分决策表", index=False)
    title1 = pd.DataFrame(columns=["1、等高"])
    title2 = pd.DataFrame(columns=["2、等频"])
    title3 = pd.DataFrame(columns=["评分决策表"])
    # 等高
    title1.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=1)
    title3.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=2)
    score_total = pd.concat([score_train, score_test], axis=0, sort=False)
    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1
    score_total["score"] = pd.cut(
        score_total.score,
        bins=range(score_range[0], score_range[1] + 1, tick),
        right=False,
    )
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=3)
    # 等频
    title2.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=25)
    title3.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=26)
    # Pool train+test scores and clamp them into [score_range[0], score_range[1]-1].
    # BUG FIX: scores >= the upper bound were previously clamped to score_range[1]
    # itself; with right=False bins ending at score_range[1], those rows fell
    # outside every interval and were silently dropped from the decision table.
    score_total = pd.concat([score_train, score_test], axis=0, sort=False)
    score_total.loc[score_total.score < score_range[0], "score"] = score_range[0]
    score_total.loc[score_total.score >= score_range[1], "score"] = score_range[1] - 1
    # Equal-frequency break points from the pooled score distribution.
    percent_list = list(range(0, 100 + percent, percent))
    breaks = np.percentile(score_total.score.values, percent_list)
    breaks = sorted(set(breaks))
    # FIX: anchor the outer edges to score_range instead of hard-coded 300/1000
    # so the decision table honours the caller-supplied score range.
    breaks[-1] = score_range[1]
    breaks[0] = score_range[0]
    score_total["score"] = pd.cut(score_total.score, bins=breaks, right=False)
    score_total = (
        score_total.groupby(by=["score", "flagy"])
        .size()
        .unstack(level=[1], fill_value=0)
    )
    score_total = score_total.fillna(0)
    score_total.sort_index(ascending=False, inplace=True)
    score_total["好"] = score_total[0]
    try:
        score_total["灰"] = score_total[grey]
    except:
        print("Warning: No Grey sample!")
    score_total["坏"] = score_total[1]
    try:
        score_total["总"] = score_total[0] + score_total[1] + score_total[grey]
    except:
        score_total["总"] = score_total[0] + score_total[1]
    score_total["坏累计"] = score_total[1].cumsum()
    score_total["总累计"] = score_total["总"].cumsum()
    score_total["通过率"] = score_total["总累计"] / score_total["总"].sum()
    score_total["每段违约率"] = score_total["坏"] / score_total["总"]
    score_total["平均违约率"] = score_total["坏"].sum() / score_total["总"].sum()
    score_total["通过违约率"] = score_total["坏累计"] / score_total["总累计"]
    score_total["拒绝违约率"] = (score_total["坏"].sum() - score_total["坏累计"]) / (
        score_total["总"].sum() - score_total["总累计"]
    )
    score_total["违约率下降"] = 1 - score_total["通过违约率"] / score_total["平均违约率"]
    score_total.reset_index(inplace=True)
    score_total.rename(columns={"score": "评分区间"}, inplace=True)
    del score_total[1], score_total[0]
    score_total.to_excel(table, sheet_name="14.评分决策表", index=False, startrow=27)
    table.save()
    return 1
    # over
    
# 1. Preprocessing
## 1.1 Load the raw data
# NOTE(review): notebook-style template code — the path, file name, encoding and
# column names below are per-project placeholders.
'''
常用函数
enconding = 'utf-8' 'GBK' 'c' 'CP1252' 'ansi'
nrows = 需要读取的行数
skiprows = [1] 需要跳过的行数（从0开始算起）
header = 0 设置哪行作为表头，没有表头时header = None
names = [] 无表头时添加列名
usecols = ['a','b','c'] 只读取数据的某几列
index_col = 'cus_num' 指定某列为index
low_memory = False 分块加载，避免类型混淆需要设置为False
'''
os.getcwd() # show the current working directory
os.chdir('D:\\项目') # switch to the project working directory

# df_pre = pd.read_csv('xx.csv',nrows = 5) # peek at the data first to decide index_col / skiprows
df = pd.read_csv('xx.csv',low_memory = False,index_col = 'cus_num',header = 0,encoding = 'utf-8',skiprows = [1])
df = df.rename(columns = {'other_var4':'flagy'}) # rename the target-label column to 'flagy'

# df.drop(columns = ['name','id','cell','user_date','sl_user_date','other_var1'],inplace = True)
## 1.2 Inspect the data
'''
常用函数
df.columns.tolist() 查看变量名
df.dtypes 查看变量类型
df.shape
df.describe() 描述性统计
df.isnull.sum() 缺失值统计
'''

# Overall bad-customer rate (share of label == 1), printed as a percentage
badper = sum(df['flagy'] == 1)/len(df['flagy']) * 100
print(f'{badper}%')

## 1.3 Monthly bad rate, used to choose the OOT (out-of-time) month

badrate_m = badrate_month(df)  # NOTE(review): project helper defined elsewhere in this file
badrate_m

df['user_date'] = pd.to_datetime(df['user_date']) # cast application date to datetime
# df['month'] = df['user_date'].apply(lambda x : str(x.month)+'月') # add a month column
# data_oot = df[df['month'] == '9月']
# data_total = df[df['month'] != '9月']

# 2. Coarse variable screening
import time  # FIX: `time` is used below but never imported at the top of the file

# Drop variables with too high a missing rate or a single dominant value
start_time = time.time()
# NOTE(review): `df_` is expected to be prepared earlier (elsewhere in this
# file); confirm it is not a typo for `df`.
df_nan_iden_filter = nan_iden_filter(df_,missing_limit = 0.9,identical_limit = 0.9,kp_var = ['user_date','flagy'])
run_time = time.time() - start_time
time.strftime("%H:%M:%S", time.gmtime(run_time))  # elapsed wall time as HH:MM:SS

# Drop categorical variables with too many distinct levels
df_t1 = object_var_del(df_nan_iden_filter,num_limit = 10)

df_t1.select_dtypes('O').columns # list remaining object columns; drop the meaningless ones

obj_var_drop = ['pd_cell_type', 'cmec_conservative', 'cmec_radical', 'tl_cell_lasttype','tl_id_lasttype','month']
df_t1.drop(columns = obj_var_drop,inplace = True)

# Encode categorical variables to ordered numeric codes before equal-frequency
# binning (levels are ranked by bad rate: the highest-bad-rate level gets 1,
# and so on).
df_t1 = df_t1.reset_index()
df_x = df_t1.drop(columns = ['flagy'])
df_y = df_t1['flagy']
# cate_var_transform raises if the data set has nothing to transform
x_transformed, transform_rule = cate_var_transform(df_x,df_y)
df_x = x_transformed
dt_s = pd.concat([df_x,df_y],axis = 1)
# Restore the customer-number index
dt_s = dt_s.set_index(
    "cus_num", drop=True, append=False, inplace=False, verify_integrity=False)
# transform_rule.to_csv("类别映射关系.csv")
dt_s.shape

# data_oot must receive the same categorical mapping
# for col in transform_rule:
#     trans = dict(transform_rule[col][['raw data', 'transform data']].to_dict(orient='split')['data'])
#     data_oot[col] = data_oot[col].map(trans)

# Filter by information value
dt_s1 = bin_iv(dt_s,bins_num = 10,iv_limit = 0.03,kp_var = ['user_date'])

# Alternative: scorecardpy's built-in filter (missing rate / identical rate / IV)
df_pre_filter = sc.var_filter(df,'flagy',x = None,iv_limit = 0.03,missing_limit = 0.9,identical_limit = 0.9,var_rm = None,var_kp = None,return_rm_reason = False,positive = 'bad|1')


# 3. Missing-value imputation

dt_s1 = dt_s1.fillna(-99) # fill missing values with the sentinel -99
# NOTE(review): `isna` is not among this file's visible imports — presumably a
# project helper that yields the columns still containing NaN; confirm
# (pd.isna would return a boolean frame, not column names).
col = isna(dt_s1) # check whether any missing values remain
[i for i in col]

# The validation (OOT) set must get the same imputation before it can be scored
data_oot = data_oot.fillna(-99)
col = isna(data_oot) # check whether any missing values remain
[i for i in col]

dt_s = dt_s1.copy()

# Persist intermediate data
# joblib.dump(data_oot,'data_oot.pkl')
# joblib.dump(data_total,'data_total.pkl')
# joblib.dump(transform_rule,'transform_rule.pkl')
# joblib.dump(dt_s,'dt_s.pkl')
# Reload intermediate data
# data_oot = joblib.load('data_oot.pkl')
# data_total = joblib.load('data_total.pkl')
# transform_rule = joblib.load('transform_rule.pkl')
# dt_s = joblib.load('dt_s.pkl')

# 4. Train/test split, stratified by label: goods and bads are split
# separately (same seed) so both sets keep the overall bad rate.
goods = dt_s[dt_s['flagy'] == 0]
bads = dt_s[dt_s['flagy'] == 1]
# If the bad rate is very low, down-sample the goods instead, e.g. to ~10% bad:
# goods = dt_s[dt_s['flagy'] == 0].sample(bads.shape[0]*9)

good_train, good_test = train_test_split(goods, test_size=0.3, random_state=666)
bad_train, bad_test = train_test_split(bads, test_size=0.3, random_state=666)

train = pd.concat([good_train, bad_train])
test1 = pd.concat([good_test, bad_test])

# Feature / target views used downstream
train_y = train['flagy']
test_y = test1['flagy']
train_x = train.drop(columns=['flagy', 'user_date'])
test_x = test1.drop(columns=['flagy', 'user_date'])

# data_total.to_csv('data_total.csv',encoding = 'utf-8')
# data_oot.to_csv('data_oot.csv',encoding = 'utf-8')
# train.to_csv('train.csv',encoding ='utf-8')
# test1.to_csv('test1.csv',encoding = 'utf-8')

# 5. Variable selection
## Optimal binning via decision tree
# FIX: the result of sc.woebin was immediately overwritten by sdf_woebin below,
# so the (expensive) call was dead code; kept only as a commented reference.
# bins = sc.woebin(dt_s, y='flagy')
bins = sdf_woebin(
    dt_s.drop(columns = ['user_date']),
    "flagy",
    max_leaf_num=6,
    min_woe_box_percent=0.05,
    min_woe_box_num_min=100,
    special_values=[-99],)
# p1 = sc.woebin_plot(bins)

# WOE-transform the train and test sets
train_woe = sdf_woebin_ply(train_x, bins)
test_woe = sdf_woebin_ply(test_x, bins)
# data_woe = pd.concat([train_woe, test_woe])

# Variable-stability (PSI) check between train and validation (OOT) sets
## Categorical variables must first get the same numeric mapping as training
for col in transform_rule:
    trans = dict(transform_rule[col][['raw data', 'transform data']].to_dict(orient='split')['data'])
    data_oot[col] = data_oot[col].map(trans)

data_oot_psi = data_oot[train_x.columns.tolist()]
# data_oot must already have the same missing-value fill and category mapping
oot_woe = sdf_woebin_ply(data_oot_psi, bins)
psi_result = psi(train_woe,oot_woe,0.02)
len(psi_result)

# IV filtering after the tree binning (reference)
# dt_s_woe_iv = pd.merge(dt_s_woe,dt_s['flagy'],left_index = True,right_index = True)
# iv_result = sc.iv(dt_s_woe_iv,'flagy')
# iv_result_var = iv_result[iv_result['info_value'] > 0.02]
# dt_lasso_woe = dt_s_woe_iv[iv_result_var + 'flagy']


# lasso
# Split predictors and target (target realigned to the WOE frame's index)
reg_data = train_woe
reg_target = train_y.loc[train_woe.index]

# Fit Lasso with cross-validated alpha
from sklearn.linear_model import LassoCV
lassocv = LassoCV()
lassocv.fit(reg_data, reg_target)
# alpha chosen by cross-validation
print(lassocv.alpha_)
# first few fitted coefficients
print(lassocv.coef_[:10])
# number of variables Lasso kept
# NOTE(review): `> 0` keeps only positive coefficients — plausible on WOE
# features (negative-sign variables are dropped later anyway), but it silently
# discards negative-coefficient variables; use != 0 if those matter.
print(np.sum(lassocv.coef_ > 0))

# Fit Lasso with a fixed alpha (no CV) for comparison
from sklearn.linear_model import Lasso
# alpha = 0.1
lasso=Lasso(alpha=0.0018,max_iter=10000)
lasso.fit(reg_data, reg_target)
# fitted coefficients of the fixed-alpha model
print(lasso.coef_[:10])
# number of variables kept
print(np.sum(lasso.coef_ > 0))

# Reduced data set
# NOTE(review): the mask uses the CV fit (lassocv); the fixed-alpha `lasso`
# fit above is exploratory only — confirm that is intended.
mask = lassocv.coef_ > 0
new_data_woe = reg_data.iloc[:, mask]
# Variables selected by LASSO
lasso_result = new_data_woe.columns.values.tolist()


# stepwise
# Bidirectional stepwise selection on p-values (project helper defined
# elsewhere in this file)
stepwise_result = stepwise_selection(new_data_woe,
                                     reg_target,
                                     # initial_list = lasso_result,
                                     threshold_in=0.05,
                                     threshold_out=0.05)
len(stepwise_result)

# Selection result: strip the "_woe" suffix to recover raw variable names
result_withoutwoe = [item.replace("_woe", "") for item in stepwise_result] 
train_new = train[result_withoutwoe + ["flagy"]]
# Keep only the bins of the surviving variables
bins_new = dict([(i, bins.get(i)) for i in result_withoutwoe])

# Manually adjust the binning (interactive; the IPython magic below only works
# inside a notebook)
%matplotlib inline

breaks_new, drop_vars = mannual_breaks(
    bins_new,
    train_new,
    "flagy",
    special_values=[-99],
)
breaks_new

# Re-bin with the manually adjusted break list
train_new = train_new.drop(columns=drop_vars)
bins_new = sdf_woebin(train_new, "flagy", breaks_list=breaks_new, special_values=[-99])

train_woe_new = sdf_woebin_ply(train_new, bins_new)
test_woe_new = sdf_woebin_ply(test1[train_new.columns], bins_new)

# Check variable trends on the test set (bins frozen to the training breaks)
bin_test = sdf_woebin(
    test1[train_new.columns.tolist()], "flagy", breaks_list=breaks_new, special_values=[-99]
)
sdf_woebin_plot(bin_test)

# Save candidate-variable IV and missing rates
# FIX: the original assigned the result back to the name `iv`, shadowing the
# iv() helper so it could never be called again in this session.
iv_df = iv(train_woe_new, "flagy")
# If missing values were imputed, the missing rate must be computed against the
# fill value (-99 here), not against NaN.
iv_df["variable"] = iv_df.variable.apply(lambda x: x.replace("_woe", ""))
iv_df.set_index("variable", drop=True, inplace=True)

missing_rate = (
    train_new.drop(columns=["flagy"])
    .apply(lambda x: (x == -99).sum() / x.shape[0])
    .rename("缺失值占比")
)
output = pd.concat([iv_df, missing_rate], axis=1)
output.sort_values(by="info_value", ascending=False, inplace=True)
output.to_csv("iv_missing_rate.csv", encoding="utf_8_sig")

# Too many highly correlated pairs — redo this step before VIF.
# NOTE(review): corr_iv is a project helper; presumably it drops one member of
# each pair with |corr| > 0.65 (keeping the higher-IV one) — confirm.
train_woe_new_copy = train_woe_new.copy()
train_woe_new = corr_iv(train_woe_new_copy,corr_limit = 0.65)

# VIF (multicollinearity) check: iteratively drop the variable with the
# highest VIF and recompute, until every VIF is <= 4.
import statsmodels.stats.outliers_influence as oi
import statsmodels.api as sm

# FIX: np.float was deprecated and removed in NumPy >= 1.24; the builtin
# float is exactly what it aliased.
xs = np.array(sm.add_constant(train_woe_new.drop(columns=["flagy"])),
              dtype=float)
xs_name = ["const"] + train_woe_new.drop(
    columns=['flagy']).columns.to_list()  # variables to compute VIF for
vif = pd.DataFrame([{
    "variable": xs_name[i],
    "vif": oi.variance_inflation_factor(xs, i)
} for i in range(len(xs_name))])
vif = vif[vif.variable != 'const']
train_woe_copy = train_woe_new.copy()
while True:
    vif.sort_values(by='vif', ascending=False, inplace=True)
    if vif.iloc[0]['vif'] > 4:
        vif.reset_index(drop=1, inplace=True)
        print('drop: ' + vif.iloc[0]['variable'] + ' vif: ' +
              str(vif.iloc[0]['vif']))
        del train_woe_copy[vif.iloc[0]['variable']]
        vif.drop(index=[0], inplace=True)
        xs = np.array(sm.add_constant(train_woe_copy.drop(columns=["flagy"])),
                      dtype=float)
        # FIX: the names must follow the column order of `xs` (train_woe_copy),
        # not the VIF-sorted order of the previous `vif` frame — otherwise each
        # variable is paired with another variable's VIF and the wrong column
        # gets dropped on the next iteration.
        xs_name = ["const"] + train_woe_copy.drop(
            columns=["flagy"]).columns.to_list()
        vif = pd.DataFrame([{
            "variable": xs_name[i],
            "vif": oi.variance_inflation_factor(xs, i)
        } for i in range(len(xs_name))])
        vif = vif[vif.variable != 'const']
    else:
        break
vif

# p-value check: refit the GLM and drop the least significant variable until
# every remaining p-value is <= 0.05.
while True:
    model = sm.GLM(train_woe_copy['flagy'], sm.add_constant(train_woe_copy.drop(columns=['flagy'])), family = sm.families.Binomial()).fit()
    pvalue = model.pvalues
    pvalue.drop(index=['const'], inplace=True)
    pvalue.sort_values(inplace=True, ascending=False)
    if pvalue.iloc[0] > 0.05:
        # FIX: the original message ran the variable name and the label
        # together ("<var>p-value:"); add separators for readable logs.
        print('drop ' + pvalue.index[0] + ' p-value: ' + str(pvalue.iloc[0]))
        del train_woe_copy[pvalue.index[0]]
    else:
        break

# Coefficient-sign check: WOE-encoded variables should enter with positive
# coefficients; drop the most negative one and refit until none are negative.
while True:
    model = sm.GLM(train_woe_copy['flagy'], sm.add_constant(train_woe_copy.drop(columns=['flagy'])), family = sm.families.Binomial()).fit()
    params = model.params
    params.drop(index=['const'], inplace=True)
    params.sort_values(inplace=True, ascending=True)
    if params.iloc[0] < 0:
        # FIX: add the missing space after "drop" in the log message.
        print('drop ' + params.index[0])
        del train_woe_copy[params.index[0]]
    else:
        break

# Correlation check: heat-map showing only the upper triangle of the feature
# correlation matrix (the lower triangle and diagonal are masked out).
corr_matrix = train_woe_copy.drop(columns=["flagy"]).corr()
# A lower-triangle mask, transposed, marks exactly the cells to hide.
upper_mask = np.zeros_like(corr_matrix)
upper_mask[np.tril_indices_from(upper_mask)] = True
upper_mask = upper_mask.T

fig, _ = plt.subplots(figsize=(20, 12))
sns.set(font_scale=1)
sns.heatmap(corr_matrix, cmap='Blues', annot=True, mask=upper_mask)
plt.show()


# Prepare the final variable set for the logistic regression

# Strip the "_woe" suffix (last 4 characters) to recover raw variable names
var_final = (train_woe_copy.drop(columns=['flagy']).rename(
    columns=lambda x: x[0:-4]).columns.to_list())

# Keep only the break lists of the surviving variables
breaks_final = {key: list(bins_new[key]["breaks"]) for key in var_final}

train_final = train[var_final + ["flagy"]]
test_final = test1[train_final.columns]
# oot_final = data_oot[train_final.columns]

# Re-bin with the frozen break list so train/test share identical bins
bins_final = sdf_woebin(
    train_final,
    "flagy",
    max_leaf_num=6,
    min_woe_box_percent=0.05,
    min_woe_box_num_min=100,
    breaks_list=breaks_final,
    special_values=[-99],
)

train_woe_final = sdf_woebin_ply(train_final, bins_final)
test_woe_final = sdf_woebin_ply(test_final, bins_final)
# oot_woe_final = sdf_woebin_ply(oot_final,bins_final)

# Visual check of the variable trends on the test set
bin_test = sdf_woebin(
    test_final, "flagy", breaks_list=breaks_final, special_values=[-99]
)
sdf_woebin_plot(bin_test)

## Fit

# sklearn is used because sc.scorecard below needs the sklearn estimator object
from sklearn.linear_model import LogisticRegression

# NOTE(review): penalty="none" was removed in scikit-learn 1.2 (newer versions
# expect penalty=None) — confirm the pinned sklearn version.
lr = LogisticRegression(penalty="none", solver="newton-cg", n_jobs=-1)
lr.fit(train_woe_final.drop(columns=["flagy"]), train_woe_final["flagy"])

# Parallel statsmodels GLM fit, used only for the coefficient/p-value summary
model = sm.GLM(
    train_woe_final["flagy"],
    exog=sm.add_constant(train_woe_final.drop(columns=["flagy"])),
    family=sm.families.Binomial(),
).fit()
model.summary2()

# Predicted probability of bad (class 1)
train_pred = lr.predict_proba(train_woe_final.drop(columns=["flagy"]))[:, 1]
test_pred = lr.predict_proba(test_woe_final.drop(columns=["flagy"]))[:, 1]
# oot_pred = lr.predict_proba(oot_woe_final.drop(columns=["flagy"]))[:, 1]

# Evaluation (KS / ROC plots)
train_perf = sc.perf_eva(train_woe_final.flagy, train_pred, title="train")
test_perf = sc.perf_eva(test_woe_final.flagy, test_pred, title="test")
# oot_perf = sc.perf_eva(oot_woe_final.flagy, oot_pred, title="oot")

# Scoring
odds = 0.027/0.973  # base odds at the anchor score (2.7% bad rate)
card = sc.scorecard(bins_final, lr, var_final, points0=610, pdo=90, odds0=odds)

train_score = sdf_scorecard_ply(train_final, card)
test_score = sdf_scorecard_ply(test_final, card)

# Restore the original row order
train_score = train_score.loc[train.index]
test_score = test_score.loc[test1.index]

# FIX: the original capped the two sets with inconsistent comparisons
# (<= vs <); clip() applies the same [300, 1000] cap to both.
train_score = train_score.clip(lower=300, upper=1000)
test_score = test_score.clip(lower=300, upper=1000)

# Score PSI between train and test
train_score.columns, test_score.columns = ["score"], ["score"]

sc.perf_psi(
    score={"train": train_score, "test": test_score},
    label={"train": train_y, "test": test_y},
    x_tick_break=50,
)
print(f'max:{max(train_score.score)}, min:{min(train_score.score)}')


# Persist the model artefacts
import joblib

artefacts = {
    'var_final.pkl': var_final,
    'lr.pkl': lr,
    'breaks_final.pkl': breaks_final,
    'bins_final.pkl': bins_final,
    'card.pkl': card,
    'train.pkl': train,
    'test.pkl': test1,
}
for path, obj in artefacts.items():
    joblib.dump(obj, path)

# Reload the model artefacts
lr = joblib.load('lr.pkl')
var_final = joblib.load('var_final.pkl')
bins_final = joblib.load('bins_final.pkl')
train = joblib.load('train.pkl')

# Produce the model report workbook (the report() function defined earlier in
# this file)

# If user_date exists, df_total/train_final/test_final must all carry it
# df_total['user_date'] = pd.to_datetime(df_total['date_credit'])
# train_final['user_date'] = data_total['user_date']
# test_final['user_date'] = data_total['user_date']

# NOTE(review): points0=600 / pdo=100 here differ from the card built above
# (points0=610, pdo=90) — confirm which anchor is intended.
report(
    data_total,
    train_final,
    test_final,
    data_oot = data_oot, # must carry a label column (add a dummy one if absent)
    y="flagy",
    breaks_list=breaks_final,
    filename="v1",
    points0=600,
    pdo=100,
    odds0=0.0248/(1-0.0248),
    basepoints_eq0=False,
    special_values=[-99],
    grey=2,  # label value marking "grey" (indeterminate) samples
    score_range=(300, 1000),
    tick=50,  # equal-width score bucket size
    percent=5,  # equal-frequency percentile step
    user_date = 'user_date',
)


























# Rule report: format rule-mining results into a readable table.
# NOTE(review): rule_result_total / df_t come from rule-mining code elsewhere
# in this file.
rule_result_total['sample_num'] = df_t.shape[0]
rule_result_total['平均坏客率'] = sum(df_t.flagy) / len(df_t.flagy)
rule_result1 = rule_result_total[['var_1','down_lmt_1','up_lmt_1','var_2','down_lmt_2','up_lmt_2','hitnum', 'badnum', 'sample_num', 'hit_rate', 'badrate', '平均坏客率', 'lift', 'lift_acu', 'hit_rate_acu']]
rule_result1.columns = ['变量1','变量1下限','变量1上限','变量2','变量2下限','变量2上限','频数', '规则命中坏客户人数', '样本数据量', '规则覆盖率', '规则覆盖坏客户率', '平均坏客户率', '提升度', 
                        '累计提升度', '累计规则覆盖率'
]
# Format the rate columns as percentages with three decimals.
# NOTE(review): rule_result1 is a slice of rule_result_total — these in-place
# assignments may trigger pandas' SettingWithCopy warning; consider .copy().
rule_result1[['规则覆盖率','规则覆盖坏客户率','平均坏客户率','累计规则覆盖率']] = rule_result1[['规则覆盖率','规则覆盖坏客户率','平均坏客户率','累计规则覆盖率']].applymap(lambda x : '%.3f' % (x*100)+'%')
# Wrap variable names in parentheses (a missing second variable becomes '(nan)')
rule_result1[['变量1','变量2']] = rule_result1[['变量1','变量2']].applymap(lambda x: '('+ str(x) +')')
# rule_result1['规则覆盖率'] = rule_result1['规则覆盖率'].apply(lambda x : '%.3f' % (x*100)+'%')
# rule_result1['规则覆盖坏客户率'] = rule_result1['规则覆盖坏客户率'].apply(lambda x : '%.3f' % (x*100)+'%')
# rule_result1['平均坏客户率'] = rule_result1['平均坏客户率'].apply(lambda x : '%.3f' % (x*100)+'%')
# rule_result1['累计规则覆盖率'] = rule_result1['累计规则覆盖率'].apply(lambda x : '%.3f' % (x*100)+'%')
rule_result1




