# 0.导入包
import pandas as pd
import numpy as np
import os
import scorecardpy as sc
from sklearn.model_selection import train_test_split
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier
from sklearn import tree
import re 
import seaborn as sns
from matplotlib import pyplot as plt
from joblib import Parallel, delayed
import statsmodels.api as sm
from pylab import *
import joblib
from Logistic_function_def import *
import warnings
warnings.filterwarnings('ignore')

# Notebook-export setup: the "%matplotlib inline" magic below only runs under
# IPython/Jupyter — this file is a notebook export, not a plain Python script.
%matplotlib inline
# pd.set_option('display.max_rows', None)
plt.rcParams['font.sans-serif']=['SimHei'] # render CJK (Chinese) plot labels correctly
plt.rcParams['axes.unicode_minus']=False # render minus signs correctly with the CJK font

os.getcwd() # inspect the current working directory

# 1. Preprocessing
## 1.1 Data import
'''
Commonly used read_csv options:
encoding = 'utf-8' / 'GBK' / 'c' / 'CP1252' / 'ansi'
nrows = number of rows to read
skiprows = [1]  rows to skip (0-based)
header = 0  which row is the header; use header = None when there is none
names = []  column names to assign when there is no header
usecols = ['a','b','c']  read only these columns
index_col = 'cus_num'  use this column as the index
low_memory = False  disable chunked loading to avoid mixed-dtype inference
'''
# os.chdir('D:\\项目') # switch working directory
df_all = pd.read_csv('xx.csv',low_memory = False,index_col = 'cus_num',header = 0,encoding = 'utf-8',skiprows = [1])
df_all = df_all.rename(columns = {'other_var4':'flagy'}) # rename the target/label column
# als_list = [i for i in df_br_model.columns if re.match('als_',i) != None]
df_all.flagy.value_counts()
df = df_all.drop(columns = ['Unnamed: 0'])

# Out-of-time (OOT) validation set import
data_oot = pd.read_csv('xx.csv',low_memory = False,header = 0,encoding = 'utf-8')
# NOTE(review): als_list is only defined in the commented line above, so this
# line would raise NameError as written; also dropping 'Unnamed: 0' right after
# selecting only als_list + ['客户号'] will KeyError unless that column is in
# als_list — confirm both before running.
df_oot = data_oot[als_list+['客户号']].drop(columns = ['Unnamed: 0'])

# Overall bad rate (project helper from Logistic_function_def)
badrate_all(df,y = 'flagy')

# Bad rate by month (project helper)
badrate_month(df,date_var = 'app_date')

## 1.2 Data exploration
# Column role classification: identifiers vs. label vs. candidate features
var_id  = ['客户号','app_date']
flag = ['flagy']
# cols_drop = [i for i in df.columns if re.search('terms',i)] # variables excluded up front
cols = list(set(df.columns) - set(var_id) - set(flag))

types = df[cols].dtypes
# types.value_counts()
var_cats = types[types == 'object'].index.tolist()
var_nums = types[types != 'object'].index.tolist()

# EDA for numeric variables
data_nunique = df.nunique().to_dict()
df_eda_num = df[var_nums].describe(percentiles = [.01,.05,.25,.5,.75,.95,.99]).T
df_eda_num['unique'] = df_eda_num.index.map(data_nunique)

# # EDA for categorical variables
# df_eda_cat = df[var_cats].describe(include='O').T

# Save results
# os.mkdir('E:\\借呗建模\\3.模型构建\\result')
# df_eda_num.to_csv(r'result/df_eda_num.csv')
# df_eda_cat.to_csv(r'result/df_eda_cat.csv')

## 1.3 Categorical variable handling
## Drop categorical variables with too many distinct levels
# df_thin1 = object_var_del(df,num_limit = 10)
# df_thin1.select_dtypes('O').columns # inspect remaining categoricals; drop meaningless ones

# Encode categoricals by bad-rate order (when the risk ordering of levels is
# unknown: the level with the highest bad rate is mapped to 1, and so on),
# so equal-frequency binning can be applied downstream.
x_transformed, transform_rule = cate_var_transform(df[cols],df['flagy'])
df_cat_trans = pd.concat([x_transformed,df[var_id+['flagy']]],axis = 1)
# transform_rule.to_csv(r'result/类别映射关系.csv')
df_cat_trans.shape

# Alternative cell: no categorical transform needed (numeric columns only).
# NOTE(review): running this line overwrites the transformed frame built just
# above — these two cells are mutually exclusive alternatives.
df_cat_trans = df[var_id+var_nums+['flagy']]
df_cat_trans.shape

## 1.4 Train/test split, stratified on the good/bad label
# Split goods and bads separately so both partitions keep the overall bad rate.
bad_set = df_cat_trans.loc[df_cat_trans['flagy'].eq(1)]
good_set = df_cat_trans.loc[df_cat_trans['flagy'].eq(0)]
# good_set = dt_s[dt_s['flagy'] == 0].sample(bad_set.shape[0]*9)  # optional: undersample goods to a ~10% bad rate

good_train, good_test = train_test_split(good_set, test_size=0.3, random_state=666)
bad_train, bad_test = train_test_split(bad_set, test_size=0.3, random_state=666)

train = pd.concat([good_train, bad_train])
test1 = pd.concat([good_test, bad_test])

# Bad rate in each partition (project helper)
badrate_all(train, y='flagy')

badrate_all(test1, y='flagy')

len(cols)

# 2. Feature engineering
## 2.1 Coarse filtering

### 2.1.1 Drop variables with too many missing values or a single dominant value

nan_iden_sel_var,df_dropcol_miss_iden = missing_identify_select(train.drop(columns = var_id),y='flagy',missing_trd=0.95,identify_trd=0.95,identify_num=1)
len(nan_iden_sel_var),len(nan_iden_sel_var)-len(flag)

### 2.1.2 Equal-frequency binning; drop variables with IV <= 0.02

bin_iv_sel_var,df_dropcol_bin_iv = bin_iv_filter(train[nan_iden_sel_var],bins_num = 10,iv_limit = 0.02,kp_var = None)
len(set(bin_iv_sel_var)),len(set(bin_iv_sel_var)- set(flag))

## 2.2 Missing-value imputation

# Impute the training set with the sentinel -9999
train = train.fillna(-9999) 
# NOTE(review): isna() here is a bare name — presumably provided by one of the
# star-imports (pylab or Logistic_function_def); confirm which. The list
# comprehension only materializes the result for notebook display.
col = isna(train) 
[i for i in col]

# Impute the test set with the same fill value
test1= test1.fillna(-9999) 
col = isna(test1) 
[i for i in col]

## 2.3 Optimal binning via decision tree

bins = sdf_woebin(
    train[bin_iv_sel_var],
    "flagy",
    max_leaf_num=6,
    min_woe_box_percent=0.05,
    min_woe_box_num_min=100,
    special_values=[-9999],)

# WOE-transform the train and test sets with the fitted bins
train_woe = sdf_woebin_ply(train[bin_iv_sel_var], bins)
test_woe = sdf_woebin_ply(test1[bin_iv_sel_var], bins)

## 2.4 Variable stability (PSI) check against the OOT set

## If categoricals were transformed, apply the same mapping to the OOT set first
# for col in transform_rule:
#     trans = dict(transform_rule[col][['raw data', 'transform data']].to_dict(orient='split')['data'])
#     data_oot[col] = data_oot[col].map(trans)
data_oot_psi = df_oot[bin_iv_sel_var].fillna(-9999)
oot_woe = sdf_woebin_ply(data_oot_psi, bins) # OOT must get the same imputation and categorical mapping as train before the WOE transform
psi_sel_var,df_dropcol_psi,psi_df = psi(train_woe,oot_woe,0.02)

len(set(psi_sel_var)),len(set(psi_sel_var)- set(flag))

## 2.5 After tree binning, drop variables with IV <= 0.02

woe_iv_sel_var,df_dropcol_woe_iv = woe_iv_filter(train_woe[psi_sel_var],y='flagy',iv_limit = 0.02,kp_var = None)

len(set(woe_iv_sel_var)),len(set(woe_iv_sel_var)- set(flag))

# 2.6 Lasso selection

# Split into features and target
reg_data = train_woe[list(set(woe_iv_sel_var)-set(flag))]
reg_target = train_woe['flagy']

lasso_sel_var,df_dropcol_lasso = lasso_selection(reg_data,reg_target,alpha_ = 0.0001,max_iter_=10000,Lasso_CV = False)

# Data set after fine filtering
new_data_woe = reg_data[lasso_sel_var]

sc.iv(pd.merge(new_data_woe,train['flagy'],left_index = True,right_index = True),'flagy')

## 2.7 Stepwise selection

stepwise_result,df_dropcol_stepwise = stepwise_selection(new_data_woe,
                                                         reg_target,
#                                                        initial_list = lasso_sel_var,
                                                         threshold_in=0.05,
                                                         threshold_out=0.05)
len(stepwise_result)

# Selection result: strip the "_woe" suffix to recover raw variable names
result_withoutwoe = [item.replace("_woe", "") for item in stepwise_result] 
# result_withoutwoe = [item.replace("_woe", "") for item in lasso_sel_var]
train_new = train[result_withoutwoe + ["flagy"]]
bins_new = dict([(i, bins.get(i)) for i in result_withoutwoe])

## 2.8 Manual bin adjustment

# %matplotlib inline
breaks_new, drop_vars = mannual_breaks(
    bins_new,
    train_new,
    "flagy",
    special_values=[-9999],
)
breaks_new

# NOTE(review): the drop-reason bookkeeping below is commented out, but the
# report section near the end of the file concatenates
# df_dropcol_trend_unexplain — uncomment these lines before building the
# report, or it will raise NameError there.
# drop_col_trend_unexplain = list(set(result_withoutwoe)-set(breaks_new.keys()))
# df_dropcol_trend_unexplain = pd.DataFrame(drop_col_trend_unexplain,columns = ['drop_col'])
# df_dropcol_trend_unexplain['DropReason'] = 'trend_unexplain'
# df_dropcol_trend_unexplain['Count'] = len(drop_col_trend_unexplain)

# Re-bin with the manually adjusted break points and re-apply the WOE transform
train_new1 = train[list(breaks_new)+['flagy']] #train_new
bins_new = sdf_woebin(train_new1, "flagy", breaks_list=breaks_new, special_values=[-9999])

train_woe_new = sdf_woebin_ply(train_new1, bins_new)
test_woe_new = sdf_woebin_ply(test1[train_new1.columns], bins_new)

sdf_woebin_plot(bins_new)

# Check variable trends on the test set
bin_test = sdf_woebin(test1[train_new1.columns.tolist()], "flagy", breaks_list=breaks_new, special_values=[-9999])
sdf_woebin_plot(bin_test)

## 2.9 保存候选变量iv、缺失值

iv_ = iv(train_woe_new, "flagy")
"""
如果有缺失值填充，应以填充值统计缺失率
下面以填充-99为例
"""
iv_["variable"] = iv_.variable.apply(lambda x: x.replace("_woe", ""))
iv_.set_index("variable", drop=True, inplace=True)
missing_rate = (train_new1.drop(columns=["flagy"]).apply(lambda x: (x == -9999).sum() / x.shape[0]).rename("缺失值占比"))

output = pd.concat([iv_, missing_rate], axis=1)
output.sort_values(by="info_value", ascending=False, inplace=True)
# output.to_csv("iv_missing_rate.csv", encoding="utf_8_sig")

output

## 2.10 Correlation filtering

# If pairwise correlation exceeds 0.7, keep the variable with the higher IV
train_woe_corr_sel,df_dropcol_corr = corr_iv(train_woe_new,corr_limit = 0.7,flag = "flagy")

len(set(train_woe_corr_sel.columns)-set(flag))

## 2.11 VIF check

# Drop variables whose variance inflation factor exceeds 3
vif,train_woe_vif_sel,df_dropcol_vif = vif_select(train_woe_corr_sel,vif_limit = 3,flag = "flagy")
vif

## 2.12 p-value and coefficient checks

train_woe_pvalue_sel,df_dropcol_pvalue_params = pvalue_params_select(train_woe_vif_sel,pvalue_limit = 0.05,flag = "flagy")

# Correlation heatmap of the surviving WOE variables,
# with the upper triangle (incl. diagonal) masked out.
corr = train_woe_pvalue_sel.drop(columns=["flagy"]).corr()
hide_upper = np.triu(np.ones_like(corr, dtype=bool))

fig, ax = plt.subplots(figsize=(20, 12))
sns.set(font_scale=1)
sns.heatmap(corr, cmap='Blues', annot=True, mask=hide_upper)
plt.show()

# 3. Fit the logistic regression
## 3.1 Final model variables

# Strip the trailing "_woe" (4 characters) to recover raw variable names.
var_final = (train_woe_pvalue_sel.drop(columns=['flagy']).rename(columns=lambda x: x[0:-4]).columns.to_list())

breaks_final = {key: list(bins_new[key]["breaks"]) for key in var_final}

train_final = train[var_final + ["flagy"]]
test_final = test1[train_final.columns]

# Re-bin the final variable set with the frozen break points
bins_final = sdf_woebin(
    train_final,
    "flagy",
    max_leaf_num=6,
    min_woe_box_percent=0.05,
    min_woe_box_num_min=100,
    breaks_list=breaks_final,
    special_values=[-9999],)

train_woe_final = sdf_woebin_ply(train_final, bins_final)
test_woe_final = sdf_woebin_ply(test_final, bins_final)

bin_test = sdf_woebin(test_final, "flagy", breaks_list=breaks_final, special_values=[-9999])

# oot_final = df_oot.query('flagy!=2')[train_final.columns].fillna(-9999)
# oot_woe_final = sdf_woebin_ply(oot_final,bins_final)
# bin_oot = sdf_woebin(oot_final, "flagy", breaks_list=breaks_final, special_values=[-9999])
# pd.concat([iv(train_woe_final, "flagy"),iv(test_woe_final, "flagy")['info_value'],iv(oot_woe_final, "flagy")['info_value']],axis = 1)

# Inspect binning trends
sdf_woebin_plot(bins_final)

sdf_woebin_plot(bin_test)

# FIX: bin_oot is only built inside the commented-out OOT cell above, so this
# call raised NameError; keep it commented until that cell is enabled.
# sdf_woebin_plot(bin_oot)

## 3.2 Fit

# Fit with scikit-learn because sc.scorecard below needs an sklearn-style
# estimator; statsmodels is fit separately only for its coefficient summary.
from sklearn.linear_model import LogisticRegression

# FIX: penalty=None is the supported spelling for an unpenalized fit on
# scikit-learn >= 1.2; the old penalty="none" string was removed in 1.4.
lr = LogisticRegression(penalty=None, solver="newton-cg", n_jobs=-1)
lr.fit(train_woe_final.drop(columns=["flagy"]), train_woe_final["flagy"])

# Equivalent GLM fit for the p-value / coefficient summary table
model = sm.GLM(
    train_woe_final["flagy"],
    exog=sm.add_constant(train_woe_final.drop(columns=["flagy"])),
    family=sm.families.Binomial()).fit()
model.summary2()

## 3.2 Predict (section number duplicated in the original notebook)

train_pred = lr.predict_proba(train_woe_final.drop(columns=["flagy"]))[:, 1]
test_pred = lr.predict_proba(test_woe_final.drop(columns=["flagy"]))[:, 1]
# oot_pred = lr.predict_proba(oot_woe_final.drop(columns=["flagy"]))[:, 1]

## 3.3 Evaluate (KS / AUC via scorecardpy)

train_perf = sc.perf_eva(train_woe_final.flagy, train_pred, title="train")
test_perf = sc.perf_eva(test_woe_final.flagy, test_pred, title="test")
# oot_perf = sc.perf_eva(oot_woe_final.flagy, oot_pred, title="oot")

## 3.4 Scoring

# Scorecard anchor: 540 points at odds 1500/28500, 45 points to double the odds.
odds = 1500/28500
card = sc.scorecard(bins_final, lr, var_final, points0=540, pdo=45, odds0=odds)

train_score = sdf_scorecard_ply(train_final, card)
test_score = sdf_scorecard_ply(test_final, card)

# Align the score frames back to the original row order/index
train_score = train_score.loc[train.index]
test_score = test_score.loc[test1.index]

# Optional clipping of scores to the [300, 1000] range:
# train_score[train_score <= 300] = 300
# train_score[train_score >= 1000] = 1000
# test_score[test_score < 300] = 300
# test_score[test_score >= 1000] = 1000

# Score PSI between train and test
train_score.columns, test_score.columns = ["score"], ["score"]

sc.perf_psi(
    score={"train": train_score, "test": test_score},
    label={"train": train_final['flagy'], "test": test_final['flagy']},
    x_tick_break=50,)
print(f'max:{max(train_score.score)}, min:{min(train_score.score)}')

# 4. Report output
# Why each candidate variable was dropped, stage by stage.
len(cols)

# FIX: df_dropcol_trend_unexplain was removed from this concat — it is only
# built in a commented-out cell in section 2.8 and referencing it here raised
# NameError. Re-add it after uncommenting that cell.
var_drop_reason = pd.concat([df_dropcol_miss_iden,
                             df_dropcol_bin_iv,
                             df_dropcol_psi,
                             df_dropcol_woe_iv,
                             df_dropcol_lasso,
                             df_dropcol_stepwise,
                             df_dropcol_corr,
                             df_dropcol_vif,
                             df_dropcol_pvalue_params])
var_drop_reason
# Persist model artifacts (joblib is already imported at the top of the file,
# so the redundant re-import was removed).
joblib.dump(var_final,r'E:\result\var_final.pkl')
joblib.dump(lr,r'E:\result\lr.pkl')
joblib.dump(breaks_final,r'E:\result\breaks_final.pkl')
joblib.dump(bins_final,r'E:\result\bins_final.pkl')
joblib.dump(card,r'E:\result\card.pkl')
joblib.dump(train,r'E:\result\train.pkl')
joblib.dump(test1,r'E:\result\test.pkl')
output.to_csv(r'E:\report\iv_missing_rate.csv', encoding="utf_8_sig")

# IV of all candidate variables on the training set
iv_all = iv(train[var_nums+flag], "flagy")
iv_all["variable"] = iv_all.variable.apply(lambda x: x.replace("_woe", ""))
iv_all.set_index("variable", drop=True, inplace=True)
# Missing rate computed against the -9999 sentinel fill
missing_rate_all = (train[var_nums].fillna(-9999).apply(lambda x: (x == -9999).sum() / x.shape[0]).rename("缺失值占比"))
output_all = pd.concat([iv_all, missing_rate_all], axis=1)
output_all.sort_values(by="info_value", ascending=False, inplace=True)
output_all.to_csv(r'E:\借呗建模\3.模型构建\行内自有+百融衍生\report\iv_missing_rate_all.csv', encoding="utf_8_sig")

# Build the model report
train_final = train[var_final + ["flagy"]]
test_final = test1[train_final.columns]
data_total = df.copy()
data_total['user_date'] = pd.to_datetime(data_total['app_date'])
# NOTE(review): these assignments align on index; train_final/test_final are
# copies produced by fancy indexing, so pandas may emit SettingWithCopyWarning
# (silenced by the warnings filter at the top of the file).
train_final['user_date'] = data_total['user_date']
test_final['user_date'] = data_total['user_date']

os.getcwd()
os.chdir(r'E:\report')

# NOTE(review): df_oot1 is never defined in this file (only df_oot is) — this
# call raises NameError unless df_oot1 is built manually, with a flagy label,
# per the inline note below.
report(
    data_total,
    train_final,
    test_final,
    data_oot = df_oot1.fillna(-9999).rename(columns = {'app_date':'user_date'}), # must carry a label; add a dummy one if absent
    y="flagy",
    breaks_list=breaks_final,
    filename="自有数据",
    points0=495,
    pdo=60,
    odds0=1500/28500,
    basepoints_eq0=False,
    special_values=[-9999],
    grey=2,
    score_range=(300, 1000),
    tick=50,
    percent=5,
    user_date = 'user_date',
)

# Score decision tables on the full sample, using the persisted card/variables

card = joblib.load(r'E:\借呗建模\3.模型构建\行内自有数据\result\card.pkl')
var_final = joblib.load(r'E:\借呗建模\3.模型构建\行内自有数据\result\var_final.pkl')

df_all.flagy.value_counts()

# Cumulative (score-descending) decision table
score_high(df_all[var_final+['flagy']].fillna(-9999),card,score_range=(300, 1000),tick=50,if_contain_grey = True,grey_value=2)

# Equal-frequency (percentile) decision table
score_freq(df_all[var_final+['flagy']].fillna(-9999),card,score_range=(300, 1000),percent=5,if_contain_grey = True,grey_value=2)




