# -*- coding: utf-8 -*-
"""
Created on Wed Jul  7 14:47:51 2021

"""
import os
os.chdir(r'E:\test')

import re
import sys
import warnings

import numpy as np
import pandas as pd
import scorecardpy as sc
import statsmodels.api as sm
import statsmodels.stats.outliers_influence as oi
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

sys.path.append('./modules/')
from fonction import *

warnings.filterwarnings('ignore')

#%%
'''
Load the modeling data
'''
data=pd.read_csv('test.csv')

# Product list: maps variable name -> human-readable description
# (used later to annotate every report sheet)
varlist=pd.read_csv(r'产品列表.csv',encoding='gbk')
varlist=varlist.set_index('variable')['name'].to_dict()

# Hit (match) rate per product -- flag_hit comes from fonction.py
df_flag=flag_hit(data)

# Bad-rate distribution by month
month_dist=month_distr(data,x='user_date',y='flagy')

# Overall bad rate
rate(data,'flagy')
#%%
'''
Feature derivation
'''
# Map each Chinese province to its geographic region code
# (huadong=East, huanan=South, huazhong=Central, huabei=North,
#  xibei=Northwest, xinan=Southwest, dongbei=Northeast, gangaotai=HK/Macao/TW).
# Provinces not listed map to NaN via Series.map.
size_mapping2= {'山东':'huadong','江苏':'huadong','安徽':'huadong','浙江':'huadong','福建':'huadong','上海':'huadong',
'广东':'huanan','广西':'huanan','海南':'huanan',
'湖北':'huazhong','湖南':'huazhong','河南':'huazhong','江西':'huazhong',
'北京':'huabei','天津':'huabei','河北':'huabei','山西':'huabei','内蒙古':'huabei',
'宁夏':'xibei','新疆':'xibei','青海':'xibei','陕西':'xibei','甘肃':'xibei',
'四川':'xinan','云南':'xinan','贵州':'xinan','西藏':'xinan','重庆':'xinan',
'辽宁':'dongbei','吉林':'dongbei','黑龙江':'dongbei',
'台湾':'gangaotai','香港':'gangaotai','澳门':'gangaotai'}  

data['pd_cell_province2'] = data['pd_cell_province'].map(size_mapping2) 

#%%
'''
Data exploration
'''
# Column grouping
var_id=['cus_num','user_date']   # identifier columns (kept aside, not modeled)
flag=['flagy']                   # target flag
var_flag=list(df_flag.index)     # product-hit flag columns

# Columns explicitly excluded from modeling
col_drop=['pd_id_where', 'tl_id_eletail_lasttime', 'tl_cell_eletail_lasttime', 'pd_id_city', 'pd_cell_city', 'pef_paypower_prov', 'pd_cell_province',
 'mma_var100', 'mma_var101', 'mma_var201', 'mma_var202', 'mma_var302', 'mma_var303', 'pef_ind_cate1', 'cmec_conservative', 'cmec_radical', 'mma_var3']

# Columns with the 'ae_' prefix are excluded as well.
# (Idiomatic list comprehension replacing the original append loop;
#  re.match anchors at the start, so this matches names beginning with 'ae_'.)
col_ae=[c for c in data.columns if re.match('^ae_', c)]

# Candidate modeling columns = everything minus ids, target, flags and drops
cols=list(set(data.columns)-set(var_id)-set(flag)-set(var_flag)-set(col_drop)-set(col_ae))
types=data[cols].dtypes
var_cats=types[types == "object"].index.tolist()   # categorical candidates
var_nums=types[types != "object"].index.tolist()   # numeric candidates

#EDA
#数值型
data_nunique=data.nunique().to_dict()
df_eda_num=data[var_nums].describe(percentiles=[.01,.05,.25,.50,.75,.95,.99]).T
df_eda_num['unique']=df_eda_num.index.map(data_nunique)
#类别型
df_eda_cat=data[var_cats].describe(include='O').T
#df_eda_num.to_csv('result/df_eda_num.csv')
#df_eda_cat.to_csv('result/df_eda_cat.csv')

'''
#类别型变量转换
'''
X_transformed,transform_rule,rules = cate_var_transform_modif(data[['flagy']+cols],data['flagy'])   
pd.concat([(v).assign(varname = k) for k,v in rules.items()]).to_csv('result/trans_rules.csv')

df=pd.concat([data[var_id],X_transformed],axis=1)
#df=df.iloc[:,:50]

# df.to_csv('data/X_transformed.csv',index=False)
#%%
'''
Train/test split
'''
# Stratified 70/30 split on the target so both splits keep the same bad rate
train,test=train_test_split(df,test_size=0.3,random_state=1234,stratify=df['flagy'])
#train.to_csv('data/train.csv',index=False)
#test.to_csv('data/test.csv',index=False)
# Keep the identifier columns aside, then drop them from the modeling frames
train_id=train[var_id].reset_index(drop=True)   
test_id=test[var_id].reset_index(drop=True) 
train=train.drop(var_id,axis=1).reset_index(drop=True)
test=test.drop(var_id,axis=1).reset_index(drop=True)  
y_train = np.array(train['flagy'])
y_test=np.array(test['flagy'])

# Bad rate per split (sanity check on the stratification)
rate(train,y='flagy')
rate(test,y='flagy')

#%%
'''
变量筛选
'''

'''
1. 初筛
缺失值>0.95和集中度>0.95和唯一值删除
'''
col_rest_miss,df_dropcol_miss_iden=missing_identify_select(train,y='flagy',missing_trd=0.95,identify_trd=0.95,identify_num=1)

'''
2. iv值筛选，iv<=0.03
'''
#变量分箱
bins=sc.woebin(train[col_rest_miss],y='flagy',max_num_bin=5)
#woe转换
train_woe  = sc.woebin_ply(train[col_rest_miss],bins = bins)
test_woe = sc.woebin_ply(test[col_rest_miss], bins = bins)
#train_woe.to_csv('data/train_woe.csv',index=False)
#test_woe.to_csv('data/test_woe.csv',index=False)

iv_info,col_rest_iv,df_dropcol_iv=feature_filter_iv(train_woe,y='flagy',seuil=0.03)
#iv_info.to_csv('result/iv_info.csv',index=False)

'''
3. lasso+聚类筛选变量
'''
colrest_ls,df_dropcol_ls=feature_selector_als(train_woe[col_rest_iv+['flagy']],y='flagy')

'''
4. 相关性筛选变量
'''
#按相关最多次的变量进行一一剔除
col_rest_corr,df_dropcol_corr=var_corr(train_woe[colrest_ls],seuil=0.8)

#按变量两两相关的iv值进行筛选，将iv高的剔除
#col_rest_corr,df_drop_corr=var_corr2(train_woe[colrest_ls],y='flagy',seuil=0.8)

'''
5. 逐步回归
'''
reg_x = train_woe[col_rest_corr].drop(['flagy'],axis = 1)
reg_y = train_woe['flagy']
stepwise_variable = stepwise_selection(reg_x, reg_y,threshold_in = 0.05, threshold_out = 0.05)
stepwise_variable

#存储因逐步回归而删除的变量
col_rest_step =  list(stepwise_variable) +['flagy']
drop_col_for_step = list(set(col_rest_corr)-set(col_rest_step))

df_dropcol_step = pd.DataFrame(drop_col_for_step,columns = ['drop_col'])
df_dropcol_step['DropReason'] = 'step'
df_dropcol_step['Count'] = len(drop_col_for_step)

'''
6. VIF filter
Drop variables with VIF > 3
'''
# BUGFIX: the original assigned the first return value to the name `vif`,
# shadowing the vif() function itself -- any later call to vif() would then
# raise TypeError. Use a distinct name for the returned DataFrame instead.
df_vif,colrest_vif,df_dropcol_vif=vif(train_woe[col_rest_step],y='flagy',seuil=3)

# Re-bin the surviving variables on the raw (pre-WOE) columns
col_vif=[i.replace('_woe','') for i in colrest_vif]
bins_vif=sc.woebin(train[col_vif],y='flagy',max_num_bin=5)

# Interactive bin adjustment (woebin_adj prompts for manual break points)
breaks_adj=sc.woebin_adj(train[col_vif], train['flagy'],bins_vif)

# Final candidate variables retained after manual re-binning
col_f=['ql_m6_cell_bank_max_monnum',
 'pef_bus_type',
 'ql_m6_id_nbank_avg_monnum',
 'ql_m12_cell_nbank_orgnum']

# Re-bin train and test with the manually adjusted break points
bins_adj=sc.woebin(train[col_f+['flagy']],y='flagy',breaks_list=breaks_adj)
bins_test=sc.woebin(test[col_f+['flagy']],y='flagy',breaks_list=breaks_adj)

# Map raw values to WOE using the adjusted bins
train_woe_=sc.woebin_ply(train[col_f+['flagy']],bins_adj)
test_woe_=sc.woebin_ply(test[col_f+['flagy']],bins_adj)

'''
7. LR model selection
'''
# lr() comes from fonction.py; presumably it drops variables rejected by the
# logistic model (wrong-sign / insignificant coefficients) -- confirm there.
model,df_dropcol_lr = lr(train_woe_,y='flagy')

# Refit with statsmodels (sm = statsmodels.api, imported at the top of the
# file) to obtain the coefficient summary table
train_woe1 = sm.add_constant(train_woe_.drop('flagy',axis = 1))
model = sm.Logit(np.array(train_woe_['flagy']), train_woe1).fit()

# Coefficient summary as a DataFrame (first summary row is the header)
model_stat= model.summary().tables[1].data
model_stat= pd.DataFrame(data=model_stat[1:], columns=model_stat[0])

# Final variable lists: params index is ['const', <vars...>], so skip item 0
fin_col_woe=model.params.index.tolist()[1:]+['flagy']
fin_col=[i.replace('_woe','') for i in fin_col_woe]
bins_fin={i:bins_adj[i] for i in fin_col if i not in ['flagy']}
sc.woebin_plot(bins_fin)

import pickle
# with open('model/lr.pkl','wb') as f:
#     pickle.dump(model,f)
#%%
'''
Model performance evaluation
'''
def ks_p(dat,bins,model,fin_col,names,path='plot'):
    """Score a dataset, report KS/AUC via sc.perf_eva and save the plot.

    Returns (predictions, one-row KS summary DataFrame indexed by `names`).
    """
    woe_df = sc.woebin_ply(dat[fin_col], bins=bins)
    design = sm.add_constant(woe_df.drop('flagy', axis=1))
    preds = model.predict(design)
    perf = sc.perf_eva(label=woe_df['flagy'], pred=preds, title=names)
    # Persist the KS/ROC figure produced by perf_eva
    perf['pic'].savefig(path+'/%s.png'%names, dpi=100)
    ks_row = pd.DataFrame.from_dict(perf, orient='index').T.iloc[:, :3]
    ks_row.index = [names]
    print(perf)
    return preds, ks_row

# Evaluate on train, test and the full sample; keep predictions + KS rows
predtrain,kstrain=ks_p(train,bins = bins_fin,model = model,fin_col = fin_col,names='train',path='plot')
predtest,kstest=ks_p(test,bins = bins_fin,model = model,fin_col = fin_col,names='test',path='plot')
preddf,ksdf=ks_p(df,bins = bins_fin,model = model,fin_col = fin_col,names='data',path='plot')

#%%
'''Scorecard conversion'''
# scorecard() is presumably the fonction.py wrapper around sc.scorecard -- confirm.
# points0/odds0/pdo: 670 points at the sample's overall bad/good odds, 90 points per
# doubling of the odds; odds0 is computed as bad count / good count of the full data.
card = scorecard(bins_fin
          ,model
          ,model.params.index.tolist()[1:]
          ,points0=670
          ,odds0=sum(data['flagy'])/(len(data['flagy'])-sum(data['flagy']))
          ,pdo=90
          ,basepoints_eq0=False)

df_card=pd.concat(card)
# with open('model/card.pkl','wb') as f:
#     pickle.dump(card,f)

def score_trans(dat,fin_col,card,only_total_score):
    """Apply the scorecard to `dat` and return scores with the target attached.

    Prints the min/max score as a quick range check.
    """
    scored = sc.scorecard_ply(dat[fin_col], card, only_total_score=only_total_score)
    scored.columns = ['score']
    print(scored['score'].min(), scored['score'].max())
    scored['flagy'] = dat['flagy']
    return scored

# Score every split with the final card
train_score = score_trans(train,fin_col,card,only_total_score=True)
test_score = score_trans(test,fin_col,card,only_total_score=True)
df_score = score_trans(df,fin_col,card,only_total_score=True)
#train_score.to_csv('result/train_score.csv')
#test_score.to_csv('result/test_score.csv')
#df_score.to_csv('result/df_score.csv')

#%%
'''
PSI验证 ：测试和训练，测试和验证，建模样本和PSI样本
1. 变量PSI验证
2. 分数PSI验证
'''
#变量PSI
col_final=[i for i in fin_col if i not in ['flagy']]
df_psi,df_total_psi=var_psi(train,test,col_final,bins_fin)

#分数PSI
df_score_psi=score_psi(train_score['score'],train_score['flagy'],test_score['score'],test_score['flagy'],50)
#%%
'''
分数分布
'''
#分数等分分布
score_seq_dist=score_overdue(df_score['score'],df_score['flagy'],bins_way='same_dist',order='asc',k=50)

#分数等距分布
score_seq_per=score_overdue(df_score['score'],df_score['flagy'],bins_way='same_per',order='asc',k=21)
#%%
'''
Report output
'''
model_file=pd.ExcelWriter('modelfile1.xlsx')

# Restrict the bin dictionaries to the final model variables
fin_col_woe = model.params.index.tolist()[1:]+ ['flagy']
fin_col = [i.replace("_woe",'') for i in  fin_col_woe]
bins_fin = {i:bins_fin[i] for i in fin_col if i not in ['flagy']}
bins_test = {i:bins_test[i] for i in fin_col if i not in ['flagy']}
df_binsfin=pd.concat(bins_fin).reset_index()
df_bins_iv=df_binsfin[['variable','total_iv']].drop_duplicates().reset_index(drop=True)

# Sheet 1: placeholder for the model explanation
pd.DataFrame().to_excel(model_file,sheet_name='1.Model_Explain',index=False)

# Sheet 2: basic statistics (monthly bad-rate distribution + bad rate per split)
np.round(month_distr(data,x='user_date',y='flagy'),4).to_excel(model_file,sheet_name='2.Original_Stat')

rate_data=rate(data,'flagy')
rate_data['数据集']='建模数据'
np.round(rate_data,4).to_excel(model_file,sheet_name='2.Original_Stat',index=False,startrow=1,startcol=9)
rate_train=rate(train,y='flagy')
rate_train['数据集']='训练集'
np.round(rate_train,4).to_excel(model_file,sheet_name='2.Original_Stat',index=False,startrow=5,startcol=9)
rate_test=rate(test,y='flagy')
rate_test['数据集']='测试集'
np.round(rate_test,4).to_excel(model_file,sheet_name='2.Original_Stat',index=False,startrow=9,startcol=9)

# Sheet 3: data preprocessing (placeholder)
pd.DataFrame().to_excel(model_file,sheet_name='3.Data_Prep_Format',index=False,startrow=1)
# Sheet 4: feature derivation (placeholder)
pd.DataFrame().to_excel(model_file,sheet_name='4.Var_Diravation',index=False,startrow=1)

# Sheet 5: IV of the variables that passed the IV filter, with descriptions
# and train-split missing rates
df_iv=iv_info[iv_info['info_value']>0.03].reset_index(drop=True)
df_iv['variable']=[i.replace('_woe','') for i in df_iv['variable']]
df_iv['exp']=df_iv['variable'].map(varlist)
df_iv['missing_rate']=[train[i].isna().sum()/len(train) for i in list(df_iv['variable'])]
df_iv=df_iv[['variable','exp','info_value','missing_rate']]
np.round(df_iv,4).to_excel(model_file,sheet_name='5.Var_Pred_IV',index=True,startrow=1)

# Sheet 6: selection audit trail -- every dropped column with its reason
df_dropcol_reason=pd.concat([df_dropcol_miss_iden
								,df_dropcol_iv
								,df_dropcol_ls
								,df_dropcol_corr
								,df_dropcol_step
								,df_dropcol_vif
								,df_dropcol_lr],axis=0)
df_dropcol_reason['drop_col']=[i.replace('_woe','') for i in df_dropcol_reason['drop_col']]
df_dropcol_reason['exp']=df_dropcol_reason['drop_col'].map(varlist)
df_dropcol_reason=df_dropcol_reason[['drop_col','exp','DropReason','Count']]
df_dropcol_reason.to_excel(model_file,sheet_name='6.Var_selection',index=True,startrow=1)

# Sheet 7: correlation matrix of the final model variables (WOE columns)
df_corr=train_woe_[[i for i in fin_col_woe if i not in ['flagy']]]
df_corr.columns=[i.replace('_woe','') for i in df_corr.columns]
df_corr = df_corr.corr()
np.round(df_corr,4).to_excel(model_file,sheet_name='7.VarSelect_Corr',index=True,startrow=1)

# Sheet 8: model coefficients (df_param_ comes from fonction.py)
param=df_param_(model,train_woe_,fin_col_woe,train,flag_name='flagy')
param['exp']=param['variable'].map(varlist)
np.round(param,4).to_excel(model_file,sheet_name='8.Model_Parms',index=False,startrow=1)

# Sheet 9: per-bin distribution and bad rate, train vs test side by side
df_bins_fin=pd.concat(bins_fin)[['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob','woe']]
df_bins_test=pd.concat(bins_test)[['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob']]
df_bins=pd.merge(df_bins_fin,df_bins_test,on=['variable', 'bin'],how='outer',suffixes=('_train', '_test'))
df_bins['exp']=df_bins['variable'].map(varlist)

df_bins_all=df_bins[['variable', 'exp', 'bin', 'count_distr_train', 'badprob_train','count_distr_test','badprob_test']]
np.round(df_bins_all,4).to_excel(model_file,sheet_name='9.Var_Train_Test_Compare',index=True,startrow=1)

# Sheet 10: scorecard -- points per bin joined with WOE and coefficients
df_card=pd.concat(card)
df_card=pd.merge(df_card,df_bins_fin[['variable','bin','woe']],on=['variable','bin'],how='left')
df_card=pd.merge(df_card,param[['variable','params']],on=['variable'],how='left')
df_card['exp']=df_card['variable'].map(varlist)
df_card=df_card[['variable', 'exp', 'bin', 'params', 'woe', 'points']]
df_card.to_excel(model_file,sheet_name='10.Card',index=True,startrow=1)

# Sheet 11: model discrimination (KS/AUC) for train vs test
df_ks=pd.concat([kstrain,kstest]).T
df_ks.to_excel(model_file,sheet_name='11. Model_Disc',index=True,startrow=1)

# Sheet 12: variable stability (PSI) between train and test
df_psi,df_total_psi=var_psi(train,test,col_final,bins_fin)
df_psi['varname']=[i.replace('_woe','') for i in df_psi['varname']]
df_psi.rename(columns={'varname':'variable','value_woe':'woe'},inplace=True)
#df_psi['exp']=df_psi['variable'].map(varlist)
df_psi=pd.merge(df_bins[['variable','woe','bin','count_train','count_test']],df_psi,on=['variable','woe'])
df_psi[['variable', 'bin', 'count_train', 'train_distr', 'count_test',
       'test_distr', 'var_psi', 'total_psi']].to_excel(model_file,sheet_name='12.var_Stab',index=True,startrow=1)


# Sheet 13: score stability
np.round(df_score_psi,5).to_excel(model_file,sheet_name='13.Model_Stab',index=True,startrow=1)

# Sheet 14: score rank-ordering ability
np.round(score_seq_dist,4).to_excel(model_file,sheet_name='14.Model_Score',index=True,startrow=1)
np.round(score_seq_per,4).to_excel(model_file,sheet_name='14.Model_Score',index=True,startrow=1,startcol=16)

# BUGFIX: ExcelWriter.save() was deprecated in pandas 1.5 and removed in 2.0;
# close() exists in all pandas versions and flushes/saves the workbook.
model_file.close()

#%%
import matplotlib.pyplot as plt
from matplotlib import image

def save_png(bins_adj,save_addr='plot/train',col_num = 1, figsize=(4,2)):
    """Save one WOE bin plot per variable as a PNG under `save_addr`.

    `col_num` and `figsize` are kept in the signature for backward
    compatibility with existing callers, but are unused: sc.woebin_plot
    creates its own figure for each variable.
    """
    for var in bins_adj.keys():
        sc.woebin_plot(bins_adj[var])
        plt.savefig(save_addr+'/'+var + '.png',dpi = 100)
        # BUGFIX: the original called plt.close() once after the loop, closing
        # only the last figure and leaking the rest; close everything each pass.
        plt.close('all')
        
# Bin dictionaries restricted to the final model variables
bins_ftrain={i:bins_adj[i] for i in fin_col if i not in ['flagy']}
bins_ftest={i:bins_test[i] for i in fin_col if i not in ['flagy']}
        
# Export the per-variable bin plots for train and test
save_png(bins_ftrain,save_addr='plot/train',col_num = 1, figsize=(4,2))
save_png(bins_ftest,save_addr='plot/test',col_num = 1, figsize=(4,2))


