# -*- coding: utf-8 -*-
"""
Created on Tues Aug 3 10:15:20 2021

@author: luchang
"""

# 1.导包------------------------------------------------------------
import pandas as pd
import numpy as np
import rules_function_def as rfd  # 载入自定义函数
import time
import os
import re
import scorecardpy as sc
from sklearn.model_selection import train_test_split
# 2. Data preparation ---------------------------------------------------------
## 2.1 Load data --------------------------------------------------------------
# Load the modelling sample; 'cus_num' is the customer id used as the index.
# skiprows=[1] drops the second physical row of the export — presumably a
# sub-header/dtype row; TODO confirm against the raw CSV.
df_t = pd.read_csv('新浪建模三要素匹配_1625538190.csv',index_col = 'cus_num', header = 0,low_memory = False,skiprows = [1])
df_t.head()  # notebook-style inspection; no effect when run as a script
df_t.rename(columns = {'other_var1':'flagy'},inplace = True) # rename the y-label column
## 2.2 Inspect product match rates --------------------------------------------
# project helper; presumably prints per-product hit/match rates — see rules_function_def
rfd.flag_hit(df_t)
## 2.3 Pull out the column list of each product -------------------------------
# Columns are prefixed with a product tag (als_/ir_/frg_/el_ ...); collect each
# product's columns by prefix so one product can be modelled in isolation.
als_list = [i for i in df_t.columns if re.match('als_', i) is not None]
ir_list = [i for i in df_t.columns if re.match('ir_', i) is not None]
frg_list = [i for i in df_t.columns if re.match('frg_', i) is not None]
# Fix: `el_list` is consumed below (section 3 models the "el" product) but was
# never defined anywhere — build it the same way as the other product lists.
el_list = [i for i in df_t.columns if re.match('el_', i) is not None]

# 3. Preprocessing (using the "el" product as the example) ---------------------
# NOTE(review): `el_list` is not defined earlier in this file (only als_/ir_/
# frg_ lists are); it is presumably built the same way from columns matching
# 'el_' — confirm before running this section.
Data = df_t[el_list+['flagy']]
# Drop samples whose label is 0.5 — presumably an indeterminate/grey label;
# TODO confirm the 0.5 convention with the label definition.
Data = Data[Data.flagy != 0.5]
'''反欺诈规则处理的变量集与样本的变量集取交集'''
# input_var = list(set(Data.columns.values) & set(var_direction.iloc[:, 0]))
# input_var = list(set(Data.columns.values))
X = Data.drop(columns=['flagy'])
Y = Data.loc[:, 'flagy']  # note: 1 = bad customer, 0 = good customer
badrate_real = sum(Y) / np.shape(Y)[0]  # overall bad rate; np.shape(Y)[0] is the row count
print('原始数据集的维度{0},原始数据集的badrate为{1}'.format(np.shape(X), badrate_real))

## 3.1 Feature screening -------------------------------------------------------
# Drop variables whose missing ratio or single-value (mode) ratio is too high.
nan_ratio_threshold = 0.99  # max allowed share of missing values per variable
mode_ratio_threshold = 0.99  # max allowed share of the most frequent value

# rfd.pre_deal is project code; presumably returns X with the offending
# columns removed — confirm in rules_function_def.
X = rfd.pre_deal(X, nan_ratio_threshold = nan_ratio_threshold,
                 mode_ratio_threshold = mode_ratio_threshold)

print('数据预处理后数据集的维度{0}'.format(np.shape(X)))

## 3.2 Categorical variable conversion ----------------------------------------
d_type = X.dtypes  # dtype of every predictor
# pandas stores strings under dtype "object"; the "str" comparison is kept
# from the original as a harmless belt-and-braces check.
print('类别变量的个数为{0}'.format(sum((d_type == "object") | (d_type == "str"))))

# Encode the categorical variables; transform_rule records the mapping so the
# validation sample can be encoded identically later on.
# Fix: `cate_var_transform` was called as a bare name (NameError as written)
# but is defined in the project helper module imported as `rfd` — qualify it.
# (The original comment said "transform only if the count is non-zero", but
# the call has always been unconditional; behaviour kept.)
X_transformed,transform_rule = rfd.cate_var_transform(X,Y)
X = X_transformed

## 3.3 Train/test split --------------------------------------------------------
# 70/30 split; fixed random_state for reproducibility.
train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.3, random_state=666)
print('训练集：{}\n测试集：{}'.format(len(train_x), len(test_x)))

# 4. Variable binning ----------------------------------------------------------
# Three alternatives are provided: decision-tree, best-KS and chi-square
# binning. NOTE(review): run as a script, all three execute in sequence and
# each overwrites var_splitpoint / WOE_var, so only the LAST one (chi-square)
# actually feeds the rest of the pipeline. Pick one (the original author
# recommends the decision-tree binning) and comment out the others.

'''---------------=================决策树分箱=============---------------------'''
max_leaf_num = 6   # maximum number of bins per variable
min_woe_box_percent = 0.01 # minimum share of samples per bin
min_woe_box_num_min = 200   # minimum sample count per bin

var_splitpoint = rfd.myWOEbin(train_x, train_y, max_leaf_num = max_leaf_num,
                              min_woe_box_percent = min_woe_box_percent,
                              min_woe_box_num_min = min_woe_box_num_min)
WOE_var = var_splitpoint.index.values  # variables that produced valid bins

'''---------------=================最优KS分箱=============---------------------'''
breaks_num = 6  # maximum number of split points
min_woe_box_percent = 0.01
min_woe_box_num_min = 50

var_splitpoint = rfd.best_ks_box(train_x, train_y, breaks_num = breaks_num, min_woe_box_percent = min_woe_box_percent,
                               min_woe_box_num_min = min_woe_box_num_min)
WOE_var = var_splitpoint.index.values

'''---------------=================卡方分箱=============---------------------'''
breaks_num = 6
min_woe_box_percent = 0.01
min_woe_box_num_min = 50

# cf is presumably the chi-square confidence threshold — confirm in rfd
var_splitpoint = rfd.chi_box(train_x, train_y, breaks_num = breaks_num, min_woe_box_percent = min_woe_box_percent,
                               min_woe_box_num_min = min_woe_box_num_min, cf=0.1)
WOE_var = var_splitpoint.index.values

# 5. Single-variable rule screening -------------------------------------------
# With lift as the sole metric, enumerate candidate rules from the binning
# thresholds and keep the single rules whose lift clears the bar.

## 5.1 Single-variable rule screening -----------------------------------------
lift_down_lmt = 2 # lift threshold for rules allowed into pairwise crossing

'''变量的分箱方向文档direction中的需要抹平左边界和有边界的字段'''
# left_lmt = var_direction.iloc[np.where(var_direction.iloc[:, 2] == 1)[0], 0]#----需要抹平左边界的字段
# right_lmt = var_direction.iloc[np.where(var_direction.iloc[:, 2] == -1)[0], 0]#----需要抹平右边界的字段

'''该样本中需要抹平边界的字段'''
# left_new = list(set(left_lmt) & set(WOE_var))
# right_new = list(set(right_lmt) & set(WOE_var))
# Here every surviving variable is treated as a right-open rule (n, inf);
# left_new would hold (-inf, n)-style variables if a direction file were used.
left_new = [] #list(set(WOE_var)) # example:(-inf,n) 
right_new = list(set(WOE_var)) # example:(n,inf) 
oth_var = list(set(WOE_var) - set(left_new) - set(right_new))

'''处理分箱结果'''
# rfd.deal_WOEbin appears to return the per-sample 0/1 hit matrix (rule_X,
# one column per rule) and the rule table (rule) — confirm in rules_function_def.
rule_X, rule = rfd.deal_WOEbin(train_x, train_y, left_var=left_new,
                               right_var=right_new, oth_var=oth_var,
                               var_splitpoint=var_splitpoint,
                               lift_need=lift_down_lmt)
print('用于两两交叉的规则个数为{0}'.format(len(rule)))
# 614  (rule count observed on the original sample)

rule  # notebook inspection; no effect as a script

## 5.2 Correlation screening ---------------------------------------------------
cor_max = 0.9 # correlation-coefficient threshold
try:
    # correlation matrix between the rule hit vectors (one column per rule)
    rule_cor = pd.DataFrame(np.corrcoef(rule_X.T))
    rule_cor_choose, rule_X_cor_choose = rfd.dealwithcor(rule_cor, rule,
                                                     rule_X, cor_max=cor_max)
except Exception:
    # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate. On any failure (e.g. degenerate input to corrcoef)
    # fall back to the unfiltered rule set — deliberate best-effort.
    rule_cor_choose, rule_X_cor_choose = rule, rule_X

# del Data,X_raw,X,num_var

print('用于两两交叉的规则筛除相关性之后，剩余个数为{0}'.format(len(rule_cor_choose)))

# 6. Two-variable rule screening ----------------------------------------------
# Cross the surviving single rules pairwise and filter the combinations by
# lift and a minimum hit count.

# thresholds for the crossed rules
Rule_min_cnt = 20  # minimum number of samples a crossed rule must hit
lift_need = 3  # lift threshold for crossed rules (re-set, stricter than 5.1)
badrate_need = badrate_real * lift_need  # implied bad-rate threshold

# rfd.rule_cross_rule appears to return rule-by-rule matrices: a keep flag
# plus hit count, bad count and bad rate for every rule pair — confirm the
# exact layout in rules_function_def.
if_cross, hitnum, badnum, badrate = rfd.rule_cross_rule(rule_cor_choose,rule_X_cor_choose,train_y,
                                                     Rule_min_cnt = Rule_min_cnt,
                                                     lift_need = lift_need,
                                                     badrate_real = badrate_real)

# 7. Consolidate all rules -----------------------------------------------------
# Merge the single- and two-variable rules into one table for the final rule
# set computation. The single rules are re-filtered here with the SAME
# thresholds used for the crossed rules (badrate_need / Rule_min_cnt).

'''取出满足条件的单变量规则'''
# positions of the single rules that clear the crossed-rule thresholds
OneVar_choose = np.where( (rule.loc[:,"badrate"] > badrate_need) & (rule.loc[:,"hit_num"] > Rule_min_cnt))[0]
OneVarRule = pd.DataFrame(rule.iloc[OneVar_choose,:])
OneVarRule_X = pd.DataFrame(rule_X.iloc[:,OneVar_choose])
OneVarRule_num = len(OneVar_choose)

'''双变量规则的条数'''
TwoVarRule_num = int(if_cross.sum().sum())

'''用于存储单、双变量规则'''
# one row per rule (crossed rules first, then single rules), pre-filled NaN
TwoVarRule = pd.DataFrame(np.zeros([TwoVarRule_num + OneVarRule_num,9]) * np.nan,
                          columns = ["var_1", "down_lmt_1","up_lmt_1",
                          "var_2","down_lmt_2","up_lmt_2","badnum","hitnum","badrate"])

sample_num = len(rule_X_cor_choose) # total sample count
# per-sample 0/1 hit matrix, one column per rule
TwoVarRule_X = pd.DataFrame(np.zeros([sample_num,(TwoVarRule_num+OneVarRule_num)]))

'''提取双变量规则'''
row_index,col_index=np.where(if_cross> 0) # row/column positions of the kept rule pairs
for r,c,k in zip(row_index,col_index,range(len(row_index))):
    var1_info=np.array(rule_cor_choose.iloc[r,[0,1,2]])
    var2_info=np.array(rule_cor_choose.iloc[c,[0,1,2]])
    # fill in the rule description row
    TwoVarRule.loc[k,["var_1", "down_lmt_1","up_lmt_1"]]=var1_info
    TwoVarRule.loc[k,["var_2","down_lmt_2","up_lmt_2"]]=var2_info
    TwoVarRule.loc[k,['badnum','hitnum','badrate']]=[badnum.iloc[r,c],hitnum.iloc[r,c],badrate.iloc[r,c]]
    # a sample hits the crossed rule only when it hits BOTH parent rules: the
    # column sum is 2 for "both", so map <=1 -> 0 FIRST, then >1 -> 1
    # (this order matters; reversing the two lines would zero everything out)
    temp_X=np.array(rule_X_cor_choose.iloc[:,r]+rule_X_cor_choose.iloc[:,c])
    temp_X[temp_X<=1] = 0
    temp_X[temp_X>1] = 1
    TwoVarRule_X.iloc[:,k]=temp_X
    
'''重新筛选后的单变量规则加入到变量集中'''
# .loc slicing is label-based/inclusive: rows TwoVarRule_num..end hold the
# single rules appended after the crossed rules
TwoVarRule.loc[TwoVarRule_num:,['var_1']]=np.array(OneVarRule.loc[:,['var']])
# NOTE(review): 'dowm_lmt' looks like a typo for 'down_lmt', but it must match
# the column name emitted by rfd.deal_WOEbin — confirm there before renaming.
TwoVarRule.loc[TwoVarRule_num:,['down_lmt_1']]=np.array(OneVarRule.loc[:,['dowm_lmt']])
TwoVarRule.loc[TwoVarRule_num:,['up_lmt_1']]=np.array(OneVarRule.loc[:,['up_lmt']])
             
TwoVarRule.loc[TwoVarRule_num:,['badnum']] =np.array(OneVarRule.loc[:,['badnum']])               
TwoVarRule.loc[TwoVarRule_num:,['hitnum']] =np.array(OneVarRule.loc[:,['hit_num']])
TwoVarRule.loc[TwoVarRule_num:,['badrate']] =np.array(OneVarRule.loc[:,['badrate']])
TwoVarRule_X.iloc[:,TwoVarRule_num:] = OneVarRule_X.values

# del rule_X,rule_X_cor_choose,OneVarRule_X

TwoVarRule  # notebook inspection; no effect as a script

# 8. Final rule-set selection --------------------------------------------------
# Two versions of the greedy rule-set builder exist; both add a rule to the
# set only when it catches enough NEW bad customers (min_bad_add). Per the
# original note, use v1: v2 is far slower (v1 ~40 min for 110k samples /
# 25k rules on the original hardware).
'''使用version1的挑选有效规则集，11万样本，2.5万规则，大概40分钟，version2耗时会很长，谨慎使用'''
t1 = time.time()
if_choose_final = rfd.useful_rule_v1(TwoVarRule,TwoVarRule_X, train_y, 
                       min_bad_add = 10, 
                       badrate = "badrate")
t2 = time.time()

# keep only the selected rules (first column of if_choose_final is the 0/1 flag)
rule_final = TwoVarRule.iloc[np.where(if_choose_final.iloc[:,0] == 1)[0],:]

rule_X_final = TwoVarRule_X.iloc[:,np.where(if_choose_final.iloc[:,0] == 1)[0]]

# del TwoVarRule_X

'''计算有效规则集的效果'''
all_bad = sum(train_y)
sample_num = len(train_y)

# order the rules by descending bad rate so the cumulative stats below read
# as "effect of the top-k rules"
order_temp =  np.argsort(-np.array(rule_final.loc[:,"badrate"]))
rule_final = rule_final.iloc[order_temp, :]
rule_X_final = rule_X_final.iloc[:,order_temp]
rule_final_effe = rule_final.copy()

# per-rule bad hits; .values sidesteps index alignment — assumes rule_X_final
# rows are in the same order as train_y (TODO confirm upstream)
temp = pd.DataFrame(rule_X_final).apply(lambda x: x * train_y.values)
rule_final_effe['hit_rate'] = rule_final_effe['hitnum'] / train_x.shape[0]
# cummax across rule columns marks a sample once ANY of the first k rules
# hits it, so the sums below count DISTINCT customers (no double counting)
rule_final_effe['badnum_acu'] = temp.cummax(axis=1).sum().values
rule_final_effe['hit_num_acu'] = rule_X_final.cummax(axis=1).sum().values
rule_final_effe['hit_rate_acu'] = rule_final_effe['hit_num_acu'] / train_x.shape[0]
rule_final_effe['badrate_acu'] = rule_final_effe['badnum_acu'] / rule_final_effe['hit_num_acu']
rule_final_effe['recall_acu'] = rule_final_effe['badnum_acu'] / train_y.sum()
rule_final_effe['lift_acu'] = rule_final_effe['badrate_acu'] / (train_y.sum() / train_y.size)
rule_final_effe['lift'] = rule_final_effe['badrate'] / (train_y.sum() / train_y.size)
rule_result = rule_final_effe
rule_result.reset_index(drop=True, inplace=True)
# rule_result.to_csv('train_apply_rule.csv',encoding="utf_8_sig")

rule_result  # notebook inspection; no effect as a script
# 9.验证规则集--------------------------------------------------------------
#验证函数定义
def rule_check(rule_result, df):
    """Re-apply every rule in `rule_result` to the sample `df` and recompute
    the hit/bad statistics on that sample.

    Parameters
    ----------
    rule_result : pd.DataFrame
        One rule per row: var_1/down_lmt_1/up_lmt_1 (plus var_2/down_lmt_2/
        up_lmt_2 for crossed rules; var_2 is NaN for single-variable rules).
        A NaN lower limit encodes "hit when the variable itself is missing".
    df : pd.DataFrame
        Sample containing the rule variables plus the binary label column
        'flagy' (1 = bad). NOTE: df's index is reset IN PLACE (side effect
        kept from the original implementation).

    Returns
    -------
    tuple (rule_test, hit_matrix)
        rule_test: copy of rule_result with recomputed badnum/hitnum/
        hit_rate/badrate plus the cumulative ('_acu') columns.
        hit_matrix: per-sample 0/1 hit matrix (one column per rule) joined
        with the flagy column.
    """
    rule_matrix = pd.DataFrame(np.zeros((len(df), len(rule_result))))
    rule_test = rule_result.copy()
    df.reset_index(drop=True, inplace=True)
    for i in range(len(rule_result)):
        var1 = rule_result.iloc[i]['var_1']
        var2 = rule_result.iloc[i]['var_2']
        down_lmt_1 = rule_result.iloc[i]['down_lmt_1']
        down_lmt_2 = rule_result.iloc[i]['down_lmt_2']
        up_lmt_1 = rule_result.iloc[i]['up_lmt_1']
        up_lmt_2 = rule_result.iloc[i]['up_lmt_2']
        try:
            # Crossed rule: hit when BOTH variables lie in (down, up].
            # Computed first and then overwritten below when a lower limit
            # is NaN (NaN limit == "that variable must be missing").
            temp = df[var1].apply(lambda x: 1 if x > down_lmt_1 and x <= up_lmt_1 else 0) * df[var2].apply(lambda x:
                                                             1 if x > down_lmt_2 and x <= up_lmt_2 else 0)
            if np.isnan(down_lmt_1) and ~np.isnan(down_lmt_2):
                temp = df[var1].apply(lambda x: 1 if np.isnan(x) else 0) * df[var2].apply(lambda x: 
                                                                                          1 if x > down_lmt_2 and x <= up_lmt_2 else 0)
            elif ~np.isnan(down_lmt_1) and np.isnan(down_lmt_2):
                temp = df[var1].apply(lambda x: 1 if x > down_lmt_1 and x <= up_lmt_1 else 0) * df[var2].apply(lambda x: 
                                                                                                               1 if np.isnan(x) else 0)
            elif np.isnan(down_lmt_1) and np.isnan(down_lmt_2):
                temp = df[var1].apply(lambda x: 1 if np.isnan(x) else 0) * df[var2].apply(lambda x: 1 if np.isnan(x) else 0)

        except Exception:
            # Fix: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). Reaching here means df[var2]
            # raised, i.e. var_2 is NaN: a single-variable rule.
            # NOTE(review): this branch tests [down, up) while the crossed
            # branch tests (down, up] — confirm which boundary convention
            # the rule generator intended before unifying them.
            temp = df[var1].apply(lambda x: 1 if x >= down_lmt_1 and x < up_lmt_1 else 0)
            if np.isnan(down_lmt_1):
                temp = df[var1].apply(lambda x: 1 if np.isnan(x) else 0)

        rule_matrix[i] = temp
    # per-rule bad hits: multiply each 0/1 hit column by the bad flag
    temp = rule_matrix.apply(lambda x: x * df['flagy'])
    rule_test['badnum'] = temp.sum().values
    rule_test['hitnum'] = rule_matrix.sum().values
    rule_test['hit_rate'] = rule_test['hitnum'] / df.shape[0]
    rule_test['badrate'] = rule_test['badnum'] / rule_test['hitnum']
    # cummax across rule columns marks a sample once ANY of the first k rules
    # hits it, so the sums count DISTINCT customers (no double counting)
    rule_test['badnum_acu'] = temp.cummax(axis=1).sum().values
    rule_test['hit_num_acu'] = rule_matrix.cummax(axis=1).sum().values
    rule_test['hit_rate_acu'] = rule_test['hit_num_acu'] / df.shape[0]
    rule_test['badrate_acu'] = rule_test['badnum_acu'] / rule_test['hit_num_acu']
    rule_test['recall_acu'] = rule_test['badnum_acu'] / df['flagy'].sum()
    rule_test['lift_acu'] = rule_test['badrate_acu'] / (df['flagy'].sum() / df.shape[0])
    rule_test['lift'] = rule_test['badrate'] / (df['flagy'].sum() / df.shape[0])

#     rule_test.to_csv('rule_test.csv', encoding='utf_8_sig', index=False)
    return rule_test, rule_matrix.join(df.flagy)

# Test-set validation
df_test = pd.concat([test_x, test_y], axis=1)
df_test.reset_index(drop=1, inplace=True)
rule_test, _ = rule_check(rule_result, df_test)
rule_test  # notebook inspection; no effect as a script
# rule_test.to_csv('test_apply_rule.csv', encoding='utf_8_sig', index=False)

# Out-of-time / verification-set validation
very_df = pd.read_excel('VC3.1_10000.xlsx', header=[0,1,2,3])  # 4-level column header
# very_df.renmae(columns={'mob1_d30': 'flagy'}, inplace=True)
# very_df.columns = cols
# Re-apply the categorical encodings learned on the training sample
# (transform_rule comes from the section-3.2 conversion step).
for col in transform_rule:
    trans = dict(transform_rule[col][['raw data', 'transform data']].to_dict(orient='split')['data'])
    very_df[col] = very_df[col].map(trans)
# NOTE(review): flagy is forced to 0, so badnum/badrate/recall on this set
# are all zero — presumably labels are not yet available for the OOT sample
# and only the hit rates are meaningful here. Confirm.
very_df['flagy'] = 0
# very_df = very_df[very_df.flagy != 2]
very_df.reset_index(drop=True, inplace=True)
rule_oot, _ = rule_check(rule_result, very_df)
rule_oot  # notebook inspection; no effect as a script
# rule_oot.to_csv('oot_apply_rule.csv', encoding='utf_8_sig', index=False)





# Final rule set across all products
# NOTE(review): als_result/alu_result/el_result/frg_result/ir_result/sl_result
# are never defined in this file — this notebook-style script is presumably
# re-run once per product (sections 3-8), with each product's rule_result
# saved under these names before this cell executes. Confirm the workflow.
result_t = pd.concat([als_result,alu_result,el_result,frg_result,ir_result,sl_result])

# Full preprocessed sample (features + label)
df_tt = pd.concat([X,Y],axis = 1)
# Apply the combined rule set to the full sample
rule_result_total, _ = rule_check(result_t,df_tt)

# Re-check after excluding a specific variable from the rules
# rule_result_total, _ = rule_check(result_t[result_t['var_1'] != 'alu_m12_id_tot_monnum'],df_tt)


# 规则报告
def rule_report(rule_result_total,df_t):
    """Format the combined rule statistics into a human-readable report.

    Parameters
    ----------
    rule_result_total : pd.DataFrame
        Output of rule_check over the full sample; must contain var_1/
        down_lmt_1/up_lmt_1, var_2/down_lmt_2/up_lmt_2, hitnum, badnum,
        hit_rate, badrate, lift, lift_acu and hit_rate_acu.
    df_t : pd.DataFrame
        The sample the rules were applied to; must contain 'flagy' (1 = bad).

    Returns
    -------
    pd.DataFrame
        Report with Chinese column headers; rates formatted as 'x.xxx%' and
        variable names wrapped in parentheses.
    """
    # Fix: work on copies so the caller's DataFrame is not mutated, and so the
    # percentage/name formatting below assigns into a real frame instead of a
    # slice (the original triggered pandas' SettingWithCopyWarning).
    report = rule_result_total.copy()
    report['sample_num'] = df_t.shape[0]
    report['平均坏客率'] = sum(df_t.flagy) / len(df_t.flagy)
    rule_result1 = report[['var_1','down_lmt_1','up_lmt_1','var_2','down_lmt_2','up_lmt_2','hitnum', 'badnum', 'sample_num', 'hit_rate', 'badrate', '平均坏客率', 'lift', 'lift_acu', 'hit_rate_acu']].copy()
    rule_result1.columns = ['变量1','变量1下限','变量1上限','变量2','变量2下限','变量2上限','频数', '规则命中坏客户人数', '样本数据量', '规则覆盖率', '规则覆盖坏客户率', '平均坏客户率', '提升度', 
                            '累计提升度', '累计规则覆盖率'
    ]
    # format the rate columns as percentages with three decimals
    pct_cols = ['规则覆盖率', '规则覆盖坏客户率', '平均坏客户率', '累计规则覆盖率']
    rule_result1[pct_cols] = rule_result1[pct_cols].apply(
        lambda col: col.map(lambda x: '%.3f' % (x * 100) + '%'))
    # wrap the variable names in parentheses (a missing var_2 becomes '(nan)')
    var_cols = ['变量1', '变量2']
    rule_result1[var_cols] = rule_result1[var_cols].apply(
        lambda col: col.map(lambda x: '(' + str(x) + ')'))
    return rule_result1










































