#!/usr/bin/env python
# coding: utf-8

# In[1]:


import pandas as pd 
import pymysql

# ---- Load the raw master table from MySQL ----
# NOTE(review): credentials are hard-coded; move them to config/env for production use.
conn = None
cursor = None
try:
    conn = pymysql.connect(host="localhost", user="test", passwd="123456", db='course', charset="utf8")
    cursor = conn.cursor()
    print("数据库连接成功！")
except Exception as e:
    print(e)

sql = "select * from df_master_with_update_log;"
try:
    cursor.execute(sql)
    res = cursor.fetchall()
    # Column names come from the cursor's result-set metadata.
    cols = [desc[0] for desc in cursor.description]
    df_master = pd.DataFrame(res, columns=cols)
    print("数据获取成功！")
except Exception as e:
    print(e)
finally:
    # Always release DB resources; the original leaked them when the query
    # failed. No commit is needed: this is a read-only SELECT.
    if cursor is not None:
        cursor.close()
    if conn is not None:
        conn.close()
    

import argparse
import numpy as np

# Command-line interface: a single optional --filename argument that
# controls the basename used for saved artifacts (figures / HTML report).
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str, default="清洗数据和建模")

# Parse sys.argv into a namespace.
args = parser.parse_args()

filename = args.filename  # output file basename


# #### 清洗类别型特征

# In[2]:


# ---- Clean the categorical city/province text features ----
df = df_master.copy()

# Strip surrounding whitespace from UserInfo_9.
# .str.strip() is NaN-safe, unlike map(lambda x: x.strip()), which raises
# AttributeError on missing values.
df["UserInfo_9"] = df["UserInfo_9"].str.strip()

# Remove every "市" (city) character from UserInfo_8 so that e.g.
# "北京市" and "北京" compare equal. regex=False: plain substring removal.
df["UserInfo_8"] = df["UserInfo_8"].str.replace("市", "", regex=False)


# In[3]:


# ---- Categorical feature cleaning and one-hot encoding ----
# Work on an explicit copy: the original fillna(inplace=True) on a
# select_dtypes slice triggered SettingWithCopyWarning and could silently
# fail to write back.
df_cat = df.select_dtypes(include=["object"]).copy()
df_cat = df_cat.fillna(value="不祥")  # sentinel label for missing categories

# Replace values whose sample count is below 0.2% of the data with "other"
# to keep the one-hot dimensionality manageable.
rare_threshold = len(df_cat) * 0.002
for col in df_cat.columns:
    counts = df_cat[col].value_counts()
    # set instead of list: O(1) membership test inside the map below
    rare_values = set(counts[counts < rare_threshold].index)
    df_cat[col] = df_cat[col].map(lambda x, rare=rare_values: "other" if x in rare else x)

# One-hot (dummy) encode every categorical feature.
df_cat_dummy = pd.get_dummies(df_cat)


# #### 数值型数据的缺失值处理

# In[40]:


# Count missing values per numeric feature; keep only the features that
# actually have gaps, plus a descending-sorted copy for the report chart.
df_numerical = df.select_dtypes(include=["int", "float"])
null_counts = df_numerical.isnull().sum()
temp = null_counts[null_counts > 0]
temp_1 = temp.sort_values(ascending=False)

# # 可视化
# import warnings;warnings.filterwarnings("ignore")
# from matplotlib import pyplot as plt
# import seaborn as sns
# plt.rcParams["font.sans-serif"] = ["SimHei"]  # 正常显示中文
# plt.rcParams["axes.unicode_minus"] = False # 正常显示负号
# plt.figure(figsize=(10,6), dpi=80)
# sns.barplot(y=temp.values,x=temp.index)
# plt.xlabel("存在缺失值的特征")
# plt.ylabel("缺失数")
# plt.title("特征缺失比例图")
# plt.xticks(rotation=90)
# plt.show()
# plt.tight_layout()
# plt.savefig(filename + ".png")


# In[19]:


# 剔除掉缺失率太高的WeblogInfo_1和WeblogInfo_3，其余使用中位数进行填充
# Drop WeblogInfo_1 / WeblogInfo_3 (missing rate too high to impute sensibly),
# then fill the remaining numeric gaps with each column's median.
df_numerical2 = df_numerical.copy()
df_numerical2.drop(labels=["WeblogInfo_1", "WeblogInfo_3"], axis=1, inplace=True)

# Vectorised median imputation: fillna with a per-column Series replaces the
# original per-column Python loop (and tolerates an all-NaN column, where the
# old fillna(value=nan) would raise).
df_numerical2 = df_numerical2.fillna(df_numerical2.median())


# #### 筛选冗余特征

# ##### 多重共线性处理

# In[20]:


import pandas as pd, numpy as np, os, re, math, time
import warnings;warnings.filterwarnings("ignore")
# to check monotonicity of a series
def is_monotonic(temp_series):
    """Return True when the sequence is entirely non-decreasing or entirely
    non-increasing. Empty / single-element sequences are trivially monotonic."""
    values = list(temp_series)
    return (all(values[i] <= values[i + 1] for i in range(len(values) - 1))
            or all(values[i] >= values[i + 1] for i in range(len(values) - 1)))

def prepare_bins(bin_data, c_i, target_col, max_bins):
    """Bin continuous column `c_i` so the target mean is monotonic across bins.

    Tries quantile binning with decreasing bin counts (max_bins .. 3); if no
    monotonic binning is found (or the result degenerates to < 2 bins), falls
    back to a forced 2-bin split at the mean (2 bins are always monotonic).

    Returns (class_col, remarks, frame):
      class_col -- "<c_i>_bins" on success, otherwise `c_i` itself
      remarks   -- "binned monotonically" / "binned forcefully" / "couldn't bin"
      frame     -- copy of the relevant columns of `bin_data`
    """
    force_bin = True
    binned = False
    remarks = np.nan
    # ----------------- Monotonic binning -----------------
    for n_bins in range(max_bins, 2, -1):
        try:
            bin_data[c_i + "_bins"] = pd.qcut(bin_data[c_i], n_bins, duplicates="drop")
            monotonic_series = bin_data.groupby(c_i + "_bins")[target_col].mean().reset_index(drop=True)
            if is_monotonic(monotonic_series):
                force_bin = False
                binned = True
                remarks = "binned monotonically"
                break
        except Exception:
            # qcut can fail (e.g. too few distinct values) -- try fewer bins.
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            continue
    # ----------------- Force binning -----------------
    # creating 2 bins forcefully because 2 bins will always be monotonic
    if force_bin or (c_i + "_bins" in bin_data and bin_data[c_i + "_bins"].nunique() < 2):
        _min = bin_data[c_i].min()
        _mean = bin_data[c_i].mean()
        _max = bin_data[c_i].max()
        bin_data[c_i + "_bins"] = pd.cut(bin_data[c_i], [_min, _mean, _max], include_lowest=True)
        if bin_data[c_i + "_bins"].nunique() == 2:
            binned = True
            remarks = "binned forcefully"

    if binned:
        return c_i + "_bins", remarks, bin_data[[c_i, c_i + "_bins", target_col]].copy()
    else:
        remarks = "couldn't bin"
        return c_i, remarks, bin_data[[c_i, target_col]].copy()

# calculate WOE and IV for every group/bin/class of a provided feature
def iv_woe_4iter(binned_data, target_col, class_col):
    """Compute per-class WOE/IV statistics for one feature.

    binned_data -- frame holding the feature (and its "_bins" column when
                   binned) plus the binary target column
    target_col  -- name of the 0/1 target column ("event" == 1)
    class_col   -- "<feature>_bins" for a binned continuous feature, or the
                   raw categorical column name

    Returns one row per class with counts, event rates, WOE and IV.
    """
    if "_bins" in class_col:
        # Binned continuous feature: surface NaNs as an explicit 'Missing' category.
        binned_data[class_col] = binned_data[class_col].cat.add_categories(['Missing'])
        binned_data[class_col] = binned_data[class_col].fillna("Missing")
        temp_groupby = binned_data.groupby(class_col).agg({class_col.replace("_bins", ""):["min", "max"],
                                                           target_col: ["count", "sum", "mean"]}).reset_index()
    else:
        binned_data[class_col] = binned_data[class_col].fillna("Missing")
        # NOTE(review): the duplicated "first" in this agg list is rejected by
        # newer pandas (SpecificationError). Kept as-is to preserve behaviour
        # on the pandas version this script targets -- confirm before upgrading.
        temp_groupby = binned_data.groupby(class_col).agg({class_col:["first", "first"],
                                                           target_col: ["count", "sum", "mean"]}).reset_index()

    temp_groupby.columns = ["sample_class", "min_value", "max_value", "sample_count", "event_count", "event_rate"]
    temp_groupby["non_event_count"] = temp_groupby["sample_count"] - temp_groupby["event_count"]
    temp_groupby["non_event_rate"] = 1 - temp_groupby["event_rate"]
    temp_groupby = temp_groupby[["sample_class", "min_value", "max_value", "sample_count",
                                 "non_event_count", "non_event_rate", "event_count", "event_rate"]]

    # BUGFIX: `"Missing" in temp_groupby["min_value"]` tests the *index* of the
    # Series (a RangeIndex), never its values, so the replacement below never
    # ran. Compare against the values instead.
    if "_bins" not in class_col and (temp_groupby["min_value"] == "Missing").any():
        temp_groupby["min_value"] = temp_groupby["min_value"].replace({"Missing": np.nan})
        temp_groupby["max_value"] = temp_groupby["max_value"].replace({"Missing": np.nan})
    temp_groupby["feature"] = class_col
    if "_bins" in class_col:
        # Ordinal label per bin; the 'Missing' class maps to NaN.
        temp_groupby["sample_class_label"]=temp_groupby["sample_class"].replace({"Missing": np.nan}).astype('category').cat.codes.replace({-1: np.nan})
    else:
        temp_groupby["sample_class_label"]=np.nan
    temp_groupby = temp_groupby[["feature", "sample_class", "sample_class_label", "sample_count", "min_value", "max_value",
                                 "non_event_count", "non_event_rate", "event_count", "event_rate"]]

    # Distribution of non-events ("good") and events ("bad") across classes.
    temp_groupby['distbn_non_event'] = temp_groupby["non_event_count"]/temp_groupby["non_event_count"].sum()
    temp_groupby['distbn_event'] = temp_groupby["event_count"]/temp_groupby["event_count"].sum()

    temp_groupby['woe'] = np.log(temp_groupby['distbn_non_event'] / temp_groupby['distbn_event'])
    temp_groupby['iv'] = (temp_groupby['distbn_non_event'] - temp_groupby['distbn_event']) * temp_groupby['woe']

    # Neutralise divide-by-zero classes: +/-inf WOE/IV becomes 0.
    temp_groupby["woe"] = temp_groupby["woe"].replace([np.inf,-np.inf],0)
    temp_groupby["iv"] = temp_groupby["iv"].replace([np.inf,-np.inf],0)

    return temp_groupby

"""
- iterate over all features. 迭代所有功能。
- calculate WOE & IV for there classes.计算这些类别的woe与iv值
- append to one DataFrame woe_iv.追加到数据帧woe_iv中。
"""
def var_iter(data, target_col, max_bins):
    woe_iv = pd.DataFrame()
    remarks_list = []
    for c_i in data.columns:
        if c_i not in [target_col]:
            # check if binning is required. if yes, then prepare bins and calculate woe and iv.
            """
            ----logic---
            binning is done only when feature is continuous and non-binary. 仅当数据是连续型，且不是二进制时才会处理
            Note: Make sure dtype of continuous columns in dataframe is not object. 确保dataframe中连续列的数据类型不是object。
            """
            c_i_start_time=time.time()
            if np.issubdtype(data[c_i], np.number) and data[c_i].nunique() > 2:
                class_col, remarks, binned_data = prepare_bins(data[[c_i, target_col]].copy(), c_i, target_col, max_bins)
                agg_data = iv_woe_4iter(binned_data.copy(), target_col, class_col)
                remarks_list.append({"feature": c_i, "remarks": remarks})
            else:
                agg_data = iv_woe_4iter(data[[c_i, target_col]].copy(), target_col, c_i)
                remarks_list.append({"feature": c_i, "remarks": "categorical"})
            # print("---{} seconds. c_i: {}----".format(round(time.time() - c_i_start_time, 2), c_i))
            woe_iv = woe_iv.append(agg_data)
    return woe_iv, pd.DataFrame(remarks_list)

# after getting woe and iv for all classes of features, calculate aggregated
# IV values per feature.
def get_iv_woe(data, target_col, max_bins):
    """Run the full IV/WOE pipeline over `data` and return (iv, woe_iv).

    iv     -- one row per feature: summed IV, class count, null %, binning remark
    woe_iv -- per-class detail rows with the feature-level IV merged back in
    """
    func_start_time = time.time()

    def _checkpoint(message):
        # Progress logging with elapsed wall time in minutes.
        print(message)
        print("Total time elapsed: {} minutes".format(round((time.time() - func_start_time) / 60, 3)))

    woe_iv, binning_remarks = var_iter(data, target_col, max_bins)
    _checkpoint("------------------IV and WOE calculated for individual groups.------------------")

    # Strip the "_bins" suffix so binned and raw features share one name.
    woe_iv["feature"] = woe_iv["feature"].replace("_bins", "", regex=True)
    detail_cols = ["feature", "sample_class", "sample_class_label", "sample_count", "min_value", "max_value",
                   "non_event_count", "non_event_rate", "event_count", "event_rate", 'distbn_non_event',
                   'distbn_event', 'woe', 'iv']
    woe_iv = woe_iv[detail_cols]

    # Feature-level IV: sum of per-class IVs, plus the number of classes.
    iv = woe_iv.groupby("feature")[["iv"]].agg(["sum", "count"]).reset_index()
    _checkpoint("------------------Aggregated IV values for features calculated.------------------")

    iv.columns = ["feature", "iv", "number_of_classes"]
    null_percent_data = pd.DataFrame(data.isnull().mean()).reset_index()
    null_percent_data.columns = ["feature", "feature_null_percent"]
    iv = iv.merge(null_percent_data, on="feature", how="left")
    _checkpoint("------------------Null percent calculated in features.------------------")

    iv = iv.merge(binning_remarks, on="feature", how="left")
    woe_iv = woe_iv.merge(iv[["feature", "iv", "remarks"]].rename(columns={"iv": "iv_sum"}), on="feature", how="left")
    _checkpoint("------------------Binning remarks added and process is complete.------------------")
    return iv, woe_iv.replace({"Missing": np.nan})


# Run the IV/WOE pipeline on the imputed numeric frame; "target" is the binary label.
iv, woe_iv = get_iv_woe(df_numerical2.copy(), target_col="target", max_bins=20)


# In[21]:


# ---- Feature selection: IV threshold, then correlation screening ----
pd.set_option("display.float_format", lambda x: "%.4f" % x)
# NOTE(review): the result of this sort is discarded (no assignment) -- it
# only mattered for interactive notebook display.
iv.sort_values(by="iv", ascending=False)
# Keep only features with IV >= 0.02.
new_cols = iv[iv["iv"] >= 0.02]["feature"]

# Re-select features (plus the target).
df_numerical3 = df_numerical2[list(new_cols) + ["target"]]

# Find strongly correlated pairs (|r| >= 0.99). Compute the full correlation
# matrix once -- the original re-ran DataFrame.corr() for every (i, j) pair,
# which is quadratically slower and produces identical pairwise values.
import numpy
corr_matrix = df_numerical3.corr()
lst = []
for i in list(df_numerical3.columns):
    for j in list(df_numerical3.drop(labels=[i], axis=1).columns):
        cor = corr_matrix.loc[i, j]
        if type(cor) == numpy.float64:
            if abs(cor) >= 0.99:
                lst.append([i, j, cor])


# In[22]:


# Rank the correlated pairs by correlation strength.
# NOTE(review): every correlated pair appears twice in `lst` (as (i, j) and
# (j, i)); the stride-2 slice below assumes those duplicates end up on
# adjacent rows after sorting. sort_values defaults to a non-stable quicksort,
# so ties could in principle break that pairing -- confirm before relying on it.
df_cor = pd.DataFrame(lst, columns=["前", "后", "相关系数"]).sort_values(by="相关系数", ascending=False)

# For each correlated pair keep the variable with the larger IV.
higher_iv = list(df_numerical3.columns)
for i, j in zip(df_cor[::2]["前"], df_cor[::2]["后"]):
    try:
        if iv[iv["feature"] == i]["iv"].values[0] > iv[iv["feature"] == j]["iv"].values[0]:
            higher_iv.remove(j)  # drop the weaker variable of the pair
        else:
            higher_iv.remove(i)
    except Exception as e:
        # A feature may already have been removed via an earlier pair;
        # list.remove then raises ValueError, which is printed and ignored.
        print(e)


# ##### 同一值处理

# In[23]:


# Re-select the surviving features. Take an explicit copy so the drops below
# act on an independent frame instead of a slice of df_numerical3
# (the original pattern triggered SettingWithCopyWarning).
df_numerical4 = df_numerical3[higher_iv].copy()

# Drop near-constant features: if one value covers >= 99% of the rows the
# column carries almost no information. Collect first, drop once -- this also
# avoids clobbering the module-level `temp` used elsewhere.
near_constant_cols = []
for col in df_numerical4.columns:
    top_share = df_numerical4[col].value_counts().values[0] / len(df_numerical4)
    if top_share >= 0.99:
        near_constant_cols.append(col)
df_numerical4.drop(labels=near_constant_cols, axis=1, inplace=True)


# ##### 变异系数处理

# In[24]:


# Drop low-variability features: a coefficient of variation (std / mean)
# below 0.1 means the column barely varies relative to its own scale.
for col in df_numerical4.columns:
    cv = df_numerical4[col].std() / df_numerical4[col].mean()
    if cv < 0.1:
        df_numerical4.drop(labels=[col], axis=1, inplace=True)


# In[26]:


# for i in df_cat_dummy.columns:
#     df_cat_dummy[i] = df_cat_dummy[i].map(int)
# Final modelling table: one-hot categorical block + filtered numeric block,
# concatenated column-wise (aligned on the row index).
df_all = pd.concat([df_cat_dummy, df_numerical4], axis=1)


# #### 构建GBM预测模型

# ##### 拆分数据集

# In[27]:


from sklearn.model_selection import train_test_split

# Separate features from the label and hold out 20% of the rows for evaluation.
X = df_all.drop(labels=["target"], axis=1)
Y = df_all["target"].map(int)  # coerce the label to integer 0/1
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, train_size=0.8, random_state=42, shuffle=True
)


# ##### 建模

# In[28]:


import lightgbm as lgb
# Gradient-boosting classifier with early stopping on the held-out fold.
# NOTE(review): the `early_stopping_rounds` fit keyword was removed in
# LightGBM 4.0 (use callbacks=[lgb.early_stopping(5)] there) -- this code
# targets a pre-4.0 LightGBM; confirm the installed version.
lgb_model = lgb.LGBMClassifier(num_leaves=31, learning_rate=0.05, n_estimators=500, n_jobs=-1)
lgb_model.fit(X_train, Y_train, eval_set=[(X_test, Y_test)], eval_metric='logloss', early_stopping_rounds=5)
# Predict labels using the best iteration found by early stopping.
pre_lgb = lgb_model.predict(X_test, num_iteration=lgb_model.best_iteration_)
# Class probabilities, used later for the ROC curve / AUC.
prob = lgb_model.predict_proba(X_test)


# ##### 模型评估

# In[43]:


# from matplotlib import pyplot as plt
# from sklearn.metrics import confusion_matrix
# plt.rcParams["font.sans-serif"] = ["SimHei"]
# plt.rcParams["axes.unicode_minus"] = False
# import seaborn as sns

# def plot():
#     plt.figure(figsize=(30,15),dpi=90)
#     plt.subplot(221)
#     sns.heatmap(confusion_matrix(Y_test, pre_lgb), cmap="Blues", cbar=False,annot=True, fmt='.20g', xticklabels=["正常", "异常"], yticklabels=["正常",  "异常"])
#     plt.xlabel("预测类别")
#     plt.ylabel("真实类别")
#     plt.title("(a)测试集上CatBoost-混淆矩阵")
#     # plt.show()

#     plt.subplot(222)
#     from sklearn.metrics import roc_curve, roc_auc_score
#     fpr, tpr, _ = roc_curve(Y_test, prob[:, 1])
#     auc = roc_auc_score(Y_test, prob[:, 1])
#     # plt.figure(dpi=100)
#     plt.plot(fpr, tpr, label = f"AUC:{auc}", linestyle="dashed")
#     plt.title("(b)测试集上LightGBM 模型 ROC 曲线和 AUC 值")
#     plt.xlabel("假正率")
#     plt.ylabel("真正率")
#     plt.legend(loc="best")
#     # plt.savefig("./Images/catboost混淆矩阵.png")
#     # plt.show()
#     plt.subplot(223)
#     imp = lgb_model.feature_importances_[:20]
#     df2 = pd.DataFrame(imp, index=X.columns[:20], columns=["imp"])
#     df2.sort_values(by="imp",inplace=True, ascending=False)
#     # plt.figure(figsize=(15,6),dpi=90)
#     sns.barplot(x=df2["imp"], y=df2.index)
#     plt.title("(c)CatBoost模型特征重要性")
#     # plt.ylabel("特征")
#     plt.xlabel("特征重要性")

#     #自定义绘制ks曲线的函数
#     def plot_ks(y_test, y_score, positive_flag):
#         # 对y_test,y_score重新设置索引
#         y_test.index = np.arange(len(y_test))
#         #y_score.index = np.arange(len(y_score))
#         # 构建目标数据集
#         target_data = pd.DataFrame({'y_test':y_test, 'y_score':y_score})

#         # 按y_score降序排列
#         target_data.sort_values(by = 'y_score', ascending = False, inplace = True)

#         # 自定义分位点
#         cuts = np.arange(0.1,1,0.1)

#         # 计算各分位点对应的Score值
#         index = len(target_data.y_score)*cuts
#         scores = target_data.y_score.iloc[index.astype('int')]

#         # 根据不同的Score值，计算Sensitivity和Specificity
#         Sensitivity = []
#         Specificity = []

#         for score in scores:
#             # 正例覆盖样本数量与实际正例样本量
#             positive_recall = target_data.loc[(target_data.y_test == positive_flag) & (target_data.y_score>score),:].shape[0]
#             positive = sum(target_data.y_test == positive_flag)

#             # 负例覆盖样本数量与实际负例样本量
#             negative_recall = target_data.loc[(target_data.y_test != positive_flag) & (target_data.y_score<=score),:].shape[0]
#             negative = sum(target_data.y_test != positive_flag)
#             Sensitivity.append(positive_recall/positive)
#             Specificity.append(negative_recall/negative)

#         # 构建绘图数据
#         plot_data = pd.DataFrame({'cuts':cuts,'y1':1-np.array(Specificity),'y2':np.array(Sensitivity),
#         'ks':np.array(Sensitivity)-(1-np.array(Specificity))})

#         # 寻找Sensitivity和1-Specificity之差的最大值索引
#         max_ks_index = np.argmax(plot_data.ks)
#         plt.plot([0]+cuts.tolist()+[1], [0]+plot_data.y1.tolist()+[1], label = '1-Specificity')
#         plt.plot([0]+cuts.tolist()+[1], [0]+plot_data.y2.tolist()+[1], label = 'Sensitivity')

#         # 添加参考线
#         plt.vlines(plot_data.cuts[max_ks_index], ymin = plot_data.y1[max_ks_index],
#         ymax = plot_data.y2[max_ks_index], linestyles = '--')

#         # 添加文本信息

#         plt.text(x = plot_data.cuts[max_ks_index]+0.01,
#         y = plot_data.y1[max_ks_index]+plot_data.ks[max_ks_index]/2,
#         s = 'KS= %.2f' %plot_data.ks[max_ks_index])

#         # 显示图例
#         plt.title("K-S值曲线图")
#         plt.legend()
#         # 显示图形
#     #     plt.show()
#     plt.subplot(224)
#     plot_ks(Y_test, pre_lgb, positive_flag = 1)
    

# p = plot()


# In[47]:


# 结果展示
from pyecharts import options as opts
from pyecharts.charts import Bar, Tab, Line


def bar_datazoom_slider() -> Bar:
    """Bar chart of missing-value counts per field, with a data-zoom slider.

    Reads the module-level `temp_1` (per-field missing counts, descending).
    """
    fields = list(temp_1.index)
    counts = [int(v) for v in temp_1.values]
    chart = Bar()
    chart.add_xaxis(fields)
    chart.add_yaxis("缺失个数", counts)
    chart.set_global_opts(
        title_opts=opts.TitleOpts(title="存在缺失值的字段"),
        datazoom_opts=[opts.DataZoomOpts()],
    )
    return chart



from pyecharts.components import Table
from pyecharts.options import ComponentTitleOpts

def table(rows, headers, title, subtitle):
    """Build a pyecharts Table component from row data with a title/subtitle."""
    # Local renamed from `table` to avoid shadowing this function's own name.
    component = Table()
    component.add(headers, rows)
    component.set_global_opts(
        title_opts=ComponentTitleOpts(title=f"{title}", subtitle=f"{subtitle}")
    )
    return component


# ROC curve and AUC value
from sklearn.metrics import roc_curve, roc_auc_score
def roc():
    """Line chart of the ROC curve; the AUC is embedded in the chart title.

    Reads the module-level `Y_test` and `prob` produced by the modelling step.
    """
    fpr, tpr, _ = roc_curve(Y_test, prob[:, 1])
    auc = roc_auc_score(Y_test, prob[:, 1])
    chart = Line()
    chart.set_global_opts(
        title_opts=opts.TitleOpts(title=f"ROC曲线(AUC:{auc})"),
        tooltip_opts=opts.TooltipOpts(is_show=False),
        xaxis_opts=opts.AxisOpts(type_="category"),
        yaxis_opts=opts.AxisOpts(
            type_="value",
            axistick_opts=opts.AxisTickOpts(is_show=True),
            splitline_opts=opts.SplitLineOpts(is_show=True),
        ),
    )
    chart.add_xaxis(xaxis_data=fpr)
    chart.add_yaxis(
        series_name="",
        y_axis=tpr,
        symbol="emptyCircle",
        is_symbol_show=True,
        label_opts=opts.LabelOpts(is_show=False),
    )
    return chart

# Data for the feature-importance chart.
# NOTE(review): [:20] takes the FIRST 20 features by column order, not the 20
# most important ones -- confirm this is intended before trusting the chart.
imp = lgb_model.feature_importances_[:20]
df2 = pd.DataFrame(imp, index=X.columns[:20], columns=["imp"])
df2.sort_values(by="imp",inplace=True, ascending=False)
def importance():
    # Horizontal bar chart of the importances collected in the global `df2`.
    x = list(df2.index)
    y = [float(i) for i in df2["imp"]]
    c = (
    Bar()
    .add_xaxis(x)
    .add_yaxis("特征重要性", y)
    .reversal_axis()
    .set_series_opts(label_opts=opts.LabelOpts(position="right"))
    .set_global_opts(title_opts=opts.TitleOpts(title="特征重要性"))
    )
    return c



# ---- Assemble the report tabs and render to HTML ----
tab = Tab()

# Preview of the one-hot encoded categorical block (first 20 rows x 50 cols).
d = df_cat_dummy.iloc[:20, :50]
s = len(df_cat_dummy)
col = len(df_cat_dummy.columns)
h = list(d.columns)
# One Python list per table row.
l = [list(d.iloc[i, :]) for i in range(len(d))]

tab.add(table(rows=l, headers=h, title="处理分类型特征结果示例", subtitle=f"总样本：{s},总特征：{col}"), "处理分类型特征")
tab.add(bar_datazoom_slider(), "字段缺失情况")
tab.add(roc(), "模型评估")
tab.add(importance(), "特征重要性")
# BUGFIX: the output name was the literal placeholder string "(unknown).html"
# inside an f-string with no placeholder; use the --filename CLI argument so
# the report is named consistently with the other artifacts.
tab.render(f"{filename}.html")

