# -*- coding: utf-8 -*-
"""
Created on Mon Apr  8 15:49:39 2024

@author: admin
"""
import pandas as pd
import numpy as np
import toad
# from image_create import show_roc, show_ks, loss_curve, show_pr, origin_coor,afterProcess_coor,heatMap
from sklearn import metrics
from sklearn.metrics import roc_curve, accuracy_score
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.preprocessing import StandardScaler


# Variable homogeneity check
def check_tz(cal_data, target, ex_list):
    """Compute the "single-value rate" of every candidate feature column.

    For each column (excluding ``target`` and the columns in ``ex_list``)
    this measures how dominant the most frequent value is: count of the
    modal value divided by the number of rows.  Columns whose rate is close
    to 1 are near-constant and carry little information.

    :param cal_data: input DataFrame; NaNs are replaced by the placeholder
        -99999 so missing values count as one category.
    :param target: name of the label column to exclude.
    :param ex_list: additional column names to exclude.
    :return: DataFrame with columns ``name`` and ``单一率`` (single-value rate).
    """
    cal_data = cal_data.fillna(-99999)
    candidate_cols = [c for c in cal_data.columns if c not in ex_list + [target]]
    n_rows = len(cal_data)
    # Frequency of the most common value in each candidate column.
    rates = [cal_data[c].value_counts().max() / n_rows for c in candidate_cols]
    new_check = pd.DataFrame()
    new_check['name'] = candidate_cols
    new_check['单一率'] = rates
    return new_check


# Feature correlation analysis
def corr_fea(df, cor, target):
    """Return all feature pairs whose absolute correlation is >= ``cor``.

    The correlation matrix of every column except ``target`` is unstacked
    into (level_0, level_1, value) rows, self-pairs are removed, and each
    unordered pair is kept only once.

    :param df: input DataFrame.
    :param cor: absolute-correlation threshold in [0, 1].
    :param target: label column excluded from the correlation matrix.
    :return: DataFrame with columns ``level_0``, ``level_1`` and ``0``
        (absolute correlation), filtered to ``abs(corr) >= cor``.
    """
    cor_all = df.drop([target], axis=1).corr().unstack().reset_index()
    # Remove the diagonal (a column is always perfectly correlated with itself).
    cor_all = cor_all[cor_all['level_0'] != cor_all['level_1']]
    # The matrix is symmetric, so (a, b) and (b, a) both appear.  De-duplicate
    # on the unordered pair itself: the previous drop_duplicates(subset=[0])
    # de-duplicated on the correlation *value*, wrongly dropping distinct
    # pairs that happen to share the same correlation.
    pair_key = cor_all.apply(lambda r: frozenset((r['level_0'], r['level_1'])), axis=1)
    cor_all = cor_all.assign(_pair=pair_key).sort_values(by=0, ascending=False)
    cor_all = cor_all.drop_duplicates(subset=['_pair']).drop(columns=['_pair'])
    # Compare magnitudes so strong negative correlations are also caught.
    cor_all[0] = abs(cor_all[0])
    return cor_all[cor_all[0] >= cor]


# Model performance evaluation
def model_eva(actual, pred_proba):
    """Evaluate a binary classifier from its predicted probabilities.

    Parameters:
    - actual: ground-truth labels, shape (n_samples,)
    - pred_proba: predicted probability of the positive class, shape (n_samples,)

    Returns:
    - auc: ROC AUC, ranking power of the model
    - ks: KS statistic, maximum separation between TPR and FPR
    - best_f1_score: best F1 over the PR-curve thresholds
    - best_precision: precision at the best-F1 threshold
    - best_recall: recall at the best-F1 threshold
    - best_threshold: the threshold achieving the best F1
    - best_accuracy: accuracy at the best-F1 threshold
    """
    # ROC curve -> AUC.
    fpr, tpr, thresholds = roc_curve(actual, pred_proba)
    auc = metrics.auc(fpr, tpr)

    # KS statistic.
    ks = max(tpr - fpr)

    # Precision-recall curve.  Note: precisions/recalls contain one more
    # element than thresholds_pr (the trailing (precision=1, recall=0) point
    # has no associated threshold).
    precisions, recalls, thresholds_pr = precision_recall_curve(actual, pred_proba)

    # F1 per point; the small epsilon avoids division by zero.
    f1_scores = 2 * (precisions * recalls) / (precisions + recalls + 1e-7)

    best_f1_score = np.max(f1_scores)
    # Clip the index so the degenerate last PR point (which has no matching
    # threshold) can never cause an out-of-range lookup in thresholds_pr.
    best_f1_score_index = min(int(np.argmax(f1_scores)), len(thresholds_pr) - 1)
    best_threshold = thresholds_pr[best_f1_score_index]

    # Precision/recall at the chosen threshold.
    best_precision = precisions[best_f1_score_index]
    best_recall = recalls[best_f1_score_index]
    # Accuracy when classifying at the chosen threshold.
    best_accuracy = accuracy_score(actual, (pred_proba >= best_threshold).astype(int))

    return auc, ks, best_f1_score, best_precision, best_recall, best_threshold, best_accuracy



def _safe_ratio(numerator, denominator):
    """Element-wise numerator / denominator; -1 wherever the denominator is not > 0."""
    # Division first (inf/NaN where denominator <= 0), then masked to -1 —
    # identical to the former per-row ``if den > 0 else -1`` lambdas.
    return (numerator / denominator).where(denominator > 0, -1.0)


# Feature derivation
def handle(data):
    """Derive ratio features in place and return the same DataFrame.

    Every derived column is a ratio of two existing amount/count columns.
    Except for the first two (which keep the raw division — NaN/inf on a
    zero denominator, exactly as before), a ratio is set to -1 whenever its
    denominator is not strictly positive.
    """
    # These two intentionally have no denominator guard (legacy behaviour).
    data['个人账户金额比例'] = data['个人账户金额_SUM'] / data['ALL_SUM']
    data['统筹支付金额比例'] = data['统筹支付金额_SUM'] / data['ALL_SUM']
    # (derived name, numerator column, denominator column).  Vectorised via
    # _safe_ratio instead of the former 25 row-wise DataFrame.apply calls,
    # which iterate in Python and are orders of magnitude slower.
    ratio_specs = [
        ('贵重药品金额比例', '贵重药品发生金额_SUM', '药品费发生金额_SUM'),
        ('贵重检查费金额比例', '贵重检查费金额_SUM', '检查费发生金额_SUM'),
        ('手术费用在总金额占比', '手术费发生金额_SUM', 'ALL_SUM'),
        ('床位费用在总金额占比', '床位费发生金额_SUM', 'ALL_SUM'),
        ('高价材料金额比例', '高价材料发生金额_SUM', '医用材料发生金额_SUM'),
        ('医用材料费自费比例', '医用材料费自费金额_SUM', '医用材料发生金额_SUM'),
        ('月就诊医院avg比月就诊天数avg', '月就诊医院数_AVG', '月就诊天数_AVG'),
        ('月就诊医院max比月就诊天数max', '月就诊医院数_MAX', '月就诊天数_MAX'),
        ('ALLSUM比就诊的月数', 'ALL_SUM', '就诊的月数'),
        ('就诊次数_SUM比就诊的月数', '就诊次数_SUM', '就诊的月数'),
        ('ALL_SUM比就诊次数_SUM', 'ALL_SUM', '就诊次数_SUM'),
        ('月就诊次数_MAX比就诊天数_MAX', '月就诊次数_MAX', '月就诊天数_MAX'),
        ('月就诊次数_MAX比就诊天数_AVG', '月就诊次数_MAX', '月就诊天数_AVG'),
        ('ALL_SUM比月就诊次数_AVG', 'ALL_SUM', '月就诊次数_AVG'),
        ('医院_统筹金_MAX比医院_就诊天数_MAX', '医院_统筹金_MAX', '医院_就诊天数_MAX'),
        ('医院_统筹金_AVG比医院_就诊天数_AVG', '医院_统筹金_AVG', '医院_就诊天数_AVG'),
        ('医院_药品_MAX比医院_统筹金_MAX', '医院_药品_MAX', '医院_统筹金_MAX'),
        ('医院_药品_AVG比医院_统筹金_AVG', '医院_药品_AVG', '医院_统筹金_AVG'),
        ('月药品金额_MAX比月就诊医院数_MAX', '月药品金额_MAX', '月就诊医院数_MAX'),
        ('月药品金额_MAX比月就诊次数_MAX', '月药品金额_MAX', '月就诊次数_MAX'),
        ('月药品金额_MAX比月统筹金额_MAX', '月药品金额_MAX', '月统筹金额_MAX'),
        ('月药品金额_AVG比月就诊医院数_AVG', '月药品金额_AVG', '月就诊医院数_AVG'),
        ('月药品金额_AVG比月就诊次数_AVG', '月药品金额_AVG', '月就诊次数_AVG'),
        ('月药品金额_AVG比月统筹金额_AVG', '月药品金额_AVG', '月统筹金额_AVG'),
        ('出院诊断LENTH_MAX比ALL_SUM', '出院诊断LENTH_MAX', 'ALL_SUM'),
    ]
    for name, num_col, den_col in ratio_specs:
        data[name] = _safe_ratio(data[num_col], data[den_col])
    return data


# Data preprocessing / first-pass feature selection
def data_processing(data, ex_list, target):
    """Run the feature-reduction pipeline and return the filtered data.

    Steps: derive ratio features, then successively drop features that are
    (1) >= 90% missing, (2) >= 99% single-valued, (3) IV <= 0.01, and
    (4) highly correlated (>= 0.85) with a stronger feature.

    :param data: raw DataFrame (mutated in place by ``handle``).
    :param ex_list: identifier columns excluded from modelling.
    :param target: name of the label column.
    :return: (data, data_filter4, feature_names, target)
    """
    # Derive extra ratio features.
    handle(data)  # 16000*109
    # Remember each row's original position so test-set rows can later be
    # traced back to the raw data.
    data['original_index'] = data.index

    # 1. Descriptive statistics on the modelling columns.
    data_des = toad.detect(data.drop(ex_list, axis=1))

    # 2a. Drop features with a missing rate >= 0.9 (toad reports the rate
    # as a percentage string, e.g. "12.5%").
    data_des['missing1'] = data_des['missing'].str.strip("%").astype(float) / 100
    dropmiss_col = list(data_des[data_des['missing1'] >= 0.9].index)

    # 2b. Drop features where a single value covers >= 99% of rows.
    data_tz = check_tz(data, target, ex_list)
    tz_99 = data_tz[data_tz['单一率'] >= 0.99]['name'].tolist()
    data_filter2 = data.drop(tz_99 + dropmiss_col + ex_list, axis=1)  # 16000 92

    # 2c. Drop features with IV <= 0.01.  Use the ``target`` parameter
    # instead of the previously hard-coded 'RES' column name.
    data_iv = toad.quality(data_filter2, target, iv_only=True)[['iv']]
    iv_col = list(data_iv[data_iv['iv'] <= 0.01].index)
    data_filter3 = data_filter2.drop(iv_col, axis=1)  # 16000 91

    # 2d. Drop features with correlation >= 0.85 (keeping the higher-IV one
    # of each correlated pair) and IV < 0.03.
    data_filter4, drop_col = toad.selection.select(data_filter3, iv=0.03, target=target, corr=0.85, return_drop=True)
    # Record the surviving feature names.
    feature_names = data_filter4.columns.tolist()
    return data, data_filter4, feature_names, target


# Second selection pass after WOE transformation, then train/test split
def woe_process(data, data_filter4, feature_names, target):
    """Bin + WOE-transform the features, re-select them, and build the
    train/test matrices.

    :param data: full DataFrame carrying the ``original_index`` column.
    :param data_filter4: DataFrame after the first selection pass (features + label).
    :param feature_names: column names of ``data_filter4``.
    :param target: name of the label column.
    :return: (data, x_train, y_train, x_test, y_test, x_train_df, x_test_df, idx_test)
    """
    # Quantile binning into 10 bins.
    c = toad.transform.Combiner()
    c.fit(data_filter4, y=target, method='quantile', n_bins=10)  # empty_separate = False
    # WOE-transform the binned features; the label column is excluded.
    transer = toad.transform.WOETransformer()
    data_woe = transer.fit_transform(c.transform(data_filter4), data_filter4[target], exclude=[target])
    # Re-select in the WOE space: keep features with IV > 0.02 and pairwise
    # correlation < 0.85.  Use the ``target`` parameter instead of the
    # previously hard-coded 'RES'.
    data_woe_filter, drop_col = toad.selection.select(data_woe, target=target, iv=0.02, corr=0.85, return_drop=True)
    selected_feature_names = [name for name in feature_names if name not in drop_col]

    # Model training data.  NOTE: built from the *pre-WOE* data_filter4,
    # as in the original pipeline.
    X_data = data_filter4.drop([target], axis=1)

    np.random.seed(0)  # global seed for reproducibility

    # Split configuration.
    random_seed = 20
    test_size = 0.2  # hold out 20% as the test set

    # Split features, labels and the original row indices together so the
    # test rows can be traced back to the raw data.
    x_train, x_test, y_train, y_test, idx_train, idx_test = train_test_split(X_data, data_filter4[target],
                                                                             data['original_index'], test_size=test_size, random_state=random_seed)

    # Drop unstable features: population stability index >= 0.1 between
    # train and test.
    feature_psi = toad.metrics.PSI(x_train, x_test)
    unstable = list(feature_psi[feature_psi >= 0.1].index)
    x_train.drop(unstable, axis=1, inplace=True)
    x_test.drop(unstable, axis=1, inplace=True)

    selected_feature_names = [name for name in selected_feature_names if
                              name not in unstable and name != target]
    # Standardise: fit the scaler on the training set only and *reuse* it on
    # the test set.  The previous fit_transform(x_test) re-fitted the scaler
    # on test statistics — a data-leakage bug that skews evaluation.
    std = StandardScaler()
    x_train = std.fit_transform(x_train)
    x_test = std.transform(x_test)
    # DataFrame views carrying the surviving feature names.
    x_train_df = pd.DataFrame(x_train, columns=selected_feature_names)
    x_test_df = pd.DataFrame(x_test, columns=selected_feature_names)
    return data, x_train, y_train, x_test, y_test, x_train_df, x_test_df, idx_test


# Build and fit the XGBoost classifier
def model_create(x_train, y_train, x_test, y_test):
    """Fit an XGBoost binary classifier with fixed, hand-tuned hyper-parameters.

    Both the training and the test set are passed as evaluation sets so the
    logloss/auc learning curves are recorded during fitting.
    """
    hyper_params = dict(
        booster='gbtree',              # tree-based weak learners (alternative: 'gblinear')
        objective='binary:logistic',   # binary-classification objective
        max_depth=6,                   # tree depth (previously tried: 3)
        learning_rate=0.2,             # shrinkage (previously tried: 0.15)
        n_estimators=200,              # number of boosting rounds
        gamma=0,                       # minimum loss reduction required to split a node
        min_child_weight=1,            # minimum child hessian sum; larger values fight overfitting
        subsample=0.8,                 # row sampling rate per tree
        colsample_bytree=0.8,          # column sampling rate per tree (overfitting control)
        reg_alpha=10,                  # L1 regularisation weight
        reg_lambda=50,                 # L2 regularisation weight
        scale_pos_weight=1,            # positive-class weight; raise when classes are imbalanced
        seed=5,
        eval_metric=['logloss', 'auc'],
    )
    classifier = XGBClassifier(**hyper_params)
    # early_stopping_rounds could be supplied to fit() to stop once the eval
    # metrics plateau; it is deliberately left disabled here.
    classifier.fit(x_train,
                   y_train,
                   eval_set=[(x_train, y_train), (x_test, y_test)],  # datasets tracked during training
                   verbose=False)
    return classifier


# Score the model and report the results
def model_use(data, modelxgb_best, x_train, x_test, x_test_df, y_train, y_test, idx_test):
    """Score the train/test sets, export predicted positives, print metrics.

    Side effect: writes the raw rows predicted as class 1 to
    ``positives_test.csv``.

    :param data: full DataFrame carrying the ``original_index`` column.
    :param modelxgb_best: fitted classifier with predict/predict_proba.
    :param idx_test: original-row indices of the test-set samples.
    :return: train metrics, test metrics, and the raw predicted probabilities.
    """
    # Probability of the positive class.  (The former array -> list ->
    # array round-trip was redundant; predict_proba already returns arrays.)
    y_pred_train = modelxgb_best.predict_proba(x_train)[:, 1]
    y_pred_test = modelxgb_best.predict_proba(x_test)[:, 1]

    # Hard class labels on the test set.
    x_pred_test = modelxgb_best.predict(x_test_df)

    # Original-row indices of the samples predicted as class 1.
    indices_of_interest = idx_test[x_pred_test == 1]
    # Pull those raw rows (with feature names) out of the full data.
    positives_df = data.loc[indices_of_interest]
    positives_df = positives_df.drop(columns='original_index')

    # Export the predicted positives, keeping the feature names.
    output_file = 'positives_test.csv'

    positives_df.to_csv(output_file, index=False)

    # 5.3 model evaluation
    auc_train, ks_train, f1_train, p_train, r_train, t_train, acc_train = model_eva(y_train, y_pred_train)
    auc_test, ks_test, f1_test, p_test, r_test, t_test, acc_test = model_eva(y_test, y_pred_test)

    # Print the evaluation summaries.
    print("[训练集-auc,ks,f1,精确率,召回率,最优阈值,准确率]:", auc_train, ks_train, f1_train, p_train, r_train, t_train, acc_train)
    print("[测试集-auc,ks,f1,精确率,召回率,最优阈值,准确率]:", auc_test, ks_test, f1_test, p_test, r_test, t_test, acc_test)
    return auc_train, ks_train, f1_train, p_train, r_train, t_train, acc_train, auc_test, ks_test, f1_test, p_test, r_test, t_test, acc_test, y_pred_train, y_pred_test


# End-to-end pipeline: load, preprocess, train, evaluate
def initModel():
    """Train and evaluate the model end-to-end; return the F1/P/R metrics."""
    # Load the raw data.
    data = pd.read_csv('dataSet.csv')  # 16000 82
    target = 'RES'
    # Identifier columns excluded from modelling.  (The former list
    # contained '顺序号_NN' twice; the duplicate is removed.)
    ex_list = ['个人编码', '顺序号_NN', '医院编码_NN']
    # NOTE: data_processing already calls handle() internally, so the
    # former extra handle(data) call here was redundant work and has been
    # removed — the derived columns are identical either way.
    # 绘制处理前的六十个非目标特征的相关性条形图
    # origin_coor(data, target, save_path='images/origin_coor.png')
    # First-pass dimensionality reduction.
    data, data_filter4, feature_names, target = data_processing(data, ex_list, target)
    # WOE binning, second selection pass, and train/test split.
    data, x_train, y_train, x_test, y_test, x_train_df, x_test_df, idx_test = woe_process(data, data_filter4,
                                                                                          feature_names, target)
    # 绘制处理后的非目标特征的相关性条形图
    # afterProcess_coor(data, target, save_path='images/afterProcess_coor.png')
    # heatMap(data, target, save_path='images/heatMap.png')
    # Fit the classifier.
    modelxgb_best = model_create(x_train, y_train, x_test, y_test)
    # Score, export predicted positives, and print the evaluation metrics.
    auc_train, ks_train, f1_train, p_train, r_train, t_train, acc_train, auc_test, ks_test, f1_test, p_test, r_test, t_test, acc_test, y_pred_train, y_pred_test = model_use(
        data, modelxgb_best, x_train, x_test, x_test_df, y_train, y_test, idx_test)
    # img_create(y_test,y_train,y_pred_test,y_pred_train,modelxgb_best)
    return f1_train, p_train, r_train, f1_test, p_test, r_test


# 图像获取 (image generation, currently disabled)
# def imcreate(y_test,y_train,y_pred_test,y_pred_train,modelxgb_best):
#     # roc曲线获取
#     show_roc(y_train,y_pred_train, 'train',save_path='images/roc_train.png')
#     show_roc(y_test,y_pred_test,'test',save_path='images/roc_test.png')
#     # ks曲线获取
#     show_ks(y_train,y_pred_train,'train',save_path='images/ks_train.png')
#     show_ks(y_test,y_pred_test,'test',save_path='images/ks_test.png')
#     # pr曲线获取
#     show_pr(y_train,y_pred_train,'train',save_path='images/pr_train.png')
#     show_pr(y_test,y_pred_test,'test',save_path='images/pr_test.png')
#     # 损失函数曲线获取
#     loss_curve(modelxgb_best,save_path='images/loss_curve.png')

if __name__ == '__main__':
    # Script entry point: run the full training/evaluation pipeline.
    initModel()
