from feature_engine import *


def prob2score(prob):
    """
    Convert a bad-probability into a credit-style score.

    Base 550 with 60 points per doubling of the odds; a higher probability
    yields a lower score.

    :param prob: predicted bad probability (scalar)
    :return: score rounded to 0 decimals (float)
    """
    # Clamp away from 0 and 1 so the odds ratio log stays finite: the
    # original raised ZeroDivisionError at prob=1.0 and blew up at 0.0
    # (the commented-out min() hinted at the same problem).
    prob = min(max(prob, 1e-6), 1 - 1e-6)
    return round(550 - 60 / np.log(2) * np.log(prob / (1 - prob)), 0)


# def p_to_score(p, PDO=60.0, Base=600, Ratio=1.0/15.0):
#     """
#     逾期概率转换分数
#     :param p: 逾期概率
#     :param PDO: points double odds. default = 60
#     :param Base: base points. default = 600
#     :param Ratio: odds. default = 1.0/15.0
#     :returns: 模型分数
#     """
#     B = PDO / np.log(2)
#     A = Base + B * np.log(Ratio)
#     score = A - B * np.log(p / (1 - p))
#     return round(score, 0)

def feature_class(df, model_fea):
    """
    Split the given feature names into numeric and categorical groups.

    :param df: DataFrame holding the columns to inspect
    :param model_fea: iterable of column names to classify
    :return: (numeric_features, categorical_features) as two lists,
             preserving the input order
    """
    numeric_cols, categorical_cols = [], []
    for col in model_fea:
        bucket = numeric_cols if is_numeric_dtype(df[col]) else categorical_cols
        bucket.append(col)
    return numeric_cols, categorical_cols


def cate_label_encodings(df, cate_fea):
    """
    Build a value -> integer-code lookup table for each categorical feature.

    Codes follow pandas.factorize order (order of first appearance).

    :param df: DataFrame containing the categorical columns
    :param cate_fea: iterable of categorical column names
    :return: dict mapping feature name -> {category value: code}
    """
    encodings = {}
    for col in cate_fea:
        _, categories = pd.factorize(df[col])
        encodings[col] = {cat: code for code, cat in enumerate(categories)}
    return encodings


def cate_factorize(df, label_encodings):
    """
    Apply precomputed category encodings to a DataFrame, in place.

    Values absent from a feature's mapping are encoded as -1.

    :param df: DataFrame to encode (modified in place and also returned)
    :param label_encodings: dict of feature name -> {category value: code}
    :return: the same DataFrame with the mapped integer columns
    """
    for col, mapping in label_encodings.items():
        df[col] = df[col].map(mapping).fillna(-1).astype(int)
    return df


def fill_df(df, model_fea, num_feas=None, cate_feas=None, num_fill=-999, cate_fill=""):
    """
    Batch-fill NaNs in a DataFrame, in place.

    If `model_fea` is given, feature types are re-identified via
    feature_class(); otherwise the explicit `num_feas`/`cate_feas` lists
    are used.

    :param df: DataFrame to fill (modified in place and also returned)
    :param model_fea: full feature list; when None/empty, fall back to the
                      explicit numeric/categorical lists below
    :param num_feas: numeric feature names (used only when model_fea is empty)
    :param cate_feas: categorical feature names (used only when model_fea is empty)
    :param num_fill: fill value for numeric features
    :param cate_fill: fill value for categorical features
    :return: the filled DataFrame
    """
    if model_fea is None or len(model_fea) == 0:
        # The original passed None straight through, so df[None] crashed when
        # the caller supplied neither list; default to "nothing to fill".
        num_features = num_feas if num_feas is not None else []
        cate_features = cate_feas if cate_feas is not None else []
    else:
        num_features, cate_features = feature_class(df, model_fea)
    if len(num_features) > 0:
        df[num_features] = df[num_features].fillna(num_fill)
    if len(cate_features) > 0:
        df[cate_features] = df[cate_features].fillna(cate_fill)
    return df


def oppsite_features(df1, df2, model_feas, target):
    """
    Find features whose correlation with the target flips sign (or is zero)
    between two datasets.

    A feature is flagged when corr(df1) * corr(df2) <= 0; NaN correlations
    (e.g. constant columns) are NOT flagged, since NaN comparisons are False.

    :param df1: first dataset
    :param df2: second dataset
    :param model_feas: feature names to check
    :param target: target column name
    :return: list of unstable feature names
    """
    return [
        feature
        for feature in tqdm(model_feas)
        if df1[feature].corr(df1[target]) * df2[feature].corr(df2[target]) <= 0
    ]


def oppsite_feature_kfold(df, model_feas, target, n_splits=5, random_state=42):
    """
    Flag features that are directionally unstable across random folds of
    one dataset: for every KFold split, a feature whose target correlation
    flips sign (or is zero) between the two partitions is collected.

    :param df: dataset (copied; the caller's frame is untouched)
    :param model_feas: feature names to check
    :param target: target column name
    :param n_splits: number of KFold splits
    :param random_state: shuffle seed for reproducibility
    :return: deduplicated list of unstable feature names (unordered)
    """
    data = df.copy().reset_index(drop=True)
    assert data.index.is_unique, '输入数据的index有重复值，请优先确保数据格式的正确性'
    unstable = set()
    splitter = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
    for part_a, part_b in splitter.split(data):
        fold_a = data.iloc[part_a, :]
        fold_b = data.iloc[part_b, :]
        for feature in tqdm(model_feas):
            corr_a = fold_a[feature].corr(fold_a[target])
            corr_b = fold_b[feature].corr(fold_b[target])
            if corr_a * corr_b <= 0:
                unstable.add(feature)
    return list(unstable)


def feature_effective0(train, test, flist, target, num_fill=-999, cate_fill=''):
    """
    Profile each feature: dtype class, distinct count, PSI (train vs test)
    and IV, returned sorted by IV descending.

    NOTE: fills NaNs in `train` and `test` IN PLACE before computing.
    Categorical features with more than 100 distinct values get NaN for
    both IV and PSI (too many levels to bin meaningfully).

    :param train: training set (mutated: NaNs filled)
    :param test: validation set (mutated: NaNs filled)
    :param flist: feature names to profile
    :param target: target column name
    :param num_fill: fill value for numeric features
    :param cate_fill: fill value for categorical features
    :return: DataFrame ['feature', 'fea_dtype', 'nunique', 'psi', 'iv']
    """
    num_feas, cate_feas = feature_class(train, flist)
    for frame in (train, test):
        frame[num_feas] = frame[num_feas].fillna(num_fill)
        frame[cate_feas] = frame[cate_feas].fillna(cate_fill)
    data = pd.concat([train, test])

    rows = []
    for fea in tqdm(flist):
        dtype_tag = 'numerical' if fea in num_feas else 'categorical'
        distinct = data[fea].nunique()
        if dtype_tag == 'categorical' and distinct > 100:
            iv, psi = np.nan, np.nan
        else:
            iv = calc_iv(data[target], data[fea])
            psi = calc_psi(train[fea], test[fea])
        rows.append([fea, dtype_tag, distinct, psi, iv])

    fea_info = pd.DataFrame(rows, columns=['feature', 'fea_dtype', 'nunique', 'psi', 'iv'])
    return fea_info.sort_values('iv', ascending=False)


def model_features(lgb_model, importance_type='gain'):
    """
    Extract a model's features and importances, keeping only features with
    positive importance, sorted by importance descending.

    :param lgb_model: trained LightGBM Booster
    :param importance_type: 'gain' or 'split'
    :return: DataFrame with columns ['var', 'importance']
    """
    fea_rs = pd.DataFrame(
        {"var": lgb_model.feature_name(), "importance": lgb_model.feature_importance(importance_type=importance_type)}
    )
    fea_rs = fea_rs[fea_rs['importance'] > 0]
    # Sort once; the original sorted by the same key both before and after
    # the filter, which was redundant work.
    return fea_rs.sort_values("importance", ascending=False)


# 特征挑选工具
# 特征挑选工具
def lgb_feature_selection(data_sets: list, model_fea_init: list, cate_fea_init: list, target: str, params: dict,
                          callbacks=[log_evaluation(period=100), early_stopping(stopping_rounds=30)]):
    """
    Recursive feature selection with LightGBM: train, drop zero-importance
    features (or the least important one when none hit zero), retrain, and
    record metrics for every iteration until no features remain.

    Example:
        feature_select_df = lgb_feature_selection(
            [train, test, oot], effect_fea, cate_fea, 'def_pd1', lgb_params, callbacks=None)

    :param data_sets: [train, test] or [train, test, oot]; test doubles as
                      oot when only two frames are given
    :param model_fea_init: initial candidate feature list
    :param cate_fea_init: categorical feature names (list)
    :param target: target column name
    :param params: LightGBM training parameters
    :param callbacks: LightGBM callbacks; pass None to train without any.
                      NOTE: the default list is built once at import time
                      and shared across calls — kept for backward
                      compatibility since callers use None to opt out.
    :return: per-iteration DataFrame of metrics and feature lists

    Side effect: writes a 'prob' column into each input frame.
    """
    train = data_sets[0]
    test = data_sets[1]
    oot = data_sets[2] if len(data_sets) == 3 else test
    model_fea = model_fea_init
    cate_fea = cate_fea_init

    tmp_arr = []
    n = 0
    while len(model_fea) > 0:
        n = n + 1
        if (cate_fea is None) or (len(cate_fea) == 0):
            train_data = lgb.Dataset(train[model_fea], label=train[target])
            test_data = lgb.Dataset(test[model_fea], label=test[target])
        else:
            train_data = lgb.Dataset(train[model_fea], label=train[target], categorical_feature=cate_fea)
            test_data = lgb.Dataset(test[model_fea], label=test[target], categorical_feature=cate_fea)

        model = lgb.train(params, train_data, valid_sets=[test_data], callbacks=callbacks)

        train['prob'] = model.predict(train[model_fea])
        test['prob'] = model.predict(test[model_fea])
        oot['prob'] = model.predict(oot[model_fea])

        auc1 = roc_auc_score(train[target], train['prob'])
        auc2 = roc_auc_score(test[target], test['prob'])
        auc3 = roc_auc_score(oot[target], oot['prob'])

        ks1 = calc_ks(train[target], train["prob"])
        ks2 = calc_ks(test[target], test['prob'])
        ks3 = calc_ks(oot[target], oot['prob'])

        delt = auc1 - auc2
        fea_count = len(model_fea)

        print(
            f"{n} iterator,feature count:{fea_count},AUC: {round(auc1, 4)},{round(auc2, 4)},{round(auc3, 4)},delt={round(delt, 4)}")

        fea_list_this = json.dumps(model_fea)
        cate_list_this = json.dumps(cate_fea)
        tmp_arr.append(
            [n, fea_count, auc1, auc2, auc3, ks1, ks2, ks3, auc1 - auc2, ks1 - ks2, model.num_trees(), fea_list_this,
             cate_list_this])

        fea_rs = model_features(model)

        # BUGFIX: model_features() returns the feature column as 'var';
        # reading 'feature' raised KeyError on the first iteration.
        model_fea_new = fea_rs['var'].to_list()
        if len(model_fea_new) < len(model_fea):
            # Zero-importance features were dropped by model_features.
            model_fea = model_fea_new
        else:
            # Nothing dropped: remove the least important feature (the list
            # is sorted by importance descending, so that's the last one).
            model_fea = model_fea_new[0:-1]

        cate_fea = [x for x in model_fea if x in cate_fea_init]

    result = pd.DataFrame(tmp_arr, columns=['iter_num', 'fea_count', 'train_auc', 'test_auc', 'oot_auc',
                                            'train_ks', 'test_ks', 'oot_ks', 'delt_auc', 'delt_ks', 'tree_num',
                                            'features', 'cate_fea'])
    return result


def plot_roc_ks0(y_label, y_pred, suptitle="标题"):
    """
    Plot ROC and KS curves side by side for binary labels and scores.

    :param y_label: array-like of 0/1 labels
    :param y_pred: array-like of predicted scores/probabilities
    :param suptitle: figure super-title
    :return: None (displays the figure)
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 2.7))
    fig.suptitle(suptitle, fontsize=14)

    # sklearn returns (fpr, tpr, thresholds); the original bound them to
    # swapped names (tpr, fpr, ...) — the plot happened to be correct but
    # the names lied to the reader.
    fpr, tpr, _ = metrics.roc_curve(y_label, y_pred)
    AUC = metrics.roc_auc_score(y_label, y_pred)
    ax1.plot(fpr, tpr, color="blue", label="AUC=%.3f" % AUC)
    ax1.plot([0, 1], [0, 1], "r--")
    ax1.set_ylim(0 - 0.02, 1 + 0.02)
    ax1.set_xlim(0 - 0.02, 1 + 0.02)
    ax1.set_title("ROC")
    ax1.legend(loc="best")

    label_list = list(y_label)
    total_bad = sum(label_list)
    total_good = len(label_list) - total_bad
    items = sorted(zip(list(y_pred), label_list), key=lambda x: x[0])

    pred_bin = []
    good_rate = []
    bad_rate = []
    ks_list = []

    # Running prefix counts replace the original O(n^2) re-summation of
    # items[0:i] at every step; at position i the rates still cover items
    # strictly BEFORE i, exactly as before.
    bad_seen = 0
    for i, (pred, label) in enumerate(items):
        good_seen = i - bad_seen
        pred_bin.append(pred)
        goodrate = good_seen / total_good
        badrate = bad_seen / total_bad
        good_rate.append(goodrate)
        bad_rate.append(badrate)
        ks_list.append(abs(goodrate - badrate))
        bad_seen += label
    ax2.plot(pred_bin, good_rate, color="green", label="good_rate")
    ax2.plot(pred_bin, bad_rate, color="red", label="bad_rate")
    ax2.plot(pred_bin, ks_list, color="blue", label="good-bad")
    ax2.set_title("KS:{:.3f}".format(max(ks_list)))
    ax2.legend(loc="best")
    # The original called plt.show(fig): pyplot.show takes no positional
    # figure argument and modern Matplotlib rejects it.
    plt.show()


def plot_roc_ks(y_true, y_scores, data_desc=None):
    """
    Draw ROC and KS charts side by side.

    :param y_true: array-like of binary ground-truth labels (0/1)
    :param y_scores: array-like of predicted scores/probabilities
    :param data_desc: optional dataset description shown in the suptitle
    :return: None (displays the figure)
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import roc_curve, auc

    scores = np.asarray(y_scores)
    labels = np.asarray(y_true)

    # ROC ingredients
    fpr, tpr, _ = roc_curve(labels, scores)
    roc_auc = auc(fpr, tpr)

    # Sort by score, highest first, then accumulate the per-class CDFs
    order = np.argsort(scores)[::-1]
    labels_sorted = labels[order]
    n_pos = np.sum(labels == 1)
    n_neg = np.sum(labels == 0)
    cumulative_positives = np.cumsum(labels_sorted == 1) / n_pos
    cumulative_negatives = np.cumsum(labels_sorted == 0) / n_neg

    # KS statistic: maximum gap between the two cumulative curves
    ks_statistic = max(abs(cumulative_positives - cumulative_negatives))

    plt.rcParams.update({'font.size': 9})  # compact global font
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4))

    # Left panel: ROC
    ax1.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
    ax1.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    ax1.set_xlim([0.0, 1.0])
    ax1.set_ylim([0.0, 1.05])
    ax1.set_xlabel('False Positive Rate')
    ax1.set_ylabel('True Positive Rate')
    ax1.set_title('Receiver Operating Characteristic (ROC)')
    ax1.legend(loc="lower right")

    # Right panel: KS (x-axis is the sorted sample index)
    ax2.plot(cumulative_positives, label='Cumulative Positives')
    ax2.plot(cumulative_negatives, label='Cumulative Negatives')
    ax2.axvline(np.argmax(abs(cumulative_positives - cumulative_negatives)), color='r', linestyle='--',
                label=f'Max KS: {ks_statistic:.2f}')
    ax2.set_xlabel('Sample Fraction (sorted by score)')
    ax2.set_ylabel('Cumulative Distribution Function')
    ax2.set_title('KS Statistic Curve')
    ax2.legend()

    if data_desc:
        plt.suptitle(f'Performance on {data_desc}', fontsize=14)

    # keep the suptitle from being clipped
    plt.tight_layout(rect=[0, 0, 1, 1])
    plt.show()