import copy
import math
import os
import sys

# BUGFIX: `from pandas import np` used the accidental `pandas.np` alias,
# which was deprecated in pandas 1.0 and removed in pandas 2.0 — import
# numpy directly instead.
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score

# Make the working directory importable so the local ``utils`` module resolves.
CURRENT_PATH = os.getcwd()
sys.path.append(CURRENT_PATH)

from utils import prob_split


def pre_auc(model, X_data, y_data):
    """Score ``X_data`` with ``model`` and measure ROC-AUC against ``y_data``.

    :param model: fitted classifier exposing ``predict_proba``
    :param X_data: feature matrix accepted by the model
    :param y_data: ground-truth labels; cast float -> int before scoring
    :return: tuple ``(positive_scores, auc)`` — the positive-class
        probabilities and the ROC-AUC computed from them
    """
    # Column 1 of predict_proba is the positive-class probability.
    positive_scores = model.predict_proba(X_data)[:, 1].astype(float)
    labels = y_data.astype(float).astype(int)
    return positive_scores, roc_auc_score(labels, positive_scores)

def model_evaluate(data_set,config,if_per_split):
    '''
    Build a per-group lift table plus KS/GINI summary for a scored data set.

    :param data_set: pd.DataFrame containing the columns [['prob', 'target']]
    :param config: dict; the quantile branch reads config['quantile_split_list'],
        a list of fractions in (0, 1) used as split points on the sorted scores
    :param if_per_split: bool switch between the two grouping modes.
        NOTE(review): the original docstring described True as the fixed
        50-layer split, but the code runs the 50-layer split when the flag is
        False and the quantile split when it is True — confirm the intended
        polarity with callers.
    :return: (result_lift, result_evaluate) — the per-group lift table and a
        1-row frame with 'KS' and 'GINI' filled in ('AUC' is never set here).
    '''

    # Work on a deep copy so the caller's frame is never mutated.
    result = copy.deepcopy(data_set)

    result["target"] = result["target"].astype(float)

    if not if_per_split:
        # Fixed split: 50 equally sized layers by descending score.
        # NOTE(review): layer == 0 (ZeroDivisionError below) when
        # len(result) < 50 — there is no guard for small inputs.
        layer = int(len(result) / 50)
        result = result.sort_values(by='prob', ascending=False).reset_index(drop=True)
        result["group"] = np.nan
        for index in result.index:
            # Group ids run 0..49; the overflow id 50 (produced by the last
            # rows when the length divides evenly) is folded into group 49.
            result.at[index, "group"] = math.floor((index + 1) / layer)
            if math.floor((index + 1) / layer) == 50:
                result.at[index, "group"] = 49
    else:
        # Quantile split: cut points are the score values found at the
        # configured quantile positions of the ascending-sorted data;
        # prob_split() then maps each row's score to a named bucket
        # such as 'prob_10' (helper defined in the local utils module).
        result = result.sort_values(by='prob', ascending=True).reset_index(drop=True)
        result["group"] = ''
        quantile_split_list = config['quantile_split_list']
        prob_info_list = []
        prob_info_name_list = []

        for single_quantile in quantile_split_list:
            quantile_token = result.iloc[int(np.round(len(result) * single_quantile)), :]['prob']
            prob_info_list.append(quantile_token)
            single_quantile_str = ''.join(['prob_', str(int(single_quantile * 100))])
            prob_info_name_list.append(single_quantile_str)

        for prob_index,prob_value in enumerate(result["prob"]):
            result.at[prob_index , "group"] = prob_split(prob_value,prob_info_list,prob_info_name_list)

    # Per-group aggregates: score range (max_p/min_p), positive count
    # ('target' sum) and row count ('sample').
    max_group = pd.DataFrame(result.groupby('group')['prob'].max()).rename(columns={'prob': 'max_p'}).reset_index(
        drop=True)
    min_group = pd.DataFrame(result.groupby('group')['prob'].min()).rename(columns={'prob': 'min_p'}).reset_index(
        drop=True)
    target_group = pd.DataFrame(result.groupby('group')['target'].sum()).rename(
        columns={'target': 'target'}).reset_index(drop=True)
    sample_group = pd.DataFrame(result.groupby('group')['target'].count()).rename(
        columns={'target': 'sample'}).reset_index(drop=True)

    maxmin = max_group.join(min_group)
    samtarget = sample_group.join(target_group)
    output = samtarget.join(maxmin)
    output["sum_target"] = output["target"].cumsum()
    output['sum_sample'] = output['sample'].cumsum()
    output["target"] = output["target"].astype(float)
    output["sample"] = output["sample"].astype(float).astype(int)
    output[u"precision"] = output["target"] / output["sample"]
    output[u"acc_recall"] = output["sum_target"] / output["target"].sum()
    output[u"acc_precision"] = output["sum_target"] / output["sum_sample"]
    output['lift'] = output['acc_precision'] / ((output["target"].sum() + 0.0) / output["sample"].sum())  # cumulative precision over the overall base rate
    # Presentation formatting (rounding / percent strings).
    output[u"precision"] = output[u"precision"].round(decimals=3)
    output[u"acc_precision"] = output[u"acc_precision"].round(decimals=3)
    output[u"acc_recall"] = output[u"acc_recall"].map(lambda x: format(x, ".2%"))
    output[u"lift"] = output[u"lift"].round(decimals=2)
    result_lift = output.reset_index(drop=True)

    result_evaluate = pd.DataFrame(columns=['KS', 'GINI', 'AUC'])
    # NOTE(review): iloc assignment on an empty frame is a no-op; the row is
    # actually created by the .loc[1, ...] writes below, and 'AUC' stays NaN.
    result_evaluate.iloc[0:2, 0:3] = np.nan
    # 'bad' counts rows whose target is 0; 'acc_good' below aliases the
    # cumulative positives. NOTE(review): this good/bad naming looks inverted
    # relative to the usual credit-scoring convention — verify before reuse.
    result_lift['bad'] = result_lift['sample'] - result_lift['target']
    all_bad = result_lift['bad'].sum()
    result_lift['acc_bad'] = result_lift['sum_sample'] - result_lift['sum_target']
    result_lift['acc_bad_ratio'] = result_lift['acc_bad'].apply(lambda x: x / all_bad)
    result_lift['acc_good'] = result_lift['sum_target']
    all_good = output['target'].sum()
    result_lift['good_ratio1'] = result_lift['target'].apply(lambda x: x / all_good)
    result_lift['acc_good_ratio'] = result_lift['acc_good'].apply(lambda x: x / all_good)
    # Shift acc_bad_ratio down one row (0 in front, last value dropped) so
    # each row can pair its cumulative bad ratio with the previous row's.
    df1 = result_lift['acc_bad_ratio'].tolist()
    df1.insert(0, 0)
    df1.pop()
    df2 = pd.DataFrame(df1)
    result_lift['add_I_1'] = df2
    # Trapezoid term: J = 0.5 * per-group good share * (current + previous
    # cumulative bad ratio); the sum e approximates the area under the
    # cumulative good-vs-bad curve, giving GINI = (0.5 - e) * 2.
    result_lift['I'] = result_lift['acc_bad_ratio'] + result_lift['add_I_1']
    result_lift['J'] = 0.5 * result_lift['good_ratio1'] * result_lift['I']
    e = result_lift['J'].sum()
    # KS = maximum gap between the cumulative good and bad distributions.
    result_evaluate.loc[1, 'KS'] = np.abs(result_lift['acc_good_ratio'] - result_lift['acc_bad_ratio']).max()
    result_evaluate.loc[1, 'GINI'] = (0.5 - e) * 2

    return result_lift, result_evaluate


def validate(df: pd.DataFrame, cross):
    '''
    Validation-time counterpart of the train-time lift report.

    :param df: frame holding a 0/1 ``target`` column plus the ``cross`` column
    :param cross: name of the column whose levels include '高', '中', '低'
    :return: cross-tab (with margins) extended with visit-rate, lead-share and,
        for the '低'/'高' levels, uplift and recall columns (a placeholder
        message is written instead when a level is absent from ``df``)
    '''
    table = pd.crosstab(df[cross], df['target'], margins=True)
    table['进店率'] = table[1] / table['All']
    table['线索占比'] = table['All'] / table.loc['All', 'All']

    present_levels = set(df[cross])
    # Same computation for the low- and high-intent buckets; only the
    # "level missing" placeholder message differs between the two.
    for level, absent_msg in (('低', '无低意向线索'), ('高', '无高意向线索')):
        if level in present_levels:
            table.loc[level, '验证提升'] = table.loc[level, '进店率'] / table.loc['All', '进店率']
            table.loc[level, '验证召回'] = table.loc[level, 1] / table.loc['All', 1]
        else:
            table.loc[level, '验证提升'] = absent_msg
            table.loc[level, '验证召回'] = absent_msg
    return table