import pandas as pd

# Source data file; read with the gb18030 (Chinese) encoding.
data = './Dafen_20220708.csv'
df = pd.read_csv(data, encoding='gb18030')

# Key metrics and their monotonicity type
# Decreasing metrics (RLB, Ftswd, Si_Ti)
# Increasing metrics (Fliluntie, V)

key_dict = dict(  ### Operating parameters
    CG_LT_GL_GL04_FCokeRate='Asc',
    CG_LT_GL_GL04_FCoalRate='Asc',
    CG_LT_GL_GL04_RFWD='Asc',
    CG_LT_GL_GL04_FYLL='Asc',
    CG_LT_GL_GL04_LFLL='Asc',
    CG_LT_GL_GL04_GFDNKG='Asc',
    CG_LT_GL_GL04_RFH='Desc',
    ### State parameters
    CG_LT_GL_GL04_DW='Desc',
    CG_LT_GL_GL04_LDYL='Desc',
    CG_LT_GL_GL04_YC='Desc',
    CG_LT_GL_GL04_TQXZS='Asc',
    CG_LT_GL_GL04_MQLYL='Asc',
    CG_LT_GL_GL04_ZXQL='Asc',
    CG_LT_GL_GL04_XBYCZB='Desc',
    CG_LT_GL_GL04_Ftswd='Desc',
    CG_LT_GL_GL04_19507WDPJ='Desc',
    CG_LT_GL_GL04_26275WDPJ='Desc',
    CG_LT_GL_GL04_34800WDPJ='Desc',
    ###### Economic indicators
    CG_LT_GL_GL04_V='Asc',
    CG_LT_GL_GL04_Si_Ti='Desc',
    # CG_LT_GL_GL04_FCoalRate= 'Asc',
    # CG_LT_GL_GL04_FCokeRate= 'Desc',
    CG_LT_GL_GL04_FLiLunTie='Asc')

# View of all key parameter (column) names
key_params = key_dict.keys()

# Descriptive statistics of the raw data
# NOTE(review): the result is only displayed in a notebook; when run as a
# plain script this expression's value is discarded.
df[key_params].describe()


def outlier_treatment(df, cols, whis=1.5):
    """Compute quartiles and Tukey-fence outlier bounds for each column.

    Parameters
    ----------
    df : pandas.DataFrame
        Source data.
    cols : iterable of str
        Column names to analyze.
    whis : float, default 1.5
        Whisker multiplier applied to the IQR (boxplot convention).

    Returns
    -------
    dict
        ``{col: {'lower_bound', 'upper_bound', 'Q1', 'Q2', 'Q3'}}`` where
        the bounds are ``Q1 - whis*IQR`` and ``Q3 + whis*IQR``.
    """
    range_map = {}
    for col in cols:
        # Only the three quartiles are needed; the original Q5/Q95 values
        # were computed but never used.
        q1, q2, q3 = df[col].quantile([0.25, 0.50, 0.75])
        iqr = q3 - q1
        range_map[col] = {
            'lower_bound': q1 - whis * iqr,
            'upper_bound': q3 + whis * iqr,
            'Q1': q1,
            'Q2': q2,
            'Q3': q3,
        }
    return range_map


# Interquartile-range statistics table for the raw data
analysis_map = outlier_treatment(df, key_params)
print(analysis_map)

# Expert-experience ranges for each metric (manual domain knowledge)
exp_map = {

    #### Operating metrics
    "CG_LT_GL_GL04_FCokeRate": {
        "lower_bound": 350,
        "upper_bound": 480
    },
    "CG_LT_GL_GL04_FCoalRate": {
        "lower_bound": 120,
        "upper_bound": 150
    },
    "CG_LT_GL_GL04_RFWD": {
        "lower_bound": 1150,
        "upper_bound": 1209
    },
    "CG_LT_GL_GL04_FYLL": {
        "lower_bound": 9500,
        "upper_bound": 15500
    },
    "CG_LT_GL_GL04_LFLL": {
        "lower_bound": 4800,
        "upper_bound": 5500
    },
    "CG_LT_GL_GL04_GFDNKG": {
        "lower_bound": 10000,
        "upper_bound": 13500
    },
    "CG_LT_GL_GL04_RFH": {
        "lower_bound": 10000,
        "upper_bound": 27000
    },
    #### State metrics
    "CG_LT_GL_GL04_DW": {
        "lower_bound": 140,
        "upper_bound": 230
    },
    "CG_LT_GL_GL04_Ftswd": {
        "lower_bound": 1450,
        "upper_bound": 1490
    },
    "CG_LT_GL_GL04_LDYL": {
        "lower_bound": 220,
        "upper_bound": 255
    },
    "CG_LT_GL_GL04_YC": {
        "lower_bound": 160,
        "upper_bound": 190
    },
    "CG_LT_GL_GL04_TQXZS": {
        "lower_bound": 27.5,
        "upper_bound": 32.5
    },
    "CG_LT_GL_GL04_MQLYL": {
        "lower_bound": 44,
        "upper_bound": 47.5
    },
    "CG_LT_GL_GL04_ZXQL": {
        "lower_bound": 0.46,
        "upper_bound": 0.66
    },
    "CG_LT_GL_GL04_XBYCZB": {
        "lower_bound": 65,
        "upper_bound": 80
    },
    "CG_LT_GL_GL04_19507WDPJ": {
        "lower_bound": 55,
        "upper_bound": 95
    },
    "CG_LT_GL_GL04_26275WDPJ": {
        "lower_bound": 70,
        "upper_bound": 100
    },
    "CG_LT_GL_GL04_34800WDPJ": {
        "lower_bound": 85,
        "upper_bound": 125
    },
    ##### Economic metrics
    "CG_LT_GL_GL04_V": {
        "lower_bound": 0.15,
        "upper_bound": 0.26
    },
    "CG_LT_GL_GL04_Si_Ti": {
        "lower_bound": 0.15,
        "upper_bound": 0.5
    },
    # "CG_LT_GL_GL04_FCoalRate":{
    #   "lower_bound": 125,
    #   "upper_bound": 160
    # },
    # "CG_LT_GL_GL04_FCokeRate":{
    #   "lower_bound": 350,
    #   "upper_bound": 382
    # },
    "CG_LT_GL_GL04_FLiLunTie": {
        "lower_bound": 240,
        "upper_bound": 280
    }

}

# Final per-parameter range table: combine the statistical (IQR) table with
# the expert table by taking the tighter bound on each side — the larger of
# the two lower bounds, the smaller of the two upper bounds.
final_map = {}
for param in key_params:
    final_map[param] = {
        'lower_bound': max(analysis_map[param]['lower_bound'],
                           exp_map[param]['lower_bound']),
        'upper_bound': min(analysis_map[param]['upper_bound'],
                           exp_map[param]['upper_bound']),
    }

# Final range table
print(final_map)


# Percentile computation.
# NOTE: levels cover 0.01 - 0.99; non-rejected (normal) states all score
# above 60 and below 99; 60 marks a flagged abnormal state, kept apart
# from the normal scoring range.
def calculate_quantile(df):
    """Return the 1st-99th percentiles of every column of ``df``.

    The result is a DataFrame indexed by quantile level (0.01 .. 0.99),
    one row per percentile, one column per input column.
    """
    # BUGFIX: DataFrame.append() was removed in pandas 2.0; a single
    # quantile() call with the full list of levels builds the same table.
    return df.quantile([i / 100 for i in range(1, 100)])


# Build the valid data set `edf`: clip every key parameter to its final
# bounds, then collect its 1st-99th percentiles as one column of edf.
edf = pd.DataFrame()
for param in key_params:
    sub = df[[param]]
    lo = final_map[param]['lower_bound']
    hi = final_map[param]['upper_bound']
    # Keep only rows inside the admissible [lo, hi] range
    within = sub[(sub[param] >= lo) & (sub[param] <= hi)]
    edf = pd.concat([edf, calculate_quantile(within)], axis=1)

# Record the percentile level each row represents
edf['perc'] = edf.index

# Rounding everything to 2 decimals loses precision and creates many
# duplicate values — assumed acceptable here, and it reduces segmentation.
edf = edf.round(2)

# Percentile computation result
#edf


# Add weight, score and bounds per value segment.
def get_weight_score(edf, name, type):
    """Build the scoring table for one parameter column of ``edf``.

    Parameters
    ----------
    edf : pandas.DataFrame
        Percentile table with a ``'perc'`` column holding the quantile level.
    name : str
        Parameter column to segment.
    type : str
        ``'Asc'`` for ascending metrics, anything else is treated as
        descending.

    Returns
    -------
    pandas.DataFrame
        One row per distinct (rounded) value with its percentile span,
        ``[lower_bound, upper_bound)`` interval, ``sub_weight`` and
        ``score``.

    NOTE: relies on the module-level ``final_map`` for the outermost bounds.
    """
    # Merge rows sharing the same rounded value; record the percentile
    # span each distinct value covers.
    grouped = edf.groupby(name)
    max_perc = grouped['perc'].max().rename("max_perc")
    min_perc = grouped['perc'].min().rename("min_perc")

    final_df = pd.concat([min_perc, max_perc], axis=1)
    final_df[name] = final_df.index

    # Weight base: the percentile span of the previous segment (shift by
    # one), averaged to avoid bias; the first segment defaults to 0.
    # Rule:
    #   descending metrics (RLB, Ftswd, Si_Ti): 1 - (min_perc + max_perc)/2
    #   ascending  metrics (Fliluntie, V):          (min_perc + max_perc)/2
    final_df['last_min_perc'] = final_df['min_perc'].shift(1).fillna(0)
    final_df['last_max_perc'] = final_df['max_perc'].shift(1).fillna(0)

    # BUGFIX: the original averaged last_min_perc with itself; per the
    # documented rule it is the mean of the min and max percentiles.
    avg_perc = (final_df['last_min_perc'] + final_df['last_max_perc']) / 2
    if type == 'Asc':
        final_df['sub_weight'] = avg_perc
    else:
        final_df['sub_weight'] = 1 - avg_perc

    # Map the weight into the 65-100 scoring band (scores of 60 and below
    # are reserved for flagged abnormal states).
    final_df['score'] = 65 + 35 * final_df['sub_weight']

    # Each value is the base of its interval: lower_bound is the value
    # itself, upper_bound is the next segment's value, so consumers use
    # half-open intervals [lower_bound, upper_bound).
    final_df['lower_bound'] = final_df[name]
    final_df['upper_bound'] = final_df[name].shift(-1)

    # The outermost bounds come from final_map, not the observed data set,
    # so admissible values outside the observed range are not dropped.
    # Positional .iloc avoids the original fragile chained, label-based
    # assignment (the index holds parameter values, not 0..n-1).
    final_df.iloc[0, final_df.columns.get_loc('lower_bound')] = \
        final_map[name]['lower_bound']
    final_df.iloc[-1, final_df.columns.get_loc('upper_bound')] = \
        final_map[name]['upper_bound']

    col_order = ['min_perc', 'max_perc', 'last_min_perc', 'last_max_perc',
                 'lower_bound', 'upper_bound', 'sub_weight', 'score', name]
    return final_df[col_order]


# Build the weight/score table for every key parameter.
final_result = {}

for p in key_params:
    final_result[p] = get_weight_score(edf, p, key_dict[p])

from interval3 import Interval

# Build the scoring dictionary usable by the scorer:
# parameter name -> {Interval [lower_bound, upper_bound): score}
score_dict = {}

for param in key_params:
    idict = {}
    score_dict[param] = idict
    tdf = final_result[param]
    for index, row in tdf.iterrows():
        # Half-open interval: the lower bound belongs to this segment,
        # the upper bound starts the next one.
        i = Interval(row['lower_bound'], row['upper_bound'],
                     lower_closed=True, upper_closed=False)
        idict[i] = row['score']

import pickle

# BUGFIX: the original passed an unclosed open() handle to pickle.dump;
# a context manager guarantees the file is flushed and closed.
with open("./Gaolu_dict_202303.pk", "wb") as f:
    pickle.dump(score_dict, f)

