

import os
import itertools as it
from itertools import starmap
import chardet
import pandas as pd
import operator

from leads_rating_ETL import data_cut


def config_check(config: dict, *args):
    """
    Validate the configuration object ``config`` and derive ``config["columns"]``.

    :param config: configuration dict; mutated in place (``config["columns"]`` is set)
    :param args: the dynamic (non-fixed) feature containers to traverse;
                 dicts of feature lists in "combination" mode,
                 a single feature-name list in "traversal" mode
    :return: the (possibly updated) config dict
    :raises Exception: on any malformed configuration value
    """

    if not isinstance(config, dict):
        raise Exception("Config should be a dict!")

    if not isinstance(config["vals_iter_cols"], list):
        raise Exception("value 'vals_iter_cols' should be a list!")

    if not isinstance(config["if_vals_iter"], list):
        raise Exception("value 'if_vals_iter' should be a list!")
    elif len(config["if_vals_iter"]) == 0 and len(config["vals_iter_cols"]) > 0:
        raise Exception("if lengths of  list 'if_vals_iter' is zero, list 'vals_iter_cols' should be []!")
    elif len(config["if_vals_iter"]) != 0 and len(config["if_vals_iter"]) != 1:
        raise Exception("list 'if_vals_iter' should only contains single value!")

    if not isinstance(config["quantile_split_list"], list):
        raise Exception("value 'quantile_split_list' should be a list!")
    elif len(config["quantile_split_list"]) <= 1:
        # BUG FIX: the check enforces length >= 2, but the old message claimed
        # "greater than 2" — message now matches the condition.
        raise Exception("lengths of the 'quantile_split_list' should be at least 2!")
    elif not _monotone_increasing(config["quantile_split_list"]):
        raise Exception("vals in the 'quantile_split_list' should monotone increasing!")

    # config["columns"] is generated automatically to avoid manual omissions.
    if config["train_mode"] == "combination" and not config["if_vals_iter"]:
        columns = []
        for arg in args:
            for val in arg.values():
                columns += val
        columns = list(set(columns))  # de-duplicate (order is not preserved)
        config["columns"] = columns + [config["target"]]
    elif config["train_mode"] == "traversal" and not config["if_vals_iter"]:
        # BUG FIX: the original tested `static in args` (membership in the tuple
        # of positional arguments) instead of the dynamic feature list args[0]
        # that is concatenated below, and `repeat_list += static` appended the
        # individual *characters* of the name; use a comprehension instead.
        repeat_list = [static for static in config["static_features"]
                       if static in args[0]]
        if repeat_list:
            raise Exception("固定特征动态特征不应该有重复项！\n ", repeat_list)
        else:
            config["columns"] = config["static_features"] + args[0] + [config["target"]]
    return config


def data_reader(path: str, target: str, encoding=None):
    """
    Read the source data file (csv or Excel) and coerce the target column to int.

    (Verifying the file encoding matters here: a wrong encoding raises an
    error, which doubles as a sanity check on the input file.)

    :param path: source data file path
    :param target: target (label) column name
    :param encoding: file encoding; auto-detected with chardet when None
    :return: the raw DataFrame with ``target`` cast float -> int
    """

    _, file = os.path.split(path)
    _, extension = os.path.splitext(file)
    is_csv = extension == '.csv'

    # BUG FIX: the original passed engine='openpyxl' to pd.read_csv (invalid
    # engine) when an encoding was supplied, and passed encoding= to
    # pd.read_excel (removed kwarg) when auto-detecting. Encoding only applies
    # to csv; the openpyxl engine only applies to Excel workbooks.
    if is_csv:
        if encoding is None:
            with open(path, 'rb') as f:
                encoding = chardet.detect(f.read())['encoding']
        origin = pd.read_csv(path, encoding=encoding)
    else:
        origin = pd.read_excel(path, engine='openpyxl')

    # Labels may arrive as "1.0" strings; go through float first.
    origin[target] = origin[target].astype(float).astype(int)

    return origin



def features_traversal(dynamic: list):
    """
    Enumerate every non-empty subset of the dynamic feature names.

    :param dynamic: list of dynamic feature names
    :return: list of feature combinations [[], [], ...  [], .....],
             ordered by subset size then combination order
    """
    return [list(combo)
            for size in range(1, len(dynamic) + 1)
            for combo in it.combinations(dynamic, size)]




def columns_vals_iter_pj(mode, *args):
    """
    Build the iteration plan either across feature columns, or across the
    value groups of individual columns.

    :param mode: "traversal" for full subset enumeration of the features,
                 anything else for keyed feature-group combination
    :param args: forwarded to features_traversal / features_combination
    :return: features list ("traversal" mode),
             or a (features list, codes list) pair otherwise
    """
    if mode == "traversal":
        # Every non-empty subset of the dynamic features.
        return features_traversal(*args)

    features_list, codes_list = [], []
    for feature_groups, codes in features_combination(*args):
        # feature_groups: e.g. [['qwe', 'uiop'], [], ['c']]
        # codes:          e.g. ['A1', 'B1', 'C1']
        # Flattening the groups also drops the empty ones.
        features_list.append([name for group in feature_groups for name in group])
        codes_list.append(codes)

    return features_list, codes_list



def summary_concat(config, path_dict):
    """
    Concatenate all per-run summary workbooks into a single Excel file.

    :param config: configuration dict (unused here; kept for a uniform call signature)
    :param path_dict: dict with at least "summaries" (folder of .xlsx summaries)
                      and "th_path" (output folder, also used as the file prefix)
    """
    summaries_folder = path_dict["summaries"]
    # Sort for a deterministic concatenation order.
    files_list = [os.path.join(summaries_folder, name)
                  for name in sorted(os.listdir(summaries_folder))]
    # BUG FIX: pandas removed the `encoding` kwarg from read_excel/to_excel;
    # passing it raises TypeError on current pandas. xlsx is always UTF-8.
    datas = [pd.read_excel(f, engine='openpyxl') for f in files_list]
    all_data = pd.concat(datas)
    # NOTE(review): "th_path" is used both as the output folder and as the
    # file-name prefix (self-identifying name) — confirm the doubling is intended.
    out_path = path_dict["th_path"] + '/' + path_dict["th_path"] + '_all_summary.xlsx'
    all_data.to_excel(out_path, index=False)



def result_check(config: dict, path_dict: dict):
    """
    Count the artifacts produced by a run and record the outcome to a file.

    A run counts as successful when the pkl count equals exactly three times
    both the result count and the summary count, and none of them is zero.

    :param config: configuration dict; "Folder" is where the report is written
    :param path_dict: dict with "pkls", "results" and "summaries" folder paths
    """
    counts = {name: len(os.listdir(path_dict[name]))
              for name in ("pkls", "results", "summaries")}

    ctt = "Pkl: %d \n " \
          "Result: %d \n" \
          "Summary: %d \n" \
          % (counts["pkls"], counts["results"], counts["summaries"])

    ok = counts["pkls"] == 3 * counts["results"] == 3 * counts["summaries"] != 0
    report_name = "success_details.txt" if ok else "fail_details.txt"
    with open(config["Folder"] + "/" + report_name, "w") as f:
        f.write(ctt)


def features_combination(*args):
    """
    Yield every cross-product combination of keyed feature groups.

    example:
    A = {'A1': ['qwe', 'uiop'],
        'A2': ['zxc', 'bnm']}
    B = {'B1': [],
         'B2': ['bbbb']}
    C = {'C1': ['c'], 'C2': ['C1245r']}

    for features, codes in features_combination(A, B, C):
        # print(features, codes)

    output:
    [['qwe', 'uiop'], [], ['c']]               ['A1', 'B1', 'C1']
    [['qwe', 'uiop'], [], ['C1245r']]          ['A1', 'B1', 'C2']
    [['qwe', 'uiop'], ['bbbb'], ['c']]         ['A1', 'B2', 'C1']
    [['qwe', 'uiop'], ['bbbb'], ['C1245r']]    ['A1', 'B2', 'C2']
    [['zxc', 'bnm'], [], ['c']]                ['A2', 'B1', 'C1']
    [['zxc', 'bnm'], [], ['C1245r']]           ['A2', 'B1', 'C2']
    [['zxc', 'bnm'], ['bbbb'], ['c']]          ['A2', 'B2', 'C1']
    [['zxc', 'bnm'], ['bbbb'], ['C1245r']]     ['A2', 'B2', 'C2']
    ======================================================
    :param args: feature dicts mapping code -> feature-name list
    :return: generator of (features_list, feature_codes_list) pairs
    """
    # One flat lookup from code to its feature group (codes are assumed unique).
    code_to_group = {code: group
                     for mapping in args
                     for code, group in mapping.items()}
    # it.product over dicts iterates their keys in insertion order.
    for code_tuple in it.product(*args):
        yield [code_to_group[code] for code in code_tuple], list(code_tuple)




def val_format(th_lift, th_racall, th_leads_ratio):
    """
    Format metric values for display.

    :param th_lift: lift value; floats are rounded to 2 decimals
    :param th_racall: recall value; floats are rendered as a "xx.xx%" string
    :param th_leads_ratio: leads ratio; floats are rendered as a "xx.xx%" string
    :return: [lift, recall, leads_ratio]; non-float inputs pass through unchanged
    """
    # BUG FIX: builtin float has no .round() method (that is a numpy-scalar
    # method), so `th_lift.round(...)` raised AttributeError for plain floats;
    # the builtin round() handles both.
    return [round(th_lift, 2) if isinstance(th_lift, float) else th_lift,
            format(float(th_racall), ".2%") if isinstance(th_racall, float) else th_racall,
            format(float(th_leads_ratio), ".2%") if isinstance(th_leads_ratio, float) else th_leads_ratio]


def class_label2(x, prob_lowest, prob_hightest):
    '''
    Map a score to a coarse label; the result is only shown in the summary.

    :param x: score to classify
    :param prob_lowest: float, lower quantile threshold
    :param prob_hightest: float, upper quantile threshold
    :return: "高" (high) / "低" (low) / "token_notlook" (middle band)
    '''
    # The high-band check runs first, so it wins if the thresholds overlap.
    label = u"token_notlook"
    if x >= prob_hightest:
        label = u"高"
    elif x <= prob_lowest:
        label = u"低"
    return label


def _monotone_increasing(lst):
    pairs = zip(lst, lst[1:])
    return all(starmap(operator.le, pairs))



def if_vals_check(config, source, validate):
    """
    Verify that the iterated column's values are consistent across the
    train / test / validate splits produced from ``source``.

    :param config: configuration dict ("if_vals_iter", "vals_iter_cols",
                   "target", "train_test_split" are read)
    :param source: full training-source DataFrame, split via data_cut
    :param validate: hold-out validation DataFrame
    :raises Exception: when the config is inconsistent or any split is
                       missing values present in another
    """
    iter_col = config["if_vals_iter"][0]
    if iter_col not in config["vals_iter_cols"]:
        raise Exception("'vals_iter_cols' should incloud feature in 'if_vals_iter'!")

    X_train, X_test, y_train, y_test = data_cut(source,
                                                target_column=config["target"],
                                                test_size=config["train_test_split"],
                                                random_state=1)

    train_vals = set(X_train[iter_col].tolist())
    test_vals = set(X_test[iter_col].tolist())
    validate_vals = set(validate[iter_col].tolist())

    if train_vals - test_vals:
        raise Exception(" 'val_train_set' contains values missing in 'val_test_set'!")
    if test_vals - train_vals:
        raise Exception(" 'val_test_set' contains values missing in 'val_train_set'!")
    if train_vals - validate_vals:
        raise Exception(" 'val_train_set' contains values missing in 'val_validate_set'!")


def prob_split(prob_index, prob_info_list, prob_info_name_list):
    """
    Locate prob_index in the threshold buckets and return the bucket's name.

    :param prob_index: value to locate
    :param prob_info_list: ascending bucket thresholds
    :param prob_info_name_list: bucket names aligned with prob_info_list
    :return: matching bucket name, or None when no bucket matches
    """
    last = len(prob_info_list) - 1
    for idx, threshold in enumerate(prob_info_list):
        # Anything at or below the first threshold falls into the first bucket.
        if idx == 0 and prob_index <= threshold:
            return prob_info_name_list[0]
        # Anything at or above the last threshold falls into the last bucket.
        if idx == last and prob_index >= prob_info_list[last]:
            return prob_info_name_list[idx]
        # Interior buckets are half-open ranges [threshold, next threshold).
        if idx < last and threshold <= prob_index < prob_info_list[idx + 1]:
            return prob_info_name_list[idx]

