
import copy
import multiprocessing
import os
import sys
import warnings
from datetime import datetime as dt
import pandas as pd
CURRENT_PATH = os.getcwd()
sys.path.append(CURRENT_PATH)

from leads_rating_model import train2result
from utils import val_format, data_reader, \
    columns_vals_iter_pj, summary_concat, result_check, config_check, if_vals_check

'''
Multiprocess batch-training driver.

`training2summary` is the per-task function executed by the pool
(`multiprocessing.Pool`); inside it, `train2result` performs the actual
model training for one feature/value combination.
'''



def path_manager(folder: str, tag_prim: str, tag_secon=''):
    """
    Build the structured output-directory tree under *folder* and return a
    dict of output paths.  The run folder is named
    "<tag_prim><tag_secon>_<YYYY-MM-DD>" so runs on different days don't
    collide.

    ======================================================
    :param folder: working directory in which the run folder is created
    :param tag_prim: primary tag, e.g. "DDMP&Buick"
    :param tag_secon: secondary tag, e.g. "features_fillna"
    :return: path_dict with keys:
        'th_path'   : run folder name (relative to *folder*)
        'pkls', 'results', 'summaries' : absolute sub-folder paths
        'str2int', 'ohe', 'model' : file-path prefixes inside 'pkls'
        'result'    : file-path prefix inside 'results'
        'summary'   : file-path prefix inside 'summaries'

    Side effect: leaves the process CWD at *folder*.
    """
    os.chdir(folder)
    run_time = dt.now().strftime("%Y-%m-%d")
    th_path = str(tag_prim) + str(tag_secon) + '_' + run_time

    if not os.path.exists(th_path):
        os.mkdir(th_path)
    elif os.listdir(th_path):
        # BUG FIX: the original check was inverted (`not os.listdir(...)`),
        # so it warned "folder is not empty" exactly when the folder WAS
        # empty.  Warn only when a pre-existing run folder has content.
        warnings.warn("注意，文件夹不为空。\n"
                      "Folder is not empty! Please check the folder.")

    path_dict = {'th_path': th_path}

    os.chdir(th_path)
    for sub in ('pkls', 'results', 'summaries'):
        if not os.path.exists(sub):
            os.mkdir(sub)
        path_dict[sub] = os.path.abspath(sub)
    os.chdir(folder)

    # File-path prefixes; the per-version suffix (e.g. "_V00001.pkl") is
    # appended later by training_control.
    for name in ("str2int", "ohe", "model"):
        path_dict[name] = path_dict["pkls"] + "/" + name + "_" + th_path
    path_dict["result"] = path_dict["results"] + "/" + "result" + "_" + th_path
    path_dict["summary"] = path_dict["summaries"] + "/" + "summary" + "_" + th_path

    return path_dict

def training2summary(vals):
    """
    Train one model variant and append its metrics row to the summary file.

    *vals* is a packed argument list (built by training_control) so the
    function can be dispatched through multiprocessing.Pool.map_async:
        vals[:8] = [if_vals_iter, config, path_dict, version_info,
                    feature, model_summary, i_token, data_pred_list]
        vals[8]  = combination codes (only in "combination" mode)
    Writes the summary row into *model_summary* and saves it to the Excel
    file named in version_info["summary"].
    """
    (if_vals_iter, config, path_dict, version_info,
     feature, model_summary, i, data_pred_list) = vals[:8]

    # train2result returns 18 metrics in six (lift, recall, leads_ratio)
    # triples: source-high, source-low, high, low, high-percent, low-percent.
    metrics = train2result(config,
                           version_info,
                           feature,
                           data_pred_list,
                           path_dict)

    row = ['V' + str(i[0] + 1).zfill(5)]
    for start in range(0, 18, 3):
        row += val_format(*metrics[start:start + 3])

    mode = config["train_mode"]
    if mode == "combination":
        # same handling with or without if_vals_iter: append the codes
        row += vals[8]
    elif mode == "traversal":
        if if_vals_iter:
            row += [str(config["static_values"]), str(i[1])]
        else:
            static_features = config["static_features"]
            row += [str(static_features),
                    str(list(set(feature) - set(static_features)))]

    model_summary.at[i[0]] = row
    model_summary.to_excel(version_info["summary"], encoding='utf_8_sig', index=False)


def training_control(config, path_dict: dict, data_pred_list: [pd.DataFrame], *args):
    """
    Control feature-combination expansion and (optional) multiprocessing
    for model training.
    ======================================================
    :param config: run configuration dict (train_mode, mpp_open, mpp_num,
                   start_code/end_code window, if_vals_iter, ...)
    :param path_dict: output-path dict produced by path_manager
    :param data_pred_list: list [pd.df_train_test, pd.df_validate]
    :param args: feature specs matching the mode
    eg.
    mode == "traversal":
    training_control(（"traversal",["a","b"]）, path_dict, mpp_open=False, ['distance','source'])

    mode == "combination"
    A = {'A1': ['qwe', 'uiop'],
        'A2': ['zxc', 'bnm']}
    B = {'B1': [],
         'B2': ['bbbb']}
    C = {'C1': ['c'], 'C2': ['C1245r']}
    training_control(("combination"), path_dict, mpp_open=False, A,B,C)
    :return:
    """
    if_vals_iter = config['if_vals_iter']
    mode = config["train_mode"]   # "traversal" or "combination"
    mpp_open = config["mpp_open"]  # whether to use a process pool
    mpp_num = config["mpp_num"]    # pool size

    if mode not in ("traversal", "combination"):
        raise Exception("""Training mode[0] should be "traversal" or "combination". """)

    if if_vals_iter:
        print('对字段: {} 的字段值进行筛选'.format(if_vals_iter[0]))
    else:
        print('对特征进行筛选')

    summary_columns = ['版本',
                       '最高_验证提升_train', '最高_验证召回_train', '最高_验证线索占比_train',
                       '最低_验证提升_train', '最低_验证召回_train', '最低_验证线索占比_train',
                       '最高_验证提升_test', '最高_验证召回_test', '最高_验证线索占比_test',
                       '最低_验证提升_test', '最低_验证召回_test', '最低_验证线索占比_test',
                       '最高_验证提升_validate', '最高_验证召回_validate', '最高_验证线索占比_validate',
                       '最低_验证提升_validate', '最低_验证召回_validate', '最低_验证线索占比_validate']

    if mode == 'traversal':
        features_list = columns_vals_iter_pj(mode, *args)
    else:  # 'combination'
        features_list, codes_list = columns_vals_iter_pj(mode, *args)

    # Optional process pool
    mpp = multiprocessing.Pool(processes=mpp_num) if mpp_open else None

    for i in range(len(features_list)):  # all feature combinations
        # only run version numbers inside the [start_code, end_code] window
        if i + 1 < config["start_code"] or i + 1 > config["end_code"]:
            continue

        print('迭代组别:', i)
        # Per-version output files ("_V#####" is the model version number);
        # one file each for the ldos-deployment artifacts and the summaries.
        version_info = dict()
        for name in ["str2int", "ohe", "model"]:
            version_info[name] = path_dict[name] + '_V' + str(i + 1).zfill(5) + '.pkl'
        version_info["result"] = path_dict["result"] + '_V' + str(i + 1).zfill(5) + '.xlsx'
        version_info["summary"] = path_dict["summary"] + '_V' + str(i + 1).zfill(5) + '.xlsx'
        version_info["summary_percent"] = path_dict["summary"] + 'pencent_V' + str(i + 1).zfill(5) + '.xlsx'

        i_token = [i, 'token']
        if mode == "traversal" and not if_vals_iter:
            # dynamic feature selection on top of the static features
            feature = config["static_features"] + features_list[i]
            model_summary = pd.DataFrame(columns=summary_columns + ['固定特征', "动态特征"])
            vals = [if_vals_iter, config, path_dict, version_info, feature,
                    model_summary, i_token, data_pred_list]

        elif mode == 'combination' and not if_vals_iter:
            feature = features_list[i]
            model_summary = pd.DataFrame(columns=summary_columns + config["combination_feature_types"])
            vals = [if_vals_iter, config, path_dict, version_info, feature,
                    model_summary, i_token, data_pred_list, codes_list[i]]

        elif mode == "traversal" and if_vals_iter:
            # Fixed feature set; iterate over the values of one column and
            # slice both data sets down to the selected values.
            fixd_feature = config["vals_iter_cols"]
            iter_vals = config["static_values"] + features_list[i]
            i_token[1] = features_list[i]
            iter_col = config['if_vals_iter'][0]

            source_data, validate_data = data_pred_list[0], data_pred_list[1]
            source_data_itervals = copy.deepcopy(
                source_data.loc[source_data[iter_col].isin(iter_vals)])
            # BUG FIX: the validate mask was previously computed on
            # source_data (copy-paste error), selecting the wrong rows /
            # misaligned index.  Mask validate_data with its own column.
            validate_data_itervals = copy.deepcopy(
                validate_data.loc[validate_data[iter_col].isin(iter_vals)])

            data_pred_itervals = [source_data_itervals, validate_data_itervals]
            model_summary = pd.DataFrame(columns=summary_columns + ['固定字段值', "动态字段值"])
            vals = [if_vals_iter, config, path_dict, version_info, fixd_feature,
                    model_summary, i_token, data_pred_itervals]

        elif mode == "combination" and if_vals_iter:
            fixd_feature = config["vals_iter_cols"]
            model_summary = pd.DataFrame(columns=summary_columns + config["combination_vals_types"])

            iter_comb = features_list[i]
            iter_col = config['if_vals_iter'][0]
            source_data, validate_data = data_pred_list[0], data_pred_list[1]

            source_data_itercomb = copy.deepcopy(
                source_data.loc[source_data[iter_col].isin(iter_comb)])
            # BUG FIX: same copy-paste error as above — validate_data must
            # be masked by its own column, not by source_data's mask.
            validate_data_itercomb = copy.deepcopy(
                validate_data.loc[validate_data[iter_col].isin(iter_comb)])
            data_pred_itercomb = [source_data_itercomb, validate_data_itercomb]

            vals = [if_vals_iter, config, path_dict, version_info, fixd_feature,
                    model_summary, i_token, data_pred_itercomb, codes_list[i]]
        else:
            # unreachable given the mode check above; guard against an
            # unbound `vals` anyway
            continue

        if mpp is not None:
            mpp.map_async(training2summary, [vals])
        else:
            training2summary(vals)

    if mpp is not None:
        mpp.close()
        mpp.join()


def start(config: dict, *args):
    """
    Project entry point: validate the config, build the output directory
    tree, load and align the train/validate data, run the training loop,
    post-process the results, and zip the run folder.
    """
    config = config_check(config, *args)

    # Output paths for the whole run
    path_dict = path_manager(config["Folder"],
                             tag_prim=config["Tag_prim"],
                             tag_secon=config["Tag_secon"])

    # train_data + test_data
    source = data_reader(config['Train_data'],
                         target=config["target"],
                         encoding=config["Train_data_encoding"])
    validate = data_reader(config['Validate_data'],
                           target=config["target"],
                           encoding=config["Validate_data_encoding"])

    # Extra check needed when iterating over column values
    if config['if_vals_iter']:
        if_vals_check(config, source, validate)

    # Align validate's column dtypes to source's so both frames agree
    for col, col_dtype in source.dtypes.items():
        validate[col] = validate[col].astype(col_dtype)

    # Model training; *args carries the combination-mode feature dicts
    training_control(config, path_dict, [source, validate], *args)

    result_check(config, path_dict)
    summary_concat(config, path_dict)

    # Archive the whole run folder
    os.system("""zip -r "%s.zip" "%s" """ % (path_dict["th_path"],
                                             path_dict["th_path"]))
