import copy
import sys
import os

# Shorthand aliases for os.path helpers (module-level; may be referenced elsewhere in the file).
join = os.path.join
dirname = os.path.dirname

# Directory containing this script, with symlinks resolved.
CURRENT_PATH = dirname(os.path.realpath(__file__))
# NOTE(review): join() with a single argument is a no-op, so FEATURE_PATH == CURRENT_PATH.
# Presumably a package sub-directory argument is missing here — confirm the intended layout.
FEATURE_PATH = join(CURRENT_PATH )
sys.path.append(FEATURE_PATH)  # make the leads_rating package importable from this directory

from leads_rating.leads_rating_pkg import start
from leads_rating.base_feature_gen.feature_gen import *
from leads_rating.leads_rating_model import LR_Classifier, GBDT_Classifier
from leads_rating.leads_rating_model import parameters_LR, parameters_gbdt
import warnings
warnings.filterwarnings("ignore") # suppress all warning output for the run

# Pipeline configuration consumed by leads_rating_pkg.start: I/O paths, model
# choice, and the feature/value iteration settings.
config = {"Folder": r"E:\CZR_work\其他临时\线索评级标准化\result_get_0427",  # working directory for outputs
          "Tag_prim": "Buick_test_Data",  # primary tag
          "Tag_secon": "特征值迭代组合",  # secondary tag
          "Train_data": r"D:\我的坚果云\leads_rating_auto_proc\origin_data\train_test_test.xlsx",
          "Train_data_encoding": "utf_8_sig",  # training-data encoding
          "train_test_split":0.3,  # test-set fraction for the train/test split
          "Model": ['LR',LR_Classifier,parameters_LR],  # model selection: [name, classifier, hyper-parameter grid]
          "Validate_data": r"D:\我的坚果云\leads_rating_auto_proc\origin_data\validate_test.xlsx",
          "Validate_data_encoding": "gbk",  # validation-data encoding
          "train_mode": "combination",  # one of ["traversal", "combination"]
          "static_features": ['dealer_brank_regional','dealer_city_grade','dealer_code','dealer_did_comp','distance','leads_dealer_comp','leads_city','leads_hour'],  # fixed features (traversal mode only, when if_vals_iter is falsy)
          "combination_feature_types": ["test_A", "test_B"],# length must equal the number of keys across the dicts passed for combination mode
          "mpp_open": False,  # enable multiprocessing
          "mpp_num": 5,  # number of worker processes (only when mpp_open is True)
          "target": "arrive_or",  # target (label) column
          "start_code": 1,
          "end_code": 9999999999,
          "if_vals_iter":['dealer_brank_regional'], # marks whether to filter on the values of a single field
          "static_values":['九区','七区'], # fixed values (traversal mode, when if_vals_iter is set)
          "vals_iter_cols":['dealer_did_comp','leads_year','source','dealer_brank_regional'], # fixed feature columns used when if_vals_iter is set
          "combination_vals_types":['area_A', 'area_B'], # length must match the value-combination dicts passed in
          "source_data_prob_output": False, # per-model probability-threshold output for the training set (basis for computing PSI)
          "validate_data_prob_output": False ,# per-model probability-threshold output for the validation set
          "quantile_split_list":[0.2, 0.5, 0.8] # dynamic split by lead volume, reflected in the model metrics; must be ascending, at least two cut points
          }



def data_pre_base(origin_data_df):
    '''
    Generate the base derived feature columns on a copy of the input.

    origin_data_df must contain the column 'mobile'.
    To derive 'dealer_did_comp' / 'leads_dealer_comp', the columns
    'dealer_city' and 'leads_city' must be present; to derive
    'name1' / 'name2', the column 'name' must be present.

    :param origin_data_df: raw leads DataFrame
    :return: a copy of the input with the derived columns added
    '''

    # Deep-copy so the caller's DataFrame is never mutated.
    origin_data = copy.deepcopy(origin_data_df)

    DLC_need_create = ['did_city', 'did_province', 'did_operator', 'leads_province',
                       'dealer_province', 'dealer_did_comp', 'leads_did_comp', 'leads_dealer_comp',
                       'leads_city_grade', 'dealer_city_grade', 'did_city_grade', 'leads_regional',
                       'dealer_regional', 'did_regional', 'name1', 'name2']

    # etl_funcs reports on the requested derived columns; echo each entry for the log.
    DLC_values_list = etl_funcs(origin_data, DLC_need_create)
    for i in DLC_values_list:
        print(i)

    origin_data = operator_loc(origin_data)  # adds ['did_operator', 'did_province', 'did_city']
    origin_data = city2province(
        origin_data)  # columns arg selects outputs; default: ['leads_province', 'dealer_province', 'did_province']
    origin_data = province2regional(
        origin_data)  # columns arg selects outputs; default: ['leads_regional', 'dealer_regional', 'did_regional']
    origin_data = city_grade_info(
        origin_data)  # columns arg selects outputs; default: ['dealer_city_grade', 'did_city_grade', 'leads_city_grade']

    # BUG FIX: the row-wise comparisons below require axis=1; without it pandas applies
    # the lambda column-by-column, so the x['...'] label lookups raise KeyError.
    origin_data['dealer_did_comp'] = origin_data[['dealer_city', 'did_city', 'dealer_province', 'did_province']].apply(
        lambda x: dealer_did(x['dealer_city'], x['did_city'], x['dealer_province'], x['did_province']), axis=1)

    # BUG FIX: per this function's docstring, leads_dealer_comp compares the lead's city
    # with the DEALER's city; the original used did_* (copy-paste from the block above).
    origin_data['leads_dealer_comp'] = origin_data[['leads_city', 'dealer_city', 'leads_province', 'dealer_province']].apply(
        lambda x: leads_dealer(x['leads_city'], x['dealer_city'], x['leads_province'], x['dealer_province']), axis=1)

    origin_data['leads_did_comp'] = origin_data[['leads_city', 'did_city', 'leads_province', 'did_province']].apply(
        lambda x: leads_did(x['leads_city'], x['did_city'], x['leads_province'], x['did_province']), axis=1)

    origin_data = name_wash(origin_data)  # adds the derived columns ['name1', 'name2']

    return origin_data


# Confirm the runtime environment uses sklearn 0.19.1 and joblib 0.14
if __name__ == '__main__':

    # --- One-off preprocessing (disabled) -----------------------------------
    # Read the train/validate files, derive the base feature columns via
    # data_pre_base, and write the enriched data back to the configured paths:
    #
    # source = data_reader(config['Train_data'],
    #                      target=config["target"],
    #                      encoding=config["Train_data_encoding"])
    # validate = data_reader(config['Validate_data'],
    #                        target=config["target"],
    #                        encoding=config["Validate_data_encoding"])
    #
    # source = data_pre_base(source)
    # validate = data_pre_base(validate)
    #
    # try:
    #     source.to_excel(config['Train_data'], encoding=config["Train_data_encoding"])
    # except:
    #     source.to_csv(config['Train_data'], encoding=config["Train_data_encoding"])
    #
    # try:
    #     validate.to_excel(config['Validate_data'], encoding=config["Train_data_encoding"])
    # except:
    #     validate.to_csv(config['Validate_data'], encoding=config["Train_data_encoding"])

    # A and B take different shapes depending on config["train_mode"]:
    #   combination features:    A = {'A1': [cols...], 'A2': [...]}, B = {'B1': [...], ...}
    #   traversal features:      A = ['leads_did_comp', 'leads_year', 'source']
    #   traversal value filter:  A = ['五区', '六区']
    # Current run: value combinations (comb_val); keys per dict must line up with
    # config["combination_vals_types"].
    A = dict(A1=['九区', '七区'],
             A2=['七区'])

    B = dict(B1=['五区'],
             B2=['六区'])

    # candidate quantile grid: [i for i in np.arange(0.2, 1, 0.2).tolist()]

    start(config, A, B)

