#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# @Time    : 2018/11/15 19:15
# @Author  : liujiantao
# @Site    : 
# @File    : base_helper.py
# @Software: PyCharm
'''
注意：初赛测试集的日期数据（day）也是从1开始，但这里的日期1和训练集中的日期1不是同一天，
而是指初赛测试集中的第一天的数据。（同理，请区分复赛测试集中的日期与其他数据集中的日期）
复赛测试集，在时间上晚于初赛测试集。
整体的时间顺序为： 训练集时间 早于 初赛测试集时间 早于 复赛测试集时间
'''
# 数据集共分为训练数据集、初赛测试数据集、复赛测试数据集。
# 训练数据集中的文件包含黑白样本标签、用户交易详单、用户操作详单。
# 初赛和复赛的测试集数据中则只包含用户交易详单、用户操作详单。
# 测试数据
# 操作详情表单，共1769049条数据；
import random
import traceback

import time
from sklearn import tree
import pandas as pd
import numpy as np

data_base_path = "F:\\001experience\\DataCastleMatch\\tiancheng\\data\\"
sub_base_path = data_base_path + "submmit/"
features_base_path = data_base_path + "features/"
operation_round1_new = data_base_path + "test_operation_round2.txt"
# transaction detail sheet, 168,981 rows in total
transaction_round1_new = data_base_path + "test_transaction_round2.txt"
# training-set data:
# operation detail sheet, 1,460,843 rows in total
operation_train_new = data_base_path + "operation_train_new.txt"
# black/white sample labels, 31,179 rows in total
tag_train_new = data_base_path + "tag_train_new.txt"
# transaction detail sheet, 264,654 rows in total
transaction_train_new = data_base_path + "transaction_train_new.txt"
# sample submission file
sub = sub_base_path + "sub.csv"

# operation header: raw column names of the operation detail sheet
operation_header = ["UID", "day", "mode", "success", "time", "os", "version", "device1", "device2",
                    "device_code1", "device_code2", "device_code3", "mac1", "mac2", "ip1", "ip2",
                    "wifi", "geo_code", "ip1_sub", "ip2_sub"]
merge_train_path = data_base_path + "merge_train.csv"
merge_test_path = data_base_path + "merge_test.csv"

# manually curated feature whitelists (currently empty)
features_select_cols = []
features_select_cols02 = []
features_name_all = list(set(features_select_cols + features_select_cols02))


# print(features_name_all)
# def find_wrong(data, feature):
#     tag_count = data.groupby([feature])['Tag'].count().reset_index()
#     black = (data.groupby([feature])['Tag'].sum() / tag_count['Tag']).sort_values(
#         ascending=False)
#
#     black = black.reset_index()
#     black = black.merge(tag_count, on=feature, how='left')
#     black.columns = [feature, 'Tag', 'Tag_cnt']
#     # black[black['Tag_cnt']>2]
#     black = black.sort_values(by=['Tag', 'Tag_cnt'], ascending=False)
#     return black


def get_is_no_ftr(train, test, col):
    """Attach per-category target statistics of ``col`` to train and test.

    For every distinct value of ``col`` in ``train``, three columns are
    computed from the training labels and left-joined onto both frames:

    * ``op<col>_tag_cnt``      - number of training rows with that value
    * ``op<col>_tag_sum``      - number of positive (Tag == 1) rows
    * ``op<col>_tag_sum_rate`` - positive rate (sum / count)

    :param train: training DataFrame, must contain ``col`` and ``Tag``
    :param test: test DataFrame, must contain ``col``
    :param col: name of the categorical column to aggregate on
    :return: (train, test) with the three new feature columns attached
    """
    tag_cnt = "op" + col + "_tag_cnt"
    tag_sum = "op" + col + "_tag_sum"
    tag_rate = "op" + col + "_tag_sum_rate"
    # groupby().agg({name: func}) renaming was deprecated and then removed
    # from pandas; aggregate first, then rename the columns explicitly.
    tag_gb_st = train.groupby([col])['Tag'].agg(['count', 'sum']).reset_index()
    tag_gb_st.columns = [col, tag_cnt, tag_sum]
    # BUG FIX: the rate used the non-existent columns 'tag_sum'/'tag_cnt'
    # (the real names carry the "op<col>_" prefix), which raised a KeyError.
    tag_gb_st[tag_rate] = tag_gb_st[tag_sum] / tag_gb_st[tag_cnt]
    train = train.merge(tag_gb_st, on=col, how='left')
    test = test.merge(tag_gb_st, on=col, how='left')
    return train, test


def deal_no_effict(train, test, cols):
    """Blank out categorical values seen in train but never in test.

    For each object-dtype column in ``cols``: ``*time*`` columns are first
    truncated to their first two characters (the hour); then any train value
    absent from the test set is replaced with '' so the model cannot rely on
    categories it will never see at prediction time.

    :param train: training DataFrame (modified and returned)
    :param test: test DataFrame (time columns are truncated in place)
    :param cols: column names to process; non-object columns are skipped
    :return: (train, test)
    """
    print("deal cols in train not in test")
    for i, col in enumerate(cols):
        print(i, col)
        if train[col].dtype != 'object':
            continue
        if 'time' in col:
            # keep only the hour part, e.g. "12:34:56" -> "12"
            train[col] = train[col].map(lambda x: x[:2])
            test[col] = test[col].map(lambda x: x[:2])
        try:
            dif_value = set(train[col]).difference(test[col])
            if dif_value:  # blank out category values the test set lacks
                # .loc replaces the original chained assignment, which can
                # silently fail to write through under copy-on-write pandas
                train.loc[train[col].isin(dif_value), col] = ''
        except TypeError:
            # unhashable cell values cannot be set-compared; skip the column
            pass
    # BUG FIX: dropna returns a new frame; the original discarded the result
    train = train.dropna(axis=0, how='all')
    return train, test


def deal_data_v1(train, test):
    """Attach per-column target statistics and WOE encodings.

    For every test column except the label columns (``UID``/``Tag``), merge
    the target count/sum/rate features from :func:`get_is_no_ftr`, then
    apply ``woe_all`` (defined elsewhere in the project) over the same
    columns.

    :param train: training DataFrame containing the ``Tag`` label
    :param test: test DataFrame
    :return: (train, test) with the extra feature columns
    """
    # idiom fix: plain filter instead of a list comprehension run only for
    # its .remove() side effects
    cols = [c for c in test.columns.values if c not in tag_header]
    for i, col in enumerate(cols):
        print(i, col)
        try:
            train, test = get_is_no_ftr(train, test, col)
        except Exception:
            # best effort: columns that cannot be aggregated are skipped
            pass
    train, test = woe_all(train, test, train[tag_hd.Tag], cols)
    return train, test


# def get_is_ftr(tst_tr, tst_rd1, label_test, label_train, train, test):
#     """
#
#     :return:
#     """
#
#     return train, test


class OperationHeader(object):
    """
    Column-name constants for the operation (user action) detail sheet.
    """
    UID = "UID"
    day = "day"
    mode = "mode"
    success = "success"
    time = "time"
    os = "os"
    version = "version"
    device1 = "device1"
    device2 = "device2"
    device_code1 = "device_code1"
    device_code2 = "device_code2"
    device_code3 = "device_code3"
    mac1 = "mac1"
    mac2 = "mac2"
    ip1 = "ip1"
    ip2 = "ip2"
    wifi = "wifi"
    geo_code = "geo_code"
    ip1_sub = "ip1_sub"
    ip2_sub = "ip2_sub"
    # NOTE(review): the two names below are not in operation_header above —
    # presumably derived feature columns; confirm against the feature code.
    data_effective_num = "data_effective_num"
    data_lose_rate = "data_lose_rate"


# shared singleton used throughout this module for column lookups
op_hd = OperationHeader()

# transaction header
transaction_header = ["UID", "channel", "day", "time", "trans_amt", "amt_src1", "merchant", "code1",
                      "code2", "trans_type1", "acc_id1", "device_code1", "device_code2",
                      "device_code3", "device1", "device2", "mac1", "ip1", "bal", "amt_src2",
                      "acc_id2", "acc_id3", "geo_code", "trans_type2", "market_code", "market_type",
                      "ip1_sub"]


class TransactionHeader(object):
    """
    Column-name constants for the transaction detail sheet.
    """
    UID = "UID"
    channel = "channel"
    day = "day"
    time = "time"
    trans_amt = "trans_amt"
    amt_src1 = "amt_src1"
    merchant = "merchant"
    code1 = "code1"
    code2 = "code2"
    trans_type1 = "trans_type1"
    acc_id1 = "acc_id1"
    device_code1 = "device_code1"
    device_code2 = "device_code2"
    device_code3 = "device_code3"
    device1 = "device1"
    device2 = "device2"
    mac1 = "mac1"
    ip1 = "ip1"
    bal = "bal"
    amt_src2 = "amt_src2"
    acc_id2 = "acc_id2"
    acc_id3 = "acc_id3"
    geo_code = "geo_code"
    trans_type2 = "trans_type2"
    market_code = "market_code"
    market_type = "market_type"
    ip1_sub = "ip1_sub"


# shared singleton used throughout this module for column lookups
tr_hd = TransactionHeader()
# tag header: columns of the black/white label sheet
tag_header = ["UID", "Tag"]


class TagHeader(object):
    """Column-name constants for the black/white label sheet."""
    UID = "UID"
    Tag = "Tag"


# shared singleton used throughout this module for column lookups
tag_hd = TagHeader()


def get_operation_round1_new():
    """
    Load the test-set operation detail sheet (1,769,049 rows).
    NOTE(review): the name says round1 but the path points at the round-2
    file test_operation_round2.txt — confirm which round is intended.
    :return: pandas.DataFrame
    """
    return pd.read_csv(operation_round1_new)


def get_transaction_round1_new():
    """
    Load the test-set transaction detail sheet (168,981 rows).
    Note: the test set's day counter also starts at 1, but its day 1 is NOT
    the same calendar day as day 1 of the training set — it is the first day
    of the test set.  (The round-2 test set is likewise offset, and is later
    in time than round 1.)
    :return: pandas.DataFrame
    """
    return pd.read_csv(transaction_round1_new)


def get_operation_train_new():
    """
    Load the training-set operation detail sheet (1,460,843 rows).
    :return: pandas.DataFrame
    """
    return pd.read_csv(operation_train_new)


def get_transaction_train_new():
    """
    Load the training-set transaction detail sheet (264,654 rows).
    :return: pandas.DataFrame
    """
    return pd.read_csv(transaction_train_new)


def get_tag_train_new():
    """
    Load the training-set black/white sample labels (31,179 rows).
    :return: pandas.DataFrame with UID and Tag columns
    """
    return pd.read_csv(tag_train_new)


def get_sub():
    """
    Load the sample submission file.
    :return: pandas.DataFrame
    """
    return pd.read_csv(sub)


def one_hot_Encoder(data, one_hot_col):
    """Label-encode the given columns in place.

    Despite the name this performs *label* encoding (integer codes), not
    one-hot encoding.  Values are first coerced to int when possible, so
    e.g. '1' and 1 share a code; otherwise the raw values are encoded.

    :param data: DataFrame to encode (modified and returned)
    :param one_hot_col: iterable of column names to encode
    :return: the DataFrame with the columns replaced by integer codes
    """
    from sklearn.preprocessing import LabelEncoder
    for feature in one_hot_col:
        try:
            data[feature] = LabelEncoder().fit_transform(data[feature].apply(int))
        except (TypeError, ValueError):
            # non-numeric values: fall back to encoding the raw values
            # (bare except narrowed to what int() can actually raise)
            data[feature] = LabelEncoder().fit_transform(data[feature])
    return data


def tpr_weight_funtion(y_true, y_predict):
    """Competition metric: weighted TPR at FPR closest to 0.001/0.005/0.01.

    Predictions are ranked by descending score; the true-positive rate is
    read off at the cut-offs where the false-positive rate is nearest each
    target, and combined as 0.4*TPR@0.001 + 0.3*TPR@0.005 + 0.3*TPR@0.01.

    :param y_true: iterable of 0/1 labels
    :param y_predict: iterable of scores, higher = more positive
    :return: weighted score in [0, 1]
    """
    ranked = pd.DataFrame({'prob': list(y_predict), 'y': list(y_true)})
    ranked = ranked.sort_values(['prob'], ascending=[0])
    labels = ranked.y
    counts = pd.Series(labels).value_counts()
    total_pos = counts[1]
    total_neg = counts[0]
    cum_pos = ranked['y'].cumsum()
    cum_neg = np.arange(len(labels)) - cum_pos + 1
    tpr = cum_pos / total_pos
    fpr = cum_neg / total_neg
    score = 0.0
    for target, weight in ((0.001, 0.4), (0.005, 0.3), (0.01, 0.3)):
        score += weight * tpr[abs(fpr - target).idxmin()]
    return score


def feature_importance(clf, feature_name01, X_test, y_test):
    """Print a feature-importance ranking and evaluation metrics.

    Prints every feature with its importance (descending), then accuracy,
    recall, a per-class classification report and Cohen's kappa on the
    given test split.

    :param clf: fitted classifier exposing feature_importances_ and predict
    :param feature_name01: indexable of feature names, training order
    :param X_test: test feature matrix
    :param y_test: test labels
    """
    importances = clf.feature_importances_
    indices = np.argsort(importances)[::-1]
    size = len(feature_name01)
    # Print the feature ranking
    print(str(size) + " Feature ranking:")
    try:
        cols = []
        for f in range(size):
            rate = importances[indices[f]]
            feature_name = feature_name01[indices[f]]
            print("%d  %s  <==> importances rate: (%f)" % (f,feature_name , rate))
            # NOTE(review): sklearn tree importances sum to 1, so rate > 1
            # can never hold there; this threshold only makes sense for
            # count-based importances (e.g. boosters) — confirm intent.
            if rate >1:
                cols.append(feature_name)
        print(cols)
    except:
        traceback.print_exc()
        print(feature_name01[indices[f]])
    ytestPre = clf.predict(X_test)
    from sklearn.metrics import accuracy_score, recall_score
    accuracy = accuracy_score(y_test, ytestPre)
    recall = recall_score(y_test, ytestPre)
    print(u'准确率： %.4f%%' % (100 * accuracy))
    print(u'召回率： %.4f%%' % (100 * recall))
    from sklearn.metrics import classification_report
    target_names = ['label is 0', 'label is 1']
    print(classification_report(y_test, ytestPre, target_names=target_names))
    from sklearn.metrics import cohen_kappa_score
    kappa_score = cohen_kappa_score(y_test, ytestPre)
    print(u'kappa score是一个介于(-1, 1)之间的数. score>0.8意味着好的分类；0或更低意味着不好（实际是随机标签）： %.4f%%' % (100 * kappa_score))


def status(x):
    """Return a 14-entry descriptive-statistics summary of a Series.

    Entries (in order): total, minimum, minimum_position, 25%_quantile,
    Median, 75%_Quantile, Mean, Maximum, Maximum (the argmax position —
    label kept for backward compatibility), MeanAbsoluteDeviation,
    Variance, StandardDeviation, skewness, kurtosis.

    :param x: numeric pandas Series
    :return: pandas Series of the statistics
    """
    # Series.mad() was removed in pandas 2.0; compute it explicitly so this
    # works on both old and new pandas.
    mad = (x - x.mean()).abs().mean()
    # NOTE(review): 'Maximum' appears twice (the second is the argmax
    # position); kept as-is to preserve the output schema — confirm before
    # renaming, since duplicate labels break label-based selection.
    return pd.Series([x.count(), x.min(), x.idxmin(), x.quantile(.25), x.median(),
                      x.quantile(.75), x.mean(), x.max(), x.idxmax(), mad, x.var(),
                      x.std(), x.skew(), x.kurt()],
                     index=['total', 'minimum', 'minimum_position', '25%_quantile ',
                            'Median', '75%_Quantile', 'Mean', 'Maximum', 'Maximum',
                            'MeanAbsoluteDeviation', 'Variance', 'StandardDeviation',
                            'skewness', ' kurtosis'])


def add_row(df):
    """
    Append one all-zero row (labelled len(df)+1) to the frame.
    :param df: DataFrame, modified in place
    :return: the same DataFrame
    """
    zero_row = [0.0] * df.shape[1]
    df.loc[df.shape[0] + 1] = zero_row
    return df


def outliers_detection(data=None):
    """Filter outlier rows from one user's feature frame via IsolationForest.

    The frame is deduplicated, NaNs are filled (forward, then backward, then
    -1 for fully-empty columns), the UID/Tag columns are split off, the rest
    is standard-scaled and scored by an IsolationForest with a 2%
    contamination assumption.  Rows predicted regular are kept (the outlier
    rows are kept instead if *everything* was flagged), and the UID/Tag of
    the first row are re-attached to every kept row.

    :param data: DataFrame holding the rows of a single UID, incl. UID/Tag
    :return: (filtered DataFrame, outlier ratio, the UID/Tag frame)
    """
    from sklearn import preprocessing
    from sklearn.ensemble import IsolationForest
    # BUG FIX: df / outliers_rate / tag_uid were unbound when an exception
    # fired before their assignment, turning the except path into a
    # NameError at the return statement.
    df = data
    outliers_rate = 0
    tag_uid = None
    try:
        tag_uid = data[tag_header].drop_duplicates(tag_hd.UID)
        if data.drop_duplicates().shape[0] <= 1:
            data = data.astype('float64')
            return data, 0, tag_uid
        data = data.drop_duplicates()
        sample_siaze = data.shape[0]
        # forward fill from the previous row, then backward fill,
        # then -1 for cells that are still empty
        # (fillna(method=...) is deprecated; .ffill()/.bfill() are identical)
        data = data.ffill()
        data = data.bfill()
        data.fillna(-1, inplace=True)
        data = data.reset_index(drop=True)
        data = data.astype('float64')
        data.pop(tag_hd.Tag)
        data.pop(tag_hd.UID)
        normolized_data = preprocessing.StandardScaler().fit_transform(data)
        rng = np.random.RandomState(42)
        outliers_fraction = 0.02  # assumed share of outlier samples
        # fit the model
        mechine_learning = IsolationForest(max_samples=sample_siaze, random_state=rng, contamination=outliers_fraction)
        mechine_learning.fit(normolized_data)
        detection = mechine_learning.predict(normolized_data)
        outliers = list(np.where(detection == -1)[0])
        regular = list(np.where(detection == 1)[0])
        outliers_rate = len(outliers) / sample_siaze
        if len(regular) <= 0:
            # everything flagged: better to keep the rows than return nothing
            df = data.loc[outliers]
        else:
            df = data.loc[regular]
        if df.shape[0] <= 1:
            df = add_row(df)
        df[tag_hd.UID] = tag_uid[tag_hd.UID].tolist()[0]
        df[tag_hd.Tag] = tag_uid[tag_hd.Tag].tolist()[0]
        print("outliers_rate:", outliers_rate)
        del regular
        del data
    except Exception:
        # best effort: log and fall through with the safe defaults above
        traceback.print_exc()
    return df, outliers_rate, tag_uid


def normolized_data(data):
    """
    Standard-scale (zero mean, unit variance) the given feature matrix.
    :param data: array-like / DataFrame of numeric features
    :return: numpy array of scaled values
    """
    from sklearn import preprocessing
    normolized_data = preprocessing.StandardScaler().fit_transform(data)
    return normolized_data


def filed2encode_f(df, filed):
    """Map the raw values of one column to integer codes in place.

    Codes are assigned in first-occurrence order, making the encoding
    deterministic across runs (the original iterated an unordered set, so
    the codes depended on string-hash randomisation between processes).

    :param df: DataFrame (modified and returned)
    :param filed: column name to encode
    :return: df with the column replaced by integer codes
    """
    filed2encode = {value: code for code, value in
                    enumerate(dict.fromkeys(df[filed]))}
    df[filed] = df[filed].map(filed2encode)
    return df


def is_none(d):
    """Return True when d is None or one of the textual null markers."""
    null_markers = ('None', '?', '', 'NULL', 'null')
    return d is None or d in null_markers


def get_features(data):
    """
    Build per-UID aggregates (count/nunique/max/min/sum/mean/std/var) of
    every non-label column and left-join them onto the UID/Tag frame.
    :param data: DataFrame containing UID, Tag and feature columns
    :return: label frame with the aggregate columns appended
    """
    label = data[tag_header]
    uid_gb = data.groupby(['UID'])
    feature_cols = [c for c in data.columns.values.tolist()
                    if c not in (tag_hd.Tag, tag_hd.UID)]
    stats = ('count', 'nunique', 'max', 'min', 'sum', 'mean', 'std', 'var')
    for feature in feature_cols:
        print(feature)
        grouped = uid_gb[feature]
        for stat in stats:
            agg = getattr(grouped, stat)().reset_index()
            label = label.merge(agg, on='UID', how='left')
    return label


def get_feature(op, trans, label):
    for feature in op.columns[2:]:
        label = label.merge(op.groupby(['UID'])[feature].count().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].nunique().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].max().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].min().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].sum().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].mean().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].std().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].var().reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].quantile(.25).reset_index(), on='UID', how='left')
        label = label.merge(op.groupby(['UID'])[feature].quantile(.75).reset_index(), on='UID', how='left')

    for feature in trans.columns[2:]:
        label = label.merge(trans.groupby(['UID'])[feature].count().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].nunique().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].max().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].min().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].sum().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].mean().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].std().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].var().reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].quantile(.25).reset_index(), on='UID', how='left')
        label = label.merge(trans.groupby(['UID'])[feature].quantile(.75).reset_index(), on='UID', how='left')
    label.columns = [item + "_" + str(i) for i, item in enumerate(list(label.columns))]
    return label


# cached feature files built from the merged operation / transaction sheets
opmerge_ftrs_path = features_base_path + "opmerge_ftrs.csv"
trsct_merge_ftrs_path = features_base_path + "trsct_merge_ftrs.csv"


def get_opmerge_ftrs():
    """Load the cached operation-merge feature file as a DataFrame."""
    return pd.read_csv(opmerge_ftrs_path)


def get_trsct_merge_ftrs():
    """Load the cached transaction-merge feature file as a DataFrame."""
    return pd.read_csv(trsct_merge_ftrs_path)


def get_woe(data, feature, sep_value, target):
    """Bin ``feature`` at the given cut points and compute per-bin WOE / IV.

    :param data: input DataFrame
    :param feature: name of the numeric column to bin
    :param sep_value: list of cut points (strings or numbers); one value
        produces two bins, k values produce k+1 bins; a trailing "NA" bin
        collects null rows
    :param target: name of the binary label column (0/1)
    :return: DataFrame indexed by bin with the class range, per-class
        counts, percentages, WOE and IV; the row labelled ' ' holds the
        totals and the summed IV
    """
    import math
    sep_len = len(sep_value)
    dict_bin = {}        # bin index -> DataFrame of rows in that bin
    class_bin = {}       # bin index -> human-readable range label
    len_dict_bin = {}    # bin index -> row count
    len_dict_bin_0 = {}  # bin index -> count of target == 0
    len_dict_bin_1 = {}  # bin index -> count of target == 1
    woe_bin = {}
    iv_bin = {}
    if sep_len == 1:
        dict_bin[0] = data.loc[data[feature] <= float(sep_value[0]), :]
        dict_bin[1] = data.loc[data[feature] > float(sep_value[0]), :]
        # BUG FIX: the NA bucket used to hold an int (sum of isnull), which
        # crashed both len() below and the per-target counting loop.
        dict_bin[2] = data.loc[data[feature].isnull()]
        len_dict_bin[0] = len(dict_bin[0])
        len_dict_bin[1] = len(dict_bin[1])
        len_dict_bin[2] = len(dict_bin[2])
        class_bin[0] = "(0," + sep_value[0] + "]"
        class_bin[1] = "(" + sep_value[0] + "...)"
        class_bin[2] = "NA"
    else:
        for index, item in enumerate(sep_value):
            if index == 0:
                dict_bin[0] = data.loc[data[feature] <= float(item), :]
                len_dict_bin[0] = len(dict_bin[0])
                class_bin[0] = "(0," + str(float(item)) + "]"
            else:
                dict_bin[index] = (
                    data.loc[(data[feature] >= float(sep_value[index - 1])) & (data[feature] < float(item)),
                    :])
                len_dict_bin[index] = len(dict_bin[index])
                class_bin[index] = "(" + str(sep_value[index - 1]) + "," + str(sep_value[index]) + "]"
        dict_bin[index + 1] = data.loc[data[feature] > float(item), :]
        dict_bin[index + 2] = data.loc[data[feature].isnull()]
        len_dict_bin[index + 1] = len(dict_bin[index + 1])
        len_dict_bin[index + 2] = len(dict_bin[index + 2])
        class_bin[index + 1] = "(" + str(sep_value[index]) + "...)"
        class_bin[index + 2] = "NA"

    for index in dict_bin:
        len_dict_bin_0[index] = len(dict_bin[index][dict_bin[index][target] == 0])
        len_dict_bin_1[index] = len(dict_bin[index][dict_bin[index][target] == 1])

    len_data_0 = len(data[data[target] == 0])
    len_data_1 = len(data[data[target] == 1])
    for index in dict_bin:
        try:
            # WOE = ln(bin share of 1s / bin share of 0s).
            # BUG FIX: math.log(math.e, x) computes log_x(e) = 1/ln(x) —
            # the arguments were swapped; WOE needs the natural log of the
            # distribution ratio.
            odds = (float(len_dict_bin_1[index]) / float(len_data_1)) / (
                    float(len_dict_bin_0[index]) / float(len_data_0))
            woe_bin[index] = math.log(odds)
            iv_bin[index] = ((float(len_dict_bin_1[index]) / float(len_data_1)) - (
                    float(len_dict_bin_0[index]) / float(len_data_0))) * woe_bin[index]
        except Exception:
            # empty bins yield a zero or undefined odds ratio
            woe_bin[index] = 0
            iv_bin[index] = 0

    dict_result = {}
    len_dict_bin_0[" "] = len_data_0
    len_dict_bin_1[" "] = len_data_1
    woe_bin[" "] = ""
    iv_bin[" "] = sum(iv_bin.values())
    class_bin[" "] = ""
    len_dict_bin[" "] = len(data)
    dict_result["bad"] = len_dict_bin_0
    dict_result["good"] = len_dict_bin_1
    dict_result["all"] = len_dict_bin
    dict_result["woe"] = woe_bin
    dict_result["iv"] = iv_bin
    dict_result["class"] = class_bin
    df = pd.DataFrame(dict_result)

    dict_result["%good"] = (df['good'] / df['all']).map('{:.2%}'.format)
    dict_result["%bad"] = (df['bad'] / df['all']).map('{:.2%}'.format)
    df["%good"] = dict_result["%good"]
    df["%bad"] = dict_result["%bad"]

    # BUG FIX: DataFrame.ix was removed from pandas, and the selected
    # 'good_rate'/'bad_rate' columns never existed; reorder with .loc and
    # the percentage columns that are actually built above.
    df = df.loc[:, ['class', 'good', 'bad', '%good', '%bad', 'all', 'woe', 'iv']]
    return df


def smote_over_sample(X_train, y_train, i):
    """
    Over-sample the minority class with imblearn's RandomOverSampler.
    NOTE(review): despite the name this is plain random over-sampling, not
    SMOTE; and ``.sample()`` belongs to the old imblearn API (newer versions
    use ``fit_resample``) — confirm the installed imblearn version.
    https://www.kaggle.com/residentmario/undersampling-and-oversampling-imbalanced-data/notebook
    :param X_train: feature matrix
    :param y_train: labels
    :param i: random seed
    :return: (X_resampled, y_resampled)
    """
    from imblearn.over_sampling import RandomOverSampler
    ros = RandomOverSampler(random_state=i)
    ros.fit(X_train, y_train)
    X_resampled, y_resampled = ros.sample(X_train, y_train)
    return X_resampled, y_resampled


def smote_under_sampling(X, y, n_subsets):
    """
    Under-sample the majority class into several balanced subsets with
    imblearn's EasyEnsemble.
    NOTE(review): EasyEnsemble and ``.sample()`` were removed from modern
    imblearn (replaced by EasyEnsembleClassifier / ``fit_resample``) —
    confirm the installed imblearn version.
    https://www.kaggle.com/residentmario/undersampling-and-oversampling-imbalanced-data/notebook
    :param X: feature matrix
    :param y: labels
    :param n_subsets: number of balanced subsets to generate
    :return: (X_resampled, y_resampled)
    """
    from imblearn.ensemble import EasyEnsemble
    e = EasyEnsemble(random_state=0, n_subsets=n_subsets)
    e.fit(X, y)
    X_resampled, y_resampled = e.sample(X, y)
    return X_resampled, y_resampled


# import matplotlib.pyplot as plt

def parallel_map(function, __tag_list, process_num=10):
    """Apply ``function`` to every item of ``__tag_list`` on a thread pool.

    :param function: callable taking a single item
    :param __tag_list: iterable of items
    :param process_num: number of worker threads (default 10)
    :return: list of results, in input order
    :raises multiprocessing.TimeoutError: if a single call exceeds 120 s
    """
    from multiprocessing.pool import ThreadPool
    # BUG FIX: the old `if pool is None` re-creation branch right after the
    # constructor was dead code, and the pool was never closed (thread leak).
    pool = ThreadPool(process_num)
    try:
        async_results = [pool.apply_async(function, [tag]) for tag in __tag_list]
        return [result.get(timeout=120) for result in async_results]
    finally:
        pool.close()
        pool.join()


class Benchmark(object):
    """
    Minimal timing context manager: prints elapsed wall time on exit.
    """

    def __init__(self, prefix=None):
        # keep a trailing space so the printed line reads naturally
        self.prefix = prefix + ' ' if prefix else ''

    def __enter__(self):
        """Record the start timestamp when the with-block is entered."""
        self.start = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Print the elapsed wall time; cleanup hooks (closing files etc.)
        would also belong here.
        :param exc_type: exception class raised in the block, if any
        :param exc_val: exception instance
        :param exc_tb: traceback
        """
        elapsed = time.time() - self.start
        print('%s time: %.4f sec: ' % (self.prefix, elapsed))


class DecisionTreeClassifierParser(object):
    '''
    Parse a fitted sklearn DecisionTreeClassifier into human-readable
    decision rules (one rule per root-to-leaf path).
    '''

    def __init__(self, model, feature_names):
        '''
        :param model: a fitted DecisionTreeClassifier
        :param feature_names: list of feature names, in training order
        '''
        self.model = model
        self.feature_names = feature_names
        # only DecisionTreeClassifier is supported
        if not isinstance(self.model, tree.DecisionTreeClassifier):
            # BUG FIX: `raise '<string>'` is illegal in Python 3 (it raised
            # "TypeError: exceptions must derive from BaseException");
            # raise a proper TypeError carrying the same message.
            raise TypeError('The given model is not the supported sklearn model DecisionTreeClassifier')

    def __parser(self):
        '''
        Recursively walk every branch of the fitted tree.
        Partly based on: https://stackoverflow.com/questions/20224526/how-to-extract-the-decision-rules-from-scikit-learn-decision-tree
        :return: list of branches; each branch is a list of
                 (parent_id, op, threshold, feature) tuples followed by a
                 (leaf_id, 'leaf', label, n_samples, missclassfication_rate)
                 terminal tuple
        '''
        parsed_tree = []
        # ids of left children per node
        left = self.model.tree_.children_left
        # ids of right children per node
        right = self.model.tree_.children_right
        # split threshold per node
        threshold = self.model.tree_.threshold
        # feature name per node
        features = [self.feature_names[i] for i in self.model.tree_.feature]
        # value: per-class sample counts at each node
        values = self.model.tree_.value
        # total (weighted) number of samples per node; the sum of value
        # should equal this in the unweighted case
        number_of_nodes = self.model.tree_.weighted_n_node_samples
        # leaf node ids (children_left == -1 marks a leaf)
        idx = np.argwhere(left == -1)[:, 0]

        def recurse(left, right, child, lineage=None):
            if lineage is None:
                v = values[child][0]
                label = np.argmax(v)
                # misclassification rate at this leaf
                missclassfication_rate = min(v) / sum(v)
                # leaf id, label, samples routed here, misclassification prob
                lineage = [(child, 'leaf', label, int(number_of_nodes[child]), missclassfication_rate)]
            if child in left:
                parent = np.where(left == child)[0].item()
                split = '<='
            else:
                parent = np.where(right == child)[0].item()
                split = '>'

            lineage.append((parent, split, threshold[parent], features[parent]))

            if parent == 0:
                lineage.reverse()
                return lineage
            else:
                return recurse(left, right, parent, lineage)

        for child in idx:
            one_branch = []
            for node in recurse(left, right, child):
                one_branch.append(node)
            parsed_tree.append(one_branch)
        return parsed_tree

    def get_parsed_tree(self):
        '''Return the parsed tree as a list of rule dicts.

        Returns
        -------
        [
          {
            "missclassfication_rate": 0.5,  # error rate along the path
            "sample_number": 2,             # training samples on the path
            "decision_path": "col2 <= 5.5 and col1 <= 1.5 and col2 <= 3.5",
            # redundant conditions are merged, e.g. col2 > 3.5 and
            # col2 > 4.5 collapse to col2 > 4.5
            "label": 0                      # predicted label of the path
          }
        ]
        '''
        res = []
        for t in self.__parser():
            sub_statement = []
            sub_statement_container = {}  # keyed by e.g. 'col1 <='
            for each in t[0:-1]:
                # merge mutually-subsuming conditions on the same
                # feature/operator pair, keeping the tightest bound
                statement_key = str(each[3]) + ' ' + str(each[1])
                if statement_key in sub_statement_container.keys():
                    if each[1] == '>' and sub_statement_container[statement_key] < each[2]:
                        sub_statement.remove(statement_key + ' ' + str(sub_statement_container[statement_key]))
                        sub_statement.append(statement_key + ' ' + str(each[2]))
                        sub_statement_container[statement_key] = each[2]
                    elif each[1] == '<=' and sub_statement_container[statement_key] > each[2]:
                        sub_statement.remove(statement_key + ' ' + str(sub_statement_container[statement_key]))
                        sub_statement.append(statement_key + ' ' + str(each[2]))
                        sub_statement_container[statement_key] = each[2]
                else:
                    sub_statement_container[statement_key] = each[2]
                    sub_statement.append(str(each[3]) + ' ' + str(each[1]) + ' ' + str(each[2]))
            res.append({
                'decision_path': ' and '.join(sub_statement),  # joined with 'and'
                'label': t[-1][2],
                'sample_number': t[-1][3],
                'missclassfication_rate': t[-1][4]
            })

        return res


# class display names used in tree plots and classification reports
target_names = ['label is 0', 'label is 1']


def plot_fig(clf, figpath, feature_name):
    """
    Render a fitted decision tree to a graphviz figure file.
    :param clf: fitted sklearn decision tree
    :param figpath: output path passed to graphviz render
    :param feature_name: list of feature names in training order
    """
    import graphviz
    feature_names = np.array(feature_name)
    dot_data = tree.export_graphviz(clf, out_file=None,  # doctest: +SKIP
                                    feature_names=feature_names,  # doctest: +SKIP
                                    class_names=target_names,  # doctest: +SKIP
                                    filled=True, rounded=True,  # doctest: +SKIP
                                    special_characters=True)
    graph = graphviz.Source(dot_data)  # doctest: +SKIP
    graph.render(figpath)


# Variance-threshold selection: compute each feature's variance and keep
# only the features whose variance exceeds the threshold, using sklearn's
# feature_selection.VarianceThreshold, e.g.:
# ll = VarianceThreshold(threshold=2).fit_transform(df_data.fillna(0.0))
def VarianceThreshold_selector(data, threshold=2):
    """
    Drop features whose variance is <= threshold.
    :param data: numeric DataFrame (NaNs must be filled beforehand)
    :param threshold: minimum variance for a column to survive (default 2)
    :return: DataFrame restricted to the selected columns
    """
    from sklearn.feature_selection import VarianceThreshold
    selector = VarianceThreshold(threshold=threshold)
    selector.fit(data)
    return data[data.columns[selector.get_support(indices=True)]]


print("helper load finish!!!")

# cached train/test feature matrices produced by the feature-selection step
train_data_path = features_base_path + "train_select_ftr.csv"

test_data_path = features_base_path + "test_select_ftr.csv"


def get_test_data():
    """Load the selected test features, drop the label columns and apply
    variance-threshold selection.

    :return: (test DataFrame, list of surviving column names)
    """
    test_data = pd.read_csv(test_data_path)
    # Tag/UID may or may not be present in the cached file; the original
    # bare except also aborted the second pop when the first was missing
    for label_col in (tag_hd.Tag, tag_hd.UID):
        try:
            test_data.pop(label_col)
        except KeyError:
            # only a missing column is expected here
            pass
    # variance-based feature selection
    test_data = VarianceThreshold_selector(test_data.fillna(-1))
    cols = list(test_data.columns.values)
    return test_data, cols


def get_X_y():
    """Load the selected training features plus labels, restricted to the
    columns that survived test-side feature selection.

    :return: (X limited to test cols, y label array, test frame, cols)
    """
    test_data, cols = get_test_data()
    X = pd.read_csv(train_data_path)
    print(X.shape)
    y = get_tag_train_new()[tag_hd.Tag].values
    # drop label columns if present (bare except narrowed to KeyError,
    # and each column is handled independently)
    for label_col in (tag_hd.Tag, tag_hd.UID):
        try:
            X.pop(label_col)
        except KeyError:
            pass

    return X[cols], y, test_data, cols


def get_sampling_X_y(rate):
    """
    Down-sample the negative class of the cached training set.
    The number of kept negatives is n_positives * (1 + rate * 0.5), drawn
    with a seed equal to that count for reproducibility.
    :param rate: controls the negative/positive ratio
    :return: (feature matrix, label array, feature column names)
    """
    train_data = pd.read_csv(train_data_path)
    positives = train_data[train_data[tag_hd.Tag] == 1]
    negatives = train_data[train_data[tag_hd.Tag] == 0]
    keep_n = int(positives.shape[0] * (1 + rate * 0.5))
    sampled_negatives = negatives.sample(n=keep_n, random_state=keep_n)
    train_data = pd.concat([positives, sampled_negatives])
    y = train_data[tag_hd.Tag].values
    train_data.pop(tag_hd.Tag)
    cols = list(train_data.columns.values)
    X = train_data.values
    return X, y, cols


def save_result(sub_preds, model_name, version):
    """
    Write model predictions into the sample submission and save it.
    :param sub_preds: predictions aligned with the submission rows
    :param model_name: used as the output file name prefix
    :param version: appended to the output file name
    """
    submission = get_sub()
    submission['Tag'] = sub_preds
    out_path = sub_base_path + model_name + '_%s.csv' % str(version)
    submission.to_csv(out_path, index=False)


def get_model(model):
    """
    Load a persisted model from disk.
    NOTE(review): sklearn.externals.joblib was removed in scikit-learn
    0.23+; newer environments need `import joblib` directly — confirm the
    installed scikit-learn version before relying on this.
    :param model: path to the joblib-pickled model file
    :return: the unpickled model object
    """
    from sklearn.externals import joblib
    return joblib.load(model)


import decimal

# dedicated decimal context so float_to_str below does not disturb the
# process-wide decimal settings
ctx = decimal.Context()

# 20 significant digits should be enough for everyone :D
ctx.prec = 20


def float_to_str(f):
    """
    Round-trip a float through a fixed-point decimal string, avoiding
    scientific notation in the intermediate representation.
    NOTE(review): despite the name this returns a float, not a string (the
    original docstring claimed a string) — confirm the intended return
    type with callers before changing either.
    :param f: input float
    :return: float parsed back from the fixed-point representation
    """
    d1 = ctx.create_decimal(repr(f))
    return float(format(d1, 'f'))


def fill_mean(df):
    """Fill NaNs of every numeric column with that column's mean.

    Object-dtype columns are left untouched.

    :param df: DataFrame (modified and returned)
    :return: df
    """
    for column in df.columns[df.isnull().sum() > 0]:
        if df[column].dtype == 'object':
            continue
        try:
            # Series.mean skips NaNs, matching the old np.mean behaviour;
            # plain assignment replaces the chained inplace fillna, which
            # stops writing through under copy-on-write pandas
            df[column] = df[column].fillna(df[column].mean())
        except TypeError:
            # non-numeric data slipped past the dtype check; leave as-is
            pass
    return df


def get_train_vail_data(i):
    """
    Build a standardized train/validation split plus the test features.
    :param i: random seed for the split
    :return: (X_train, X_test, y_train, y_test, test frame, feature cols)
    """
    X, y, test, cols = get_X_y()
    # (oversampling hook, currently unused)
    # standardize the features:
    from sklearn.preprocessing import StandardScaler

    X = StandardScaler().fit_transform(X)
    from sklearn.model_selection import train_test_split
    # hold out a random validation split
    # NOTE(review): the original comment said 20% but test_size is 0.3
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=i)
    return X_train, X_test, y_train, y_test, test, cols


def blending_model(res, weights, model_name):
    """
    Blend several models' predictions by weighted average and write the
    blended submission to disk.
    :param res: DataFrame with one column per model's predictions
    :param weights: per-model weights for the average
    :param model_name: used in the output file name
    """
    blend_model = get_sub()
    value = []

    def weight_average(df):
        # df is one column of res.T, i.e. one sample's predictions
        # across all models
        val = df[0:].tolist()
        v = np.average(val, weights=weights)
        value.append(v)

    res.T.apply(weight_average)
    blend_model[tag_hd.Tag] = value
    # merchant ids flagged by a manual rule; used by the commented-out
    # booster below that bumps matching UIDs toward Tag=1
    rule_code = ['5776870b5747e14e', '8b3f74a1391b5427', '0e90f47392008def', '6d55ccc689b910ee', '2260d61b622795fb',
                 '1f72814f76a984fa', 'c2e87787a76836e0', '4bca6018239c6201', '922720f3827ccef8', '2b2e7046145d9517',
                 '09f911b8dc5dfc32', '7cc961258f4dce9c', 'bc0213f01c5023ac', '0316dca8cc63cc17', 'c988e79f00cc2dc0',
                 'd0b1218bae116267', '72fac912326004ee', '00159b7cc2f1dfc8', '49ec5883ba0c1b0e', 'c9c29fc3d44a1d7b',
                 '33ce9c3877281764', 'e7c929127cdefadb', '05bc3e22c112c8c9', '5cf4f55246093ccf', '6704d8d8d5965303',
                 '4df1708c5827264d', '6e8b399ffe2d1e80', 'f65104453e0b1d10', '1733ddb502eb3923', 'a086f47f681ad851',
                 '1d4372ca8a38cd1f', '29db08e2284ea103', '4e286438d39a6bd4', '54cb3985d0380ca4', '6b64437be7590eb0',
                 '89eb97474a6cb3c6', '95d506c0e49a492c', 'c17b47056178e2bb', 'd36b25a74285bebb']
    print(len(rule_code))
    # Test_trans = get_transaction_round1_new()
    # test_rule_uid = pd.DataFrame(Test_trans[Test_trans['merchant'].isin(rule_code)].UID.unique())
    # pred_data_rule = blend_model.merge(test_rule_uid, left_on='UID', right_on=0, how='left')
    # blend_model['Tag'][(pred_data_rule[0] > 0)] = random.uniform(0.95, 1.0)  # this coefficient still needs tuning
    m = sum(weights) / len(weights)
    print(m)
    blend_model.to_csv(sub_base_path + 'blending_' + model_name + str(m) + '_.csv', index=False)


def write_json(path, data):
    """
    Persist *data* as pretty-printed JSON at *path* (best effort).

    Any failure is printed via traceback but deliberately swallowed, so a
    bad dump never aborts the calling pipeline.
    """
    import json
    import traceback
    try:
        outfile = open(path, "w")
        try:
            json.dump(data, outfile, indent=4)
        finally:
            outfile.close()
    except Exception:
        traceback.print_exc()


def get_DecisionTree_rule(clf, feature_name01):
    """
    Extract the rules of a fitted decision tree, persist them as JSON under
    features_base_path and echo them to stdout.

    :param clf: fitted sklearn decision tree classifier.
    :param feature_name01: feature names used to label the tree's splits.
    """
    import json
    parsed = DecisionTreeClassifierParser(clf, feature_name01).get_parsed_tree()
    write_json(features_base_path + "DecisionTree_rule.json", parsed)
    print(json.dumps(parsed, ensure_ascii=False, indent=4))


def woe_single(DF, Y, X):
    """
    WOE (weight of evidence) binning of a single feature.

    High-cardinality features (> 11 distinct values) are quantile-binned,
    shrinking the bucket count until bucket means are strictly monotone; if
    that yields zero IV, it falls back to 100 equal-width bins merged down
    to at most 4.  Low-cardinality features are grouped by raw value, with
    pure (all-good / all-bad) groups merged into a neighbour so the
    log-odds stay finite.

    :param DF: unused here; kept for signature compatibility with callers.
    :param Y: binary label Series aligned with X (1 = bad sample).
    :param X: feature Series to encode.
    :return: (d3, cut, woe, iv, x_woe) -- per-bin statistics frame, bin
             edges (or raw category values), per-bin WOE values, total
             information value, and the WOE-encoded feature.
    """
    # BUGFIX: was `from scipy.stats import stats`, which relies on the
    # long-deprecated scipy.stats.stats submodule (removed in modern scipy).
    from scipy import stats
    if X.nunique() > 11:
        r = 0
        bad = Y.sum()  # number of bad samples (label 1 assumed bad)
        good = Y.count() - bad  # number of good samples
        n = 5
        # shrink the quantile-bucket count until bucket means of X and Y are
        # strictly monotone (|spearman r| == 1)
        while np.abs(r) < 1:
            d1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.qcut(X, n, duplicates='drop')})
            d2 = d1.groupby('Bucket', as_index=False)
            r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
            n = n - 1
        d3 = pd.DataFrame(d2.X.min(), columns=['min'])
        d3['min'] = d2.min().X
        d3['max'] = d2.max().X
        d3['sum'] = d2.sum().Y
        d3['total'] = d2.count().Y
        d3['bad_rate'] = d2.mean().Y
        d3['group_rate'] = d3['total'] / (bad + good)
        d3['woe'] = np.log((d3['bad_rate'] / (1 - d3['bad_rate'])) / (bad / good))
        d3['iv'] = (d3['sum'] / bad - ((d3['total'] - d3['sum']) / good)) * d3['woe']
        iv = d3['iv'].sum()
        if iv != 0.0 and len(d2) > 1:
            d3['iv_sum'] = iv
            woe = list(d3['woe'].round(6))
            cut = list(d3['min'].round(6))
            cut1 = list(d3['max'].round(6))
            cut.append(cut1[-1] + 1)  # extend the last right-open edge past the max
            x_woe = pd.cut(X, cut, right=False, labels=woe)
            return d3, cut, woe, iv, x_woe
        else:
            # fallback: 100 equal-width bins, then repeatedly merge the
            # smallest bin into a neighbour until at most 4 bins remain
            dn1 = pd.DataFrame({"X": X, "Y": Y, "Bucket": pd.cut(X, 100)})
            dn2 = dn1.groupby('Bucket', as_index=False)
            dn3 = pd.DataFrame(dn2.X.min(), columns=['min'])
            dn3['min'] = dn2.min().X
            dn3['max'] = dn2.max().X
            dn3['sum'] = dn2.sum().Y
            dn3['total'] = dn2.count().Y
            while (1):
                if (len(dn3) > 4):
                    dn3_min_index = dn3[dn3.total == min(dn3.total)].index.values[0]
                    if (dn3_min_index != 0):  # smallest bin is not the first row: merge backwards
                        dn3.iloc[dn3_min_index - 1, 1] = dn3.iloc[dn3_min_index, 1]
                        dn3.iloc[dn3_min_index - 1, 2] = dn3.iloc[dn3_min_index, 2] + dn3.iloc[dn3_min_index - 1, 2]
                        dn3.iloc[dn3_min_index - 1, 3] = dn3.iloc[dn3_min_index, 3] + dn3.iloc[dn3_min_index - 1, 3]
                        dn3 = dn3.drop([dn3_min_index])
                        dn3 = dn3.reset_index(drop=True)
                    else:  # smallest bin is the first row: merge into the next one
                        dn3.iloc[dn3_min_index + 1, 0] = dn3.iloc[dn3_min_index, 0]
                        dn3.iloc[dn3_min_index + 1, 2] = dn3.iloc[dn3_min_index, 2] + dn3.iloc[dn3_min_index + 1, 2]
                        dn3.iloc[dn3_min_index + 1, 3] = dn3.iloc[dn3_min_index, 3] + dn3.iloc[dn3_min_index + 1, 3]
                        dn3 = dn3.drop([dn3_min_index])
                        dn3 = dn3.reset_index(drop=True)
                else:
                    break
            dn3['bad_rate'] = dn3['sum'] / dn3['total']
            dn3['group_rate'] = dn3['total'] / (bad + good)
            dn3['woe'] = np.log((dn3['bad_rate'] / (1 - dn3['bad_rate'])) / (bad / good))
            dn3['iv'] = (dn3['sum'] / bad - ((dn3['total'] - dn3['sum']) / good)) * dn3['woe']

            iv = dn3['iv'].sum()
            dn3['iv_sum'] = iv
            woe = list(dn3['woe'].round(6))
            cut = list(dn3['min'].round(6))
            cut1 = list(dn3['max'].round(6))
            cut.append(cut1[-1] + 1)
            x_woe = pd.cut(X, cut, right=False, labels=woe)
            return dn3, cut, woe, iv, x_woe
    else:
        bad = Y.sum()  # number of bad samples
        good = Y.count() - bad  # number of good samples
        d1 = pd.DataFrame({"X": X, "Y": Y})
        d2 = d1.groupby('X', as_index=True)
        d3 = pd.DataFrame()

        d3['sum'] = d2.sum().Y
        d3['total'] = d2.count().Y
        # Merge pure (all-good or all-bad) value groups into a neighbour so
        # the log-odds below stay finite; iterate from the end so positions
        # below c are unaffected by the drops.
        for c in range(d3.shape[0])[::-1]:
            if ((d3.iloc[c, 1] - d3.iloc[c, 0]) == 0) or (d3.iloc[c, 0] == 0):
                if c > 0:
                    d3.iloc[c - 1, 0] = d3.iloc[c - 1, 0] + d3.iloc[c, 0]
                    d3.iloc[c - 1, 1] = d3.iloc[c - 1, 1] + d3.iloc[c, 1]
                    d3.drop(d3.index[c], inplace=True)
                elif d3.shape[0] > 1:
                    # BUGFIX: c == 0 previously merged into iloc[c - 1] ==
                    # iloc[-1] (the LAST row), silently corrupting that bin;
                    # merge into the next row instead.
                    d3.iloc[1, 0] = d3.iloc[1, 0] + d3.iloc[0, 0]
                    d3.iloc[1, 1] = d3.iloc[1, 1] + d3.iloc[0, 1]
                    d3.drop(d3.index[0], inplace=True)
            else:
                continue

        d3['min'] = d3.index
        d3['max'] = d3.index
        d3['bad_rate'] = d3['sum'] / d3['total']
        d3['group_rate'] = d3['total'] / (bad + good)
        d3['woe'] = np.log((d3['bad_rate'] / (1 - d3['bad_rate'])) / (bad / good))
        d3['iv'] = (d3['sum'] / bad - ((d3['total'] - d3['sum']) / good)) * d3['woe']
        iv = d3['iv'].sum()
        d3['iv_sum'] = iv
        d3 = d3[['min', 'max', 'sum', 'total', 'bad_rate', 'group_rate', 'woe', 'iv', 'iv_sum']]

        woe = list(d3['woe'].round(6))
        cut = list(d3.index)
        x_woe = X.replace(cut, woe)
        return d3, cut, woe, iv, x_woe


def woe_all(DF, test, Y, X):
    """
    WOE-encode every feature named in X, in place, on both DF and test.

    For each column this adds `<col>_woe` (and, where binning succeeds,
    `<col>_bad_rate`, `<col>_group_rate`, `<col>_min`, `<col>_max`)
    columns; the test frame is encoded with the bins fitted on DF.
    Per-feature failures are printed and skipped so that one bad column
    does not abort the whole pass.

    Example (kept from the original author):
        woe = woe_all(df, test, df.overdue_7, df.iloc[:, 1:])
        woe.to_csv('E:\\woe_all.csv')

    :param DF: training DataFrame, mutated in place.
    :param test: test DataFrame, mutated in place.
    :param Y: binary label Series aligned with DF (1 = bad sample).
    :param X: iterable of column names to encode.
    :return: (DF, test)
    """
    # BUGFIX: was `from scipy.stats import stats`, which relies on the
    # long-deprecated scipy.stats.stats submodule (removed in modern scipy).
    from scipy import stats
    for i in X:
        print(i)
        try:
            if DF[i].nunique() > 11:
                r = 0
                bad = Y.sum() + 1  # bad-sample count (label 1 = bad), +1 smoothing
                good = Y.count() - bad + 1  # good-sample count, +1 smoothing
                n = 5
                # shrink the quantile-bucket count until bucket means are
                # strictly monotone (|spearman r| == 1)
                while np.abs(r) < 1:
                    d1 = pd.DataFrame({"X": DF[i], "Y": Y, "Bucket": pd.qcut(DF[i], n, duplicates='drop')})
                    d2 = d1.groupby('Bucket', as_index=False)
                    r, p = stats.spearmanr(d2.mean().X, d2.mean().Y)
                    n = n - 1
                d3 = pd.DataFrame(d2.X.min(), columns=['min'])
                d3['min'] = d2.min().X
                d3['max'] = d2.max().X
                d3['sum'] = d2.sum().Y
                d3['total'] = d2.count().Y
                d3['bad_rate'] = d2.mean().Y
                d3['group_rate'] = d3['total'] / (bad + good)
                d3['woe'] = np.log((d3['bad_rate'] / (1 - d3['bad_rate'])) / (bad / good))
                d3['iv'] = (d3['sum'] / bad - ((d3['total'] - d3['sum']) / good)) * d3['woe']
                iv = d3['iv'].sum()
                if iv != 0.0:
                    woe = list(d3['woe'].round(9))
                    bad_rate = list(d3['bad_rate'].round(9))
                    group_rate = list(d3['group_rate'].round(9))
                    cut = list(d3['min'].round(9))
                    cut1 = list(d3['max'].round(9))
                    # extend the last right-open edge past the observed max
                    cut.append(cut1[-1] + 5)
                    DF[i + '_woe'] = pd.cut(DF[i], cut, right=False, labels=woe)
                    DF[i + '_bad_rate'] = pd.cut(DF[i], cut, right=False, labels=bad_rate)
                    DF[i + '_group_rate'] = pd.cut(DF[i], cut, right=False, labels=group_rate)
                    DF[i + '_min'] = pd.cut(DF[i], cut, right=False, labels=list(d3['min'].round(9)))
                    DF[i + '_max'] = pd.cut(DF[i], cut, right=False, labels=list(d3['max'].round(9)))
                    test[i + '_woe'] = pd.cut(test[i], cut, right=False, labels=woe)
                    test[i + '_bad_rate'] = pd.cut(test[i], cut, right=False, labels=bad_rate)
                    test[i + '_group_rate'] = pd.cut(test[i], cut, right=False, labels=group_rate)
                    # NOTE(review): unlike the DF columns above these use the
                    # unrounded Series as labels -- kept as in the original.
                    test[i + '_min'] = pd.cut(test[i], cut, right=False, labels=d3['min'])
                    test[i + '_max'] = pd.cut(test[i], cut, right=False, labels=d3['max'])
                else:
                    # fallback: 20 equal-width bins merged down to at most 4
                    dn1 = pd.DataFrame({"X": DF[i], "Y": Y, "Bucket": pd.cut(DF[i], 20)})
                    dn2 = dn1.groupby('Bucket', as_index=False)
                    dn3 = pd.DataFrame(dn2.X.min(), columns=['min'])
                    dn3['min'] = dn2.min().X
                    dn3['max'] = dn2.max().X
                    dn3['sum'] = dn2.sum().Y
                    dn3['total'] = dn2.count().Y
                    dn3 = dn3.dropna()
                    dn3 = dn3.reset_index(drop=True)
                    while (1):
                        if (len(dn3) > 4):
                            try:
                                dn3_min_index = dn3[dn3.total == dn3.total.min()].index.values[0]
                            except Exception:
                                dn3_min_index = dn3[dn3.total == np.min(dn3.total)].index.values[0]
                            if (dn3_min_index != 0):  # smallest bin is not the first row: merge backwards
                                dn3.iloc[dn3_min_index - 1, 1] = dn3.iloc[dn3_min_index, 1]
                                dn3.iloc[dn3_min_index - 1, 2] = dn3.iloc[dn3_min_index, 2] + dn3.iloc[
                                    dn3_min_index - 1, 2]
                                dn3.iloc[dn3_min_index - 1, 3] = dn3.iloc[dn3_min_index, 3] + dn3.iloc[
                                    dn3_min_index - 1, 3]
                                dn3 = dn3.drop([dn3_min_index])
                                dn3 = dn3.reset_index(drop=True)
                            else:  # smallest bin is the first row: merge into the next one
                                dn3.iloc[dn3_min_index + 1, 0] = dn3.iloc[dn3_min_index, 0]
                                dn3.iloc[dn3_min_index + 1, 2] = dn3.iloc[dn3_min_index, 2] + dn3.iloc[
                                    dn3_min_index + 1, 2]
                                dn3.iloc[dn3_min_index + 1, 3] = dn3.iloc[dn3_min_index, 3] + dn3.iloc[
                                    dn3_min_index + 1, 3]
                                dn3 = dn3.drop([dn3_min_index])
                                dn3 = dn3.reset_index(drop=True)
                        else:
                            break
                    dn3['bad_rate'] = dn3['sum'] / dn3['total']
                    dn3['group_rate'] = dn3['total'] / (bad + good)
                    dn3['woe'] = np.log((dn3['bad_rate'] / (1 - dn3['bad_rate'])) / (bad / good))
                    dn3['iv'] = (dn3['sum'] / bad - ((dn3['total'] - dn3['sum']) / good)) * dn3['woe']
                    # iv = dn3['iv'].sum()
                    cut = list(dn3['min'].round(9))
                    cut1 = list(dn3['max'].round(9))
                    cut.append(cut1[-1] + 1000)
                    try:
                        woe = list(dn3['woe'].round(9))
                        DF[i + '_woe'] = pd.cut(DF[i], cut, right=False, labels=woe)
                        test[i + '_woe'] = pd.cut(test[i], cut, right=False, labels=woe)
                    except Exception:
                        # e.g. duplicate bin labels: give up on this feature
                        continue
                    try:
                        bad_rate = list(dn3['bad_rate'].round(9))
                        DF[i + '_bad_rate'] = pd.cut(DF[i], cut, right=False, labels=bad_rate)
                        test[i + '_bad_rate'] = pd.cut(test[i], cut, right=False, labels=bad_rate)
                    except Exception:
                        continue
                    try:
                        group_rate = list(dn3['group_rate'].round(9))
                        DF[i + '_group_rate'] = pd.cut(DF[i], cut, right=False, labels=group_rate)
                        test[i + '_group_rate'] = pd.cut(test[i], cut, right=False, labels=group_rate)
                    except Exception:
                        continue
            else:
                bad = Y.sum()  # bad-sample count
                good = Y.count() - bad  # good-sample count
                d1 = pd.DataFrame({"X": DF[i], "Y": Y})
                d2 = d1.groupby('X', as_index=True)
                d3 = pd.DataFrame()
                d3['sum'] = d2.sum().Y
                d3['total'] = d2.count().Y
                # Merge pure (all-good or all-bad) value groups into a
                # neighbour so the log-odds below stay finite.
                for c in range(d3.shape[0])[::-1]:
                    if ((d3.iloc[c, 1] - d3.iloc[c, 0]) == 0) or (d3.iloc[c, 0] == 0):
                        if c > 0:
                            d3.iloc[c - 1, 0] = d3.iloc[c - 1, 0] + d3.iloc[c, 0]
                            d3.iloc[c - 1, 1] = d3.iloc[c - 1, 1] + d3.iloc[c, 1]
                            d3.drop(d3.index[c], inplace=True)
                        elif d3.shape[0] > 1:
                            # BUGFIX: c == 0 previously merged into
                            # iloc[c - 1] == iloc[-1] (the LAST row);
                            # merge into the next row instead.
                            d3.iloc[1, 0] = d3.iloc[1, 0] + d3.iloc[0, 0]
                            d3.iloc[1, 1] = d3.iloc[1, 1] + d3.iloc[0, 1]
                            d3.drop(d3.index[0], inplace=True)
                    else:
                        continue
                d3['bad_rate'] = d3['sum'] / d3['total']
                d3['group_rate'] = d3['total'] / (bad + good)
                d3['woe'] = np.log((d3['bad_rate'] / (1 - d3['bad_rate'])) / (bad / good))
                d3['iv'] = (d3['sum'] / bad - ((d3['total'] - d3['sum']) / good)) * d3['woe']
                iv = d3['iv'].sum()
                woe = list(d3['woe'].round(9))
                cut = list(d3.index)
                x_woe = DF[i].replace(cut, woe)
                x_woe_test = test[i].replace(cut, woe)
                DF[i + '_woe'] = x_woe
                test[i + '_woe'] = x_woe_test
                DF[i + '_bad_rate'] = DF[i].replace(cut, list(d3['bad_rate'].round(9)))
                DF[i + '_group_rate'] = DF[i].replace(cut, list(d3['group_rate'].round(9)))
                test[i + '_bad_rate'] = test[i].replace(cut, list(d3['bad_rate'].round(9)))
                test[i + '_group_rate'] = test[i].replace(cut, list(d3['group_rate'].round(9)))
                # test[i + '_min'] = test[i].replace(cut, list(d3['min'].round(9)))
                # test[i + '_max'] = test[i].replace(cut, list(d3['max'].round(9)))
        except Exception as e:
            # best effort: report and move on to the next feature
            traceback.print_exc()
            print(123)
        print(DF.shape)
        print(test.shape)
    return DF, test


# Paths of the cached feature matrices (presumably the WOE-transformed
# output of woe_all(); confirm with the producer script) consumed by
# get_train_weo() / get_test_weo() / get_X_y_weo() below.
train_weo_path = features_base_path + 'train_ftr.csv'
test_weo_path = features_base_path + 'test_ftr.csv'


def get_train_weo():
    """Load the cached training feature matrix from train_ftr.csv."""
    train_frame = pd.read_csv(train_weo_path)
    return train_frame


# def flatten(l):
#     for el in l:
#         if hasattr(el, "__iter__") and not isinstance(el, basestring):
#             for sub in flatten(el):
#                 yield sub
#         else:
#             yield el

def get_test_weo():
    """Load the cached test feature matrix from test_ftr.csv."""
    test_frame = pd.read_csv(test_weo_path)
    return test_frame


def get_X_y_weo():
    """
    Load the cached training features and split off the label column.

    :return: (X, y, cols) -- feature value matrix, label vector, and the
             feature column names (label column excluded).
    """
    frame = pd.read_csv(train_weo_path)
    print(frame.shape)
    # pop() both removes the label column and hands back its values
    y = frame.pop(tag_hd.Tag).values
    cols = list(frame.columns)
    X = frame.values
    # release the frame before returning the extracted arrays
    del frame
    return X, y, cols
