#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# @Time    : 2018/12/5 22:26
# @Author  : liujiantao
# @Site    : 
# @File    : lightgbm_train01.py
# @Software: PyCharm
from tiancheng.base.base_helper import *

# Load the operation/transaction feature tables, the training tags and the
# submission skeleton via the project's helper functions.
op_train = get_operation_train_new()
trans_train = get_transaction_train_new()

op_test = get_operation_round1_new()  # pd.read_csv('input/operation_round1_new.csv')
trans_test = get_transaction_round1_new()  # pd.read_csv('input/transaction_round1_new.csv')
y = get_tag_train_new()  # pd.read_csv('input/tag_train_new.txt')
sub = get_sub()
# WOE-encode every column of each table against the training tag.
op_train, op_test = woe_all(op_train, op_test, y[tag_hd.Tag], list(op_train.columns.values))
# BUG FIX: the transaction pair was previously encoded from op_train/op_test,
# so trans_train/trans_test ended up holding re-encoded OPERATION data.
trans_train, trans_test = woe_all(trans_train, trans_test, y[tag_hd.Tag], list(trans_train.columns.values))


def dev_version(data, label):
    """Aggregate per-UID app-version-switch features onto the label frame.

    Builds a datetime index from the day/time columns, flags events where a
    UID's app version differs from that UID's previous event, sums those
    flags over rolling 1-day / 3-day / 1-hour windows, and merges the
    per-UID sum/max/min/mean of each rolling count into ``label``.

    Args:
        data: operation table with UID, day, time, version and
            device_code1..3 columns.
        label: frame with a 'UID' column (tag frame or submission frame).

    Returns:
        ``label`` with 12 new feature columns (3 windows x 4 aggregates).
    """
    # All events fall in a single month, so an artificial 2018/10 date is
    # enough to order them chronologically.
    data['date'] = pd.to_datetime(('2018/10/' + data['day'].astype(str) + ' ' + data['time'].astype(str)),
                                  format='%Y/%m/%d %H:%M:%S')
    data = data.set_index('date')
    data = data.sort_index()
    data['device_code'] = (
            data['device_code3'].fillna('') + data['device_code2'].fillna('') + data['device_code1'].fillna(''))
    # Explicit copy: `dave` is mutated below and must not alias `data`.
    dave = data[data['version'].notnull()].copy()
    # Integer-encode the version string. Downstream only tests whether two
    # consecutive codes DIFFER, so any injective encoding is equivalent;
    # pd.factorize replaces the previous sklearn LabelEncoder without
    # changing the resulting features (and drops the sklearn dependency).
    dave['version_c'] = pd.factorize(dave['version'])[0]
    ver_by_uid = dave['version_c'].groupby(dave['UID'])
    # Rolling pair (current + previous event per UID): max != min exactly
    # when the version changed between two consecutive events.
    dave['ver_dev_max'] = ver_by_uid.transform(
        lambda x: x.rolling(window=2, min_periods=1).max())
    dave['ver_dev_min'] = ver_by_uid.transform(
        lambda x: x.rolling(window=2, min_periods=1).min())
    dave['ver_dev_nui'] = (dave['ver_dev_max'] != dave['ver_dev_min']).astype(int)

    # Rolling time-window counts of version switches per UID, copied back to
    # the full table and aggregated per UID onto the label frame.
    # BUG FIX: 'ver_dev_nui_1h_sum' was previously computed with window='3h'.
    for col, window in (('ver_dev_nui_1d_sum', '1d'),
                        ('ver_dev_nui_3d_sum', '3d'),
                        ('ver_dev_nui_1h_sum', '1h')):
        dave[col] = dave.groupby(by='UID')['ver_dev_nui'].transform(
            lambda x, w=window: x.rolling(window=w).sum())
        data[col] = np.nan
        # .loc assignment (aligned on the datetime index) replaces the old
        # chained indexing, which can silently write to a temporary copy.
        data.loc[data['version'].notnull(), col] = dave[col]
        stats = data.groupby('UID')[col].agg(['sum', 'max', 'min', 'mean'])
        # Match the merge-produced names of the original code: the sum keeps
        # the bare column name, the other aggregates get a suffix.
        stats.columns = [col, col + '_max', col + '_min', col + '_mean']
        label = label.merge(stats.reset_index(), on='UID', how='left')
    return label


# Build the version-switch features onto the tag frame (train) and the
# submission frame (test); remaining NaNs become the sentinel -1.
train = dev_version(op_train, y).fillna(-1)
test = dev_version(op_test, sub).fillna(-1)
label = y['Tag']
test_id = test['UID']
cols = list(train.columns.values)
# [cols.remove(item) for item in tag_header]
# Prefix every engineered column (everything after the two tag-header
# columns, e.g. UID/Tag) with its index so the renamed columns are unique.
cols = [str(i) + item for i, item in enumerate(cols[2:])]
train.columns = tag_header + cols
test.columns = tag_header + cols
train, test = woe_all(train, test, train['Tag'], cols)
# Forward-fill first, then backward-fill so leading NaNs are covered too;
# values missing in both directions stay NaN.
train = train.fillna(method="ffill")
test = test.fillna(method="ffill")
train = train.fillna(method="bfill")
test = test.fillna(method="bfill")
from sklearn.model_selection import train_test_split

X = train.values
# Hold out 15% of the rows as a validation set (fixed seed for
# reproducibility).
# BUG FIX: split against the 1-D `label` series. Previously the full tag
# DataFrame `y` was passed, so y_train/y_test were 2-D frames (UID + Tag)
# and not valid targets for LGBMClassifier.fit below; `label` was computed
# above but never used.
X_train, X_test, y_train, y_test = train_test_split(X, label, test_size=0.15, random_state=0)

import lightgbm as lgb

# Gradient-boosted binary classifier. The very large num_leaves/n_estimators
# rely on the early stopping configured in fit() below to avoid overfitting.
clf = lgb.LGBMClassifier(
    colsample_bytree=1.0,
    learning_rate=0.03,
    num_leaves=3000,  # leaves per decision tree
    max_depth=-1,  # no depth limit
    min_child_samples=8,
    min_child_weight=0.001,
    min_split_gain=0.0,
    n_estimators=10000,  # upper bound; early stopping picks the actual count
    n_jobs=-1,  # -1 means use every available CPU core
    objective='binary',  # binary classification objective
    reg_alpha=0.1,
    reg_lambda=0.0,
    silent=True,
    subsample=0.95,
    subsample_for_bin=20000,
    subsample_freq=1)
# Stop training once validation logloss has not improved for 50 rounds.
clf.fit(X_train, y_train, early_stopping_rounds=50, eval_metric="logloss",
        eval_set=[(X_test, y_test)])
feature_name = cols
# NOTE(review): this overwrites the engineered `test` frame built above with
# the project helper's output — confirm get_test_data() returns a matrix
# with the same columns, in the same order, that the model was trained on.
test = get_test_data()
sub_pre = clf.predict_proba(test)
# Positive-class probability on the held-out validation rows.
ytestPre = clf.predict_proba(X_test)[:, 1]
feature_importance(clf, feature_name, X_test, y_test)
# Competition score; presumably a TPR-weighted metric — see project helper.
m = tpr_weight_funtion(y_test, ytestPre)
# Keep only the positive-class probability, formatted as a string.
sub_pre = [float_to_str(item[1]) for item in sub_pre]
print(m)
sub = get_sub()
sub['Tag'] = sub_pre
# File name embeds the validation score for easy comparison between runs.
sub.to_csv(sub_base_path + 'lightgbm_train01_%s.csv' % str(m), index=False)
