import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from tiancheng.base.base_helper import *

# Load raw competition tables via project helpers; the inline comments show
# the CSV/txt files each helper presumably wraps — TODO confirm in base_helper.
op_train = get_operation_train_new()
trans_train = get_transaction_train_new()

op_test = get_operation_round1_new() # pd.read_csv('input/operation_round1_new.csv')
trans_test = get_transaction_round1_new() #pd.read_csv('input/transaction_round1_new.csv')
y = get_tag_train_new() # pd.read_csv('input/tag_train_new.txt')
sub = get_sub()

def get_feature(op,trans,label):
    for feature in op.columns[2:]:
        label = label.merge(op.groupby(['UID'])[feature].count().reset_index(),on='UID',how='left')
        label =label.merge(op.groupby(['UID'])[feature].nunique().reset_index(),on='UID',how='left')
    
    for feature in trans.columns[2:]:
        if trans_train[feature].dtype == 'object':
            label =label.merge(trans.groupby(['UID'])[feature].count().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].nunique().reset_index(),on='UID',how='left')
        else:
            label =label.merge(trans.groupby(['UID'])[feature].count().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].nunique().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].max().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].min().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].sum().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].mean().reset_index(),on='UID',how='left')
            label =label.merge(trans.groupby(['UID'])[feature].std().reset_index(),on='UID',how='left')
    # label = label.T.drop_duplicates(subset=None, keep='first', inplace=True)
    return label



# Build train/test feature matrices; -1 marks UIDs with no op/trans rows.
train = get_feature(op_train,trans_train,y).fillna(-1)
test = get_feature(op_test,trans_test,sub).fillna(-1)
label = y['Tag']
test_id = test['UID']
# Rename the merged feature columns positionally (pandas added _x/_y suffix
# duplicates during the merges). The first two columns are presumably
# ['UID', 'Tag'] via `tag_header` from base_helper — TODO confirm.
cols = list(train.columns.values)
# [cols.remove(item) for item in tag_header]
cols = [str(i)+item for i,item in enumerate(cols[2:])]
train.columns = tag_header + cols
test.columns = tag_header + cols
# Weight-of-evidence encode all feature columns against the training target
# (woe_all is a project helper; exact semantics not visible here).
train, test = woe_all(train,test, train['Tag'], cols)
# Forward fill: use the previous row's value for NaNs left by the encoding.
train = train.fillna(method="ffill")
test = test.fillna(method="ffill")
# Backward fill: use the next row's value; cells with no later value stay NaN.
train = train.fillna(method="bfill")
test = test.fillna(method="bfill")
# test = test.fillna(-1)
# train = train.fillna(-1)
# train = train[cols]
# test = test[cols]
# Drop identifier and target columns so only features remain.
train = train.drop(['UID','Tag'],axis = 1)
test = test.drop(['UID','Tag'],axis = 1)

print(test.shape)
print(train.shape)

# Binary LightGBM classifier; n_estimators is an upper bound — early stopping
# below picks the effective number of trees.
clf = lgb.LGBMClassifier(boosting_type='gbdt', num_leaves=100, reg_alpha=3, reg_lambda=5, max_depth=-1,
    n_estimators=5000, objective='binary', subsample=0.95, colsample_bytree=0.77, subsample_freq=1, learning_rate=0.05,
    random_state=1000, n_jobs=16, min_child_weight=4, min_child_samples=5, min_split_gain=0)

from sklearn.model_selection import train_test_split
# Hold out 15% of the training rows as a validation set for early stopping.
X_train, X_test, y_train, y_test = train_test_split(train, label, test_size=0.15, random_state=0)

# Fit with logloss early stopping on the held-out split.
# NOTE(review): `early_stopping_rounds` as a fit() kwarg was removed in
# lightgbm 4.x — use callbacks=[lgb.early_stopping(50)] there.
clf.fit( X_train,  y_train, early_stopping_rounds=50, eval_metric="logloss",
        eval_set=[( X_test,  y_test)])
feature_name = list(train.columns.values)
# Class probabilities for the test set (both columns kept here).
sub_pre = clf.predict_proba(test)
# Positive-class probability on the validation split.
ytestPre = clf.predict_proba(X_test)[:, 1]
# Project helper; presumably reports/plots feature importances — TODO confirm.
feature_importance(clf, feature_name,  X_test,  y_test)