import numpy as np
import pandas as pd
import xgboost as xgb

try:
    # sklearn.cross_validation was removed in scikit-learn 0.20.
    from sklearn.cross_validation import KFold
except ImportError:
    from sklearn.model_selection import KFold
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression, Ridge, RidgeClassifier
from sklearn.metrics import accuracy_score


seed = 1024
NFOLDS = 5
np.random.seed(seed)

path = '../data/'

# Pre-pickled feature matrices for the three splits.
train_x = pd.read_pickle(path + 'train_X.pkl')
valid_x = pd.read_pickle(path + 'valid_X.pkl')
dev_x = pd.read_pickle(path + 'dev_X.pkl')

# Binary labels for each split (column 'label' of the raw frames).
y_train = pd.read_pickle(path + 'train.pkl')['label']
y_valid = pd.read_pickle(path + 'valid.pkl')['label']
y_dev = pd.read_pickle(path + 'dev.pkl')['label']

ntrain = train_x.shape[0]
ntest = valid_x.shape[0]

x_train = np.array(train_x)
x_test = np.array(valid_x)

# The original used sklearn.cross_validation.KFold, which was removed in
# scikit-learn 0.20.  Build the shuffled K folds directly with numpy so the
# script runs on any sklearn version: kf is a list of (train_index,
# test_index) pairs, exactly what the old KFold object yielded on iteration.
_rng = np.random.RandomState(seed)
_parts = np.array_split(_rng.permutation(ntrain), NFOLDS)
kf = [(np.concatenate([p for j, p in enumerate(_parts) if j != i]), _parts[i])
      for i in range(NFOLDS)]




class SklearnWrapper(object):
    """Uniform train/predict facade around a scikit-learn estimator class.

    Gives sklearn estimators the same interface as XgbWrapper so get_oof
    can treat every base model identically.
    """

    def __init__(self, clf, seed=0, params=None):
        # Copy first: the original wrote 'random_state' straight into the
        # caller's dict (mutating a shared config) and crashed on params=None.
        params = dict(params) if params else {}
        params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        """Fit the wrapped estimator."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Predict class labels (sklearn .predict, not probabilities)."""
        return self.clf.predict(x)


class XgbWrapper(object):
    """Facade around xgboost's native train/predict API.

    'nrounds' may be supplied inside params (default 500); it is extracted
    here and passed to xgb.train rather than to the booster config.
    """

    def __init__(self, seed=0, params=None):
        # Copy first: the original popped 'nrounds' out of the caller's dict
        # and wrote 'seed' into it, corrupting the config for any later reuse.
        self.param = dict(params) if params else {}
        self.param['seed'] = seed
        self.nrounds = self.param.pop('nrounds', 500)

    def train(self, x_train, y_train):
        """Train a booster for self.nrounds rounds on the given data."""
        dtrain = xgb.DMatrix(x_train, label=y_train)
        self.gbdt = xgb.train(self.param, dtrain, self.nrounds)

    def predict(self, x):
        """Return raw booster predictions (probabilities for binary:logistic)."""
        return self.gbdt.predict(xgb.DMatrix(x))


def get_oof(clf, x_tr=None, y_tr=None, x_te=None, folds=None):
    """Out-of-fold stacking predictions for one base model.

    For each fold, trains `clf` on the other folds and predicts the
    held-out rows, so every training row gets a prediction from a model
    that never saw it; the test matrix is predicted by each fold model
    and the per-fold predictions are averaged.

    Parameters default to the module-level data (x_train, y_train, x_test,
    kf), so the original `get_oof(clf)` call is unchanged; passing them
    explicitly makes the function reusable and testable.

    Returns:
        (oof_train, oof_test) — column vectors of shape (n_train, 1) and
        (n_test, 1) for the level-2 train / test matrices.
    """
    if x_tr is None:
        x_tr = x_train
    if y_tr is None:
        y_tr = y_train
    if x_te is None:
        x_te = x_test
    if folds is None:
        folds = kf
    # Force positional indexing: a pandas Series with a non-default index
    # would otherwise be label-indexed by y_tr[train_index].
    y_arr = np.asarray(y_tr)

    oof_train = np.zeros(x_tr.shape[0])
    test_preds = []
    for train_index, test_index in folds:
        clf.train(x_tr[train_index], y_arr[train_index])
        oof_train[test_index] = clf.predict(x_tr[test_index])  # level-2 train feature
        test_preds.append(clf.predict(x_te))                   # level-2 test feature

    oof_test = np.mean(np.vstack(test_preds), axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)


# Hyper-parameters for the level-1 base models.  random_state / seed is
# injected by the wrappers, so it is not listed here.

# ExtraTreesClassifier
et_params = {
    'n_jobs': 16,
    'n_estimators': 120,
    'max_features': 0.6,
    'max_depth': 12,
    'min_samples_leaf': 2,
}

# RandomForestClassifier
rf_params = {
    'n_jobs': 16,
    'n_estimators': 250,
    'max_features': 0.5,
    'max_depth': 12,
    'min_samples_leaf': 2,
}

# First xgboost configuration.  'nrounds' is not an xgboost booster
# parameter: XgbWrapper pops it and passes it to xgb.train as the
# number of boosting rounds.
xgb_params = {
    'colsample_bytree': 0.6,
    'silent': 1,
    'subsample': 0.7,
    'learning_rate': 0.025,
    'objective': 'binary:logistic',
    'max_depth': 8,
    'min_child_weight': 10,
    'eval_metric':['error','logloss'],
    'nrounds': 600
}


# Second, deliberately different xgboost configuration (deeper trees
# capped at 7, stronger row/column subsampling) to add model diversity.
# NOTE(review): the 'seed' value here is overwritten by XgbWrapper.__init__.
xgb_params_2 = {
    'max_depth':7,
    'nthread':18,
    'eta':0.03,
    'eval_metric':['error','logloss'],
    'objective':'binary:logistic',
    'subsample':0.5,
    'colsample_bytree':0.5,
    'silent':1,
    'seed':1123,
    'min_child_weight':10,
    'nrounds': 400
}

# LogisticRegression (default L2 penalty, inverse regularization strength C).
lr_params = {
    'C':1.0,
}


# Level-1 learners: two xgboost configurations plus three sklearn models,
# all behind the same train/predict wrapper interface.
xg = XgbWrapper(seed=seed, params=xgb_params)
xg2 = XgbWrapper(seed=seed, params=xgb_params_2)
et = SklearnWrapper(clf=ExtraTreesClassifier, seed=seed, params=et_params)
rf = SklearnWrapper(clf=RandomForestClassifier, seed=seed, params=rf_params)
Lr = SklearnWrapper(clf=LogisticRegression, seed=seed, params=lr_params)

# Out-of-fold predictions for every base model.
xg_oof_train, xg_oof_test = get_oof(xg)
xg_oof_train_2, xg_oof_test_2 = get_oof(xg2)
et_oof_train, et_oof_test = get_oof(et)
rf_oof_train, rf_oof_test = get_oof(rf)
lr_oof_train, lr_oof_test = get_oof(Lr)

# Round to the nearest class label (+0.5 then truncate) and report the
# out-of-fold accuracy of each base model.
for tag, oof in (('XG', xg_oof_train), ('RF', rf_oof_train),
                 ('ET', et_oof_train), ('lr', lr_oof_train)):
    print("{}-CV: {}".format(tag, accuracy_score(y_train, (oof + 0.5).astype(int))))



# ---- level 2: train an xgboost meta-model on the stacked OOF columns ----

l2_x_train = np.concatenate((xg_oof_train, xg_oof_train_2, et_oof_train,
                             rf_oof_train, lr_oof_train), axis=1)
l2_x_test = np.concatenate((xg_oof_test, xg_oof_test_2, et_oof_test,
                            rf_oof_test, lr_oof_test), axis=1)

dtrain = xgb.DMatrix(l2_x_train, label=y_train)
dtest = xgb.DMatrix(l2_x_test)

# Distinct name: the original rebound `xgb_params`, silently shadowing the
# level-1 configuration defined above.
xgb_params_l2 = {
    'colsample_bytree': 0.6,
    'silent': 1,
    'subsample': 0.7,
    'learning_rate': 0.01,
    'objective': 'binary:logistic',
    'max_depth': 4,
    'num_parallel_tree': 1,
    'min_child_weight': 0,
    'eval_metric': ['error', 'logloss'],
}

# Pick the boosting-round count by 5-fold CV with early stopping; with
# early_stopping_rounds set, xgb.cv truncates the result frame at the best
# iteration, so len(res) is the chosen round count.
res = xgb.cv(xgb_params_l2, dtrain, num_boost_round=10000, nfold=5, seed=seed,
             stratified=False, early_stopping_rounds=20, verbose_eval=10,
             show_stdv=True)

num_boost_rounds = len(res)
print('best round', num_boost_rounds)

# Refit on the full level-2 training set, with 10% extra rounds since the
# final model sees more data than each CV fold did.
clf = xgb.train(xgb_params_l2, dtrain,
                num_boost_round=int(1.1 * num_boost_rounds),
                evals=[(dtrain, 'Train')],
                verbose_eval=50)

test_pred = clf.predict(dtest)

# Threshold the predicted probabilities at 0.5 (+0.5 then truncate).
y_t = (test_pred + 0.5).astype(int)
acc = accuracy_score(y_valid, y_t)

# Fix: the original message claimed "dev set", but the score is computed
# against y_valid (the valid split).
print('ensemble model accuracy on the valid set: {}%'.format(round(acc * 100, 2)))
