import lightgbm as lgb
import numpy as np
import time
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
import xgboost as xgb
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.ensemble import VotingClassifier
# from sklearn.cross_validation import StratifiedKFold
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score, roc_curve, auc
import warnings

warnings.filterwarnings("ignore")

def lightgbm_model(X_train, X_test, y_train):
    """Train a LightGBM binary booster and return scores for X_test.

    X_train/y_train are pandas objects (``.values`` is used for training);
    the returned array holds the predicted positive-class probabilities.
    """
    train_set = lgb.Dataset(X_train.values, y_train.values)
    params = {
        'objective': 'binary',
        'metric': {'auc'},
        'learning_rate': 0.01,
        'max_depth': 6,
        'seed': 7,
    }
    booster = lgb.train(params, train_set, num_boost_round=600)
    return booster.predict(X_test)

def forest_model(X_train, X_test, y_train):
    """Fit a random forest and return positive-class probabilities for X_test."""
    clf = RandomForestClassifier(
        n_estimators=160,
        max_features=35,
        max_depth=8,
        random_state=7,
    )
    clf.fit(X_train, y_train)
    # Column 1 of predict_proba is the probability of the positive class.
    return clf.predict_proba(X_test)[:, 1]

def gradient_model(X_train, X_test, y_train):
    """Fit a gradient-boosting classifier and return positive-class probabilities."""
    clf = GradientBoostingClassifier(
        n_estimators=200,
        random_state=7,
        max_depth=5,
        learning_rate=0.03,
    )
    clf.fit(X_train, y_train)
    # Column 1 of predict_proba is the probability of the positive class.
    return clf.predict_proba(X_test)[:, 1]

def xgboost_model(X_train, X_test, y_train):
    """Train an XGBoost booster (binary:logistic) and return scores for X_test.

    X_train/X_test/y_train are pandas objects; the returned array holds
    predicted positive-class probabilities from 600 boosting rounds.
    """
    dtrain = xgb.DMatrix(X_train.values, label=y_train.values)
    dtest = xgb.DMatrix(X_test.values)
    # 'verbosity': 0 replaces the 'silent' flag, which was deprecated and
    # then removed in XGBoost >= 1.0 (passing it there is an error).
    params = {'objective': 'binary:logistic', 'eval_metric': 'auc', 'verbosity': 0,
              'seed': 7, 'max_depth': 6, 'eta': 0.01}
    model = xgb.train(params, dtrain, 600)
    return model.predict(dtest)

def kfold_plot(train, ytrain, model):
    """Run stratified 5-fold CV for `model`, plotting per-fold and mean ROC curves.

    Parameters
    ----------
    train : pd.DataFrame
        Feature matrix.
    ytrain : pd.Series
        Binary target aligned with `train`.
    model : callable
        ``model(X_train, X_test, y_train) -> array`` of positive-class scores.

    Returns
    -------
    tuple
        (per-fold AUC scores, mean AUC, mean model wall-clock time in seconds).
    """
    kf = StratifiedKFold(n_splits=5)
    scores = []
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)
    exe_time = []

    colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue'])
    lw = 2

    i = 0
    for (train_index, test_index), color in zip(kf.split(train, ytrain), colors):
        X_train, X_test = train.iloc[train_index], train.iloc[test_index]
        y_train, y_test = ytrain.iloc[train_index], ytrain.iloc[test_index]
        begin_t = time.time()
        predictions = model(X_train, X_test, y_train)
        end_t = time.time()
        exe_time.append(round(end_t - begin_t, 3))
        scores.append(roc_auc_score(y_test.astype(float), predictions))
        fpr, tpr, thresholds = roc_curve(y_test, predictions)
        # np.interp replaces scipy.interp, which was deprecated in SciPy 1.3
        # and later removed; the two are behaviorally identical here.
        mean_tpr += np.interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0  # curve must start at (0, 0)
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=lw, color=color, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
        i += 1
    # Diagonal = performance of random guessing.
    plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k', label='Luck')

    mean_tpr /= kf.get_n_splits(train, ytrain)
    mean_tpr[-1] = 1.0  # curve must end at (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--', label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc='lower right')
    plt.show()

    print('mean scores: ', np.mean(scores))
    print('mean model process time: ', np.mean(exe_time), 's')

    return scores, np.mean(scores), np.mean(exe_time)

# Aggregated experiment results, keyed by model name (filled in elsewhere).
dct_scores = {}
mean_score = {}
mean_time = {}

from kmeans_smote import KMeansSMOTE

# Oversampler: clusters the feature space with k-means (100 clusters) and
# applies SMOTE (10 nearest neighbors) inside clusters to balance classes.
kmeans_smote = KMeansSMOTE(
    kmeans_args={
        'n_clusters': 100
    },
    smote_args={
        'k_neighbors': 10
    }
)

def choose_xgb_model(X_train, y_train):
    """Grid-search an XGBoost classifier and return the refit best estimator.

    Searches learning_rate x n_estimators x max_depth, selecting by ROC AUC.
    """
    tuned_params = [{'objective': ['binary:logistic'], 'learning_rate': [0.01, 0.03, 0.05],
                     'n_estimators': [100, 150, 200], 'max_depth': [4, 6, 8]}]
    # Dead timing code (begin_t/end_t were computed but never used) removed.
    clf = GridSearchCV(xgb.XGBClassifier(seed=7), tuned_params, scoring='roc_auc')
    clf.fit(X_train, y_train)
    return clf.best_estimator_

def choose_lgb_model(X_train, y_train):
    """Grid-search a LightGBM classifier and return the refit best estimator.

    Searches learning_rate x n_estimators x max_depth, selecting by ROC AUC.
    """
    tuned_params = [{'objective': ['binary'], 'learning_rate': [0.01, 0.03, 0.05],
                     'n_estimators': [100, 150, 200], 'max_depth': [4, 6, 8]}]
    # Dead timing code (begin_t/end_t were computed but never used) removed.
    clf = GridSearchCV(lgb.LGBMClassifier(seed=7), tuned_params, scoring='roc_auc')
    clf.fit(X_train, y_train)
    return clf.best_estimator_

def choose_forest_model(X_train, y_train):
    """Grid-search a random forest and return the refit best estimator.

    Searches n_estimators x max_features x max_depth, selecting by ROC AUC.
    """
    tuned_params = [{'n_estimators': [100, 150, 200], 'max_features': [8, 15, 30], 'max_depth': [4, 8, 10]}]
    # Dead timing code (begin_t/end_t were computed but never used) removed.
    clf = GridSearchCV(RandomForestClassifier(random_state=7), tuned_params, scoring='roc_auc')
    clf.fit(X_train, y_train)
    return clf.best_estimator_
def choose_gradient_model(X_train, y_train):
    """Grid-search a gradient-boosting classifier and return the refit best estimator.

    Searches n_estimators x learning_rate x min_samples_leaf x max_depth,
    selecting by ROC AUC.
    """
    tuned_params = [{'n_estimators': [100, 150, 200], 'learning_rate': [0.03, 0.05, 0.07],
                     'min_samples_leaf': [8, 15, 30], 'max_depth': [4, 6, 8]}]
    # Dead timing code (begin_t/end_t were computed but never used) removed.
    clf = GridSearchCV(GradientBoostingClassifier(random_state=7), tuned_params, scoring='roc_auc')
    clf.fit(X_train, y_train)
    return clf.best_estimator_

# Load the training data; 'y' is the binary target column.
dataset0 = pd.read_csv("xiaoxiao_model/predata/train1.csv")
# drop(columns=...) replaces drop(['y'], 1): passing `axis` positionally to
# DataFrame.drop was deprecated and removed in pandas 2.0.
train_full1 = dataset0.drop(columns=['y'])
ytrain1 = dataset0['y']

# Rebalance the classes with k-means SMOTE before model selection; restore
# the original column names on the resampled feature array.
train_full, ytrain = kmeans_smote.fit_sample(train_full1, ytrain1)
train_full = pd.DataFrame(train_full, columns=train_full1.columns)

# Tune one estimator per family on the resampled training set; these
# globals are consumed by voting_model below.
bst_gradient = choose_gradient_model(train_full, ytrain)
bst_xgb = choose_xgb_model(train_full, ytrain)
bst_lgb = choose_lgb_model(train_full, ytrain)

def voting_model(X_train, X_test, y_train):
    """Soft-voting ensemble of the tuned XGBoost, GBM and LightGBM estimators.

    Returns the weighted positive-class probability for X_test; XGBoost and
    LightGBM each carry twice the weight of the gradient-boosting model.
    """
    members = [('xgb', bst_xgb), ('gbm', bst_gradient), ('lgb', bst_lgb)]
    ensemble = VotingClassifier(estimators=members, voting='soft',
                                weights=[2, 1, 2])
    ensemble.fit(X_train, y_train)
    return ensemble.predict_proba(X_test)[:, 1]

# Score the hold-out set; column '0' holds the row identifier.
test = pd.read_csv("xiaoxiao_model/predata/test1.csv")
# drop(columns=...) replaces drop(['0'], 1): positional `axis` in
# DataFrame.drop was removed in pandas 2.0.
predictions = voting_model(train_full, test.drop(columns=['0']), ytrain)

# Build and write the submission file. (Dead locals `p = []` and `sum = 0`
# removed; the latter also shadowed the `sum` builtin.)
result = pd.DataFrame()
result['idx'] = test['0']
result['result'] = predictions
result.to_csv(r"xiao_sub.csv", index=False)
print("xiao_model succeeded!")