from Xg_Utils.process_data import *
from Xg_Utils.params_run_submit import *
from sklearn.metrics import roc_auc_score
import os
import json
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Layer
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
import time


def split_xy_to_individual(xy):
    """Split a combined (X|y) 2-D array into features and target.

    The last column is the label; everything before it is the feature matrix.
    """
    features = xy[:, :-1]
    labels = xy[:, -1]
    return features, labels


def get_nn_model():
    """Build the small MLP used for bagging: 1138 -> 500 (sigmoid) -> dropout -> 1 (sigmoid).

    Returns an uncompiled keras Sequential model.
    """
    layers = [
        Dense(500, input_shape=(1138,), activation='sigmoid'),
        Dropout(0.25),
        # BatchNormalization(),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model


def bagging_different_prep_boost(X, y, val_X, val_y):
    # TODO: not test yet since NAN in matrix
    experiment_name = 'bagging_different_pred_boost'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    # os._exists()
    # os.mkdir(experiment_path)

    print 'setting params'
    params = params_XGBoost(random_seed=False)

    print 'running models'
    models = []
    flag = 1
    for prep in [scale_X, log_X, square_root_X]:
        X = prep(X)
        val_X = prep(val_X)
        run = run_XGBoost(X, y, val_X, val_y, params=params)
        models.append(run)
        run.save_model(experiment_path + '/model_' + str(flag))
        flag += 1

    print 'bagging models'
    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)
    dval = xgb.DMatrix(val_X, label=val_y)
    dtrain = xgb.DMatrix(X, label=y)
    train_preds = []
    val_preds = []
    test_preds = []
    for model in range(len(models)):
        train_preds.append(model.predict(dtrain))
        val_preds.append(model.predict(dval))
        test_preds.append(model.predict(dtest))
    train_bagging_pred = 0
    val_bagging_pred = 0
    test_bagging_pred = 0
    for i in range(len(train_preds)):
        train_bagging_pred += train_preds[i]
        val_bagging_pred += val_preds[i]
        test_bagging_pred += train_preds[i]
    train_bagging_pred = train_bagging_pred / 3.
    val_bagging_pred = val_bagging_pred / 3.
    print 'bagging train_auc: ' + str(roc_auc_score(y, train_bagging_pred)) + " | bagging val_auc: " + str(
        roc_auc_score(val_y, val_bagging_pred))

    print 'saving metadata'
    performence = {
        'train_AUC': 0,
        'val_AUC': 0
    }
    performence['train_AUC'] = roc_auc_score(y, train_bagging_pred)
    performence['val_AUC'] = roc_auc_score(val_y, val_bagging_pred)
    meta_data = [params, performence]
    f = open(experiment_path + '/meta_data.json', 'wb')
    meta_json = json.dumps(meta_data, default=lambda o: o.__dict__, indent=4)
    f.write(meta_json)
    f.close()

    print 'submitting results'
    test_result = pd.DataFrame(columns=['uid', 'score'])
    test_result.uid = test_uid
    test_result.score = test_bagging_pred
    test_result.to_csv(experiment_path + '/result.csv', index=None, encoding='utf - 8')


def bagging_boost_and_nn(X, y, val_X, val_y):
    # TODO: No use
    experiment_name = 'bagging_boost_and_nn'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    # os.mkdir(experiment_path)

    print 'running nn'
    nn_metadata = {
        'shuffle': False,
        'batch_size': 48,
        'class_weights': {0: 13.458, 1: 1.4},
        'loss_function': 'mse',
        'optimizer': 'adam',
        'nb_epochs': 30
    }
    nn_model = get_nn_model()
    nn_model.compile(loss=nn_metadata['loss_function'], optimizer=nn_metadata['optimizer'])
    # Y = np_utils.to_categorical(y, 2)
    # val_Y = np_utils.to_categorical(val_y, 2)
    Y = y
    val_Y = val_y
    for e in range(nn_metadata['nb_epochs']):
        print 'nn_model Eopch: ' + str(e + 1)
        nn_model.fit(X, Y, shuffle=nn_metadata['shuffle'], verbose=1,
                     validation_data=(val_X, val_Y),
                     batch_size=nn_metadata['batch_size'],
                     nb_epoch=1, class_weight=nn_metadata['class_weights'])
        print 'nn_model train_AUC: ' + str(roc_auc_score(y, nn_model.predict(X)))
        print 'nn_model val_AUC: ' + str(roc_auc_score(val_y, nn_model.predict(val_X)))

    print 'running XGBoost'
    params = params_XGBoost(random_seed=False)
    xgb_model = run_XGBoost(X, y, val_X, val_y, params=params, rounds=2000)
    dval = xgb.DMatrix(val_X, label=val_y)
    dtrain = xgb.DMatrix(X, label=y)
    print 'evaluating bagging'
    performence = {
        'train_AUC': 0,
        'val_AUC': 0
    }
    nn_pred_train = np.reshape(nn_model.predict(X), (len(nn_model.predict(X), )))
    nn_pred_val = np.reshape(nn_model.predict(val_X), (len(nn_model.predict(val_X), )))
    xgb_pred_train = np.asarray(xgb_model.predict(dtrain))
    xgb_pred_val = np.asarray(xgb_model.predict(dval))
    print 'nn: ' + str(nn_pred_train.shape) + " | " 'xgb: ' + str(xgb_pred_train.shape)
    performence['train_AUC'] = roc_auc_score(y, (nn_pred_train + xgb_pred_train) / 2.)
    performence['val_AUC'] = roc_auc_score(val_y, (xgb_pred_val + nn_pred_val) / 2.)
    print 'train_AUC: ' + str(performence['train_AUC']) + " | val_AUC: " + str(performence['val_AUC'])

    print 'submitting'
    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)
    nn_pred_test = nn_model.predict(np.asarray(test_x))
    xgb_pred_test = np.asarray(xgb_model.predict(dtest))

    test_result = pd.DataFrame(columns=['uid', 'score'])
    test_result.uid = test_uid
    test_result.score = (nn_pred_test + xgb_pred_test) / 2.
    test_result.to_csv(experiment_path + '/result.csv', index=None, encoding='utf - 8')

    print 'saving metadata'
    nn_model.save_weights(experiment_path + '/nn_W', overwrite=True)
    xgb_model.save_model(experiment_path + '/xgb_M')
    meta_data = {
        'nn': nn_metadata,
        'xgb': params,
        'performence': performence
    }
    f = open(experiment_path + '/meta_data.json', 'wb')
    meta_json = json.dumps(meta_data, default=lambda o: o.__dict__, indent=4)
    f.write(meta_json)
    f.close()


def bagging_forward_selection_of_learners(X, y, val_X, val_y):
    experiment_name = 'bagging_forward_selection_of_learners'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    # os.mkdir(experiment_path)

    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)

    iters = 0
    old_AUC = 0
    models = []
    meta_datas = []
    nb_ensembled_models = 0
    iters_ensembled = 0
    flag = 1
    while True:
        iters += 1
        preds = 0
        best_bagging_test_pred = 0

        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 50,
            'scale_pos_weight': random.randint(1300, 1500) / 13458.0,
            'eval_metric': 'auc',
            'gamma': random.randint(1, 100) / 100.,
            'max_depth': random.randint(4, 10),
            'lambda ': random.randint(400, 600),
            'subsample': random.randint(1, 100) / 100.,
            'colsample_bytree': random.randint(1, 100) / 100.,
            'min_child_weight': random.randint(2, 15),
            'eta': random.randint(1, 100) / 500.,
            'seed': random_seed,
        }

        model = run_XGBoost(params=params, X=X, y=y, val_X=val_X, val_y=val_y, rounds=1000)
        dval = xgb.DMatrix(val_X, label=val_y)
        val_AUC = roc_auc_score(np.asarray(val_y), model.predict(dval))

        if iters == 1:
            old_AUC = val_AUC
            models.append(model)
            meta_data = {
                'params': params,
                'val_AUC': old_AUC
            }
            meta_datas.append(meta_data)
        elif iters > 1:
            single_pred = model.predict(dval)
            for m in range(len(models)):
                preds += models[m].predict(dval)
            bagging_pred = (preds + single_pred) / (len(preds) + 1)
            bagging_AUC = roc_auc_score(np.asarray(val_y), bagging_pred)
            if bagging_AUC > old_AUC:
                iters_ensembled = iters

                old_AUC = bagging_AUC
                models.append(model)

                meta_data = {
                    'params': params,
                    'val_AUC': val_AUC,
                    'iters': iters_ensembled
                }
                meta_datas.append(meta_data)

                meta_data_update = {
                    'best_bagging_AUC': bagging_AUC,
                    'individual_model_performance': meta_datas,
                    'nb_bagging_models': len(models)
                }

                f = open(experiment_path + '/' + str(flag) + '_meta_data.json', 'wb')
                meta_json = json.dumps(meta_data_update, default=lambda o: o.__dict__, indent=4)
                f.write(meta_json)
                f.close()

                print 'best_bagging_AUC: ' + str(old_AUC)

                for m_ in range(len(models)):
                    best_bagging_test_pred += models[m_].predict(dtest)
                best_bagging_test_pred = best_bagging_test_pred / len(models)
                test_result = pd.DataFrame(columns=['uid', 'score'])
                test_result.uid = test_uid
                test_result.score = best_bagging_test_pred
                test_result.to_csv(experiment_path + '/' + str(flag) + '_result.csv', index=None, encoding='utf - 8')

            if iters - iters_ensembled >= 10:
                flag += 1
                iters = 0
                old_AUC = 0
                models = []
                meta_datas = []
                iters_ensembled = 0
            print 'flag: ' + str(flag) + " | " + 'AUC: ' + str(old_AUC)


def bagging_selection_and_replacement_with_cross_validation(train_xy):
    train_xy_ = train_xy.drop(['uid'], axis=1)
    train_xy_ = np.asarray(train_xy_)
    np.random.shuffle(train_xy_)
    nb_samples = len(train_xy_)

    X_all = train_xy_[:, 0:len(train_xy_.transpose()) - 1]
    y_all = train_xy_[:, len(train_xy_.transpose()) - 1]

    train_xy = train_xy_[0:nb_samples*(4./5.)]
    train_xy_val = train_xy_[nb_samples*(4./5.):]

    X_val_outside_cv = train_xy_val[:, 0:len(train_xy_val.transpose()) - 1]
    y_val_outside_cv = train_xy_val[:, len(train_xy_val.transpose()) - 1]
    dval_outside_cv = xgb.DMatrix(X_val_outside_cv,label=y_val_outside_cv)

    X_train_outside_cv = train_xy[:, 0:len(train_xy.transpose()) - 1]
    y_train_outside_cv = train_xy[:, len(train_xy.transpose()) - 1]

    part_train_1 = train_xy[0:nb_samples * (4. / 5.)]
    part_val_1 = train_xy[nb_samples * (4. / 5.):]

    part_train_2 = np.vstack((train_xy[0:nb_samples * (3. / 5.)], train_xy[nb_samples * (4. / 5.):]))
    part_val_2 = train_xy[nb_samples * (3. / 5.):nb_samples * (4. / 5.)]

    part_train_3 = np.vstack((train_xy[0:nb_samples * (2. / 5.)], train_xy[nb_samples * (3. / 5.):]))
    part_val_3 = train_xy[nb_samples * (2. / 5.):nb_samples * (3. / 5.)]

    part_train_4 = np.vstack((train_xy[0:nb_samples * (1. / 5.)], train_xy[nb_samples * (2. / 5.):]))
    part_val_4 = train_xy[nb_samples * (1. / 5.):nb_samples * (2. / 5.)]

    part_train_5 = train_xy[nb_samples * (1. / 5.):]
    part_val_5 = train_xy[0:nb_samples * (1. / 5.)]

    print part_train_1.shape
    print part_val_1.shape
    print part_train_2.shape
    print part_val_2.shape
    print part_train_3.shape
    print part_val_3.shape
    print part_train_4.shape
    print part_val_4.shape
    print part_train_5.shape
    print part_val_5.shape

    print X_all.shape
    print y_all.shape
    time.sleep(5)

    part_1 = {
        'part_train': part_train_1,
        'part_val': part_val_1
    }
    part_2 = {
        'part_train': part_train_2,
        'part_val': part_val_2
    }
    part_3 = {
        'part_train': part_train_3,
        'part_val': part_val_3
    }
    part_4 = {
        'part_train': part_train_4,
        'part_val': part_val_4
    }
    part_5 = {
        'part_train': part_train_5,
        'part_val': part_val_5
    }

    experiment_name = 'bagging_selection_and_replacement_with_cross_validation'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    #os.mkdir(experiment_path)

    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)

    iters = 0
    old_AUC = 0
    models = []
    meta_datas = []
    refresh = 0
    val_AUC_bag = []
    preds_test_bag = []
    preds_val_bag = []
    while True:
        iters += 1
        preds = 0
        best_bagging_test_pred = 0

        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 50,
            'scale_pos_weight': random.randint(1300, 1500) / 13458.0,
            'eval_metric': 'auc',
            'gamma': random.randint(1, 100) / 100.,
            'max_depth': random.randint(4, 10),
            'lambda ': random.randint(400, 600),
            'subsample': random.randint(1, 100) / 100.,
            'colsample_bytree': random.randint(1, 100) / 100.,
            'min_child_weight': random.randint(2, 15),
            'eta': random.randint(1, 100) / 500.,
            'seed': random_seed,
            'nthread': 16
        }
        val_AUCs = []
        for c in [part_1, part_2, part_3, part_4, part_5]:
            X, y = split_xy_to_individual(c['part_train'])
            val_X, val_y = split_xy_to_individual(c['part_val'])
            model = run_XGBoost(params=params, X=X, y=y, val_X=val_X, val_y=val_y, rounds=10)
            dval = xgb.DMatrix(val_X, label=val_y)
            pred_val = model.predict(dval)
            preds_val_bag.append()
            val_AUCs.append(roc_auc_score(np.asarray(val_y), pred_val))
        run = run_XGBoost(params=params, X=X_train_outside_cv, y=y_train_outside_cv,val_X=X_val_outside_cv,val_y=y_val_outside_cv, rounds=10)
        pred_val = run.predict(dval_outside_cv)
        val_AUC = np.sum(np.asarray(val_AUCs)) / 5.
        if iters == 1:
            old_AUC = val_AUC
            val_AUC_bag.append(val_AUC)
            models.append(model)
            meta_data = {
                'params': params,
                'val_AUCs': val_AUCs,
                'val_AUC': val_AUC
            }
            run = run_XGBoost(params=params, X=X_all, y=y_all,val_X=val_X,val_y=val_y, rounds=10)
            preds_test_bag.append(run.predict(dtest))
            meta_datas.append(meta_data)
        elif iters > 1:
            val_auc_ = 0
            for i in range(len(val_AUC_bag)):
                val_auc_ += val_AUC_bag[i]
            bagging_AUC = (val_auc_ + val_AUC)/(len(val_AUC_bag)+1)

            if old_AUC < val_AUC:
                old_AUC = val_AUC
                refresh += 1
                models = []
                meta_datas = []
                iters = 1
                meta_data_update = {}
                preds_test_bag = []
                val_AUC_bag = []
                print 'refresh:' + str(refresh)
                print 'individual_AUC: ' + str(val_AUC)
                time.sleep(2)

                val_AUC_bag.append(val_AUC)
                models.append(model)
                meta_data = {
                    'params': params,
                    'val_AUCs': val_AUCs,
                    'val_AUC': val_AUC
                }
                meta_datas.append(meta_data)

                run = run_XGBoost(params=params, X=X_all, y=y_all,val_X=val_X,val_y=val_y, rounds=10)
                test_score = run.predict(dtest)
                preds_test_bag.append(test_score)

                meta_data_update = {
                    'individual_model_performance': meta_datas,
                    'best_individual_AUC': old_AUC,
                    'refresh': refresh
                }

                f = open(experiment_path + '/' + 'meta_data.json', 'wb')
                meta_json = json.dumps(meta_data_update, default=lambda o: o.__dict__, indent=4)
                f.write(meta_json)
                f.close()

                test_result = pd.DataFrame(columns=['uid', 'score'])
                test_result.uid = test_uid
                test_result.score = test_score
                test_result.to_csv(experiment_path + '/' + 'result.csv', index=None, encoding='utf - 8')
                continue

            elif bagging_AUC >= old_AUC:
                print 'bagging_AUC >= old_AUC'
                print 'bagging_AUC: ' + str(bagging_AUC)
                time.sleep(2)
                iters_ensembled = iters

                old_AUC = bagging_AUC
                models.append(model)

                meta_data = {
                    'params': params,
                    'val_AUCs': val_AUCs,
                    'val_AUC': val_AUC,
                    'iters_ensembled': iters_ensembled
                }
                meta_datas.append(meta_data)

                meta_data_update = {
                    'individual_model_performance': meta_datas,
                    'best_bagging_AUC': bagging_AUC,
                    'nb_bagging_models': len(models),
                    'refresh_individual': refresh
                }

                f = open(experiment_path + '/' + 'meta_data.json', 'wb')
                meta_json = json.dumps(meta_data_update, default=lambda o: o.__dict__, indent=4)
                f.write(meta_json)
                f.close()

                run = run_XGBoost(params=params, X=X_all, y=y_all,val_X=val_X,val_y=val_y, rounds=10)
                test_score = run.predict(dtest)
                preds_test_bag.append(test_score)
                test_scores = 0
                for i in range(len(preds_test_bag)):
                    test_scores += preds_test_bag
                test_scores = test_scores/len(preds_test_bag)
                test_result = pd.DataFrame(columns=['uid', 'score'])
                test_result.uid = test_uid
                test_result.score = test_scores
                test_result.to_csv(experiment_path + '/' + 'result.csv', index=None, encoding='utf - 8')



def compare_diversity(x1, x2):
    """Return the absolute Pearson correlation between two prediction vectors.

    A lower value means the two models' outputs are more diverse
    (less redundant) and therefore better candidates for bagging.
    """
    paired = np.vstack((x1.transpose(), x2.transpose()))
    correlation = np.corrcoef(paired)[0, 1]
    return abs(correlation)

def bagging_high_performance_and_diverse_models(X, y, val_X, val_y):
    experiment_name = 'bagging_high_performance_and_diverse_models'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    #os.mkdir(experiment_path)

    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)
    dval = xgb.DMatrix(val_X, label=val_y)

    nb_models = 0
    val_preds_all = []
    test_preds_all = []
    val_AUCs_all = []
    val_preds_correlation = []
    test_preds_correlation = []

    print 'choosing models who\'s val_AUC is bigger than 0.705'
    time.sleep(2)
    while True:
        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 50,
            'scale_pos_weight': random.randint(1300, 1500) / 13458.0,
            'eval_metric': 'auc',
            'gamma': random.randint(1, 100) / 100.,
            'max_depth': random.randint(4, 10),
            'lambda ': random.randint(400, 600),
            'subsample': random.randint(1, 100) / 100.,
            'colsample_bytree': random.randint(1, 100) / 100.,
            'min_child_weight': random.randint(2, 15),
            'eta': random.randint(1, 100) / 500.,
            'seed': random_seed,
            'nthread': 16
        }

        run = run_XGBoost(params=params, X=X, y=y, val_X=val_X, val_y=val_y, rounds=2000)
        val_pred = run.predict(dval,ntree_limit=run.best_ntree_limit)
        val_AUC = roc_auc_score(np.asarray(val_y),val_pred)
        if val_AUC >= 0.70:
            nb_models += 1
            val_AUCs_all.append(val_AUC)
            test_pred = run.predict(dtest,ntree_limit=run.best_ntree_limit)
            test_preds_all.append(test_pred)
            val_preds_all.append(val_pred)
        if nb_models >= 80:
            break

    print 'choosing models with low correlations'
    time.sleep(2)
    after_correlation_individual_AUCs = []

    max_1_auc_index = np.argmax(np.asarray(val_AUCs_all))
    val_preds_correlation.append(val_preds_all[max_1_auc_index])
    test_preds_correlation.append(test_preds_all[max_1_auc_index])

    val_AUCs_all_ = np.asarray(val_AUCs_all)
    val_AUCs_all_[max_1_auc_index] = 0
    max_2_auc_index = np.argmax(val_AUCs_all_)
    val_preds_correlation.append(val_preds_all[max_2_auc_index])
    test_preds_correlation.append(test_preds_all[max_2_auc_index])

    after_correlation_individual_AUCs.append(val_AUCs_all[max_1_auc_index])
    after_correlation_individual_AUCs.append(val_AUCs_all[max_2_auc_index])
    correlation_limit = compare_diversity(np.asarray(val_preds_all[max_1_auc_index]),np.asarray(val_preds_all[max_2_auc_index]))
    for m in range(len(val_preds_all)):
        correlations = []
        if m != max_1_auc_index and m != max_2_auc_index:
            for i in range(len(val_preds_correlation)):
                correlations.append(compare_diversity(np.asarray(val_preds_all[m]),np.asarray(val_preds_correlation[i])))
        correlations = np.asarray(correlations)
        if len(correlations[correlations>correlation_limit]) == 0:
            val_preds_correlation.append(val_preds_all[m])
            test_preds_correlation.append(test_preds_all[m])
            after_correlation_individual_AUCs.append(val_AUCs_all[m])

    print 'searching the best number of models to ensemble/bagging'
    time.sleep(2)

    final_AUCs = []
    pred_combined = []
    p = 0
    for i in range(len(val_preds_correlation)):
        pred_combined.append(val_preds_correlation[i])
        for j in range(len(pred_combined)):
            p += pred_combined[j]
        p = p/(j + 1)
        auc = roc_auc_score(np.asarray(val_y),p)
        final_AUCs.append(auc)

    final_AUCs_ = final_AUCs
    final_AUCs = np.asarray(final_AUCs)
    nb_model_final = np.argmax(final_AUCs)

    test_score = 0
    for i in range(nb_model_final+1):
        test_score += test_preds_correlation[i]
    test_score = test_score/(nb_model_final+1)

    test_result = pd.DataFrame(columns=['uid','score'])
    test_result.uid = test_uid
    test_result.score = test_score
    test_result.to_csv(experiment_path + '/result.csv',index=None,encoding='utf - 8')

    meta_data = {
        'individual_AUC_bigger_than_0.705':val_AUCs_all,
        'after_correlation_individual_AUCs':after_correlation_individual_AUCs,
        'AUCs_combine':final_AUCs_,
        'final_AUC':final_AUCs[nb_model_final],
        'final_nb_ensembled_models':nb_model_final+1,
        'correlation_limit':correlation_limit
    }
    f = open(experiment_path + '/meta_data.json','wb')
    meta_json = json.dumps(meta_data,default=lambda o: o.__dict__, indent=4)
    f.write(meta_json)
    f.close()






