from Xg_Utils.params_run_submit import *
from numpy import random
from sklearn.metrics import roc_auc_score
import os
import json
import time

random_seed = 1225

def normal_baseline(param, run, submit, X, y, val_X, val_y):
    """Train a single model with the supplied runner and hand it to the submitter.

    ``param`` is accepted for interface compatibility but is not used here.
    ``run`` is called with the train/validation arrays (``random_seed=False``)
    and whatever it returns is passed straight to ``submit``.
    """
    trained_model = run(X, y, val_X, val_y, random_seed=False)
    submit(trained_model)


def random_search_the_best_params(X, y, val_X, val_y):
    experiment_name = 'random_search_the_best_params'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')

    #os.mkdir(experiment_path)
    old_AUC = 0
    while True:
        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 50,
            'scale_pos_weight': random.randint(1300, 1500) / 13458.0,
            'eval_metric': 'auc',
            'gamma': random.randint(1, 100) / 100.,
            'max_depth': random.randint(4, 10),
            'lambda ': random.randint(400, 600),
            'subsample': random.randint(1, 100) / 100.,
            'colsample_bytree': random.randint(1, 100) / 100.,
            'min_child_weight': random.randint(2, 15),
            'eta': random.randint(1, 100) / 500.,
            'seed': random_seed,
            'nthread':16
        }


        model = run_XGBoost(params=params, X=X, y=y, val_X=val_X, val_y=val_y, rounds=2000)
        dval = xgb.DMatrix(val_X, label=val_y)

        val_AUC = roc_auc_score(np.asarray(val_y), model.predict(dval,ntree_limit=model.best_ntree_limit))
        if val_AUC > old_AUC:
            old_AUC = val_AUC
            meta_data = {
                'params':params,
                "val_AUC":old_AUC
            }
            f = open(experiment_path + '/meta_data.json', 'wb')
            meta_json = json.dumps(meta_data, default=lambda o: o.__dict__, indent=4)
            f.write(meta_json)
            f.close()
            print 'val_AUC:' + str(val_AUC)
            sunbmit_XGBoost(model,meta_path=experiment_path,prep=True)


def split_xy_to_individual(xy):
    """Split a 2-D array whose last column is the label into (X, y).

    Returns a tuple ``(features, labels)`` where ``features`` is every column
    but the last and ``labels`` is the final column.
    """
    nb_cols = xy.shape[1]
    return xy[:, :nb_cols - 1], xy[:, nb_cols - 1]

def random_search_the_best_params_via_cv(train_xy):
    (X_submit,y_submit),(val_X_submit,val_y_submit) = random_split_train_val(train_xy)

    train_xy = train_xy.drop(['uid'], axis=1)
    train_xy = np.asarray(train_xy)
    np.random.shuffle(train_xy)
    nb_samples = len(train_xy)

    X_all = train_xy[:, 0:len(train_xy.transpose()) - 1]
    y_all = train_xy[:, len(train_xy.transpose()) - 1]

    part_train_1 = train_xy[0:nb_samples * (4. / 5.)]
    part_val_1 = train_xy[nb_samples * (4. / 5.):]

    part_train_2 = np.vstack((train_xy[0:nb_samples * (3. / 5.)], train_xy[nb_samples * (4. / 5.):]))
    part_val_2 = train_xy[nb_samples * (3. / 5.):nb_samples * (4. / 5.)]

    part_train_3 = np.vstack((train_xy[0:nb_samples * (2. / 5.)], train_xy[nb_samples * (3. / 5.):]))
    part_val_3 = train_xy[nb_samples * (2. / 5.):nb_samples * (3. / 5.)]

    part_train_4 = np.vstack((train_xy[0:nb_samples * (1. / 5.)], train_xy[nb_samples * (2. / 5.):]))
    part_val_4 = train_xy[nb_samples * (1. / 5.):nb_samples * (2. / 5.)]

    part_train_5 = train_xy[nb_samples * (1. / 5.):]
    part_val_5 = train_xy[0:nb_samples * (1. / 5.)]

    print part_train_1.shape
    print part_val_1.shape
    print part_train_2.shape
    print part_val_2.shape
    print part_train_3.shape
    print part_val_3.shape
    print part_train_4.shape
    print part_val_4.shape
    print part_train_5.shape
    print part_val_5.shape

    print X_all.shape
    print y_all.shape
    time.sleep(5)

    part_1 = {
        'part_train': part_train_1,
        'part_val': part_val_1
    }
    part_2 = {
        'part_train': part_train_2,
        'part_val': part_val_2
    }
    part_3 = {
        'part_train': part_train_3,
        'part_val': part_val_3
    }
    part_4 = {
        'part_train': part_train_4,
        'part_val': part_val_4
    }
    part_5 = {
        'part_train': part_train_5,
        'part_val': part_val_5
    }

    experiment_name = 'random_search_the_best_params_via_cv'
    experiment_path = os.path.join('Xg_metadata', experiment_name).replace('\\', '/')
    #os.mkdir(experiment_path)

    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    dtest = xgb.DMatrix(test_x)

    iteras = 0
    old_AUC = 0
    while True:
        iteras += 1
        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 50,
            'scale_pos_weight': random.randint(1200, 1600) / 13458.0,
            'eval_metric': 'auc',
            'gamma': random.randint(1, 100) / 100.,
            'max_depth': random.randint(2, 16),
            'lambda ': random.randint(300, 800),
            'subsample': random.randint(1, 100) / 100.,
            'colsample_bytree': random.randint(1, 100) / 100.,
            'min_child_weight': random.randint(2, 20),
            'eta': random.randint(1, 100) / 500.,
            'seed': random_seed,
            'nthread':16
        }
        val_AUCs = []
        for part in [part_1,part_2,part_3,part_4,part_5]:
            X, y = split_xy_to_individual(part['part_train'])
            val_X, val_y = split_xy_to_individual(part['part_val'])
            dval = xgb.DMatrix(val_X,label=val_y)

            model = run_XGBoost(params=params, X=X, y=y, val_X=val_X, val_y=val_y, rounds=1000)
            val_pred = model.predict(dval,ntree_limit=model.best_ntree_limit)
            val_AUC = roc_auc_score(np.asarray(val_y),val_pred)
            val_AUCs.append(val_AUC)
        val_AUC_cv = np.sum(np.asarray(val_AUCs)) / 5.

        if val_AUC_cv > old_AUC:
            old_AUC = val_AUC_cv

            model_on_all = run_XGBoost(params=params, X=X_all, y=y_all, val_X=val_X, val_y=val_y, rounds=1000)
            test_pred_on_all = model_on_all.predict(dtest)

            model_on_submit = run_XGBoost(params=params, X=X_submit, y=y_submit, val_X=val_X_submit, val_y=val_y_submit, rounds=2000)
            test_pred_on_submit = model_on_submit.predict(dtest,ntree_limit=model_on_submit.best_ntree_limit)
            meta_data = {
                'val_AUC_cv':val_AUC_cv,
                'val_AUCs':val_AUCs,
                'params':params,
                'iteras':iteras
            }

            print 'saving metadata: ' + " | val_AUC_cv: " + str(val_AUC_cv)
            time.sleep(3)

            f = open(experiment_path + '/meta_data.json','wb')
            meta_json = json.dumps(meta_data, default=lambda o: o.__dict__, indent=4)
            f.write(meta_json)
            f.close()

            test_result = pd.DataFrame(columns=['uid','score'])
            test_result.uid = test_uid
            test_result.score = test_pred_on_all
            test_result.to_csv(experiment_path + '/result_on_all.csv',index=None,encoding='utf - 8')

            test_result_on_submit = pd.DataFrame(columns=['uid','score'])
            test_result_on_submit.uid = test_uid
            test_result_on_submit.score = test_pred_on_submit
            test_result_on_submit.to_csv(experiment_path + '/result_on_submit.csv',index=None,encoding='utf - 8')


