import xgboost as xgb
from numpy import random
import numpy as np
import sklearn.metrics.ranking as sci_ranking
import sklearn.preprocessing.data as prep
from Xg_Utils.for_missing_data import *
from Xg_Utils.get_data import *
from Xg_Utils.process_data import *


def params_XGBoost(random_seed=False):
    """Build an XGBoost parameter dict for binary classification (AUC metric).

    Parameters
    ----------
    random_seed : ``False`` (default) returns the fixed, hand-tuned parameter
        set.  Any other value selects a randomized parameter draw (for random
        search) and is stored as the ``'seed'`` entry.

    Returns
    -------
    dict
        Parameters consumable by ``xgb.train``.

    Notes
    -----
    * Bug fix: the L2-regularization key used to be ``"lambda "`` (trailing
      space) in both branches, which XGBoost silently ignores; it is now
      ``"lambda"`` so the regularization actually takes effect.
    * ``early_stopping_rounds`` placed inside the params dict is ignored by
      ``xgb.train`` (it is a keyword argument of ``train``, not a booster
      parameter); it is kept here because callers may read it from the dict.
    """
    # `== False` kept deliberately (not `is False`) so that 0 also selects
    # the tuned set, matching the original truth table.
    if random_seed == False:  # noqa: E712
        params = {
            "eval_metric": "auc",
            "scale_pos_weight": 0.11034329023629068,
            "lambda": 566,  # was "lambda " (trailing space) — silently ignored
            "seed": 1225,
            "early_stopping_rounds": 50,  # no effect inside params; see docstring
            "booster": "gbtree",
            "colsample_bytree": 0.630908892520934,
            "min_child_weight": 13,
            "subsample": 0.7312550164435608,
            "eta": 0.02164008192611102,
            "objective": "binary:logistic",
            "max_depth": 9,
            "gamma": 0.6127745993254938
        }
    else:
        # numpy's randint upper bound is exclusive: max_depth is drawn from
        # 6..9, lambda from 530..579, min_child_weight from 2..5.
        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'early_stopping_rounds': 20,  # no effect inside params; see docstring
            'scale_pos_weight': random.randint(1380, 1420) / 13458.0,
            'eval_metric': 'auc',
            'gamma': 0.1,
            'max_depth': random.randint(6, 10),
            'lambda': random.randint(530, 580),  # was "lambda " — silently ignored
            'subsample': 0.7,
            'colsample_bytree': 0.4,
            'min_child_weight': random.randint(2, 6),
            'eta': 0.02,
            'seed': random_seed,
        }
    return params


def run_XGBoost(X, y, val_X, val_y, random_seed=False, params=None, rounds=2000):
    """Train an XGBoost booster on (X, y), monitoring (val_X, val_y).

    Parameters
    ----------
    X, y : training features and labels.
    val_X, val_y : validation features and labels, reported first in the
        evaluation watchlist.
    random_seed : NOTE(review) — unused here; kept only so existing call
        sites that pass it keep working (seeding is carried in ``params``).
    params : dict of booster parameters forwarded verbatim to ``xgb.train``.
    rounds : number of boosting rounds.

    Returns
    -------
    The trained ``xgb.Booster``.
    """
    # NOTE(review): original TODO — random search by mean can push the
    # local CV to 0.74.
    train_matrix = xgb.DMatrix(X, label=y)
    val_matrix = xgb.DMatrix(val_X, label=val_y)

    evals = [(val_matrix, 'val'), (train_matrix, 'train')]
    booster = xgb.train(params, train_matrix, rounds, evals)
    # model.save_model('weights_models/xgb.model')
    return booster


def sunbmit_XGBoost(model, meta_path, prep=False):
    """Predict scores for the test set and write a submission CSV.

    NOTE(review): the name contains a typo ("sunbmit" for "submit") but is
    kept unchanged so existing call sites continue to work.

    Parameters
    ----------
    model : trained booster exposing ``predict`` and ``best_ntree_limit``.
    meta_path : directory into which ``result.csv`` is written.
    prep : when truthy, mean-impute missing values in the test features.
        NOTE(review): this parameter shadows the module-level ``prep``
        import alias; renamed documentation only — the keyword name itself
        must stay for backward compatibility.
    """
    test_uid, test_x = get_test_data(
        file_path='/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P/test_x.csv')
    # Truthiness instead of `== False` / `== True`: the original silently
    # skipped imputation for truthy non-bool values such as "yes".
    if prep:
        test_x = M_imputation(mode='mean', X=test_x)
    dtest = xgb.DMatrix(test_x)
    # Predict using the best iteration found during training.
    test_y = model.predict(dtest, ntree_limit=model.best_ntree_limit)
    test_result = pd.DataFrame({'uid': test_uid, 'score': test_y},
                               columns=['uid', 'score'])
    # Fixed 'utf - 8' (worked only via codec-name normalization) and
    # index=None (relied on falsy-None behavior) to the canonical spellings.
    test_result.to_csv(meta_path + '/result.csv', index=False, encoding='utf-8')
