import xgboost as xgb
import numpy as np
import sklearn.metrics.ranking as sci_ranking
import sklearn.preprocessing.data as prep
from Xg_Utils.for_missing_data import *
from Xg_Utils.get_data import *
from Xg_Utils.process_data import *
from Ensemble_Utils.normal import *
from Ensemble_Utils.bagging import *
from Ensemble_Utils.optimize_weights import *

# Pipeline: load the DCP2P training data, split it into train/validation,
# mean-impute missing values, then random-search XGBoost hyper-parameters.
# (The parenthesized print('...') call form behaves identically under
# Python 2 and Python 3 for a single string argument.)

# Location of the DCP2P dataset; both the feature and label CSVs live here.
_DATA_DIR = '/media/dell/cb552bf1-c649-4cca-8aca-3c24afca817b/dell/wxm/Data/DCP2P'

print('getting training data')
train_xy = get_train_data(x_path=_DATA_DIR + '/train_x.csv',
                          y_path=_DATA_DIR + '/train_y.csv')

# Random split into a training pair (X, y) and a validation pair (val_X, val_y).
(X, y), (val_X, val_y) = random_split_train_val(train_xy)

print('preprocessing data')
# Replace missing entries with the per-column mean.
# NOTE(review): each split is imputed with its own means here; the usual
# convention is to fit the imputer on the training split and apply it to the
# validation split — confirm this is intentional.
X = M_imputation('mean', X)
val_X = M_imputation('mean', val_X)

# Alternative experiments previously kept here as commented-out code:
#   - single run: params_XGBoost(random_seed=False) + run_XGBoost(X, y, val_X, val_y, params)
#   - optimize_weights_model_based(X, y, val_X, val_y)
#   - bagging_high_performance_and_diverse_models(X, y, val_X, val_y)

# Randomly search XGBoost hyper-parameters, scoring against the validation split.
random_search_the_best_params(X, y, val_X, val_y)
