# XGBoost
# Performs poorly: predictions deviate from the true values by roughly 1.3
# Threshold accuracy: 80.07%

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset (CSV is GB2312-encoded)
dataset = pd.read_csv('d_train_20180102.csv',encoding='gb2312')
#dataset.fillna(method='ffill')#forward-fill missing values
#dataset.fillna(method='bfill')#back-fill missing values
# NOTE(review): even if re-enabled, the fillna calls above are no-ops —
# fillna returns a new DataFrame unless the result is assigned back
# (or inplace=True is passed). Fix before uncommenting.

# Features: every column except the first (an ID, presumably — confirm against
# the CSV header) and the last; target: the last column.
X = dataset.iloc[:,1:-1].values
y = dataset.iloc[:,-1].values
#print(X[0],y[0])

# Loss helper: returns the NEGATED root-mean-square error, so larger is
# better — the sign convention sklearn's make_scorer expects (it maximizes).
def rmse(actual, predict):
    """Return -RMSE between *predict* and *actual* (0 is a perfect fit)."""
    diff = np.asarray(predict) - np.asarray(actual)
    return -np.sqrt(np.mean(diff ** 2))

def precision(actual, predict, threshold=6.1):
    """Fraction of samples where prediction and truth land on the same side
    of *threshold*.

    Parameters:
        actual, predict: 1-D numeric sequences (arrays or lists) of equal length.
        threshold: cut-off for binarizing both series (default 6.1, the value
            previously hard-coded; presumably a clinical cut-off — confirm).

    Returns a float in [0, 1].
    Raises ValueError on empty input instead of a cryptic division error.
    """
    actual = np.asarray(actual)
    predict = np.asarray(predict)
    if actual.size == 0:
        raise ValueError('precision() requires at least one sample')
    # Elementwise agreement of the boolean "above threshold" labels;
    # the mean of the boolean matches is exactly the agreement rate.
    return float(np.mean((actual > threshold) == (predict > threshold)))

# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# Integer-encode column 0 in place (a categorical field — gender, presumably;
# NOTE(review): column semantics inferred from position only, confirm vs CSV).
X[:, 0] = LabelEncoder().fit_transform(X[:, 0])
# Column 2 is overwritten with a constant, effectively dropping it as a
# feature (presumably a non-numeric field such as a date — TODO confirm).
X[:, 2] = 0
print('编码完成···')
'''
labelencoder_X = LabelEncoder()
X[:, :] = labelencoder_X.fit_transform(X[:, :])
'''

'''
#todo
onehotencoder = OneHotEncoder()
X = onehotencoder.fit_transform(X).toarray()
print('onehot编码完成···')
'''

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# 80/20 split; fixed random_state keeps the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print('数据集划分完成···')
# Fitting XGBoost to the Training set
import xgboost
# Hand-tuned hyper-parameters (see the commented-out grid search further down
# for the search that explored this neighbourhood).
learning_rate = 0.1
n_estimators = 130
max_depth = 3
# NOTE(review): eta is XGBoost's alias for learning_rate; passing both is
# redundant — confirm which one the installed version actually honours.
eta = 0.1
min_child_weight = 0
gamma = 2
subsample = 0.7

# Despite the name, `classifier` is a REGRESSOR (squared-error objective)
# predicting a continuous target.
classifier = xgboost.XGBRegressor(objective ='reg:squarederror',
	              learning_rate = learning_rate,
                  n_estimators = n_estimators,
                  max_depth = max_depth,
                  eta = eta,
                  min_child_weight = min_child_weight,
                  gamma = gamma,
                  subsample = subsample)

'''
#网格搜索调参
#supergram
learning_rate = [0.1]
n_estimators = [70]
max_depth = [5]
#min_child_weight = [0.1,1]
gamma = [1]
subsample = [0.9]
#colsample_btree = [0.5,0.6,0.7,0.8]


from sklearn.model_selection import GridSearchCV #网格搜索
param_grid = dict(learning_rate = learning_rate,
                  n_estimators = n_estimators,
                  max_depth = max_depth,
                  #min_child_weight = min_child_weight,
                  gamma = gamma,
                  subsample = subsample)


from sklearn.metrics import make_scorer


rmse_score = make_scorer(rmse)

grid_search = GridSearchCV(classifier,param_grid,scoring = rmse_score,n_jobs = -1)
grid_result = grid_search.fit(X_train, y_train) #运行网格搜索
print("Best: %f using %s" % (grid_result.best_score_,grid_search.best_params_))
means = grid_result.cv_results_['mean_test_score']
params = grid_result.cv_results_['params']
for mean,param in zip(means,params):
	print("%f  with:   %r" % (mean,param))
'''
# Train the final model, persist it, and evaluate on the held-out test set.
classifier.fit(X_train, y_train,verbose = True)
# NOTE(review): 'test.modle' looks like a typo for 'test.model'; the print
# below uses the same spelling, so reloading by this exact name still works.
classifier.save_model('test.modle')
print('训练完成···模型保存test.modle')

# Predicting the Test set results

y_pred = classifier.predict(X_test)
# rmse() returns the NEGATED root-mean-square error (larger is better).
final_loss = rmse(y_test,y_pred)

print('预测完成···')
# Spot-check a slice of predictions against ground truth.
for i in range(20,40):
	print('true:',y_test[i],'  pred:',y_pred[i])
print('loss:',final_loss)
# Fraction of samples on the same side of the 6.1 cut-off
# (presumably a clinical threshold, e.g. blood glucose — TODO confirm).
final_precision = precision(y_test,y_pred)
print('precision:',final_precision)


'''
for i in y_test:
	print(i)
print('---------')
for i in y_pred:
	print(i)
'''
'''
#特征重要性可视化
importance = classifier.feature_importances_
importance_data = pd.DataFrame(importance,columns = ["importance"])
importance_data = importance_data.sort_values("importance",ascending = False)
print(importance_data)
'''
'''
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
'''

'''
# Applying k-Fold Cross Validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)
accuracies.mean()
accuracies.std()
print(accuracies)
'''
print('supergram:','learning_rate =',learning_rate,
                  ' n_estimators =',n_estimators,
                  ' max_depth =',max_depth,
                  ' eta =',eta,
                  ' min_child_weight =',min_child_weight,
                  ' gamma =',gamma,
                  ' subsample =',subsample)
