# Ensemble of XGBoost and RandomForest regressors.
# Results are mediocre: predictions differ from ground truth by ~1.4 on average.
# Classification accuracy (threshold 6.1): 79.63%
# TODO: try dropna() instead of fillna(0)?

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

#rmse loss
def rmse(actual,predict):
    """Negated root-mean-square error between ``actual`` and ``predict``.

    The sign is flipped (scorer-style convention): 0 means a perfect fit
    and more negative means a worse fit.
    """
    residual = np.asarray(predict) - np.asarray(actual)
    return -np.sqrt(np.mean(residual ** 2))

def precision(actual, predict, threshold=6.1):
      """Fraction of samples where ``actual`` and ``predict`` land on the
      same side of ``threshold`` (binary-classification agreement rate).

      threshold=6.1 presumably marks the positive-class cutoff for the
      target value (e.g. a blood-glucose level) -- TODO confirm.

      Robustness fix: inputs are coerced with np.asarray so plain Python
      lists work too (the original required numpy arrays for the ``>``
      broadcast to succeed).
      """
      actual = np.asarray(actual)
      predict = np.asarray(predict)
      actual_cls = 1 * (actual > threshold)
      predict_cls = 1 * (predict > threshold)
      agree = 1 * (actual_cls == predict_cls)
      return sum(agree) / len(agree)

#dataset
# Load the GB2312-encoded competition CSV and replace every missing value
# with 0.  NOTE(review): fillna(0) also hits non-numeric columns -- confirm
# this is intended (see the "dropna" TODO in the file header).
dataset = pd.read_csv('d_train_20180102.csv',encoding='gb2312').fillna(0)
# Features: all columns except the first (dropped, presumably an id column
# -- TODO confirm) and the last; target y is the final column.
X = dataset.iloc[:,1:-1].values
y = dataset.iloc[:,-1].values

#data encoding
# Integer-encode the first feature column (categorical; presumably sex --
# TODO confirm against the CSV header).  OneHotEncoder is imported but not
# used anywhere in this file.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X[:, 0] = LabelEncoder().fit_transform(X[:, 0])
# Column 2 is zeroed out, effectively excluding it from the models
# (presumably a non-numeric date column -- TODO confirm).
X[:, 2] = 0
print('编码完成···')

# Splitting the dataset into the Training set and Test set
# Fixed random_state=0 keeps the 80/20 split reproducible across runs.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
print('数据集划分完成···')

#randomForest
# Shallow, regularized forest (depth/leaf limits curb overfitting).
from sklearn.ensemble import RandomForestRegressor
forest = RandomForestRegressor(
    n_estimators=50,
    max_depth=5,
    min_samples_split=4,
    min_samples_leaf=2,
)

#XgBoost
# Boosted trees with a small learning rate and row subsampling.
import xgboost
xgbst = xgboost.XGBRegressor(
    objective='reg:squarederror',
    learning_rate=0.1,
    n_estimators=130,
    max_depth=3,
    eta=0.1,
    min_child_weight=0,
    gamma=2,
    subsample=0.7,
)

#train
# Fit both models on the same split; their feature importances are
# averaged further below.
forest.fit(X_train, y_train)
xgbst.fit(X_train, y_train)

#meanImportance
# Average the two models' feature importances and keep only the features
# contributing more than 2% on average.
importance_xgb = xgbst.feature_importances_
importance_forest = forest.feature_importances_
importance = (importance_xgb+importance_forest)/2
importance_data = pd.DataFrame(importance,columns = ["importance"])
importance_data = importance_data.sort_values("importance",ascending = False)
importance_new = importance_data[importance_data.importance>0.02]

# BUG FIX: select the surviving features from the already-encoded matrix X.
# Row index i of importance_data corresponds to feature column i of X, and
# the RangeIndex survives sort_values/filtering.  The original re-sliced the
# raw `dataset` (importance_new.index+1), which silently threw away the
# label encoding of column 0 and the zeroing of column 2.
X_new = X[:, importance_new.index]
X_train, X_test, y_train, y_test = train_test_split(X_new, y, test_size = 0.2, random_state = 0)

#retrain
# Refit xgboost on the reduced feature set only.
xgbst.fit(X_train, y_train)

#test
# Score the refit model on the held-out split.
y_pred = xgbst.predict(X_test)
final_loss = rmse(y_test, y_pred)
print('预测完成···')
# Spot-check a window of predictions against the ground truth.
for truth, guess in zip(y_test[20:40], y_pred[20:40]):
    print('true:', truth, '  pred:', guess)
print('loss:', final_loss)
final_precision = precision(y_test, y_pred)
print('precision:', final_precision)