import pandas as pd
from sklearn.svm import SVR, LinearSVR
import warnings
from sklearn.model_selection import KFold,GridSearchCV
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from xgboost import XGBRegressor
import numpy as np
# Silence library deprecation chatter for this exploratory script.
warnings.filterwarnings('ignore')

# Dataset version tag; empty string selects the un-versioned files.
version = ''

# Resolve the three CSV paths once, then load labels plus the monthly
# train/test feature tables for the "men" dataset.
label_path = 'data/men_{version}all_label.csv'.format(version=version)
fea_path = 'data/men_{version}all_fea_month.csv'.format(version=version)
fea_tst_path = 'data/men_{version}all_fea_month_tst.csv'.format(version=version)

all_label = pd.read_csv(label_path, header=0)
all_fea_month = pd.read_csv(fea_path, header=0)
all_fea_month_tst = pd.read_csv(fea_tst_path, header=0)

X_trn = all_fea_month
X_tst = all_fea_month_tst

# Per-column standard deviation of the training features.
std = all_fea_month.std(axis=0)

# Feature names ordered from the most to the least spread-out column.
cols = list(std.sort_values(ascending=False).index)
# cols = list(map(str, range(std.shape[0])))

# Shuffled 4-fold splitter with a fixed seed, so the folds are reproducible.
# NOTE(review): kf is currently unused — every GridSearchCV below passes cv=4,
# which builds its own (non-shuffled) KFold internally; pass cv=kf to use it.
kf = KFold(n_splits=4, shuffle=True, random_state=1)

# Regressors tried during development; GradientBoostingRegressor is the one
# actually tuned below.  Kept as comments rather than dead reassignments.
# model = SVR()
# model = LinearSVR(C=10)
# model = RandomForestRegressor
# model = XGBRegressor  # objective='reg:squarederror'
model = GradientBoostingRegressor

# Full training matrix and labels.
# BUG FIX: DataFrame.ix was removed in pandas 1.0; the full-frame selection
# `X_trn.ix[:, :]` it performed here is simply the frame itself.
X = X_trn
y = all_label  # presumably a single label column — verify against the CSV


# Step 1: tune the number of boosting stages with the other hyper-parameters
# held at conservative starting values.
# (sorted(sklearn.metrics.SCORERS.keys()) lists the valid scoring names.)
param_test1 = {'n_estimators': range(20, 50, 5)}
# BUG FIX: the `iid` parameter was deprecated in scikit-learn 0.22 and removed
# in 0.24, so passing iid=False raises TypeError on current versions; the
# post-removal behavior matches iid=False, so dropping it preserves scoring.
gsearch1 = GridSearchCV(estimator=model(learning_rate=0.1, min_samples_split=2,
                                        min_samples_leaf=1, max_depth=3, max_features='sqrt',
                                        subsample=1, random_state=10),
                        param_grid=param_test1, scoring='neg_mean_squared_error', cv=4)
gs1 = gsearch1.fit(X, y)
print(gs1.best_params_, gs1.best_score_)
param1 = gs1.best_params_  # carries the chosen n_estimators into later steps


# Step 2: with n_estimators fixed from step 1, jointly tune tree depth and the
# minimum samples required to split an internal node.
param_test2 = {'max_depth': range(2, 14, 1), 'min_samples_split': range(2, 20, 1)}
# BUG FIX: `iid` was removed in scikit-learn 0.24 (deprecated since 0.22);
# dropping it keeps the iid=False scoring behavior and avoids a TypeError.
gsearch2 = GridSearchCV(estimator=model(learning_rate=0.1, n_estimators=param1['n_estimators'], min_samples_leaf=1,
                                        max_features='sqrt', subsample=1, random_state=10),
                        param_grid=param_test2, scoring='neg_mean_squared_error', cv=4)
gs2 = gsearch2.fit(X, y)
print(gs2.best_params_, gs2.best_score_)
param2 = gs2.best_params_  # only max_depth is reused below; min_samples_split is re-tuned in step 3


# Step 3: re-tune min_samples_split together with min_samples_leaf, keeping the
# n_estimators and max_depth chosen in steps 1–2.
param_test3 = {'min_samples_split': range(2, 20, 1), 'min_samples_leaf': range(1, 20, 1)}
# BUG FIX: `iid` was removed in scikit-learn 0.24 (deprecated since 0.22);
# dropping it keeps the iid=False scoring behavior and avoids a TypeError.
gsearch3 = GridSearchCV(estimator=model(learning_rate=0.1, n_estimators=param1['n_estimators'], max_depth=param2['max_depth'],
                                        max_features='sqrt', subsample=1, random_state=10),
                        param_grid=param_test3, scoring='neg_mean_squared_error', cv=4)
gs3 = gsearch3.fit(X, y)
print(gs3.best_params_, gs3.best_score_)
param3 = gs3.best_params_

# Step 4: tune the number of features considered per split.
# NOTE(review): this grid goes up to 245 — it assumes X has at least that many
# columns; GridSearchCV fits will fail for max_features > n_features. Confirm
# against the feature table.
param_test4 = {'max_features': range(5, 250, 5)}
# BUG FIX: `iid` was removed in scikit-learn 0.24 (deprecated since 0.22);
# dropping it keeps the iid=False scoring behavior and avoids a TypeError.
gsearch4 = GridSearchCV(estimator=model(learning_rate=0.1, n_estimators=param1['n_estimators'],
                                        max_depth=param2['max_depth'], min_samples_leaf=param3['min_samples_leaf'],
                                        min_samples_split=param3['min_samples_split'], subsample=1, random_state=10),
                        param_grid=param_test4, scoring='neg_mean_squared_error', cv=4)
gs4 = gsearch4.fit(X, y)
print(gs4.best_params_, gs4.best_score_)
param4 = gs4.best_params_

# Step 5: tune the row subsampling fraction used for each boosting stage,
# with every previously selected hyper-parameter fixed.
param_test5 = {'subsample': [0.6, 0.7, 0.75, 0.8, 0.85, 0.9]}
# BUG FIX: `iid` was removed in scikit-learn 0.24 (deprecated since 0.22);
# dropping it keeps the iid=False scoring behavior and avoids a TypeError.
gsearch5 = GridSearchCV(estimator=model(learning_rate=0.1, n_estimators=param1['n_estimators'], max_features=param4['max_features'],
                                        max_depth=param2['max_depth'], min_samples_leaf=param3['min_samples_leaf'],
                                        min_samples_split=param3['min_samples_split'], random_state=10),
                        param_grid=param_test5, scoring='neg_mean_squared_error', cv=4)
gs5 = gsearch5.fit(X, y)
# NOTE(review): the final best subsample is only printed, never stored — save
# gs5.best_params_ if a later stage should consume it.
print(gs5.best_params_, gs5.best_score_)