import pandas as pd
import numpy as np
import time
import statsmodels.api as sm
from sklearn.decomposition import PCA
from sklearn.linear_model import Lasso, ElasticNet, RidgeCV, Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error


def data_split(data: pd.DataFrame, start='2016-09', end='2018-10'):
    """Split a factor DataFrame into date-windowed train/test features and labels.

    Parameters
    ----------
    data : pd.DataFrame
        Must contain a 'date' column plus the columns 'label', 'label_return',
        'forward_return', 'name', 'code', 'industry' and the feature columns.
    start, end : str
        Inclusive date-label boundaries used for ``.loc`` slicing; the index
        is expected to be sortable so label slicing works.
        NOTE(review): rows at `end` fall into BOTH the train and the test
        slice, exactly as in the original logic — confirm whether an
        exclusive boundary was intended.

    Returns
    -------
    tuple
        (x_train, x_test, y_train, y_test, rtn_test) where x_* are feature
        frames without 'label', y_* are the 'label' series, and rtn_test is
        the untouched test-window rows (all original columns) with 'date'
        restored as a column.
    """
    # Work on a re-indexed view instead of mutating the caller's frame
    # (the original used inplace set_index/drop, which clobbered `data`).
    data = data.set_index('date')
    # Snapshot the test window BEFORE dropping metadata columns so the
    # returned frame keeps name/code/industry/returns for later reporting.
    rtn_test = data.loc[end:]
    features = data.drop(
        ['label_return', 'forward_return', 'name', 'code', 'industry'], axis=1)
    df_train = features.loc[start:end]
    df_test = features.loc[end:]
    # (Two discarded `reset_index()` calls in the original were dead code
    # and have been removed; the date index is kept on the feature frames.)
    return (df_train.drop('label', axis=1),
            df_test.drop('label', axis=1),
            df_train['label'],
            df_test['label'],
            rtn_test.reset_index())

# Load the raw factor data, zero-fill missing values, then carve out the
# date-windowed train/test features, labels and reporting rows.
df = pd.read_csv('data.csv').fillna(0)

x_train, x_test, y_train, y_test, df_test = data_split(df)

#############################################
# Baseline 1: support-vector regression with an RBF kernel and a loosened
# convergence tolerance (tol=0.01); remaining kwargs spell out the defaults.
svr_ = SVR(
    kernel='rbf',
    degree=3,
    coef0=0.0,
    tol=0.01,
    C=1.0,
    epsilon=0.1,
    shrinking=True,
    cache_size=200,
    verbose=False,
    max_iter=-1,
)
svr_.fit(x_train, y_train)
print('svr:', mean_squared_error(y_test, svr_.predict(x_test)))
#############################################
# lgb_ = lgb.LGBMModel(boosting_type='gbdt', num_leaves=35, max_depth=20, max_bin=255, learning_rate=0.03,
#                      n_estimators=10, subsample_for_bin=2000, objective='regression', min_split_gain=0.0,
#                        min_child_weight=0.001, min_child_samples=20, subsample=1.0, verbose=0,
#                        subsample_freq=1, colsample_bytree=1.0, reg_alpha=0.0, reg_lambda=0.0,
#                        random_state=None, n_jobs=-1, silent=True).fit(x_train, np.squeeze(y_train))
# print('lgb:', mean_squared_error(y_test,lgb_.predict(x_test)))
#############################################
# Baseline 2: random forest with pruning constraints and out-of-bag scoring.
rf = RandomForestRegressor(
    n_estimators=50,
    max_depth=25,
    min_samples_split=20,
    min_samples_leaf=10,
    max_features='sqrt',
    oob_score=True,
    random_state=10,
)
rf.fit(x_train, y_train)
rf_pred = rf.predict(x_test)
print('rf:', mean_squared_error(y_test, rf_pred))
#############################################
# Baseline 3: L1-regularized linear model.
ls = Lasso(alpha=0.02)
ls.fit(x_train, y_train)
ls_pred = ls.predict(x_test)
print('lasso:', mean_squared_error(y_test, ls_pred))
#############################################
# Baseline 4: gradient-boosted trees (shallow trees, many rounds).
xgb = XGBRegressor(n_estimators=300, max_depth=3)
xgb.fit(x_train, y_train)
xgb_pred = xgb.predict(x_test)
print('xgb:', mean_squared_error(y_test, xgb_pred))
#############################################
# Baseline 5: elastic net (equal L1/L2 mix, tiny alpha, generous iteration cap).
en = ElasticNet(alpha=5e-4, l1_ratio=0.5, max_iter=100000)
en.fit(x_train, y_train)
print('en:', mean_squared_error(y_test, en.predict(x_test)))
#############################################
# Second level: ridge regression stacked on top of the base models'
# test-set predictions.
# NOTE(review): the meta-model is both fit and scored on y_test, so the
# reported 'total' MSE is optimistic (leakage) — confirm whether a separate
# hold-out set was intended.
base_predictions = {
    'lasso': ls.predict(x_test),
    # 'gbdt': lgb_.predict(x_test),
    'rf': rf.predict(x_test),
    'svr': svr_.predict(x_test),
    'xgb': xgb.predict(x_test),
    'en': en.predict(x_test),
}
stack = pd.DataFrame(base_predictions)
rg = RidgeCV(cv=5)
rg.fit(stack, y_test)
coefs = rg.coef_
intercepts = rg.intercept_
prob = rg.predict(stack)
print('total:', mean_squared_error(y_test, prob))

# Attach the stacked predictions to the retained test rows and persist them.
stacked_scores = pd.Series(prob)
df_test['prob'] = stacked_scores
df_test.to_csv('regression_test.csv', encoding='utf_8_sig')

print('Balala Xiao Mo Xian!')

