# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, make_scorer
import matplotlib.pyplot as plt
from solve_q2 import plot_month_data

# Configure matplotlib for Chinese labels: SimHei supplies the CJK glyphs,
# and disabling unicode_minus keeps the minus sign renderable in that font.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Font properties dict passed to plt.legend(prop=font) by the plotting code.
font = {'family' : 'SimHei',
'weight' : 'normal',
'size'   : 10,
}

def init_model_grid():
    '''
    Build the hyper-parameter grid for every candidate regressor.

    Returns
    -------
    dict
        Maps a short model key ('knn', 'svr', 'dtr', 'rf', 'ada') to the
        param_grid dict handed to GridSearchCV for that model.
    '''
    models_grid = {}
    models_grid['knn'] = {'n_neighbors': [1, 3, 5, 9, 13]}
    # FIX: the original epsilon list contained 0.05 twice
    # ([0, 0.001, 0.05, 0.01, 0.05, 0.1]); the stray duplicate is replaced
    # with 0.005 so the grid is monotonic and searches a distinct value
    # instead of re-evaluating the same one.
    models_grid['svr'] = {
        'C': [0.01, 0.02, 0.05, 0.07, 0.1],
        'kernel': ['linear', 'rbf', 'poly'],
        'epsilon': [0, 0.001, 0.005, 0.01, 0.05, 0.1],
    }
    models_grid['dtr'] = {'max_depth': [3, 5, 7, 9],
                          'ccp_alpha': [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]}

    models_grid['rf'] = {'n_estimators': [5, 10, 15, 20, 25]}

    # AdaBoost boosts a plain linear-regression base learner.
    models_grid['ada'] = {'n_estimators': [5, 10, 15, 20, 25],
                          'base_estimator': [LinearRegression()]}

    return models_grid
    
def select_knn(X, y, scorer, grid, cv=5, verbose=True):
    """"筛选kNN算法的最合适参数k"""
    grid_search = GridSearchCV(KNeighborsRegressor(),\
                                param_grid=grid,
                                cv=cv,
                                scoring=scorer,
                                n_jobs=-1)
    
    grid_search.fit(X, y)
    if verbose:
        print('kNN 最佳参数： ', grid_search.best_params_)
    return grid_search.best_params_

def select_svr(X, y, scorer, grid, cv=5, verbose=True):
    '''
    选择 SVR 最合适参数
    '''
    grid_search = GridSearchCV(SVR(),\
                                param_grid=grid,
                                cv=cv,
                                scoring=scorer,
                                n_jobs=-1)
    
    grid_search.fit(X, y)
    if verbose:
        print('SVR 最佳参数： ', grid_search.best_params_)
    return grid_search.best_params_

def select_dtr(X, y, scorer, grid, cv=5, verbose=True):
    '''
    筛选决策树的最佳参数
    '''
    grid_search = GridSearchCV(DecisionTreeRegressor(),
                                param_grid=grid, cv=cv, \
                                scoring=scorer,
                                n_jobs=-1)
    grid_search.fit(X, y)
    if verbose:
        print('决策树最佳参数： ', grid_search.best_params_) 
    return grid_search.best_params_

def select_rf(X, y, scorer, grid, cv=5, verbose=True):
    '''
    筛选随机森林的最佳参数
    '''
    grid_search = GridSearchCV(RandomForestRegressor(max_samples=0.67,\
                                max_features=0.33, max_depth=5), \
                                param_grid=grid, cv=cv,\
                                scoring=scorer,
                                n_jobs=-1)
    grid_search.fit(X, y)
    if verbose:
        print('随机森林最佳参数: ', grid_search.best_params_)
    return grid_search.best_params_


def select_ada(X, y, scorer, grid, cv=5, verbose=True):
    '''
    筛选 AdaBoost 的最佳参数，其中基模型为逻辑回归模型
    '''
    grid_search = GridSearchCV(AdaBoostRegressor(),\
                                param_grid=grid,
                                cv=cv,
                                scoring=scorer,
                                n_jobs=-1)
    
    
    grid_search.fit(X, y)
    if verbose:    
        print('AdaBoost 最佳参数： ', grid_search.best_params_)
    return grid_search.best_params_

def select_model(X, y, scorer, models_grid, cv=5, verbose=True):
    '''
    将筛选参数整合为一个函数
    '''
    
    knn_param = select_knn(X, y, scorer, models_grid['knn'],\
                        cv, verbose=verbose)
    svr_param = select_svr(X, y, scorer, models_grid['svr'],\
                        cv, verbose=verbose)
    dtr_param = select_dtr(X, y, scorer, models_grid['dtr'],\
                        cv, verbose=verbose)
    rf_param = select_rf(X, y, scorer, models_grid['rf'], \
                        cv, verbose=verbose)
    ada_param = select_ada(X, y, scorer, models_grid['ada'], \
                        cv, verbose=verbose)
    return knn_param, svr_param, dtr_param, rf_param, ada_param

def predict_estimator(estimator, estimator_name,\
                    metric, scaler, X_train, X_test, y_train, y_test):
    '''
    Fit one model, score it on train and test, and plot actual vs. fitted
    curves side by side (figure is also saved as ../图片/<name>.png).

    Parameters
    ----------
    estimator : unfitted sklearn-style regressor; fitted in place here.
    estimator_name : label used in the legend and the saved-figure filename.
    metric : metric function (e.g. mean_squared_error); must be a
        module-level name of this file — see the globals() lookup below.
    scaler : fitted scaler whose inverse_transform maps the scaled target
        back to original units.
    y_train, y_test : pandas Series of scaled targets (``.values`` is read).
    '''
    estimator.fit(X_train, y_train)
    y_train_predict = estimator.predict(X_train)
    test_num = range(len(y_test.values))
    train_num = range(len(y_train.values))
    
    # Undo the target scaling so scores and plots are in original units.
    # NOTE(review): inverse_transform is called on 1-D arrays here; recent
    # scikit-learn scalers require 2-D input — confirm the pickled scaler
    # accepts this shape.
    y_train = scaler.inverse_transform(y_train)
    y_train_predict = scaler.inverse_transform(y_train_predict)
    score_train = metric(y_train, y_train_predict)
    
    # Recover the metric's variable name (e.g. 'mean_squared_error') for the
    # printed report; only works when the metric is a global of this module.
    metrics_name = [name for name in globals() if globals()[name] is metric][0]
    
    # Left subplot: training set, actual vs. fitted.
    fig = plt.figure()
    plt.subplots_adjust(left=0.125, bottom=None, right=0.9, top=None,
                    wspace=0.3, hspace=None)
    plt.subplot(1, 2, 1)
    plt.plot(train_num, y_train, label='实际数据')
    plt.plot(train_num, y_train_predict, linestyle='--',\
            label=f'{estimator_name} 拟合数据')
            
    plt.xlabel('日期序列', fontsize=10)
    plt.ylabel('事故次数', fontsize=10)
    plt.title('训练集数据')
    plt.legend(prop=font)

    # Right subplot: test set, actual vs. fitted (same inverse-scaling).
    plt.subplot(1, 2, 2)
    y_test_predict = estimator.predict(X_test)
    y_test = scaler.inverse_transform(y_test)
    y_test_predict = scaler.inverse_transform(y_test_predict)
    
    plt.plot(test_num, y_test, label='实际数据')
    plt.plot(test_num, y_test_predict, linestyle='--',\
            label=f'{estimator_name} 拟合数据')
            
    plt.xlabel('日期序列', fontsize=10)
    plt.ylabel('事故次数', fontsize=10)
    plt.title('测试集数据')
    plt.legend(prop=font)
    fig.savefig(f"../图片/{estimator_name}.png")
    plt.show()

    score_test = metric(y_test, y_test_predict)
    print(f'模型 {estimator} 的在训练集和测试集中的 {metrics_name} 分别为：\n' + \
            f'分别为: {score_train}, {score_test}')
    
def return_score(X_train, y_train, X_test, y_test, metric, scaler=None,
            verbose=True, \
            knn_param={'n_neighbors':43}, \
            svr_param={'C': 0.01, 'kernel': 'poly', 'epsilon':0.1},\
            dtr_param={'ccp_alpha':0.1, 'max_depth':4}, \
            rf_param={'n_estimators':15},\
            ada_param={'n_estimators':95}):
    """Build every model from the tuned parameters and, when verbose, fit
    and plot each one via predict_estimator.

    Parameters
    ----------
    metric : metric function passed through to predict_estimator.
    scaler : fitted scaler for inverting the target transform; must be
        provided when ``verbose`` is truthy.
        FIX: the original signature had no ``scaler`` parameter — the call
        site passed the scaler positionally into ``verbose`` and the
        function only worked because it found ``scaler`` in the module
        globals.  The parameter now sits exactly where the caller already
        passes it.
    verbose : when truthy, draw the train/test comparison plots.
    knn_param..ada_param : tuned hyper-parameter dicts.  The dict defaults
        are read-only here (never mutated), so the shared-mutable-default
        pitfall does not bite.
    """
    lr = LinearRegression()
    knn = KNeighborsRegressor(n_neighbors=knn_param['n_neighbors'])
    svr = SVR(C=svr_param['C'], kernel=svr_param['kernel'],
              epsilon=svr_param['epsilon'])

    dtr = DecisionTreeRegressor(max_depth=dtr_param['max_depth'],
                                ccp_alpha=dtr_param['ccp_alpha'])

    rf = RandomForestRegressor(n_estimators=rf_param['n_estimators'],
                               max_samples=0.67,
                               max_features=0.33, max_depth=5)
    ada = AdaBoostRegressor(base_estimator=lr,
                            n_estimators=ada_param['n_estimators'])

    if verbose:
        # Explicit name/model pairs replace the fragile locals() lookup of
        # the original, which depended on local variable names as strings.
        estimators = [('lr', lr), ('svr', svr), ('knn', knn),
                      ('dtr', dtr), ('rf', rf), ('ada', ada)]
        for name, model in estimators:
            predict_estimator(model, name, metric,
                              scaler, X_train, X_test, y_train, y_test)

def predict_future(model, first_row, n_months=12):
    '''
    Roll a fitted model forward to forecast the next ``n_months`` values.

    Each step feeds the current sliding window to ``model.predict``, then
    shifts the window left by one and appends the prediction.

    Parameters
    ----------
    model : fitted regressor exposing ``predict``.
    first_row : sequence holding the most recent window of feature values;
        its length sets the window width (the original hard-coded 5).
    n_months : int, number of future steps to forecast (default 12,
        matching the original behaviour).

    Returns
    -------
    numpy.ndarray of shape (n_months,) with predictions in time order.
    '''
    window = len(first_row)
    data = np.ones((n_months + 1, window))
    data[0, :] = first_row
    for month in range(n_months):
        features = data[month, :].reshape((1, -1))
        y = model.predict(features)
        # Slide the window: drop the oldest value, append the prediction.
        data[month + 1, :-1] = data[month, 1:]
        data[month + 1, -1] = y

    return data[1:, -1]
    
        
    
if __name__ == '__main__':
    # Monthly accident counts after a sliding-window transformation: the
    # last column is the target, the preceding columns are lagged features.
    data_window = pd.read_excel(r'../附件/滑动窗口后的次数数据（月）.xlsx', index_col=0)
    # NOTE(review): pickle.load is only safe on trusted files; this scaler
    # pickle is produced locally by a sibling script, so that holds here.
    scaler = pickle.load(open(r'../附件/scaler.pkl', 'rb'))
    # Split the data: the last 12 months form the hold-out test set.
    X = data_window.iloc[:, :-1]
    y = data_window.iloc[:, -1]
    X_train = data_window.iloc[:-12, :-1]
    y_train = data_window.iloc[:-12, -1]
    X_test = data_window.iloc[-12:, :-1]
    y_test = data_window.iloc[-12:, -1]
    
    scorer = make_scorer(mean_squared_error)
    models_grid = init_model_grid()
    
    # Grid-search the best hyper-parameters for every candidate model.
    knn_param, svr_param, dtr_param, \
                    rf_param, ada_param = select_model(X, \
                                y, scorer, models_grid, cv=10, verbose=True)
    
    # NOTE(review): `scaler` is passed positionally here, but return_score's
    # signature (L186) has no scaler parameter — it lands in `verbose`, and
    # the function only works because it also finds `scaler` in the module
    # globals.  Confirm the intended signature.
    return_score(X_train, y_train, X_test, y_test, mean_squared_error, \
                scaler,
                knn_param=knn_param, svr_param=svr_param, \
                dtr_param=dtr_param, rf_param=rf_param, \
                ada_param=ada_param)
    columns = X.columns
    
    # Refit a random forest on ALL data with the tuned tree count, then roll
    # it forward to forecast the next 12 months of accident counts.
    rf = RandomForestRegressor(n_estimators=rf_param['n_estimators'],\
                                max_samples=0.67,\
                                max_features=0.33, max_depth=5)
    rf.fit(X, y)
    
    first_row = list(X.iloc[-1,:])
    month_predict_data = predict_future(rf, first_row)
    # NOTE(review): inverse_transform on a 1-D array — recent scikit-learn
    # scalers require 2-D input; confirm the pickled scaler accepts this.
    month_predict_data = scaler.inverse_transform(month_predict_data)
    month_predict_data = np.round(month_predict_data)
    plot_month_data(month_predict_data)
