# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn.metrics import mean_absolute_percentage_error
import matplotlib.pyplot as plt

def select_knn(X, y, scorer, cv=5, verbose=True):
    """Grid-search the best ``n_neighbors`` for a kNN regressor.

    Parameters: feature matrix ``X``, target ``y``, a sklearn scorer
    object, the number of CV folds and a verbosity flag.
    Returns ``GridSearchCV.best_params_``.
    """
    # Odd neighbour counts from 19 to 43 inclusive.
    candidate_ks = list(range(19, 44, 2))
    search = GridSearchCV(
        KNeighborsRegressor(),
        param_grid={'n_neighbors': candidate_ks},
        cv=cv,
        scoring=scorer,
        n_jobs=-1,
    )
    search.fit(X, y)
    if verbose:
        print('kNN 最佳参数： ', search.best_params_)
    return search.best_params_

def select_svr(X, y, scorer, cv=5, verbose=True):
    """Grid-search SVR hyper-parameters (C, kernel, epsilon).

    Parameters: feature matrix ``X``, target ``y``, a sklearn scorer
    object, the number of CV folds and a verbosity flag.
    Returns ``GridSearchCV.best_params_``.
    """
    # Bug fix: the original epsilon grid listed 0.05 twice, so every
    # (C, kernel, 0.05) combination was cross-validated twice for nothing.
    # NOTE(review): the duplicate was possibly a typo for 0.005 — confirm
    # with the author before widening the grid.
    grid = {
        'C': [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1, 1.25],
        'kernel': ['linear', 'rbf', 'poly'],
        'epsilon': [0, 0.001, 0.01, 0.05, 0.1],
    }

    grid_search = GridSearchCV(SVR(),
                               param_grid=grid,
                               cv=cv,
                               scoring=scorer,
                               n_jobs=-1)
    grid_search.fit(X, y)
    if verbose:
        print('SVR 最佳参数： ', grid_search.best_params_)
    return grid_search.best_params_

def select_dtr(X, y, scorer, cv=5, verbose=True):
    """Tune a decision-tree regressor via cross-validated grid search.

    Searches over tree depth and cost-complexity pruning strength and
    returns ``GridSearchCV.best_params_``.
    """
    search_space = {
        'max_depth': [4, 9, 13, 17, 21, 25],
        'ccp_alpha': [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5],
    }
    searcher = GridSearchCV(
        DecisionTreeRegressor(),
        param_grid=search_space,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,
    )
    searcher.fit(X, y)
    if verbose:
        print('决策树最佳参数： ', searcher.best_params_)
    return searcher.best_params_

def select_rf(X, y, scorer, cv=5, verbose=True):
    """Tune the number of trees in a random forest regressor.

    The forest itself is constrained (67% bootstrap samples, 33% of the
    features per split, depth <= 5); only ``n_estimators`` is searched.
    Returns ``GridSearchCV.best_params_``.
    """
    tree_counts = [5, 15, 25, 35, 45, 50, 65, 75, 85, 95]
    base_forest = RandomForestRegressor(
        max_samples=0.67,
        max_features=0.33,
        max_depth=5,
    )
    searcher = GridSearchCV(
        base_forest,
        param_grid={'n_estimators': tree_counts},
        cv=cv,
        scoring=scorer,
        n_jobs=-1,
    )
    searcher.fit(X, y)
    if verbose:
        print('随机森林最佳参数: ', searcher.best_params_)
    return searcher.best_params_


def select_ada(X, y, scorer, cv=5, verbose=True):
    """Tune the number of boosting stages for AdaBoost.

    The base learner is a ``LinearRegression`` (the original Chinese
    docstring said "logistic regression", but the code boosts an
    ordinary linear regressor).  Returns ``GridSearchCV.best_params_``.
    """
    # NOTE(review): `base_estimator` was renamed to `estimator` in
    # sklearn 1.2 and removed in 1.4 — update when upgrading sklearn.
    stage_counts = [5, 15, 25, 35, 45, 50, 65, 75, 85, 95]
    searcher = GridSearchCV(
        AdaBoostRegressor(base_estimator=LinearRegression()),
        param_grid={'n_estimators': stage_counts},
        cv=cv,
        scoring=scorer,
        n_jobs=-1,
    )
    searcher.fit(X, y)
    if verbose:
        print('AdaBoost 最佳参数： ', searcher.best_params_)
    return searcher.best_params_

def select_model(X, y, scorer, cv=5, verbose=True):
    """Run every per-model grid search and collect the winners.

    Returns a 5-tuple of best-parameter dicts in the order:
    (kNN, SVR, decision tree, random forest, AdaBoost).
    """
    tuners = (select_knn, select_svr, select_dtr, select_rf, select_ada)
    return tuple(
        tuner(X, y, scorer, cv, verbose=verbose) for tuner in tuners
    )

def plot_line(y_train, y_test):
    """Plot the training and testing target series back-to-back.

    Both arguments are pandas Series (``.values`` is used); the test
    series is drawn immediately after the training series on the x-axis.
    """
    fig, ax = plt.subplots()
    plt.subplots_adjust(left=0.125, bottom=0.2, right=0.9, top=0.95,
                    wspace=0.3, hspace=0.35)
    n_train = len(y_train.values)
    n_test = len(y_test.values)
    ax.plot(range(n_train), y_train.values, c='b', label='Training Set')
    ax.plot(range(n_train, n_train + n_test), y_test.values,
            c='r', label='Testing Set')
    ax.set(xlabel='days', ylabel='load', title='load-day-series')
    ax.grid()
    ax.legend()
    plt.show()

def predict_estimator(estimator, X_train, X_test, y_train, y_test, estimator_name):
    """Fit *estimator* in place, plot real vs. fitted curves for the
    training and testing splits side by side, and print both MAPEs.

    Parameters
    ----------
    estimator : sklearn regressor; ``fit`` is called on it (mutated).
    X_train, X_test : feature matrices for the two splits.
    y_train, y_test : pandas Series targets (``.values`` used for lengths).
    estimator_name : short label used as both subplot titles.
    """
    estimator.fit(X_train, y_train)
    y_train_predict = estimator.predict(X_train)
    mape_train = mean_absolute_percentage_error(y_train, y_train_predict)

    fig, ax = plt.subplots(1, 2)
    days_train = len(y_train.values)
    days_test = len(y_test.values)

    ax[0].plot(range(days_train), y_train, label='real')
    ax[0].plot(range(days_train), y_train_predict, '--', label='fitted')
    ax[0].legend()
    ax[0].set(xlabel='days', ylabel='load(Training set)')
    # Bug fix: the left subplot previously used the estimator's repr() as
    # its title, inconsistent with the right subplot's short name.
    ax[0].set_title(estimator_name)

    y_test_predict = estimator.predict(X_test)
    ax[1].plot(range(days_test), y_test, label='real')
    ax[1].plot(range(days_test), y_test_predict, '--', label='fitted')
    ax[1].set(xlabel='days', ylabel='load(Testing set)')
    ax[1].set_title(estimator_name)
    ax[1].legend()
    plt.show()

    mape_test = mean_absolute_percentage_error(y_test, y_test_predict)
    print(f'模型 {estimator} 的在训练集和测试集中的 MAPE 分别为: {mape_train},{mape_test}')

def return_score(X, y, verbose=True,
                 knn_param=None,
                 svr_param=None,
                 dtr_param=None,
                 rf_param=None,
                 ada_param=None):
    """Build the regressors from the tuned parameters, evaluate each on a
    70/30 chronological split, and persist the linear model to disk.

    Parameters
    ----------
    X, y : pandas DataFrame / Series; rows are assumed to be in time
        order (the split is chronological, not shuffled).
    verbose : when True, fit/plot/print every model via predict_estimator.
    knn_param, svr_param, dtr_param, rf_param, ada_param : best-parameter
        dicts; ``None`` falls back to values from an earlier tuning run.
    """
    # Defaults live here rather than in the signature so that no mutable
    # dict is shared across calls (mutable-default-argument pitfall).
    if knn_param is None:
        knn_param = {'n_neighbors': 43}
    if svr_param is None:
        svr_param = {'C': 0.01, 'kernel': 'poly', 'epsilon': 0.1}
    if dtr_param is None:
        dtr_param = {'ccp_alpha': 0.1, 'max_depth': 4}
    if rf_param is None:
        rf_param = {'n_estimators': 15}
    if ada_param is None:
        ada_param = {'n_estimators': 95}

    lr = LinearRegression()
    knn = KNeighborsRegressor(n_neighbors=knn_param['n_neighbors'])
    svr = SVR(C=svr_param['C'], kernel=svr_param['kernel'],
              epsilon=svr_param['epsilon'])
    dtr = DecisionTreeRegressor(max_depth=dtr_param['max_depth'],
                                ccp_alpha=dtr_param['ccp_alpha'])
    rf = RandomForestRegressor(n_estimators=rf_param['n_estimators'],
                               max_samples=0.67,
                               max_features=0.33, max_depth=5)
    ada = AdaBoostRegressor(base_estimator=lr,
                            n_estimators=ada_param['n_estimators'])

    # Chronological 70/30 split.
    split = int(np.round(0.7 * X.shape[0]))
    X_train, X_test = X.iloc[:split, :], X.iloc[split:, :]
    y_train, y_test = y.iloc[:split], y.iloc[split:]

    if verbose:
        # Explicit name->model pairs replace the fragile locals()[name]
        # lookup, which broke silently if a local was renamed.
        models = [('lr', lr), ('svr', svr), ('knn', knn),
                  ('dtr', dtr), ('rf', rf), ('ada', ada)]
        for name, model in models:
            predict_estimator(model, X_train, X_test, y_train, y_test, name)
    else:
        # predict_estimator is what fits the models; fit lr here as well
        # so the pickled model is usable even when verbose=False.
        lr.fit(X_train, y_train)

    # Context manager ensures the file handle is closed (the original
    # leaked an open file object).
    with open(r'../附件/中间数据/lr_model.pkl', 'wb') as f:
        pickle.dump(lr, f)
        
    
if __name__ == '__main__':
    data = pd.read_excel(r'../附件/中间数据/天负荷数据_1997_1998_ml.xlsx')
    data.dropna(axis=0, inplace=True)

    # Last column is the current load (target); the rest are features.
    y = data.iloc[:, -1]
    X = data.iloc[:, :-1]

    # Bug fix: MAPE is an ERROR metric, and GridSearchCV maximizes the
    # score. make_scorer defaults to greater_is_better=True, so the
    # original searches selected the parameters with the LARGEST error.
    # greater_is_better=False negates the score so lower MAPE wins.
    scorer = make_scorer(mean_absolute_percentage_error,
                         greater_is_better=False)

    knn_param, svr_param, dtr_param, rf_param, ada_param = \
        select_model(X, y, scorer, cv=10, verbose=True)

    return_score(X, y, knn_param=knn_param, svr_param=svr_param,
                 dtr_param=dtr_param, rf_param=rf_param, ada_param=ada_param)