# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 22:04:02 2021

@author: zhuo木鸟

使用机器学习方法解决问题2的分类问题
"""

import pickle
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import make_scorer, f1_score

# Matplotlib settings so plots can render Chinese (CJK) labels:
# use the SimHei font and keep the minus sign displayable with it
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def init_model_grid():
    '''
    Build the hyper-parameter search grids for every candidate classifier.

    Returns
    -------
    dict
        Maps a short model key ('knn', 'svc', 'dtc', 'rf', 'ada') to the
        parameter grid that grid-search cross-validation will explore.
    '''
    return {
        'knn': {'n_neighbors': [1, 3, 5, 9, 13]},
        'svc': {
            'C': [0.01, 0.05, 0.1, 0.3, 0.5, 0.7, 1, 1.5, 1.8,
                  2, 2.2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5],
            'kernel': ['linear', 'rbf', 'poly'],
        },
        'dtc': {'max_depth': [3, 5, 7, 9, 11, 13, 15, 17, 19, 21],
                'ccp_alpha': [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5]},
        'rf': {'n_estimators': [5, 10, 15, 20, 25, 30, 35, 40, 45,
                                50, 55, 60, 65, 70, 75, 80, 85, 90]},
        # AdaBoost uses logistic regression as its (single) base estimator
        'ada': {'n_estimators': [5, 10, 15, 20, 25, 30, 35, 40],
                'base_estimator': [LogisticRegression()]},
    }


# 模型参数选择
def select_knn(X, y, scorer, grid, cv=5, verbose=True):
    """Pick the best kNN hyper-parameters (the neighbour count k) via
    cross-validated grid search and return them as a dict."""
    searcher = GridSearchCV(
        KNeighborsClassifier(),
        param_grid=grid,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,  # use every available core
    )
    searcher.fit(X, y)
    best = searcher.best_params_
    if verbose:
        print('kNN 最佳参数： ', best)
    return best

def select_svc(X, y, scorer, grid, cv=5, verbose=True):
    '''
    Pick the best SVM-classifier hyper-parameters (C, kernel) via
    cross-validated grid search and return them as a dict.
    '''
    searcher = GridSearchCV(
        SVC(),
        param_grid=grid,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,  # use every available core
    )
    searcher.fit(X, y)
    best = searcher.best_params_
    if verbose:
        print('SVC 的最佳参数: ', best)
    return best

def select_dtc(X, y, scorer, grid, cv=5, verbose=True):
    '''
    Pick the best decision-tree hyper-parameters (max_depth, ccp_alpha)
    via cross-validated grid search and return them as a dict.
    '''
    searcher = GridSearchCV(
        DecisionTreeClassifier(),
        param_grid=grid,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,  # use every available core
    )
    searcher.fit(X, y)
    best = searcher.best_params_
    if verbose:
        print('决策树的最佳参数: ', best)
    return best

def select_rf(X, y, scorer, grid, cv=5, verbose=True):
    '''
    Pick the best random-forest size (n_estimators) via cross-validated
    grid search and return it as a dict.  Sampling ratios and tree depth
    are fixed rather than searched.
    '''
    # Fixed base settings: each tree sees 67% of samples / 33% of
    # features and is capped at depth 5; only the grid is searched.
    base_forest = RandomForestClassifier(max_samples=0.67,
                                         max_features=0.33,
                                         max_depth=5)
    searcher = GridSearchCV(
        base_forest,
        param_grid=grid,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,  # use every available core
    )
    searcher.fit(X, y)
    best = searcher.best_params_
    if verbose:
        print('随机森林最佳参数: ', best)
    return best


def select_ada(X, y, scorer, grid, cv=5, verbose=True):
    '''
    Pick the best AdaBoost hyper-parameters via cross-validated grid
    search and return them as a dict.  The base estimator (logistic
    regression) is supplied through the grid itself.
    '''
    searcher = GridSearchCV(
        AdaBoostClassifier(),
        param_grid=grid,
        cv=cv,
        scoring=scorer,
        n_jobs=-1,  # use every available core
    )
    searcher.fit(X, y)
    best = searcher.best_params_
    if verbose:
        print('AdaBoost 最佳参数： ', best)
    return best


def select_model(X, y, scorer, models_grid, cv=5, verbose=True):
    '''
    Run every per-model parameter search in one call.

    Returns
    -------
    tuple
        Best-parameter dicts in the fixed order
        (knn, svc, dtc, rf, ada).
    '''
    # (grid key, selector function) pairs, in the original output order
    selectors = (
        ('knn', select_knn),
        ('svc', select_svc),
        ('dtc', select_dtc),
        ('rf', select_rf),
        ('ada', select_ada),
    )
    return tuple(
        search(X, y, scorer, models_grid[key], cv, verbose=verbose)
        for key, search in selectors
    )


def predict_estimator(estimator, estimator_name,
                      metric, X_train, X_test, y_train, y_test):
    '''
    Fit *estimator* on the training split and report the weighted *metric*
    score on both the training and the test split.

    Parameters
    ----------
    estimator : object
        Any fit/predict classifier (sklearn-style).
    estimator_name : str
        Display name for the model (used in the printed report).
    metric : callable
        ``metric(y_true, y_pred, average='weighted') -> float``,
        e.g. ``sklearn.metrics.f1_score``.
    X_train, X_test, y_train, y_test : array-like
        Train/test splits of the features and labels.

    Returns
    -------
    (float, float)
        ``(score_train, score_test)``.  (Previous version returned None;
        returning the scores is backward compatible and lets callers use
        them programmatically.)
    '''
    # Fit on the training split, then score predictions on both splits.
    estimator.fit(X_train, y_train)
    y_train_predict = estimator.predict(X_train)
    score_train = metric(y_train, y_train_predict, average='weighted')

    # BUGFIX: the old code scanned this module's globals() for the metric
    # object, which raised IndexError whenever the metric was not a module
    # global here (e.g. passed in from another module).  Use __name__.
    metrics_name = getattr(metric, '__name__', str(metric))

    y_test_predict = estimator.predict(X_test)
    score_test = metric(y_test, y_test_predict, average='weighted')

    # BUGFIX: the old message duplicated "分别为" and printed repr(estimator)
    # while leaving the estimator_name parameter unused; use the name.
    print(f'模型 {estimator_name} 在训练集和测试集中的 {metrics_name} '
          f'分别为: {score_train}, {score_test}')
    return score_train, score_test


def return_score(X_train, y_train, X_test, y_test, metric, verbose=True,
                 knn_param=None,
                 svc_param=None,
                 dtc_param=None,
                 rf_param=None,
                 ada_param=None):
    """
    Build every classifier with its tuned hyper-parameters and, when
    *verbose*, print the train/test score of each via predict_estimator.

    The ``*_param`` arguments default to the best parameters reported in
    the accompanying blog post (the grid search itself is stochastic).
    Passing None selects those stored defaults.

    Parameters
    ----------
    X_train, y_train, X_test, y_test : array-like
        Train/test splits of features and labels.
    metric : callable
        Score function, e.g. ``sklearn.metrics.f1_score``.
    verbose : bool
        When True, evaluate and print every model's scores.
    knn_param, svc_param, dtc_param, rf_param, ada_param : dict or None
        Tuned hyper-parameters per model; None means "use the stored
        defaults".
    """
    # FIX: the previous version used dict literals as default arguments —
    # a mutable-default anti-pattern; use None sentinels instead
    # (behaviorally identical for callers).
    knn_param = {'n_neighbors': 1} if knn_param is None else knn_param
    svc_param = {'C': 3, 'kernel': 'linear'} if svc_param is None else svc_param
    dtc_param = {'ccp_alpha': 0, 'max_depth': 17} if dtc_param is None else dtc_param
    rf_param = {'n_estimators': 70} if rf_param is None else rf_param
    ada_param = {'n_estimators': 40} if ada_param is None else ada_param

    lr = LogisticRegression()
    # FIX: replaced the fragile locals()[name] lookup with an explicit
    # (name, estimator) list, preserving the original evaluation order.
    models = [
        ('lr', lr),
        ('nb', GaussianNB()),
        ('svc', SVC(C=svc_param['C'], kernel=svc_param['kernel'])),
        ('knn', KNeighborsClassifier(n_neighbors=knn_param['n_neighbors'])),
        ('dtc', DecisionTreeClassifier(max_depth=dtc_param['max_depth'],
                                       ccp_alpha=dtc_param['ccp_alpha'])),
        ('rf', RandomForestClassifier(n_estimators=rf_param['n_estimators'],
                                      max_samples=0.67,
                                      max_features=0.33,
                                      max_depth=5)),
        ('ada', AdaBoostClassifier(base_estimator=lr,
                                   n_estimators=ada_param['n_estimators'])),
    ]

    if verbose:
        for name, model in models:
            predict_estimator(model, name, metric,
                              X_train, X_test, y_train, y_test)
    

if __name__ == '__main__':
    # Load the preprocessed data.
    # NOTE(security): pickle.load executes arbitrary code from the file —
    # only load pickle files you produced yourself.
    # FIX: the old code leaked file handles (open() with no close());
    # use context managers.
    with open(r'../results/datasets_2_pca.pkl', 'rb') as f:
        datasets = pickle.load(f)
    # wave_number = pickle.load(open(r'../results/wave_number_2.pkl', 'rb'))
    with open(r'../results/datasets_2_herbs_op.pkl', 'rb') as f:
        herbs_op = pickle.load(f)

    # Evaluate models with the weighted F1 score.
    f1_scorer = make_scorer(f1_score, average='weighted')

    # Build the candidate parameter grids and run the searches.
    models_grid = init_model_grid()
    knn_param, svc_param, dtc_param, rf_param, ada_param = select_model(
        datasets,
        herbs_op,
        f1_scorer,
        models_grid,
        cv=10,
        verbose=True,
    )

    X_train, X_test, y_train, y_test = train_test_split(datasets,
                                                        herbs_op,
                                                        test_size=0.3)

    # The grid search is stochastic, so return_score's defaults hold the
    # best parameters from the blog post; uncomment the keyword arguments
    # below to use the freshly searched ones instead.
    return_score(X_train, y_train, X_test, y_test,
                 # knn_param=knn_param,
                 # svc_param=svc_param,
                 # dtc_param=dtc_param,
                 # rf_param=rf_param,
                 # ada_param=ada_param,
                 metric=f1_score)

