# -*- coding: utf-8 -*-
"""
Created on Sun Oct 17 16:13:04 2021

@author: zhuo木鸟

用深度学习算法（多层感知器）解决第二问
"""

import pickle
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.metrics import make_scorer
from tensorflow.keras.layers import Dense, Dropout, Input
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# Font settings for matplotlib text elements (Times New Roman, 20 pt).
font1 = {
    'family': 'Times New Roman',
    'weight': 'normal',
    'size': 20,
}

def plot_train_test(history):
    """Plot the training curves recorded by Keras.

    Draws two side-by-side panels — loss and accuracy — each showing the
    train curve (solid) and validation curve (dashed, labelled 'Test'),
    saves the figure, then displays it.

    Parameters
    ----------
    history : keras History object with a ``history`` dict containing the
        keys 'loss', 'val_loss', 'accuracy' and 'val_accuracy'.
    """
    plt.figure(figsize=(12, 4))
    plt.subplots_adjust(left=0.125, bottom=None, right=0.9, top=None,
                        wspace=0.3, hspace=None)

    # (train key, validation key, y-axis label) for each panel.
    panels = [
        ('loss', 'val_loss', 'loss'),
        ('accuracy', 'val_accuracy', 'Accuracy'),
    ]
    for position, (train_key, val_key, y_label) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        plt.plot(history.history[train_key], linewidth=3, label='Train')
        plt.plot(history.history[val_key], linewidth=3,
                 linestyle='dashed', label='Test')
        plt.xlabel('Epoch', fontsize=20)
        plt.ylabel(y_label, fontsize=20)
        plt.legend()

    plt.savefig(r'../pictures/问题二多层感知器训练过程展示')

    plt.show()


def build_ann(inputs, units_list=None,
              activation='relu',
              optimizer='adam',
              init='glorot_uniform',
              rate=0.2,
              n_classes=11):

    '''
    Build and compile a multilayer-perceptron classifier.

    Parameters
    ----------
    inputs : int
        Number of input features.
    units_list : list of int, optional
        Width of each hidden layer; defaults to [50, 100, 50, 25].
        (Kept as None default to avoid the mutable-default-argument pitfall.)
    activation : str
        Activation function for the hidden layers.
    optimizer : str or keras optimizer
        Optimizer passed to ``compile``.
    init : str
        Kernel initializer for all Dense layers.
    rate : float
        Dropout rate applied after every hidden layer.
    n_classes : int
        Number of output classes (default 11, matching the one-hot target).

    Returns
    -------
    A compiled ``Sequential`` model.
    '''
    if units_list is None:
        units_list = [50, 100, 50, 25]

    ann = Sequential()
    ann.add(Input(shape=(inputs,)))
    for units in units_list:
        # Hidden layer followed by Dropout for regularization.
        ann.add(Dense(units=units,
                      activation=activation,
                      kernel_initializer=init))
        ann.add(Dropout(rate=rate))
    # Output layer: softmax (not sigmoid) — the classes are mutually
    # exclusive and the loss is categorical_crossentropy, which expects
    # a probability distribution over classes.
    ann.add(Dense(units=n_classes,
                  activation='softmax',
                  kernel_initializer=init))

    ann.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])

    return ann


if __name__ == '__main__':
    # Load the preprocessed (PCA-reduced) feature data and the OP labels.
    datasets = pickle.load(open(r'../results/datasets_2_pca.pkl', 'rb'))
    # wave_number = pickle.load(open(r'../results/wave_number_2.pkl', 'rb'))
    herbs_op = pickle.load(open(r'../results/datasets_2_herbs_op.pkl', 'rb'))
    # One-hot encode OP into 11 indicator variables.
    enc = OneHotEncoder(handle_unknown='ignore')
    enc.fit(herbs_op.values.reshape(-1, 1))
    # Print the categories found before encoding.
    print(enc.categories_)
    herbs_op_encoded = enc.transform(herbs_op.values.reshape(-1, 1)).toarray()
    # Train/test split (70 % / 30 %).
    X_train, X_test, y_train, y_test = train_test_split(datasets,
                                                        herbs_op_encoded,
                                                        test_size=0.3)
    ann = build_ann(datasets.shape[1])

    filepath = '../results/ann_weights-best.h5'

    # Checkpoint saves only the weights with the best validation accuracy.
    checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_accuracy',
                                 verbose=1, save_best_only=True, mode='max')

    callback_list = [checkpoint]
    # Train; the best-on-validation weights are persisted to the HDF5 file.
    history = ann.fit(X_train, y_train,
                      epochs=500, batch_size=100,
                      validation_data=(X_test, y_test),
                      callbacks=callback_list)
    # Save the network topology (architecture only, no weights).
    ann_construction = ann.to_json()
    with open(r'../results/ann_model_construction.json', 'w') as file:
        file.write(ann_construction)

    # Reload the topology ...
    with open(r'../results/ann_model_construction.json', 'r') as file:
        model_json_load = file.read()

    ann = model_from_json(model_json_load)
    # ... and restore the best checkpointed weights into it.
    ann.load_weights(filepath)

    # Evaluate the best model on the training and the test set.
    # NOTE(review): inverse_transform is applied to raw network outputs;
    # sklearn effectively picks the max-activation column per row — confirm.
    y_train_pred = ann.predict(X_train)
    y_train_pred = enc.inverse_transform(y_train_pred).reshape(-1)
    y_train = enc.inverse_transform(y_train).reshape(-1)
    print('模型在训练集中的精确度为: ', accuracy_score(y_train, y_train_pred))
    print('模型在训练集中的准确率为：', precision_score(y_train, y_train_pred, average='weighted'))
    print('模型在训练集中的召回率为：', recall_score(y_train, y_train_pred, average='weighted'))
    print('模型在训练集中的 f1 值为：', f1_score(y_train, y_train_pred, average='weighted'))
    y_test_pred = ann.predict(X_test)
    y_test_pred = enc.inverse_transform(y_test_pred).reshape(-1)
    y_test = enc.inverse_transform(y_test).reshape(-1)
    print('模型在测试集中的精确度为: ', accuracy_score(y_test, y_test_pred))
    print('模型在测试集中的准确率为：', precision_score(y_test, y_test_pred, average='weighted'))
    print('模型在测试集中的召回率为：', recall_score(y_test, y_test_pred, average='weighted'))
    print('模型在测试集中的 f1 值为：', f1_score(y_test, y_test_pred, average='weighted'))

    # Load the samples whose OP label is unknown.
    datasets_without_op = pickle.load(open(r'../results/datasets_2_without_op_pca.pkl', 'rb'))
    datasets_without_op_index = pickle.load(open(r'../results/datasets_2_without_op.pkl', 'rb'))
    datasets_without_op_index = datasets_without_op_index.index
    # Predict with the best model.
    y_predict = ann.predict(datasets_without_op)
    # Decode the one-hot predictions back to OP labels.
    # reshape(-1) instead of the previous hard-coded reshape(15): works for
    # any number of unlabeled samples.
    herbs_op_predict = enc.inverse_transform(y_predict).reshape(-1)

    herbs_no_op_df = pd.concat([pd.Series(datasets_without_op_index),
                                pd.Series(herbs_op_predict, name='OP')], axis=1)
    herbs_no_op_df.to_excel(r'../results/问题2药材产出地预测结果_多层感知器模型.xlsx')

    # Plot loss and accuracy over the training run (train vs. test).
    plot_train_test(history)