import numpy as np
from sklearn.neural_network import MLPClassifier
import joblib
from sklearn.metrics import classification_report
from imblearn.under_sampling import RandomUnderSampler
from sklearn.model_selection import StratifiedKFold 
from data_format import x_train, y_train, x_cross, y_cross, x_test, y_test
import matplotlib.pyplot as plt
from test_result_show import plot_roc_ann,pie_chart
# Set the plot font to KaiTi so Chinese labels render correctly
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['KaiTi']
import warnings
warnings.filterwarnings('ignore')  # "error", "ignore", "always", "default", "module" or "once"



def nerual_network_train(x_train, y_train, x_cross, y_cross, x_test, y_test):
    """Grid-search the MLP L2 penalty (alpha) with 10-fold stratified CV.

    For each candidate alpha, an MLP is fitted on a class-balanced
    (randomly undersampled) version of each fold's training split; the
    model from the last fold is then scored on the hold-out cross set.
    The best-scoring alpha is used to refit a final model on a balanced
    resample of the full training data, which is persisted to disk.

    Parameters
    ----------
    x_train, y_train : training features / labels.
    x_cross, y_cross : hold-out set used to score each candidate alpha.
    x_test, y_test   : unused here; kept for call-site compatibility.

    Returns
    -------
    (c_list, accurancy_list) : the candidate alphas tried and the
    accuracy obtained on the cross set for each one.
    """
    datasets = np.array(x_train)
    labels = np.array(y_train)

    best_C = 1            # best regularization strength found so far
    best_accurancy = 0
    c_list = []
    accurancy_list = []
    kf = StratifiedKFold(n_splits=10, shuffle=True)  # stratified 10-fold CV

    for c in [0.0001, 0.001, 0.01, 0.1, 1, 10, 100]:
        classfier = None
        for train_index, test_index in kf.split(datasets, labels):
            # BUG FIX: undersample ONLY the fold's training split. The
            # original resampled the whole dataset, so the held-out fold
            # leaked into the training data on every iteration.
            rus = RandomUnderSampler(sampling_strategy=1, random_state=0,
                                     replacement=True)
            x_fold, y_fold = rus.fit_resample(datasets[train_index],
                                              labels[train_index])

            # BUG FIX: alpha must be the candidate `c` being searched —
            # it was previously hard-coded to 0.001, which made the
            # entire grid search a no-op.
            classfier = _make_mlp(alpha=c)
            classfier.fit(x_fold, y_fold)

        # Score the last fold's model on the hold-out cross set.
        # (precision / recall / f1 / support per label)
        y_pred = classfier.predict(x_cross)
        print(classification_report(y_cross, y_pred))

        accuracy = float(np.mean(np.asarray(y_pred) == np.asarray(y_cross)))
        if accuracy > best_accurancy:
            best_accurancy = accuracy
            best_C = c

        c_list.append(c)
        accurancy_list.append(accuracy)

    print('best_C:{}'.format(best_C))
    print('best_accurancy:{}'.format(best_accurancy))

    # Refit on a balanced resample of the FULL training set with the best
    # alpha (the original accidentally refitted on leftover loop state),
    # then persist the model for later evaluation by ANN_result().
    rus = RandomUnderSampler(sampling_strategy=1, random_state=0,
                             replacement=True)
    x_final, y_final = rus.fit_resample(datasets, labels)
    clf = _make_mlp(alpha=best_C)
    clf.fit(x_final, y_final)
    joblib.dump(clf, "./models/ann.pkl")

    return c_list, accurancy_list


def _make_mlp(alpha):
    """Build the project's standard MLP with the given L2 penalty.

    One 200-unit tanh hidden layer trained with constant-rate SGD
    (lr=0.01, momentum=0.9 with Nesterov, mini-batch size 5, up to
    200 iterations, tol=1e-4).  beta_1/beta_2/epsilon only apply to
    the adam solver and are left at their defaults.
    """
    return MLPClassifier(hidden_layer_sizes=(200,), activation='tanh',
                         solver='sgd', alpha=alpha, batch_size=5,
                         learning_rate='constant', learning_rate_init=0.01,
                         power_t=0.5, max_iter=200, shuffle=True,
                         random_state=None, tol=0.0001, verbose=False,
                         warm_start=False, momentum=0.9,
                         nesterovs_momentum=True, early_stopping=False,
                         validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                         epsilon=1e-08, n_iter_no_change=10)


# 绘制c 和 accurancy 的变化曲线
def show_change_ann(x, y):
    """Plot accuracy (y) against the regularization parameter alpha (x)."""
    plt.title('正则化参数与准确率的关系')
    plt.xlabel('alpha')
    plt.ylabel('Accuracy')
    # The line must be drawn before legend() so its label is picked up.
    plt.plot(x, y, c='g', label='accurancy')
    plt.legend()
    plt.show()


def ANN_result(x_test, y_test):
    """Load the persisted ANN model and report its predictions on the test set.

    Draws a pie chart of the predicted class distribution, then the ROC
    curve (with AUC) via the project's plotting helpers, and finally
    prints the raw predictions.
    """
    # Restore the model saved by nerual_network_train().
    model = joblib.load('./models/ann.pkl')

    predictions = model.predict(x_test)

    # Predicted-class distribution.
    pie_chart(predictions, '人工神经网路(ANN)')

    # ROC curve + AUC and prediction evaluation.
    plot_roc_ann(x_test, y_test, predictions, model, '人工神经网路(ANN)')

    print(f'y_pred:{predictions}')


if __name__ == '__main__':
    # Train (selecting alpha by cross-validation), plot the
    # alpha-vs-accuracy curve, then evaluate the saved model on the test set.
    cv_curve = nerual_network_train(x_train, y_train, x_cross, y_cross,
                                    x_test, y_test)
    show_change_ann(*cv_curve)
    ANN_result(x_test, y_test)