# -*- coding: utf-8 -*-
import math

import numpy as np
import pandas as pd
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import roc_curve, auc, f1_score, recall_score, precision_score, accuracy_score, \
    classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from mdp_all import mdp_data
import matplotlib.pyplot as plt
import joblib
import matplotlib as mpl


"""
函数说明：文件处理
Parameters:
     filename:数据文件
Returns:
     list_datasets：数据集特征列表
     category_labels:数据标签列表
"""


def data_handle(filename):
    """Read a CSV dataset, shuffle the rows, and split into features and labels.

    Parameters:
        filename: path to the CSV file; all columns but the last are features,
            the last column is the class label.

    Returns:
        list_datasets: list of per-sample feature lists.
        category_labels: list of binary labels — 0 for 'N' (or a falsy label),
            1 otherwise (defective).
        count: number of samples.
    """
    read_data = pd.read_csv(filename)
    # Random row permutation (unseeded, same as the original sklearn shuffle).
    read_data = read_data.sample(frac=1)
    count = len(read_data)
    # Vectorized split: the original element-by-element iloc loop performed
    # O(rows*cols) scalar pandas lookups, which is extremely slow.
    list_datasets = read_data.iloc[:, :-1].values.tolist()
    # 'N' or an empty/falsy label means "normal" (0); anything else is a defect (1).
    category_labels = [0 if (label == 'N' or not label) else 1
                       for label in read_data.iloc[:, -1]]
    return list_datasets, category_labels, count


"""
函数说明：绘制ROC曲线
Parameters:
     labels:测试标签列表
     predict_prob:预测标签列表
"""


def plot_roc(labels, predict_prob, auc, macro, macro_recall, weighted, auc_ave, g_mean_ave, balance_ave):
    """Draw a 3-panel figure: ROC curve, evaluation-metric table, convergence table.

    Parameters:
        labels: ground-truth test labels (binary 0/1).
        predict_prob: predicted labels/scores fed to roc_curve.
        auc, macro, macro_recall, weighted: shown in the first table under the
            headers accuracy/precision/recall/f1.
            NOTE(review): the parameter names do not match the table headers
            (e.g. `auc` is displayed as accuracy) — confirm against the caller,
            which passes (mean accuracy, precision, recall, f1) in this order.
        auc_ave, g_mean_ave, balance_ave: averaged convergence metrics for the
            second table, compared against the expected thresholds row.

    Side effects: prints the computed AUC and shows the figure via plt.show().
    """
    # Create one column of three stacked axes (the original comment said
    # "1 row, 2 columns", which did not match the code).
    figure, axes = plt.subplots(ncols=1, nrows=3, figsize=(6.5, 6.5), dpi=100)
    # Axes handles for the three panels
    ax1 = axes[0]
    ax2 = axes[1]
    ax3 = axes[2]

    # Panel 1: ROC curve
    plt.sca(ax1)
    false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve(labels, predict_prob)  # FPR, TPR, thresholds
    roc_auc = metrics.auc(false_positive_rate, true_positive_rate)  # area under the ROC curve
    print('AUC=' + str(roc_auc))
    plt.title('ROC')
    plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.4f' % roc_auc)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('TPR（真阳性率）')
    plt.xlabel('FPR（伪阳性率）')

    # Panel 2: evaluation-metric table (axes hidden, table only)
    plt.sca(ax2)
    plt.axis('off')
    plt.title('模型评价指标', y=-0.1)
    # Configure fonts so Chinese glyphs and minus signs render correctly
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    col_labels = ['准确率', '精确率', '召回率', 'f1值']
    row_labels = ['实际']
    table_vals = [[auc, macro, macro_recall, weighted]]
    row_colors = ['red', 'pink', 'green', 'gold']
    table = plt.table(cellText=table_vals, colWidths=[0.18 for x in col_labels],
                      rowLabels=row_labels, colLabels=col_labels,
                      rowColours=row_colors, colColours=row_colors,
                      loc="center")
    table.set_fontsize(14)
    table.scale(1.5, 1.5)

    # Panel 3: convergence metrics vs. expected thresholds
    plt.sca(ax3)
    plt.axis('off')
    plt.title('收敛指标', y=-0.1)
    # Configure fonts so Chinese glyphs and minus signs render correctly
    mpl.rcParams["font.sans-serif"] = ["SimHei"]
    mpl.rcParams["axes.unicode_minus"] = False
    col_labels = ['Auc-Ave', 'G-Mean', 'Balance-Ave']
    row_labels = ['期望', '实际']
    table_vals = [[0.75, 0.7, 0.7], [auc_ave, g_mean_ave, balance_ave]]
    row_colors = ['yellow', 'cyan', 'green']
    table = plt.table(cellText=table_vals, colWidths=[0.22 for x in col_labels],
                      rowLabels=row_labels, colLabels=col_labels,
                      rowColours=row_colors, colColours=row_colors,
                      loc="center")
    table.set_fontsize(14)
    table.scale(1.5, 1.5)

    plt.show()
    # plt.savefig('figures/PC5.png') # optionally save the figure instead of showing it


def random_forest(filename, num=0):
    """Train and evaluate a random-forest defect predictor with stratified 5-fold CV.

    Parameters:
        filename: path to the CSV dataset (last column is the class label).
        num: 0 -> show the ROC/metrics figure via plot_roc; any other value ->
            forward the averaged results to mdp_data.add_data.

    Side effects:
        Prints per-fold and averaged metrics; saves the best-accuracy fold's
        model to 'files/random.pkl'.
    """
    datasets, labels, count = data_handle(filename)  # load features/labels

    # Per-fold metric accumulators.
    accuracy_list = []
    precision_list = []
    recall_list = []
    f1_list = []
    auc_list = []
    g_mean_list = []
    balance_list = []
    best_accuracy = 0

    datasets = np.array(datasets)
    labels = np.array(labels)

    kf = StratifiedKFold(n_splits=5, shuffle=True)
    for train_index, test_index in kf.split(datasets, labels):
        # BUG FIX: resample only the training fold. The original code ran
        # SMOTEENN over the WHOLE dataset (test rows included) and trained on
        # the result, leaking test-set information into training and inflating
        # every reported metric. The test fold must stay untouched.
        sampler = SMOTEENN(sampling_strategy=0.5, random_state=0)
        x_train, y_train = sampler.fit_resample(datasets[train_index], labels[train_index])
        x_test = datasets[test_index]
        y_test = labels[test_index]

        clf = RandomForestClassifier(n_estimators=200, random_state=0)
        clf.fit(x_train, y_train)
        pre = clf.predict(x_test)
        print('准确率：', accuracy_score(y_test, pre))
        print('分类报告：', classification_report(y_test, pre))
        accuracy = accuracy_score(y_test, pre)
        if best_accuracy < accuracy:
            # Persist the best-performing model seen across the folds.
            best_accuracy = accuracy
            joblib.dump(clf, 'files/random.pkl')
        precision = precision_score(y_test, pre, average='weighted')
        recall = recall_score(y_test, pre, average='weighted')
        f1score = f1_score(y_test, pre, average='weighted')
        # NOTE(review): ROC is computed from hard 0/1 predictions, not
        # predict_proba scores, so the curve has a single operating point.
        false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, pre)
        roc_auc = auc(false_positive_rate, true_positive_rate)

        # Index 1 is the classifier's operating point (assumes both classes
        # occur in y_test and pre; otherwise this raises IndexError).
        false_positive_rate, true_positive_rate = false_positive_rate[1], true_positive_rate[1]
        g_mean = math.sqrt(true_positive_rate * (1 - false_positive_rate))
        # Balance: 1 - normalized euclidean distance to the ideal ROC corner (FPR=0, TPR=1).
        balance = 1 - math.sqrt(
            math.pow((1 - true_positive_rate), 2) + math.pow((0 - false_positive_rate), 2)) / math.sqrt(2)

        accuracy_list.append(accuracy)
        precision_list.append(precision)
        recall_list.append(recall)
        f1_list.append(f1score)
        auc_list.append(roc_auc)
        g_mean_list.append(g_mean)
        balance_list.append(balance)

    # Cross-fold averages.
    auca = np.mean(accuracy_list)
    preci = np.mean(precision_list)
    recall = np.mean(recall_list)
    f1 = np.mean(f1_list)
    # Convergence criteria — the model is usually accepted when these exceed 0.7.
    auc_ave = np.mean(auc_list)
    g_mean_ave = np.mean(g_mean_list)
    balance_ave = np.mean(balance_list)
    print('平均准确率:', auca)  # mean accuracy
    print('平均精确率:', preci)  # mean precision
    print('平均召回率:', recall)  # mean recall
    print('平均f1值:', f1)  # mean f1
    print('auc_ave:', auc_ave)
    print('g_mean_ave:', g_mean_ave)
    print('balance_ave:', balance_ave)

    # Confusion matrix of the LAST fold only (y_test/pre from the final iteration).
    print('混淆矩阵输出:\n', metrics.confusion_matrix(y_test, pre))

    if num == 0:
        plot_roc(y_test, pre, auca, preci, recall, f1, auc_ave, g_mean_ave, balance_ave)  # draw ROC + metric tables
    else:
        # NOTE(review): passes the class object itself as `self` — kept from
        # the original; confirm mdp_data.add_data's expected signature.
        mdp_data.add_data(mdp_data, y_test, pre, auca, preci, recall, f1)


if __name__ == '__main__':
    # Entry point: train and evaluate on the KC3 MDP dataset, showing the figure.
    dataset_path = 'MDP/KC3.csv'
    random_forest(dataset_path)