import xgboost as xgb
from dataPreproces import get_data
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
import json


def saveJson(data, path):
    """Serialize *data* to a JSON file at *path* (UTF-8, non-ASCII kept as-is).

    The original parameter was named ``list``, which shadows the builtin;
    renamed to ``data`` (all call sites in this file pass positionally).

    :param data: any JSON-serializable object (dict/list of results).
    :param path: destination file path; overwritten if it exists.
    """
    with open(path, 'w', encoding='utf-8') as fileObject:
        # json.dump streams straight to the file handle; ensure_ascii=False
        # keeps the Chinese keys readable in the output file.
        json.dump(data, fileObject, ensure_ascii=False)
    print("保存成功!")


def printJuZhen(juzheng):
    """Display a confusion matrix *juzheng* as a blue heatmap.

    :param juzheng: square 2-D array-like (rows = true class, cols = predicted
                    class), e.g. the output of ``metrics.confusion_matrix``.
    """
    # These two rcParams lines make CJK axis labels render correctly
    # (SimHei font, and keep the minus sign displayable).
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # Heatmap of the matrix; Blues colormap, darker = larger count.
    plt.imshow(juzheng, cmap=plt.cm.Blues)
    indices = range(len(juzheng))
    # Label ticks 1..k from the matrix size instead of the hard-coded [1, 2]
    # the original used — identical for the 2x2 case, correct for any size.
    class_labels = [i + 1 for i in indices]
    plt.xticks(indices, class_labels)
    plt.yticks(indices, class_labels)
    plt.colorbar()
    plt.xlabel('预测值')
    plt.ylabel('真实值')
    plt.title('混淆矩阵')
    sns.set(font_scale=0.8)
    plt.show()


def xiao_ti_qing(a):
    """Draw a violin plot of ResponseTimeTimeVariance grouped by the DoH label.

    Shows how the numeric 'ResponseTimeTimeVariance' distribution differs
    between the categories of 'DoH'.

    :param a: DataFrame-like with at least the columns 'DoH' (categorical)
              and 'ResponseTimeTimeVariance' (numeric).
    """
    # Plot style + CJK font handling (Microsoft YaHei, keep minus sign).
    plt.style.use('ggplot')
    plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
    plt.rcParams['axes.unicode_minus'] = False
    # x is the categorical variable, y the numeric one.
    # (The original also called a['DoH'].value_counts() and discarded the
    # result — a dead statement, removed.)
    x = a['DoH']
    y = a['ResponseTimeTimeVariance']
    sns.violinplot(x=x, y=y, data=a)
    plt.show()


if __name__ == '__main__':
    # Load the dataset; nans=False presumably drops/forbids missing values —
    # TODO confirm against dataPreproces.get_data.
    data_source = get_data("../resource/all.csv", nans=False)
    # Drop identifier/timestamp columns that carry no predictive signal.
    bad_columns = ['SourceIP', 'DestinationIP', 'SourcePort', 'DestinationPort', 'TimeStamp']
    data_source.drop(labels=bad_columns, axis='columns', inplace=True)
    # Encode the label numerically. The loader already parses this column into
    # Python booleans, so the map keys must be bools, not the strings
    # 'True'/'False' (DoH traffic -> 0, non-DoH -> 1).
    data_source['DoH'] = data_source['DoH'].map({True: 0, False: 1})
    # Min-max scale all columns to [0, 1].
    # NOTE(review): this scales the 0/1 label together with the features; for
    # a binary label the values are unchanged, but scaling usually applies to
    # X only — confirm this is intended.
    scaler = MinMaxScaler()
    data_source = pd.DataFrame(scaler.fit_transform(data_source), columns=data_source.columns)

    X = data_source.loc[:, data_source.columns != 'DoH']
    Y = data_source['DoH']

    # Fixed random_state keeps the 80/20 split reproducible.
    train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.2, random_state=7)

    # XGBoost native data structures; watchlist reports train AUC per round.
    dtrain = xgb.DMatrix(data=train_x, label=train_y)
    dtest = xgb.DMatrix(data=test_x)
    watchlist = [(dtrain, 'train')]

    # Booster hyper-parameters.
    # NOTE(review): 'eta' and 'learning_rate' are aliases for the same
    # parameter and are set to different values (0.025 vs 0.01) — kept as-is
    # to preserve behavior, but one of them should be removed once the
    # intended value is confirmed.
    params = {'booster': 'gbtree',
              'objective': 'binary:logistic',
              'eval_metric': 'auc',
              'max_depth': 5,
              'lambda': 10,
              'subsample': 0.75,
              'colsample_bytree': 0.75,
              'min_child_weight': 2,
              'eta': 0.025,
              'seed': 0,
              'nthread': 8,
              'gamma': 0.15,
              'learning_rate': 0.01}

    # Train 50 boosting rounds, then score the held-out set once.
    bst = xgb.train(params, dtrain, num_boost_round=50, evals=watchlist)
    ypred = bst.predict(dtest)   # raw probabilities in [0, 1]
    y_pred = (ypred >= 0.5) * 1  # hard 0/1 labels at the 0.5 threshold

    # Accuracy is reported under two keys; compute it once.
    accuracy = metrics.accuracy_score(test_y, y_pred)
    result = {'模型名称': 'XgBoost', '结果': {
        '准确率': accuracy,
        '平均绝对误差': metrics.mean_absolute_error(test_y, y_pred),
        '均方误差': metrics.mean_squared_error(test_y, y_pred),
        'Precesion': metrics.precision_score(test_y, y_pred),
        'Recall': metrics.recall_score(test_y, y_pred),
        'F1-score': metrics.f1_score(test_y, y_pred),
        'Accuracy': accuracy,
        # AUC is computed from the raw probabilities, not the thresholded labels.
        'AUC': metrics.roc_auc_score(test_y, ypred)
    }}
    print(result)
    saveJson(result, '../XgBoostResult.json')

    # Reuse the probabilities computed above (the original re-ran
    # bst.predict(dtest) here redundantly).
    print("测试集每个样本的得分\n", ypred)
    ypred_leaf = bst.predict(dtest, pred_leaf=True)
    print("测试集每棵树所属的节点数\n", ypred_leaf)
    ypred_contribs = bst.predict(dtest, pred_contribs=True)
    print("特征的重要性\n", ypred_contribs)

    # Feature-importance bar chart (split-count based by default).
    xgb.plot_importance(bst, height=0.8, title='网络异常流量特征重要性统计', ylabel='特征')
    plt.rc('font', size=14)
    plt.show()

    # Confusion matrix of the thresholded predictions, rendered as a heatmap.
    confusion_mat = metrics.confusion_matrix(test_y, y_pred)
    printJuZhen(confusion_mat)
