from numpy import loadtxt
import xgboost
from xgboost import XGBClassifier
from xgboost import plot_importance
from matplotlib import pyplot as plt
import matplotlib as mpl
import pandas as pd
from sklearn.feature_selection import SelectFromModel
import warnings
from sklearn import metrics
warnings.filterwarnings("ignore")
# Load the dataset. The columns in drop_col are flow identifiers / raw
# aggregate counters that must not be used as model features; 'Label' is
# the target and is dropped from the feature set as well.
data = pd.read_csv("data_fs.csv")
drop_col = ['Index','Src IP','Dst IP','Src Port','Dst Port','Flow Duration','Packet Number','Fwd Packet Number','Bwd Packet Number','Total Bytes','Total Fwd Bytes','Total Bwd Bytes','Label']

# Drop the excluded columns once and reuse the result (the original
# recomputed the drop for both the name list and the value matrix).
features = data.drop(drop_col, axis=1)
feature_names = list(features.columns)  # all candidate feature names

# Split into feature matrix X and target vector y (both np.ndarray,
# no header row).
X = features.values
y = data['Label'].values
print(data)
print('data loaded')

# Package the training data; feature_names relabels the columns so the
# importance plot shows readable names instead of f0, f1, ...
dtrain = xgboost.DMatrix(X, label=y, feature_names=feature_names)

# Training parameters: softmax multi-class objective over 6 classes.
param = {
    'objective': 'multi:softmax',  # hard-label multi-class classification
    'eta': 0.1,                    # learning rate
    'max_depth': 6,
    'verbosity': 0,                # 'silent' was removed in XGBoost 1.0; 'verbosity' replaces it
    'nthread': 4,
    'num_class': 6,
}

# Train directly on the packaged DMatrix (features + labels together).
model = xgboost.train(param, dtrain)

# Autolayout so long feature names on the y-axis are not clipped.
plt.rcParams['figure.autolayout'] = True

plot_importance(model, max_num_features=20, title='feature importance', xlabel='score', ylabel='feature', grid=False)

plt.show()




