#!/usr/bin/env python
# coding: utf-8

# In[72]:


import pandas as pd
import numpy as np
import gc
import matplotlib.pyplot as plt
import seaborn as sns

import warnings
from sklearn.preprocessing import scale
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score,accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb


# In[50]:


# Load the NetFlow BoT-IoT dataset from the working directory.
NF_BoT_IoT = pd.read_csv("NF-BoT-IoT.csv")

# Show the raw frame (notebook cell output).
NF_BoT_IoT

# Feature columns: everything except the raw IP addresses and the two targets.
_non_feature_cols = {'IPV4_SRC_ADDR', 'IPV4_DST_ADDR', 'Label', 'Attack'}
feats = [c for c in NF_BoT_IoT.columns if c not in _non_feature_cols]

# Summary statistics of the numeric columns.
NF_BoT_IoT.describe()

# Class balance of the binary target (0 = benign, 1 = attack — confirm
# against the dataset description).
NF_BoT_IoT['Label'].value_counts()

# Distribution of the attack categories (multi-class target).
NF_BoT_IoT['Attack'].value_counts()


# # 构建多种模型

# In[23]:


# One classifier per family, each with a fixed seed so the comparison
# below is reproducible.
lr = LogisticRegression(random_state=2018, tol=1e-6)                   # logistic regression
tree = DecisionTreeClassifier(random_state=2018)                       # decision tree
svm = SVC(probability=True, random_state=2018, tol=1e-6)               # SVM (probability=True enables predict_proba for AUC)
forest = RandomForestClassifier(n_estimators=100, random_state=2018)   # random forest
Gbdt = GradientBoostingClassifier(random_state=2018)                   # gradient boosted trees
Xgbc = XGBClassifier(random_state=2018)                                # XGBoost
gbm = lgb.LGBMClassifier(random_state=2018)                            # LightGBM


# In[17]:


def muti_score(df_X, df_y, model):
    """Print mean 5-fold CV accuracy / precision / recall / F1 / AUC for *model*.

    Parameters
    ----------
    df_X : feature matrix (array-like / DataFrame)
    df_y : binary target aligned with ``df_X``
    model : sklearn-compatible classifier; must expose ``predict_proba``
        or ``decision_function`` for the ROC-AUC scorer.
    """
    # Local import: cross_validate is not in the file-level import block.
    from sklearn.model_selection import cross_validate

    # A single cross_validate call fits each fold once and evaluates every
    # scorer on it, instead of refitting the model 5x (once per metric) as
    # five separate cross_val_score calls would.
    scoring = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']
    with warnings.catch_warnings():
        # Silence convergence/metric warnings for this call only instead of
        # permanently mutating the global warning filter.
        warnings.filterwarnings('ignore')
        scores = cross_validate(model, df_X, df_y, scoring=scoring, cv=5)
    print("准确率:", scores['test_accuracy'].mean())
    print("精确率:", scores['test_precision'].mean())
    print("召回率:", scores['test_recall'].mean())
    print("F1_score:", scores['test_f1'].mean())
    print("AUC:", scores['test_roc_auc'].mean())


# In[27]:


# muti_score(NF_BoT_IoT[feats],NF_BoT_IoT['Label'],lr)


# In[ ]:


# cross_val_score(gbm, NF_BoT_IoT[feats],NF_BoT_IoT['Label'], scoring='roc_auc', cv=5)


# ## 使用lightGBM进行二分类

# In[46]:


def LGB_model(data_, test_, y_, folds_):
    """Train one LightGBM binary classifier per CV fold.

    Parameters
    ----------
    data_ : pd.DataFrame -- training frame containing the feature columns.
    test_ : pd.DataFrame -- frame to produce fold-averaged predictions for.
    y_ : pd.Series -- binary target aligned with ``data_``.
    folds_ : cross-validator exposing ``split`` and ``n_splits`` (e.g. KFold).

    Returns
    -------
    (oof_preds, sub_preds, feature_importance_df)
        Out-of-fold P(class 1) per training row, fold-averaged P(class 1)
        per ``test_`` row, and per-fold feature importances.
    """
    oof_preds = np.zeros(data_.shape[0])
    sub_preds = np.zeros(test_.shape[0])
    feature_importance_df = pd.DataFrame()
    # Derive the feature list from the frame that was passed in rather than
    # from the global NF_BoT_IoT, so the function works on any dataset.
    feats = [f for f in data_.columns
             if f not in ['IPV4_SRC_ADDR', 'IPV4_DST_ADDR', 'Label', 'Attack']]
    for n_fold, (trn_idx, val_idx) in enumerate(folds_.split(data_)):
        trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
        val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]

        clf = lgb.LGBMClassifier(random_state=2018)

        # NOTE(review): verbose/early_stopping_rounds as fit kwargs is the
        # pre-4.0 LightGBM API; newer versions use callbacks instead.
        clf.fit(trn_x, trn_y,
                eval_set=[(trn_x, trn_y), (val_x, val_y)],
                eval_metric='auc', verbose=100, early_stopping_rounds=40)

        # Out-of-fold probabilities for this fold's validation rows.
        oof_preds[val_idx] = clf.predict_proba(val_x, num_iteration=clf.best_iteration_)[:, 1]
        # Average the test-frame predictions over all folds.
        sub_preds += clf.predict_proba(test_[feats], num_iteration=clf.best_iteration_)[:, 1] / folds_.n_splits

        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)

        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
        del clf, trn_x, trn_y, val_x, val_y
        gc.collect()

    print('Full AUC score %.6f' % roc_auc_score(y_, oof_preds))

    return oof_preds, sub_preds, feature_importance_df
    
def display_importances(feature_importance_df_):
    """Bar-plot the top-50 features by mean importance and save to PNG."""
    # Mean importance per feature across folds, highest first.
    mean_imp = (feature_importance_df_[["feature", "importance"]]
                .groupby("feature")
                .mean()
                .sort_values(by="importance", ascending=False))
    top_features = mean_imp[:50].index

    # Keep every per-fold row belonging to a top feature so the bars show
    # across-fold spread, then order for plotting.
    plot_data = feature_importance_df_.loc[feature_importance_df_.feature.isin(top_features)]
    plot_data = plot_data.sort_values(by="importance", ascending=False)

    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature", data=plot_data)
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances.png')


# In[47]:


from sklearn.model_selection import KFold

# 5-fold CV on the binary 'Label' target. The "test" frame is a copy of the
# training data, so IntePre holds fold-averaged in-sample predictions.
seed = 546789
data = NF_BoT_IoT.copy()
y = data['Label']
X_train1 = data.copy()
folds = KFold(n_splits=5, shuffle=True, random_state=seed)

oof_preds, IntePre, importances = LGB_model(data, X_train1, y, folds)

display_importances(importances)


# ## 使用LightGBM进行多分类

# In[83]:


# Integer-encode the multi-class 'Attack' target in place.
from sklearn.preprocessing import LabelEncoder

# Use the scalar column name so data[col] is a 1-D Series: LabelEncoder
# expects a 1-D y, and the original list form (col = ["Attack"]) made
# data[col] a one-column 2-D DataFrame.
col = "Attack"
lbl = LabelEncoder().fit(data[col])  # kept so lbl.inverse_transform is available
data[col] = lbl.transform(data[col])


# In[89]:


def LGB_model(data_, test_, y_, folds_):
    """Train one LightGBM multi-class classifier per CV fold.

    Parameters
    ----------
    data_ : pd.DataFrame -- training frame containing the feature columns.
    test_ : pd.DataFrame -- frame to produce fold-averaged predictions for.
    y_ : pd.Series -- integer-encoded multi-class target aligned with ``data_``.
    folds_ : cross-validator exposing ``split`` and ``n_splits`` (e.g. KFold).

    Returns
    -------
    (oof_preds, sub_preds, feature_importance_df)
        Out-of-fold predicted class labels per training row, predicted class
        labels per ``test_`` row (argmax of the fold-averaged class
        probabilities), and per-fold feature importances.
    """
    oof_preds = np.zeros(data_.shape[0])
    n_classes = y_.nunique()
    # Accumulate class *probabilities* across folds. Averaging the integer
    # class labels themselves (sub_preds += clf.predict(...) / n_splits)
    # produces meaningless fractional "labels" for multi-class targets.
    # NOTE(review): assumes every fold's training split contains all
    # n_classes labels so predict_proba columns align — verify for rare classes.
    sub_proba = np.zeros((test_.shape[0], n_classes))
    feature_importance_df = pd.DataFrame()
    # Derive features from the frame passed in, not the global NF_BoT_IoT.
    feats = [f for f in data_.columns
             if f not in ['IPV4_SRC_ADDR', 'IPV4_DST_ADDR', 'Label', 'Attack']]
    for n_fold, (trn_idx, val_idx) in enumerate(folds_.split(data_)):
        trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
        val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]

        params = {
            'boosting_type': 'gbdt',
            'objective': 'multiclass',
            'metric': 'multi_logloss'
        }

        clf = lgb.LGBMClassifier(**params, random_state=2018)

        clf.fit(trn_x, trn_y,
                eval_set=[(trn_x, trn_y), (val_x, val_y)],
                verbose=100, early_stopping_rounds=40)

        # Out-of-fold hard labels for this fold's validation rows.
        oof_preds[val_idx] = clf.predict(val_x, num_iteration=clf.best_iteration_)
        sub_proba += clf.predict_proba(test_[feats], num_iteration=clf.best_iteration_) / folds_.n_splits

        fold_importance_df = pd.DataFrame()
        fold_importance_df["feature"] = feats
        fold_importance_df["importance"] = clf.feature_importances_
        fold_importance_df["fold"] = n_fold + 1
        feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)

        print('Fold %2d accuracy : %.6f' % (n_fold + 1, accuracy_score(val_y, oof_preds[val_idx])))
        del clf, trn_x, trn_y, val_x, val_y
        gc.collect()

    # This is accuracy, not AUC — the original message said "Full AUC accuracy".
    print('Full accuracy %.6f' % accuracy_score(y_, oof_preds))

    # Final test-frame prediction: most probable class under the averaged proba.
    sub_preds = np.argmax(sub_proba, axis=1)
    return oof_preds, sub_preds, feature_importance_df
    
def display_importances(feature_importance_df_):
    """Save a bar chart of the 50 features with the highest mean importance."""
    avg = feature_importance_df_[["feature", "importance"]].groupby("feature").mean()
    cols = avg.sort_values(by="importance", ascending=False)[:50].index

    # Per-fold rows for the selected features, ordered for the plot.
    best = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)]
    best = best.sort_values(by="importance", ascending=False)

    plt.figure(figsize=(8, 10))
    sns.barplot(x="importance", y="feature", data=best)
    plt.title('LightGBM Features (avg over folds)')
    plt.tight_layout()
    plt.savefig('lgbm_importances.png')


# In[90]:


from sklearn.model_selection import KFold

# Same CV setup as the binary run, but against the encoded multi-class
# 'Attack' target.
seed = 546789
y = data['Attack']
X_train1 = data.copy()
folds = KFold(n_splits=5, shuffle=True, random_state=seed)

oof_preds, IntePre, importances = LGB_model(data, X_train1, y, folds)

display_importances(importances)


# In[ ]:




