from model import AECNN,AELinear, MYRNN, MYLSTM ,MyCNN , MyDNN
from tools import datahandle, ConfusionMatrix, data_process,option2svmy,returndataaloader
from sklearn.neighbors import KNeighborsClassifier

import torch
import torch.nn as nn
import numpy as np
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.utils.data as Data
import matplotlib.pyplot as plt
import random,sklearn
from sklearn import metrics,tree
import warnings

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_iris
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.metrics import f1_score
import math
import csv
import operator
from sklearn.model_selection import train_test_split

warnings.filterwarnings("ignore")
# --- alternative datasets kept for reference (commented out) ---------------
# x = np.load('./data/classto6X.npy')
# y = np.load('./data/classto6Y.npy')

# x = np.load('./data/classto4X.npy')
# y = np.load('./data/classto4Y.npy')

# x = np.load('./data/4Xsample.npy')
# y = np.load('./data/4Ysample.npy') mmm
# x = np.load('./data/xparttest.npy')
# y = np.load('./data/yparttest.npy')

# x_, y_ = data_process(x, y)
# np.save('./data/xparttest.npy', x_)
# np.save('./data/yparttest.npy', y_)
# Use the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
'''our datassets' experiment'''
# Active dataset: feature matrix and binary labels.
# NOTE(review): assumes original60X.npy and 2Ysample.npy are row-aligned — confirm.
x = np.load('./data/original60X.npy')
y = np.load('./data/2Ysample.npy')
# '''cicddos2019'''
print('begin!')
# x = np.load('./data/cicddos2019/newx.npy')
# where_are_nan = np.isnan(x)
# where_are_inf = np.isinf(x)
# # replace NaN with 0 and inf with 0
# x[where_are_nan] = 0
# x[where_are_inf] = 0
# print(x.shape)
# y = np.load('./data/cicddos2019/cicY_AE-MLP.npy')


"------------"
# x1 = np.load('./data/cicddos2019/train/X.npy')
# x2 = np.load('./data/cicddos2019/test/X.npy')
# y1 = np.load('./data/cicddos2019/train/Y.npy')
# y2 = np.load('./data/cicddos2019/test/Y.npy')
'-----------'
# x = np.load('./data/cicddos2019/03-11/X.npy')
# y = np.load('./data/cicddos2019/03-11/Y2.npy')
# Label mappings for cicddos2019 classification:
label14_classdict = {
    0: 'BENIGN',
    1: 'TFTP',
    2: 'MSSQL',
    3: 'LDAP',
    4: 'UDP',
    5: 'NetBIOS',
    6: 'DrDoS_SNMP',
    7: 'DrDoS_DNS',
    8: 'DrDoS_SSDP',
    9: 'DrDoS_NTP',
    10: 'Syn',
    11: 'Portmap',
    12: 'UDPLag',
    13: 'WebDDoS'
}
label3_classdict = {
    0: 'BENIGN',
    1: 'Reflection Attacks',
    2: 'Exploitation Attacks'
}
label_classdict = {0:'BENIGN',1:'LDAP',2:'MSSQL',3:'NetBIOS',4:'Syn',5:'UDP'}
# label_classdict = {'BENIGN':0,'LDAP':1,'MSSQL':2,'NetBIOS':3,'Syn':4,'UDPLag':5,'UDP-lag':5,'UDP':5,'WebDDoS':5,'Portmap':6}
modclass_indict = {0: 'Benign', 1: 'AllCoils', 2: 'AllRegisters', 3: 'Galil-RIO', 4: 'SingleCoil', 5: 'SingleRegister'}

# Modbus label mappings (alternatives kept commented out):
# modclass_indict = {0: 'Benign', 1: 'Coils', 2: 'Registers', 3: 'Galil-RIO'}
# modclass_indict = {0: 'Benign', 1: 'AllCoils', 2: 'AllRegisters', 3: 'Galil-RIO', 4: 'SingleCoil', 5: 'SingleRegister'}

# NOTE(review): this reassignment overrides the 6-class mapping above; the
# binary Benign/Attacks mapping is the one actually in effect below.
modclass_indict = {0: 'Benign', 1: 'Attacks'}
# Configuration

class_indict = modclass_indict
batchsize = 10
inner_coder = 60  # autoencoder bottleneck size used by AECNN / MYLSTM / MYRNN below
# NOTE(review): 120 presumably is a sequence/feature length and 0.3 the test
# split ratio — verify against tools.datahandle.
trainloader, testloader = datahandle(x, y, batchsize,120, 0.3, True)
# trainloader, testloader = returndataaloader(xtrain=x1,xtest=x2,ytrain=y1,ytest=y2,batchsize=10,isshuffle=False)
classes = 2  # number of output classes (binary)
labels = [l for _, l in class_indict.items()]  # class-name list for ConfusionMatrix



def train_cnn(xloader, yloader, net, epoch, device):
    """Train a CNN classifier and evaluate it on the test loader every epoch.

    xloader/yloader: train/test DataLoaders yielding (feature, one-hot target).
    net: the model; epoch: number of epochs; device: unused here (loaders are
    assumed to already yield tensors on the right device).
    Returns the list of per-epoch confusion-matrix summary strings.
    """
    result = []
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # optimizer
    loss_function = nn.BCELoss()  # loss over sigmoid outputs vs one-hot targets
    for i in range(epoch):
        confusion = ConfusionMatrix(num_classes=classes, labels=labels)
        net.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            outputs = net(feature)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
        print(i / epoch)  # rough progress indicator
        net.eval()
        # FIX: evaluate under no_grad so no autograd graph is built.
        with torch.no_grad():
            for feature, target in yloader:
                pred = net(feature)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        result.append(stracc)
    confusion.plot()
    return result


def train_rnn(xloader, yloader, net, epoch, device):
    """Train an RNN classifier and evaluate it on the test loader every epoch.

    Carries a hidden state across batches, detaching it after each optimizer
    step so gradients never flow into previous batches.
    Returns the per-epoch accuracy values (floats) from the confusion matrix.
    """
    tmp = []
    # Initial hidden state, shape (n_layers, batch, hidden_size).
    # NOTE(review): assumes hidden size 10 matches MYRNN — confirm in model.py.
    hidden_cell = torch.zeros(1, 1, 10).to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # optimizer
    loss_function = nn.BCELoss()  # loss over sigmoid outputs vs one-hot targets
    for i in range(epoch):
        confusion = ConfusionMatrix(num_classes=classes, labels=labels)
        net.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            outputs, hidden_cell = net(feature, hidden_cell)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
            # Break the graph so the next batch does not backprop through this one.
            hidden_cell = hidden_cell.detach()
        print(i / epoch)  # rough progress indicator

        net.eval()
        # FIX: evaluate under no_grad so no autograd graph is built.
        with torch.no_grad():
            for feature, target in yloader:
                pred, _ = net(feature, hidden_cell)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        print(stracc)
        tmp.append(float(stracc))
    confusion.plot()
    return tmp



def train_lstm(xloader, yloader, net, epoch, device):
    """Train an LSTM classifier and evaluate it on the test loader every epoch.

    Carries the (h, c) hidden tuple across batches; returns the per-epoch
    accuracy values (floats) from the confusion matrix.
    """
    tmp = []
    hidden_cell = (torch.zeros(1, 1, 10).to(device),  # shape: (n_layers, batch, hidden_size)
                   torch.zeros(1, 1, 10).to(device))
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # optimizer
    loss_function = nn.BCELoss()  # loss over sigmoid outputs vs one-hot targets
    for i in range(epoch):
        confusion = ConfusionMatrix(num_classes=classes, labels=labels)
        net.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            outputs, hidden_cell = net(feature, hidden_cell)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
            # BUGFIX: detach both hidden states (train_rnn already does this)
            # so the next batch cannot backprop through the freed graph.
            hidden_cell = (hidden_cell[0].detach(), hidden_cell[1].detach())
        print(i / epoch)  # rough progress indicator
        # Per-epoch confusion-matrix evaluation.
        net.eval()  # BUGFIX: was missing; keeps eval consistent with train_rnn
        with torch.no_grad():
            for feature, target in yloader:
                pred, _ = net(feature, hidden_cell)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        print(stracc)
        tmp.append(float(stracc))
    confusion.plot()
    return tmp

def train_MODLSTM(xloader, yloader, net1, net2, epoch, device):
    """Two-stage training: autoencoder (net1) then LSTM classifier (net2).

    Each epoch: (1) train net1 on reconstruction MSE, (2) train net2 on the
    encoder output with BCE, (3) evaluate net1+net2 on the test loader.
    Returns the per-epoch accuracy values (floats) from the confusion matrix.
    """
    tmp = []
    hidden_cell = (torch.zeros(1, 1, 10).to(device),  # shape: (n_layers, batch, hidden_size)
                   torch.zeros(1, 1, 10).to(device))
    optimizer = torch.optim.Adam(net2.parameters(), lr=0.001)  # classifier optimizer
    loss_function = nn.BCELoss()  # classification loss
    lossae = nn.MSELoss()  # reconstruction loss
    optae = torch.optim.Adam(net1.parameters(), lr=0.001)  # autoencoder optimizer
    for i in range(epoch):
        confusion = ConfusionMatrix(num_classes=classes, labels=[l for _, l in class_indict.items()])
        print(i / epoch)  # rough progress indicator
        # Stage 1: autoencoder training on reconstruction loss.
        net1.train()
        for seq, _target in xloader:  # FIX: old name `labels` shadowed the module-level list
            optae.zero_grad()
            code, decoded = net1(seq)
            loss = lossae(decoded, seq)
            loss.backward()
            optae.step()
        net1.eval()
        # Stage 2: LSTM classifier training on the encoder output.
        net2.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            re_feature, _ = net1(feature)
            outputs, hidden_cell = net2(re_feature, hidden_cell)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
            # BUGFIX: detach the hidden tuple so the next batch cannot
            # backprop through the previous (freed) graph.
            hidden_cell = (hidden_cell[0].detach(), hidden_cell[1].detach())
        net2.eval()
        # Stage 3: confusion-matrix evaluation of the combined pipeline.
        with torch.no_grad():
            for feature, target in yloader:
                re_feature, _ = net1(feature)
                pred, _ = net2(re_feature, hidden_cell)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        tmp.append(float(stracc))
    confusion.plot()
    return tmp


def train_MODRNN(xloader, yloader, net1, net2, epoch, device):
    """Two-stage training: autoencoder (net1) then RNN classifier (net2).

    Each epoch: (1) train net1 on reconstruction MSE, (2) train net2 on the
    encoder output with BCE, (3) evaluate net1+net2 on the test loader.
    Returns the per-epoch accuracy values (floats) from the confusion matrix.
    """
    tmp = []
    hidden_cell = torch.zeros(1, 1, 10).to(device)  # (n_layers, batch, hidden_size)
    optimizer = torch.optim.Adam(net2.parameters(), lr=0.001)  # classifier optimizer
    loss_function = nn.BCELoss()  # classification loss
    lossae = nn.MSELoss()  # reconstruction loss
    optae = torch.optim.Adam(net1.parameters(), lr=0.001)  # autoencoder optimizer

    for i in range(epoch):
        print(i / epoch)  # rough progress indicator
        confusion = ConfusionMatrix(num_classes=classes, labels=[l for _, l in class_indict.items()])
        # Stage 1: autoencoder training on reconstruction loss.
        net1.train()
        for seq, _target in xloader:  # FIX: old name `labels` shadowed the module-level list
            optae.zero_grad()
            code, decoded = net1(seq)
            loss = lossae(decoded, seq)
            # FIX: dropped the per-batch debug print(code.shape) that flooded stdout
            loss.backward()
            optae.step()
        net1.eval()
        # Stage 2: RNN classifier training on the encoder output.
        net2.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            re_feature, _ = net1(feature)
            outputs, hidden_cell = net2(re_feature, hidden_cell)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
            # Break the graph so the next batch does not backprop through this one.
            hidden_cell = hidden_cell.detach()

        net2.eval()
        # Stage 3: confusion-matrix evaluation of the combined pipeline.
        with torch.no_grad():
            for feature, target in yloader:
                re_feature, _ = net1(feature)
                pred, _ = net2(re_feature, hidden_cell)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        tmp.append(float(stracc))
    confusion.plot()
    return tmp



def train_dnn(xloader, yloader, net, epoch, device):
    """Train a fully-connected classifier and evaluate it every epoch.

    xloader/yloader: train/test DataLoaders yielding (feature, one-hot target).
    Returns the per-epoch accuracy values (floats) from the confusion matrix.
    """
    tmp = []
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # optimizer
    loss_function = nn.BCELoss()  # loss over sigmoid outputs vs one-hot targets
    for i in range(epoch):
        confusion = ConfusionMatrix(num_classes=classes, labels=labels)
        net.train()
        for feature, target in xloader:
            optimizer.zero_grad()
            outputs = net(feature)
            single_loss = loss_function(outputs, target)
            single_loss.backward()
            optimizer.step()
        print(i / epoch)  # rough progress indicator

        net.eval()
        # FIX: evaluate under no_grad so no autograd graph is built.
        with torch.no_grad():
            for feature, target in yloader:
                pred = net(feature)
                pred = torch.argmax(pred, dim=1)
                true = torch.argmax(target, dim=1)
                confusion.update(pred.cpu().numpy(), true.cpu().numpy())
        stracc = confusion.summary()
        print(stracc)
        tmp.append(float(stracc))
    confusion.plot()
    return tmp


#ML
def RF():
    """Random-forest baseline on the module-level x, y.

    Performs a 70/30 split, fits the classifier, and prints macro recall,
    overall accuracy, per-class precision, and average precision.
    """
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    clf = RandomForestClassifier(n_estimators=5, max_depth=4, min_samples_split=3)
    clf = clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    recall = metrics.recall_score(y_test, y_pred, average='macro')
    print("overall recall: %f" % recall)
    # FIX: sklearn convention is (y_true, y_pred); accuracy is symmetric but
    # the swapped order was misleading.
    ov_acc = metrics.accuracy_score(y_test, y_pred)
    print("overall accuracy: %f" % ov_acc)
    precision_for_each_class = metrics.precision_score(y_test, y_pred, average=None)
    # FIX: label said "acc" but the printed value is per-class precision.
    print("precision_for_each_class:\n", precision_for_each_class)
    avg_pre = np.mean(precision_for_each_class)
    print("average precision:%f" % avg_pre)

def DT():
    """Decision-tree baseline on the module-level x, y.

    Performs a 70/30 split, fits the classifier, and prints macro recall,
    overall accuracy, per-class precision, and average precision.
    """
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    clf = tree.DecisionTreeClassifier()  # instantiate the model
    clf = clf.fit(x_train, y_train)  # fit on the training split
    y_pred = clf.predict(x_test)
    recall = metrics.recall_score(y_test, y_pred, average='macro')
    print("overall recall: %f" % recall)
    # FIX: sklearn convention is (y_true, y_pred); accuracy is symmetric but
    # the swapped order was misleading.
    ov_acc = metrics.accuracy_score(y_test, y_pred)
    print("overall accuracy: %f" % ov_acc)
    precision_for_each_class = metrics.precision_score(y_test, y_pred, average=None)
    # FIX: label said "acc" but the printed value is per-class precision.
    print("precision_for_each_class:\n", precision_for_each_class)
    avg_pre = np.mean(precision_for_each_class)
    print("average precision:%f" % avg_pre)

def SVM():
    """SVM baseline on the module-level x, y.

    Converts one-hot y to class indices via option2svmy, performs a 70/30
    split, fits an SVC, and prints macro recall, overall accuracy, per-class
    precision, and average precision.
    """
    y1 = option2svmy(y)  # NOTE(review): presumably one-hot -> index labels; verify in tools
    x_train, x_test, y_train, y_test = train_test_split(x, y1, test_size=0.3)
    clf = svm.SVC()
    clf = clf.fit(x_train, y_train)  # fit on the training split
    y_pred = clf.predict(x_test)
    recall = metrics.recall_score(y_test, y_pred, average='macro')
    print("overall recall: %f" % recall)
    # FIX: sklearn convention is (y_true, y_pred); accuracy is symmetric but
    # the swapped order was misleading.
    ov_acc = metrics.accuracy_score(y_test, y_pred)
    print("overall accuracy: %f" % ov_acc)
    precision_for_each_class = metrics.precision_score(y_test, y_pred, average=None)
    # FIX: label said "acc" but the printed value is per-class precision.
    print("precision_for_each_class:\n", precision_for_each_class)
    avg_pre = np.mean(precision_for_each_class)
    print("average precision:%f" % avg_pre)

def KNN():
    """k-NN baseline on the module-level x, y.

    Uses n_neighbors=classes (NOTE(review): tying k to the class count is
    unusual — confirm intent). Prints macro recall, overall accuracy,
    per-class precision, and average precision.
    """
    clf = KNeighborsClassifier(n_neighbors=classes, metric="minkowski")
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    clf = clf.fit(x_train, y_train)  # fit on the training split
    y_pred = clf.predict(x_test)
    recall = metrics.recall_score(y_test, y_pred, average='macro')
    print("overall recall: %f" % recall)
    # FIX: sklearn convention is (y_true, y_pred); accuracy is symmetric but
    # the swapped order was misleading.
    ov_acc = metrics.accuracy_score(y_test, y_pred)
    print("overall accuracy: %f" % ov_acc)
    precision_for_each_class = metrics.precision_score(y_test, y_pred, average=None)
    # FIX: label said "acc" but the printed value is per-class precision.
    print("precision_for_each_class:\n", precision_for_each_class)
    avg_pre = np.mean(precision_for_each_class)
    print("average precision:%f" % avg_pre)

def chooseModle(n, feature_dim, device):
    """Dispatch on *n* to train one model family.

    n: 1=CNN, 2=RNN, 3=LSTM, 4=AE+LSTM, 5=AE+RNN, 6=classical ML baselines
    (KNN/SVM/RF), 7=DNN. feature_dim: input feature dimension for the
    recurrent/DNN models. Returns the per-epoch result list, or None for the
    classical-ML option and for unrecognised values of n.
    (Name kept as-is — callers reference `chooseModle`.)
    """
    if n == 1:
        res = train_cnn(trainloader, testloader, MyCNN().to(device), 100, device)
    elif n == 2:
        res = train_rnn(trainloader, testloader, MYRNN(feature_dim).to(device), 100, device)
    elif n == 3:
        res = train_lstm(trainloader, testloader, MYLSTM(feature_dim).to(device), 200, device)
    elif n == 4:
        res = train_MODLSTM(trainloader, testloader,
                            AECNN(inner_coder).to(device),
                            MYLSTM(input_size=inner_coder).to(device),
                            100, device)
    elif n == 5:
        res = train_MODRNN(trainloader, testloader,
                           AECNN(inner_coder).to(device),
                           MYRNN(input_size=inner_coder).to(device),
                           1000, device)
    elif n == 6:
        print("KNN:")
        KNN()
        print("SVM:")
        SVM()
        print("RF:")
        RF()
        res = None
    elif n == 7:
        res = train_dnn(trainloader, testloader, MyDNN(feature_dim).to(device), 500, device)
    else:
        # BUGFIX: an unrecognised n previously left `res` unbound, raising
        # UnboundLocalError at the return below.
        res = None
    return res



if __name__ == '__main__':
    # Model selection: 1:cnn 2:rnn 3:lstm 4:modlstm 5:modrnn 6:classical ML (KNN/SVM/RF)
    # Remember to change the batch size to 1 before running option 1.
    result = chooseModle(3,60,device)
    # print(result)
    # np.save('./result/cic/binary/lstm.npy',result)