
import dgl.nn as dglnn
from dgl import from_networkx
import torch.nn as nn
import torch as th
import torch.nn.functional as F
import dgl.function as fn
import networkx as nx
import pandas as pd
import socket
import struct
import random
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import category_encoders as ce
from sklearn.decomposition import PCA
# import seaborn as sns
import matplotlib.pyplot as plt
from sympy.physics.control.control_plots import matplotlib


def main():
    """Train and evaluate an edge-classifying GraphSAGE model on the TON_IoT dataset.

    Loads the CSV, anonymizes source IPs, target-encodes categorical columns,
    standardizes numeric features, builds a DGL graph whose edges are network
    flows, trains a GNN edge classifier on GPU, and writes evaluation reports.
    """
    data = pd.read_csv('./ton_iot.csv')
    # Count rows per unique attack type (value_counts() returns a Series
    # indexed by unique value). NOTE(review): the result is unused here.
    data.type.value_counts()

    # df = data.drop(data[(data.type == 'mitm') & (data.type > 'ransomware')].index)

    # mitm = data[data['type'] == 'mitm']
    # ransomware = data[data['type'] == 'ransomware']

    # Randomly sample 10% of the rows (frac=0.1), if downsampling is desired.
    # data = data.sample(frac=0.1, random_state=123)

    # data = pd.concat([df,mitm,ransomware], axis=0, ignore_index=True)

    # data

    # Replace every src_ip with a random address in 172.16.0.1-172.31.0.1
    # (the lambda ignores the original value x, so this anonymizes all sources).
    data['src_ip'] = data.src_ip.apply(
        lambda x: socket.inet_ntoa(struct.pack('>I', random.randint(0xac100001, 0xac1f0001))))
    # Cast src_ip, src_port, dst_ip and dst_port to strings so they can be joined.
    data['src_ip'] = data.src_ip.apply(str)
    data['src_port'] = data.src_port.apply(str)
    data['dst_ip'] = data.dst_ip.apply(str)
    data['dst_port'] = data.dst_port.apply(str)
    # Merge IP and port into a single "ip:port" endpoint id; these become graph nodes.
    data['src_ip'] = data['src_ip'] + ':' + data['src_port']
    data['dst_ip'] = data['dst_ip'] + ':' + data['dst_port']

    data.type.value_counts()
    # Unique values of http_trans_depth. NOTE(review): result unused.
    data['http_trans_depth'].unique()
    # Drop columns not used as features (free-text / high-cardinality fields)
    # plus the multi-class 'type' column; the binary 'label' column is kept.
    data.drop(columns=['src_port', 'dst_port', 'http_uri', 'weird_name', 'weird_addl', 'weird_notice', 'dns_query',
                       'ssl_subject', 'ssl_issuer', 'http_user_agent', 'type'], inplace=True)
    #
    # le = LabelEncoder()
    # le.fit(data.label.values)
    # data['label'] = le.transform(data['label'])

    # Ensure the binary label column is integer-typed.
    data['label'] = data.label.apply(int)

    label = data.label

    data.drop(columns=['label'], inplace=True)

    # Scaler used later to standardize the numeric feature columns.
    scaler = StandardScaler()
    # Re-attach the label column to the feature frame.
    data = pd.concat([data, label], axis=1)
    # 70/30 train/test split, stratified so the label distribution in each
    # split matches the full dataset.
    X_train, X_test, y_train, y_test = train_test_split(
        data, label, test_size=0.3, random_state=123, stratify=label)

    # X_train

    # Target encoder: maps each categorical feature to a continuous value
    # derived from the training-label statistics.
    encoder = ce.TargetEncoder(
        cols=['proto', 'service', 'conn_state', 'dns_qclass', 'dns_qtype', 'dns_rcode', 'dns_AA', 'dns_RD', 'dns_RA',
              'dns_rejected', 'ssl_version', 'ssl_cipher', 'ssl_resumed', 'ssl_established', 'http_method', 'http_version',
              'http_status_code', 'http_orig_mime_types', 'http_resp_mime_types', 'http_trans_depth'])
    # Fit the encoder on the training split only, then transform that split.
    encoder.fit(X_train, y_train)
    X_train = encoder.transform(X_train)
    # All columns except the first two (src_ip/dst_ip endpoints) and 'label'.
    cols_to_norm = list(set(list(X_train.iloc[:, 2:].columns)) - set(list(['label'])))
    # Standardize the selected columns (scaler fitted on the training data).
    X_train[cols_to_norm] = scaler.fit_transform(X_train[cols_to_norm])
    # Pack each row's standardized features into a list column 'h', used as
    # the per-edge feature vector.
    X_train['h'] = X_train[cols_to_norm].values.tolist()

    # X_train

    # Build a NetworkX multigraph: nodes are endpoints, edges are flows that
    # carry the feature vector 'h' and the binary 'label'.
    G = nx.from_pandas_edgelist(X_train, "src_ip", "dst_ip", ['h', 'label'], create_using=nx.MultiGraph())
    # Make the graph directed (each undirected edge becomes two directed edges).
    G = G.to_directed()
    # Convert to a DGL graph, keeping the edge attributes for GNN training.
    G = from_networkx(G, edge_attrs=['h', 'label'])
    # Node features: all-ones vectors with the same width as the edge features.
    G.ndata['h'] = th.ones(G.num_nodes(), G.edata['h'].shape[1])
    # Reshape node and edge features to (count, 1, dim) for the GNN layers.
    G.ndata['h'] = th.reshape(G.ndata['h'], (G.ndata['h'].shape[0], 1, G.ndata['h'].shape[1]))
    G.edata['h'] = th.reshape(G.edata['h'], (G.edata['h'].shape[0], 1, G.edata['h'].shape[1]))
    # Mark every edge as part of the training set.
    G.edata['train_mask'] = th.ones(len(G.edata['h']), dtype=th.bool)


    # Full model: a two-layer GraphSAGE encoder followed by an MLP edge scorer.
    # forward() first encodes the node features with the GNN, then scores
    # every edge of the graph with the predictor.
    class Model(nn.Module):
        def __init__(self, ndim_in, ndim_out, edim, activation, dropout):
            super().__init__()
            self.gnn = SAGE(ndim_in, ndim_out, edim, activation, dropout)
            self.pred = MLPPredictor(ndim_out, 2)

        def forward(self, g, nfeats, efeats):
            node_repr = self.gnn(g, nfeats, efeats)
            edge_scores = self.pred(g, node_repr)
            return edge_scores


    # One GraphSAGE-style message-passing layer that also uses edge features.
    # A message is a linear map of [source node feature || edge feature]; the
    # mean over incoming messages is concatenated with the node's own feature
    # and projected again (Eq. 4 / Eq. 5 of the reference formulation).
    class SAGELayer(nn.Module):
        def __init__(self, ndim_in, edims, ndim_out, activation):
            super().__init__()
            # Message transform: forces every message to a fixed output width.
            self.W_msg = nn.Linear(ndim_in + edims, ndim_out)
            # Update transform for [own feature || aggregated neighborhood].
            self.W_apply = nn.Linear(ndim_in + ndim_out, ndim_out)
            # Stored but not used in forward (ReLU is applied directly below).
            self.activation = activation

        def message_func(self, edges):
            combined = th.cat([edges.src['h'], edges.data['h']], 2)
            return {'m': self.W_msg(combined)}

        def forward(self, g_dgl, nfeats, efeats):
            # local_scope keeps the temporary 'h'/'h_neigh' fields from
            # leaking into the caller's graph.
            with g_dgl.local_scope():
                g_dgl.ndata['h'] = nfeats
                g_dgl.edata['h'] = efeats
                # Eq4: mean-aggregate the per-edge messages into each node.
                g_dgl.update_all(self.message_func, fn.mean('m', 'h_neigh'))
                # Eq5: combine the self feature with the aggregated neighborhood.
                joined = th.cat([g_dgl.ndata['h'], g_dgl.ndata['h_neigh']], 2)
                g_dgl.ndata['h'] = F.relu(self.W_apply(joined))
                return g_dgl.ndata['h']

    # Two-layer GraphSAGE encoder with dropout applied between the layers.
    # The hidden width is fixed at 128; the (nodes, 1, dim) output is summed
    # over axis 1 at the end, yielding one vector per node.
    class SAGE(nn.Module):
        def __init__(self, ndim_in, ndim_out, edim, activation, dropout):
            super().__init__()
            self.layers = nn.ModuleList([
                SAGELayer(ndim_in, edim, 128, activation),
                SAGELayer(128, edim, ndim_out, activation),
            ])
            self.dropout = nn.Dropout(p=dropout)

        def forward(self, g, nfeats, efeats):
            first = True
            for layer in self.layers:
                if not first:
                    nfeats = self.dropout(nfeats)
                nfeats = layer(g, nfeats, efeats)
                first = False
            return nfeats.sum(1)


    # Edge scorer: concatenates the representations of an edge's two endpoints
    # and maps them through a single linear layer to per-class logits.
    # forward() scores every edge of the graph via apply_edges.
    class MLPPredictor(nn.Module):
        def __init__(self, in_features, out_classes):
            super().__init__()
            self.W = nn.Linear(in_features * 2, out_classes)

        def apply_edges(self, edges):
            endpoint_pair = th.cat([edges.src['h'], edges.dst['h']], 1)
            return {'score': self.W(endpoint_pair)}

        def forward(self, graph, h):
            # local_scope keeps the temporary 'h'/'score' fields off the
            # caller's graph.
            with graph.local_scope():
                graph.ndata['h'] = h
                graph.apply_edges(self.apply_edges)
                return graph.edata['score']


    # Move the graph and all of its node/edge data to the first GPU ('cuda:0').
    G = G.to('cuda:0')
    # Device the graph currently lives on:
    # G.device
    #
    # G.ndata['h'].device
    #
    # G.edata['h'].device


    from sklearn.utils import class_weight
    import numpy as np
    # Compute 'balanced' class weights to compensate for the label imbalance
    # between normal and attack edges.
    class_weights = class_weight.compute_class_weight('balanced',
                                                      classes=np.unique(G.edata['label'].cpu().numpy()),
                                                      y=G.edata['label'].cpu().numpy())
    # %%
    # class_weights = [10,1]
    # One weight per unique class label, moved to the GPU for the loss below.
    class_weights = th.FloatTensor(class_weights).cuda()
    # Cross-entropy loss that accounts for class imbalance via the weights.
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    # Fraction of predictions whose argmax class equals the true label.
    def compute_accuracy(pred, labels):
        hits = pred.argmax(1) == labels
        return hits.float().mean().item()


    # NOTE(review): os is imported but not used in the visible code.
    import os

    # %%
    node_features = G.ndata['h']
    edge_features = G.edata['h']

    edge_label = G.edata['label']
    train_mask = G.edata['train_mask']

    # Build the model (input/edge feature width taken from the graph,
    # 128-dim node output, ReLU, dropout 0.2) and move it to the GPU.
    model = Model(G.ndata['h'].shape[2], 128, G.ndata['h'].shape[2], F.relu, 0.2).cuda()
    # Adam optimizer over all model parameters (default learning rate).
    opt = th.optim.Adam(model.parameters())

    # Full-batch training for 999 epochs; report training accuracy every 100.
    for epoch in range(1, 1000):
        pred = model(G, node_features, edge_features).cuda()
        loss = criterion(pred[train_mask], edge_label[train_mask])
        opt.zero_grad()
        loss.backward()
        opt.step()
        if epoch % 100 == 0:
            print('Epoch:', epoch, ' Training acc:', compute_accuracy(pred[train_mask], edge_label[train_mask]))
    # %%
    # Apply the target encoder fitted on the training split to the test split.
    X_test = encoder.transform(X_test)
    # %%
    # Standardize with the scaler fitted on the training split.
    X_test[cols_to_norm] = scaler.transform(X_test[cols_to_norm])
    # %%
    X_test
    # %%
    # Pack the standardized test features into the per-edge vector column 'h'.
    X_test['h'] = X_test[cols_to_norm].values.tolist()
    # %%
    # Build the test graph exactly as the training graph was built.
    G_test = nx.from_pandas_edgelist(X_test, "src_ip", "dst_ip", ['h', 'label'], create_using=nx.MultiGraph())
    G_test = G_test.to_directed()
    G_test = from_networkx(G_test, edge_attrs=['h', 'label'])
    # Remove the ground-truth labels from the graph; keep them for scoring.
    actual = G_test.edata.pop('label')
    # All-ones node features with the same width as the training node features.
    G_test.ndata['feature'] = th.ones(G_test.num_nodes(), G.ndata['h'].shape[2])
    # %%
    # Reshape node and edge features to (count, 1, dim), matching training.
    G_test.ndata['feature'] = th.reshape(G_test.ndata['feature'],
                                         (G_test.ndata['feature'].shape[0], 1, G_test.ndata['feature'].shape[1]))
    G_test.edata['h'] = th.reshape(G_test.edata['h'], (G_test.edata['h'].shape[0], 1, G_test.edata['h'].shape[1]))
    # %%
    G_test = G_test.to('cuda:0')
    # %%
    import timeit

    # Time a single full-graph inference pass over the test graph.
    start_time = timeit.default_timer()
    node_features_test = G_test.ndata['feature']
    edge_features_test = G_test.edata['h']
    test_pred = model(G_test, node_features_test, edge_features_test).cuda()
    elapsed = timeit.default_timer() - start_time
    # %%
    print(str(elapsed) + ' seconds')
    # %%
    # Collapse per-class logits to a hard class index, then move to CPU numpy.
    test_pred = test_pred.argmax(1)
    test_pred = th.Tensor.cpu(test_pred).detach().numpy()

    # %%
    # Map 0 -> "Normal", anything else -> "Attack" for readable reports.
    actual = ["Normal" if i == 0 else "Attack" for i in actual]
    test_pred = ["Normal" if i == 0 else "Attack" for i in test_pred]
    # %%
    #from sklearn.metrics import plot_confusion_matrix

    # %%
    import numpy as np

    def plot_confusion_matrix(cm,
                              target_names,
                              title='Confusion matrix',
                              cmap=None,
                              normalize=True):
        """Render a confusion matrix as a matplotlib figure.

        cm           -- confusion matrix of raw counts, e.g. from
                        sklearn.metrics.confusion_matrix
        target_names -- class names used as axis tick labels, or None
        title        -- figure title
        cmap         -- matplotlib colormap; defaults to 'Blues'
        normalize    -- if True, annotate cells with row-normalized rates
                        instead of raw counts
        """
        import matplotlib.pyplot as plt
        import numpy as np
        import itertools
        # Switch to the non-interactive Agg backend; with Agg, plt.show()
        # below is effectively a no-op. ('matplotlib' comes from the module
        # imported at the top of the file.)
        matplotlib.use('Agg')
        # Overall accuracy = trace / total, computed from the raw counts.
        accuracy = np.trace(cm) / float(np.sum(cm))
        misclass = 1 - accuracy

        if cmap is None:
            cmap = plt.get_cmap('Blues')

        plt.figure(figsize=(12, 12))
        # NOTE(review): the heatmap always shows the raw counts; only the
        # per-cell text annotations below use the normalized values.
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()

        if target_names is not None:
            tick_marks = np.arange(len(target_names))
            plt.xticks(tick_marks, target_names, rotation=45)
            plt.yticks(tick_marks, target_names)

        if normalize:
            # Row-normalize: each row sums to 1 (per-true-class rates).
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

        # Threshold that flips the annotation color from black to white.
        thresh = cm.max() / 1.5 if normalize else cm.max() / 2
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            if normalize:
                plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                         horizontalalignment="center",
                         color="white" if cm[i, j] > thresh else "black")
            else:
                plt.text(j, i, "{:,}".format(cm[i, j]),
                         horizontalalignment="center",
                         color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
        plt.show()


    # %%
    from sklearn.metrics import confusion_matrix

    # Plot the raw-count confusion matrix for the test predictions.
    plot_confusion_matrix(cm=confusion_matrix(actual, test_pred),
                          normalize=False,
                          target_names=np.unique(actual),
                          title="Confusion Matrix")
    # %%
    from sklearn.metrics import classification_report

    # Per-class precision/recall/F1 printed with 4 decimal digits.
    print(classification_report(actual, test_pred, digits=4))
    # %%
    # Persist the trained model weights.
    th.save(model.state_dict(), 'ton-iot.binary.pt')
    # %%
    from sklearn.metrics import classification_report

    # Same report, printed again with 2 decimal digits.
    print(classification_report(actual, test_pred, digits=2))
    # %%
    # The report as a dict, for export to CSV below.
    report = classification_report(actual, test_pred, digits=4, output_dict=True)
    # %%
    df = pd.DataFrame(report).transpose()
    # %%
    df.to_csv('unsw_ton_iot_agg_report.csv')
    # %%
    df
    # %%
    # Save the model weights a second time under a different filename.
    th.save(model.state_dict(), 'unsw_ton_iot_agg_mul.pt')


# Run the full train/evaluate pipeline when executed as a script.
if __name__ == "__main__":
    main()
