{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ranking-naples",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.utils import shuffle\n",
    "import torch\n",
    "from torch import nn\n",
    "import random\n",
    "import time\n",
    "from collections import Counter\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import accuracy_score\n",
    "import torchtext.vocab as Vocab\n",
    "import torch.utils.data as Data\n",
    "import hiddenlayer as hl\n",
    "from visdom import Visdom\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "pointed-identifier",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pick GPU 0 when CUDA is available, otherwise fall back to the CPU.\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "mineral-ultimate",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_data():\n",
    "    \"\"\"Read opcode sequences from the white/black sample files.\n",
    "\n",
    "    Returns (X, y): X is a list of opcode-token lists (one per line),\n",
    "    y the matching labels (1 = white/benign, 0 = black/malicious).\n",
    "    \"\"\"\n",
    "    X, y = [], []\n",
    "    for path, label in ((\"../data/whiteopcode.txt\", 1), (\"../data/blackopcode.txt\", 0)):\n",
    "        with open(path) as f:\n",
    "            for line in f:\n",
    "                X.append(line.split())\n",
    "                y.append(label)\n",
    "    return X, y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "direct-instrumentation",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_vocab():\n",
    "    \"\"\"Build a torchtext Vocab over every whitespace token in the corpus file.\"\"\"\n",
    "    with open(\"../data/all_php.txt\") as f:\n",
    "        data = f.read().split()\n",
    "    counter = Counter(data)\n",
    "    return Vocab.Vocab(counter)\n",
    "\n",
    "def get_dict() -> dict:\n",
    "    \"\"\"Map each distinct corpus token to a unique integer index.\n",
    "\n",
    "    Alternative to get_vocab(); not used by the rest of the notebook.\n",
    "    \"\"\"\n",
    "    with open(\"../data/all_php.txt\") as f:\n",
    "        data = f.read().split()\n",
    "    d = dict(Counter(data))\n",
    "    # Overwrite each token's count with its enumeration index. Mutating\n",
    "    # values (not keys) while iterating items() is safe in CPython.\n",
    "    for i, item in enumerate(d.items()):\n",
    "        d[item[0]] = i\n",
    "    return d\n",
    "\n",
    "def preprocess(X, y, vocab):\n",
    "    \"\"\"Convert token lists to fixed-length index tensors.\n",
    "\n",
    "    Returns (features, labels): features is an integer tensor of shape\n",
    "    (len(X), 1000); labels is a 1-D tensor built from y.\n",
    "    \"\"\"\n",
    "    max_l = 1000  # truncate or zero-pad every opcode sequence to exactly 1000 tokens\n",
    "    def pad(x):\n",
    "        # NOTE(review): pads with raw index 0, which is also a live vocab\n",
    "        # index -- confirm a dedicated <pad> index is not required.\n",
    "        return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))\n",
    "    features = torch.tensor([pad([vocab.stoi[word] for word in words]) for words in X])\n",
    "    # features = torch.tensor([pad([vocab[word] for word in words]) for words in X])\n",
    "    labels = torch.tensor(y)\n",
    "    return features, labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "flying-parameter",
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_accuracy(data_iter, net, device=None):\n",
    "    \"\"\"Compute classification accuracy of `net` over all batches of `data_iter`.\n",
    "\n",
    "    For nn.Module models, batches are moved to `device` (defaulting to the\n",
    "    device of the net's parameters) and eval/train modes are toggled around\n",
    "    the forward pass. Returns the accuracy as a float in [0, 1].\n",
    "    \"\"\"\n",
    "    if device is None and isinstance(net, torch.nn.Module):\n",
    "        # No device given: use the device the net's parameters live on.\n",
    "        device = list(net.parameters())[0].device\n",
    "    acc_sum, n = 0.0, 0\n",
    "    with torch.no_grad():\n",
    "        for X, y in data_iter:\n",
    "            if isinstance(net, torch.nn.Module):\n",
    "                net.eval()  # evaluation mode: disables dropout\n",
    "                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n",
    "                net.train()  # switch back to training mode\n",
    "            else:  # hand-rolled model (plain function); GPU not considered\n",
    "                if 'is_training' in net.__code__.co_varnames:  # model takes an is_training flag\n",
    "                    # Evaluate with is_training=False\n",
    "                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()\n",
    "                else:\n",
    "                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n",
    "            n += y.shape[0]\n",
    "    return acc_sum / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "vocational-marathon",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):\n",
    "    \"\"\"Train `net` with `optimizer` against `loss`, reporting per-epoch stats.\n",
    "\n",
    "    Prints the average training loss, training accuracy, test accuracy and\n",
    "    wall time after each epoch. `net` is moved to `device`.\n",
    "    \"\"\"\n",
    "    net = net.to(device)\n",
    "    print(\"training on \", device)\n",
    "    for epoch in range(num_epochs):\n",
    "        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()\n",
    "        # Fix: the batch counter must reset every epoch. It previously\n",
    "        # accumulated across epochs while train_l_sum reset, so the printed\n",
    "        # average loss shrank artificially after the first epoch.\n",
    "        batch_count = 0\n",
    "        for X, y in train_iter:\n",
    "            X = X.to(device)\n",
    "            y = y.to(device)\n",
    "            y_hat = net(X)\n",
    "            l = loss(y_hat, y)\n",
    "            optimizer.zero_grad()\n",
    "            l.backward()\n",
    "            optimizer.step()\n",
    "            train_l_sum += l.cpu().item()\n",
    "            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n",
    "            n += y.shape[0]\n",
    "            batch_count += 1\n",
    "        test_acc = evaluate_accuracy(test_iter, net)\n",
    "        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n",
    "              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "behind-chaos",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BiRNN(nn.Module):\n",
    "    \"\"\"Bidirectional LSTM classifier with 2 output classes.\n",
    "\n",
    "    Embeds token indices, encodes with a bidirectional LSTM, and classifies\n",
    "    from the concatenated hidden states of the first and last time steps.\n",
    "    \"\"\"\n",
    "    def __init__(self, vocab, embedding_size, n_hiddens, n_layers):\n",
    "        super(BiRNN, self).__init__()\n",
    "        self.embedding = nn.Embedding(len(vocab), embedding_size)\n",
    "        # Bidirectional recurrent encoder\n",
    "        self.encoder = nn.LSTM(input_size=embedding_size,\n",
    "                               hidden_size=n_hiddens,\n",
    "                               num_layers=n_layers,\n",
    "                               bidirectional=True)\n",
    "        # The first and last time-step states (2 * n_hiddens each, forward +\n",
    "        # backward) are concatenated as the fully-connected layer's input.\n",
    "        self.decoder = nn.Linear(4 * n_hiddens, 2)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs has shape (batch, seq_len). The LSTM wants seq_len as the\n",
    "        # first dim, so transpose before embedding; result is\n",
    "        # (seq_len, batch, embedding_size).\n",
    "        embeddings = self.embedding(inputs.permute(1, 0))\n",
    "        # Only embeddings are passed in, so the LSTM returns the last layer's\n",
    "        # hidden state at every time step: (seq_len, batch, 2 * n_hiddens).\n",
    "        outputs, _ = self.encoder(embeddings)  # output, (h, c)\n",
    "        # Concatenate first and last time steps -> (batch, 4 * n_hiddens).\n",
    "        encoding = torch.cat((outputs[0], outputs[-1]), -1)\n",
    "        outs = self.decoder(encoding)\n",
    "        return outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "combined-honor",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_confusion_matrix(modelpath, y_test):\n",
    "    \"\"\"Load a saved model and print its confusion matrix and metrics.\n",
    "\n",
    "    Treats label 0 (black/malicious) as the positive class: with sklearn's\n",
    "    confusion_matrix ordering, matrix[0][0] counts true-0 predicted-0.\n",
    "    NOTE(review): relies on the notebook globals `test_iter` and `device`.\n",
    "    \"\"\"\n",
    "    net = torch.load(modelpath)\n",
    "    y_pred = []\n",
    "    for X, y in test_iter:\n",
    "        X = X.to(device)\n",
    "        y = y.to(device)\n",
    "        y_hat = net(X)\n",
    "        # extend (not append) so a short final batch cannot create a ragged\n",
    "        # list that np.array(...).reshape(-1) would fail on\n",
    "        y_pred.extend(y_hat.argmax(dim=1).tolist())\n",
    "    y_pred = np.array(y_pred)\n",
    "    y_test = np.array(y_test)\n",
    "    matrix = confusion_matrix(y_test, y_pred)\n",
    "    print(matrix)\n",
    "    TP = matrix[0][0]\n",
    "    FN = matrix[0][1]\n",
    "    FP = matrix[1][0]\n",
    "    TN = matrix[1][1]\n",
    "    FPR = FP / (FP + TN)\n",
    "    ACC = (TP + TN) / len(y_test)\n",
    "    PRE = TP / (TP + FP)  # fix: precision is TP/(TP+FP); the old TP/(TP+FN) duplicated recall\n",
    "    REC = TP / (TP + FN)\n",
    "    F1 = 2 * PRE * REC / (PRE + REC)\n",
    "    print(\"误报率：%.4f，准确率：%.4f，精确率：%.4f，召回率：%.4f，F1值：%.4f\" % (FPR, ACC, PRE, REC, F1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "composite-elite",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the dataset, vocabulary and model, then set up training hyperparameters.\n",
    "data, labels = get_data()\n",
    "vocab = get_vocab()\n",
    "features, labels = preprocess(data, labels, vocab)\n",
    "X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=1)\n",
    "# Keep a multiple of batch_size (640 = 20 * 32) so every test batch is full;\n",
    "# get_confusion_matrix's reshape assumed equally sized batches.\n",
    "X_test = X_test[:640]\n",
    "y_test = y_test[:640]\n",
    "\n",
    "# batch_size = 4\n",
    "batch_size = 32\n",
    "print_step = 20  # NOTE(review): unused in this notebook\n",
    "train_iter = Data.DataLoader(Data.TensorDataset(X_train, y_train), batch_size, shuffle=True)\n",
    "test_iter = Data.DataLoader(Data.TensorDataset(X_test, y_test), batch_size)\n",
    "\n",
    "embedding_size, n_hiddens, n_layers = 128, 128, 2\n",
    "net = BiRNN(vocab, embedding_size, n_hiddens, n_layers)\n",
    "\n",
    "lr, n_epochs = 0.01, 3\n",
    "# Only optimize parameters with requires_grad (matters if embeddings get frozen).\n",
    "optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()), lr=lr)\n",
    "loss = nn.CrossEntropyLoss()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "electric-revolution",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[286   1]\n",
      " [ 13 340]]\n",
      "误报率：0.0368，准确率：0.9781，精确率：0.9965，召回率：0.9965，F1值：0.9965\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the saved model on the (already truncated) test labels.\n",
    "get_confusion_matrix('model.pkl', y_test[:640])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "radio-relation",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "640\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: test-set size after truncation.\n",
    "print(len(y_test[:640]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "cultural-township",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "20.3125"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch: batch count if 650 test samples were used (not an integer).\n",
    "650 / 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "united-agenda",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "20.0"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch: 640 test samples / batch size 32 = 20 full batches.\n",
    "640 /32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fifty-dryer",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:.conda-my_torch] *",
   "language": "python",
   "name": "conda-env-.conda-my_torch-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
