{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# coding:utf-8\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "import os\n",
    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow C++ log spam\n",
    "\n",
    "import json\n",
    "import time\n",
    "\n",
    "from tldextract import TLDExtract\n",
    "# suffix_list_urls=None: use the bundled public-suffix snapshot, never fetch online\n",
    "extract = TLDExtract(suffix_list_urls=None)\n",
    "\n",
    "from keras.models import load_model\n",
    "from keras import regularizers\n",
    "from keras import optimizers\n",
    "from keras.models import Sequential\n",
    "\n",
    "# keras.layers re-exports these layers in both old and current Keras releases,\n",
    "# unlike the long-removed keras.layers.core/.embeddings/.recurrent submodules\n",
    "from keras.layers import Dense, Dropout, Activation, Embedding, LSTM, GRU\n",
    "from keras.layers import Bidirectional, Conv1D, MaxPool1D, Flatten\n",
    "\n",
    "# sklearn.cross_validation was removed in scikit-learn 0.20;\n",
    "# train_test_split lives in sklearn.model_selection since 0.18\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "from keras.utils import to_categorical\n",
    "\n",
    "import numpy as np\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "SRC_IP_IDX = 3-1               \n",
    "DST_IP_IDX = 4-1               \n",
    "SRC_PORT_IDX = 5-1             \n",
    "DST_PORT_IDX = 6-1             \n",
    "PROTOCOL_IDX = 7-1             \n",
    "DNS_QUERY_NAME_IDX = 55-1 # domain\n",
    "DNS_REQUEST_TYPE = 56-1\n",
    "DNS_DOMAIN_TTL = 59-1\n",
    "DNS_REPLY_IPV4IP = 60-1        \n",
    "DNS_REPLY_IPV6IP = 61-1        \n",
    "DNS_REPLY_RRTYPE = 62-1        \n",
    "DNS_REQUEST_LEN  = 88-1        \n",
    "DNS_REPLY_LENGTH = 90-1\n",
    "\n",
    "def iterbrowse(path):\n",
    "    for home, dirs, files in os.walk(path):\n",
    "        for filename in files:\n",
    "            yield os.path.join(home, filename)\n",
    "            \n",
    "def extract_domain(domain):\n",
    "    suffix = {'.com','.la','.io', '.co', '.cn','.info', '.net',\n",
    "              '.org','.me', '.mobi', '.us', '.biz', '.xxx', '.ca',\n",
    "              '.co.jp', '.com.cn', '.net.cn', '.org.cn', '.mx','.tv',\n",
    "              '.ws', '.ag', '.com.ag', '.net.ag', '.org.ag','.am',\n",
    "              '.asia', '.at', '.be', '.com.br', '.net.br', '.name', \n",
    "              '.live', '.news', '.bz', '.tech', '.pub', '.wang', \n",
    "              '.space', '.top', '.xin', '.social', '.date', '.site', \n",
    "              '.red', '.studio', '.link', '.online', '.help', '.kr', \n",
    "              '.club', '.com.bz', '.net.bz', '.cc', '.band', '.market',\n",
    "              '.com.co', '.net.co', '.nom.co', '.lawyer', '.de', '.es',\n",
    "              '.com.es', '.nom.es', '.org.es', '.eu', '.wiki', \n",
    "              '.design', '.software', '.fm', '.fr', '.gs', '.in', \n",
    "              '.co.in', '.firm.in', '.gen.in', '.ind.in', '.net.in', \n",
    "              '.org.in', '.it', '.jobs', '.jp', '.ms', '.com.mx', '.nl',\n",
    "              '.nu','.co.nz','.net.nz', '.org.nz', '.se', '.tc', '.tk',\n",
    "              '.tw', '.com.tw', '.idv.tw', '.org.tw', '.hk', '.co.uk',\n",
    "              '.me.uk', '.org.uk', '.vg','.in-addr.arpa'}\n",
    "\n",
    "    domain = domain.lower()\n",
    "    names = domain.split(\".\")\n",
    "    if len(names) >= 3:\n",
    "        if (\".\"+\".\".join(names[-2:])) in suffix:\n",
    "            return \".\".join(names[-3:]), \".\".join(names[:-3])\n",
    "        elif (\".\"+names[-1]) in suffix:\n",
    "            return \".\".join(names[-2:]), \".\".join(names[:-2])\n",
    "    #print (\"New domain suffix found. Use tld extract domain...\")\n",
    "\n",
    "    pos = domain.rfind(\"/\")\n",
    "    if pos >= 0: # maybe subdomain contains /, for dns tunnel tool\n",
    "        ext = extract(domain[pos+1:])\n",
    "        subdomain = domain[:pos+1] + ext.subdomain\n",
    "    else:\n",
    "        ext = extract(domain)\n",
    "        subdomain = ext.subdomain\n",
    "    if ext.suffix:\n",
    "        mdomain = ext.domain + \".\" + ext.suffix\n",
    "    else:\n",
    "        mdomain = ext.domain\n",
    "    return mdomain, subdomain\n",
    "\n",
    "\n",
    "def filter_metadata_dns(data):\n",
    "    \"\"\"Return True iff the split record looks like a UDP (proto 17) DNS query to port 53.\"\"\"\n",
    "    if len(data) < 91:  # truncated record; indices up to 90 must exist\n",
    "        return False\n",
    "\n",
    "    if not data[DNS_QUERY_NAME_IDX] or not data[DST_IP_IDX]:\n",
    "        return False\n",
    "    return data[PROTOCOL_IDX] == '17' and data[DST_PORT_IDX] == '53'\n",
    "\n",
    "\n",
    "def metadata2_domain_data(log):\n",
    "    \"\"\"Parse one '^'-separated metadata line.\n",
    "\n",
    "    Returns (mdomain, subdomain) for DNS-query records, (None, None) otherwise.\n",
    "    \"\"\"\n",
    "    fields = log.split('^')\n",
    "    if not filter_metadata_dns(fields):\n",
    "        return None, None\n",
    "    return extract_domain(fields[DNS_QUERY_NAME_IDX])\n",
    "\n",
    "\n",
    "def get_local_data(tag=\"labeled\"):\n",
    "    \"\"\"Read labeled sample files and return (black_subdomains, white_subdomains).\n",
    "\n",
    "    CDN traffic is treated as benign; black samples are only taken from\n",
    "    pcap-derived files.\n",
    "    \"\"\"\n",
    "    base_dir = \"./sample_data\"\n",
    "    black_data = []\n",
    "    white_data = []\n",
    "    for category in (\"black\", \"cdn\", \"white\"):\n",
    "        category_dir = \"%s/%s_%s\" % (base_dir, tag, category)\n",
    "\n",
    "        for file_path in iterbrowse(category_dir):\n",
    "            print( file_path)\n",
    "            with open(file_path) as fh:\n",
    "                for line in fh:\n",
    "                    mdomain, subdomain = metadata2_domain_data(line)\n",
    "                    if subdomain is None:\n",
    "                        continue\n",
    "                    if \"white\" in file_path or \"cdn\" in file_path:\n",
    "                        white_data.append(subdomain)\n",
    "                    elif \"black\" in file_path and \"pcap\" in file_path:\n",
    "                        black_data.append(subdomain)\n",
    "    return black_data, white_data\n",
    "\n",
    "\n",
    "class LABEL(object):\n",
    "    \"\"\"Numeric class labels; CDN traffic is grouped with benign (white).\"\"\"\n",
    "    white = 0\n",
    "    cdn = 0\n",
    "    black = 1\n",
    "\n",
    "def pad_sequences(X, maxlen, value=0):\n",
    "    S=[]\n",
    "    for x in X:\n",
    "        xlen = len(x)\n",
    "        if xlen < maxlen:\n",
    "            x.extend([value]*(maxlen-xlen))\n",
    "        else:\n",
    "            x = x[:maxlen]\n",
    "        S.append(x)\n",
    "    return S\n",
    "\n",
    "def get_data():\n",
    "    \"\"\"Assemble the training set and persist the char vocabulary to volcab.pkl.\n",
    "\n",
    "    Returns (X, Y, maxlen, max_features) where X is a list of int-encoded,\n",
    "    padded subdomain sequences and Y the matching 0/1 labels.\n",
    "    \"\"\"\n",
    "    black_x, white_x = get_local_data()\n",
    "    black_y, white_y = [LABEL.black]*len(black_x), [LABEL.white]*len(white_x)\n",
    "\n",
    "    X = black_x + white_x\n",
    "    labels = black_y + white_y\n",
    "\n",
    "    # Generate a dictionary of valid characters; index 0 is reserved for padding\n",
    "    valid_chars = {x: idx + 1 for idx, x in enumerate(set(''.join(X)))}\n",
    "\n",
    "    max_features = len(valid_chars) + 1\n",
    "    print(\"max_features:\", max_features)\n",
    "    maxlen = np.max([len(x) for x in X])\n",
    "    print(\"max_len:\", maxlen)\n",
    "    maxlen = min(maxlen, 256)  # cap sequence length to keep the model small\n",
    "\n",
    "    # Convert characters to int and pad\n",
    "    X = [[valid_chars[y] for y in x] for x in X]\n",
    "    X = pad_sequences(X, maxlen=maxlen, value=0.)\n",
    "\n",
    "    Y = labels\n",
    "\n",
    "    # Persist the vocabulary so prediction-time cells reuse the same encoding.\n",
    "    # FIX: use a context manager so the file handle is always closed.\n",
    "    data = {\"valid_chars\": valid_chars,\n",
    "            \"max_len\": maxlen,\n",
    "            \"volcab_size\": max_features}\n",
    "    with open(\"volcab.pkl\", 'wb') as output:\n",
    "        pickle.dump(data, output)\n",
    "\n",
    "    return X, Y, maxlen, max_features\n",
    "\n",
    "def build_model_BiRNN(max_len, volcab_size):\n",
    "    \"\"\"Build a bidirectional-GRU binary classifier over character sequences.\"\"\"\n",
    "    model = Sequential()\n",
    "    model.add(Embedding(input_dim=volcab_size, output_dim=64, input_length=max_len))\n",
    "    model.add(Bidirectional(GRU(16)))\n",
    "    model.add(Dense(16, activation='relu'))\n",
    "    model.add(Dropout(0.25))\n",
    "    model.add(Dense(1))\n",
    "    model.add(Activation('sigmoid'))\n",
    "    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "\n",
    "def get_cnn_model(max_len, volcab_size):\n",
    "    \"\"\"Build a 3-stage 1-D CNN binary classifier over character sequences.\"\"\"\n",
    "    model = Sequential()\n",
    "    model.add(Embedding(input_dim=volcab_size,\n",
    "                        output_dim=64,\n",
    "                        input_length=max_len))\n",
    "    # The three conv/pool stages differed only in kernel size (3, 4, 5);\n",
    "    # build them in a loop instead of copy-pasting the block three times.\n",
    "    for kernel_size in (3, 4, 5):\n",
    "        model.add(Conv1D(128,\n",
    "                         kernel_size,\n",
    "                         padding='valid',\n",
    "                         activation=\"relu\", \n",
    "                         kernel_regularizer=regularizers.l2(0.01),\n",
    "                         activity_regularizer=regularizers.l1(0.01)))\n",
    "        model.add(MaxPool1D(2))\n",
    "    model.add(Flatten())\n",
    "    model.add(Dropout(0.5))\n",
    "    model.add(Dense(16, activation=\"relu\"))\n",
    "    model.add(Dense(1, activation='sigmoid'))\n",
    "    model.compile(loss='binary_crossentropy',\n",
    "                  optimizer='rmsprop',\n",
    "                  metrics=['accuracy'])\n",
    "    return model\n",
    "\n",
    "def run():\n",
    "    \"\"\"Train the CNN end-to-end, persist it, then sanity-check a reload.\"\"\"\n",
    "    X, Y, max_len, volcab_size = get_data()\n",
    "\n",
    "    print(\"X len:\", len(X), \"Y len:\", len(Y))\n",
    "    trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.2,\n",
    "                                                    random_state=42)\n",
    "    print(trainX[:1])\n",
    "    print(trainY[:1])\n",
    "    print(testX[-1:])\n",
    "    print(testY[-1:])\n",
    "\n",
    "    model = get_cnn_model(max_len, volcab_size)\n",
    "    model.fit(trainX, trainY,\n",
    "              validation_data=(testX, testY),\n",
    "              verbose=1,\n",
    "              batch_size=32)\n",
    "\n",
    "    filename = 'finalized_model.keras'\n",
    "    model.save(filename)\n",
    "\n",
    "    # BUG FIX: Keras models have no .load() method; reload via load_model()\n",
    "    model = load_model(filename)\n",
    "    print(\"Just review 3 sample data test result:\")\n",
    "    result = model.predict(testX[0:3])\n",
    "    print(result)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 240,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "./sample_data/labeled_black/dnscat2_when_exec_command_rm_file.txt\n",
      "./sample_data/labeled_black/ozyman_idle3.pcap.txt\n",
      "./sample_data/labeled_black/download_dnscat2_file12.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_ssh4_base32.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_ssh6_base128.pcap.txt\n",
      "./sample_data/labeled_black/dns2tcp_sendfile9.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_scp17_base128.pcap.txt\n",
      "./sample_data/labeled_black/dnscat2_when_idle.txt\n",
      "./sample_data/labeled_black/iodine_direct_ssh9_base32_again.pcap.txt\n",
      "./sample_data/labeled_black/nbtoo_dnscat_file7.pcap.txt\n",
      "./sample_data/labeled_black/iodine_idle_direct_idle44.pcap.txt\n",
      "./sample_data/labeled_black/download_dnscat2_file13.pcap.txt\n",
      "./sample_data/labeled_black/dnscapy_scp2.pcap.txt\n",
      "./sample_data/labeled_black/dns2tcp_cmd.pcap.txt\n",
      "./sample_data/labeled_black/dnscapy_scp.pcap.txt\n",
      "./sample_data/labeled_black/tcp-over-dns-idle.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_ssh6_base64u.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_ssh6_base64.pcap.txt\n",
      "./sample_data/labeled_black/ozyman_idle.pcap.txt\n",
      "./sample_data/labeled_black/dns2tcp_when_use_ssh.txt\n",
      "./sample_data/labeled_black/ozyman_idle2.pcap.txt\n",
      "./sample_data/labeled_black/iodine_direct_scp3_base64.pcap.txt\n",
      "./sample_data/labeled_black/download_dnscat2_file14.pcap.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-spcdntip.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-mmycdn.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-gosuncdn.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-dlgslb.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-8-tcdnvod.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-ctripgslb.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-tcdnvod.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-ruisucdn.com.txt\n",
      "./sample_data/labeled_cdn/2017-8-2-0-mccdnglb.com.txt\n",
      "./sample_data/labeled_white/2017-8-15-8-qichedaquan.com.txt\n",
      "./sample_data/labeled_white/2017-8-16-11-lse.ac.uk.txt\n",
      "./sample_data/labeled_white/2017-8-15-0-henanpeace.org.cn.txt\n",
      "./sample_data/labeled_white/2017-8-2-0-bilibiligame.net.txt\n",
      "./sample_data/labeled_white/2017-8-2-0-365yg.com.txt\n",
      "./sample_data/labeled_white/2017-8-16-9-dicp.ac.cn.txt\n",
      "max_features: 43\n",
      "max_len: 942\n"
     ]
    }
   ],
   "source": [
    "X, Y, max_len, volcab_size = get_data()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 241,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X len: 4439 Y len: 4439\n"
     ]
    }
   ],
   "source": [
    "print( \"X len:\", len(X), \"Y len:\", len(Y))\n",
    "trainX, test_X, trainY, test_Y = train_test_split(X, Y, test_size=0.2, \n",
    "                                                    random_state=42)\n",
    "\n",
    "trainX, testX, trainY, testY = train_test_split(trainX, trainY, test_size=0.2, \n",
    "                                                    random_state=42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 242,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "trainX=np.mat(trainX)\n",
    "testX=np.mat(testX)\n",
    "\n",
    "trainY=np.mat(trainY).flatten().T\n",
    "testY=np.mat(testY).flatten().T\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 243,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding_20 (Embedding)     (None, 256, 64)           2752      \n",
      "_________________________________________________________________\n",
      "bidirectional_2 (Bidirection (None, 32)                7776      \n",
      "_________________________________________________________________\n",
      "dense_39 (Dense)             (None, 16)                528       \n",
      "_________________________________________________________________\n",
      "dropout_20 (Dropout)         (None, 16)                0         \n",
      "_________________________________________________________________\n",
      "dense_40 (Dense)             (None, 1)                 17        \n",
      "_________________________________________________________________\n",
      "activation_2 (Activation)    (None, 1)                 0         \n",
      "=================================================================\n",
      "Total params: 11,073\n",
      "Trainable params: 11,073\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model = get_cnn_model(max_len, volcab_size)\n",
    "\n",
    "model = build_model_BiRNN(max_len, volcab_size)\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 254,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 3551 samples, validate on 888 samples\n",
      "Epoch 1/20\n",
      "3551/3551 [==============================] - 3s 760us/step - loss: 0.1165 - acc: 0.9718 - val_loss: 0.0880 - val_acc: 0.9786\n",
      "Epoch 2/20\n",
      "3551/3551 [==============================] - 3s 753us/step - loss: 0.1125 - acc: 0.9713 - val_loss: 0.0856 - val_acc: 0.9775\n",
      "Epoch 3/20\n",
      "3551/3551 [==============================] - 3s 747us/step - loss: 0.1135 - acc: 0.9730 - val_loss: 0.0844 - val_acc: 0.9775\n",
      "Epoch 4/20\n",
      "3551/3551 [==============================] - 3s 772us/step - loss: 0.1085 - acc: 0.9721 - val_loss: 0.0820 - val_acc: 0.9809\n",
      "Epoch 5/20\n",
      "3551/3551 [==============================] - 3s 767us/step - loss: 0.1053 - acc: 0.9735 - val_loss: 0.0808 - val_acc: 0.9775\n",
      "Epoch 6/20\n",
      "3551/3551 [==============================] - 3s 754us/step - loss: 0.1058 - acc: 0.9735 - val_loss: 0.0788 - val_acc: 0.9809\n",
      "Epoch 7/20\n",
      "3551/3551 [==============================] - 3s 747us/step - loss: 0.1088 - acc: 0.9747 - val_loss: 0.0793 - val_acc: 0.9775\n",
      "Epoch 8/20\n",
      "3551/3551 [==============================] - 3s 761us/step - loss: 0.1026 - acc: 0.9744 - val_loss: 0.0761 - val_acc: 0.9831\n",
      "Epoch 9/20\n",
      "3551/3551 [==============================] - 3s 746us/step - loss: 0.1024 - acc: 0.9775 - val_loss: 0.0774 - val_acc: 0.9775\n",
      "Epoch 10/20\n",
      "3551/3551 [==============================] - 3s 757us/step - loss: 0.1020 - acc: 0.9747 - val_loss: 0.0723 - val_acc: 0.9831\n",
      "Epoch 11/20\n",
      "3551/3551 [==============================] - 3s 746us/step - loss: 0.1020 - acc: 0.9724 - val_loss: 0.0718 - val_acc: 0.9809\n",
      "Epoch 12/20\n",
      "3551/3551 [==============================] - 3s 762us/step - loss: 0.1042 - acc: 0.9744 - val_loss: 0.0675 - val_acc: 0.9831\n",
      "Epoch 13/20\n",
      "3551/3551 [==============================] - 3s 744us/step - loss: 0.0970 - acc: 0.9789 - val_loss: 0.0659 - val_acc: 0.9820\n",
      "Epoch 14/20\n",
      "3551/3551 [==============================] - 3s 776us/step - loss: 0.0923 - acc: 0.9783 - val_loss: 0.0632 - val_acc: 0.9820\n",
      "Epoch 15/20\n",
      "3551/3551 [==============================] - 3s 756us/step - loss: 0.0894 - acc: 0.9786 - val_loss: 0.0611 - val_acc: 0.9831\n",
      "Epoch 16/20\n",
      "3551/3551 [==============================] - 3s 756us/step - loss: 0.0843 - acc: 0.9811 - val_loss: 0.0586 - val_acc: 0.9876\n",
      "Epoch 17/20\n",
      "3551/3551 [==============================] - 3s 744us/step - loss: 0.0834 - acc: 0.9792 - val_loss: 0.0573 - val_acc: 0.9865\n",
      "Epoch 18/20\n",
      "3551/3551 [==============================] - 3s 763us/step - loss: 0.0857 - acc: 0.9800 - val_loss: 0.0557 - val_acc: 0.9865\n",
      "Epoch 19/20\n",
      "3551/3551 [==============================] - 3s 745us/step - loss: 0.0825 - acc: 0.9803 - val_loss: 0.0562 - val_acc: 0.9865\n",
      "Epoch 20/20\n",
      "3551/3551 [==============================] - 3s 753us/step - loss: 0.0760 - acc: 0.9842 - val_loss: 0.0583 - val_acc: 0.9854\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7f36887872e8>"
      ]
     },
     "execution_count": 254,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(trainX, trainY, \n",
    "          validation_data=(testX, testY), \n",
    "          verbose=1,\n",
    "          batch_size=3551,\n",
    "          epochs=20)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 257,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Just review 2 sample data test result:\n",
      "[[0]\n",
      " [0]]\n"
     ]
    }
   ],
   "source": [
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\", time.localtime(time.time()))\n",
    "\n",
    "model.save(\"./models/BiLST-\"+timestamp+\".module\")\n",
    "model.save(\"./models/BiLST-final.module\")\n",
    "\n",
    "print(\"Just review 2 sample data test result:\")\n",
    "\n",
    "result = model.predict_classes(testX[0:2])\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.models import load_model\n",
    "\n",
    "def get_predict_data():\n",
    "    \"\"\"Parse every metadata file under ./xshell_data and collect the subdomains.\"\"\"\n",
    "    collected = []\n",
    "    for file_path in iterbrowse(\"./xshell_data\"):\n",
    "        with open(file_path) as fh:\n",
    "            for record in fh:\n",
    "                _mdomain, subdomain = metadata2_domain_data(record)\n",
    "                if subdomain is not None:\n",
    "                    collected.append(subdomain)\n",
    "    return collected\n",
    "\n",
    "\n",
    "org_X = []\n",
    "\n",
    "def get_xshell_data():\n",
    "    \"\"\"Encode the xshell capture with the vocabulary saved at training time.\n",
    "\n",
    "    Returns (X, Y, maxlen, max_features); also sets the global org_X to the\n",
    "    raw subdomain strings for later error reporting.\n",
    "    \"\"\"\n",
    "    global org_X\n",
    "    org_X = get_predict_data()\n",
    "    labels = [LABEL.black]*len(org_X)\n",
    "\n",
    "    volcab_file = \"volcab.pkl\"\n",
    "    assert os.path.exists(volcab_file)\n",
    "    # FIX: use a context manager; the old code never closed the pickle file\n",
    "    with open(volcab_file, 'rb') as pkl_file:\n",
    "        data = pickle.load(pkl_file)\n",
    "    valid_chars, maxlen, max_features = data[\"valid_chars\"], data[\"max_len\"], data[\"volcab_size\"]\n",
    "\n",
    "    # Convert characters to int and pad; chars unseen in training map to 0 (padding)\n",
    "    X = [[valid_chars[y] if y in valid_chars else 0 for y in x] for x in org_X]\n",
    "    X = pad_sequences(X, maxlen=maxlen, value=0.)\n",
    "\n",
    "    Y = labels\n",
    "    return X, Y, maxlen, max_features\n",
    "\n",
    "\n",
    "def run():\n",
    "    \"\"\"Score the xshell capture with the saved model and report detection rate.\n",
    "\n",
    "    NOTE: this redefines the training-time run() from the earlier cell.\n",
    "    \"\"\"\n",
    "    testX, testY, max_len, volcab_size = get_xshell_data()\n",
    "    print(\"X len:\", len(testX), \"Y len:\", len(testY))\n",
    "    print(testX[-1:])\n",
    "    print(testY[-1:])\n",
    "\n",
    "    model = load_model(\"./models/BiLST-final.module\")\n",
    "\n",
    "    predictions = model.predict(testX)\n",
    "\n",
    "    cnt = 0\n",
    "    global org_X\n",
    "    for i, p in enumerate(predictions):\n",
    "        # BUG FIX: the model has a single sigmoid output, so each row p has\n",
    "        # length 1; the old p[2] > p[1] comparison raised IndexError.\n",
    "        # black == 1, so a score above 0.5 counts as detected.\n",
    "        if p[0] > 0.5:\n",
    "            cnt += 1\n",
    "        else:\n",
    "            print(\"found data not detected:\")\n",
    "            print(\"original subdomain:\", org_X[i])\n",
    "            print(\"prediction compare:\", p, testY[i])\n",
    "    print(\"Dectected cnt:\", cnt, \"total:\", len(predictions))\n",
    "    print(\"Dectect Rate is:\", cnt/(len(predictions)+.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X len: 1180 Y len: 1180\n",
      "[[39, 17, 16, 17, 16, 42, 12, 18, 18, 9, 26, 2, 25, 41, 42, 17, 12, 16, 5, 42, 35, 14, 25, 25, 15, 2, 33, 42, 37, 17, 16, 14, 25, 5, 35, 4, 41, 11, 35, 17, 35, 25, 41, 18, 16, 5, 41, 32, 39, 19, 25, 9, 2, 41, 37, 10, 37, 14, 22, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]\n",
      "[1]\n",
      "<class 'numpy.matrixlib.defmatrix.matrix'>\n",
      "<class 'numpy.matrixlib.defmatrix.matrix'>\n"
     ]
    }
   ],
   "source": [
    "testX, testY, max_len, volcab_size = get_xshell_data()\n",
    "print( \"X len:\", len(testX), \"Y len:\", len(testY))\n",
    "print( testX[-1:])\n",
    "print( testY[-1:])\n",
    "testX = np.mat(testX)\n",
    "testY = np.mat(testY)\n",
    "print(type(testX))\n",
    "print(type(testY))\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = load_model(\"./models/BiLST-final.module\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "predictions = model.predict_classes(testX)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dectected cnt: 910 total: 1180\n",
      "Dectect Rate is: 0.7711864406779662\n"
     ]
    }
   ],
   "source": [
    "cnt = 0\n",
    "\n",
    "for i,p in enumerate(predictions):\n",
    "    #print(i,p)\n",
    "    #if abs(p[2]-testY[i][2]) < 0.1:\n",
    "    if p[0]==1:\n",
    "        cnt += 1\n",
    "    else:\n",
    "        continue\n",
    "        # NOTE(review): the 'continue' above makes the two prints below\n",
    "        # unreachable -- they look like deliberately disabled debug output.\n",
    "        print( \"found data not detected:\")\n",
    "        print( \"original subdomain:\", org_X[i])\n",
    "        #print( \"prediction compare:\", p[0], testY[i])\n",
    "print( \"Dectected cnt:\", cnt, \"total:\", len(predictions))\n",
    "print( \"Dectect Rate is:\", cnt/(len(predictions)+.0))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "from kafka import KafkaConsumer\n",
    "\n",
    "\n",
    "#model = load_model(\"./models/BiLST-20180928-183836.module\")\n",
    "\n",
    "volcab_file = \"volcab.pkl\"\n",
    "pkl_file = open(volcab_file, 'rb')\n",
    "data = pickle.load(pkl_file)\n",
    "valid_chars, maxlen, max_features = data[\"valid_chars\"], data[\"max_len\"], data[\"volcab_size\"]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "consumer = KafkaConsumer('dns')\n",
    "for message in consumer:\n",
    "    # message value and key are raw bytes -- decode if necessary!\n",
    "    # e.g., for unicode: `message.value.decode('utf-8')`\n",
    "    dns_log = json.loads(message.value.decode('utf-8'))\n",
    "    dns_query = dns_log[\"query\"]\n",
    "    \n",
    "    # NOTE(review): extract_domain returns a (mdomain, subdomain) tuple, so X\n",
    "    # below encodes BOTH strings and the model scores two rows, yet only row 0\n",
    "    # (the registered domain) is reported. Presumably only the subdomain should\n",
    "    # be scored -- confirm intent.\n",
    "    org_X = extract_domain(dns_query)\n",
    "    X = [[valid_chars[y] if y in valid_chars else 0 for y in x] for x in org_X]\n",
    "    X = pad_sequences(X, maxlen=maxlen, value=0.)\n",
    "    \n",
    "    X = np.mat(X)\n",
    "    \n",
    "    rs = model.predict_classes(X)[0][0]\n",
    "    \n",
    "    print(rs, dns_query)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[12, 13, 4, 5, 1, 22, 345, 4]\n",
      "[12, 13, 4, 5, 1, 22, 345, 4]\n"
     ]
    }
   ],
   "source": [
    "a = [12,13,4,5]\n",
    "b = [1,22,345,4]\n",
    "c = a + b\n",
    "print(c)\n",
    "a.pop()\n",
    "print(c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1, 4, 5, 0, 0, 0, 0, 0, 0, 0]\n"
     ]
    }
   ],
   "source": [
    "print(pad_sequences([1,4,5],10))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
