{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ATIS SLU modeling\n",
    "\n",
    "### LICENSING\n",
    "\n",
    "models may use layers from [keras-contrib](https://github.com/keras-team/keras-contrib) released under the MIT License\n",
    "\n",
    "models may use layers from [keras utilities](https://github.com/cbaziotis/keras-utilities) by cbaziotis released under the MIT License"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n",
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n",
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "from collections import Counter\n",
    "from preprocessing import CharacterIndexer, SlotIndexer, IntentIndexer\n",
    "from gensim.models import Word2Vec\n",
    "import json\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### load training data and indexers\n",
    "\n",
    "the data has already been encoded in the preprocessing script"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "sentindexer = pickle.load(open('encoded/atis_sentindexer.pkl', 'rb'))\n",
    "slotindexer = pickle.load(open('encoded/atis_slotindexer.pkl', 'rb'))\n",
    "intindexer  = pickle.load(open('encoded/atis_intindexer.pkl',  'rb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "trn_text_idx = np.load('encoded/trn_text_idx.npy')\n",
    "trn_char_idx = np.load('encoded/trn_char_idx.npy')\n",
    "trn_slot_idx = np.load('encoded/trn_slot_idx.npy')\n",
    "trn_int_idx  = np.load('encoded/trn_int_idx.npy')\n",
    "\n",
    "dev_text_idx = np.load('encoded/dev_text_idx.npy')\n",
    "dev_char_idx = np.load('encoded/dev_char_idx.npy')\n",
    "dev_slot_idx = np.load('encoded/dev_slot_idx.npy')\n",
    "dev_int_idx  = np.load('encoded/dev_int_idx.npy')\n",
    "\n",
    "tst_text_idx = np.load('encoded/tst_text_idx.npy')\n",
    "tst_char_idx = np.load('encoded/tst_char_idx.npy')\n",
    "tst_slot_idx = np.load('encoded/tst_slot_idx.npy')\n",
    "tst_int_idx  = np.load('encoded/tst_int_idx.npy')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### load pretrained embeddings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "w2v_model = Word2Vec.load('model/atis_w2v.gensimmodel')\n",
    "w2v_vocab = pickle.load(open('model/atis_w2v_vocab.pkl',  'rb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### construct keras model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n",
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "import h5py\n",
    "import math\n",
    "from keras.models import Model\n",
    "from keras.layers import Activation, Concatenate, concatenate, Dense, Dropout, Embedding, Input, TimeDistributed\n",
    "from keras.layers import LSTM, CuDNNLSTM, LeakyReLU, Masking, Lambda, Dot, BatchNormalization, Activation\n",
    "from keras.layers import Conv1D, MaxPooling1D, GlobalMaxPooling1D, Flatten\n",
    "from keras.layers.wrappers import Bidirectional\n",
    "from keras.callbacks import ReduceLROnPlateau, EarlyStopping, TerminateOnNaN, ModelCheckpoint\n",
    "from keras_contrib.layers import CRF\n",
    "from keras_contrib.utils import save_load_utils\n",
    "from kutilities.layers import AttentionWithContext\n",
    "from keras.optimizers import Adam, SGD\n",
    "import keras.backend as K\n",
    "from keras.layers import Dense, Activation, Multiply, Add, Lambda\n",
    "import keras.initializers\n",
    "from keras.regularizers import l1, l2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "728 22 121 22\n"
     ]
    }
   ],
   "source": [
    "# preprocessing-dependent parameters\n",
    "# we can use the indexer attributes\n",
    "TXT_VOCAB  = sentindexer.max_word_vocab\n",
    "TXT_MAXLEN = sentindexer.max_sent_len\n",
    "CHR_MAXLEN = sentindexer.max_word_len\n",
    "CHR_VOCAB  = sentindexer.max_char_vocab\n",
    "SLOT_NUM   = slotindexer.labelsize\n",
    "LABEL_NUM  = intindexer.labelsize\n",
    "print(TXT_VOCAB, TXT_MAXLEN, SLOT_NUM, LABEL_NUM)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## model details\n",
    "\n",
    "input features combine word-level embeddings (`word2vec`) with sub-word (character-level) features to help generalize to unseen words; using embeddings from multiple levels has been shown to improve performance of sequence-labeling models. Convnet inspired by Ma & Hovy 2016 with implementational details such as highway layer, tanh activation from Kim 2015.\n",
    "\n",
    "the joint model is based on Hakkani-Tur with improvements:\n",
    "\n",
    "- optional slot CRF following Ma & Hovy for NER\n",
    "- optional \"aligned seq2seq\" following Liu & Lane (state initialization in deep LSTM)\n",
    "- optional pre-slot output attention following Liu & Lane\n",
    "- optional merging of slot predictions + attention for intent following Liu & Lane \n",
    "\n",
    "### implementation notes\n",
    "\n",
    "- the highway bias is set to -2 as recommended by Yoon Kim  \n",
    "- the activation is set to `relu` following the article, although the sample code uses `tanh`  \n",
    "- the optimizer is set to `adadelta` as it seems to converge well. `clipnorm` is used because of a `nan` loss problem  \n",
    "- due to the above `nan` loss issue, there is some slight regularization applied to the intent dense layers.\n",
    "\n",
    "### references\n",
    "\n",
    "Goo *et al* (2018): *Slot-Gated Modeling for Joint Slot Filling and Intent Prediction*  \n",
    "NAACL-HCT 2018, available: http://aclweb.org/anthology/N18-2118\n",
    "\n",
    "Hakkani-Tur *et al* (2016): *Multi-Domain Joint Semantic Frame Parsing using Bi-directional RNN-LSTM*  \n",
    "available: https://www.csie.ntu.edu.tw/~yvchen/doc/IS16_MultiJoint.pdf\n",
    "\n",
    "Kim *et al* (2015): *Character-Aware Neural Language Models*  \n",
    "available: https://arxiv.org/pdf/1508.06615.pdf\n",
    "\n",
    "Liu & Lane (2016): *Attention-Based Recurrent Neural Network Models for Joint Intent Detection and Slot Filling*  \n",
    "INTERSPEECH 2016, available: https://pdfs.semanticscholar.org/84a9/bc5294dded8d597c9d1c958fe21e4614ff8f.pdf\n",
    "\n",
    "Ma & Hovy (2016): *End-to-end Sequence Labeling via Bi-directional LSTM-CNNs-CRF*  \n",
    "available: https://arxiv.org/pdf/1603.01354.pdf\n",
    "\n",
    "Park & Song (2017): *음절 기반의 CNN 를 이용한 개체명 인식 Named Entity Recognition using CNN for Korean syllabic character*  \n",
    "available: https://www.dbpia.co.kr/Journal/ArticleDetail/NODE07017625 (Korean)\n",
    "\n",
    "Srivastava, R. K., Greff, K., & Schmidhuber, J. (2015). *Highway networks*.  \n",
    "available: https://arxiv.org/pdf/1505.00387.pdf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "modelname = 'test_model'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# self-defined network hyperparameters\n",
    "WEMBED_SIZE   = 200   # word embedding size. must match w2v size\n",
    "CEMBED_SIZE   = 200   # character embedding size. free param\n",
    "WDROP_RATE    = 0.50  # word-level input dropout\n",
    "DROP_RATE     = 0.33  # dropout for other layers\n",
    "RNN_DROP_RATE = 0.0   # recurrent dropout (not implemented)\n",
    "HIDDEN_SIZE   = 300   # LSTM block hidden size\n",
    "BATCH_SIZE    = 32\n",
    "MAX_EPOCHS    = 50\n",
    "OPTIMIZER     = keras.optimizers.Adadelta(clipnorm=1.)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### main model input layers\n",
    "\n",
    "charCNN+highway & word2vec > LSTM following Kim 2015, Ma & Hovy 2016, Liu & Lane 2016 and others\n",
    "\n",
    "loading embeddings borrows technique from https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf/blob/master/neuralnets/BiLSTM.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loaded total of 724 vectors\n"
     ]
    }
   ],
   "source": [
    "########################################\n",
    "# preload word vectors\n",
    "########################################\n",
    "\n",
    "# create word embedding matrix \n",
    "# load word2vec vector if present; otherwise randomly init, but keep padding zero\n",
    "# ref: https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf/blob/master/neuralnets/BiLSTM.py\n",
    "word_embedding_matrix = np.zeros((TXT_VOCAB, WEMBED_SIZE))\n",
    "c = 0\n",
    "for w in sentindexer.word2idx.keys():\n",
    "    # get the word vector from the embedding model\n",
    "    # if it's there (check against vocab list)\n",
    "    if w in w2v_vocab:\n",
    "        # get the word vector\n",
    "        word_vector = w2v_model.wv[w]\n",
    "        # slot it in at the proper index\n",
    "        word_embedding_matrix[sentindexer.word2idx[w]] = word_vector\n",
    "        c += 1\n",
    "    elif w not in (\"PAD\", \"_PAD_\"):\n",
    "        limit = math.sqrt(3.0 / WEMBED_SIZE)\n",
    "        word_vector = np.random.uniform(-limit, limit, WEMBED_SIZE)\n",
    "        word_embedding_matrix[sentindexer.word2idx[w]] = word_vector\n",
    "\n",
    "# loaded vector count may be lower than total vocab due to w2v settings\n",
    "print('loaded total of', c, 'vectors')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "initialized total of 34 vectors\n"
     ]
    }
   ],
   "source": [
    "########################################\n",
    "# randomly init char vectors\n",
    "########################################\n",
    "\n",
    "# create char embedding matrix randomly but keep padding zero\n",
    "# ref: https://github.com/UKPLab/emnlp2017-bilstm-cnn-crf/blob/master/neuralnets/BiLSTM.py\n",
    "char_embedding_matrix = np.zeros((CHR_VOCAB, CEMBED_SIZE))\n",
    "c = 0\n",
    "for w in sentindexer.char2idx.keys():\n",
    "    if w not in (\"PAD\", \"_PAD_\"):\n",
    "        # slot it in at the proper index\n",
    "        limit = math.sqrt(3.0 / CEMBED_SIZE)\n",
    "        char_vector = np.random.uniform(-limit, limit, CEMBED_SIZE)\n",
    "        char_embedding_matrix[sentindexer.char2idx[w]] = char_vector\n",
    "        c += 1\n",
    "\n",
    "# initialized count excludes the padding character, whose vector stays zero\n",
    "print('initialized total of', c, 'vectors')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################################\n",
    "# Kim; Ma & Hovy char-CNN + word input\n",
    "########################################\n",
    "\n",
    "# word-level input with word embedding matrix (with word2vec)\n",
    "txt_input = Input(shape=(TXT_MAXLEN,), name='word_input')\n",
    "\n",
    "txt_embed = Embedding(TXT_VOCAB, WEMBED_SIZE, input_length=TXT_MAXLEN,\n",
    "                      weights=[word_embedding_matrix],\n",
    "                      name='word_embedding', trainable=True, mask_zero=True)(txt_input)\n",
    "\n",
    "txt_drpot = Dropout(WDROP_RATE, name='word_dropout')(txt_embed)\n",
    "\n",
    "# character-level input with randomized initializations\n",
    "cnn_input = Input(shape=(TXT_MAXLEN, CHR_MAXLEN), name='cnn_input')\n",
    "\n",
    "cnn_embed = TimeDistributed(Embedding(CHR_VOCAB, CEMBED_SIZE, input_length=CHR_MAXLEN,\n",
    "                            weights=[char_embedding_matrix],\n",
    "                            name='cnn_embedding', trainable=True, mask_zero=False))(cnn_input)\n",
    "\n",
    "# 1-size window CNN with batch-norm & tanh activation (Kim 2015)\n",
    "cnns1 = TimeDistributed(Conv1D(filters=20, kernel_size=1, padding=\"same\", strides=1), name='cnn1_cnn')(cnn_embed)\n",
    "cnns1 = TimeDistributed(BatchNormalization(), name='cnn1_bnorm')(cnns1)\n",
    "cnns1 = TimeDistributed(Activation('tanh'), name='cnn1_act')(cnns1)\n",
    "cnns1 = TimeDistributed(GlobalMaxPooling1D(), name='cnn1_gmp')(cnns1)\n",
    "\n",
    "# 2-size window CNN with batch-norm & tanh activation (Kim 2015)\n",
    "cnns2 = TimeDistributed(Conv1D(filters=40, kernel_size=2, padding=\"same\", strides=1), name='cnn2_cnn')(cnn_embed)\n",
    "cnns2 = TimeDistributed(BatchNormalization(), name='cnn2_bnorm')(cnns2)\n",
    "cnns2 = TimeDistributed(Activation('tanh'), name='cnn2_act')(cnns2)\n",
    "cnns2 = TimeDistributed(GlobalMaxPooling1D(), name='cnn2_gmp')(cnns2)\n",
    "\n",
    "# 3-size window CNN with batch-norm & tanh activation (Kim 2015)\n",
    "cnns3 = TimeDistributed(Conv1D(filters=60, kernel_size=3, padding=\"same\", strides=1), name='cnn3_cnn')(cnn_embed)\n",
    "cnns3 = TimeDistributed(BatchNormalization(), name='cnn3_bnorm')(cnns3)\n",
    "cnns3 = TimeDistributed(Activation('tanh'), name='cnn3_act')(cnns3)\n",
    "cnns3 = TimeDistributed(GlobalMaxPooling1D(), name='cnn3_gmp')(cnns3)\n",
    "\n",
    "# 4-size window CNN with batch-norm & tanh activation (Kim 2015)\n",
    "cnns4 = TimeDistributed(Conv1D(filters=80, kernel_size=4, padding=\"same\", strides=1), name='cnn4_cnn')(cnn_embed)\n",
    "cnns4 = TimeDistributed(BatchNormalization(), name='cnn4_bnorm')(cnns4)\n",
    "cnns4 = TimeDistributed(Activation('tanh'), name='cnn4_act')(cnns4)\n",
    "cnns4 = TimeDistributed(GlobalMaxPooling1D(), name='cnn4_gmp')(cnns4)\n",
    "\n",
    "# time-distributed highway layer (Kim 2015)\n",
    "cnns  = concatenate([cnns1, cnns2, cnns3, cnns4], axis=-1, name='cnn_concat')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "200"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "K.int_shape(cnns)[-1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### highway layer\n",
    "\n",
    "paraphrasing from Yoon Kim(?) : \"an extension of the LSTM network to feed-forward networks\"\n",
    "\n",
    "see:  \n",
    "https://arxiv.org/pdf/1505.00387.pdf  \n",
    "http://people.idsia.ch/~rupesh/very_deep_learning/  \n",
    "https://theneuralperspective.com/2016/12/13/highway-networks/\n",
    "\n",
    "coded following Srivastava et al. with reference to https://gist.github.com/iskandr/a874e4cf358697037d14a17020304535"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################################\n",
    "# subword vector highway layer\n",
    "########################################\n",
    "\n",
    "hway_input = Input(shape=(K.int_shape(cnns)[-1],))\n",
    "gate_bias_init = keras.initializers.Constant(-2)\n",
    "transform_gate = Dense(units=K.int_shape(cnns)[-1], bias_initializer=gate_bias_init, activation='sigmoid')(hway_input)\n",
    "carry_gate = Lambda(lambda x: 1.0 - x, output_shape=(K.int_shape(cnns)[-1],))(transform_gate)\n",
    "h_transformed = Dense(units=K.int_shape(cnns)[-1])(hway_input)\n",
    "h_transformed = Activation('relu')(h_transformed)\n",
    "transformed_gated = Multiply()([transform_gate, h_transformed])\n",
    "carried_gated = Multiply()([carry_gate, hway_input])\n",
    "outputs = Add()([transformed_gated, carried_gated])\n",
    "\n",
    "highway = Model(inputs=hway_input, outputs=outputs)\n",
    "\n",
    "cnns  = TimeDistributed(highway, name='cnn_highway')(cnns)\n",
    "\n",
    "# final concat of convolutional subword embeddings and word vectors\n",
    "word_vects  = concatenate([cnns, txt_drpot], axis=-1, name='concat_word_vectors')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################################\n",
    "# main recurrent sentence block\n",
    "########################################\n",
    "\n",
    "# 'encoder' layer with returned states following (Liu, Lane)\n",
    "lstm_enc, fh, fc, bh, bc  = Bidirectional(LSTM(HIDDEN_SIZE, return_sequences=True, return_state=True),\n",
    "                                          name='bidirectional_enc')(word_vects)\n",
    "lstm_enc = Dropout(DROP_RATE, name='bidirectional_dropout_enc')(lstm_enc)\n",
    "\n",
    "# \"aligned seq2seq\" lstm\n",
    "# load forward LSTM with reverse states following Liu, Lane 2016 (and do reverse)\n",
    "lstm_dec = Bidirectional(LSTM(HIDDEN_SIZE, return_sequences=True),\n",
    "                         name='bidirectional_dec')(lstm_enc, initial_state=[bh, bc, fh, fc])\n",
    "\n",
    "lstm_states = Dropout(DROP_RATE, name='bidirectional_dropout_dec')(lstm_dec)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model output layers\n",
    "\n",
    "joint intent and slot filling with optional attention, slot prediction CRF as seen in NER papers such as Huang *et al*, Ma & Hovy\n",
    "\n",
    "this model conditions intent classification on slot predictions along the lines of Liu & Lane.  \n",
    "Goo *et al.* do the opposite using a 'intent-slot gate', and Liu & Lane, Hakkani-Tur *et al* also experiment with \"implicit\" joint modeling."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################################\n",
    "# Huang et al; Ma & Hovy CRF slot clf\n",
    "########################################\n",
    "\n",
    "# final slot linear chain CRF layer\n",
    "lyr_crf   = CRF(SLOT_NUM, sparse_target=True, name='out_slot', learn_mode='marginal', test_mode='marginal')\n",
    "out_slot  = lyr_crf(lstm_states)\n",
    "\n",
    "# alternative is using greedy predictions\n",
    "# out_slot  = TimeDistributed(Dense(SLOT_NUM, activation='softmax'), name='out_slot')(lstm_states)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################################\n",
    "# attentional intent clf block\n",
    "########################################\n",
    "\n",
    "# combine lstm with CRF for attention (see Liu & Lane)\n",
    "seq_concat = concatenate([lstm_states, out_slot], axis=2, name='lstm_concat')\n",
    "seq_concat = Dropout(DROP_RATE, name='bidirectional_dropout_3')(seq_concat)\n",
    "\n",
    "# layer: intent attention w/context (Liu & Lane)\n",
    "att_int = AttentionWithContext(name='intent_attention')(seq_concat)\n",
    "\n",
    "# layer: dense + LeakyReLU with dropout\n",
    "out_int = Dense(K.int_shape(att_int)[-1],\n",
    "                kernel_regularizer=l2(0.0025),\n",
    "                name='intent_dense_1')(att_int)\n",
    "out_int = LeakyReLU(name='intent_act_1')(out_int)\n",
    "out_int = Dropout(DROP_RATE, name='intent_dropout_1')(out_int)\n",
    "\n",
    "# layer: dense + LeakyReLU with dropout\n",
    "out_int = Dense(K.int_shape(att_int)[-1],\n",
    "                kernel_regularizer=l2(0.001),\n",
    "                name='intent_dense_2')(out_int)\n",
    "out_int = LeakyReLU(name='intent_act_2')(out_int)\n",
    "\n",
    "# layer: final dense + softmax\n",
    "out_int = Dense(LABEL_NUM, activation='softmax', name='out_intent')(out_int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Model(inputs=[txt_input, cnn_input], outputs=[out_slot, out_int])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "cnn_input (InputLayer)          (None, 22, 9)        0                                            \n",
      "__________________________________________________________________________________________________\n",
      "time_distributed_1 (TimeDistrib (None, 22, 9, 200)   7000        cnn_input[0][0]                  \n",
      "__________________________________________________________________________________________________\n",
      "cnn1_cnn (TimeDistributed)      (None, 22, 9, 20)    4020        time_distributed_1[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "cnn2_cnn (TimeDistributed)      (None, 22, 9, 40)    16040       time_distributed_1[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "cnn3_cnn (TimeDistributed)      (None, 22, 9, 60)    36060       time_distributed_1[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "cnn4_cnn (TimeDistributed)      (None, 22, 9, 80)    64080       time_distributed_1[0][0]         \n",
      "__________________________________________________________________________________________________\n",
      "cnn1_bnorm (TimeDistributed)    (None, 22, 9, 20)    80          cnn1_cnn[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn2_bnorm (TimeDistributed)    (None, 22, 9, 40)    160         cnn2_cnn[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn3_bnorm (TimeDistributed)    (None, 22, 9, 60)    240         cnn3_cnn[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn4_bnorm (TimeDistributed)    (None, 22, 9, 80)    320         cnn4_cnn[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn1_act (TimeDistributed)      (None, 22, 9, 20)    0           cnn1_bnorm[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "cnn2_act (TimeDistributed)      (None, 22, 9, 40)    0           cnn2_bnorm[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "cnn3_act (TimeDistributed)      (None, 22, 9, 60)    0           cnn3_bnorm[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "cnn4_act (TimeDistributed)      (None, 22, 9, 80)    0           cnn4_bnorm[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "cnn1_gmp (TimeDistributed)      (None, 22, 20)       0           cnn1_act[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn2_gmp (TimeDistributed)      (None, 22, 40)       0           cnn2_act[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn3_gmp (TimeDistributed)      (None, 22, 60)       0           cnn3_act[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "cnn4_gmp (TimeDistributed)      (None, 22, 80)       0           cnn4_act[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "word_input (InputLayer)         (None, 22)           0                                            \n",
      "__________________________________________________________________________________________________\n",
      "cnn_concat (Concatenate)        (None, 22, 200)      0           cnn1_gmp[0][0]                   \n",
      "                                                                 cnn2_gmp[0][0]                   \n",
      "                                                                 cnn3_gmp[0][0]                   \n",
      "                                                                 cnn4_gmp[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "word_embedding (Embedding)      (None, 22, 200)      145600      word_input[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "cnn_highway (TimeDistributed)   (None, 22, 200)      80400       cnn_concat[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "word_dropout (Dropout)          (None, 22, 200)      0           word_embedding[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "concat_word_vectors (Concatenat (None, 22, 400)      0           cnn_highway[0][0]                \n",
      "                                                                 word_dropout[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_enc (Bidirectiona [(None, 22, 600), (N 1682400     concat_word_vectors[0][0]        \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_dropout_enc (Drop (None, 22, 600)      0           bidirectional_enc[0][0]          \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_dec (Bidirectiona (None, 22, 600)      2162400     bidirectional_dropout_enc[0][0]  \n",
      "                                                                 bidirectional_enc[0][3]          \n",
      "                                                                 bidirectional_enc[0][4]          \n",
      "                                                                 bidirectional_enc[0][1]          \n",
      "                                                                 bidirectional_enc[0][2]          \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_dropout_dec (Drop (None, 22, 600)      0           bidirectional_dec[0][0]          \n",
      "__________________________________________________________________________________________________\n",
      "out_slot (CRF)                  (None, 22, 121)      87604       bidirectional_dropout_dec[0][0]  \n",
      "__________________________________________________________________________________________________\n",
      "lstm_concat (Concatenate)       (None, 22, 721)      0           bidirectional_dropout_dec[0][0]  \n",
      "                                                                 out_slot[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_dropout_3 (Dropou (None, 22, 721)      0           lstm_concat[0][0]                \n",
      "__________________________________________________________________________________________________\n",
      "intent_attention (AttentionWith (None, 721)          521283      bidirectional_dropout_3[0][0]    \n",
      "__________________________________________________________________________________________________\n",
      "intent_dense_1 (Dense)          (None, 721)          520562      intent_attention[0][0]           \n",
      "__________________________________________________________________________________________________\n",
      "intent_act_1 (LeakyReLU)        (None, 721)          0           intent_dense_1[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "intent_dropout_1 (Dropout)      (None, 721)          0           intent_act_1[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "intent_dense_2 (Dense)          (None, 721)          520562      intent_dropout_1[0][0]           \n",
      "__________________________________________________________________________________________________\n",
      "intent_act_2 (LeakyReLU)        (None, 721)          0           intent_dense_2[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "out_intent (Dense)              (None, 22)           15884       intent_act_2[0][0]               \n",
      "==================================================================================================\n",
      "Total params: 5,864,695\n",
      "Trainable params: 5,864,295\n",
      "Non-trainable params: 400\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.compile(optimizer=OPTIMIZER,\n",
    "              loss={'out_slot': lyr_crf.loss_function, 'out_intent': 'sparse_categorical_crossentropy'},\n",
    "              # loss={'out_slot': 'sparse_categorical_crossentropy', 'out_intent': 'sparse_categorical_crossentropy'},\n",
    "              loss_weights={'out_slot': 0.5, 'out_intent': 0.5},\n",
    "              )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model training\n",
    "\n",
    "we will use a `keras` Callback to reduce the learning rate when the validation loss stagnates\n",
    "\n",
    "and another Callback to terminate if the validation loss doesn't change for a number of epochs\n",
    "\n",
    "we will also save checkpoints every x epochs, and terminate on NaN loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# callbacks\n",
    "# cb_redlr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n",
    "cb_early = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1)\n",
    "cb_chkpt = ModelCheckpoint('checkpoints/_'+modelname+'{epoch:02d}-{val_loss:.2f}.h5', verbose=1, save_best_only=True, save_weights_only=True, period=5)\n",
    "cb_nonan = TerminateOnNaN()\n",
    "\n",
    "callbacks_list=[cb_early, cb_chkpt, cb_nonan]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # load weights to resume training\n",
    "# model.load_weights('checkpoints/FILE_NAME_HERE.h5')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "history = model.fit([trn_text_idx, trn_char_idx],\n",
    "                    [trn_slot_idx, trn_int_idx],\n",
    "                    validation_data=([dev_text_idx, dev_char_idx], [dev_slot_idx, dev_int_idx]),\n",
    "                    batch_size=BATCH_SIZE,\n",
    "                    epochs=MAX_EPOCHS,\n",
    "                    callbacks=callbacks_list,\n",
    "                    verbose=0)\n",
    "\n",
    "hist_dict = history.history"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### save model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save architecture with json\n",
    "with open('model/'+modelname+'.json', 'w') as f:\n",
    "    f.write(model.to_json())\n",
    "# save weights\n",
    "save_load_utils.save_all_weights(model, 'model/'+modelname+'.h5')\n",
    "# save training history\n",
    "np.save('model/'+modelname+'_dict.npy', hist_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load test\n",
    "model.load_weights('model/'+modelname+'.h5')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### predict and analyze performance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n",
      "/home/derek/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import classification_report, f1_score\n",
    "from sklearn.metrics import accuracy_score, precision_score, recall_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# remove nulls and pads and get F1 on only labels\n",
    "def procslots(trues, preds, nonull=True):\n",
    "    tru_slots = []\n",
    "    prd_slots = []\n",
    "    for i in range(len(trues)):\n",
    "        for j in range(len(trues[i])):\n",
    "            tru = trues[i][j]\n",
    "            if j < len(preds[i]):\n",
    "                prd = preds[i][j]\n",
    "            else:\n",
    "                prd = 'O'\n",
    "            if nonull==True:\n",
    "                if tru not in ('O', slotindexer.pad, slotindexer.unk):\n",
    "                    tru_slots.append(tru)\n",
    "                    prd_slots.append(prd)\n",
    "            else:\n",
    "                tru_slots.append(tru)\n",
    "                prd_slots.append(prd)\n",
    "    return tru_slots, prd_slots"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### training eval"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "tprd_slots_dist, tprd_ints_dist = model.predict([trn_text_idx, trn_char_idx])\n",
    "tprd_int_idx  = np.squeeze(np.argmax(tprd_ints_dist, axis=-1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "tprd_slot_idx = np.argmax(tprd_slots_dist, axis=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "tprd_ints = intindexer.inverse_transform(tprd_int_idx)\n",
    "ttru_ints = intindexer.inverse_transform(trn_int_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# convert slot predictions, trues to text form\n",
    "tprd_txtslots = slotindexer.inverse_transform(tprd_slot_idx)\n",
    "ttrn_txtslots = slotindexer.inverse_transform(trn_slot_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "ttru_slots, tprd_slots = procslots(tprd_txtslots, ttrn_txtslots, nonull=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# TRAIN RESULTS\n",
      "\n",
      "INTENT F1 :   0.9958062755602081  (weighted)\n",
      "INTENT ACC:   0.9966502903081733\n",
      "SLOT   F1 :   0.9943244981289089  (weighted, labels only)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/derek/anaconda3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n",
      "/home/derek/anaconda3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1115: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no true samples.\n",
      "  'recall', 'true', average, warn_for)\n"
     ]
    }
   ],
   "source": [
    "print('# TRAIN RESULTS')\n",
    "print()\n",
    "print('INTENT F1 :  ', f1_score(ttru_ints, tprd_ints, average='weighted'), ' (weighted)')\n",
    "print('INTENT ACC:  ', accuracy_score(ttru_ints, tprd_ints))\n",
    "print('SLOT   F1 :  ', f1_score(ttru_slots, tprd_slots, average='weighted'), ' (weighted, labels only)')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### test eval"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "prd_slots_dist, prd_ints_dist = model.predict([tst_text_idx, tst_char_idx])\n",
    "prd_int_idx  = np.squeeze(np.argmax(prd_ints_dist, axis=-1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "prd_slot_idx = np.argmax(prd_slots_dist, axis=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "prd_ints = intindexer.inverse_transform(prd_int_idx)\n",
    "tru_ints = intindexer.inverse_transform(tst_int_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "# convert slot predictions, trues to text form\n",
    "prd_txtslots = slotindexer.inverse_transform(prd_slot_idx)\n",
    "tst_txtslots = slotindexer.inverse_transform(tst_slot_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "tru_slots, prd_slots = procslots(prd_txtslots, tst_txtslots, nonull=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# TEST RESULTS\n",
      "\n",
      "INTENT F1 :   0.9573853410886499  (weighted)\n",
      "INTENT ACC:   0.9630459126539753\n",
      "SLOT   F1 :   0.9508826933073504  (weighted, labels only)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/derek/anaconda3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1113: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n",
      "/home/derek/anaconda3/lib/python3.6/site-packages/sklearn/metrics/classification.py:1115: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no true samples.\n",
      "  'recall', 'true', average, warn_for)\n"
     ]
    }
   ],
   "source": [
    "print('# TEST RESULTS')\n",
    "print()\n",
    "print('INTENT F1 :  ', f1_score(tru_ints, prd_ints, average='weighted'), ' (weighted)')\n",
    "print('INTENT ACC:  ', accuracy_score(tru_ints, prd_ints))\n",
    "print('SLOT   F1 :  ', f1_score(tru_slots, prd_slots, average='weighted'), ' (weighted, labels only)')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### conlleval test\n",
    "\n",
    "eval using [python conlleval](https://github.com/sighsmile/conlleval)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((893,), (893,))"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.shape(tst_txtslots), np.shape(prd_txtslots)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(893, 22)"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# fake inputs for words, POS\n",
    "fake_toks = [['tok']*TXT_MAXLEN for _ in range(prd_slot_idx.shape[0])]\n",
    "fake_poss = [['WUT']*TXT_MAXLEN for _ in range(prd_slot_idx.shape[0])]\n",
    "np.shape(fake_toks)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "# remove nulls and pads and get F1 on only labels\n",
    "def writeconlleval(toks, poss, trues, preds, filename='eval/temp.txt'):\n",
    "    with open(filename, 'w') as f:\n",
    "        for i in range(len(trues)):\n",
    "            for j in range(len(trues[i])):\n",
    "                tok = toks[i][j]\n",
    "                pos = poss[i][j]\n",
    "                tru = trues[i][j]\n",
    "                if j < len(preds[i]):\n",
    "                    prd = preds[i][j]\n",
    "                else:\n",
    "                    prd = 'O'\n",
    "                f.write(tok+' '+pos+' '+tru+' '+prd+' \\n')\n",
    "            f.write('-X-') # end-of-sentence\n",
    "            if i < len(trues):\n",
    "                f.write('\\n')\n",
    "                  \n",
    "    return"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "writeconlleval(fake_toks, fake_poss, tst_txtslots, prd_txtslots, filename='eval/'+modelname+'.txt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "from conlleval import *\n",
    "import argparse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "processed 10900 tokens with 2822 phrases; found: 2825 phrases; correct: 2646.\n",
      "accuracy:  97.81%; precision:  93.66%; recall:  93.76%; FB1:  93.71\n",
      "    aircraft_code: precision:  96.00%; recall:  72.73%; FB1:  82.76  25\n",
      "     airline_code: precision:  72.09%; recall:  93.94%; FB1:  81.58  43\n",
      "     airline_name: precision:  96.04%; recall:  96.04%; FB1:  96.04  101\n",
      "     airport_code: precision:  50.00%; recall:  33.33%; FB1:  40.00  6\n",
      "     airport_name: precision:  55.56%; recall:  47.62%; FB1:  51.28  18\n",
      "arrive_date.date_relative: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "arrive_date.day_name: precision:  66.67%; recall:  90.91%; FB1:  76.92  15\n",
      "arrive_date.day_number: precision:  62.50%; recall:  83.33%; FB1:  71.43  8\n",
      "arrive_date.month_name: precision:  71.43%; recall:  83.33%; FB1:  76.92  7\n",
      "arrive_time.end_time: precision:  77.78%; recall:  87.50%; FB1:  82.35  9\n",
      "arrive_time.period_of_day: precision:  85.71%; recall: 100.00%; FB1:  92.31  7\n",
      "arrive_time.start_time: precision:  88.89%; recall: 100.00%; FB1:  94.12  9\n",
      " arrive_time.time: precision:  88.57%; recall:  93.94%; FB1:  91.18  35\n",
      "arrive_time.time_relative: precision:  86.67%; recall:  83.87%; FB1:  85.25  30\n",
      "        city_name: precision:  84.21%; recall:  56.14%; FB1:  67.37  38\n",
      "       class_type: precision: 100.00%; recall: 100.00%; FB1: 100.00  24\n",
      "          connect: precision: 100.00%; recall:  83.33%; FB1:  90.91  5\n",
      "    cost_relative: precision:  97.30%; recall:  97.30%; FB1:  97.30  37\n",
      "         day_name: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "        days_code: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "depart_date.date_relative: precision:  85.00%; recall: 100.00%; FB1:  91.89  20\n",
      "depart_date.day_name: precision:  99.04%; recall:  97.63%; FB1:  98.33  208\n",
      "depart_date.day_number: precision:  96.23%; recall:  94.44%; FB1:  95.33  53\n",
      "depart_date.month_name: precision:  98.15%; recall:  96.36%; FB1:  97.25  54\n",
      "depart_date.today_relative: precision:  88.89%; recall:  88.89%; FB1:  88.89  9\n",
      " depart_date.year: precision: 100.00%; recall: 100.00%; FB1: 100.00  2\n",
      "depart_time.end_time: precision: 100.00%; recall:  33.33%; FB1:  50.00  1\n",
      "depart_time.period_mod: precision: 100.00%; recall: 100.00%; FB1: 100.00  5\n",
      "depart_time.period_of_day: precision:  97.48%; recall:  89.92%; FB1:  93.55  119\n",
      "depart_time.start_time: precision: 100.00%; recall:  33.33%; FB1:  50.00  1\n",
      " depart_time.time: precision:  80.00%; recall:  98.25%; FB1:  88.19  70\n",
      "depart_time.time_relative: precision: 100.00%; recall:  93.85%; FB1:  96.83  61\n",
      "          economy: precision: 100.00%; recall: 100.00%; FB1: 100.00  6\n",
      "      fare_amount: precision: 100.00%; recall: 100.00%; FB1: 100.00  1\n",
      "  fare_basis_code: precision:  88.89%; recall:  94.12%; FB1:  91.43  18\n",
      "      flight_days: precision:  90.00%; recall:  90.00%; FB1:  90.00  10\n",
      "       flight_mod: precision:  72.00%; recall:  75.00%; FB1:  73.47  25\n",
      "    flight_number: precision:  83.33%; recall: 100.00%; FB1:  90.91  12\n",
      "      flight_stop: precision: 100.00%; recall: 100.00%; FB1: 100.00  20\n",
      "      flight_time: precision:   0.00%; recall:   0.00%; FB1:   0.00  5\n",
      "fromloc.airport_code: precision:  83.33%; recall: 100.00%; FB1:  90.91  6\n",
      "fromloc.airport_name: precision:  34.78%; recall:  66.67%; FB1:  45.71  23\n",
      "fromloc.city_name: precision:  98.73%; recall:  99.43%; FB1:  99.08  708\n",
      "fromloc.state_code: precision:  95.65%; recall:  95.65%; FB1:  95.65  23\n",
      "fromloc.state_name: precision: 100.00%; recall:  94.12%; FB1:  96.97  16\n",
      "             meal: precision:  92.31%; recall:  75.00%; FB1:  82.76  13\n",
      "        meal_code: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      " meal_description: precision:  85.71%; recall:  66.67%; FB1:  75.00  7\n",
      "              mod: precision:  25.00%; recall:  50.00%; FB1:  33.33  4\n",
      "               or: precision:  60.00%; recall: 100.00%; FB1:  75.00  5\n",
      "    period_of_day: precision: 100.00%; recall: 100.00%; FB1: 100.00  4\n",
      " restriction_code: precision:  40.00%; recall:  50.00%; FB1:  44.44  5\n",
      "return_date.date_relative: precision:   0.00%; recall:   0.00%; FB1:   0.00  1\n",
      "return_date.day_name: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       round_trip: precision:  98.61%; recall:  97.26%; FB1:  97.93  72\n",
      "       state_code: precision: 100.00%; recall: 100.00%; FB1: 100.00  1\n",
      "       state_name: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "stoploc.city_name: precision:  90.00%; recall:  90.00%; FB1:  90.00  20\n",
      "toloc.airport_code: precision: 100.00%; recall:  75.00%; FB1:  85.71  3\n",
      "toloc.airport_name: precision:  25.00%; recall:  33.33%; FB1:  28.57  4\n",
      "  toloc.city_name: precision:  96.07%; recall:  99.16%; FB1:  97.59  738\n",
      "toloc.country_name: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      " toloc.state_code: precision: 100.00%; recall: 100.00%; FB1: 100.00  18\n",
      " toloc.state_name: precision:  85.19%; recall:  82.14%; FB1:  83.64  27\n",
      "   transport_type: precision:  70.00%; recall:  70.00%; FB1:  70.00  10\n"
     ]
    }
   ],
   "source": [
    "conlleval('eval/'+modelname+'.txt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "base"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
