{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import os\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle as pkl\n",
    "import itertools\n",
    "import codecs\n",
    "import time\n",
    "import copy\n",
    "import random\n",
    "import sys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sentinel tags used by the CRF layer for start/stop transitions.\n",
    "START_TAG = '<START>'\n",
    "STOP_TAG = '<STOP>'\n",
    "\n",
    "def pad_seq(seq, max_length, PAD_token=0):\n",
    "    \"\"\"Right-pad `seq` with `PAD_token` up to `max_length`.\n",
    "\n",
    "    Returns a new list; the original `seq += ...` silently mutated the\n",
    "    caller's list in place.\n",
    "    \"\"\"\n",
    "    return seq + [PAD_token] * (max_length - len(seq))\n",
    "\n",
    "def create_dico(item_list):\n",
    "    \"\"\"\n",
    "    Count occurrences of every item across a list of lists of items.\n",
    "    Returns a dict mapping item -> frequency.\n",
    "    \"\"\"\n",
    "    assert type(item_list) is list\n",
    "    dico = {}\n",
    "    for items in item_list:\n",
    "        for item in items:\n",
    "            dico[item] = dico.get(item, 0) + 1\n",
    "    return dico\n",
    "\n",
    "def create_mapping(dico):\n",
    "    \"\"\"\n",
    "    Build item<->ID mappings from a frequency dict.\n",
    "    IDs are assigned by decreasing frequency, ties broken by item value.\n",
    "    \"\"\"\n",
    "    ranked = sorted(dico.items(), key=lambda kv: (-kv[1], kv[0]))\n",
    "    id_to_item = {idx: item for idx, (item, _) in enumerate(ranked)}\n",
    "    item_to_id = {item: idx for idx, item in id_to_item.items()}\n",
    "    return item_to_id, id_to_item\n",
    "\n",
    "def tag_mapping(sentences):\n",
    "    \"\"\"\n",
    "    Create a dictionary and a mapping of tags, sorted by frequency.\n",
    "    Assumes each sentence s carries its per-token tag sequence in s[2].\n",
    "    \"\"\"\n",
    "    tags = [s[2] for s in sentences]\n",
    "    dico = create_dico(tags)\n",
    "    # Negative counts push START/STOP to the end of the frequency-sorted\n",
    "    # mapping, so real tags occupy the lowest IDs.\n",
    "    dico[START_TAG] = -1\n",
    "    dico[STOP_TAG] = -2\n",
    "    tag_to_id, id_to_tag = create_mapping(dico)\n",
    "    print(\"Found %i unique named entity tags\" % len(dico))\n",
    "    return dico, tag_to_id, id_to_tag\n",
    "\n",
    "def word_mapping(sentences):\n",
    "    \"\"\"\n",
    "    Create a word-frequency dict and word<->ID mappings from sentences.\n",
    "    Words are lowercased; tokens seen fewer than 3 times are dropped.\n",
    "    \"\"\"\n",
    "    words = [[str(x).lower() for x in s[0]] for s in sentences]\n",
    "    dico = create_dico(words)\n",
    "\n",
    "    # Huge artificial counts guarantee <PAD> gets ID 0 and <UNK> ID 1\n",
    "    # after frequency sorting, and both survive the min-count filter below.\n",
    "    dico['<PAD>'] = 10000001\n",
    "    dico['<UNK>'] = 10000000\n",
    "    dico = {k:v for k,v in dico.items() if v>=3}\n",
    "    word_to_id, id_to_word = create_mapping(dico)\n",
    "\n",
    "    print(\"Found %i unique words (%i in total)\" % (\n",
    "        len(dico), sum(len(x) for x in words)\n",
    "    ))\n",
    "    return dico, word_to_id, id_to_word\n",
    "\n",
    "def augment_with_pretrained(dictionary, ext_emb_path, words):\n",
    "    \"\"\"\n",
    "    Augment the dictionary with words that have a pretrained embedding.\n",
    "    If `words` is None, we add every word that has a pretrained embedding\n",
    "    to the dictionary, otherwise, we only add the words that are given by\n",
    "    `words` (typically the words in the development and test sets.)\n",
    "    \"\"\"\n",
    "    print('Loading pretrained embeddings from %s...' % ext_emb_path)\n",
    "    assert os.path.isfile(ext_emb_path)\n",
    "\n",
    "    # Load pretrained embeddings from file, skipping blank lines.\n",
    "    # BUGFIX: the condition used to test len(ext_emb_path) (always true),\n",
    "    # so an empty line in the file crashed on split()[0].\n",
    "    pretrained = set([\n",
    "        line.rstrip().split()[0].strip()\n",
    "        for line in codecs.open(ext_emb_path, 'r', 'utf-8')\n",
    "        if len(line.rstrip()) > 0\n",
    "    ])\n",
    "    \n",
    "    if words is None:\n",
    "        for word in pretrained:\n",
    "            if word not in dictionary:\n",
    "                dictionary[word] = 0\n",
    "    else:\n",
    "        # A word is added if any casing/digit-normalized variant of it\n",
    "        # has a pretrained vector.\n",
    "        for word in words:\n",
    "            if any(x in pretrained for x in [\n",
    "                word,\n",
    "                word.lower(),\n",
    "                re.sub(r'\\d', '0', word.lower())\n",
    "            ]) and word not in dictionary:\n",
    "                dictionary[word] = 0\n",
    "\n",
    "    word_to_id, id_to_word = create_mapping(dictionary)\n",
    "    return dictionary, word_to_id, id_to_word\n",
    "\n",
    "def cap_feature(s):\n",
    "    \"\"\"\n",
    "    Capitalization feature:\n",
    "    0 = low caps\n",
    "    1 = all caps\n",
    "    2 = first letter caps\n",
    "    3 = one capital (not first letter)\n",
    "    Strings with no cased characters (digits, punctuation, empty) fall\n",
    "    into case 0, since s.lower() == s for them.\n",
    "    \"\"\"\n",
    "    if s.lower() == s:\n",
    "        return 0\n",
    "    elif s.upper() == s:\n",
    "        return 1\n",
    "    elif s[0].upper() == s[0]:\n",
    "        return 2\n",
    "    else:\n",
    "        return 3\n",
    "\n",
    "def prepare_dataset(sentences, word_to_id, tag_to_id):\n",
    "    \"\"\"\n",
    "    Prepare the dataset. Return a list of lists of dictionaries containing:\n",
    "        - word indexes\n",
    "        - word char indexes\n",
    "        - tag indexes\n",
    "    Each sentence s is indexed as s[0]=tokens, s[1]=verb flags, s[2]=tags\n",
    "    -- presumably one SRL frame per entry; verify against the data loader.\n",
    "    \"\"\"\n",
    "    def f(x): return x.lower()\n",
    "    data = []\n",
    "    for s in sentences:\n",
    "        str_words = [str(x) for x in s[0]]\n",
    "        # Out-of-vocabulary words map to the '<UNK>' index.\n",
    "        words = [word_to_id[f(w) if f(w) in word_to_id else '<UNK>']\n",
    "                 for w in str_words]\n",
    "        caps = [cap_feature(w) for w in str_words]\n",
    "        tags = [tag_to_id[t] for t in s[2]]\n",
    "        verbs = [v for v in s[1]]\n",
    "        data.append({\n",
    "            'str_words': str_words,\n",
    "            'words': words,\n",
    "            'caps': caps,\n",
    "            'tags': tags,\n",
    "            'verbs': verbs\n",
    "        })\n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_consrl(train_data, val_data, pretrained='wordvectors/glove.6B.100d.txt', word_dim = 100):\n",
    "    \"\"\"\n",
    "    Build vocabularies, prepare train/dev datasets and load pretrained\n",
    "    embeddings. Returns (train_data, dev_data, mappings), where mappings\n",
    "    holds the word/tag id dicts and the embedding matrix.\n",
    "    \"\"\"\n",
    "    dico_words_train, _, _ = word_mapping(train_data)\n",
    "    # Tag inventory is built over train+val so dev-only tags get IDs too.\n",
    "    dico_tags, tag_to_id, id_to_tag = tag_mapping(train_data+val_data)\n",
    "    \n",
    "    # Extend the train vocabulary with val words that have a pretrained vector.\n",
    "    dico_words, word_to_id, id_to_word = augment_with_pretrained(\n",
    "                                         dico_words_train.copy(), pretrained,\n",
    "                                         list(itertools.chain.from_iterable(\n",
    "                                         [[str(x).lower() for x in s[0]] for s in val_data])))\n",
    "    \n",
    "    train_data = prepare_dataset(train_data, word_to_id, tag_to_id)\n",
    "    dev_data = prepare_dataset(val_data, word_to_id, tag_to_id)\n",
    "    \n",
    "    print(\"%i / %i sentences in train / dev.\" % (len(train_data), len(dev_data)))\n",
    "    \n",
    "    # Parse the embedding file; keep only lines with exactly word_dim values.\n",
    "    all_word_embeds = {}\n",
    "    for i, line in enumerate(codecs.open(pretrained, 'r', 'utf-8')):\n",
    "        s = line.strip().split()\n",
    "        if len(s) == word_dim + 1:\n",
    "            all_word_embeds[s[0]] = np.array([float(i) for i in s[1:]])\n",
    "\n",
    "    # Random init for words without a pretrained vector.\n",
    "    word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), word_dim))\n",
    "\n",
    "    for w in word_to_id:\n",
    "        if w in all_word_embeds:\n",
    "            word_embeds[word_to_id[w]] = all_word_embeds[w]\n",
    "        elif w.lower() in all_word_embeds:\n",
    "            word_embeds[word_to_id[w]] = all_word_embeds[w.lower()]\n",
    "\n",
    "    print('Loaded %i pretrained embeddings.' % len(all_word_embeds))\n",
    "    \n",
    "    mappings = {\n",
    "        'word_to_id': word_to_id,\n",
    "        'id_to_word': id_to_word,\n",
    "        'tag_to_id': tag_to_id,\n",
    "        'id_to_tag': id_to_tag,\n",
    "        'word_embeds': word_embeds\n",
    "    }\n",
    "        \n",
    "    return train_data, dev_data, mappings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def log_sum_exp(vec, dim=-1, keepdim = False):\n",
    "    \"\"\"Numerically stable log(sum(exp(vec))) along `dim`.\n",
    "\n",
    "    Subtracts the per-slice max before exponentiating to avoid overflow.\n",
    "    \"\"\"\n",
    "    max_score, _ = vec.max(dim, keepdim=keepdim)\n",
    "    if keepdim:\n",
    "        stable_vec = vec - max_score\n",
    "    else:\n",
    "        stable_vec = vec - max_score.unsqueeze(dim)\n",
    "    output = max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()\n",
    "    return output\n",
    "\n",
    "def create_batches(dataset, batch_size, order='keep', str_words=False, tag_padded= True):\n",
    "    \"\"\"\n",
    "    Split `dataset` (output of prepare_dataset) into padded mini-batches.\n",
    "\n",
    "    order: 'keep' preserves order, 'sort' sorts by sentence length,\n",
    "           'random' shuffles.\n",
    "    Each batch dict holds padded word/cap/verb/tag arrays, the true\n",
    "    lengths ('wordslen') and a 0/1 padding mask ('tagsmask').\n",
    "    \"\"\"\n",
    "    # Deep copy so sorting/shuffling/padding never affects the caller's data.\n",
    "    newdata = copy.deepcopy(dataset)\n",
    "    if order=='sort':\n",
    "        newdata.sort(key = lambda x:len(x['words']))\n",
    "    elif order=='random':\n",
    "        random.shuffle(newdata)\n",
    "\n",
    "    newdata = np.array(newdata)  \n",
    "    batches = []\n",
    "    num_batches = np.ceil(len(dataset)/float(batch_size)).astype('int')\n",
    "\n",
    "    for i in range(num_batches):\n",
    "        batch_data = newdata[(i*batch_size):min(len(dataset),(i+1)*batch_size)]\n",
    "\n",
    "        words_seqs = [itm['words'] for itm in batch_data]\n",
    "        caps_seqs = [itm['caps'] for itm in batch_data]\n",
    "        verbs_seqs = [itm['verbs'] for itm in batch_data]\n",
    "        target_seqs = [itm['tags'] for itm in batch_data]\n",
    "        str_words_seqs = [itm['str_words'] for itm in batch_data]\n",
    "\n",
    "        # Sort each batch by decreasing length, as required by\n",
    "        # pack_padded_sequence in the encoder.\n",
    "        seq_pairs = sorted(zip(words_seqs, caps_seqs, target_seqs, verbs_seqs, str_words_seqs), \n",
    "                           key=lambda p: len(p[0]), reverse=True)\n",
    "\n",
    "        words_seqs, caps_seqs, target_seqs, verbs_seqs, str_words_seqs = zip(*seq_pairs)\n",
    "        words_lengths = np.array([len(s) for s in words_seqs])\n",
    "\n",
    "        words_padded = np.array([pad_seq(s, np.max(words_lengths)) for s in words_seqs])\n",
    "        caps_padded = np.array([pad_seq(s, np.max(words_lengths)) for s in caps_seqs])\n",
    "        verbs_padded = np.array([pad_seq(s, np.max(words_lengths)) for s in verbs_seqs])\n",
    "\n",
    "        if tag_padded:\n",
    "            target_padded = np.array([pad_seq(s, np.max(words_lengths)) for s in target_seqs])\n",
    "        else:\n",
    "            target_padded = target_seqs\n",
    "\n",
    "        # Mask relies on word id 0 being reserved for <PAD>.\n",
    "        words_mask = (words_padded!=0).astype('int')\n",
    "\n",
    "        if str_words:\n",
    "            outputdict = {'words':words_padded, 'caps':caps_padded, 'tags': target_padded, \n",
    "                          'verbs': verbs_padded, 'wordslen': words_lengths, 'tagsmask':words_mask, \n",
    "                          'str_words': str_words_seqs}\n",
    "        else:\n",
    "            outputdict = {'words':words_padded, 'caps':caps_padded, 'tags': target_padded, \n",
    "                          'verbs': verbs_padded, 'wordslen': words_lengths, 'tagsmask':words_mask}\n",
    "\n",
    "        batches.append(outputdict)\n",
    "\n",
    "    return batches"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Initializer(object):\n",
    "    \"\"\"Weight-initialization helpers (Glorot-style uniform ranges).\"\"\"\n",
    "    \n",
    "    def __init__(self):\n",
    "        pass\n",
    "    \n",
    "    def init_embedding(self, input_embedding):\n",
    "        # Uniform in +/- sqrt(3/dim).\n",
    "        bias = np.sqrt(3.0 / input_embedding.size(1))\n",
    "        nn.init.uniform_(input_embedding, -bias, bias)\n",
    "    \n",
    "    def init_linear(self, input_linear):\n",
    "        # Glorot uniform for the weight matrix; zero the bias if present.\n",
    "        bias = np.sqrt(6.0 / (input_linear.weight.size(0) + input_linear.weight.size(1)))\n",
    "        nn.init.uniform_(input_linear.weight, -bias, bias)\n",
    "        if input_linear.bias is not None:\n",
    "            input_linear.bias.data.zero_()\n",
    "    \n",
    "    def init_lstm(self, input_lstm):\n",
    "        \"\"\"Glorot-uniform weights and gate-aware biases for an (optionally\n",
    "        bidirectional) LSTM. getattr replaces the original eval() lookups --\n",
    "        same attributes, no arbitrary code execution. The order of RNG\n",
    "        draws matches the original implementation exactly.\n",
    "        \"\"\"\n",
    "        def _init_weight(name):\n",
    "            weight = getattr(input_lstm, name)\n",
    "            # weight.size(0) stacks the 4 LSTM gates, hence the /4.\n",
    "            bias = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))\n",
    "            nn.init.uniform_(weight, -bias, bias)\n",
    "        \n",
    "        def _init_bias(name):\n",
    "            # Zero all biases, then set the forget-gate slice to 1\n",
    "            # (standard trick to ease gradient flow early in training).\n",
    "            weight = getattr(input_lstm, name)\n",
    "            weight.data.zero_()\n",
    "            weight.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1\n",
    "        \n",
    "        for ind in range(0, input_lstm.num_layers):\n",
    "            _init_weight('weight_ih_l' + str(ind))\n",
    "            _init_weight('weight_hh_l' + str(ind))\n",
    "        \n",
    "        if input_lstm.bidirectional:\n",
    "            for ind in range(0, input_lstm.num_layers):\n",
    "                _init_weight('weight_ih_l' + str(ind) + '_reverse')\n",
    "                _init_weight('weight_hh_l' + str(ind) + '_reverse')\n",
    "        \n",
    "        if input_lstm.bias:\n",
    "            for ind in range(0, input_lstm.num_layers):\n",
    "                _init_bias('bias_ih_l' + str(ind))\n",
    "                _init_bias('bias_hh_l' + str(ind))\n",
    "            \n",
    "            if input_lstm.bidirectional:\n",
    "                for ind in range(0, input_lstm.num_layers):\n",
    "                    _init_bias('bias_ih_l' + str(ind) + '_reverse')\n",
    "                    _init_bias('bias_hh_l' + str(ind) + '_reverse')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "\n",
    "class baseRNN(nn.Module):\n",
    "    \"\"\"\n",
    "    Base class for recurrent encoders: stores sizes/dropout settings and\n",
    "    resolves `rnn_cell` ('lstm' or 'gru') to the matching nn module class.\n",
    "    Subclasses must implement forward().\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, hidden_size, input_dropout_p, output_dropout_p, n_layers, rnn_cell, max_len=25):\n",
    "        super(baseRNN, self).__init__()\n",
    "        \n",
    "        self.vocab_size = vocab_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.n_layers = n_layers\n",
    "        self.max_len = max_len\n",
    "        \n",
    "        self.input_dropout_p = input_dropout_p\n",
    "        self.output_dropout_p = output_dropout_p\n",
    "        \n",
    "        # Store the cell *class*; subclasses instantiate it themselves.\n",
    "        if rnn_cell.lower() == 'lstm':\n",
    "            self.rnn_cell = nn.LSTM\n",
    "        elif rnn_cell.lower() == 'gru':\n",
    "            self.rnn_cell = nn.GRU\n",
    "        else:\n",
    "            raise ValueError(\"Unsupported RNN Cell: {0}\".format(rnn_cell))\n",
    "\n",
    "        self.input_dropout = nn.Dropout(p=input_dropout_p)\n",
    "\n",
    "    def forward(self, *args, **kwargs):\n",
    "        raise NotImplementedError()\n",
    "\n",
    "class WordEncoderRNN(baseRNN):\n",
    "    \"\"\"\n",
    "    Word-level encoder. Concatenates word embeddings with verb-flag and\n",
    "    (optional) capitalization embeddings per token, then runs a packed\n",
    "    RNN over the batch (sequences must be sorted by decreasing length).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, embedding_size ,hidden_size, verb_size, cap_size, input_dropout_p=0.5, \n",
    "                 output_dropout_p=0, n_layers=1, bidirectional=True, rnn_cell='lstm'):\n",
    "        \n",
    "        super(WordEncoderRNN, self).__init__(vocab_size, hidden_size, input_dropout_p, \n",
    "                                             output_dropout_p, n_layers, rnn_cell)\n",
    "\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_size)\n",
    "        \n",
    "        # The RNN consumes word + verb + cap features concatenated per token.\n",
    "        augmented_embedding_size = embedding_size + verb_size + cap_size\n",
    "        self.rnn = self.rnn_cell(augmented_embedding_size, hidden_size, n_layers,\n",
    "                                 bidirectional=bidirectional, dropout=output_dropout_p,\n",
    "                                 batch_first=True)\n",
    "\n",
    "    def forward(self, words, verb_embedding, cap_embedding, input_lengths):\n",
    "        \"\"\"Return RNN outputs, batch-first, one vector per token.\"\"\"\n",
    "        embedded = self.embedding(words)\n",
    "        if cap_embedding is not None:\n",
    "            embedded = torch.cat((embedded,verb_embedding,cap_embedding),2)  \n",
    "        else:\n",
    "            embedded = torch.cat((embedded,verb_embedding),2)\n",
    "    \n",
    "        embedded = self.input_dropout(embedded)\n",
    "        # Packing skips the padded positions during the recurrence.\n",
    "        embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first= True)\n",
    "        output, _ = self.rnn(embedded)\n",
    "        output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first= True)\n",
    "        \n",
    "        return output\n",
    "    \n",
    "class DecoderCRF(nn.Module):\n",
    "    \"\"\"\n",
    "    CRF output layer: projects encoder features to tag scores and\n",
    "    implements forward-algorithm scoring and batched Viterbi decoding.\n",
    "    transitions[i, j] holds the score of transitioning from tag j to tag i.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_dimension, tag_to_ix, input_dropout_p=0.5):\n",
    "        \n",
    "        super(DecoderCRF, self).__init__()\n",
    "        \n",
    "        self.tag_to_ix = tag_to_ix\n",
    "        self.tagset_size = len(tag_to_ix)\n",
    "        \n",
    "        self.dropout = nn.Dropout(input_dropout_p)\n",
    "        self.hidden2tag = nn.Linear(input_dimension, self.tagset_size)\n",
    "        \n",
    "        # Forbid transitions into START and out of STOP.\n",
    "        self.transitions = nn.Parameter(torch.zeros(self.tagset_size, self.tagset_size))\n",
    "        self.transitions.data[tag_to_ix[START_TAG], :] = -10000\n",
    "        self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000\n",
    "    \n",
    "    def viterbi_decode(self, feats, mask ,usecuda = True, score_only= False):\n",
    "        \"\"\"Batched Viterbi decoding over (batch, seq, tags) feature scores.\n",
    "        Returns the best path score and, unless score_only, the decoded\n",
    "        (unpadded) tag sequences.\"\"\"\n",
    "        batch_size, sequence_len, num_tags = feats.size()\n",
    "        \n",
    "        assert num_tags == self.tagset_size\n",
    "        \n",
    "        # Time-major layout for the per-step recursion below.\n",
    "        mask = mask.transpose(0, 1).contiguous()\n",
    "        feats = feats.transpose(0, 1).contiguous()\n",
    "        \n",
    "        backpointers = []\n",
    "        \n",
    "        # BUGFIX: this buffer was unconditionally .cuda(), which crashed\n",
    "        # CPU-only runs even when usecuda=False.\n",
    "        all_forward_vars = Variable(torch.Tensor(sequence_len, \n",
    "                                    batch_size, num_tags).fill_(0.))\n",
    "        if usecuda:\n",
    "            all_forward_vars = all_forward_vars.cuda()\n",
    "        \n",
    "        init_vars = torch.Tensor(batch_size, num_tags).fill_(-10000.)\n",
    "        init_vars[:,self.tag_to_ix[START_TAG]] = 0.\n",
    "        if usecuda:\n",
    "            forward_var = Variable(init_vars).cuda()\n",
    "        else:\n",
    "            forward_var = Variable(init_vars)\n",
    "        \n",
    "        for i in range(sequence_len):\n",
    "            broadcast_forward = forward_var.view(batch_size, 1, num_tags)\n",
    "            transition_scores = self.transitions.view(1, num_tags, num_tags)\n",
    "            \n",
    "            next_tag_var = broadcast_forward + transition_scores\n",
    "            \n",
    "            # Best previous tag for every (batch, next-tag) pair.\n",
    "            viterbivars_t, bptrs_t = torch.max(next_tag_var, dim=2)\n",
    "            \n",
    "            forward_var = viterbivars_t + feats[i]\n",
    "            all_forward_vars[i,:,:] = forward_var\n",
    "\n",
    "            bptrs_t = bptrs_t.squeeze().data.cpu().numpy()\n",
    "            backpointers.append(bptrs_t)\n",
    "        \n",
    "        # Pick each sentence's forward vector at its true last position.\n",
    "        mask_sum = torch.sum(mask, dim = 0, keepdim =True) - 1\n",
    "        mask_sum_ex = mask_sum.view(1, batch_size, 1).expand(1, batch_size, num_tags)\n",
    "        final_forward_var = all_forward_vars.gather(0, mask_sum_ex).squeeze(0)\n",
    "        \n",
    "        terminal_var = final_forward_var + self.transitions[self.tag_to_ix[STOP_TAG]].view(1, num_tags)\n",
    "        terminal_var.data[:,self.tag_to_ix[STOP_TAG]] = -10000.\n",
    "        terminal_var.data[:,self.tag_to_ix[START_TAG]] = -10000.\n",
    "        \n",
    "        path_score, best_tag_id = torch.max(terminal_var, dim = 1)\n",
    "                \n",
    "        if score_only:\n",
    "            return path_score\n",
    "        \n",
    "        # Walk the backpointers from each sentence's true end.\n",
    "        n_mask_sum = mask_sum.squeeze().data.cpu().numpy() + 1\n",
    "        best_tag_id = best_tag_id.data.cpu().numpy()\n",
    "        decoded_tags = []\n",
    "        for i in range(batch_size):\n",
    "            best_path = [best_tag_id[i]]\n",
    "            bp_list = reversed([itm[i] for itm in backpointers[:n_mask_sum[i]]])\n",
    "            for bptrs_t in bp_list:\n",
    "                best_tag_id[i] = bptrs_t[best_tag_id[i]]\n",
    "                best_path.append(best_tag_id[i])\n",
    "            start = best_path.pop()\n",
    "            assert start == self.tag_to_ix[START_TAG]\n",
    "            best_path.reverse()\n",
    "            decoded_tags.append(best_path)\n",
    "        \n",
    "        return path_score, decoded_tags\n",
    "    \n",
    "    def crf_forward(self, feats, mask, usecuda=True):\n",
    "        \"\"\"Forward algorithm: log partition function per sentence.\"\"\"\n",
    "        batch_size, sequence_length, num_tags = feats.size()\n",
    "        \n",
    "        mask = mask.float().transpose(0, 1).contiguous()\n",
    "        feats = feats.transpose(0, 1).contiguous()\n",
    "        \n",
    "        init_alphas = torch.Tensor(batch_size, num_tags).fill_(-10000.)\n",
    "        init_alphas[:,self.tag_to_ix[START_TAG]] = 0.\n",
    "        if usecuda:\n",
    "            forward_var = Variable(init_alphas).cuda()\n",
    "        else:\n",
    "            forward_var = Variable(init_alphas)\n",
    "        \n",
    "        for i in range(sequence_length):\n",
    "            emit_score = feats[i].view(batch_size, num_tags, 1)\n",
    "            transition_scores = self.transitions.view(1, num_tags, num_tags)\n",
    "            broadcast_forward = forward_var.view(batch_size, 1, num_tags)\n",
    "            tag_var = broadcast_forward + transition_scores + emit_score \n",
    "            \n",
    "            # Padded positions keep their previous forward value.\n",
    "            forward_var = (log_sum_exp(tag_var, dim = 2) * mask[i].view(batch_size, 1) +\n",
    "                            forward_var * (1 - mask[i]).view(batch_size, 1))\n",
    "            \n",
    "        terminal_var = (forward_var + (self.transitions[self.tag_to_ix[STOP_TAG]]).view(1, -1))\n",
    "        alpha = log_sum_exp(terminal_var, dim = 1)\n",
    "        \n",
    "        return alpha\n",
    "        \n",
    "    \n",
    "    def score_sentence(self, feats, tags, mask, usecuda=True):\n",
    "        \"\"\"Score of the gold tag sequence under the current model.\"\"\"\n",
    "        batch_size, sequence_length, num_tags = feats.size()\n",
    "        \n",
    "        feats = feats.transpose(0, 1).contiguous()\n",
    "        tags = tags.transpose(0, 1).contiguous()\n",
    "        mask = mask.float().transpose(0, 1).contiguous()\n",
    "                \n",
    "        broadcast_transitions = self.transitions.view(1, num_tags, num_tags).expand(batch_size, num_tags, num_tags)\n",
    "        \n",
    "        # START -> first-tag transition.\n",
    "        score = self.transitions[:,self.tag_to_ix[START_TAG]].index_select(0, tags[0])\n",
    "        \n",
    "        for i in range(sequence_length - 1):\n",
    "            current_tag, next_tag = tags[i], tags[i+1]\n",
    "            \n",
    "            transition_score = (\n",
    "                     broadcast_transitions\n",
    "                    .gather(1, next_tag.view(batch_size, 1, 1).expand(batch_size, 1, num_tags))\n",
    "                    .squeeze(1)\n",
    "                    .gather(1, current_tag.view(batch_size, 1))\n",
    "                    .squeeze(1)\n",
    "                    )\n",
    "\n",
    "            emit_score = feats[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)\n",
    "\n",
    "            # Masks zero out contributions past each sentence's true length.\n",
    "            score = score + transition_score* mask[i + 1] + emit_score * mask[i]  \n",
    "        last_tag_index = mask.sum(0).long() - 1\n",
    "\n",
    "        last_tags = tags.gather(0, last_tag_index.view(1, batch_size).expand(sequence_length, batch_size))\n",
    "        last_tags = last_tags[0]\n",
    "\n",
    "        # last-tag -> STOP transition plus the final emission.\n",
    "        last_transition_score = self.transitions[self.tag_to_ix[STOP_TAG]].index_select(0, last_tags)\n",
    "        \n",
    "        last_inputs = feats[-1]                                     \n",
    "        last_input_score = last_inputs.gather(1, last_tags.view(batch_size, 1))\n",
    "        last_input_score = last_input_score.squeeze(1)\n",
    "        \n",
    "        score = score + last_transition_score + last_input_score * mask[-1]\n",
    "        \n",
    "        return score\n",
    "    \n",
    "    def decode(self, input_var, mask, usecuda=True, score_only= False):\n",
    "        \"\"\"Project features to tag space and Viterbi-decode.\"\"\"\n",
    "        input_var = self.dropout(input_var)\n",
    "        features = self.hidden2tag(input_var)\n",
    "        if score_only:\n",
    "            score = self.viterbi_decode(features, mask, usecuda=usecuda, score_only=True)\n",
    "            return score\n",
    "        score, tag_seq = self.viterbi_decode(features, mask, usecuda=usecuda)\n",
    "        return score, tag_seq\n",
    "    \n",
    "    def forward(self, input_var, tags, mask=None, usecuda=True):\n",
    "        \"\"\"Negative log-likelihood: sum over the batch of\n",
    "        (log partition function - gold path score).\"\"\"\n",
    "        if mask is None:\n",
    "            mask = Variable(torch.ones(*tags.size()).long())\n",
    "            # BUGFIX: the default mask must live on the same device as the\n",
    "            # features, otherwise crf_forward mixes CPU and GPU tensors.\n",
    "            if usecuda:\n",
    "                mask = mask.cuda()\n",
    "        \n",
    "        input_var = self.dropout(input_var)\n",
    "        features = self.hidden2tag(input_var)\n",
    "        forward_score = self.crf_forward(features, mask, usecuda=usecuda)\n",
    "        ground_score = self.score_sentence(features, tags, mask, usecuda=usecuda)\n",
    "        \n",
    "        return torch.sum(forward_score-ground_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BiLSTM_CRF(nn.Module):\n",
    "    \n",
    "    def __init__(self, word_vocab_size, word_embedding_dim, word_hidden_dim, tag_to_id, \n",
    "                 verb_embedding_dim, cap_embedding_dim, verb_input_dim = 2, cap_input_dim=4, \n",
    "                 pretrained=None):\n",
    "        \n",
    "        super(BiLSTM_CRF, self).__init__()\n",
    "        \n",
    "        self.word_vocab_size = word_vocab_size\n",
    "        self.word_embedding_dim = word_embedding_dim\n",
    "        self.word_hidden_dim = word_hidden_dim\n",
    "        \n",
    "        self.verb_input_dim = verb_input_dim \n",
    "        self.verb_embedding_dim = verb_embedding_dim\n",
    "        \n",
    "        self.cap_input_dim = cap_input_dim\n",
    "        self.cap_embedding_dim = cap_embedding_dim\n",
    "        \n",
    "        self.tag_to_ix = tag_to_id\n",
    "        self.tagset_size = len(tag_to_id)\n",
    "        \n",
    "        self.initializer = Initializer()\n",
    "        \n",
    "        if self.cap_embedding_dim:\n",
    "            self.cap_embedder = nn.Embedding(self.cap_input_dim, self.cap_embedding_dim)\n",
    "            self.initializer.init_embedding(self.cap_embedder.weight)\n",
    "            \n",
    "        self.verb_embedder = nn.Embedding(self.verb_input_dim, self.verb_embedding_dim)\n",
    "        self.initializer.init_embedding(self.verb_embedder.weight)\n",
    "        \n",
    "        self.word_encoder = WordEncoderRNN(word_vocab_size, word_embedding_dim ,word_hidden_dim, \n",
    "                                     verb_embedding_dim, cap_embedding_dim, input_dropout_p=0.5)\n",
    "        \n",
    "        if pretrained is not None:\n",
    "            self.word_encoder.embedding.weight = nn.Parameter(torch.FloatTensor(pretrained))\n",
    "            \n",
    "        self.initializer.init_lstm(self.word_encoder.rnn)\n",
    "        \n",
    "        self.decoder = DecoderCRF(word_hidden_dim*2, self.tag_to_ix, input_dropout_p=0.5)\n",
    "        self.initializer.init_linear(self.decoder.hidden2tag)\n",
    "        \n",
    "    def forward(self, words, tags, verbs, caps, wordslen, tagsmask, usecuda=True):\n",
    "        \n",
    "        batch_size, max_len = words.size()\n",
    "        \n",
    "        cap_features = self.cap_embedder(caps) if self.cap_embedding_dim else None\n",
    "        verb_features = self.verb_embedder(verbs)\n",
    "        word_features = self.word_encoder(words, verb_features ,cap_features, wordslen)\n",
    "        score = self.decoder(word_features, tags, tagsmask, usecuda=usecuda)\n",
    "        \n",
    "        return score\n",
    "    \n",
    "    def decode(self, words, verbs, caps, wordslen, tagsmask, usecuda=True, score_only = False):\n",
    "        \n",
    "        batch_size, max_len = words.size()\n",
    "        \n",
    "        cap_features = self.cap_embedder(caps) if self.cap_embedding_dim else None\n",
    "        verb_features = self.verb_embedder(verbs)\n",
    "        word_features = self.word_encoder(words, verb_features ,cap_features, wordslen)\n",
    "        if score_only:\n",
    "            score = self.decoder.decode(word_features, tagsmask, usecuda=usecuda, \n",
    "                                        score_only = True)\n",
    "            return score\n",
    "        score, tag_seq = self.decoder.decode(word_features, tagsmask, usecuda=usecuda)\n",
    "        \n",
    "        return score, tag_seq"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 8934 unique words (480026 in total)\n",
      "Found 94 unique named entity tags\n",
      "Loading pretrained embeddings from wordvectors/glove.6B.100d.txt...\n",
      "20002 / 5002 sentences in train / dev.\n",
      "Loaded 400000 pretrained embeddings.\n"
     ]
    }
   ],
   "source": [
    "# Load the pre-processed SRL data: a list of (tokens, verb-flags, tags)\n",
    "# entries -- presumably one frame per entry; verify against the pickler.\n",
    "# NOTE(review): pickle.load can execute arbitrary code; only load trusted files.\n",
    "srl_data = pkl.load(open('datasets/consrl/srl_data.p','rb'))\n",
    "srl_train_data = srl_data[:20002]\n",
    "srl_val_data = srl_data[20002:25004]\n",
    "\n",
    "train_data, dev_data, mappings = load_consrl(srl_train_data, srl_val_data)\n",
    "\n",
    "word_to_id = mappings['word_to_id']\n",
    "tag_to_id = mappings['tag_to_id']\n",
    "word_embeds = mappings['word_embeds']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Evaluator(object):\n",
    "    \"\"\"\n",
    "    Runs a trained model over a dataset and scores the predictions with\n",
    "    the external conlleval script.\n",
    "    \"\"\"\n",
    "    def __init__(self, result_path, model_name, mappings, usecuda=True):\n",
    "        self.result_path = result_path\n",
    "        self.model_name = model_name\n",
    "        self.tag_to_id = mappings['tag_to_id']\n",
    "        self.id_to_tag = mappings['id_to_tag']\n",
    "        self.usecuda = usecuda\n",
    "\n",
    "    def evaluate_conll(self, model, dataset, best_F, eval_script='./datasets/conll/conlleval',\n",
    "                          checkpoint_folder='.', record_confmat = False, batch_size = 40):\n",
    "        \"\"\"\n",
    "        Decode `dataset`, write word/gold/pred triples to pred.txt, run the\n",
    "        conlleval script and parse its F1 score. Returns (best_F, new_F,\n",
    "        save) where `save` is True when new_F improves on best_F.\n",
    "        \"\"\"\n",
    "        prediction = []\n",
    "        save = False\n",
    "        new_F = 0.0\n",
    "        # START/STOP occupy the two largest tag IDs (see tag_mapping), so\n",
    "        # real tags fit in a (n_tags-2) x (n_tags-2) confusion matrix.\n",
    "        confusion_matrix = torch.zeros((len(self.tag_to_id) - 2, len(self.tag_to_id) - 2))\n",
    "    \n",
    "        data_batches = create_batches(dataset, batch_size = batch_size, str_words = True,\n",
    "                                      tag_padded = False)\n",
    "\n",
    "        for data in data_batches:\n",
    "\n",
    "            words = data['words']\n",
    "            verbs = data['verbs']\n",
    "            caps = data['caps']\n",
    "            mask = data['tagsmask']\n",
    "\n",
    "            if self.usecuda:\n",
    "                words = Variable(torch.LongTensor(words)).cuda()\n",
    "                verbs = Variable(torch.LongTensor(verbs)).cuda()\n",
    "                caps = Variable(torch.LongTensor(caps)).cuda()\n",
    "                mask = Variable(torch.LongTensor(mask)).cuda()\n",
    "            else:\n",
    "                words = Variable(torch.LongTensor(words))\n",
    "                verbs = Variable(torch.LongTensor(verbs))\n",
    "                caps = Variable(torch.LongTensor(caps))\n",
    "                mask = Variable(torch.LongTensor(mask))\n",
    "\n",
    "            wordslen = data['wordslen']\n",
    "            str_words = data['str_words']\n",
    "            \n",
    "            _, out = model.decode(words, verbs, caps, wordslen, mask, usecuda = self.usecuda)\n",
    "                                \n",
    "            ground_truth_id = data['tags']\n",
    "            predicted_id = out            \n",
    "            \n",
    "            # One 'word gold pred' line per token; blank line ends a sentence\n",
    "            # (the format the conlleval script expects).\n",
    "            for (swords, sground_truth_id, spredicted_id) in zip(str_words, ground_truth_id, predicted_id):\n",
    "                for (word, true_id, pred_id) in zip(swords, sground_truth_id, spredicted_id):\n",
    "                    line = ' '.join([word, self.id_to_tag[true_id], self.id_to_tag[pred_id]])\n",
    "                    prediction.append(line)\n",
    "                    confusion_matrix[true_id, pred_id] += 1\n",
    "                prediction.append('')\n",
    "        \n",
    "        predf = os.path.join(self.result_path, self.model_name, checkpoint_folder ,'pred.txt')\n",
    "        scoref = os.path.join(self.result_path, self.model_name, checkpoint_folder ,'score.txt')\n",
    "\n",
    "        with open(predf, 'w') as f:\n",
    "            f.write('\\n'.join(prediction))\n",
    "\n",
    "        # NOTE(review): shell interpolation -- paths come from local config,\n",
    "        # but keep them free of shell metacharacters.\n",
    "        os.system('%s < %s > %s' % (eval_script, predf, scoref))\n",
    "\n",
    "        eval_lines = [l.rstrip() for l in codecs.open(scoref, 'r', 'utf8')]\n",
    "\n",
    "        for i, line in enumerate(eval_lines):\n",
    "            print(line)\n",
    "            # The second line of conlleval output carries the overall FB1;\n",
    "            # its last whitespace-separated field is the F1 value.\n",
    "            if i == 1:\n",
    "                new_F = float(line.strip().split()[-1])\n",
    "                if new_F > best_F:\n",
    "                    best_F = new_F\n",
    "                    save = True\n",
    "                    print('the best F is ', new_F)\n",
    "        \n",
    "        return best_F, new_F, save\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Trainer(object):\n",
    "    \n",
    "    def __init__(self, model, optimizer, result_path, model_name, usedataset, mappings, \n",
    "                 eval_every=1, usecuda = True):\n",
    "        self.model = model\n",
    "        self.optimizer = optimizer\n",
    "        self.eval_every = eval_every\n",
    "        self.model_name = os.path.join(result_path, model_name)\n",
    "        self.usecuda = usecuda\n",
    "        \n",
    "        if usedataset=='consrl':\n",
    "            self.evaluator = Evaluator(result_path, model_name, mappings).evaluate_conll\n",
    "    \n",
    "    def adjust_learning_rate(self, optimizer, lr):\n",
    "        for param_group in optimizer.param_groups:\n",
    "            param_group['lr'] = lr\n",
    "            \n",
    "    def train_model(self, num_epochs, train_data, dev_data, learning_rate, checkpoint_folder='.',\n",
    "                    eval_test_train=True, plot_every=20, adjust_lr=True, batch_size = 40):\n",
    "\n",
    "        losses = []\n",
    "        loss = 0.0\n",
    "        best_dev_F = -1.0\n",
    "        best_train_F = -1.0\n",
    "        all_F=[[0,0]]\n",
    "        count = 0\n",
    "        word_count = 0\n",
    "        \n",
    "        self.model.train(True)\n",
    "        for epoch in range(1, num_epochs+1):\n",
    "            t=time.time()\n",
    "            \n",
    "            train_batches = create_batches(train_data, batch_size= batch_size, order='random')\n",
    "            \n",
    "            for i, index in enumerate(np.random.permutation(len(train_batches))): \n",
    "                \n",
    "                data = train_batches[index]\n",
    "                self.model.zero_grad()\n",
    "\n",
    "                words = data['words']\n",
    "                tags = data['tags']\n",
    "                verbs = data['verbs']\n",
    "                caps = data['caps']\n",
    "                mask = data['tagsmask']\n",
    "                \n",
    "                if self.usecuda:\n",
    "                    words = Variable(torch.LongTensor(words)).cuda()\n",
    "                    verbs = Variable(torch.LongTensor(verbs)).cuda()\n",
    "                    caps = Variable(torch.LongTensor(caps)).cuda()\n",
    "                    mask = Variable(torch.LongTensor(mask)).cuda()\n",
    "                    tags = Variable(torch.LongTensor(tags)).cuda()\n",
    "                else:\n",
    "                    words = Variable(torch.LongTensor(words))\n",
    "                    verbs = Variable(torch.LongTensor(verbs))\n",
    "                    caps = Variable(torch.LongTensor(caps))\n",
    "                    mask = Variable(torch.LongTensor(mask))\n",
    "                    tags = Variable(torch.LongTensor(tags))\n",
    "                \n",
    "                wordslen = data['wordslen']\n",
    "                \n",
    "                score = self.model(words, tags, verbs, caps, wordslen, mask, usecuda=self.usecuda)\n",
    "                \n",
    "                loss += score.data[0]/np.sum(data['wordslen'])\n",
    "                score.backward()\n",
    "                \n",
    "                nn.utils.clip_grad_norm(self.model.parameters(), 5.0)\n",
    "                self.optimizer.step()\n",
    "                \n",
    "                count += 1\n",
    "                word_count += batch_size\n",
    "                \n",
    "                if count % plot_every == 0:\n",
    "                    loss /= plot_every\n",
    "                    print(word_count, ': ', loss)\n",
    "                    if losses == []:\n",
    "                        losses.append(loss)\n",
    "                    losses.append(loss)\n",
    "                    loss = 0.0\n",
    "                                        \n",
    "            if adjust_lr:\n",
    "                self.adjust_learning_rate(self.optimizer, \n",
    "                                          lr=learning_rate/(1+0.05*word_count/len(train_data)))\n",
    "            \n",
    "            if epoch%self.eval_every==0:\n",
    "                \n",
    "                self.model.train(False)\n",
    "                \n",
    "                if eval_test_train:\n",
    "                    best_train_F, new_train_F, _ = self.evaluator(self.model, train_data, best_train_F, \n",
    "                                                                  checkpoint_folder=checkpoint_folder)\n",
    "                else:\n",
    "                    best_train_F, new_train_F, _ = 0, 0, 0\n",
    "                best_dev_F, new_dev_F, save = self.evaluator(self.model, dev_data, best_dev_F,\n",
    "                                                             checkpoint_folder=checkpoint_folder)\n",
    "                if save:\n",
    "                    torch.save(self.model, os.path.join(self.model_name, checkpoint_folder, 'modelweights'))\n",
    "                    \n",
    "                sys.stdout.flush()\n",
    "                all_F.append([new_train_F, new_dev_F])\n",
    "                self.model.train(True)\n",
    "\n",
    "            print('*'*80)\n",
    "            print('Epoch %d Complete: Time Taken %d' %(epoch ,time.time() - t))\n",
    "\n",
    "        return losses, all_F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initial learning rate is: 0.015\n",
      "800 :  2.595070683346564\n",
      "1600 :  1.63570770355328\n",
      "2400 :  1.3989303576428846\n",
      "3200 :  1.3875672810613253\n",
      "4000 :  1.251654905696571\n",
      "4800 :  1.1671755277483062\n",
      "5600 :  1.2219252985343931\n",
      "6400 :  1.1341341147716555\n",
      "7200 :  1.1073128052637227\n",
      "8000 :  1.1006431717702703\n",
      "8800 :  0.9382336422992033\n",
      "9600 :  0.9032225882171238\n",
      "10400 :  0.834429728740749\n",
      "11200 :  0.8264320164852046\n",
      "12000 :  0.8433165445377331\n",
      "12800 :  0.902060032373508\n",
      "13600 :  0.7889980508425362\n",
      "14400 :  0.770750515068126\n",
      "15200 :  0.7176659991160028\n",
      "16000 :  0.7347934497757698\n",
      "16800 :  0.7159937699445951\n",
      "17600 :  0.7549342765925864\n",
      "18400 :  0.6711838412314545\n",
      "19200 :  0.6797682728412835\n",
      "20000 :  0.670285336786841\n",
      "processed 480026 tokens with 61118 phrases; found: 20013 phrases; correct: 19061.\n",
      "accuracy:  63.21%; precision:  95.24%; recall:  31.19%; FB1:  46.99\n",
      "the best F is  46.99\n",
      "             ARG0: precision:  53.69%; recall:   8.50%; FB1:  14.68  1451\n",
      "             ARG1: precision:  44.78%; recall:   0.21%; FB1:   0.42  67\n",
      "             ARG2: precision:  27.76%; recall:   1.85%; FB1:   3.46  335\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:   0.00%; recall:   0.00%; FB1:   0.00  1\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-NEG: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n",
      "processed 148220 tokens with 13517 phrases; found: 4350 phrases; correct: 4171.\n",
      "accuracy:  72.03%; precision:  95.89%; recall:  30.86%; FB1:  46.69\n",
      "the best F is  46.69\n",
      "             ARG0: precision:  45.97%; recall:   5.32%; FB1:   9.53  211\n",
      "             ARG1: precision:  75.00%; recall:   0.37%; FB1:   0.74  16\n",
      "             ARG2: precision:  31.46%; recall:   2.35%; FB1:   4.36  89\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-NEG: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n",
      "********************************************************************************\n",
      "Epoch 1 Complete: Time Taken 85\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type BiLSTM_CRF. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n",
      "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type WordEncoderRNN. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n",
      "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/serialization.py:159: UserWarning: Couldn't retrieve source code for container of type DecoderCRF. It won't be checked for correctness upon loading.\n",
      "  \"type \" + obj.__name__ + \". It won't be checked \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20800 :  0.6882183358579229\n",
      "21600 :  0.6158887418151069\n",
      "22400 :  0.6379858382695806\n",
      "23200 :  0.6338881753628154\n",
      "24000 :  0.7221007288618645\n",
      "24800 :  0.6692334994880385\n",
      "25600 :  0.6584870191859743\n",
      "26400 :  0.6149539914450052\n",
      "27200 :  0.5753546886244243\n",
      "28000 :  0.6249658306036144\n",
      "28800 :  0.6202996292924768\n",
      "29600 :  0.5303097140216994\n",
      "30400 :  0.5529400836548235\n",
      "31200 :  0.5670427814188075\n",
      "32000 :  0.5517603796957309\n",
      "32800 :  0.5482340737518273\n",
      "33600 :  0.5484248631469815\n",
      "34400 :  0.5226972831029485\n",
      "35200 :  0.5582539177908312\n",
      "36000 :  0.48694343479980534\n",
      "36800 :  0.5336687457863802\n",
      "37600 :  0.5228763094052656\n",
      "38400 :  0.48615921182083605\n",
      "39200 :  0.4934374300434259\n",
      "40000 :  0.521024629667814\n",
      "processed 480026 tokens with 61118 phrases; found: 36482 phrases; correct: 25254.\n",
      "accuracy:  63.76%; precision:  69.22%; recall:  41.32%; FB1:  51.75\n",
      "the best F is  51.75\n",
      "             ARG0: precision:  60.48%; recall:  32.45%; FB1:  42.23  4916\n",
      "             ARG1: precision:  47.20%; recall:  13.57%; FB1:  21.08  4070\n",
      "             ARG2: precision:  16.84%; recall:  22.52%; FB1:  19.27  6739\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.15%; recall:   0.06%; FB1:   0.08  648\n",
      "         ARGM-CAU: precision:   2.38%; recall:   0.37%; FB1:   0.63  42\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  55.89%; recall:  11.87%; FB1:  19.59  467\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  18.18%; recall:   0.42%; FB1:   0.82  22\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  69.00%; recall:  36.64%; FB1:  47.87  813\n",
      "         ARGM-NEG: precision:  94.32%; recall:  18.32%; FB1:  30.68  176\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   2.19%; recall:   1.00%; FB1:   1.37  137\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  23.55%; recall:   2.39%; FB1:   4.34  293\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n",
      "processed 148220 tokens with 13517 phrases; found: 8085 phrases; correct: 5275.\n",
      "accuracy:  66.84%; precision:  65.24%; recall:  39.02%; FB1:  48.84\n",
      "the best F is  48.84\n",
      "             ARG0: precision:  56.58%; recall:  23.79%; FB1:  33.50  767\n",
      "             ARG1: precision:  45.11%; recall:  11.74%; FB1:  18.63  838\n",
      "             ARG2: precision:  16.14%; recall:  24.04%; FB1:  19.31  1778\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  367\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  14\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  43.33%; recall:   5.33%; FB1:   9.49  30\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  33.33%; recall:   1.20%; FB1:   2.31  9\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  74.13%; recall:  24.88%; FB1:  37.26  143\n",
      "         ARGM-NEG: precision:  91.67%; recall:   5.88%; FB1:  11.06  12\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  42\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  17.65%; recall:   1.41%; FB1:   2.62  51\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n",
      "********************************************************************************\n",
      "Epoch 2 Complete: Time Taken 85\n",
      "40800 :  0.5083508440296206\n",
      "41600 :  0.5348447311173639\n",
      "42400 :  0.48967410156797914\n",
      "43200 :  0.4804257025443602\n",
      "44000 :  0.4820553540681084\n",
      "44800 :  0.4835186744653651\n",
      "45600 :  0.49115061463280557\n",
      "46400 :  0.47207852038248205\n",
      "47200 :  0.46277605388038784\n",
      "48000 :  0.45636993644950524\n",
      "48800 :  0.45456567707856826\n",
      "49600 :  0.46243428644997797\n",
      "50400 :  0.44209387543120027\n",
      "51200 :  0.4688497884838728\n",
      "52000 :  0.468698291788258\n",
      "52800 :  0.48279490846164197\n",
      "53600 :  0.4401475456556193\n",
      "54400 :  0.4647406938498482\n",
      "55200 :  0.46355816745800205\n",
      "56000 :  0.446454052465637\n",
      "56800 :  0.4458964018638921\n",
      "57600 :  0.4169279213339319\n",
      "58400 :  0.45952303254907545\n",
      "59200 :  0.4806251593117928\n",
      "60000 :  0.42356525399267964\n",
      "processed 480026 tokens with 61118 phrases; found: 35344 phrases; correct: 28071.\n",
      "accuracy:  69.42%; precision:  79.42%; recall:  45.93%; FB1:  58.20\n",
      "the best F is  58.2\n",
      "             ARG0: precision:  64.31%; recall:  41.63%; FB1:  50.55  5932\n",
      "             ARG1: precision:  49.70%; recall:  21.59%; FB1:  30.10  6151\n",
      "             ARG2: precision:  46.31%; recall:  18.15%; FB1:  26.08  1976\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  1\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  6\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  63.29%; recall:  22.20%; FB1:  32.87  771\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  65.62%; recall:   2.20%; FB1:   4.26  32\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  68.81%; recall:  67.02%; FB1:  67.90  1491\n",
      "         ARGM-NEG: precision:  85.19%; recall:  43.82%; FB1:  57.87  466\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  53.76%; recall:   6.69%; FB1:  11.90  359\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "processed 148220 tokens with 13517 phrases; found: 7486 phrases; correct: 5971.\n",
      "accuracy:  76.01%; precision:  79.76%; recall:  44.17%; FB1:  56.86\n",
      "the best F is  56.86\n",
      "             ARG0: precision:  62.16%; recall:  32.24%; FB1:  42.45  946\n",
      "             ARG1: precision:  50.69%; recall:  22.67%; FB1:  31.33  1440\n",
      "             ARG2: precision:  46.21%; recall:  21.44%; FB1:  29.29  554\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  1\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  53.19%; recall:  10.25%; FB1:  17.18  47\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  75.00%; recall:   1.20%; FB1:   2.35  4\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  74.46%; recall:  56.81%; FB1:  64.45  325\n",
      "         ARGM-NEG: precision:  84.21%; recall:  34.22%; FB1:  48.67  76\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  49.15%; recall:   4.55%; FB1:   8.33  59\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n",
      "********************************************************************************\n",
      "Epoch 3 Complete: Time Taken 83\n",
      "60800 :  0.40572241635802797\n",
      "61600 :  0.43692727017659233\n",
      "62400 :  0.4048061546193281\n",
      "63200 :  0.4334374641845269\n",
      "64000 :  0.3855668415141913\n",
      "64800 :  0.39252109441657695\n",
      "65600 :  0.4410771673758148\n",
      "66400 :  0.41256219151292556\n",
      "67200 :  0.4009535124323801\n",
      "68000 :  0.42645154913141187\n",
      "68800 :  0.37269148941575336\n",
      "69600 :  0.40385962619971866\n",
      "70400 :  0.4192464211664893\n",
      "71200 :  0.44206314422453746\n",
      "72000 :  0.3998335843099822\n",
      "72800 :  0.40387680323352865\n",
      "73600 :  0.41579335267225276\n",
      "74400 :  0.40417522824415464\n",
      "75200 :  0.3868806737374442\n",
      "76000 :  0.3790080665106393\n",
      "76800 :  0.3883778158535519\n",
      "77600 :  0.39477531697965196\n",
      "78400 :  0.3607568962215622\n",
      "79200 :  0.3777649670762876\n",
      "80000 :  0.3733051494769606\n",
      "processed 480026 tokens with 61118 phrases; found: 44560 phrases; correct: 32044.\n",
      "accuracy:  71.48%; precision:  71.91%; recall:  52.43%; FB1:  60.64\n",
      "the best F is  60.64\n",
      "             ARG0: precision:  70.77%; recall:  48.07%; FB1:  57.26  6224\n",
      "             ARG1: precision:  40.33%; recall:  36.90%; FB1:  38.54  12957\n",
      "             ARG2: precision:  44.79%; recall:  25.85%; FB1:  32.78  2909\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:  50.00%; recall:   0.68%; FB1:   1.34  4\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  64.52%; recall:   1.17%; FB1:   2.30  31\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  63.82%; recall:  28.57%; FB1:  39.47  984\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  43.75%; recall:   8.80%; FB1:  14.65  192\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:  50.00%; recall:   0.10%; FB1:   0.19  2\n",
      "         ARGM-MOD: precision:  75.09%; recall:  78.97%; FB1:  76.98  1610\n",
      "         ARGM-NEG: precision:  78.52%; recall:  68.98%; FB1:  73.44  796\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  49.75%; recall:  10.46%; FB1:  17.29  607\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  95.29%; recall:  18.24%; FB1:  30.62  85\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n",
      "processed 148220 tokens with 13517 phrases; found: 9651 phrases; correct: 6806.\n",
      "accuracy:  75.39%; precision:  70.52%; recall:  50.35%; FB1:  58.75\n",
      "the best F is  58.75\n",
      "             ARG0: precision:  70.48%; recall:  39.14%; FB1:  50.33  1013\n",
      "             ARG1: precision:  38.87%; recall:  36.80%; FB1:  37.81  3049\n",
      "             ARG2: precision:  43.49%; recall:  28.81%; FB1:  34.66  791\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  33.33%; recall:   0.88%; FB1:   1.71  12\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  56.58%; recall:  17.62%; FB1:  26.88  76\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  26.67%; recall:   4.78%; FB1:   8.11  45\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  80.27%; recall:  69.72%; FB1:  74.62  370\n",
      "         ARGM-NEG: precision:  76.00%; recall:  60.96%; FB1:  67.66  150\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  44.09%; recall:   6.44%; FB1:  11.23  93\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision: 100.00%; recall:  17.65%; FB1:  30.00  18\n",
      "           R-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "********************************************************************************\n",
      "Epoch 4 Complete: Time Taken 85\n",
      "80800 :  0.3528975792966639\n",
      "81600 :  0.3856716068967173\n",
      "82400 :  0.3933853882412018\n",
      "83200 :  0.39835582128297925\n",
      "84000 :  0.355513030741163\n",
      "84800 :  0.3599182099110152\n",
      "85600 :  0.3777699519713442\n",
      "86400 :  0.3684710382394309\n",
      "87200 :  0.3471861247732421\n",
      "88000 :  0.3637399840776773\n",
      "88800 :  0.38217739926071015\n",
      "89600 :  0.3509991239391935\n",
      "90400 :  0.35326478407091166\n",
      "91200 :  0.3747901553626892\n",
      "92000 :  0.354811235187548\n",
      "92800 :  0.3481711126779035\n",
      "93600 :  0.33357686912956364\n",
      "94400 :  0.3803305651854777\n",
      "95200 :  0.3333186480061536\n",
      "96000 :  0.36435366886194603\n",
      "96800 :  0.3310993157404927\n",
      "97600 :  0.3462115686351135\n",
      "98400 :  0.36192322351908957\n",
      "99200 :  0.32790351002173757\n",
      "100000 :  0.3221779656257461\n",
      "processed 480026 tokens with 61118 phrases; found: 43435 phrases; correct: 32930.\n",
      "accuracy:  73.69%; precision:  75.81%; recall:  53.88%; FB1:  62.99\n",
      "the best F is  62.99\n",
      "             ARG0: precision:  69.80%; recall:  56.03%; FB1:  62.16  7355\n",
      "             ARG1: precision:  47.75%; recall:  36.23%; FB1:  41.20  10744\n",
      "             ARG2: precision:  46.34%; recall:  25.65%; FB1:  33.03  2790\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  56.14%; recall:   1.87%; FB1:   3.63  57\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIS: precision:  64.69%; recall:  28.25%; FB1:  39.33  960\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  41.42%; recall:  14.66%; FB1:  21.66  338\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:  33.33%; recall:   0.19%; FB1:   0.38  6\n",
      "         ARGM-MOD: precision:  80.76%; recall:  78.97%; FB1:  79.85  1497\n",
      "         ARGM-NEG: precision:  94.20%; recall:  69.87%; FB1:  80.23  672\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  62.77%; recall:  12.86%; FB1:  21.34  591\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  78.82%; recall:  36.04%; FB1:  49.46  203\n",
      "           R-ARG1: precision:  73.02%; recall:  11.36%; FB1:  19.66  63\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n",
      "processed 148220 tokens with 13517 phrases; found: 9333 phrases; correct: 6981.\n",
      "accuracy:  78.54%; precision:  74.80%; recall:  51.65%; FB1:  61.10\n",
      "the best F is  61.1\n",
      "             ARG0: precision:  68.08%; recall:  45.83%; FB1:  54.78  1228\n",
      "             ARG1: precision:  47.13%; recall:  37.17%; FB1:  41.56  2540\n",
      "             ARG2: precision:  45.79%; recall:  27.81%; FB1:  34.60  725\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  46.15%; recall:   2.64%; FB1:   4.99  26\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:  33.33%; recall:   1.45%; FB1:   2.78  3\n",
      "         ARGM-DIS: precision:  61.02%; recall:  14.75%; FB1:  23.76  59\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  25.97%; recall:   7.97%; FB1:  12.20  77\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MOD: precision:  83.80%; recall:  70.42%; FB1:  76.53  358\n",
      "         ARGM-NEG: precision:  90.98%; recall:  64.71%; FB1:  75.62  133\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  55.06%; recall:   7.69%; FB1:  13.50  89\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  74.47%; recall:  34.31%; FB1:  46.98  47\n",
      "           R-ARG1: precision:  57.14%; recall:  10.13%; FB1:  17.20  14\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n",
      "********************************************************************************\n",
      "Epoch 5 Complete: Time Taken 84\n",
      "100800 :  0.34143797499221756\n",
      "101600 :  0.3526622322199346\n",
      "102400 :  0.3478468970900649\n",
      "103200 :  0.3348167399097359\n",
      "104000 :  0.3211109213336898\n",
      "104800 :  0.3422895248978758\n",
      "105600 :  0.3265882695909241\n",
      "106400 :  0.3201018907830185\n",
      "107200 :  0.3380174946993445\n",
      "108000 :  0.30475681816352906\n",
      "108800 :  0.33834310179044824\n",
      "109600 :  0.3116646216557285\n",
      "110400 :  0.32499433652876125\n",
      "111200 :  0.3464987239775502\n",
      "112000 :  0.33971669783568437\n",
      "112800 :  0.34479909132216735\n",
      "113600 :  0.3414759438638403\n",
      "114400 :  0.32748899305781176\n",
      "115200 :  0.34203249817081627\n",
      "116000 :  0.3545140434831485\n",
      "116800 :  0.34204588710681794\n",
      "117600 :  0.32355119201197974\n",
      "118400 :  0.3271140183170837\n",
      "119200 :  0.3327752217093395\n",
      "120000 :  0.2822234007144388\n",
      "processed 480026 tokens with 61118 phrases; found: 47410 phrases; correct: 34941.\n",
      "accuracy:  72.17%; precision:  73.70%; recall:  57.17%; FB1:  64.39\n",
      "the best F is  64.39\n",
      "             ARG0: precision:  74.60%; recall:  59.73%; FB1:  66.34  7336\n",
      "             ARG1: precision:  56.02%; recall:  35.87%; FB1:  43.74  9066\n",
      "             ARG2: precision:  31.32%; recall:  37.28%; FB1:  34.04  5999\n",
      "             ARG3: precision:  12.50%; recall:   0.33%; FB1:   0.64  8\n",
      "             ARG4: precision:  58.06%; recall:  12.24%; FB1:  20.22  62\n",
      "             ARG5: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  31.04%; recall:  10.30%; FB1:  15.47  567\n",
      "         ARGM-CAU: precision:  66.67%; recall:   0.73%; FB1:   1.45  3\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:  29.27%; recall:   4.08%; FB1:   7.16  41\n",
      "         ARGM-DIS: precision:  61.75%; recall:  40.17%; FB1:  48.68  1430\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  40.91%; recall:  14.14%; FB1:  21.01  330\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:  49.18%; recall:   2.87%; FB1:   5.42  61\n",
      "         ARGM-MOD: precision:  85.19%; recall:  86.41%; FB1:  85.80  1553\n",
      "         ARGM-NEG: precision:  91.82%; recall:  79.25%; FB1:  85.07  782\n",
      "         ARGM-PNC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:  13.33%; recall:   5.32%; FB1:   7.60  120\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  49.20%; recall:  25.71%; FB1:  33.77  1508\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  75.66%; recall:  45.50%; FB1:  56.82  267\n",
      "           R-ARG1: precision:  63.56%; recall:  18.52%; FB1:  28.68  118\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  18159\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "processed 148220 tokens with 13517 phrases; found: 10204 phrases; correct: 7346.\n",
      "accuracy:  77.17%; precision:  71.99%; recall:  54.35%; FB1:  61.94\n",
      "the best F is  61.94\n",
      "             ARG0: precision:  72.10%; recall:  50.71%; FB1:  59.54  1283\n",
      "             ARG1: precision:  55.39%; recall:  35.40%; FB1:  43.20  2058\n",
      "             ARG2: precision:  28.73%; recall:  36.43%; FB1:  32.13  1514\n",
      "             ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "             ARG4: precision:  66.67%; recall:   8.89%; FB1:  15.69  6\n",
      "         ARGM-ADJ: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADV: precision:  26.14%; recall:   8.79%; FB1:  13.16  153\n",
      "         ARGM-CAU: precision:   0.00%; recall:   0.00%; FB1:   0.00  1\n",
      "         ARGM-COM: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-DIR: precision:  20.00%; recall:   2.90%; FB1:   5.06  10\n",
      "         ARGM-DIS: precision:  54.81%; recall:  23.36%; FB1:  32.76  104\n",
      "         ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-GOL: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-LOC: precision:  31.94%; recall:   9.16%; FB1:  14.24  72\n",
      "         ARGM-LVB: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-MNR: precision:  36.00%; recall:   3.18%; FB1:   5.84  25\n",
      "         ARGM-MOD: precision:  84.86%; recall:  80.28%; FB1:  82.51  403\n",
      "         ARGM-NEG: precision:  91.67%; recall:  70.59%; FB1:  79.76  144\n",
      "         ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-PRP: precision:  16.13%; recall:   4.81%; FB1:   7.41  31\n",
      "         ARGM-REC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-TMP: precision:  49.64%; recall:  21.51%; FB1:  30.01  276\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG1: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  71.43%; recall:  49.02%; FB1:  58.14  70\n",
      "           R-ARG1: precision:  55.00%; recall:  13.92%; FB1:  22.22  20\n",
      "           R-ARG2: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "                V: precision: 100.00%; recall: 100.00%; FB1: 100.00  4034\n",
      "********************************************************************************\n",
      "Epoch 6 Complete: Time Taken 84\n",
      "120800 :  0.3200924665825098\n",
      "121600 :  0.31639687153889773\n",
      "122400 :  0.3244397704643533\n",
      "123200 :  0.3212869366352785\n",
      "124000 :  0.32675256082000853\n",
      "124800 :  0.33956378654579766\n",
      "125600 :  0.3006158173302943\n",
      "126400 :  0.30852845849463406\n",
      "127200 :  0.3095377692382682\n",
      "128000 :  0.3208773544995767\n",
      "128800 :  0.3024519268188522\n",
      "129600 :  0.3037696130334363\n",
      "130400 :  0.28959302695281053\n",
      "131200 :  0.30267013299469\n",
      "132000 :  0.31588105726529575\n",
      "132800 :  0.33602528247409974\n",
      "133600 :  0.298477335388522\n",
      "134400 :  0.2949402498331583\n",
      "135200 :  0.3144162691780548\n",
      "136000 :  0.29448897928116835\n",
      "136800 :  0.3029326680041835\n",
      "137600 :  0.2768891222619274\n",
      "138400 :  0.3071314750916378\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-12-a5e225a197eb>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     17\u001b[0m \u001b[0mmodel_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'.'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0mtrainer\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTrainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0musedataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'consrl'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmappings\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0mmappings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 19\u001b[0;31m \u001b[0mlosses\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mall_F\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdev_data\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     20\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     21\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlosses\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-11-9cdb38456be4>\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(self, num_epochs, train_data, dev_data, learning_rate, checkpoint_folder, eval_test_train, plot_every, adjust_lr, batch_size)\u001b[0m\n\u001b[1;32m     62\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     63\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mscore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m/\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'wordslen'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m                 \u001b[0mscore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     65\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     66\u001b[0m                 \u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclip_grad_norm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5.0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/autograd/variable.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m    165\u001b[0m                 \u001b[0mVariable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    166\u001b[0m         \"\"\"\n\u001b[0;32m--> 167\u001b[0;31m         \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_variables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    168\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    169\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(variables, grad_variables, retain_graph, create_graph, retain_variables)\u001b[0m\n\u001b[1;32m     97\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     98\u001b[0m     Variable._execution_engine.run_backward(\n\u001b[0;32m---> 99\u001b[0;31m         variables, grad_variables, retain_graph)\n\u001b[0m\u001b[1;32m    100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    101\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Hyperparameters for the BiLSTM-CRF SRL model.\n",
    "word_vocab_size = len(word_to_id)\n",
    "word_embedding_dim = 100\n",
    "word_hidden_dim = 200\n",
    "verb_embedding_dim = 10\n",
    "cap_embedding_dim = 0  # presumably 0 disables capitalization features — confirm in BiLSTM_CRF\n",
    "\n",
    "# Build the model; word embeddings are initialized from the pretrained\n",
    "# matrix (word_embeds) built in the data-loading cell above.\n",
    "model = BiLSTM_CRF(word_vocab_size, word_embedding_dim, word_hidden_dim, tag_to_id, \n",
    "                   verb_embedding_dim, cap_embedding_dim, pretrained = word_embeds)\n",
    "    \n",
    "    \n",
    "# NOTE(review): assumes a CUDA-capable GPU is available.\n",
    "model.cuda()\n",
    "learning_rate = 0.015\n",
    "print('Initial learning rate is: %s' %(learning_rate))\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)\n",
    "\n",
    "# Results are written under result_path/model_name ('.' = current dir).\n",
    "result_path = '.'\n",
    "model_name = '.'\n",
    "trainer = Trainer(model, optimizer, result_path, model_name, usedataset='consrl', mappings= mappings) \n",
    "# Train for 10 epochs; the traceback above shows this run was interrupted manually.\n",
    "losses, all_F = trainer.train_model(10, train_data, dev_data, learning_rate = learning_rate)\n",
    "    \n",
    "plt.plot(losses)\n",
    "plt.savefig(os.path.join(result_path, model_name, 'lossplot.png'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports for the evaluation section (stdlib -> third-party -> project).\n",
    "# Fix: the original imported pickle twice; deduplicated and regrouped.\n",
    "import codecs\n",
    "import itertools\n",
    "import os\n",
    "import pickle as pkl\n",
    "\n",
    "import torch\n",
    "torch.manual_seed(0)  # fix the torch RNG for reproducibility\n",
    "import torch.nn as nn\n",
    "from torch.nn import init\n",
    "from torch.autograd import Variable\n",
    "\n",
    "from neural_srl.util.utils import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 36278 unique words (5905439 in total)\n",
      "Found 129 unique named entity tags\n",
      "Loading pretrained embeddings from wordvectors/glove.6B.100d.txt...\n",
      "225114 / 31577 / 23045 sentences in train / dev / test.\n"
     ]
    }
   ],
   "source": [
    "word_dim = 100\n",
    "pretrained = 'wordvectors/glove.6B.100d.txt'\n",
    "dataset = 'datasets/conll05srl/'\n",
    "\n",
    "# Load the pickled CoNLL-2005 SRL splits; each item is a\n",
    "# (words, verb-indicator strings, BIO tags) triple (see sample below).\n",
    "# NOTE(review): the open() handles are never closed; prefer 'with open(...)'.\n",
    "srl_train_data = pkl.load(open(os.path.join(dataset,'train_data.pkl'),'rb'),errors='ignore')\n",
    "srl_val_data = pkl.load(open(os.path.join(dataset,'dev_data.pkl'),'rb'),errors='ignore')\n",
    "srl_test_data = pkl.load(open(os.path.join(dataset,'test_data.pkl'),'rb'),errors='ignore')\n",
    "\n",
    "# Normalize the last BIO tag of every sentence by stripping surrounding whitespace.\n",
    "for i in range(len(srl_train_data)):\n",
    "    srl_train_data[i][2][-1] = srl_train_data[i][2][-1].strip()\n",
    "for i in range(len(srl_val_data)):\n",
    "    srl_val_data[i][2][-1] = srl_val_data[i][2][-1].strip()\n",
    "for i in range(len(srl_test_data)):\n",
    "    srl_test_data[i][2][-1] = srl_test_data[i][2][-1].strip()\n",
    "\n",
    "# Word vocabulary from train only; tag mapping from all splits so every tag has an id.\n",
    "dico_words_train, _, _ = word_mapping(srl_train_data)\n",
    "dico_tags, tag_to_id, id_to_tag = tag_mapping(srl_train_data+srl_val_data+srl_test_data)\n",
    "\n",
    "# Extend the train vocabulary with pretrained words that appear in dev/test.\n",
    "dico_words, word_to_id, id_to_word = augment_with_pretrained(\n",
    "                                     dico_words_train.copy(), pretrained,\n",
    "                                     list(itertools.chain.from_iterable(\n",
    "                                     [[str(x).lower() for x in s[0]] for s in srl_val_data+srl_test_data])))\n",
    "\n",
    "train_data = prepare_dataset(srl_train_data, word_to_id, tag_to_id)\n",
    "dev_data = prepare_dataset(srl_val_data, word_to_id, tag_to_id)\n",
    "test_data = prepare_dataset(srl_test_data, word_to_id, tag_to_id)\n",
    "\n",
    "print(\"%i / %i / %i sentences in train / dev / test.\" % (len(train_data), len(dev_data), len(test_data)))\n",
    "\n",
    "mapping_file = os.path.join(dataset,'mapping.pkl')\n",
    "\n",
    "# NOTE(review): word_embeds is only created inside this branch, but the\n",
    "# model-construction cell uses word_embeds unconditionally — if mapping.pkl\n",
    "# already exists this leaves it undefined. Confirm the intended flow.\n",
    "if not os.path.isfile(mapping_file):\n",
    "    all_word_embeds = {}\n",
    "    for i, line in enumerate(codecs.open(pretrained, 'r', 'utf-8')):\n",
    "        s = line.strip().split()\n",
    "        if len(s) == word_dim + 1:\n",
    "            all_word_embeds[s[0]] = np.array([float(i) for i in s[1:]])\n",
    "\n",
    "    # Random init in [-sqrt(0.06), sqrt(0.06)); rows are overwritten below\n",
    "    # where a pretrained vector (exact or lowercased match) exists.\n",
    "    word_embeds = np.random.uniform(-np.sqrt(0.06), np.sqrt(0.06), (len(word_to_id), word_dim))\n",
    "\n",
    "    for w in word_to_id:\n",
    "        if w in all_word_embeds:\n",
    "            word_embeds[word_to_id[w]] = all_word_embeds[w]\n",
    "        elif w.lower() in all_word_embeds:\n",
    "            word_embeds[word_to_id[w]] = all_word_embeds[w.lower()]\n",
    "\n",
    "    # NOTE(review): this counts vectors in the embeddings file, not how\n",
    "    # many vocabulary words were actually matched.\n",
    "    print('Loaded %i pretrained embeddings.' % len(all_word_embeds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'caps': [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0],\n",
       " 'str_words': ['We',\n",
       "  'respectfully',\n",
       "  'invite',\n",
       "  'you',\n",
       "  'to',\n",
       "  'watch',\n",
       "  'a',\n",
       "  'special',\n",
       "  'edition',\n",
       "  'of',\n",
       "  'Across',\n",
       "  'China',\n",
       "  '.'],\n",
       " 'tags': [6, 22, 3, 5, 9, 2, 2, 2, 2, 2, 2, 2, 0],\n",
       " 'verbs': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       " 'words': [36, 12490, 4367, 16, 5, 1401, 8, 467, 3791, 7, 998, 132, 4]}"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the first prepared training example (dict of index lists).\n",
    "train_data[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'B-ARG0'"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: tag id 6 maps back to 'B-ARG0' (see output).\n",
    "id_to_tag[6]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['We',\n",
       "  'respectfully',\n",
       "  'invite',\n",
       "  'you',\n",
       "  'to',\n",
       "  'watch',\n",
       "  'a',\n",
       "  'special',\n",
       "  'edition',\n",
       "  'of',\n",
       "  'Across',\n",
       "  'China',\n",
       "  '.'],\n",
       " ['0', '0', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'],\n",
       " ['B-ARG0',\n",
       "  'B-ARGM-MNR',\n",
       "  'B-V',\n",
       "  'B-ARG1',\n",
       "  'B-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'I-ARG2',\n",
       "  'O'])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw (pre-indexing) version of the same first example.\n",
    "srl_train_data[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import print_function\n",
    "from collections import OrderedDict\n",
    "import os\n",
    "import neural_srl\n",
    "from neural_srl.util import Loader\n",
    "from neural_srl.models import BiLSTM_CRF\n",
    "# NOTE(review): wildcard import pollutes the namespace (create_batches,\n",
    "# word_mapping, etc. come from here); prefer explicit names.\n",
    "from neural_srl.util.utils import *\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.autograd import Variable\n",
    "import torch\n",
    "import random\n",
    "random.seed(0)  # fix the Python RNG for reproducibility"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Redundant re-imports (torch and codecs are imported above); harmless.\n",
    "import torch\n",
    "import codecs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the trained checkpoint.\n",
    "# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.\n",
    "model = torch.load('neural_srl/results/conll05srl/BiLSTM_CRF/modelweights')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "def convert_bio_tags_to_conll_format(labels):\n",
    "    \"\"\"\n",
    "    Convert BIO-formatted SRL tags into the bracketed span format expected\n",
    "    by the official CoNLL-2005 perl evaluation script.\n",
    "\n",
    "    A span opens with \"(LABEL*\" at its first token and closes with \")\"\n",
    "    at its last token; every other in-span token is \"*\". Length-1 spans\n",
    "    get both markers, e.g. \"(ARG-0*)\".\n",
    "\n",
    "    Example:\n",
    "        [B-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, O]\n",
    "        -> [\"(ARG-1*\", \"*\", \"*\", \"*\", \"*)\", \"*\"]\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    labels : List[str], required.\n",
    "        A list of BIO tags to convert to the CONLL span based format.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    A list of labels in the CONLL span based format.\n",
    "    \"\"\"\n",
    "    n = len(labels)\n",
    "    spans = []\n",
    "    for idx, tag in enumerate(labels):\n",
    "        if tag == \"O\":\n",
    "            spans.append(\"*\")\n",
    "            continue\n",
    "        # A span opens here if the tag is a B- tag, we are at the first\n",
    "        # token, or the label differs from the previous token's label.\n",
    "        opens = tag[0] == \"B\" or idx == 0 or tag[1:] != labels[idx - 1][1:]\n",
    "        # A span closes here if this is the last token, the next tag is a\n",
    "        # B- tag, or the label differs from the next token's label.\n",
    "        closes = idx == n - 1 or labels[idx + 1][0] == \"B\" or tag[1:] != labels[idx + 1][1:]\n",
    "        piece = \"*\"\n",
    "        if opens:\n",
    "            piece = \"(\" + tag[2:] + piece\n",
    "        if closes:\n",
    "            piece = piece + \")\"\n",
    "        spans.append(piece)\n",
    "    return spans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "def write_to_conll_eval_file(prediction_file,\n",
    "                             gold_file,\n",
    "                             verb_index,\n",
    "                             sentence,\n",
    "                             prediction,\n",
    "                             gold_labels):\n",
    "    \"\"\"\n",
    "    Prints predicate argument predictions and gold labels for a single verbal\n",
    "    predicate in a sentence to two provided file references.\n",
    "    Parameters\n",
    "    ----------\n",
    "    prediction_file : TextIO, required.\n",
    "        A file reference to print predictions to.\n",
    "    gold_file : TextIO, required.\n",
    "        A file reference to print gold labels to.\n",
    "    verb_index : Optional[int], required.\n",
    "        The index of the verbal predicate in the sentence which\n",
    "        the gold labels are the arguments for, or None if the sentence\n",
    "        contains no verbal predicate.\n",
    "    sentence : List[str], required.\n",
    "        The word tokens.\n",
    "    prediction : List[str], required.\n",
    "        The predicted BIO labels.\n",
    "    gold_labels : List[str], required.\n",
    "        The gold BIO labels.\n",
    "    \"\"\"\n",
    "    # Keep only the predicate word; every other token is shown as \"-\".\n",
    "    verb_only_sentence = [\"-\"] * len(sentence)\n",
    "    if verb_index is not None:\n",
    "        verb_only_sentence[verb_index] = sentence[verb_index]\n",
    "\n",
    "    conll_format_predictions = convert_bio_tags_to_conll_format(prediction)\n",
    "    conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels)\n",
    "\n",
    "    # Two fixed-width columns per row: the word padded to 15 chars on the\n",
    "    # left, the span label padded to 15 on the right.\n",
    "    for word, predicted, gold in zip(verb_only_sentence,\n",
    "                                     conll_format_predictions,\n",
    "                                     conll_format_gold_labels):\n",
    "        prediction_file.write(word.ljust(15))\n",
    "        prediction_file.write(predicted.rjust(15) + \"\\n\")\n",
    "        gold_file.write(word.ljust(15))\n",
    "        gold_file.write(gold.rjust(15) + \"\\n\")\n",
    "    # Blank line terminates this proposition in the CoNLL format.\n",
    "    prediction_file.write(\"\\n\")\n",
    "    gold_file.write(\"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 128,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Evaluator(object):\n",
    "    \"\"\"Scores a trained model on a dataset with the official CoNLL\n",
    "    evaluation scripts (token-level conlleval or span-level srl-eval.pl).\n",
    "    Score files are written under result_path/model_name/checkpoint_folder.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, result_path, model_name, mappings, usecuda=True):\n",
    "        self.result_path = result_path\n",
    "        self.model_name = model_name\n",
    "        self.tag_to_id = mappings['tag_to_id']\n",
    "        self.id_to_tag = mappings['id_to_tag']\n",
    "        self.usecuda = usecuda\n",
    "\n",
    "    def evaluate_ner(self, model, dataset, best_F, eval_script='./datasets/conll/conlleval',\n",
    "                          checkpoint_folder='.', record_confmat = False, batch_size = 80):\n",
    "        \"\"\"Token-level evaluation via the conlleval script.\n",
    "\n",
    "        Writes 'word gold pred' lines to pred.txt, runs eval_script over\n",
    "        them, and parses the overall FB1 score from the output. Returns\n",
    "        (best_F, new_F, save) where save is True iff new_F beat best_F.\n",
    "        \"\"\"\n",
    "        \n",
    "        model.eval()\n",
    "        \n",
    "        prediction = []\n",
    "        save = False\n",
    "        new_F = 0.0\n",
    "        # NOTE(review): sized without 2 tags (presumably START/STOP), yet\n",
    "        # indexed below with raw tag ids — confirm those ids cannot occur here.\n",
    "        confusion_matrix = torch.zeros((len(self.tag_to_id) - 2, len(self.tag_to_id) - 2))\n",
    "    \n",
    "        data_batches = create_batches(dataset, batch_size = batch_size, str_words = True,\n",
    "                                      tag_padded = False)\n",
    "\n",
    "        for data in data_batches:\n",
    "\n",
    "            words = data['words']\n",
    "            verbs = data['verbs']\n",
    "            caps = data['caps']\n",
    "            mask = data['tagsmask']\n",
    "\n",
    "            if self.usecuda:\n",
    "                words = Variable(torch.LongTensor(words)).cuda()\n",
    "                verbs = Variable(torch.LongTensor(verbs)).cuda()\n",
    "                caps = Variable(torch.LongTensor(caps)).cuda()\n",
    "                mask = Variable(torch.LongTensor(mask)).cuda()\n",
    "            else:\n",
    "                words = Variable(torch.LongTensor(words))\n",
    "                verbs = Variable(torch.LongTensor(verbs))\n",
    "                caps = Variable(torch.LongTensor(caps))\n",
    "                mask = Variable(torch.LongTensor(mask))\n",
    "\n",
    "            wordslen = data['wordslen']\n",
    "            str_words = data['str_words']\n",
    "            \n",
    "            # Decode predicted tag-id sequences (first return value unused).\n",
    "            _, out = model.decode(words, verbs, caps, wordslen, mask, usecuda = self.usecuda)\n",
    "                                \n",
    "            ground_truth_id = data['tags']\n",
    "            predicted_id = out            \n",
    "            \n",
    "            for (swords, sground_truth_id, spredicted_id) in zip(str_words, ground_truth_id, predicted_id):\n",
    "                for (word, true_id, pred_id) in zip(swords, sground_truth_id, spredicted_id):\n",
    "                    # Skip tokens whose gold tag is the verb-predicate marker.\n",
    "                    if self.id_to_tag[true_id]!='B-V':\n",
    "                        line = ' '.join([word, self.id_to_tag[true_id], self.id_to_tag[pred_id]])\n",
    "                        prediction.append(line)\n",
    "                        confusion_matrix[true_id, pred_id] += 1\n",
    "                # Blank line terminates a sentence in conlleval input.\n",
    "                prediction.append('')\n",
    "        \n",
    "        predf = os.path.join(self.result_path, self.model_name, checkpoint_folder ,'pred.txt')\n",
    "        scoref = os.path.join(self.result_path, self.model_name, checkpoint_folder ,'score.txt')\n",
    "\n",
    "        with open(predf, 'w+') as f:\n",
    "            f.write('\\n'.join(prediction))\n",
    "\n",
    "        # Pipe the prediction file through the eval script via the shell.\n",
    "        os.system('%s < %s > %s' % (eval_script, predf, scoref))\n",
    "\n",
    "        eval_lines = [l.rstrip() for l in codecs.open(scoref, 'r', 'utf8')]\n",
    "\n",
    "        for i, line in enumerate(eval_lines):\n",
    "            print(line)\n",
    "            # The second output line ends with the overall FB1 score.\n",
    "            if i == 1:\n",
    "                new_F = float(line.strip().split()[-1])\n",
    "                if new_F > best_F:\n",
    "                    best_F = new_F\n",
    "                    save = True\n",
    "                    print('the best F is ', new_F)\n",
    "        \n",
    "        return best_F, new_F, save\n",
    "    \n",
    "    def evaluate_srl(self, model, dataset, best_F, \n",
    "                        eval_script='/home/ubuntu/PGMProject/datasets/srlconll-1.1/bin/srl-eval.pl',\n",
    "                        checkpoint_folder='.', record_confmat = False, batch_size = 80):\n",
    "        \"\"\"Span-level SRL evaluation via the CoNLL-2005 srl-eval.pl script.\n",
    "\n",
    "        Writes gold and predicted spans in CoNLL format, runs the perl\n",
    "        scorer, and parses the overall F1. Returns (best_F, new_F, save).\n",
    "        NOTE(review): eval_script and the PERL5LIB/PATH values below are\n",
    "        machine-specific absolute paths — parameterize for portability.\n",
    "        \"\"\"\n",
    "        \n",
    "        v1= 'PERL5LIB=\"$HOME/PGMProject/datasets/srlconll-1.1/lib:$PERL5LIB\"'\n",
    "        v2= 'PATH=\"$HOME/PGMProject/datasets/srlconll-1.1/bin:$PATH\"'\n",
    "\n",
    "        model.eval()\n",
    "        \n",
    "        prediction = []\n",
    "        save = False\n",
    "        new_F = 0.0\n",
    "        # Not referenced again in this method; kept for parity with evaluate_ner.\n",
    "        confusion_matrix = torch.zeros((len(self.tag_to_id) - 2, len(self.tag_to_id) - 2))\n",
    "    \n",
    "        data_batches = create_batches(dataset, batch_size = batch_size, str_words = True,\n",
    "                                      tag_padded = False)\n",
    "        \n",
    "        predf = os.path.join(self.result_path, self.model_name, \n",
    "                             checkpoint_folder ,'pred1.txt')\n",
    "        goldf = os.path.join(self.result_path, self.model_name, \n",
    "                             checkpoint_folder ,'gold1.txt')\n",
    "        scoref = os.path.join(self.result_path, self.model_name, \n",
    "                             checkpoint_folder ,'score1.txt')\n",
    "        \n",
    "        predfile = open(predf,'w+')\n",
    "        goldfile = open(goldf,'w+')\n",
    "\n",
    "        for data in data_batches:\n",
    "\n",
    "            words = data['words']\n",
    "            verbs = data['verbs']\n",
    "            caps = data['caps']\n",
    "            mask = data['tagsmask']\n",
    "\n",
    "            if self.usecuda:\n",
    "                words = Variable(torch.LongTensor(words)).cuda()\n",
    "                verbs = Variable(torch.LongTensor(verbs)).cuda()\n",
    "                caps = Variable(torch.LongTensor(caps)).cuda()\n",
    "                mask = Variable(torch.LongTensor(mask)).cuda()\n",
    "            else:\n",
    "                words = Variable(torch.LongTensor(words))\n",
    "                verbs = Variable(torch.LongTensor(verbs))\n",
    "                caps = Variable(torch.LongTensor(caps))\n",
    "                mask = Variable(torch.LongTensor(mask))\n",
    "\n",
    "            wordslen = data['wordslen']\n",
    "            str_words = data['str_words']\n",
    "            \n",
    "            # Decode predicted tag-id sequences (first return value unused).\n",
    "            _, out = model.decode(words, verbs, caps, wordslen, mask, usecuda = self.usecuda)\n",
    "                                \n",
    "            ground_truth_id = data['tags']\n",
    "            predicted_id = out            \n",
    "            \n",
    "            for (swords, sground_truth_id, spredicted_id, sverb) in \\\n",
    "                zip(str_words, ground_truth_id, predicted_id, data['verbs']):\n",
    "                # Map tag ids back to BIO strings for the CoNLL writer.\n",
    "                tspredicted_id = [self.id_to_tag[idxt] for idxt in spredicted_id]\n",
    "                tsground_truth_id = [self.id_to_tag[idxt] for idxt in sground_truth_id]\n",
    "                try:\n",
    "                    verb_index = list(sverb).index(1)\n",
    "                except (KeyboardInterrupt, SystemExit):\n",
    "                    raise\n",
    "                # No 1 in the verb-indicator vector -> no marked predicate.\n",
    "                # NOTE(review): bare except also hides unrelated errors;\n",
    "                # catching ValueError would be safer.\n",
    "                except:\n",
    "                    verb_index = None\n",
    "                write_to_conll_eval_file(predfile,\n",
    "                                         goldfile,\n",
    "                                         verb_index,\n",
    "                                         swords,\n",
    "                                         tspredicted_id,\n",
    "                                         tsground_truth_id)\n",
    "\n",
    "        predfile.close()\n",
    "        goldfile.close()\n",
    "\n",
    "        # Run the perl scorer with its lib/bin prepended to PERL5LIB/PATH.\n",
    "        out = os.system('%s %s %s %s %s > %s' % (v1, v2, eval_script, goldf, predf, scoref))\n",
    "        \n",
    "        eval_lines = [l.rstrip() for l in codecs.open(scoref, 'r', 'utf8')]\n",
    "\n",
    "        for i, line in enumerate(eval_lines):\n",
    "            print(line)\n",
    "            # The 'Overall' row's last column is the F1 score.\n",
    "            if 'Overall' in line:\n",
    "                new_F = float(line.strip().split()[-1])\n",
    "                if new_F > best_F:\n",
    "                    best_F = new_F\n",
    "                    save = True\n",
    "                    print('the best F is ', new_F)\n",
    "        \n",
    "        return best_F, new_F, save"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Minimal parameter dict consumed by Loader.load_conll05srl below.\n",
    "parameters = OrderedDict()\n",
    "parameters['wrdim'] = 100\n",
    "parameters['ptrnd'] = 'wordvectors/glove.6B.100d.txt'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 36278 unique words (5905439 in total)\n",
      "Found 129 unique named entity tags\n",
      "Loading pretrained embeddings from wordvectors/glove.6B.100d.txt...\n",
      "225114 / 31577 / 23045 sentences in train / dev / test.\n"
     ]
    }
   ],
   "source": [
    "# Load prepared train/dev/test splits plus the word/tag mappings.\n",
    "loader = Loader()\n",
    "train_data, dev_data, test_data, mappings = loader.load_conll05srl('datasets/conll05srl/', parameters)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Where evaluation artifacts (pred/gold/score files) are written.\n",
    "result_path = 'neural_srl/results/conll05srl'\n",
    "model_name = 'BiLSTM_CRF'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 129,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the evaluator with the loaded tag mappings (CUDA on by default).\n",
    "evaluator = Evaluator(result_path, model_name, mappings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/Active-NLP/neural_srl/modules/VanillaRNN.py:31: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().\n",
      "  output, _ = self.rnn(embedded)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "processed 590872 tokens with 60728 phrases; found: 60245 phrases; correct: 48039.\n",
      "accuracy:  88.09%; precision:  79.74%; recall:  79.11%; FB1:  79.42\n",
      "the best F is  79.42\n",
      "             ARG0: precision:  87.55%; recall:  88.37%; FB1:  87.95  13153\n",
      "             ARG1: precision:  81.15%; recall:  82.54%; FB1:  81.84  20943\n",
      "             ARG2: precision:  75.49%; recall:  73.72%; FB1:  74.59  6768\n",
      "             ARG3: precision:  66.33%; recall:  36.26%; FB1:  46.89  199\n",
      "             ARG4: precision:  70.94%; recall:  67.30%; FB1:  69.07  351\n",
      "             ARG5: precision:  66.67%; recall:  22.22%; FB1:  33.33  3\n",
      "             ARGA: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-ADJ: precision:  34.31%; recall:  34.81%; FB1:  34.56  137\n",
      "         ARGM-ADV: precision:  69.29%; recall:  55.48%; FB1:  61.62  1957\n",
      "         ARGM-CAU: precision:  70.77%; recall:  64.32%; FB1:  67.39  349\n",
      "         ARGM-COM: precision:  35.56%; recall:  55.17%; FB1:  43.24  45\n",
      "         ARGM-DIR: precision:  44.76%; recall:  46.27%; FB1:  45.50  429\n",
      "         ARGM-DIS: precision:  79.91%; recall:  79.05%; FB1:  79.48  2583\n",
      "         ARGM-DSP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "         ARGM-EXT: precision:  47.52%; recall:  43.51%; FB1:  45.42  141\n",
      "         ARGM-GOL: precision:  35.48%; recall:  28.95%; FB1:  31.88  62\n",
      "         ARGM-LOC: precision:  60.87%; recall:  60.87%; FB1:  60.87  1536\n",
      "         ARGM-LVB: precision:  73.61%; recall:  80.30%; FB1:  76.81  72\n",
      "         ARGM-MNR: precision:  53.10%; recall:  56.21%; FB1:  54.61  1484\n",
      "         ARGM-MOD: precision:  96.45%; recall:  98.06%; FB1:  97.25  2255\n",
      "         ARGM-NEG: precision:  91.85%; recall:  97.29%; FB1:  94.49  1252\n",
      "         ARGM-PNC: precision:  28.57%; recall:   7.79%; FB1:  12.24  21\n",
      "         ARGM-PRD: precision:  20.78%; recall:   5.80%; FB1:   9.07  77\n",
      "         ARGM-PRP: precision:  58.47%; recall:  47.66%; FB1:  52.52  366\n",
      "         ARGM-REC: precision:  80.00%; recall:  11.76%; FB1:  20.51  5\n",
      "         ARGM-TMP: precision:  77.25%; recall:  81.77%; FB1:  79.45  4501\n",
      "           C-ARG0: precision:   0.00%; recall:   0.00%; FB1:   0.00  5\n",
      "           C-ARG1: precision:  39.29%; recall:  28.95%; FB1:  33.33  168\n",
      "           C-ARG2: precision:   7.14%; recall:   5.26%; FB1:   6.06  14\n",
      "           C-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           C-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MNR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-MOD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       C-ARGM-TMP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "           R-ARG0: precision:  88.12%; recall:  89.36%; FB1:  88.73  648\n",
      "           R-ARG1: precision:  86.01%; recall:  80.53%; FB1:  83.18  529\n",
      "           R-ARG2: precision:  63.33%; recall:  40.43%; FB1:  49.35  30\n",
      "           R-ARG3: precision:   0.00%; recall:   0.00%; FB1:   0.00  2\n",
      "           R-ARG4: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-ADV: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-CAU: precision:  50.00%; recall:  25.00%; FB1:  33.33  2\n",
      "       R-ARGM-DIR: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-EXT: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-LOC: precision:  58.43%; recall:  75.36%; FB1:  65.82  89\n",
      "       R-ARGM-MNR: precision:  60.00%; recall:  27.27%; FB1:  37.50  5\n",
      "       R-ARGM-PRD: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-PRP: precision:   0.00%; recall:   0.00%; FB1:   0.00  0\n",
      "       R-ARGM-TMP: precision:  63.93%; recall:  60.94%; FB1:  62.40  61\n",
      "                V: precision:   0.00%; recall:   0.00%; FB1:   0.00  3\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(79.42, 79.42, True)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Token-level scoring via conlleval (treats SRL tags like NER chunks).\n",
    "evaluator.evaluate_ner(model,test_data,0.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ubuntu/Active-NLP/neural_srl/modules/VanillaRNN.py:31: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().\n",
      "  output, _ = self.rnn(embedded)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of Sentences    :       23045\n",
      "Number of Propositions :       23045\n",
      "Percentage of perfect props :  57.56\n",
      "\n",
      "              corr.  excess  missed    prec.    rec.      F1\n",
      "------------------------------------------------------------\n",
      "   Overall    47828   12263   12621    79.59   79.12   79.36\n",
      "the best F is  79.36\n",
      "----------\n",
      "      ARG0    11493    1661    1539    87.37   88.19   87.78\n",
      "      ARG1    16894    4082    3697    80.54   82.05   81.29\n",
      "      ARG2     5091    1679    1839    75.20   73.46   74.32\n",
      "      ARG3      132      67     232    66.33   36.26   46.89\n",
      "      ARG4      249     102     121    70.94   67.30   69.07\n",
      "      ARG5        2       1       7    66.67   22.22   33.33\n",
      "      ARGA        0       0       2     0.00    0.00    0.00\n",
      "  ARGM-ADJ       47      90      88    34.31   34.81   34.56\n",
      "  ARGM-ADV     1355     602    1089    69.24   55.44   61.58\n",
      "  ARGM-CAU      247     102     137    70.77   64.32   67.39\n",
      "  ARGM-COM       16      29      13    35.56   55.17   43.24\n",
      "  ARGM-DIR      192     237     223    44.76   46.27   45.50\n",
      "  ARGM-DIS     2064     519     547    79.91   79.05   79.48\n",
      "  ARGM-DSP        0       0       1     0.00    0.00    0.00\n",
      "  ARGM-EXT       67      74      87    47.52   43.51   45.42\n",
      "  ARGM-GOL       22      40      54    35.48   28.95   31.88\n",
      "  ARGM-LOC      935     601     601    60.87   60.87   60.87\n",
      "  ARGM-LVB       53      19      13    73.61   80.30   76.81\n",
      "  ARGM-MNR      788     696     614    53.10   56.21   54.61\n",
      "  ARGM-MOD     2175      80      43    96.45   98.06   97.25\n",
      "  ARGM-NEG     1150     102      32    91.85   97.29   94.49\n",
      "  ARGM-PNC        6      15      71    28.57    7.79   12.24\n",
      "  ARGM-PRD       16      61     260    20.78    5.80    9.07\n",
      "  ARGM-PRP      214     152     235    58.47   47.66   52.52\n",
      "  ARGM-REC        4       1      30    80.00   11.76   20.51\n",
      "  ARGM-TMP     3476    1025     776    77.23   81.75   79.42\n",
      "    R-ARG0      571      77      68    88.12   89.36   88.73\n",
      "    R-ARG1      455      74     110    86.01   80.53   83.18\n",
      "    R-ARG2       19      11      28    63.33   40.43   49.35\n",
      "    R-ARG3        0       2       1     0.00    0.00    0.00\n",
      "    R-ARG4        0       0       3     0.00    0.00    0.00\n",
      "R-ARGM-ADV        0       0       2     0.00    0.00    0.00\n",
      "R-ARGM-CAU        1       1       3    50.00   25.00   33.33\n",
      "R-ARGM-DIR        0       0       1     0.00    0.00    0.00\n",
      "R-ARGM-EXT        0       0       1     0.00    0.00    0.00\n",
      "R-ARGM-LOC       52      37      17    58.43   75.36   65.82\n",
      "R-ARGM-MNR        3       2       8    60.00   27.27   37.50\n",
      "R-ARGM-PRD        0       0       1     0.00    0.00    0.00\n",
      "R-ARGM-PRP        0       0       2     0.00    0.00    0.00\n",
      "R-ARGM-TMP       39      22      25    63.93   60.94   62.40\n",
      "------------------------------------------------------------\n",
      "         V    23044       3       1    99.99  100.00   99.99\n",
      "------------------------------------------------------------\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(79.36, 79.36, True)"
      ]
     },
     "execution_count": 130,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Official CoNLL-2005 span-based SRL evaluation (srl-eval.pl).\n",
    "evaluator.evaluate_srl(model,test_data,0.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:pytorch_p36]",
   "language": "python",
   "name": "conda-env-pytorch_p36-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
