{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "pygments_lexer": "ipython3",
      "nbconvert_exporter": "python",
      "version": "3.6.4",
      "file_extension": ".py",
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "name": "python",
      "mimetype": "text/x-python"
    },
    "colab": {
      "name": "lstm-birnn-bahdanau-crf-biaffine.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
        "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
        "trusted": true,
        "id": "Ljz2IbsWluHv",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "a2204647-1d43-4934-cec4-28a412730e57"
      },
      "source": [
        "# Download the Universal Dependencies English-EWT treebank splits (CoNLL-U format)\n",
        "!wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-dev.conllu\n",
        "!wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-train.conllu\n",
        "!wget https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-test.conllu\n",
        "# Install/upgrade malaya for its tokenizer and the regex patterns used in preprocessing\n",
        "# NOTE(review): version is unpinned; pinning would make this notebook reproducible\n",
        "!pip install malaya -U"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "--2019-09-30 05:12:41--  https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-dev.conllu\n",
            "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
            "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 1668174 (1.6M) [text/plain]\n",
            "Saving to: ‘en_ewt-ud-dev.conllu’\n",
            "\n",
            "en_ewt-ud-dev.conll 100%[===================>]   1.59M  --.-KB/s    in 0.01s   \n",
            "\n",
            "2019-09-30 05:12:47 (108 MB/s) - ‘en_ewt-ud-dev.conllu’ saved [1668174/1668174]\n",
            "\n",
            "--2019-09-30 05:12:49--  https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-train.conllu\n",
            "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
            "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 13303045 (13M) [text/plain]\n",
            "Saving to: ‘en_ewt-ud-train.conllu’\n",
            "\n",
            "en_ewt-ud-train.con 100%[===================>]  12.69M  --.-KB/s    in 0.07s   \n",
            "\n",
            "2019-09-30 05:12:51 (178 MB/s) - ‘en_ewt-ud-train.conllu’ saved [13303045/13303045]\n",
            "\n",
            "--2019-09-30 05:12:53--  https://raw.githubusercontent.com/UniversalDependencies/UD_English-EWT/master/en_ewt-ud-test.conllu\n",
            "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
            "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
            "HTTP request sent, awaiting response... 200 OK\n",
            "Length: 1661985 (1.6M) [text/plain]\n",
            "Saving to: ‘en_ewt-ud-test.conllu’\n",
            "\n",
            "en_ewt-ud-test.conl 100%[===================>]   1.58M  --.-KB/s    in 0.03s   \n",
            "\n",
            "2019-09-30 05:12:54 (58.9 MB/s) - ‘en_ewt-ud-test.conllu’ saved [1661985/1661985]\n",
            "\n",
            "Collecting malaya\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/b1/11/5f8ea8da94136d1fb4db39931d4ed55ae51655a3212b33e5bf607271646e/malaya-2.7.7.0-py3-none-any.whl (2.1MB)\n",
            "\u001b[K     |████████████████████████████████| 2.1MB 34.6MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: tensorflow in /usr/local/lib/python3.6/dist-packages (from malaya) (1.14.0)\n",
            "Collecting sentencepiece (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/14/3d/efb655a670b98f62ec32d66954e1109f403db4d937c50d779a75b9763a29/sentencepiece-0.1.83-cp36-cp36m-manylinux1_x86_64.whl (1.0MB)\n",
            "\u001b[K     |████████████████████████████████| 1.0MB 42.9MB/s \n",
            "\u001b[?25hCollecting PySastrawi (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/61/84/b0a5454a040f81e81e6a95a5d5635f20ad43cc0c288f8b4966b339084962/PySastrawi-1.2.0-py2.py3-none-any.whl (210kB)\n",
            "\u001b[K     |████████████████████████████████| 215kB 54.5MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: sklearn in /usr/local/lib/python3.6/dist-packages (from malaya) (0.0)\n",
            "Collecting unidecode (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/d0/42/d9edfed04228bacea2d824904cae367ee9efd05e6cce7ceaaedd0b0ad964/Unidecode-1.1.1-py2.py3-none-any.whl (238kB)\n",
            "\u001b[K     |████████████████████████████████| 245kB 55.3MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: xgboost in /usr/local/lib/python3.6/dist-packages (from malaya) (0.90)\n",
            "Requirement already satisfied, skipping upgrade: numpy in /usr/local/lib/python3.6/dist-packages (from malaya) (1.16.5)\n",
            "Collecting bert-tensorflow (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/a6/66/7eb4e8b6ea35b7cc54c322c816f976167a43019750279a8473d355800a93/bert_tensorflow-1.0.1-py2.py3-none-any.whl (67kB)\n",
            "\u001b[K     |████████████████████████████████| 71kB 37.1MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: requests in /usr/local/lib/python3.6/dist-packages (from malaya) (2.21.0)\n",
            "Requirement already satisfied, skipping upgrade: networkx in /usr/local/lib/python3.6/dist-packages (from malaya) (2.3)\n",
            "Collecting dateparser (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/82/9d/51126ac615bbc4418478d725a5fa1a0f112059f6f111e4b48cfbe17ef9d0/dateparser-0.7.2-py2.py3-none-any.whl (352kB)\n",
            "\u001b[K     |████████████████████████████████| 358kB 59.1MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: scipy in /usr/local/lib/python3.6/dist-packages (from malaya) (1.3.1)\n",
            "Requirement already satisfied, skipping upgrade: scikit-learn in /usr/local/lib/python3.6/dist-packages (from malaya) (0.21.3)\n",
            "Collecting ftfy (from malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/75/ca/2d9a5030eaf1bcd925dab392762b9709a7ad4bd486a90599d93cd79cb188/ftfy-5.6.tar.gz (58kB)\n",
            "\u001b[K     |████████████████████████████████| 61kB 27.2MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.0.8)\n",
            "Requirement already satisfied, skipping upgrade: wrapt>=1.11.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.11.2)\n",
            "Requirement already satisfied, skipping upgrade: wheel>=0.26 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (0.33.6)\n",
            "Requirement already satisfied, skipping upgrade: gast>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (0.2.2)\n",
            "Requirement already satisfied, skipping upgrade: grpcio>=1.8.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.15.0)\n",
            "Requirement already satisfied, skipping upgrade: tensorboard<1.15.0,>=1.14.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.14.0)\n",
            "Requirement already satisfied, skipping upgrade: astor>=0.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (0.8.0)\n",
            "Requirement already satisfied, skipping upgrade: protobuf>=3.6.1 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (3.7.1)\n",
            "Requirement already satisfied, skipping upgrade: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.12.0)\n",
            "Requirement already satisfied, skipping upgrade: termcolor>=1.1.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.1.0)\n",
            "Requirement already satisfied, skipping upgrade: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.1.0)\n",
            "Requirement already satisfied, skipping upgrade: tensorflow-estimator<1.15.0rc0,>=1.14.0rc0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (1.14.0)\n",
            "Requirement already satisfied, skipping upgrade: google-pasta>=0.1.6 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (0.1.7)\n",
            "Requirement already satisfied, skipping upgrade: absl-py>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from tensorflow->malaya) (0.8.0)\n",
            "Requirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->malaya) (1.24.3)\n",
            "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->malaya) (2019.6.16)\n",
            "Requirement already satisfied, skipping upgrade: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->malaya) (2.8)\n",
            "Requirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->malaya) (3.0.4)\n",
            "Requirement already satisfied, skipping upgrade: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx->malaya) (4.4.0)\n",
            "Requirement already satisfied, skipping upgrade: python-dateutil in /usr/local/lib/python3.6/dist-packages (from dateparser->malaya) (2.5.3)\n",
            "Collecting regex (from dateparser->malaya)\n",
            "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/6f/a6/99eeb5904ab763db87af4bd71d9b1dfdd9792681240657a4c0a599c10a81/regex-2019.08.19.tar.gz (654kB)\n",
            "\u001b[K     |████████████████████████████████| 655kB 45.7MB/s \n",
            "\u001b[?25hRequirement already satisfied, skipping upgrade: pytz in /usr/local/lib/python3.6/dist-packages (from dateparser->malaya) (2018.9)\n",
            "Requirement already satisfied, skipping upgrade: tzlocal in /usr/local/lib/python3.6/dist-packages (from dateparser->malaya) (1.5.1)\n",
            "Requirement already satisfied, skipping upgrade: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->malaya) (0.13.2)\n",
            "Requirement already satisfied, skipping upgrade: wcwidth in /usr/local/lib/python3.6/dist-packages (from ftfy->malaya) (0.1.7)\n",
            "Requirement already satisfied, skipping upgrade: h5py in /usr/local/lib/python3.6/dist-packages (from keras-applications>=1.0.6->tensorflow->malaya) (2.8.0)\n",
            "Requirement already satisfied, skipping upgrade: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow->malaya) (3.1.1)\n",
            "Requirement already satisfied, skipping upgrade: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow->malaya) (41.2.0)\n",
            "Requirement already satisfied, skipping upgrade: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow->malaya) (0.15.6)\n",
            "Building wheels for collected packages: ftfy, regex\n",
            "  Building wheel for ftfy (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for ftfy: filename=ftfy-5.6-cp36-none-any.whl size=44553 sha256=a67cd3a8dec5d9ab36f166a19c9d55a545fcf8376d2dd4be7f822f0a7bd433ec\n",
            "  Stored in directory: /root/.cache/pip/wheels/43/34/ce/cbb38d71543c408de56f3c5e26ce8ba495a0fa5a28eaaf1046\n",
            "  Building wheel for regex (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for regex: filename=regex-2019.8.19-cp36-cp36m-linux_x86_64.whl size=609237 sha256=98055d96bc0b1d7a1f7761963e06648e4aee0db39cb10bb5ccdb3a451bb447ce\n",
            "  Stored in directory: /root/.cache/pip/wheels/90/04/07/b5010fb816721eb3d6dd64ed5cc8111ca23f97fdab8619b5be\n",
            "Successfully built ftfy regex\n",
            "Installing collected packages: sentencepiece, PySastrawi, unidecode, bert-tensorflow, regex, dateparser, ftfy, malaya\n",
            "Successfully installed PySastrawi-1.2.0 bert-tensorflow-1.0.1 dateparser-0.7.2 ftfy-5.6 malaya-2.7.7.0 regex-2019.8.19 sentencepiece-0.1.83 unidecode-1.1.1\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a",
        "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
        "trusted": true,
        "id": "r3Uhw481luH7",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "91eb9012-8b77-4c5b-8a3e-b3e11576028d"
      },
      "source": [
        "# Core imports: malaya supplies the tokenizer and money/date/email/url regexes;\n",
        "# TF/Keras utilities are used for sequence padding and the model below\n",
        "import malaya\n",
        "import re\n",
        "from malaya.texts._text_functions import split_into_sentences\n",
        "from malaya.texts import _regex\n",
        "import numpy as np\n",
        "import itertools\n",
        "import tensorflow as tf\n",
        "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
        "\n",
        "# Aliases for later use on raw text\n",
        "tokenizer = malaya.preprocessing._tokenizer\n",
        "splitter = split_into_sentences"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "not found any version, deleting previous version models..\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "nrGvtUSEluIC",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def is_number_regex(s):\n",
        "    \"\"\"Return True if `s` is numeric: a decimal like '1.23' or a plain digit string.\"\"\"\n",
        "    # NOTE(review): the pattern is written in a non-raw string, relying on Python\n",
        "    # passing the unknown escape '\\d' through; a raw string would be the safer spelling.\n",
        "    if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n",
        "        return s.isdigit()\n",
        "    return True\n",
        "\n",
        "def preprocessing(w):\n",
        "    \"\"\"Normalize one token: map numbers/money/dates/emails/urls to special tags;\n",
        "    otherwise collapse runs of a repeated character down to at most two.\"\"\"\n",
        "    if is_number_regex(w):\n",
        "        return '<NUM>'\n",
        "    elif re.match(_regex._money, w):\n",
        "        return '<MONEY>'\n",
        "    elif re.match(_regex._date, w):\n",
        "        return '<DATE>'\n",
        "    elif re.match(_regex._expressions['email'], w):\n",
        "        return '<EMAIL>'\n",
        "    elif re.match(_regex._expressions['url'], w):\n",
        "        return '<URL>'\n",
        "    else:\n",
        "        # e.g. 'soooo' -> 'soo': groupby splits into runs, keep at most 2 per run\n",
        "        w = ''.join(''.join(s)[:2] for _, s in itertools.groupby(w))\n",
        "        return w"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "4uJQAfRNluIH",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 289
        },
        "outputId": "c9a41167-3117-49dd-c3f7-95c580e783ec"
      },
      "source": [
        "# Vocabulary maps shared (and mutated) by process_corpus below:\n",
        "# id 0 is padding, 1 is unknown, 2 is the parser root token\n",
        "word2idx = {'PAD': 0,'UNK':1, '_ROOT': 2}\n",
        "tag2idx = {'PAD': 0, '_<ROOT>': 1}\n",
        "char2idx = {'PAD': 0,'UNK':1, '_ROOT': 2}\n",
        "# next free id in each vocabulary\n",
        "word_idx = 3\n",
        "tag_idx = 2\n",
        "char_idx = 3\n",
        "\n",
        "special_tokens = ['<NUM>', '<MONEY>', '<DATE>', '<URL>', '<EMAIL>']\n",
        "\n",
        "# Register the preprocessing tags in both word- and char-level vocabularies;\n",
        "# in char2idx they are whole-token entries (one id for the whole tag)\n",
        "for t in special_tokens:\n",
        "    word2idx[t] = word_idx\n",
        "    word_idx += 1\n",
        "    char2idx[t] = char_idx\n",
        "    char_idx += 1\n",
        "    \n",
        "word2idx, char2idx"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "({'<DATE>': 5,\n",
              "  '<EMAIL>': 7,\n",
              "  '<MONEY>': 4,\n",
              "  '<NUM>': 3,\n",
              "  '<URL>': 6,\n",
              "  'PAD': 0,\n",
              "  'UNK': 1,\n",
              "  '_ROOT': 2},\n",
              " {'<DATE>': 5,\n",
              "  '<EMAIL>': 7,\n",
              "  '<MONEY>': 4,\n",
              "  '<NUM>': 3,\n",
              "  '<URL>': 6,\n",
              "  'PAD': 0,\n",
              "  'UNK': 1,\n",
              "  '_ROOT': 2})"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 4
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "HfJNrFwZluIO",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Sentinel tokens for padding / root / end positions\n",
        "PAD = \"_PAD\"\n",
        "PAD_POS = \"_PAD_POS\"\n",
        "PAD_TYPE = \"_<PAD>\"\n",
        "PAD_CHAR = \"_PAD_CHAR\"\n",
        "ROOT = \"_ROOT\"\n",
        "ROOT_POS = \"_ROOT_POS\"\n",
        "ROOT_TYPE = \"_<ROOT>\"\n",
        "ROOT_CHAR = \"_ROOT_CHAR\"\n",
        "END = \"_END\"\n",
        "END_POS = \"_END_POS\"\n",
        "END_TYPE = \"_<END>\"\n",
        "END_CHAR = \"_END_CHAR\"\n",
        "\n",
        "def process_corpus(corpus, until = None):\n",
        "    \"\"\"Parse raw CoNLL-U lines into parallel per-sentence lists.\n",
        "\n",
        "    corpus: list of lines from a .conllu file (already split on newline).\n",
        "    Returns (sentences, words, depends, labels, pos, chars); the last entry\n",
        "    of each list is dropped by the [:-1] slices at the end.\n",
        "    Side effect: grows the module-level word2idx/tag2idx/char2idx vocabularies.\n",
        "    NOTE(review): `until` is accepted but never used.\n",
        "    \"\"\"\n",
        "    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n",
        "    sentences, words, depends, labels, pos, chars = [], [], [], [], [], []\n",
        "    temp_sentence, temp_word, temp_depend, temp_label, temp_pos = [], [], [], [], []\n",
        "    first_time = True\n",
        "    for sentence in corpus:\n",
        "        try:\n",
        "            if len(sentence):\n",
        "                # skip CoNLL-U comment lines\n",
        "                if sentence[0] == '#':\n",
        "                    continue\n",
        "                # print the first token line once as a format sanity check\n",
        "                if first_time:\n",
        "                    print(sentence)\n",
        "                    first_time = False\n",
        "                # CoNLL-U columns used here: [1]=FORM, [3]=UPOS, [6]=HEAD, [7]=DEPREL\n",
        "                sentence = sentence.split('\\t')\n",
        "                for c in sentence[1]:\n",
        "                    if c not in char2idx:\n",
        "                        char2idx[c] = char_idx\n",
        "                        char_idx += 1\n",
        "                if sentence[7] not in tag2idx:\n",
        "                    tag2idx[sentence[7]] = tag_idx\n",
        "                    tag_idx += 1\n",
        "                # normalize the surface form (numbers/money/dates/etc -> special tags)\n",
        "                sentence[1] = preprocessing(sentence[1])\n",
        "                if sentence[1] not in word2idx:\n",
        "                    word2idx[sentence[1]] = word_idx\n",
        "                    word_idx += 1\n",
        "                temp_word.append(word2idx[sentence[1]])\n",
        "                # int() raises on empty-node lines whose HEAD is '_' (e.g. id '10.1');\n",
        "                # the except below reports and skips them\n",
        "                temp_depend.append(int(sentence[6]))\n",
        "                temp_label.append(tag2idx[sentence[7]])\n",
        "                temp_sentence.append(sentence[1])\n",
        "                temp_pos.append(sentence[3])\n",
        "            else:\n",
        "                # blank line = sentence boundary; drop sentences shorter than 2 tokens,\n",
        "                # and sentences left inconsistent by a mid-token exception above\n",
        "                if len(temp_sentence) < 2 or len(temp_word) != len(temp_label):\n",
        "                    temp_word = []\n",
        "                    temp_depend = []\n",
        "                    temp_label = []\n",
        "                    temp_sentence = []\n",
        "                    temp_pos = []\n",
        "                    continue\n",
        "                words.append(temp_word)\n",
        "                depends.append(temp_depend)\n",
        "                labels.append(temp_label)\n",
        "                sentences.append( temp_sentence)\n",
        "                pos.append(temp_pos)\n",
        "                # char-level ids, prefixed with the root token; whole-token entries\n",
        "                # (the special tags) map to a single id, other words to one id per char\n",
        "                char_ = [[char2idx['_ROOT']]]\n",
        "                for w in temp_sentence:\n",
        "                    if w in char2idx:\n",
        "                        char_.append([char2idx[w]])\n",
        "                    else:\n",
        "                        char_.append([char2idx[c] for c in w])\n",
        "                chars.append(char_)\n",
        "                temp_word = []\n",
        "                temp_depend = []\n",
        "                temp_label = []\n",
        "                temp_sentence = []\n",
        "                temp_pos = []\n",
        "        except Exception as e:\n",
        "            # malformed or empty-node lines are reported and skipped\n",
        "            print(e, sentence)\n",
        "    return sentences[:-1], words[:-1], depends[:-1], labels[:-1], pos[:-1], chars[:-1]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "aLFEmcKPluIV",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        },
        "outputId": "8e31626b-91e1-4951-96c2-01341e18ddea"
      },
      "source": [
        "# Parse the dev split; printed errors below are CoNLL-U empty nodes being skipped\n",
        "with open('en_ewt-ud-dev.conllu') as fopen:\n",
        "    dev = fopen.read().split('\\n')\n",
        "\n",
        "sentences_dev, words_dev, depends_dev, labels_dev, _, _ = process_corpus(dev)"
      ],
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "1\tFrom\tfrom\tADP\tIN\t_\t3\tcase\t3:case\t_\n",
            "invalid literal for int() with base 10: '_' ['10.1', 'has', 'have', 'VERB', 'VBZ', '_', '_', '_', '8:parataxis', 'CopyOf=-1']\n",
            "invalid literal for int() with base 10: '_' ['21.1', 'has', 'have', 'VERB', 'VBZ', '_', '_', '_', '16:conj:and', 'CopyOf=-1']\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "AHD5Kgh_luIZ",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 71
        },
        "outputId": "dc86ed43-5ce2-4747-dc6a-76e30e9ab2c4"
      },
      "source": [
        "# Parse the test split, then fold the dev split into it so evaluation\n",
        "# below runs on test + dev combined\n",
        "with open('en_ewt-ud-test.conllu') as fopen:\n",
        "    test = fopen.read().split('\\n')\n",
        "\n",
        "sentences_test, words_test, depends_test, labels_test, _, _ = process_corpus(test)\n",
        "sentences_test.extend(sentences_dev)\n",
        "words_test.extend(words_dev)\n",
        "depends_test.extend(depends_dev)\n",
        "labels_test.extend(labels_dev)"
      ],
      "execution_count": 7,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "1\tWhat\twhat\tPRON\tWP\tPronType=Int\t0\troot\t0:root\t_\n",
            "invalid literal for int() with base 10: '_' ['24.1', 'left', 'left', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part', '_', '_', '6:parataxis', 'CopyOf=6']\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "n39ztGEXluIe",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 445
        },
        "outputId": "fd316d0f-b840-4ce3-fefe-edb15017dc93"
      },
      "source": [
        "# Parse the training split; this is the call that populates most of the vocabularies\n",
        "with open('en_ewt-ud-train.conllu') as fopen:\n",
        "    train = fopen.read().split('\\n')\n",
        "\n",
        "sentences_train, words_train, depends_train, labels_train, _, _ = process_corpus(train)"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "1\tAl\tAl\tPROPN\tNNP\tNumber=Sing\t0\troot\t0:root\tSpaceAfter=No\n",
            "invalid literal for int() with base 10: '_' ['8.1', 'reported', 'report', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part|Voice=Pass', '_', '_', '5:conj:and', 'CopyOf=5']\n",
            "invalid literal for int() with base 10: '_' ['22.1', 'used', 'use', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part', '_', '_', '13:advcl:with|17:conj:and', 'CopyOf=17']\n",
            "invalid literal for int() with base 10: '_' ['22.1', 'used', 'use', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part', '_', '_', '13:advcl:with|17:conj:and', 'CopyOf=17']\n",
            "invalid literal for int() with base 10: '_' ['11.1', 'called', 'call', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part|Voice=Pass', '_', '_', '3:conj:and', 'CopyOf=3']\n",
            "invalid literal for int() with base 10: '_' ['14.1', 'is', 'be', 'VERB', 'VBZ', '_', '_', '_', '1:conj:and', 'CopyOf=1']\n",
            "invalid literal for int() with base 10: '_' ['20.1', 'reflect', 'reflect', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '7:acl:relcl|9:conj', 'CopyOf=9']\n",
            "invalid literal for int() with base 10: '_' ['21.1', 'recruited', 'recruit', 'VERB', 'VBD', 'Mood=Ind|Tense=Past|VerbForm=Fin', '_', '_', '9:conj:and', 'CopyOf=9']\n",
            "invalid literal for int() with base 10: '_' ['9.1', 'wish', 'wish', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '2:conj:and', 'CopyOf=2']\n",
            "invalid literal for int() with base 10: '_' ['38.1', 'supplied', 'supply', 'VERB', 'VBN', 'Tense=Past|VerbForm=Part|Voice=Pass', '_', '_', '16:conj:and', 'CopyOf=16']\n",
            "invalid literal for int() with base 10: '_' ['18.1', 'keep', 'keep', 'VERB', 'VB', 'Mood=Imp|VerbForm=Fin', '_', '_', '14:conj:and', 'CopyOf=14']\n",
            "invalid literal for int() with base 10: '_' ['21.1', 'keep', 'keep', 'VERB', 'VB', 'Mood=Imp|VerbForm=Fin', '_', '_', '14:conj:and', 'CopyOf=14']\n",
            "invalid literal for int() with base 10: '_' ['18.1', 'mean', 'mean', 'VERB', 'VB', 'VerbForm=Inf', '_', '_', '8:conj', 'CopyOf=8']\n",
            "invalid literal for int() with base 10: '_' ['30.1', 'play', 'play', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '18:acl:relcl|27:conj:but', 'CopyOf=27']\n",
            "invalid literal for int() with base 10: '_' ['22.1', 'have', 'have', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '17:conj', 'CopyOf=17']\n",
            "invalid literal for int() with base 10: '_' ['27.1', 'have', 'have', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '17:conj', 'CopyOf=17']\n",
            "invalid literal for int() with base 10: '_' ['49.1', 'helped', 'help', 'VERB', 'VBD', '_', '_', '_', '38:conj:but', 'CopyOf=38']\n",
            "invalid literal for int() with base 10: '_' ['7.1', 'found', 'find', 'VERB', 'VBD', 'Mood=Ind|Tense=Past|VerbForm=Fin', '_', '_', '3:conj', 'CopyOf=3']\n",
            "invalid literal for int() with base 10: '_' ['10.1', 'excited', 'excited', 'ADJ', 'JJ', 'Degree=Pos', '_', '_', '4:advcl', 'CopyOf=4']\n",
            "invalid literal for int() with base 10: '_' ['15.1', \"'s\", 'be', 'VERB', 'VBZ', '_', '_', '_', '2:conj:and', 'CopyOf=2']\n",
            "invalid literal for int() with base 10: '_' ['25.1', 'took', 'take', 'VERB', 'VBD', 'Mood=Ind|Tense=Past|VerbForm=Fin', '_', '_', '17:conj:and', 'CopyOf=17']\n",
            "invalid literal for int() with base 10: '_' ['10.1', 'loss', 'lose', 'VERB', 'VBD', 'Mood=Ind|Tense=Past|VerbForm=Fin', '_', '_', '3:conj:and', 'CopyOf=3']\n",
            "invalid literal for int() with base 10: '_' ['11.1', 'leave', 'leave', 'VERB', 'VB', 'VerbForm=Inf', '_', '_', '7:parataxis', 'CopyOf=7']\n",
            "invalid literal for int() with base 10: '_' ['24.1', 'charge', 'charge', 'VERB', 'VBP', 'Mood=Ind|Tense=Pres|VerbForm=Fin', '_', '_', '16:conj:and', 'CopyOf=16']\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "RZ8MwuF9luIo",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "28460c3d-86bf-4e15-ef4b-2217734596a2"
      },
      "source": [
        "# corpus sizes after filtering (train, test+dev)\n",
        "len(sentences_train), len(sentences_test)"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(12000, 3824)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 9
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "Z7oKPBiMluIx",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "09b7af1e-8ad9-4ede-eb27-720e891dfa5a"
      },
      "source": [
        "# Inverse lookups for decoding model outputs back to strings\n",
        "idx2word = {v:k for k, v in word2idx.items()}\n",
        "idx2tag = {v:k for k, v in tag2idx.items()}\n",
        "len(idx2word)"
      ],
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "21974"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 10
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "EikVfMyQluI2",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def generate_char_seq(batch, UNK = 1):\n",
        "    \"\"\"Encode a batch of tokenized sentences into a char-id tensor.\n",
        "\n",
        "    batch: list of sentences, each a list of word strings.\n",
        "    UNK: id for characters missing from char2idx. char2idx defines 'UNK': 1\n",
        "         and '_ROOT': 2, so the previous default of 2 silently mapped unseen\n",
        "         characters to the root token; the default is now the real UNK id.\n",
        "    Returns an int32 array of shape (batch, max_sentence_len, max_word_len),\n",
        "    zero (PAD) elsewhere.\n",
        "    \"\"\"\n",
        "    maxlen_c = max([len(k) for k in batch])\n",
        "    x = [[len(i) for i in k] for k in batch]\n",
        "    maxlen = max([j for i in x for j in i])\n",
        "    temp = np.zeros((len(batch),maxlen_c,maxlen),dtype=np.int32)\n",
        "    for i in range(len(batch)):\n",
        "        for k in range(len(batch[i])):\n",
        "            # character `no` is written at position -1-no, i.e. each word is\n",
        "            # stored reversed and right-aligned, left-padded with zeros\n",
        "            for no, c in enumerate(batch[i][k]):\n",
        "                temp[i,k,-1-no] = char2idx.get(c, UNK)\n",
        "    return temp"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "izRVCDaNluI5",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "bd93b03a-0a4b-4eb7-ed14-44876e33ca0d"
      },
      "source": [
        "# sanity check: (batch, max_sentence_len, max_word_len)\n",
        "generate_char_seq(sentences_train[:5]).shape"
      ],
      "execution_count": 12,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(5, 36, 11)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 12
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "gS8Wlel5luJD",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "fb6df063-31c4-45e4-e590-8220685d4911"
      },
      "source": [
        "# word-id padding sanity check: (batch, max_sentence_len)\n",
        "pad_sequences(words_train[:5],padding='post').shape"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(5, 36)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 13
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "2EKNPE4mluJH",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Alias the preprocessed splits to the generic train/test names used\n",
        "# by the batching loops below. These are references, not copies.\n",
        "train_X = words_train\n",
        "train_Y = labels_train\n",
        "train_depends = depends_train\n",
        "train_char = sentences_train\n",
        "\n",
        "test_X = words_test\n",
        "test_Y = labels_test\n",
        "test_depends = depends_test\n",
        "test_char = sentences_test"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "IechxNL3luJW",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class BiAAttention:\n",
        "    # Biaffine attention scorer: for every (decoder position d,\n",
        "    # encoder position e) pair and every label, computes\n",
        "    #   score = d^T U e + W_d d + W_e e\n",
        "    # using broadcasting matmuls (see forward()).\n",
        "    def __init__(self, input_size_encoder, input_size_decoder, num_labels):\n",
        "        self.input_size_encoder = input_size_encoder\n",
        "        self.input_size_decoder = input_size_decoder\n",
        "        self.num_labels = num_labels\n",
        "        \n",
        "        # W_d / W_e: per-label linear terms; U: per-label bilinear term\n",
        "        self.W_d = tf.get_variable(\"W_d\", shape=[self.num_labels, self.input_size_decoder],\n",
        "           initializer=tf.contrib.layers.xavier_initializer())\n",
        "        self.W_e = tf.get_variable(\"W_e\", shape=[self.num_labels, self.input_size_encoder],\n",
        "           initializer=tf.contrib.layers.xavier_initializer())\n",
        "        self.U = tf.get_variable(\"U\", shape=[self.num_labels, self.input_size_decoder, self.input_size_encoder],\n",
        "           initializer=tf.contrib.layers.xavier_initializer())\n",
        "        \n",
        "    def forward(self, input_d, input_e, mask_d=None, mask_e=None):\n",
        "        # input_d: [batch, len_d, input_size_decoder]\n",
        "        # input_e: [batch, len_e, input_size_encoder]\n",
        "        # returns raw scores of shape [batch, num_labels, len_d, len_e];\n",
        "        # positions masked out are zeroed (not set to -inf) here.\n",
        "        # (the three tf.shape values below are computed but unused)\n",
        "        batch = tf.shape(input_d)[0]\n",
        "        length_decoder = tf.shape(input_d)[1]\n",
        "        length_encoder = tf.shape(input_e)[1]\n",
        "        # linear terms, expanded so each broadcasts over the other axis\n",
        "        out_d = tf.expand_dims(tf.matmul(self.W_d, tf.transpose(input_d, [0, 2, 1])), 3)\n",
        "        out_e = tf.expand_dims(tf.matmul(self.W_e, tf.transpose(input_e, [0, 2, 1])), 2)\n",
        "        # bilinear term: (input_d U) input_e^T for every position pair\n",
        "        output = tf.matmul(tf.expand_dims(input_d, 1), self.U)\n",
        "        output = tf.matmul(output, tf.transpose(tf.expand_dims(input_e, 1), [0, 1, 3, 2]))\n",
        "        \n",
        "        output = output + out_d + out_e\n",
        "        \n",
        "        if mask_d is not None:\n",
        "            # zero every score involving a padded decoder/encoder position\n",
        "            d = tf.expand_dims(tf.expand_dims(mask_d, 1), 3)\n",
        "            e = tf.expand_dims(tf.expand_dims(mask_e, 1), 2)\n",
        "            output = output * d * e\n",
        "            \n",
        "        return output\n",
        "\n",
        "class Model:\n",
        "    \"\"\"Joint POS tagger and dependency-head predictor (TF1 graph mode).\n",
        "\n",
        "    Tagging: char-level stacked BiLSTM feeding a word-level stacked\n",
        "    BiLSTM (each direction an attention-wrapped LSTM) with a CRF head.\n",
        "    Parsing: biaffine (BiAAttention) arc scorer trained with a\n",
        "    log-softmax loss over candidate heads. The two losses are summed.\n",
        "    Relies on notebook globals word2idx, char2idx and idx2tag.\n",
        "    \"\"\"\n",
        "    def __init__(\n",
        "        self,\n",
        "        dim_word,\n",
        "        dim_char,\n",
        "        dropout,\n",
        "        learning_rate,\n",
        "        hidden_size_char,\n",
        "        hidden_size_word,\n",
        "        num_layers\n",
        "    ):\n",
        "        # dropout is passed to output_keep_prob, so it is a KEEP\n",
        "        # probability: 1.0 disables dropout despite the name.\n",
        "        def cells(size, reuse = False):\n",
        "            return tf.contrib.rnn.DropoutWrapper(\n",
        "                tf.nn.rnn_cell.LSTMCell(\n",
        "                    size,\n",
        "                    initializer = tf.orthogonal_initializer(),\n",
        "                    reuse = reuse,\n",
        "                ),\n",
        "                output_keep_prob = dropout,\n",
        "            )\n",
        "        \n",
        "        # LSTM cell wrapped with Bahdanau attention over embedded.\n",
        "        # NOTE: the size argument is ignored -- hidden_size_word from the\n",
        "        # enclosing scope is used for the cell and the attention alike.\n",
        "        def bahdanau(embedded, size):\n",
        "            attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n",
        "                num_units = hidden_size_word, memory = embedded\n",
        "            )\n",
        "            return tf.contrib.seq2seq.AttentionWrapper(\n",
        "                cell = cells(hidden_size_word),\n",
        "                attention_mechanism = attention_mechanism,\n",
        "                attention_layer_size = hidden_size_word,\n",
        "            )\n",
        "        \n",
        "        # inputs; word id 0 is padding, hence lengths and mask are derived\n",
        "        # from the nonzero word ids\n",
        "        self.word_ids = tf.placeholder(tf.int32, shape = [None, None])\n",
        "        self.char_ids = tf.placeholder(tf.int32, shape = [None, None, None])\n",
        "        self.labels = tf.placeholder(tf.int32, shape = [None, None])\n",
        "        self.depends = tf.placeholder(tf.int32, shape = [None, None])\n",
        "        self.maxlen = tf.shape(self.word_ids)[1]\n",
        "        self.lengths = tf.count_nonzero(self.word_ids, 1)\n",
        "        self.mask = tf.math.not_equal(self.word_ids, 0)\n",
        "        float_mask = tf.cast(self.mask, tf.float32)\n",
        "        \n",
        "        # head/child projections feeding the biaffine arc scorer\n",
        "        self.arc_h = tf.layers.Dense(hidden_size_word)\n",
        "        self.arc_c = tf.layers.Dense(hidden_size_word)\n",
        "        self.attention = BiAAttention(hidden_size_word, hidden_size_word, 1)\n",
        "\n",
        "        # trainable embedding tables, scaled truncated-normal init\n",
        "        self.word_embeddings = tf.Variable(\n",
        "            tf.truncated_normal(\n",
        "                [len(word2idx), dim_word], stddev = 1.0 / np.sqrt(dim_word)\n",
        "            )\n",
        "        )\n",
        "        self.char_embeddings = tf.Variable(\n",
        "            tf.truncated_normal(\n",
        "                [len(char2idx), dim_char], stddev = 1.0 / np.sqrt(dim_char)\n",
        "            )\n",
        "        )\n",
        "\n",
        "        word_embedded = tf.nn.embedding_lookup(\n",
        "            self.word_embeddings, self.word_ids\n",
        "        )\n",
        "        char_embedded = tf.nn.embedding_lookup(\n",
        "            self.char_embeddings, self.char_ids\n",
        "        )\n",
        "        # collapse (batch, words) so every word is one char sequence for\n",
        "        # the char-level BiRNN\n",
        "        s = tf.shape(char_embedded)\n",
        "        char_embedded = tf.reshape(\n",
        "            char_embedded, shape = [s[0] * s[1], s[-2], dim_char]\n",
        "        )\n",
        "\n",
        "        # stacked char-level BiLSTM; the last timestep of the top layer\n",
        "        # becomes each word's character feature\n",
        "        for n in range(num_layers):\n",
        "            (out_fw, out_bw), (\n",
        "                state_fw,\n",
        "                state_bw,\n",
        "            ) = tf.nn.bidirectional_dynamic_rnn(\n",
        "                cell_fw = cells(hidden_size_char),\n",
        "                cell_bw = cells(hidden_size_char),\n",
        "                inputs = char_embedded,\n",
        "                dtype = tf.float32,\n",
        "                scope = 'bidirectional_rnn_char_%d' % (n),\n",
        "            )\n",
        "            char_embedded = tf.concat((out_fw, out_bw), 2)\n",
        "        output = tf.reshape(\n",
        "            char_embedded[:, -1], shape = [s[0], s[1], 2 * hidden_size_char]\n",
        "        )\n",
        "        word_embedded = tf.concat([word_embedded, output], axis = -1)\n",
        "\n",
        "        # stacked word-level BiLSTM; each direction is an attention-\n",
        "        # wrapped LSTM attending over the current layer input\n",
        "        for n in range(num_layers):\n",
        "            (out_fw, out_bw), (\n",
        "                state_fw,\n",
        "                state_bw,\n",
        "            ) = tf.nn.bidirectional_dynamic_rnn(\n",
        "                cell_fw = bahdanau(word_embedded, hidden_size_word),\n",
        "                cell_bw = bahdanau(word_embedded, hidden_size_word),\n",
        "                inputs = word_embedded,\n",
        "                dtype = tf.float32,\n",
        "                scope = 'bidirectional_rnn_word_%d' % (n),\n",
        "            )\n",
        "            word_embedded = tf.concat((out_fw, out_bw), 2)\n",
        "\n",
        "        # linear-chain CRF tagging head\n",
        "        logits = tf.layers.dense(word_embedded, len(idx2tag))\n",
        "        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n",
        "            logits, self.labels, self.lengths\n",
        "        )\n",
        "        arc_h = tf.nn.elu(self.arc_h(word_embedded))\n",
        "        arc_c = tf.nn.elu(self.arc_c(word_embedded))\n",
        "        # biaffine arc scores; num_labels=1 so the squeeze leaves\n",
        "        # [batch, len, len] with axis 1 indexing candidate heads and\n",
        "        # axis 2 children (see the gather below).\n",
        "        # NOTE(review): arc_c is computed but never used -- forward()\n",
        "        # receives arc_h twice; likely intended forward(arc_h, arc_c).\n",
        "        # Left as-is to preserve trained behavior; confirm.\n",
        "        out_arc = tf.squeeze(self.attention.forward(arc_h, arc_h, mask_d=float_mask, mask_e=float_mask), axis = 1)\n",
        "        \n",
        "        batch = tf.shape(out_arc)[0]\n",
        "        batch_index = tf.range(0, batch)\n",
        "        max_len = tf.shape(out_arc)[1]\n",
        "        sec_max_len = tf.shape(out_arc)[2]\n",
        "        \n",
        "        # push padded positions toward -inf before normalising over\n",
        "        # candidate heads (axis 1; dim= is the deprecated alias of axis=)\n",
        "        minus_inf = -1e8\n",
        "        minus_mask = (1 - float_mask) * minus_inf\n",
        "        out_arc = out_arc + tf.expand_dims(minus_mask, axis = 2) + tf.expand_dims(minus_mask, axis = 1)\n",
        "        loss_arc = tf.nn.log_softmax(out_arc, dim=1)\n",
        "        loss_arc = loss_arc * tf.expand_dims(float_mask, axis = 2) * tf.expand_dims(float_mask, axis = 1)\n",
        "        # normaliser: token count minus one per sentence (position 0 is\n",
        "        # dropped from the arc loss below)\n",
        "        num = tf.reduce_sum(float_mask) - tf.cast(batch, tf.float32)\n",
        "        \n",
        "        # gather the gold-head log-prob for every (sentence, gold head,\n",
        "        # child) triple, then drop child position 0\n",
        "        child_index = tf.tile(tf.expand_dims(tf.range(0, max_len), 1), [1, batch])\n",
        "        t = tf.transpose(self.depends)\n",
        "        broadcasted = tf.broadcast_to(batch_index, tf.shape(t))\n",
        "        concatenated = tf.transpose(tf.concat([tf.expand_dims(broadcasted, axis = 0),\n",
        "                                               tf.expand_dims(t, axis = 0),\n",
        "                                               tf.expand_dims(child_index, axis = 0)], axis = 0))\n",
        "        loss_arc = tf.gather_nd(loss_arc, concatenated)\n",
        "        loss_arc = tf.transpose(loss_arc, [1, 0])[1:]\n",
        "        \n",
        "        loss_arc = tf.reduce_sum(-loss_arc) / num\n",
        "        \n",
        "        # total cost: CRF negative log-likelihood + arc loss\n",
        "        self.cost = tf.reduce_mean(-log_likelihood) + loss_arc\n",
        "        \n",
        "        self.optimizer = tf.train.AdamOptimizer(\n",
        "            learning_rate = learning_rate\n",
        "        ).minimize(self.cost)\n",
        "        \n",
        "        mask = tf.sequence_mask(self.lengths, maxlen = self.maxlen)\n",
        "        \n",
        "        # CRF Viterbi decode for the tag sequence\n",
        "        self.tags_seq, _ = tf.contrib.crf.crf_decode(\n",
        "            logits, transition_params, self.lengths\n",
        "        )\n",
        "        \n",
        "        # head prediction: forbid self-loops with a -inf diagonal, mask\n",
        "        # padded rows, then argmax over candidate heads (axis 1)\n",
        "        out_arc = out_arc + tf.linalg.diag(tf.fill([max_len], -np.inf))\n",
        "        minus_mask = tf.expand_dims(tf.cast(1.0 - float_mask, tf.bool), axis = 2)\n",
        "        minus_mask = tf.tile(minus_mask, [1, 1, sec_max_len])\n",
        "        out_arc = tf.where(minus_mask, tf.fill(tf.shape(out_arc), -np.inf), out_arc)\n",
        "        self.heads = tf.argmax(out_arc, axis = 1)\n",
        "        \n",
        "        # token-level tag accuracy over non-padded positions\n",
        "        self.prediction = tf.boolean_mask(self.tags_seq, mask)\n",
        "        mask_label = tf.boolean_mask(self.labels, mask)\n",
        "        correct_pred = tf.equal(self.prediction, mask_label)\n",
        "        correct_index = tf.cast(correct_pred, tf.float32)\n",
        "        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
        "        \n",
        "        # head (UAS-style) accuracy over non-padded positions; note this\n",
        "        # rebinds self.prediction to the head predictions\n",
        "        self.prediction = tf.cast(tf.boolean_mask(self.heads, mask), tf.int32)\n",
        "        mask_label = tf.boolean_mask(self.depends, mask)\n",
        "        correct_pred = tf.equal(self.prediction, mask_label)\n",
        "        correct_index = tf.cast(correct_pred, tf.float32)\n",
        "        self.accuracy_depends = tf.reduce_mean(tf.cast(correct_pred, tf.float32))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "ORr-2ouXluJl",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 496
        },
        "outputId": "5c0c29e2-2502-49ce-f641-8973f436b29e"
      },
      "source": [
        "# fresh graph + interactive session, then build the model\n",
        "tf.reset_default_graph()\n",
        "sess = tf.InteractiveSession()\n",
        "\n",
        "# hyperparameters\n",
        "dim_word = 128\n",
        "dim_char = 256\n",
        "dropout = 1.0  # keep probability: 1.0 means dropout is disabled\n",
        "learning_rate = 1e-3\n",
        "hidden_size_char = 128\n",
        "hidden_size_word = 128\n",
        "num_layers = 2\n",
        "\n",
        "model = Model(dim_word,dim_char,dropout,learning_rate,hidden_size_char,hidden_size_word,num_layers)\n",
        "sess.run(tf.global_variables_initializer())"
      ],
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py:507: calling count_nonzero (from tensorflow.python.ops.math_ops) with axis is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "reduction_indices is deprecated, use axis instead\n",
            "WARNING:tensorflow:From <ipython-input-15-5f080a23faa2>:48: LSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
            "WARNING:tensorflow:From <ipython-input-15-5f080a23faa2>:107: bidirectional_dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use `keras.layers.Bidirectional(keras.layers.RNN(cell))`, which is equivalent to this API\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn.py:464: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn_cell_impl.py:961: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
            "WARNING:tensorflow:From <ipython-input-15-5f080a23faa2>:128: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use keras.layers.dense instead.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/crf/python/ops/crf.py:99: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
            "WARNING:tensorflow:From <ipython-input-15-5f080a23faa2>:144: calling log_softmax (from tensorflow.python.ops.nn_ops) with dim is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "dim is deprecated, use axis instead\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "4zkpDRaDluJq",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# build a tiny 5-sentence batch to sanity-check the graph before\n",
        "# training; all inputs are zero-padded at the tail\n",
        "batch_x = train_X[:5]\n",
        "batch_x = pad_sequences(batch_x,padding='post')\n",
        "batch_char = train_char[:5]\n",
        "batch_char = generate_char_seq(batch_char)\n",
        "batch_y = train_Y[:5]\n",
        "batch_y = pad_sequences(batch_y,padding='post')\n",
        "batch_depends = train_depends[:5]\n",
        "batch_depends = pad_sequences(batch_depends,padding='post')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "wL67WIkMluJz",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "5a4c71b2-49a3-439c-ed41-be31555eefb3"
      },
      "source": [
        "# forward pass on the untrained model: accuracies should be near\n",
        "# chance and the cost large (no optimizer op, so weights are untouched)\n",
        "sess.run([model.accuracy, model.accuracy_depends, model.cost],\n",
        "        feed_dict = {model.word_ids: batch_x,\n",
        "                model.char_ids: batch_char,\n",
        "                model.labels: batch_y,\n",
        "                model.depends: batch_depends})"
      ],
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[0.0, 0.094827585, 95.5533]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 18
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "I0lyT0z-luJ3",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "4bbc5a14-c8ac-4123-a27b-e8a4e4c852a1"
      },
      "source": [
        "from tqdm import tqdm\n",
        "\n",
        "batch_size = 32\n",
        "epoch = 15\n",
        "\n",
        "# NOTE: the test split doubles as the validation set here; the printed\n",
        "# per-epoch numbers are means over minibatch-level metrics.\n",
        "for e in range(epoch):\n",
        "    train_acc, train_loss = [], []\n",
        "    test_acc, test_loss = [], []\n",
        "    train_acc_depends, test_acc_depends = [], []\n",
        "    \n",
        "    # training pass: each minibatch is padded independently, and the\n",
        "    # optimizer op runs alongside the metric ops\n",
        "    pbar = tqdm(\n",
        "        range(0, len(train_X), batch_size), desc = 'train minibatch loop'\n",
        "    )\n",
        "    for i in pbar:\n",
        "        # clamp the final slice to the dataset size\n",
        "        index = min(i + batch_size, len(train_X))\n",
        "        batch_x = train_X[i: index]\n",
        "        batch_x = pad_sequences(batch_x,padding='post')\n",
        "        batch_char = train_char[i: index]\n",
        "        batch_char = generate_char_seq(batch_char)\n",
        "        batch_y = train_Y[i: index]\n",
        "        batch_y = pad_sequences(batch_y,padding='post')\n",
        "        batch_depends = train_depends[i: index]\n",
        "        batch_depends = pad_sequences(batch_depends,padding='post')\n",
        "        \n",
        "        acc_depends, acc, cost, _ = sess.run(\n",
        "            [model.accuracy_depends, model.accuracy, model.cost, model.optimizer],\n",
        "            feed_dict = {\n",
        "                model.word_ids: batch_x,\n",
        "                model.char_ids: batch_char,\n",
        "                model.labels: batch_y,\n",
        "                model.depends: batch_depends\n",
        "            },\n",
        "        )\n",
        "        train_loss.append(cost)\n",
        "        train_acc.append(acc)\n",
        "        train_acc_depends.append(acc_depends)\n",
        "        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)\n",
        "        \n",
        "    # evaluation pass: same feeds but without the optimizer op, so the\n",
        "    # weights stay fixed\n",
        "    pbar = tqdm(\n",
        "        range(0, len(test_X), batch_size), desc = 'test minibatch loop'\n",
        "    )\n",
        "    for i in pbar:\n",
        "        index = min(i + batch_size, len(test_X))\n",
        "        batch_x = test_X[i: index]\n",
        "        batch_x = pad_sequences(batch_x,padding='post')\n",
        "        batch_char = test_char[i: index]\n",
        "        batch_char = generate_char_seq(batch_char)\n",
        "        batch_y = test_Y[i: index]\n",
        "        batch_y = pad_sequences(batch_y,padding='post')\n",
        "        batch_depends = test_depends[i: index]\n",
        "        batch_depends = pad_sequences(batch_depends,padding='post')\n",
        "        \n",
        "        acc_depends, acc, cost = sess.run(\n",
        "            [model.accuracy_depends, model.accuracy, model.cost],\n",
        "            feed_dict = {\n",
        "                model.word_ids: batch_x,\n",
        "                model.char_ids: batch_char,\n",
        "                model.labels: batch_y,\n",
        "                model.depends: batch_depends\n",
        "            },\n",
        "        )\n",
        "        test_loss.append(cost)\n",
        "        test_acc.append(acc)\n",
        "        test_acc_depends.append(acc_depends)\n",
        "        pbar.set_postfix(cost = cost, accuracy = acc, accuracy_depends = acc_depends)\n",
        "    \n",
        "    \n",
        "    # epoch summary\n",
        "    print(\n",
        "    'epoch: %d, training loss: %f, training acc: %f, training depends: %f, valid loss: %f, valid acc: %f, valid depends: %f\\n'\n",
        "    % (e, np.mean(train_loss), \n",
        "       np.mean(train_acc), \n",
        "       np.mean(train_acc_depends), \n",
        "       np.mean(test_loss), \n",
        "       np.mean(test_acc), \n",
        "       np.mean(test_acc_depends)\n",
        "    ))\n",
        "        "
      ],
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:50<00:00,  1.73it/s, accuracy=0.803, accuracy_depends=0.559, cost=16.9]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.03it/s, accuracy=0.862, accuracy_depends=0.636, cost=10.2]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 0, training loss: 32.401157, training acc: 0.499893, training depends: 0.333395, valid loss: 12.616477, valid acc: 0.752286, valid depends: 0.550964\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:46<00:00,  1.74it/s, accuracy=0.863, accuracy_depends=0.719, cost=10.5]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.09it/s, accuracy=0.891, accuracy_depends=0.704, cost=7.06]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 1, training loss: 11.654610, training acc: 0.817275, training depends: 0.613238, valid loss: 8.984379, valid acc: 0.822647, valid depends: 0.640479\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:49<00:00,  1.71it/s, accuracy=0.903, accuracy_depends=0.752, cost=8.08]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  5.75it/s, accuracy=0.899, accuracy_depends=0.733, cost=6.17]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 2, training loss: 7.874226, training acc: 0.877587, training depends: 0.682966, valid loss: 8.313724, valid acc: 0.838643, valid depends: 0.667915\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:46<00:00,  1.71it/s, accuracy=0.92, accuracy_depends=0.768, cost=6.84]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.06it/s, accuracy=0.903, accuracy_depends=0.777, cost=5.96]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 3, training loss: 6.043023, training acc: 0.908013, training depends: 0.717450, valid loss: 8.258953, valid acc: 0.842917, valid depends: 0.681678\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:44<00:00,  1.75it/s, accuracy=0.939, accuracy_depends=0.799, cost=5.56]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.17it/s, accuracy=0.919, accuracy_depends=0.798, cost=5.71]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 4, training loss: 4.851332, training acc: 0.926946, training depends: 0.738619, valid loss: 8.510642, valid acc: 0.848766, valid depends: 0.691324\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:44<00:00,  1.75it/s, accuracy=0.947, accuracy_depends=0.815, cost=4.41]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.02it/s, accuracy=0.919, accuracy_depends=0.81, cost=5.32]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 5, training loss: 3.947296, training acc: 0.941130, training depends: 0.751846, valid loss: 8.953300, valid acc: 0.848426, valid depends: 0.688125\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:44<00:00,  1.77it/s, accuracy=0.955, accuracy_depends=0.822, cost=3.45]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.22it/s, accuracy=0.911, accuracy_depends=0.798, cost=6.26]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 6, training loss: 3.262760, training acc: 0.952513, training depends: 0.763379, valid loss: 9.313190, valid acc: 0.850958, valid depends: 0.693280\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.76it/s, accuracy=0.967, accuracy_depends=0.827, cost=3.08]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  5.85it/s, accuracy=0.907, accuracy_depends=0.789, cost=6.98]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 7, training loss: 2.569048, training acc: 0.963230, training depends: 0.775451, valid loss: 9.958177, valid acc: 0.849756, valid depends: 0.694797\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:44<00:00,  1.76it/s, accuracy=0.973, accuracy_depends=0.834, cost=2.14]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.07it/s, accuracy=0.919, accuracy_depends=0.789, cost=6.42]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 8, training loss: 1.959417, training acc: 0.972763, training depends: 0.788287, valid loss: 10.350948, valid acc: 0.852834, valid depends: 0.695817\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.79it/s, accuracy=0.971, accuracy_depends=0.836, cost=2.1]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.14it/s, accuracy=0.879, accuracy_depends=0.773, cost=8.35]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 9, training loss: 1.574149, training acc: 0.979302, training depends: 0.795839, valid loss: 11.177638, valid acc: 0.852465, valid depends: 0.702183\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.75it/s, accuracy=0.987, accuracy_depends=0.834, cost=1.23]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:20<00:00,  6.26it/s, accuracy=0.915, accuracy_depends=0.794, cost=8.7]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 10, training loss: 1.237997, training acc: 0.984891, training depends: 0.804241, valid loss: 11.869824, valid acc: 0.848389, valid depends: 0.701258\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.78it/s, accuracy=0.988, accuracy_depends=0.85, cost=1.12]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  6.25it/s, accuracy=0.874, accuracy_depends=0.798, cost=8.92]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 11, training loss: 1.054887, training acc: 0.987574, training depends: 0.808066, valid loss: 11.984483, valid acc: 0.853984, valid depends: 0.705631\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.76it/s, accuracy=0.992, accuracy_depends=0.846, cost=1.08]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:21<00:00,  5.93it/s, accuracy=0.915, accuracy_depends=0.789, cost=7.69]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 12, training loss: 0.890403, training acc: 0.990091, training depends: 0.816699, valid loss: 12.758488, valid acc: 0.852551, valid depends: 0.706672\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.72it/s, accuracy=0.992, accuracy_depends=0.847, cost=0.864]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:20<00:00,  6.14it/s, accuracy=0.907, accuracy_depends=0.781, cost=8.05]\n",
            "train minibatch loop:   0%|          | 0/375 [00:00<?, ?it/s]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 13, training loss: 0.821644, training acc: 0.990760, training depends: 0.821848, valid loss: 12.964406, valid acc: 0.851949, valid depends: 0.706212\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "train minibatch loop: 100%|██████████| 375/375 [02:43<00:00,  1.72it/s, accuracy=0.995, accuracy_depends=0.852, cost=0.71]\n",
            "test minibatch loop: 100%|██████████| 120/120 [00:20<00:00,  6.39it/s, accuracy=0.895, accuracy_depends=0.789, cost=10]"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "epoch: 14, training loss: 0.734888, training acc: 0.992246, training depends: 0.824531, valid loss: 13.220814, valid acc: 0.851365, valid depends: 0.708206\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "OA5fV4xxluJ6",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def evaluate(heads_pred, types_pred, heads, types, lengths,\n",
        "             symbolic_root=False, symbolic_end=False):\n",
        "    batch_size, _ = words.shape\n",
        "    ucorr = 0.\n",
        "    lcorr = 0.\n",
        "    total = 0.\n",
        "    ucomplete_match = 0.\n",
        "    lcomplete_match = 0.\n",
        "\n",
        "    corr_root = 0.\n",
        "    total_root = 0.\n",
        "    start = 1 if symbolic_root else 0\n",
        "    end = 1 if symbolic_end else 0\n",
        "    for i in range(batch_size):\n",
        "        ucm = 1.\n",
        "        lcm = 1.\n",
        "        for j in range(start, lengths[i] - end):\n",
        "\n",
        "            total += 1\n",
        "            if heads[i, j] == heads_pred[i, j]:\n",
        "                ucorr += 1\n",
        "                if types[i, j] == types_pred[i, j]:\n",
        "                    lcorr += 1\n",
        "                else:\n",
        "                    lcm = 0\n",
        "            else:\n",
        "                ucm = 0\n",
        "                lcm = 0\n",
        "\n",
        "            if heads[i, j] == 0:\n",
        "                total_root += 1\n",
        "                corr_root += 1 if heads_pred[i, j] == 0 else 0\n",
        "\n",
        "        ucomplete_match += ucm\n",
        "        lcomplete_match += lcm\n",
        "\n",
        "    return (ucorr, lcorr, total, ucomplete_match, lcomplete_match), \\\n",
        "           (corr_root, total_root), batch_size"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "9fBypTzSluKC",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 170
        },
        "outputId": "2c26e01b-a6a4-4871-9f38-812df371c65f"
      },
      "source": [
        "# run one inference step on the current padded batch:\n",
        "# tags_seq = predicted dependency types, heads = predicted head indices\n",
        "tags_seq, heads = sess.run(\n",
        "    [model.tags_seq, model.heads],\n",
        "    feed_dict = {\n",
        "        model.word_ids: batch_x,\n",
        "        model.char_ids: batch_char\n",
        "    },\n",
        ")\n",
        "# display first example: predicted types, predicted heads, gold heads (batch_depends)\n",
        "tags_seq[0], heads[0], batch_depends[0]"
      ],
      "execution_count": 21,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(array([16,  6, 22, 26,  6, 18, 16,  5,  3, 13, 10, 11,  6, 12, 13, 10, 16,\n",
              "         7,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,\n",
              "         0], dtype=int32),\n",
              " array([ 2,  8,  5,  5,  2,  8,  8,  0, 11, 11,  8, 14, 14,  8, 16, 14, 14,\n",
              "         8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,\n",
              "         0]),\n",
              " array([ 2,  8,  5,  5,  2,  8,  8,  0, 11, 11,  8, 14, 14,  8, 16, 14, 14,\n",
              "         8,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,\n",
              "         0], dtype=int32))"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 21
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "Afwz-4bvluKM",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def evaluate(heads_pred, types_pred, heads, types, lengths,\n",
        "             symbolic_root=False, symbolic_end=False):\n",
        "    batch_size, _ = heads_pred.shape\n",
        "    ucorr = 0.\n",
        "    lcorr = 0.\n",
        "    total = 0.\n",
        "    ucomplete_match = 0.\n",
        "    lcomplete_match = 0.\n",
        "\n",
        "    corr_root = 0.\n",
        "    total_root = 0.\n",
        "    start = 1 if symbolic_root else 0\n",
        "    end = 1 if symbolic_end else 0\n",
        "    for i in range(batch_size):\n",
        "        ucm = 1.\n",
        "        lcm = 1.\n",
        "        for j in range(start, lengths[i] - end):\n",
        "\n",
        "            total += 1\n",
        "            if heads[i, j] == heads_pred[i, j]:\n",
        "                ucorr += 1\n",
        "                if types[i, j] == types_pred[i, j]:\n",
        "                    lcorr += 1\n",
        "                else:\n",
        "                    lcm = 0\n",
        "            else:\n",
        "                ucm = 0\n",
        "                lcm = 0\n",
        "\n",
        "            if heads[i, j] == 0:\n",
        "                total_root += 1\n",
        "                corr_root += 1 if heads_pred[i, j] == 0 else 0\n",
        "\n",
        "        ucomplete_match += ucm\n",
        "        lcomplete_match += lcm\n",
        "    \n",
        "    return ucorr / total, lcorr / total, corr_root / total_root"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "L_YC7rYLluKU",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "cea2aa49-fec2-4adb-fdea-91f01f0fa237"
      },
      "source": [
        "# score the current batch; true sentence lengths are recovered by\n",
        "# counting non-zero (non-padding) word ids per row\n",
        "arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, tags_seq, batch_depends, batch_y, \n",
        "        np.count_nonzero(batch_x, axis = 1))\n",
        "arc_accuracy, type_accuracy, root_accuracy"
      ],
      "execution_count": 23,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(0.7894736842105263, 0.7611336032388664, 0.875)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 23
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "OM8uPNqFluKY",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "dbc9b008-9109-45c1-db67-9e980743b303"
      },
      "source": [
        "# evaluate over the whole test set, one minibatch at a time,\n",
        "# collecting per-batch arc / type / root accuracies\n",
        "arcs, types, roots = [], [], []\n",
        "\n",
        "pbar = tqdm(\n",
        "    range(0, len(test_X), batch_size), desc = 'test minibatch loop'\n",
        ")\n",
        "for i in pbar:\n",
        "    index = min(i + batch_size, len(test_X))\n",
        "    # slice one minibatch and post-pad each sequence to the batch maximum\n",
        "    batch_x = test_X[i: index]\n",
        "    batch_x = pad_sequences(batch_x,padding='post')\n",
        "    batch_char = test_char[i: index]\n",
        "    batch_char = generate_char_seq(batch_char)\n",
        "    batch_y = test_Y[i: index]\n",
        "    batch_y = pad_sequences(batch_y,padding='post')\n",
        "    batch_depends = test_depends[i: index]\n",
        "    batch_depends = pad_sequences(batch_depends,padding='post')\n",
        "    \n",
        "    # predict dependency types (tags_seq) and head indices (heads)\n",
        "    tags_seq, heads = sess.run(\n",
        "        [model.tags_seq, model.heads],\n",
        "        feed_dict = {\n",
        "            model.word_ids: batch_x,\n",
        "            model.char_ids: batch_char\n",
        "        },\n",
        "    )\n",
        "    \n",
        "    # np.count_nonzero recovers true sentence lengths from the post-padded ids\n",
        "    arc_accuracy, type_accuracy, root_accuracy = evaluate(heads, tags_seq, batch_depends, batch_y, \n",
        "            np.count_nonzero(batch_x, axis = 1))\n",
        "    pbar.set_postfix(arc_accuracy = arc_accuracy, type_accuracy = type_accuracy, \n",
        "                     root_accuracy = root_accuracy)\n",
        "    arcs.append(arc_accuracy)\n",
        "    types.append(type_accuracy)\n",
        "    roots.append(root_accuracy)"
      ],
      "execution_count": 24,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "test minibatch loop: 100%|██████████| 120/120 [00:19<00:00,  6.69it/s, arc_accuracy=0.789, root_accuracy=0.875, type_accuracy=0.761]\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "ZKzFKsJXluKb",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 68
        },
        "outputId": "045e56a7-2516-4275-c4e5-2f356791b1c8"
      },
      "source": [
        "# report the mean of the per-batch metrics\n",
        "# NOTE(review): this is an unweighted mean over batches, not over tokens —\n",
        "# batches with fewer tokens count the same as full ones\n",
        "print('arc accuracy:', np.mean(arcs))\n",
        "print('types accuracy:', np.mean(types))\n",
        "print('root accuracy:', np.mean(roots))"
      ],
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "arc accuracy: 0.7082063481789892\n",
            "types accuracy: 0.6533524914247569\n",
            "root accuracy: 0.6677083333333333\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "trusted": true,
        "id": "Ay_U7CirluKf",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}