{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "HarryPotterNLP.ipynb",
      "provenance": [],
      "authorship_tag": "ABX9TyPLj1azwPCZErZECKhmj203",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/tomasonjo/blogs/blob/master/harry_potter/HarryPotterNLP.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "83XBTfrC3jss",
        "outputId": "e4e463b9-a26f-4954-fc5c-7b88fb87fdb6"
      },
      "source": [
        "# Install dependencies\n",
        "!pip install -U selenium neo4j spacy==2.3.2\n",
        "\n",
        "# Setup SpaCy and NeuralCoref\n",
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')\n",
        "\n",
        "# Remove any stale checkout before cloning (-rf handles the directory case)\n",
        "!rm -rf neuralcoref\n",
        "!git clone https://github.com/huggingface/neuralcoref.git\n",
        "!python -m spacy download en_core_web_sm\n",
        "\n",
        "%cd neuralcoref\n",
        "\n",
        "!pip install -r requirements.txt\n",
        "!pip install -e ."
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Requirement already up-to-date: selenium in /usr/local/lib/python3.7/dist-packages (3.141.0)\n",
            "Requirement already up-to-date: neo4j in /usr/local/lib/python3.7/dist-packages (4.3.3)\n",
            "Requirement already up-to-date: spacy==2.3.2 in /usr/local/lib/python3.7/dist-packages (2.3.2)\n",
            "Requirement already satisfied, skipping upgrade: urllib3 in /usr/local/lib/python3.7/dist-packages (from selenium) (1.24.3)\n",
            "Requirement already satisfied, skipping upgrade: pytz in /usr/local/lib/python3.7/dist-packages (from neo4j) (2018.9)\n",
            "Requirement already satisfied, skipping upgrade: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (0.4.1)\n",
            "Requirement already satisfied, skipping upgrade: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (1.0.0)\n",
            "Requirement already satisfied, skipping upgrade: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (4.41.1)\n",
            "Requirement already satisfied, skipping upgrade: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (1.0.5)\n",
            "Requirement already satisfied, skipping upgrade: thinc==7.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (7.4.1)\n",
            "Requirement already satisfied, skipping upgrade: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (1.19.5)\n",
            "Requirement already satisfied, skipping upgrade: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (0.8.2)\n",
            "Requirement already satisfied, skipping upgrade: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (2.23.0)\n",
            "Requirement already satisfied, skipping upgrade: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (1.1.3)\n",
            "Requirement already satisfied, skipping upgrade: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (1.0.5)\n",
            "Requirement already satisfied, skipping upgrade: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (57.0.0)\n",
            "Requirement already satisfied, skipping upgrade: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (3.0.5)\n",
            "Requirement already satisfied, skipping upgrade: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy==2.3.2) (2.0.5)\n",
            "Requirement already satisfied, skipping upgrade: importlib-metadata>=0.20; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy==2.3.2) (4.6.0)\n",
            "Requirement already satisfied, skipping upgrade: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==2.3.2) (2.10)\n",
            "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==2.3.2) (2021.5.30)\n",
            "Requirement already satisfied, skipping upgrade: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy==2.3.2) (3.0.4)\n",
            "Requirement already satisfied, skipping upgrade: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy==2.3.2) (3.7.4.3)\n",
            "Requirement already satisfied, skipping upgrade: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy==2.3.2) (3.4.1)\n",
            "Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n",
            "rm: cannot remove 'neuralcoref': Is a directory\n",
            "fatal: destination path 'neuralcoref' already exists and is not an empty directory.\n",
            "\u001b[31mERROR: You must give at least one requirement to install (see \"pip help install\")\u001b[0m\n",
            "Requirement already satisfied: en_core_web_sm==2.3.1 from https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.3.1/en_core_web_sm-2.3.1.tar.gz#egg=en_core_web_sm==2.3.1 in /usr/local/lib/python3.7/dist-packages (2.3.1)\n",
            "Requirement already satisfied: spacy<2.4.0,>=2.3.0 in /usr/local/lib/python3.7/dist-packages (from en_core_web_sm==2.3.1) (2.3.2)\n",
            "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (2.0.5)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (57.0.0)\n",
            "Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (0.8.2)\n",
            "Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.0.0)\n",
            "Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.1.3)\n",
            "Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (4.41.1)\n",
            "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.0.5)\n",
            "Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (3.0.5)\n",
            "Requirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (0.4.1)\n",
            "Requirement already satisfied: thinc==7.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (7.4.1)\n",
            "Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.19.5)\n",
            "Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (2.23.0)\n",
            "Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.0.5)\n",
            "Requirement already satisfied: importlib-metadata>=0.20; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (4.6.0)\n",
            "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (1.24.3)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (2.10)\n",
            "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (3.0.4)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (2021.5.30)\n",
            "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (3.4.1)\n",
            "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<2.4.0,>=2.3.0->en_core_web_sm==2.3.1) (3.7.4.3)\n",
            "\u001b[38;5;2m✔ Download and installation successful\u001b[0m\n",
            "You can now load the model via spacy.load('en_core_web_sm')\n",
            "/content/neuralcoref\n",
            "Requirement already satisfied: spacy<3.0.0,>=2.1.0 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 1)) (2.3.2)\n",
            "Requirement already satisfied: cython>=0.25 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 2)) (0.29.23)\n",
            "Requirement already satisfied: pytest in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 3)) (3.6.4)\n",
            "Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.0.0)\n",
            "Requirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (0.4.1)\n",
            "Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.1.3)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (57.0.0)\n",
            "Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (4.41.1)\n",
            "Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.19.5)\n",
            "Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.0.5)\n",
            "Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (0.8.2)\n",
            "Requirement already satisfied: thinc==7.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (7.4.1)\n",
            "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (2.0.5)\n",
            "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.0.5)\n",
            "Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (2.23.0)\n",
            "Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (3.0.5)\n",
            "Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (1.4.0)\n",
            "Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (1.15.0)\n",
            "Requirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (0.7.1)\n",
            "Requirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (21.2.0)\n",
            "Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (1.10.0)\n",
            "Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from pytest->-r requirements.txt (line 3)) (8.8.0)\n",
            "Requirement already satisfied: importlib-metadata>=0.20; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (4.6.0)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (2.10)\n",
            "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (3.0.4)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (2021.5.30)\n",
            "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (1.24.3)\n",
            "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (3.4.1)\n",
            "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->-r requirements.txt (line 1)) (3.7.4.3)\n",
            "Obtaining file:///content/neuralcoref\n",
            "Requirement already satisfied: numpy>=1.15.0 in /usr/local/lib/python3.7/dist-packages (from neuralcoref==4.0) (1.19.5)\n",
            "Requirement already satisfied: boto3 in /usr/local/lib/python3.7/dist-packages (from neuralcoref==4.0) (1.17.112)\n",
            "Requirement already satisfied: requests<3.0.0,>=2.13.0 in /usr/local/lib/python3.7/dist-packages (from neuralcoref==4.0) (2.23.0)\n",
            "Requirement already satisfied: spacy<3.0.0,>=2.1.0 in /usr/local/lib/python3.7/dist-packages (from neuralcoref==4.0) (2.3.2)\n",
            "Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from boto3->neuralcoref==4.0) (0.10.0)\n",
            "Requirement already satisfied: botocore<1.21.0,>=1.20.112 in /usr/local/lib/python3.7/dist-packages (from boto3->neuralcoref==4.0) (1.20.112)\n",
            "Requirement already satisfied: s3transfer<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from boto3->neuralcoref==4.0) (0.4.2)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->neuralcoref==4.0) (2.10)\n",
            "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->neuralcoref==4.0) (3.0.4)\n",
            "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->neuralcoref==4.0) (1.24.3)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.13.0->neuralcoref==4.0) (2021.5.30)\n",
            "Requirement already satisfied: preshed<3.1.0,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (3.0.5)\n",
            "Requirement already satisfied: catalogue<1.1.0,>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (1.0.0)\n",
            "Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (4.41.1)\n",
            "Requirement already satisfied: thinc==7.4.1 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (7.4.1)\n",
            "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (1.0.5)\n",
            "Requirement already satisfied: wasabi<1.1.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (0.8.2)\n",
            "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (2.0.5)\n",
            "Requirement already satisfied: srsly<1.1.0,>=1.0.2 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (1.0.5)\n",
            "Requirement already satisfied: plac<1.2.0,>=0.9.6 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (1.1.3)\n",
            "Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (57.0.0)\n",
            "Requirement already satisfied: blis<0.5.0,>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (0.4.1)\n",
            "Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.7/dist-packages (from botocore<1.21.0,>=1.20.112->boto3->neuralcoref==4.0) (2.8.1)\n",
            "Requirement already satisfied: importlib-metadata>=0.20; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (4.6.0)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil<3.0.0,>=2.1->botocore<1.21.0,>=1.20.112->boto3->neuralcoref==4.0) (1.15.0)\n",
            "Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (3.4.1)\n",
            "Requirement already satisfied: typing-extensions>=3.6.4; python_version < \"3.8\" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata>=0.20; python_version < \"3.8\"->catalogue<1.1.0,>=0.0.7->spacy<3.0.0,>=2.1.0->neuralcoref==4.0) (3.7.4.3)\n",
            "Installing collected packages: neuralcoref\n",
            "  Found existing installation: neuralcoref 4.0\n",
            "    Can't uninstall 'neuralcoref'. No files were found to uninstall.\n",
            "  Running setup.py develop for neuralcoref\n",
            "Successfully installed neuralcoref\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1hjxRelzWxJV"
      },
      "source": [
        "## Restart runtime in order for SpaCy and NeuralCoref to work"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "fEJ4mPFrWvZ8",
        "outputId": "fcb7e155-3125-44fd-9c60-ac481409b544"
      },
      "source": [
        "# Setup selenium\n",
        "!apt-get update\n",
        "!apt install chromium-chromedriver\n",
        "!cp /usr/lib/chromium-browser/chromedriver /usr/bin\n",
        "import sys\n",
        "sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')\n",
        "from selenium import webdriver\n",
        "# Headless Chrome flags required to run inside Colab's container\n",
        "chrome_options = webdriver.ChromeOptions()\n",
        "chrome_options.add_argument('--headless')\n",
        "chrome_options.add_argument('--no-sandbox')\n",
        "chrome_options.add_argument('--disable-dev-shm-usage')\n",
        "# Pass options= ; the chrome_options= kwarg is deprecated (see warning in the\n",
        "# original output) and removed in Selenium 4\n",
        "wd = webdriver.Chrome('chromedriver', options=chrome_options)"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "\r0% [Working]\r            \rGet:1 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]\n",
            "Hit:2 https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/ InRelease\n",
            "Ign:3 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64  InRelease\n",
            "Hit:4 http://ppa.launchpad.net/c2d4u.team/c2d4u4.0+/ubuntu bionic InRelease\n",
            "Hit:5 http://archive.ubuntu.com/ubuntu bionic InRelease\n",
            "Ign:6 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64  InRelease\n",
            "Hit:7 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64  Release\n",
            "Get:8 http://archive.ubuntu.com/ubuntu bionic-updates InRelease [88.7 kB]\n",
            "Hit:9 https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64  Release\n",
            "Hit:10 http://ppa.launchpad.net/cran/libgit2/ubuntu bionic InRelease\n",
            "Hit:11 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease\n",
            "Get:12 http://archive.ubuntu.com/ubuntu bionic-backports InRelease [74.6 kB]\n",
            "Hit:13 http://ppa.launchpad.net/graphics-drivers/ppa/ubuntu bionic InRelease\n",
            "Fetched 252 kB in 3s (88.1 kB/s)\n",
            "Reading package lists... Done\n",
            "Reading package lists... Done\n",
            "Building dependency tree       \n",
            "Reading state information... Done\n",
            "chromium-chromedriver is already the newest version (91.0.4472.101-0ubuntu0.18.04.1).\n",
            "0 upgraded, 0 newly installed, 0 to remove and 59 not upgraded.\n",
            "cp: '/usr/lib/chromium-browser/chromedriver' and '/usr/bin/chromedriver' are the same file\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:12: DeprecationWarning: use options instead of chrome_options\n",
            "  if sys.path[0] == '':\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "y48YBwODZLU_"
      },
      "source": [
        "def enrich_single_item(item):\n",
        "  # Scrape a single infobox field (e.g. 'blood', 'house') from the character\n",
        "  # page currently loaded in the global selenium driver `wd`.\n",
        "  # Returns the cleaned text, or None when the field is absent or unreadable.\n",
        "  try:\n",
        "    # Find the HTML element with required data\n",
        "    div = wd.find_element_by_xpath(f\"//div[@data-source = '{item}']\")\n",
        "    # Extract relevant data from \"a\" or \"div\" tag\n",
        "    try:\n",
        "      result = div.find_element_by_tag_name(\"a\").text.split('[')[0].strip()\n",
        "      # An empty result raises IndexError here, which (like the explicit\n",
        "      # raise) drops us into the fallback that reads the div's own text\n",
        "      if result[0] == '[':\n",
        "        raise Exception\n",
        "    except:\n",
        "      result = div.find_element_by_tag_name(\"div\").text.split('[')[0].strip()\n",
        "    return result\n",
        "  except:\n",
        "    # Field not present on this page (or lookup failed) -- treat as missing\n",
        "    return None\n",
        "  "
      ],
      "execution_count": 3,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bbAt2RBQLiTb"
      },
      "source": [
        "# Harry Potter fandom page scraping\n",
        "\n",
        "We will use Selenium for web scraping. As mentioned, we will begin by scraping the characters in the Harry Potter and the Philosopher's Stone book. The list of characters by chapter is available under the CC-BY-SA license, so we don't have to worry about any copyright infringement. Additionally, each of the characters has a web page with detailed information about the character. For example, if you check out the Hermione Granger page, you can observe a structured table with additional information. We will use the alias section for the entity extraction and add other character details like house and blood type to enrich our knowledge graph."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sPV9wtbaP4zx"
      },
      "source": [
        "import time\n",
        "\n",
        "# Infobox fields that hold a single value and share the same extraction logic\n",
        "SINGLE_VALUE_FIELDS = ['blood', 'nationality', 'species', 'house', 'gender']\n",
        "\n",
        "def get_characters(url):\n",
        "  \"\"\"Scrape a character-index page and return {'chapter_N': [character dicts]},\n",
        "  each character enriched with aliases, loyalty, family and single-value fields.\"\"\"\n",
        "  # Get the list of characters by chapter\n",
        "  wd.get(url)\n",
        "  character_dict = dict()\n",
        "  elem = wd.find_element_by_class_name(\"mw-parser-output\")\n",
        "\n",
        "  # Locate characters by chapter: each table on the page is one chapter\n",
        "  tables = elem.find_elements_by_tag_name('table')\n",
        "  for i, chapter in enumerate(tables):\n",
        "    list_of_characters = []\n",
        "    characters = chapter.find_elements_by_tag_name('a')\n",
        "    for character in characters:\n",
        "      # Links without a title attribute are not character pages\n",
        "      if not character.get_attribute('title'):\n",
        "        continue\n",
        "      list_of_characters.append({'title': character.get_attribute('title'), 'url': character.get_attribute('href')})\n",
        "    character_dict['chapter_' + str(i + 1)] = list_of_characters\n",
        "  # Enrich characters with additional information from their own pages\n",
        "  for chapter in character_dict:\n",
        "    for index, character in enumerate(character_dict[chapter]):\n",
        "      # Rate limit sleep\n",
        "      time.sleep(1)\n",
        "      # Get the character page with selenium\n",
        "      wd.get(character['url'])\n",
        "      # Enrich aliases\n",
        "      try:\n",
        "        alias_div = wd.find_element_by_xpath(\"//div[@data-source = 'alias']\")\n",
        "        aliases = alias_div.find_elements_by_tag_name('li')\n",
        "        result = []\n",
        "        for a in aliases:\n",
        "          # Ignore under the cloak-guise and the name he told\n",
        "          if \"disguise\" in a.text or \"the name he told\" in a.text:\n",
        "            continue\n",
        "          alias = a.text.split('[')[0].split('(')[0].strip()\n",
        "          result.append(alias)\n",
        "        character_dict[chapter][index]['aliases'] = result\n",
        "      except:\n",
        "        pass  # page has no alias section\n",
        "      # Enrich loyalties\n",
        "      try:\n",
        "        loyalty_div = wd.find_element_by_xpath(\"//div[@data-source = 'loyalty']\")\n",
        "        loyalties = loyalty_div.find_elements_by_tag_name('li')\n",
        "        result = []\n",
        "        for l in loyalties:\n",
        "          loyalty = l.text.split('[')[0].split('(')[0].strip()\n",
        "          result.append(loyalty)\n",
        "        character_dict[chapter][index]['loyalty'] = result\n",
        "      except:\n",
        "        pass  # page has no loyalty section\n",
        "      # Enrich family relationships, formatted like 'Lily Potter (mother)'\n",
        "      try:\n",
        "        family_div = wd.find_element_by_xpath(\"//div[@data-source = 'family']\")\n",
        "        relationships = family_div.find_elements_by_tag_name('li')\n",
        "        result = []\n",
        "        for r in relationships:\n",
        "          rel = r.text.split('[')[0].split('(')[0].strip()\n",
        "          rel_type = r.text.split('(')[-1].split(')')[0].split('[')[0]\n",
        "          result.append({'person':rel, 'type': rel_type})\n",
        "        character_dict[chapter][index]['family'] = result\n",
        "      except:\n",
        "        pass  # page has no family section\n",
        "      # Enrich single-value fields (blood, nationality, species, house, gender)\n",
        "      for field in SINGLE_VALUE_FIELDS:\n",
        "        character_dict[chapter][index][field] = enrich_single_item(field)\n",
        "  return character_dict"
      ],
      "execution_count": 4,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "vHyfyNZAL-He"
      },
      "source": [
        "We want to ignore all of Harry's aliases used under the disguise of the Polyjuice Potion. It seems Harry also told Stanley Shunpike that he was Neville Longbottom, which we will also skip."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Qbuweyr_dvXd"
      },
      "source": [
        "# Scrape the Philosopher's Stone character index; slow by design\n",
        "# (1 second sleep per character page to rate-limit the fandom wiki)\n",
        "character_dict = get_characters(\"https://harrypotter.fandom.com/wiki/Harry_Potter_and_the_Philosopher%27s_Stone_(character_index)\")"
      ],
      "execution_count": 5,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KZtmiAGTMBY1"
      },
      "source": [
        "Before we continue with named entity extraction from the book, we will store the scraped information about the characters to Neo4j."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mXJB0NZwjDRD"
      },
      "source": [
        "import os\n",
        "from neo4j import GraphDatabase\n",
        "# Connection details for your Neo4j instance.\n",
        "# Prefer environment variables over hardcoding credentials in the notebook;\n",
        "# the literals below are only the throwaway demo-sandbox defaults.\n",
        "# Will not work with a localhost bolt url\n",
        "host = os.environ.get('NEO4J_HOST', 'bolt://3.239.224.171:7687')\n",
        "user = os.environ.get('NEO4J_USER', 'neo4j')\n",
        "password = os.environ.get('NEO4J_PASSWORD', 'rainbow-mittens-track')\n",
        "driver = GraphDatabase.driver(host, auth=(user, password))"
      ],
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hTuwAqhF6zOG"
      },
      "source": [
        "# Cypher import query: for each scraped character row, upsert a Character\n",
        "# node, set its scalar properties, and link it to its House, loyalty Groups\n",
        "# and family members. FOREACH over a CASE-built list is Cypher's idiom for a\n",
        "# conditional MERGE (skips rows where house is null).\n",
        "entity_query = \"\"\"\n",
        "UNWIND $data as row\n",
        "MERGE (c:Character{name:row.title})\n",
        "SET c.url = row.url,\n",
        "    c.aliases = row.aliases,\n",
        "    c.blood = row.blood,\n",
        "    c.nationality = row.nationality,\n",
        "    c.species = row.species,\n",
        "    c.gender = row.gender\n",
        "FOREACH (h in CASE WHEN row.house IS NOT NULL THEN [1] ELSE [] END | MERGE (h1:House{name:row.house}) MERGE (c)-[:BELONGS_TO]->(h1))\n",
        "FOREACH (l in row.loyalty | MERGE (g:Group{name:l}) MERGE (c)-[:LOYAL_TO]->(g))\n",
        "FOREACH (f in row.family | MERGE (f1:Character{name:f.person}) MERGE (c)-[t:FAMILY_MEMBER]->(f1) SET t.type = f.type)    \n",
        "\n",
        "\"\"\"\n",
        "# Import one chapter's worth of characters per query invocation\n",
        "with driver.session() as session:\n",
        "  for chapter in character_dict:\n",
        "    session.run(entity_query, {'data': character_dict[chapter]})\n"
      ],
      "execution_count": 7,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "bZQcLB0tPj7A"
      },
      "source": [
        "def get_character_dict(chapter):\n",
        "  super_list = list()\n",
        "  dicts = [character_dict['chapter_' + str(i)] for i in range(1,chapter + 1)]\n",
        "  for d in dicts:\n",
        "    for item in d:\n",
        "      super_list.append(item)\n",
        "  return super_list"
      ],
      "execution_count": 8,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Y_ux1TNM0xbs",
        "outputId": "e3627365-eaeb-4fc8-9191-ee37e7078389"
      },
      "source": [
        "import spacy\n",
        "from spacy.matcher import Matcher\n",
        "import neuralcoref\n",
        "\n",
        "nlp = spacy.load('en_core_web_sm')\n",
        "neuralcoref.add_to_pipe(nlp)\n",
        "\n",
        "doc1 = nlp('My sister has a dog. She loves him.')\n",
        "print(doc1._.coref_clusters)"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: spacy.morphology.Morphology size changed, may indicate binary incompatibility. Expected 104 from C header, got 112 from PyObject\n",
            "  return f(*args, **kwds)\n",
            "/usr/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: spacy.vocab.Vocab size changed, may indicate binary incompatibility. Expected 96 from C header, got 112 from PyObject\n",
            "  return f(*args, **kwds)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "[My sister: [My sister, She], a dog: [a dog, him]]\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "MmJr_TUZMS-U"
      },
      "source": [
        "First of all, we have to get our hands on the text from the book. I've found a GitHub repository that contains the text of the first four Harry Potter books. There is no license attached to the data, so I will assume we can use the data for educational purposes within the limits of fair use. If you actually want to read the book, please go and buy it.\n",
        "Getting the text from a GitHub file is quite easy:"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aiL14Fjetfz7"
      },
      "source": [
        "import requests\n",
        "\n",
        "def get_text(url):\n",
        "  try:\n",
        "    return requests.get(url).text\n",
        "  except requests.exceptions.RequestException:\n",
        "    print(\"No text was found\")\n",
        "    return None"
      ],
      "execution_count": 10,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "G6ztaeJ_MZ7-"
      },
      "source": [
        "We have to be careful to provide the link to the raw text content, and it should work. When I first did the entity extraction, I forgot to use the co-reference resolution technique beforehand. Co-reference resolution is the task of determining linguistic expressions that refer to the same real-world entity. In simple terms, we replace the pronouns with the referenced entities. For a real-world example, check out my Information extraction pipeline post. I've been searching for open-source co-reference resolution models, but as far as I know, there are only two. The first is NeuralCoref that works on top of SpaCy, and AllenNLP provides the second model. Since I have already used NeuralCoref before, I decided to look at how the AllenNLP model works. Unfortunately, I quickly ran out of memory (Colab has 16GB RAM) when I input a whole chapter into the model. Then I sliced a chapter into a list of sentences, but it worked really slowly, probably due to using the BERT framework. So, I defaulted to using NeuralCoref, which can easily handle a whole chapter and works faster. I have copied the code I have already used before:"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ao2_gk2t1E6H"
      },
      "source": [
        "def coref_resolution(text):\n",
        "    \"\"\"Function that executes coreference resolution on a given text\"\"\"\n",
        "    doc = nlp(text)\n",
        "    # fetches tokens with whitespaces from spacy document\n",
        "    tok_list = list(token.text_with_ws for token in doc)\n",
        "    for cluster in doc._.coref_clusters:\n",
        "        # get tokens from representative cluster name\n",
        "        cluster_main_words = set(cluster.main.text.split(' '))\n",
        "        for coref in cluster:\n",
        "            if coref != cluster.main:  # if coreference element is not the representative element of that cluster\n",
        "                if coref.text != cluster.main.text and bool(set(coref.text.split(' ')).intersection(cluster_main_words)) == False:\n",
        "                    # if coreference element text and representative element text are not equal and none of the coreference element words are in representative element. This was done to handle nested coreference scenarios\n",
        "                    tok_list[coref.start] = cluster.main.text + \\\n",
        "                        doc[coref.end-1].whitespace_\n",
        "                    for i in range(coref.start+1, coref.end):\n",
        "                        tok_list[i] = \"\"\n",
        "\n",
        "    return \"\".join(tok_list)"
      ],
      "execution_count": 11,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xhPATSAxMea2"
      },
      "source": [
        "Now that we have our text ready, it is time to extract mentioned characters from the text.\n",
        "# Entity recognition with SpaCy's rule-based matching\n",
        "First, I wanted to be cool and use a Named Entity Recognition model. I've tried models from SpaCy, HuggingFace, Flair, and even Stanford NLP. None of them worked well enough to satisfy my requirements. So instead of training my model, I decided to use SpaCy's rule-based pattern matching feature. We already know which characters we are looking for based on the data we scraped from the HP fandom site. Now we just have to find a way to match them in the text as perfectly as possible. We have to define the text patterns for each of the characters."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uTYPaNjNtzIV"
      },
      "source": [
        "def get_matcher_patterns(character):\n",
        "  matcher_pattern = []\n",
        "  stop_words = ['of', 'the', 'at', 'family', 'keeper', 'wizard', 'fat', 'de', 'hogwarts', 'hotel', 'owner', 'express']\n",
        "  parts_of_name = [el for el in character['title'].split(' ') if len(el) > 2]\n",
        "  # Append the whole pattern\n",
        "  matcher_pattern.append([{\"LOWER\": n.lower(), \"IS_TITLE\": True} for n in parts_of_name])\n",
        "  \n",
        "  # Append parts of names\n",
        "  if not \"'\" in character['title']: # Skip names like Vernon Dursley's secretary\n",
        "    for n in parts_of_name:\n",
        "      if n.lower() in stop_words: # Skip appending stop words\n",
        "        continue\n",
        "      matcher_pattern.append([{\"LOWER\": n.lower(), \"IS_TITLE\": True}])\n",
        "      # Special case for Ronald Weasley -> Also add Ron\n",
        "      if n == \"Ronald\":\n",
        "        matcher_pattern.append([{\"LOWER\": \"ron\", \"IS_TITLE\": True}])\n",
        "  return matcher_pattern"
      ],
      "execution_count": 12,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Pj8dCXWGMof2"
      },
      "source": [
        "There is an issue we must overcome. It mostly happens when a person is referenced by their last name, and there are many characters with that last name. We must come up with a generic solution for entity disambiguation."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "P_p_6YPT4t_K"
      },
      "source": [
        "import random\n",
        "import sys\n",
        "\n",
        "hardcoded_options = dict()\n",
        "hardcoded_options['Malfoy'] = ['Draco Malfoy']\n",
        "hardcoded_options['Patil'] = ['Padma Patil', 'Parvati Patil']\n",
        "hardcoded_options['Tom'] = ['Tom']\n",
        "\n",
        "def handle_multiple_options(result, doc):\n",
        "  needs_deduplication = [(i,x) for i,x in enumerate(result) if len(x['string_id']) > 1]\n",
        "  for index, multiple_options in needs_deduplication:\n",
        "    # Special logic for Dursleys, if there is Mr. then Vernon, if Mrs. then Petunia\n",
        "    prefix = doc[multiple_options['start']-3 : multiple_options['start']]\n",
        "    if (multiple_options['text'] == 'Dursley') and (\"Mr.\" in prefix.text):\n",
        "      resolution = [\"Vernon Dursley\"]\n",
        "    elif (multiple_options['text'] == 'Dursley') and (\"Mrs.\" in prefix.text):\n",
        "      resolution = [\"Petunia Dursley\"]\n",
        "    # Find nearest entity\n",
        "    else:\n",
        "      end_char = multiple_options['end']\n",
        "      distance = sys.maxsize\n",
        "      resolution = []\n",
        "      for possible_option in result:\n",
        "        # Skip multiple options and entities that don't have any of the multiple options\n",
        "        if (not len(possible_option['string_id']) == 1) or (not possible_option['string_id'][0] in multiple_options['string_id']):\n",
        "          continue\n",
        "        new_distance = abs(multiple_options['end'] - possible_option['end'])\n",
        "        if new_distance < distance:\n",
        "          distance = new_distance\n",
        "          resolution = possible_option['string_id']\n",
        "      \n",
        "      if not resolution:\n",
        "        try:\n",
        "          ho = hardcoded_options[multiple_options['text']]\n",
        "          if len(ho) == 1:\n",
        "            resolution = ho\n",
        "          else:\n",
        "            resolution = [random.choice(ho)]\n",
        "        except KeyError:\n",
        "          print(f\"no way to disambiguate {multiple_options['text']} from options: {multiple_options['string_id']}\")\n",
        "    \n",
        "    result[index]['string_id'] = resolution\n",
        "  return result"
      ],
      "execution_count": 13,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8iPA44rqNMQX"
      },
      "source": [
        "This is a bit longer function. We start by identifying which entities require disambiguation. I introduced a unique logic disambiguating the \"Dursley\" term. If \"Mr.\" is present before the \"Dursley\" term, then we reference Vernon, and if \"Mrs.\" is present, we choose \"Petunia\". Next, we have a more generic solution. The algorithm assigns the reference to the nearest neighbor out of the options. For example, suppose we can choose between \"Harry Potter\", \"James Potter\", and \"Lily Potter\". In that case, the algorithm identifies the nearest of those three entities in the text and assigns the current item its value. There are some exceptions where their full or first name is not referenced within the same chapter, and I have added hardcoded options as a last resort.\n",
        "# Infer relationships between characters\n",
        "We are finished with the hard part. Inferring relationships between characters is very simple. First, we need to define the distance threshold of interaction or relation between two characters. As mentioned, we will use the same distance threshold as was used in the GoT extraction. That is, if two characters co-occur within the distance of 14 words, then we assume they must have interacted. I have also merged entities not to skew results. What do I mean by joining entities? Suppose we have the following two sentences:\n",
        "\"Harry was having a good day. He went to talk to Dumbledore in the afternoon.\"\n",
        "Our entity extraction process will identify three entities, \"Harry\", \"He\" as a reference to Harry, and \"Dumbledore\". If we took the naive approach, we could infer two interactions between Harry and Dumbledore as two references of \"Harry\" are close to \"Dumbledore\". However, I want to avoid that, so I have merged entities in a sequence that refers to the same character as a single entity. Finally, we have to count the number of interactions between the pairs of characters."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Exid13U8U_Vv"
      },
      "source": [
        "from collections import Counter\n",
        "\n",
        "def get_distances(result, distance_threshold):\n",
        "  #sort by start character\n",
        "  result = sorted(result, key=lambda k: k['start'])\n",
        "  compact_entities = []\n",
        "  # Merge entities\n",
        "  for entity in result:\n",
        "    # If the same entity occurs, prolong the end \n",
        "    if (len(compact_entities) > 0) and (compact_entities[-1]['string_id'] == entity['string_id']):\n",
        "      compact_entities[-1]['end'] = entity['end']\n",
        "    else:\n",
        "      compact_entities.append(entity)\n",
        "  distances = list()\n",
        "  # Iterate over all entities\n",
        "  for index, source in enumerate(compact_entities[:-1]):\n",
        "    # Compare with entities that come after the given one\n",
        "    for target in compact_entities[index + 1:]:\n",
        "      if (source['string_id'] != target['string_id']) and (abs(source['end'] - target['start']) < distance_threshold):\n",
        "        link = sorted([source['string_id'][0], target['string_id'][0]])\n",
        "        distances.append(link)\n",
        "      else:\n",
        "        break\n",
        "  # Count the number of interactions\n",
        "  return Counter(map(tuple, distances))\n",
        "  \n",
        "\n"
      ],
      "execution_count": 14,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mC7Zz1J4NS1d"
      },
      "source": [
        "# Store results to Neo4j graph database\n",
        "We have extracted the interaction network between characters, and the only thing left is to store the results into a graph database. The import query is very straightforward as we are dealing with a monopartite network.\n",
        "If you are using the Colab notebook I have prepared, then it would be easiest to create either a Neo4j Sandbox or Aura database instance to store the results."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nhSSNmDTS2e2"
      },
      "source": [
        "def store_to_neo4j(distances):\n",
        "  data = [{'source': el[0], 'target': el[1], 'weight': distances[el]} for el in distances]\n",
        "  with driver.session() as session:\n",
        "    session.run(\"\"\"\n",
        "    UNWIND $data as row\n",
        "    MERGE (c:Character{name:row.source})\n",
        "    MERGE (t:Character{name:row.target})\n",
        "    MERGE (c)-[i:INTERACTS]-(t)\n",
        "    SET i.weight = coalesce(i.weight,0) + row.weight\n",
        "    \"\"\", {'data': data})"
      ],
      "execution_count": 15,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Ox_L2XypNWiu"
      },
      "source": [
        "Put it all together"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "00XAYT0cMXXt"
      },
      "source": [
        "url = \"https://raw.githubusercontent.com/amephraim/nlp/master/texts/J.%20K.%20Rowling%20-%20Harry%20Potter%201%20-%20Sorcerer's%20Stone.txt\"\n",
        "text = get_text(url)\n",
        "chapters = text.split(\"CHAPTER\")[1:]\n",
        "def get_characters_in_chapter(chapter):\n",
        "  c = chapters[chapter - 1]\n",
        "  # Prepare characters matcher\n",
        "  matcher = Matcher(nlp.vocab)\n",
        "  for character in get_character_dict(chapter):\n",
        "    matcher_pattern = get_matcher_patterns(character)\n",
        "    matcher.add(character['title'], matcher_pattern)\n",
        "\n",
        "  # Prepare text\n",
        "  lines = c.split('\\n')[1:]\n",
        "  lines = list(filter(None, lines))\n",
        "  chapter_title = lines[0]\n",
        "  print(chapter_title)\n",
        "  text = \" \".join(lines[1:])\n",
        "  \n",
        "  # Run coreference resolution\n",
        "  text = coref_resolution(text)\n",
        "\n",
        "  # Find matches\n",
        "  doc = nlp(text)\n",
        "  matches = matcher(doc)\n",
        "  result = []\n",
        "  for match_id, start, end in matches:\n",
        "      string_id = nlp.vocab.strings[match_id]  # Get string representation\n",
        "      span = doc[start:end]  # The matched span\n",
        "\n",
        "      # Get predicates for correct result appendment\n",
        "      exists_longer = [(start == e['start'] and end < e['end']) or (start > e['start'] and end == e['end']) for e in result]\n",
        "      same = [start == e['start'] and end == e['end'] for e in result]\n",
        "      shorter_end = [start == e['start'] and end > e['end'] for e in result]\n",
        "      shorter_start = [start < e['start'] and end == e['end'] for e in result]\n",
        "      \n",
        "      # Append to results\n",
        "      if any(exists_longer): # If there is a longer version of the given entity already in results\n",
        "        continue\n",
        "      \n",
        "      if any(shorter_end): # If there is any entity with the same start span but has shorter end\n",
        "        del result[shorter_end.index(True)]\n",
        "        result.append({'string_id': [string_id], 'start': start, 'end': end, 'text': span.text}) \n",
        "      elif any(shorter_start): # If there is any entity with the same end span but has shorter start\n",
        "        del result[shorter_start.index(True)]\n",
        "        result.append({'string_id': [string_id], 'start': start, 'end': end, 'text': span.text}) \n",
        "      elif not any(same): # If not exists yet\n",
        "        result.append({'string_id': [string_id], 'start': start, 'end': end, 'text': span.text})\n",
        "      else: # Add more entities to a single span\n",
        "        i = same.index(True)\n",
        "        result[i]['string_id'].append(string_id)\n",
        "  \n",
        "  # Handle results where there are multiple options\n",
        "  handle_multiple_options(result, doc)\n",
        "  return result\n",
        "\n",
        "\n"
      ],
      "execution_count": 16,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7yubxuhcNZL5"
      },
      "source": [
        "Run the code for each chapter of the book"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "YM1pT6Ul9Hka",
        "outputId": "ad564229-bbae-42fe-9e3b-cacbeca2d328"
      },
      "source": [
        "for c in range(1,len(chapters) + 1):\n",
        "  end = get_characters_in_chapter(c)\n",
        "  distances = get_distances(end, 14)\n",
        "  store_to_neo4j(distances)"
      ],
      "execution_count": 17,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "THE BOY WHO LIVED\n",
            "THE VANISHING GLASS\n",
            "THE LETTERS FROM NO ONE\n",
            "THE KEEPER OF THE KEYS\n",
            "DIAGON ALLEY\n",
            "THE JOURNEY FROM PLATFORM NINE AND THREE-QUARTERS\n",
            "THE SORTING HAT\n",
            "THE POTIONS MASTER\n",
            "THE MIDNIGHT DUEL\n",
            "HALLOWEEN\n",
            "QUIDDITCH\n",
            "THE MIRROR OF ERISED\n",
            "NICOLAS FLAMEL\n",
            "NORBERT THE NORWEGIAN RIDGEBACK\n",
            "THE FORIBIDDEN FOREST\n",
            "THROUGH THE TRAPDOOR\n",
            "THE MAN WITH TWO FACES\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2AqzbcxvepdF"
      },
      "source": [
        ""
      ],
      "execution_count": 17,
      "outputs": []
    }
  ]
}