{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "name": "NLU_tokenization_example.ipynb",
   "provenance": [
    {
     "file_id": "1pgqoRJ6yGWbTLWdLnRvwG5DLSU3rxuMq",
     "timestamp": 1599401652794
    },
    {
     "file_id": "1JrlfuV2jNGTdOXvaWIoHTSf6BscDMkN7",
     "timestamp": 1599401257319
    },
    {
     "file_id": "1svpqtC3cY6JnRGeJngIPl2raqxdowpyi",
     "timestamp": 1599400881246
    },
    {
     "file_id": "1tW833T3HS8F5Lvn6LgeDd5LW5226syKN",
     "timestamp": 1599398724652
    },
    {
     "file_id": "1CYzHfQyFCdvIOVO2Z5aggVI9c0hDEOrw",
     "timestamp": 1599354735581
    }
   ],
   "collapsed_sections": []
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  }
 },
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "rBXrqlGEYA8G"
   },
   "source": [
    "![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)\n",
    "\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/text_pre_processing_and_cleaning/NLU_tokenization_example.ipynb)\n",
    "\n",
     "# Tokenization with NLU\n",
     "\n",
     "Tokenization is the process of splitting input texts into segments which correspond to words.\n",
     "\n",
     "E.g. 'He was hungry' consists of the tokens [He, was, hungry]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 1. Install Java and NLU"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "M2-GiYL6xurJ",
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "status": "ok",
     "timestamp": 1619911492789,
     "user_tz": -120,
     "elapsed": 119255,
     "user": {
      "displayName": "Christian Kasim Loan",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64",
      "userId": "14469489166467359317"
     }
    },
    "outputId": "789fae0e-ed69-45ce-d5c4-cd3e5bdc5000"
   },
   "source": [
    "!wget https://setup.johnsnowlabs.com/nlu/colab.sh -O - | bash\n",
    "  \n",
    "\n",
    "import nlu"
   ],
   "execution_count": null,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "--2021-05-01 23:22:53--  https://raw.githubusercontent.com/JohnSnowLabs/nlu/master/scripts/colab_setup.sh\n",
      "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
      "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 1671 (1.6K) [text/plain]\n",
      "Saving to: ‘STDOUT’\n",
      "\n",
      "\r-                     0%[                    ]       0  --.-KB/s               Installing  NLU 3.0.0 with  PySpark 3.0.2 and Spark NLP 3.0.1 for Google Colab ...\n",
      "\r-                   100%[===================>]   1.63K  --.-KB/s    in 0.001s  \n",
      "\n",
      "2021-05-01 23:22:54 (1.43 MB/s) - written to stdout [1671/1671]\n",
      "\n",
      "\u001B[K     |████████████████████████████████| 204.8MB 73kB/s \n",
      "\u001B[K     |████████████████████████████████| 153kB 54.2MB/s \n",
      "\u001B[K     |████████████████████████████████| 204kB 22.6MB/s \n",
      "\u001B[K     |████████████████████████████████| 204kB 51.6MB/s \n",
      "\u001B[?25h  Building wheel for pyspark (setup.py) ... \u001B[?25l\u001B[?25hdone\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "N_CL8HZ8Ydry"
   },
   "source": [
     "## 2. Load Model and tokenize sample string"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "j2ZZZvr1uGpx",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 131
    },
    "executionInfo": {
     "status": "ok",
     "timestamp": 1619911534182,
     "user_tz": -120,
     "elapsed": 160635,
     "user": {
      "displayName": "Christian Kasim Loan",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64",
      "userId": "14469489166467359317"
     }
    },
    "outputId": "449db32a-a3aa-45ac-db15-c7da025a8dcd"
   },
   "source": [
    "import nlu\n",
    "pipe = nlu.load('tokenize')\n",
    "pipe.predict('He was suprised by the diversity of NLU')"
   ],
   "execution_count": null,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "sentence_detector_dl download started this may take some time.\n",
      "Approximate size to download 354.6 KB\n",
      "[OK!]\n"
     ],
     "name": "stdout"
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>document</th>\n",
       "      <th>sentence</th>\n",
       "      <th>token</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>He was suprised by the diversity of NLU</td>\n",
       "      <td>[He was suprised by the diversity of NLU]</td>\n",
       "      <td>[He, was, suprised, by, the, diversity, of, NLU]</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                  document  ...                                             token\n",
       "0  He was suprised by the diversity of NLU  ...  [He, was, suprised, by, the, diversity, of, NLU]\n",
       "\n",
       "[1 rows x 3 columns]"
      ]
     },
     "metadata": {
      "tags": []
     },
     "execution_count": 2
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "IRSEzc-RCceu"
   },
   "source": [
     "# 3. Get one row per token by setting output_level to token."
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "9bujAZtOCfRW",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 297
    },
    "executionInfo": {
     "status": "ok",
     "timestamp": 1619911535143,
     "user_tz": -120,
     "elapsed": 161588,
     "user": {
      "displayName": "Christian Kasim Loan",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64",
      "userId": "14469489166467359317"
     }
    },
    "outputId": "577b54eb-681b-4878-dd77-af39cb493b62"
   },
   "source": [
    "pipe.predict('He was suprised by the diversity of NLU', output_level='token')"
   ],
   "execution_count": null,
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>token</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>He</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>was</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>suprised</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>by</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>the</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>diversity</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>of</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>NLU</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       token\n",
       "0         He\n",
       "0        was\n",
       "0   suprised\n",
       "0         by\n",
       "0        the\n",
       "0  diversity\n",
       "0         of\n",
       "0        NLU"
      ]
     },
     "metadata": {
      "tags": []
     },
     "execution_count": 3
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "uXb-FMA6mX13"
   },
   "source": [
     "# 4. Check out possible configurations for the Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "9qUF7jPlme-R",
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "executionInfo": {
     "status": "ok",
     "timestamp": 1619911535144,
     "user_tz": -120,
     "elapsed": 161585,
     "user": {
      "displayName": "Christian Kasim Loan",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64",
      "userId": "14469489166467359317"
     }
    },
    "outputId": "a4cf93b6-6202-48f2-a2c0-e31b6abf69bb"
   },
   "source": [
    "pipe.print_info()"
   ],
   "execution_count": null,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :\n",
      ">>> pipe['default_tokenizer'] has settable params:\n",
      "pipe['default_tokenizer'].setTargetPattern('\\S+')    | Info: pattern to grab from text as token candidates. Defaults \\S+ | Currently set to : \\S+\n",
      "pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '\"', \"'\"])  | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '\"', \"'\"]\n",
      "pipe['default_tokenizer'].setCaseSensitiveExceptions(True)  | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True\n",
      "pipe['default_tokenizer'].setMinLength(0)            | Info: Set the minimum allowed legth for each token | Currently set to : 0\n",
      "pipe['default_tokenizer'].setMaxLength(99999)        | Info: Set the maximum allowed legth for each token | Currently set to : 99999\n",
      ">>> pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'] has settable params:\n",
      "pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setExplodeSentences(False)  | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False\n",
      "pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setStorageRef('SentenceDetectorDLModel_c83c27f46b97')  | Info: storage unique identifier | Currently set to : SentenceDetectorDLModel_c83c27f46b97\n",
      "pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setEncoder(com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@60928804)  | Info: Data encoder | Currently set to : com.johnsnowlabs.nlp.annotators.sentence_detector_dl.SentenceDetectorDLEncoder@60928804\n",
      "pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setImpossiblePenultimates(['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e'])  | Info: Impossible penultimates | Currently set to : ['Bros', 'No', 'al', 'vs', 'etc', 'Fig', 'Dr', 'Prof', 'PhD', 'MD', 'Co', 'Corp', 'Inc', 'bros', 'VS', 'Vs', 'ETC', 'fig', 'dr', 'prof', 'PHD', 'phd', 'md', 'co', 'corp', 'inc', 'Jan', 'Feb', 'Mar', 'Apr', 'Jul', 'Aug', 'Sep', 'Sept', 'Oct', 'Nov', 'Dec', 'St', 'st', 'AM', 'PM', 'am', 'pm', 'e.g', 'f.e', 'i.e']\n",
      "pipe['deep_sentence_detector@SentenceDetectorDLModel_c83c27f46b97'].setModelArchitecture('cnn')  | Info: Model architecture (CNN) | Currently set to : cnn\n",
      ">>> pipe['document_assembler'] has settable params:\n",
      "pipe['document_assembler'].setCleanupMode('shrink')  | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ON37vb9KmnJ2"
   },
   "source": [
     "# 4.1 Configure Context Chars\n",
     "By defining custom context chars, we can get extra tokens from suffixes that match the context chars.\n"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "iD376MeemfZG",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 173
    },
    "executionInfo": {
     "status": "ok",
     "timestamp": 1619911536222,
     "user_tz": -120,
     "elapsed": 162657,
     "user": {
      "displayName": "Christian Kasim Loan",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64",
      "userId": "14469489166467359317"
     }
    },
    "outputId": "e7842d13-0e90-485c-e32c-42608b4ea3fc"
   },
   "source": [
    "pipe['default_tokenizer'].setContextChars([',','!','o','d'])\n",
    "pipe.predict('Hello, world!')"
   ],
   "execution_count": null,
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>token</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Hello</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>,</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>world</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>!</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   token\n",
       "0  Hello\n",
       "0      ,\n",
       "0  world\n",
       "0      !"
      ]
     },
     "metadata": {
      "tags": []
     },
     "execution_count": 5
    }
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "Aen1EcOQnmYf"
   },
   "source": [],
   "execution_count": null,
   "outputs": []
  }
 ]
}
