{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.6.5"
    },
    "toc": {
      "base_numbering": 1,
      "nav_menu": {},
      "number_sections": true,
      "sideBar": true,
      "skip_h1_title": false,
      "title_cell": "Table of Contents",
      "title_sidebar": "Contents",
      "toc_cell": false,
      "toc_position": {},
      "toc_section_display": true,
      "toc_window_display": false
    },
    "colab": {
      "name": "NMT - Start the StanfordCoreNLPServer.ipynb",
      "provenance": [],
      "include_colab_link": true
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/lilanpei/NMT/blob/master/NMT%20-%20Start%20the%20StanfordCoreNLPServer.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "aKMZn_RGRj2P",
        "colab_type": "text"
      },
      "source": [
        "## Start the StanfordCoreNLPServer for tokenizing\n",
        "Stanford CoreNLP API in NLTK:\n",
        "https://github.com/nltk/nltk/wiki/Stanford-CoreNLP-API-in-NLTK"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "k5MtO5kxRj2R",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Split the UM-Corpus testing file into two parallel files.\n",
        "# The source alternates languages line by line: even-indexed lines are\n",
        "# English, odd-indexed lines are Chinese.\n",
        "with open(r'H:\\HLT\\en-Testing.txt','w',encoding=\"utf-8\") as en_out, \\\n",
        "     open(r'H:\\HLT\\zh-Testing.txt','w',encoding=\"utf-8\") as zh_out, \\\n",
        "     open(r'H:\\NLP\\UM-Corpus\\data\\Testing\\Testing-Data.txt',encoding=\"utf-8\") as src:\n",
        "    for idx, line in enumerate(src):\n",
        "        target = zh_out if idx % 2 == 1 else en_out\n",
        "        target.write(line)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mJvu6yssRj2X",
        "colab_type": "code",
        "colab": {},
        "outputId": "3bdc3145-8e2a-44cd-b36e-31cc068888df"
      },
      "source": [
        "import glob\n",
        "import os\n",
        "\n",
        "# Merge every UM-Corpus bilingual domain file into two parallel corpora:\n",
        "# en.txt (even-indexed lines, English) and zh.txt (odd-indexed lines, Chinese).\n",
        "root = r'H:\\NLP\\UM-Corpus\\data\\Bilingual'\n",
        "# 'w' mode (was 'a+'): re-running the cell now rebuilds both corpora from\n",
        "# scratch instead of appending duplicate copies of every sentence pair.\n",
        "with open(r'H:\\HLT\\en.txt','w',encoding=\"utf-8\") as ef:\n",
        "    with open(r'H:\\HLT\\zh.txt','w',encoding=\"utf-8\") as cf:\n",
        "        # 'dirpath' (not 'path') so the walk root is no longer shadowed.\n",
        "        for dirpath, dirs, files in os.walk(root):\n",
        "            for d in dirs:\n",
        "                for filename in glob.iglob(os.path.join(dirpath, d, '*.txt')):\n",
        "                    with open(filename,encoding=\"utf-8\") as f:\n",
        "                        print(filename)\n",
        "                        for i, line in enumerate(f):\n",
        "                            if i%2==1:\n",
        "                                cf.write(line)\n",
        "                            else:\n",
        "                                ef.write(line)"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Education\\Bi-Education.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Laws\\Bi-Laws.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Microblog\\Bi-Microblog.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\News\\Bi-News.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Science\\Bi-Science.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Spoken\\Bi-Spoken.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Subtitles\\Bi-Subtitles.txt\n",
            "H:\\NLP\\UM-Corpus\\data\\Bilingual\\Thesis\\Bi-Thesis.txt\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CXtLYQXORj2b",
        "colab_type": "code",
        "colab": {},
        "outputId": "a2d10553-91c3-4d18-87a7-3fe27fc9362b"
      },
      "source": [
        "# %pip (rather than !pip3) installs into the environment of the running\n",
        "# kernel, so the upgraded nltk is guaranteed to be importable below.\n",
        "%pip install -U nltk"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Collecting nltk\n",
            "  Downloading https://files.pythonhosted.org/packages/92/75/ce35194d8e3022203cca0d2f896dbb88689f9b3fce8e9f9cff942913519d/nltk-3.5.zip (1.4MB)\n",
            "Requirement already satisfied, skipping upgrade: click in h:\\anaconda3\\lib\\site-packages (from nltk) (7.0)\n",
            "Collecting joblib (from nltk)\n",
            "  Downloading https://files.pythonhosted.org/packages/28/5c/cf6a2b65a321c4a209efcdf64c2689efae2cb62661f8f6f4bb28547cf1bf/joblib-0.14.1-py2.py3-none-any.whl (294kB)\n",
            "Collecting regex (from nltk)\n",
            "  Downloading https://files.pythonhosted.org/packages/96/a9/2049c5aea2659f1b2de919fe2bede239f8c0ade232b8aac35b5864dfa04d/regex-2020.5.7-cp36-cp36m-win_amd64.whl (272kB)\n",
            "Requirement already satisfied, skipping upgrade: tqdm in h:\\anaconda3\\lib\\site-packages (from nltk) (4.32.2)\n",
            "Building wheels for collected packages: nltk\n",
            "  Building wheel for nltk (setup.py): started\n",
            "  Building wheel for nltk (setup.py): finished with status 'done'\n",
            "  Stored in directory: C:\\Users\\lilan\\AppData\\Local\\pip\\Cache\\wheels\\ae\\8c\\3f\\b1fe0ba04555b08b57ab52ab7f86023639a526d8bc8d384306\n",
            "Successfully built nltk\n",
            "Installing collected packages: joblib, regex, nltk\n",
            "  Found existing installation: nltk 3.4.4\n",
            "    Uninstalling nltk-3.4.4:\n",
            "      Successfully uninstalled nltk-3.4.4\n",
            "Successfully installed joblib-0.14.1 nltk-3.5 regex-2020.5.7\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "pLo5U0DKRj2f",
        "colab_type": "code",
        "colab": {},
        "outputId": "e6738e16-72f8-4d15-caf2-e01498ea4a83"
      },
      "source": [
        "import wget\n",
        "\n",
        "# Download CoreNLP 2018-02-27 plus the Chinese resources.\n",
        "# NOTE(review): 'wget' is a third-party package and must be pre-installed.\n",
        "wget.download(\"http://nlp.stanford.edu/software/stanford-corenlp-full-2018-02-27.zip\", out = r\"H:\\HLT\\stanford-corenlp-full-2018-02-27.zip\")\n",
        "# Get the Chinese model jar; it is saved into the CoreNLP folder so the\n",
        "# server's -cp \"*\" classpath below picks it up.\n",
        "wget.download(\"http://nlp.stanford.edu/software/stanford-chinese-corenlp-2018-02-27-models.jar\", out = r\"H:\\HLT\\stanford-corenlp-full-2018-02-27\")\n",
        "# Chinese pipeline configuration, consumed via -serverProperties when the\n",
        "# server is launched below.\n",
        "wget.download(\"https://raw.githubusercontent.com/stanfordnlp/CoreNLP/master/src/edu/stanford/nlp/pipeline/StanfordCoreNLP-chinese.properties\", out = r\"H:\\HLT\\stanford-corenlp-full-2018-02-27\")"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "100% [...........................................................................] 1979 / 1979"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'H:\\\\HLT\\\\stanford-corenlp-full-2018-02-27/StanfordCoreNLP-chinese (1).properties'"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 2
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1gFwpLFCRj2k",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import zipfile\n",
        "\n",
        "# Unpack the CoreNLP distribution; the archive expands into its own\n",
        "# stanford-corenlp-full-2018-02-27 directory under H:\\HLT.\n",
        "archive_path = r\"H:\\HLT\\stanford-corenlp-full-2018-02-27.zip\"\n",
        "with zipfile.ZipFile(archive_path, \"r\") as archive:\n",
        "    archive.extractall(r\"H:\\HLT\")"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZRzbrQGzRj2o",
        "colab_type": "code",
        "colab": {},
        "outputId": "e4e3e9a4-8066-4f7e-8518-e75e248984e4"
      },
      "source": [
        "# Launch a local CoreNLP server with the Chinese pipeline on port 9001;\n",
        "# -preload loads the listed annotators up front and -timeout is in ms.\n",
        "# NOTE(review): the recorded output below ends in java.net.BindException,\n",
        "# i.e. something was already bound to port 9001 when this cell last ran.\n",
        "%cd \"H:\\HLT\\stanford-corenlp-full-2018-02-27\"\n",
        "! java -Xmx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer \\\n",
        "-serverProperties StanfordCoreNLP-chinese.properties \\\n",
        "-preload tokenize,ssplit,pos,lemma,ner,parse \\\n",
        "-status_port 9001  -port 9001 -timeout 15000"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "H:\\HLT\\stanford-corenlp-full-2018-02-27\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "[main] INFO CoreNLP - --- StanfordCoreNLPServer#main() called ---\n",
            "[main] INFO CoreNLP - setting default constituency parser\n",
            "[main] INFO CoreNLP - warning: cannot find edu/stanford/nlp/models/srparser/englishSR.ser.gz\n",
            "[main] INFO CoreNLP - using: edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz instead\n",
            "[main] INFO CoreNLP - to use shift reduce parser download English models jar from:\n",
            "[main] INFO CoreNLP - http://stanfordnlp.github.io/CoreNLP/download.html\n",
            "[main] INFO CoreNLP -     Threads: 4\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator tokenize\n",
            "[main] INFO edu.stanford.nlp.ie.AbstractSequenceClassifier - Loading classifier from edu/stanford/nlp/models/segmenter/chinese/ctb.gz ... done [11.8 sec].\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator ssplit\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator pos\n",
            "[main] INFO edu.stanford.nlp.tagger.maxent.MaxentTagger - Loading POS tagger from edu/stanford/nlp/models/pos-tagger/chinese-distsim/chinese-distsim.tagger ... done [5.7 sec].\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator lemma\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator ner\n",
            "[main] INFO edu.stanford.nlp.ie.AbstractSequenceClassifier - Loading classifier from edu/stanford/nlp/models/ner/chinese.misc.distsim.crf.ser.gz ... done [3.8 sec].\n",
            "[main] WARN edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Entry has multiple types for ner: 巴伐利亚 STATE_OR_PROVINCE\tMISC,GPE,LOCATION\t1.  Taking type to be MISC\n",
            "[main] WARN edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Entry has multiple types for ner: 巴伐利亚 州 STATE_OR_PROVINCE\tMISC,GPE,LOCATION\t1.  Taking type to be MISC\n",
            "[main] INFO edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Read 21238 unique entries out of 21249 from edu/stanford/nlp/models/kbp/chinese/cn_regexner_mapping.tab, 0 TokensRegex patterns.\n",
            "[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator parse\n",
            "[main] INFO edu.stanford.nlp.parser.common.ParserGrammar - Loading parser from serialized file edu/stanford/nlp/models/srparser/chineseSR.ser.gz ... done [27.7 sec].\n",
            "[main] INFO CoreNLP - Starting server...\n",
            "java.net.BindException: Address already in use: bind\n",
            "\tat sun.nio.ch.Net.bind0(Native Method)\n",
            "\tat sun.nio.ch.Net.bind(Unknown Source)\n",
            "\tat sun.nio.ch.Net.bind(Unknown Source)\n",
            "\tat sun.nio.ch.ServerSocketChannelImpl.bind(Unknown Source)\n",
            "\tat sun.nio.ch.ServerSocketAdaptor.bind(Unknown Source)\n",
            "\tat sun.net.httpserver.ServerImpl.<init>(Unknown Source)\n",
            "\tat sun.net.httpserver.HttpServerImpl.<init>(Unknown Source)\n",
            "\tat sun.net.httpserver.DefaultHttpServerProvider.createHttpServer(Unknown Source)\n",
            "\tat com.sun.net.httpserver.HttpServer.create(Unknown Source)\n",
            "\tat edu.stanford.nlp.pipeline.StanfordCoreNLPServer.run(StanfordCoreNLPServer.java:1423)\n",
            "\tat edu.stanford.nlp.pipeline.StanfordCoreNLPServer.main(StanfordCoreNLPServer.java:1515)\n",
            "[Thread-0] INFO CoreNLP - CoreNLP Server is shutting down.\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lAilfGYVRj2t",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import glob\n",
        "import os\n",
        "from nltk.parse import CoreNLPParser\n",
        "# Tokenize the Chinese half of every bilingual file through the CoreNLP\n",
        "# server started above (Chinese pipeline, port 9001).\n",
        "# NOTE(review): this cell and the English cell below are near-duplicates\n",
        "# (only the line parity and the output suffix differ); consider factoring\n",
        "# into one parameterized function.\n",
        "parser = CoreNLPParser('http://localhost:9001')\n",
        "path = r'H:\\NLP\\UM-Corpus\\data\\Bilingual'\n",
        "\n",
        "# NOTE(review): the loop deliberately rebinds 'path' to the current\n",
        "# dirpath; the replace() below relies on each matched filename starting\n",
        "# with that dirpath.\n",
        "for path, dirs, files in os.walk(path):\n",
        "    for d in dirs:\n",
        "        for filename in glob.iglob(os.path.join(path, d, '*.txt')):\n",
        "            # Strip the directory prefix and '.txt' to build the output\n",
        "            # name, e.g. Bi-News.txt -> H:\\\\HLT\\\\UM-Corpus\\\\Bi-News_zh.tok.\n",
        "            fn =  \"H:\\\\HLT\\\\UM-Corpus\\\\\" + filename.replace(path + \"\\\\\" + d + \"\\\\\",\"\")[:-4] + \"_zh.tok\"\n",
        "            print(fn, filename)\n",
        "            with open(fn,'w',encoding=\"utf-8\") as cf:\n",
        "                with open(filename,'r',encoding=\"utf-8\") as f:\n",
        "                    # Odd-indexed lines are the Chinese side; write one\n",
        "                    # space-joined tokenized sentence per input sentence.\n",
        "                    for i, line in enumerate(f):\n",
        "                        if i%2==1:\n",
        "                            cf.write((' '.join(list(parser.tokenize(line)))) + '\\n')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kPL24jueRj2y",
        "colab_type": "code",
        "colab": {},
        "outputId": "1d99d18b-7afe-40c9-f52c-39a5fd01c134"
      },
      "source": [
        "# Relaunch the CoreNLP server on port 9001 with the default (English)\n",
        "# models, for tokenizing the English half of the corpus.\n",
        "# NOTE(review): assumes the Chinese-configured server started earlier has\n",
        "# been stopped — both bind port 9001.\n",
        "%cd \"H:\\HLT\\stanford-corenlp-full-2018-02-27\"\n",
        "! java -Xmx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer \\\n",
        "-preload tokenize,ssplit,pos,lemma,ner,parse,depparse \\\n",
        "-status_port 9001 -port 9001 -timeout 15000"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "H:\\HLT\\stanford-corenlp-full-2018-02-27\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ImGF5QF4Rj22",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import glob\n",
        "import os\n",
        "from nltk.parse import CoreNLPParser\n",
        "# Tokenize the English half of every bilingual file through the CoreNLP\n",
        "# server restarted above with the default English models on port 9001.\n",
        "# NOTE(review): near-duplicate of the Chinese cell above — only the line\n",
        "# parity (i%2==0) and the '_en.tok' suffix differ.\n",
        "parser = CoreNLPParser('http://localhost:9001')\n",
        "path = r'H:\\NLP\\UM-Corpus\\data\\Bilingual'\n",
        "\n",
        "# NOTE(review): the loop deliberately rebinds 'path' to the current\n",
        "# dirpath; the replace() below relies on each matched filename starting\n",
        "# with that dirpath.\n",
        "for path, dirs, files in os.walk(path):\n",
        "    for d in dirs:\n",
        "        for filename in glob.iglob(os.path.join(path, d, '*.txt')):\n",
        "            # Strip the directory prefix and '.txt' to build the output\n",
        "            # name, e.g. Bi-News.txt -> H:\\\\HLT\\\\UM-Corpus\\\\Bi-News_en.tok.\n",
        "            fn =  \"H:\\\\HLT\\\\UM-Corpus\\\\\" + filename.replace(path + \"\\\\\" + d + \"\\\\\",\"\")[:-4] + \"_en.tok\"\n",
        "            print(fn, filename)\n",
        "            with open(fn,'w',encoding=\"utf-8\") as cf:\n",
        "                with open(filename,'r',encoding=\"utf-8\") as f:\n",
        "                    # Even-indexed lines are the English side; write one\n",
        "                    # space-joined tokenized sentence per input sentence.\n",
        "                    for i, line in enumerate(f):\n",
        "                        if i%2==0:\n",
        "                            cf.write((' '.join(list(parser.tokenize(line)))) + '\\n')"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}