{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xml.etree.ElementTree as etree\n",
    "import codecs\n",
    "import csv\n",
    "import time\n",
    "import os\n",
    "import re\n",
    "import gzip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute Windows paths - adjust for your machine.\n",
    "# Input directory containing the Wikipedia XML dump file(s).\n",
    "PATH_WIKI_XML = 'd:/MMD- Project - Power2TheWiki/data/uk/'\n",
    "# Output directory for the generated CSV / CSV.GZ files.\n",
    "PATH_WIKI_OUT = 'd:/MGontar/Storages/Google Drive/20. Data Science/0.UCU Study/Mining Massive Datasets/Project/out/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect the names of all .xml dump files found in the input directory.\n",
    "WIKI_FILENAMES = [fname for fname in os.listdir(PATH_WIKI_XML)\n",
    "                  if fname.endswith(\".xml\")]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "ENCODING = \"utf-8\"\n",
    "\n",
    "\n",
    "# Nicely formatted time string\n",
    "def hms_string(sec_elapsed):\n",
    "    h = int(sec_elapsed / (60 * 60))\n",
    "    m = int((sec_elapsed % (60 * 60)) / 60)\n",
    "    s = sec_elapsed % 60\n",
    "    return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\n",
    "\n",
    "\n",
    "def strip_tag_name(t):\n",
    "    t = elem.tag\n",
    "    idx = k = t.rfind(\"}\")\n",
    "    if idx != -1:\n",
    "        t = t[idx + 1:]\n",
    "    return t\n",
    "\n",
    "\n",
    "totalCount = 0\n",
    "articleCount = 0\n",
    "redirectCount = 0\n",
    "total_article_text_len = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "100,000\n",
      "200,000\n",
      "300,000\n",
      "400,000\n",
      "500,000\n",
      "600,000\n",
      "700,000\n",
      "800,000\n",
      "900,000\n",
      "1,000,000\n",
      "1,100,000\n",
      "1,200,000\n",
      "File processed: ukwiki-20180620-pages-meta-current.xml\n",
      "Total pages: 1,239,608\n",
      "Article pages: 796,714\n",
      "Redirect pages: 442,894\n",
      "Total article lenght: 3,767,484,723\n",
      "Elapsed time: 0:16:20.81\n",
      "Total pages: 1,239,608\n",
      "Article pages: 796,714\n",
      "Redirect pages: 442,894\n",
      "Total article lenght: 3,767,484,723\n",
      "Elapsed time: 0:16:20.81\n"
     ]
    }
   ],
   "source": [
    "# Stream-parse each Wikipedia XML dump and emit two CSVs per dump:\n",
    "#   <dump>_art.csv : one row per article link (id, title, text_len, link info)\n",
    "#   <dump>_red.csv : one row per redirect page (id, title, redirect target)\n",
    "# Both CSVs are gzip-compressed afterwards and the plain files removed.\n",
    "start_time = time.time()\n",
    "# Matches internal wiki links [[article]] / [[article|text]]; the (?!.*?\\:)\n",
    "# lookaheads skip links whose target or label contains ':' (namespaced pages\n",
    "# such as File:/Category:). NOTE(review): the (?:\\{\\{.*\\}\\})? part presumably\n",
    "# drops an embedded template after the title - confirm against sample markup.\n",
    "regex_links = re.compile(r\"\\[\\[(?P<article>(?!.*?\\:).*?)(?:\\{\\{.*\\}\\})?(?:\\|(?P<text>(?!.*?\\:).*?))?\\]\\]\")\n",
    "for WikiXML in WIKI_FILENAMES:\n",
    "    pathWikiXML = os.path.join(PATH_WIKI_XML, WikiXML)\n",
    "    pathArticles = os.path.join(PATH_WIKI_OUT, WikiXML+\"_art.csv\")\n",
    "    pathArticlesRedirect = os.path.join(PATH_WIKI_OUT, WikiXML+\"_red.csv\")\n",
    "    with codecs.open(pathArticles, \"w\", ENCODING) as articlesFH, \\\n",
    "    codecs.open(pathArticlesRedirect, \"w\", ENCODING) as redirectFH:\n",
    "        articlesWriter = csv.writer(articlesFH, quoting=csv.QUOTE_MINIMAL)\n",
    "        articlesWriter.writerow(['id', 'title', 'text_len', 'link_pos', 'link_val', 'link_txt']) \n",
    "        redirectWriter = csv.writer(redirectFH, quoting=csv.QUOTE_MINIMAL)\n",
    "        redirectWriter.writerow(['id', 'title', 'redirect'])\n",
    "        # iterparse streams the multi-GB dump element by element instead of\n",
    "        # building the whole tree in memory.\n",
    "        for event, elem in etree.iterparse(pathWikiXML, events=('start', 'end')):\n",
    "            tname = strip_tag_name(elem.tag)\n",
    "\n",
    "            # 'start' of a <page> resets the per-page accumulators.\n",
    "            if event == 'start':\n",
    "                if tname == 'page':\n",
    "                    title = ''\n",
    "                    id = -1  # NOTE(review): shadows the builtin id()\n",
    "                    redirect = ''\n",
    "                    inrevision = False\n",
    "                    ns = 0\n",
    "                    article_text_len = 0\n",
    "                    links = []\n",
    "                elif tname == 'revision':\n",
    "                    # Do not pick up on revision id's\n",
    "                    inrevision = True\n",
    "            else:\n",
    "                # 'end' events: the element's text/attributes are now complete.\n",
    "                if tname == 'title':\n",
    "                    title = elem.text\n",
    "                elif tname == 'id' and not inrevision:\n",
    "                    id = int(elem.text)\n",
    "                elif tname == 'redirect':\n",
    "                    redirect = elem.attrib['title']\n",
    "                elif tname == 'ns':\n",
    "                    ns = int(elem.text)\n",
    "                elif tname == 'page' and ns == 0:\n",
    "                    # End of a main-namespace (ns == 0) page: flush its rows.\n",
    "                    totalCount += 1\n",
    "                        \n",
    "                    if len(redirect) == 0:\n",
    "                        articleCount += 1\n",
    "                        total_article_text_len += article_text_len\n",
    "                        # Articles with no links still get one (empty-link) row.\n",
    "                        if len(links) == 0:\n",
    "                            articlesWriter.writerow([id, title, article_text_len, 0, \"\", \"\"])\n",
    "                        for link in links:\n",
    "                            articlesWriter.writerow([id, title, article_text_len, link[0], link[1], link[2]])\n",
    "                    else:\n",
    "                        redirectCount += 1\n",
    "                        redirectWriter.writerow([id, title, redirect])\n",
    "\n",
    "                    # Progress indicator every 100k pages.\n",
    "                    if totalCount > 1 and (totalCount % 100000) == 0:\n",
    "                        print(\"{:,}\".format(totalCount))\n",
    "                elif tname == 'text' and elem.text != None:\n",
    "                    # Extract every internal link from the article wikitext,\n",
    "                    # normalising a few HTML entities in the link target.\n",
    "                    article_text_len = len(elem.text)\n",
    "                    for match in regex_links.finditer(elem.text):    \n",
    "                        link_pos = match.start()\n",
    "                        link_title = match.group(\"article\")\n",
    "                        link_title = link_title.replace(\"&nbsp;\", \" \")\n",
    "                        link_title = link_title.replace(\"&ndash;\", \"-\")\n",
    "                        link_title = link_title.replace(\"&mdash;\", \"—\")\n",
    "                        link_title = link_title.replace(\"%20\", \" \")\n",
    "                        \n",
    "                        link_text = match.group(\"text\")\n",
    "                        links.append((link_pos,link_title,link_text))\n",
    "\n",
    "\n",
    "                # Free the element's children on every 'end' event so memory\n",
    "                # stays bounded while streaming the dump.\n",
    "                elem.clear()\n",
    "    \n",
    "    # Compress both CSVs and delete the uncompressed originals.\n",
    "    fn_in = pathArticles\n",
    "    with open(fn_in, 'rb') as f_in, gzip.open(fn_in+'.gz', 'wb') as f_out:\n",
    "        f_out.writelines(f_in)\n",
    "    os.remove(fn_in)\n",
    "    \n",
    "    fn_in = pathArticlesRedirect\n",
    "    with open(fn_in, 'rb') as f_in, gzip.open(fn_in+'.gz', 'wb') as f_out:\n",
    "        f_out.writelines(f_in)\n",
    "    os.remove(fn_in)\n",
    "    \n",
    "    # Per-file summary. NOTE(review): 'lenght' is a typo ('length') in the\n",
    "    # printed message only; left unchanged here as it is runtime output.\n",
    "    elapsed_time = time.time() - start_time\n",
    "    print(\"File processed: {}\".format(WikiXML))\n",
    "    print(\"Total pages: {:,}\".format(totalCount))\n",
    "    print(\"Article pages: {:,}\".format(articleCount))\n",
    "    print(\"Redirect pages: {:,}\".format(redirectCount))\n",
    "    print(\"Total article lenght: {:,}\".format(total_article_text_len))\n",
    "    print(\"Elapsed time: {}\".format(hms_string(elapsed_time)))\n",
    "\n",
    "elapsed_time = time.time() - start_time\n",
    "\n",
    "# Grand totals across all dump files (the counters accumulate globally).\n",
    "print(\"Total pages: {:,}\".format(totalCount))\n",
    "print(\"Article pages: {:,}\".format(articleCount))\n",
    "print(\"Redirect pages: {:,}\".format(redirectCount))\n",
    "print(\"Total article lenght: {:,}\".format(total_article_text_len))\n",
    "print(\"Elapsed time: {}\".format(hms_string(elapsed_time)))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
