{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from igraph import *\n",
    "import gzip\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import shutil\n",
    "import time\n",
    "import os\n",
    "import re\n",
    "import progressbar"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load uk_wiki"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 254,
   "metadata": {},
   "outputs": [],
   "source": [
    "# location of the raw wiki dumps\n",
    "PATH_TO_DATA = '../data/'\n",
    "PATH_TO_DATA_UK = PATH_TO_DATA + 'ukwiki/'\n",
    "PATH_TO_DATA_EN = PATH_TO_DATA + 'enwiki/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 255,
   "metadata": {},
   "outputs": [],
   "source": [
    "# list archives with article link tables for uk wiki\n",
    "UKWIKI_ART_FNMS = []\n",
    "# sorted() makes processing order deterministic across runs; in the pattern\n",
    "# the dots are escaped and the end is anchored so that '.' cannot match an\n",
    "# arbitrary character and stray files (e.g. '...csv.gz.bak') are skipped\n",
    "for file in sorted(os.listdir(PATH_TO_DATA_UK)):\n",
    "    if re.match(r\"ukwiki-20180620-pages-meta-current\\d{2}-p\\d+p\\d+\\.xml_art\\.csv\\.gz$\", file):\n",
    "        UKWIKI_ART_FNMS.append(file)\n",
    "\n",
    "UK_ID_NAME = \"ukwiki-20180620-id_name.csv.gz\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 256,
   "metadata": {},
   "outputs": [],
   "source": [
    "def unpack(file_name):\n",
    "    \"\"\"Decompress a .gz file next to the original and return the new path.\"\"\"\n",
    "    # strip only a trailing '.gz'; a plain str.replace would also remove a\n",
    "    # '.gz' occurring elsewhere in the path\n",
    "    file_name_new = file_name[:-3] if file_name.endswith('.gz') else file_name + '.unpacked'\n",
    "    with gzip.open(file_name, 'rb') as f_in, open(file_name_new, 'wb') as f_out:\n",
    "        # stream in fixed-size chunks instead of materializing line objects\n",
    "        shutil.copyfileobj(f_in, f_out)\n",
    "    return file_name_new"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 257,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current02-p5503943p11007884.xml_art.csv.gz\n",
      "df_articles size: (4820555, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "2/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current02-p5501931p11003859.xml_art.csv.gz\n",
      "df_articles size: (4819664, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "3/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current03-p11003860p16505788.xml_art.csv.gz\n",
      "df_articles size: (4513547, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "4/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current04-p16505789p22007717.xml_art.csv.gz\n",
      "df_articles size: (4878606, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "5/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current01-p1p5503942.xml_art.csv.gz\n",
      "df_articles size: (5005870, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "6/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current01-p1p5501930.xml_art.csv.gz\n",
      "df_articles size: (5005711, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "7/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current03-p11007885p16511825.xml_art.csv.gz\n",
      "df_articles size: (4509166, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "8/8\n",
      "../data/ukwiki/ukwiki-20180620-pages-meta-current04-p16511826p22015766.xml_art.csv.gz\n",
      "df_articles size: (4873060, 2). df_articles columns: ['id', 'link_id']\n",
      "\n",
      "Total time: 2.3 minutes\n"
     ]
    }
   ],
   "source": [
    "start_time = time.time()\n",
    "# load all archives, keeping only existing (blue) links; frames are\n",
    "# collected in a list and concatenated once at the end, which avoids the\n",
    "# quadratic copying caused by growing a DataFrame inside the loop\n",
    "frames = []\n",
    "n_files = len(UKWIKI_ART_FNMS)\n",
    "for index in range(n_files):\n",
    "    print(str(index+1) + '/' + str(n_files))\n",
    "    fn = PATH_TO_DATA_UK + UKWIKI_ART_FNMS[index]\n",
    "    print(fn)\n",
    "    fn_new = unpack(fn)\n",
    "    df_articles = pd.read_csv(fn_new, encoding='UTF-8', quotechar=\"\\\"\")\n",
    "    df_articles = df_articles[df_articles['is_red_link']==False][['id','link_id']]\n",
    "    frames.append(df_articles)\n",
    "    print(\"df_articles size: {}. df_articles columns: {}\\n\".format(df_articles.shape, list(df_articles.columns)))\n",
    "    os.remove(fn_new)\n",
    "\n",
    "df_uk = pd.concat(frames)\n",
    "\n",
    "print('Total time: %.1f minutes' % ((time.time() - start_time)/60))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 258,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(38426179, 2)\n",
      "Index(['id', 'link_id'], dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# sanity check on the combined uk link table\n",
    "for info in (df_uk.shape, df_uk.columns):\n",
    "    print(info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 259,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(796714, 2)\n",
      "Index(['id', 'title'], dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# id -> title lookup for uk articles\n",
    "df_uk_name = pd.read_csv(PATH_TO_DATA_UK + UK_ID_NAME, encoding='UTF-8', quotechar='\"')\n",
    "df_uk_name = df_uk_name[['id', 'title']]\n",
    "print(df_uk_name.shape)\n",
    "print(df_uk_name.columns)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load en_wiki"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# list archives with article link tables for en wiki\n",
    "ENWIKI_ART_FNMS = []\n",
    "# sorted() makes processing order deterministic; dots are escaped and the\n",
    "# pattern is end-anchored so unrelated files cannot slip through\n",
    "for file in sorted(os.listdir(PATH_TO_DATA_EN)):\n",
    "    if re.match(r\"enwiki-20180620-pages-meta-current\\d{2}-p\\d+p\\d+\\.xml_art\\.csv\\.gz$\", file):\n",
    "        ENWIKI_ART_FNMS.append(file)\n",
    "\n",
    "EN_ID_NAME = \"enwiki-20180620-id_name.csv.gz\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current26-p39567203p41067203.xml_art.csv.gz\n",
      "df_articles size: (3684353, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "2/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current19-p16120543p17620543.xml_art.csv.gz\n",
      "df_articles size: (3284004, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "3/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p57663462p57726175.xml_art.csv.gz\n",
      "df_articles size: (89311, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "4/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current14-p7697598p7744799.xml_art.csv.gz\n",
      "df_articles size: (143742, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "5/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current22-p25427984p26823660.xml_art.csv.gz\n",
      "df_articles size: (3288047, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "6/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p53163462p54663462.xml_art.csv.gz\n",
      "df_articles size: (3018421, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "7/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current09-p1791080p2336422.xml_art.csv.gz\n",
      "df_articles size: (4821027, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "8/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p42663462p44163462.xml_art.csv.gz\n",
      "df_articles size: (3631110, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "9/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current23-p29823661p30503449.xml_art.csv.gz\n",
      "df_articles size: (1210090, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "10/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current16-p9518050p11018050.xml_art.csv.gz\n",
      "df_articles size: (3933072, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "11/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p54663462p56163462.xml_art.csv.gz\n",
      "df_articles size: (2565412, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "12/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current07-p892914p1268691.xml_art.csv.gz\n",
      "df_articles size: (4770178, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "13/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current25-p36952816p38067202.xml_art.csv.gz\n",
      "df_articles size: (2491078, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "14/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current24-p30503451p32003451.xml_art.csv.gz\n",
      "df_articles size: (3875804, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "15/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current20-p18754736p20254736.xml_art.csv.gz\n",
      "df_articles size: (3709904, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "16/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current06-p565314p892912.xml_art.csv.gz\n",
      "df_articles size: (4666353, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "17/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current08-p1268692p1791079.xml_art.csv.gz\n",
      "df_articles size: (4955988, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "18/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current15-p9244803p9518048.xml_art.csv.gz\n",
      "df_articles size: (774772, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "19/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current16-p11018050p11539266.xml_art.csv.gz\n",
      "df_articles size: (1546529, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "20/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current18-p13693074p15193074.xml_art.csv.gz\n",
      "df_articles size: (4042316, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "21/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p48663462p50163462.xml_art.csv.gz\n",
      "df_articles size: (2778591, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "22/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current21-p21222158p22722158.xml_art.csv.gz\n",
      "df_articles size: (3905236, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "23/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p56163462p57663462.xml_art.csv.gz\n",
      "df_articles size: (2586066, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "24/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current25-p33952816p35452816.xml_art.csv.gz\n",
      "df_articles size: (2963738, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "25/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current26-p38067203p39567203.xml_art.csv.gz\n",
      "df_articles size: (3473966, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "26/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current02-p30304p88444.xml_art.csv.gz\n",
      "df_articles size: (2992798, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "27/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current14-p6197598p7697598.xml_art.csv.gz\n",
      "df_articles size: (5016147, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "28/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current17-p13039268p13693071.xml_art.csv.gz\n",
      "df_articles size: (1665519, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "29/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current24-p33503451p33952815.xml_art.csv.gz\n",
      "df_articles size: (1012409, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "30/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current05-p352690p565313.xml_art.csv.gz\n",
      "df_articles size: (4287103, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "31/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current17-p11539268p13039268.xml_art.csv.gz\n",
      "df_articles size: (4909430, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "32/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current12-p3926863p5040436.xml_art.csv.gz\n",
      "df_articles size: (5359049, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "33/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current21-p22722158p23927983.xml_art.csv.gz\n",
      "df_articles size: (3164786, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "34/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current24-p32003451p33503451.xml_art.csv.gz\n",
      "df_articles size: (3474499, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "35/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current18-p15193074p16120542.xml_art.csv.gz\n",
      "df_articles size: (2235800, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "36/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current23-p26823661p28323661.xml_art.csv.gz\n",
      "df_articles size: (3746572, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "37/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current03-p88445p200507.xml_art.csv.gz\n",
      "df_articles size: (4650971, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "38/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current23-p28323661p29823661.xml_art.csv.gz\n",
      "df_articles size: (3011964, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "39/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current25-p35452816p36952816.xml_art.csv.gz\n",
      "df_articles size: (3710855, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "40/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current01-p10p30303.xml_art.csv.gz\n",
      "df_articles size: (2184354, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "41/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current04-p200511p352689.xml_art.csv.gz\n",
      "df_articles size: (3801927, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "42/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p44163462p45663462.xml_art.csv.gz\n",
      "df_articles size: (3142144, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "43/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p51663462p53163462.xml_art.csv.gz\n",
      "df_articles size: (2652971, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "44/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current11-p3046514p3926861.xml_art.csv.gz\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "df_articles size: (5168413, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "45/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current20-p20254736p21222156.xml_art.csv.gz\n",
      "df_articles size: (2270783, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "46/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current13-p5040438p6197594.xml_art.csv.gz\n",
      "df_articles size: (5139668, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "47/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current15-p7744803p9244803.xml_art.csv.gz\n",
      "df_articles size: (4598605, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "48/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current26-p42567203p42663461.xml_art.csv.gz\n",
      "df_articles size: (212304, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "49/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current22-p23927984p25427984.xml_art.csv.gz\n",
      "df_articles size: (3875227, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "50/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p47163462p48663462.xml_art.csv.gz\n",
      "df_articles size: (2915763, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "51/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current10-p2336425p3046511.xml_art.csv.gz\n",
      "df_articles size: (4845736, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "52/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p45663462p47163462.xml_art.csv.gz\n",
      "df_articles size: (2404507, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "53/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current27-p50163462p51663462.xml_art.csv.gz\n",
      "df_articles size: (3167466, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "54/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current26-p41067203p42567203.xml_art.csv.gz\n",
      "df_articles size: (3465756, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "55/55\n",
      "../data/enwiki/enwiki-20180620-pages-meta-current19-p17620543p18754735.xml_art.csv.gz\n",
      "df_articles size: (2784914, 4). df_articles columns: ['id', 'link_id', 'link_val', 'is_red_link']\n",
      "\n",
      "Total time: 12.7 minutes\n"
     ]
    }
   ],
   "source": [
    "start_time = time.time()\n",
    "\n",
    "# split each archive into existing (blue) and missing (red) links; frames\n",
    "# are collected in lists and concatenated once at the end, avoiding the\n",
    "# quadratic copying caused by growing DataFrames inside the loop\n",
    "blue_frames = []\n",
    "red_frames = []\n",
    "n_files = len(ENWIKI_ART_FNMS)\n",
    "for index in range(n_files):\n",
    "    print(str(index+1) + '/' + str(n_files))\n",
    "    fn = PATH_TO_DATA_EN + ENWIKI_ART_FNMS[index]\n",
    "    print(fn)\n",
    "    fn_new = unpack(fn)\n",
    "    df_articles = pd.read_csv(fn_new, encoding='ISO-8859-1', quotechar=\"'\", usecols=[0,1,4,6])\n",
    "    \n",
    "    blue_frames.append(df_articles[df_articles['is_red_link']==False][['id','link_id']])\n",
    "    red_frames.append(df_articles[df_articles['is_red_link']][['id','link_val']])\n",
    "    \n",
    "    print(\"df_articles size: {}. df_articles columns: {}\\n\".format(df_articles.shape, list(df_articles.columns)))\n",
    "    os.remove(fn_new)\n",
    "\n",
    "df_en_blue = pd.concat(blue_frames)\n",
    "df_en_red = pd.concat(red_frames)\n",
    "\n",
    "print('Total time: %.1f minutes' % ((time.time() - start_time)/60))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(168384829, 2)\n",
      "Index(['id', 'link_id'], dtype='object')\n",
      "(9692719, 2)\n",
      "Index(['id', 'link_val'], dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# sanity check: blue (existing) then red (missing) link tables\n",
    "for frame in (df_en_blue, df_en_red):\n",
    "    print(frame.shape)\n",
    "    print(frame.columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(5669865, 2)\n",
      "Index(['id', 'title'], dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# id -> title lookup for en articles\n",
    "df_en_name = pd.read_csv(PATH_TO_DATA_EN + EN_ID_NAME, encoding='ISO-8859-1',\n",
    "                         quotechar=\"'\", escapechar=\"\\\\\", usecols=[0, 1])\n",
    "\n",
    "print(df_en_name.shape)\n",
    "print(df_en_name.columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(11480803, 3)\n",
      "Index(['id', 'title', 'is_red_link'], dtype='object')\n"
     ]
    }
   ],
   "source": [
    "# assign synthetic ids to red links (linked titles that have no article yet)\n",
    "\n",
    "# red-link ids start right after the largest existing article id\n",
    "max_id = np.max(df_en_name['id'])\n",
    "\n",
    "# one fresh id per distinct red-link title\n",
    "red_links = df_en_red['link_val'].unique()\n",
    "red_links_ids = pd.DataFrame({'title': red_links,\n",
    "                              'id': np.arange(max_id + 1, len(red_links) + max_id + 1)})\n",
    "red_links_ids['is_red_link'] = True\n",
    "\n",
    "# merge red-link titles into the article-name table, flagging each row\n",
    "df_en_name['is_red_link'] = False\n",
    "df_en_name = pd.concat((df_en_name, red_links_ids))\n",
    "df_en_name = df_en_name.reset_index(drop=True)\n",
    "\n",
    "print(df_en_name.shape)\n",
    "print(df_en_name.columns)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# attach the generated ids to the red-link rows\n",
    "df_en_red = (df_en_red\n",
    "             .merge(red_links_ids, left_on='link_val', right_on='title', how='left')\n",
    "             [['id_x', 'id_y']])\n",
    "df_en_red.columns = ['id', 'link_id']\n",
    "\n",
    "# red and blue links together form the full en link table\n",
    "df_en = pd.concat((df_en_red, df_en_blue))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(178077548, 2)\n",
      "178077548\n"
     ]
    }
   ],
   "source": [
    "# row counts must add up: en = blue + red\n",
    "print(df_en.shape)\n",
    "print(df_en_blue.shape[0] + df_en_red.shape[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "del red_links, red_links_ids, df_en_red, df_en_blue"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Load uk-en correpondences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# uk <-> en interlanguage link table\n",
    "fn = PATH_TO_DATA + 'link/20180620-langlinks_uk_en.csv.gz'\n",
    "df_link = pd.read_csv(fn, encoding='UTF-8', quotechar=\"'\")\n",
    "print(df_link.shape)\n",
    "print(df_link.columns)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# drop rows whose en id is invalid (non-positive), plus one hardcoded id\n",
    "# NOTE(review): id_en == 49244 is excluded without explanation -- confirm why\n",
    "df_link = df_link[(df_link['id_en'] > 0) & (df_link['id_en'] != 49244)]\n",
    "print(df_link.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# attach the en counterpart (if any) to each uk article title\n",
    "df_uk_name = df_uk_name.merge(df_link, left_on='id', right_on='id_uk', how='left')\n",
    "df_uk_name = df_uk_name[['id', 'title', 'id_en']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 164,
   "metadata": {},
   "outputs": [],
   "source": [
    "# several en articles map to multiple uk articles; for each en id keep only\n",
    "# the uk article with the smallest id (i.e. the oldest one)\n",
    "df_uk_translated = df_uk_name.loc[~df_uk_name['id_en'].isnull(), ['id', 'id_en']]\n",
    "df_uk_translated.columns = ['id_uk', 'id_en']\n",
    "df_uk_translated = (df_uk_translated\n",
    "                    .sort_values(by='id_uk')\n",
    "                    .drop_duplicates(keep='first', subset=['id_en']))\n",
    "\n",
    "# attach the uk counterpart (if any) to each en article\n",
    "df_en_name = df_en_name.merge(df_uk_translated, left_on='id', right_on='id_en', how='left')\n",
    "df_en_name = df_en_name[['id', 'title', 'is_red_link', 'id_uk']]\n",
    "df_en_name = df_en_name.sort_values(by='id')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 176,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>title</th>\n",
       "      <th>id_en</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3</td>\n",
       "      <td>Головна сторінка</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>13</td>\n",
       "      <td>Географія</td>\n",
       "      <td>18963910.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>584</td>\n",
       "      <td>Атом</td>\n",
       "      <td>902.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>585</td>\n",
       "      <td>Мільярд</td>\n",
       "      <td>1136363.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>586</td>\n",
       "      <td>Ядро</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    id             title       id_en\n",
       "0    3  Головна сторінка         NaN\n",
       "1   13         Географія  18963910.0\n",
       "2  584              Атом       902.0\n",
       "3  585           Мільярд   1136363.0\n",
       "4  586              Ядро         NaN"
      ]
     },
     "execution_count": 176,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_uk_name.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 181,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>title</th>\n",
       "      <th>is_red_link</th>\n",
       "      <th>id_uk</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>12</td>\n",
       "      <td>Anarchism</td>\n",
       "      <td>False</td>\n",
       "      <td>12101.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>25</td>\n",
       "      <td>Autism</td>\n",
       "      <td>False</td>\n",
       "      <td>37656.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>39</td>\n",
       "      <td>Albedo</td>\n",
       "      <td>False</td>\n",
       "      <td>10899.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>290</td>\n",
       "      <td>A</td>\n",
       "      <td>False</td>\n",
       "      <td>235422.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>303</td>\n",
       "      <td>Alabama</td>\n",
       "      <td>False</td>\n",
       "      <td>6320.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    id      title  is_red_link     id_uk\n",
       "0   12  Anarchism        False   12101.0\n",
       "1   25     Autism        False   37656.0\n",
       "2   39     Albedo        False   10899.0\n",
       "3  290          A        False  235422.0\n",
       "4  303    Alabama        False    6320.0"
      ]
     },
     "execution_count": 181,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_en_name.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 182,
   "metadata": {},
   "outputs": [],
   "source": [
    "# persist both name tables as compressed csv\n",
    "for frame, path in ((df_uk_name, 'uk_names.csv.gz'), (df_en_name, 'en_names.csv.gz')):\n",
    "    frame.to_csv(path, compression='gzip', header=True, index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 261,
   "metadata": {},
   "outputs": [],
   "source": [
    "# reload the saved name tables\n",
    "df_uk_name = pd.read_csv('uk_names.csv.gz')\n",
    "df_en_name = pd.read_csv('en_names.csv.gz')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Encode uk nontranslated article by its incoming links"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 262,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Nontranslated uk acticles: 355796\n",
      "Nontranslated uk acticles with more at least 10 incoming links:  82122\n"
     ]
    }
   ],
   "source": [
    "# uk articles with no en counterpart\n",
    "uk_nontranslated = np.array(df_uk_name[df_uk_name['id_en'].isnull()]['id'])\n",
    "print('Nontranslated uk articles: %6d' % (len(uk_nontranslated)))\n",
    "\n",
    "# keep only articles that have at least 5 distinct incoming links;\n",
    "# the string aggregator 'nunique' replaces the equivalent (slower) lambda\n",
    "uk_nontranslated = df_uk[df_uk['link_id'].isin(uk_nontranslated)].groupby('link_id') \\\n",
    "            .agg({'id': 'nunique'})\n",
    "uk_nontranslated = uk_nontranslated.reset_index()\n",
    "uk_nontranslated.columns = ['id', 'n_incoming']\n",
    "uk_nontranslated = uk_nontranslated[uk_nontranslated['n_incoming'] >= 5]\n",
    "uk_nontranslated = np.array(uk_nontranslated['id'])\n",
    "uk_nontranslated_ids = np.sort(uk_nontranslated)\n",
    "print('Nontranslated uk articles with at least 5 incoming links: %6d' % (len(uk_nontranslated)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 263,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100% (82122 of 82122) |##################| Elapsed Time: 0:00:15 Time:  0:00:15\n"
     ]
    }
   ],
   "source": [
    "# encode each nontranslated article by the set of articles linking to it.\n",
    "# groupby sorts by key, so the list order matches uk_nontranslated_ids\n",
    "# (sorted ids) -- same order as the original sort/dedup-index/slice loop,\n",
    "# but without the O(n) iloc slicing per article\n",
    "uk_incoming = df_uk[df_uk['link_id'].isin(uk_nontranslated)]\n",
    "uk_nontranslated_encoding = list(uk_incoming.groupby('link_id')['id'].apply(set))\n",
    "\n",
    "del uk_incoming, uk_nontranslated"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 264,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save encoding to file\n",
    "np.save(file='uk_nontranslated_encoding.npy', arr=uk_nontranslated_encoding)\n",
    "np.save(file='uk_nontranslated_ids.npy', arr=uk_nontranslated_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load encoding from file\n",
    "uk_nontranslated_encoding = np.load('uk_nontranslated_encoding.npy')\n",
    "uk_nontranslated_ids = np.load('uk_nontranslated_ids.npy')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Encode red links by their incoming uk links"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of red links in en wiki: 5810938\n",
      "Number of red links with at least 5 distinct incoming uk links: 3593\n"
     ]
    }
   ],
   "source": [
    "# select only red links from en_wiki\n",
    "df_en_red = df_en[df_en['link_id']>max_id]\n",
    "en_red_ids = df_en_red['link_id'].unique()\n",
    "print('Number of red links in en wiki: %d' %(len(en_red_ids)))\n",
    "\n",
    "# for every red link find its corresponding uk incoming links (distinct only)\n",
    "df_en_red_encoding = df_en_red.merge(right=df_en_name, left_on='id', \n",
    "                                         right_on='id', how='left')[['id_uk','link_id']]\n",
    "df_en_red_encoding = df_en_red_encoding[~df_en_red_encoding['id_uk'].isnull()]\n",
    "df_en_red_encoding = df_en_red_encoding.drop_duplicates()\n",
    "\n",
    "# for every red link calculate number of incoming links\n",
    "df_en_red_incoming = df_en_red_encoding['link_id'].value_counts().reset_index()\n",
    "df_en_red_incoming.columns = ['id','n_incoming']\n",
    "\n",
    "# select only those red links that can be encoded with at least 5 uk incoming links\n",
    "en_red_ids = np.array(df_en_red_incoming[df_en_red_incoming['n_incoming']>=5]['id'])\n",
    "print('Number of red links with at least 5 distinct incoming uk links: %d' % (len(en_red_ids)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100% (3593 of 3593) |####################| Elapsed Time: 0:00:06 Time:  0:00:06\n"
     ]
    }
   ],
   "source": [
    "# encode top red links using incoming uk links\n",
    "en_red_ids = np.sort(en_red_ids)\n",
    "df_en_red_encoding = df_en_red_encoding[df_en_red_encoding['link_id'].isin(en_red_ids)]\n",
    "df_en_red_encoding = df_en_red_encoding.sort_values(by = 'link_id').reset_index(drop = True)\n",
    "indices = np.array(df_en_red_encoding.drop_duplicates(keep='first', subset=['link_id']).index)\n",
    "\n",
    "en_red_encoding = []\n",
    "pbar = progressbar.ProgressBar()\n",
    "for i in pbar(range(0, len(indices))):\n",
    "    if i == len(indices)-1:\n",
    "        this_encoding = set(df_en_red_encoding.iloc[indices[i]:]['id_uk'])\n",
    "    else:\n",
    "        this_encoding = set(df_en_red_encoding.iloc[indices[i]:indices[i+1]]['id_uk'])\n",
    "    en_red_encoding.append(this_encoding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 245,
   "metadata": {},
   "outputs": [],
   "source": [
    "del df_en_red_incoming, df_en_red_encoding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 238,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save encoding to file\n",
    "np.save(file='en_red_ids.npy', arr=en_red_ids)\n",
    "np.save(file='en_red_encoding.npy', arr=en_red_encoding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load encoding from file\n",
    "en_red_ids = np.load('en_red_ids.npy')\n",
    "en_red_encoding = np.load('en_red_encoding.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100% (3593 of 3593) |####################| Elapsed Time: 0:16:12 Time:  0:16:12\n"
     ]
    }
   ],
   "source": [
    "# create df with incoming en ids and names for every red link\n",
    "incoming_en_link_names = []\n",
    "n_incoming_en = []\n",
    "incoming_uk_link_names = []\n",
    "n_incoming_uk = []\n",
    "\n",
    "pbar = progressbar.ProgressBar()\n",
    "for i in pbar(range(len(en_red_ids))):\n",
    "    red_id = en_red_ids[i]\n",
    "    incoming_ids = list(df_en_red[df_en_red['link_id'] == red_id]['id'])\n",
    "    incoming_names = set(df_en_name[df_en_name['id'].isin(incoming_ids)]['title'])\n",
    "    incoming_uk_names = set(df_uk_name[df_uk_name['id'].isin(en_red_encoding[i])]['title'])\n",
    "              \n",
    "    incoming_en_link_names.append(incoming_names)\n",
    "    n_incoming_en.append(len(incoming_names))\n",
    "    incoming_uk_link_names.append(incoming_uk_names) \n",
    "    n_incoming_uk.append(len(incoming_uk_names))\n",
    "    \n",
    "df_red_links = pd.DataFrame({'red_link_id': en_red_ids, \n",
    "                          'red_link_name': list(df_en_name[df_en_name['id'].isin(en_red_ids)]['title']),\n",
    "                          'incoming_en_names': incoming_en_link_names,\n",
    "                          'n_incoming_en': n_incoming_en,\n",
    "                          'incoming_uk_names': incoming_uk_link_names,\n",
    "                          'n_incoming_uk': n_incoming_uk\n",
    "                          })\n",
    "              \n",
    "df_red_links = df_red_links[['red_link_id','red_link_name','incoming_en_names',\n",
    "                             'n_incoming_en', 'incoming_uk_names', 'n_incoming_uk']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(3593, 6)\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>red_link_id</th>\n",
       "      <th>red_link_name</th>\n",
       "      <th>incoming_en_names</th>\n",
       "      <th>n_incoming_en</th>\n",
       "      <th>incoming_uk_names</th>\n",
       "      <th>n_incoming_uk</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>57726536</td>\n",
       "      <td>Cleonini</td>\n",
       "      <td>{Pachycerus, Rhabdorrhynchus, Liocleonus clath...</td>\n",
       "      <td>7</td>\n",
       "      <td>{Pachycerus, Rhabdorrhynchus, Cyphocleonus, Li...</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>57726733</td>\n",
       "      <td>West Coast Mafia Records</td>\n",
       "      <td>{The Final Chapter (C-Bo album), Gas Chamber (...</td>\n",
       "      <td>24</td>\n",
       "      <td>{Life as a Rider, Cashville Records, West Coas...</td>\n",
       "      <td>18</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>57726734</td>\n",
       "      <td>Killa Tay</td>\n",
       "      <td>{JT the Bigga Figga, C-Bo, E.D.I. Mean, A Mill...</td>\n",
       "      <td>35</td>\n",
       "      <td>{Life as a Rider, Дискографія Yukmouth, Blow (...</td>\n",
       "      <td>16</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>57726809</td>\n",
       "      <td>Denver Film Critics Society</td>\n",
       "      <td>{The Croods, The Social Network (soundtrack), ...</td>\n",
       "      <td>24</td>\n",
       "      <td>{Гіліян Флінн, Сімейка Крудсів, Б'ютифул, Сієн...</td>\n",
       "      <td>11</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>57727161</td>\n",
       "      <td>Wim Smet</td>\n",
       "      <td>{2011â12 Oud-Heverlee Leuven season, 2012â...</td>\n",
       "      <td>13</td>\n",
       "      <td>{Кубок Бельгії з футболу 2016—2017, Кубок Бель...</td>\n",
       "      <td>6</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   red_link_id                red_link_name  \\\n",
       "0     57726536                     Cleonini   \n",
       "1     57726733     West Coast Mafia Records   \n",
       "2     57726734                    Killa Tay   \n",
       "3     57726809  Denver Film Critics Society   \n",
       "4     57727161                     Wim Smet   \n",
       "\n",
       "                                   incoming_en_names  n_incoming_en  \\\n",
       "0  {Pachycerus, Rhabdorrhynchus, Liocleonus clath...              7   \n",
       "1  {The Final Chapter (C-Bo album), Gas Chamber (...             24   \n",
       "2  {JT the Bigga Figga, C-Bo, E.D.I. Mean, A Mill...             35   \n",
       "3  {The Croods, The Social Network (soundtrack), ...             24   \n",
       "4  {2011â12 Oud-Heverlee Leuven season, 2012â...             13   \n",
       "\n",
       "                                   incoming_uk_names  n_incoming_uk  \n",
       "0  {Pachycerus, Rhabdorrhynchus, Cyphocleonus, Li...              5  \n",
       "1  {Life as a Rider, Cashville Records, West Coas...             18  \n",
       "2  {Life as a Rider, Дискографія Yukmouth, Blow (...             16  \n",
       "3  {Гіліян Флінн, Сімейка Крудсів, Б'ютифул, Сієн...             11  \n",
       "4  {Кубок Бельгії з футболу 2016—2017, Кубок Бель...              6  "
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print(df_red_links.shape)\n",
    "df_red_links.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save red links incoming links to files\n",
    "df_red_links.to_csv('red_links_summary.csv.gz', compression='gzip', header=True, index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load \n",
    "df_red_links = pd.read_csv('red_links_summary.csv.gz')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Find most similar uk articles for red links"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 265,
   "metadata": {},
   "outputs": [],
   "source": [
    "# jaccard similarity\n",
    "def jaccard(a, b):\n",
    "    c = a.intersection(b)\n",
    "    return len(c) / (len(a) + len(b) - len(c))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 268,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100% (3593 of 3593) |####################| Elapsed Time: 0:04:32 Time:  0:04:32\n"
     ]
    }
   ],
   "source": [
    "uk_article_list = []\n",
    "uk_article_found = []\n",
    "\n",
    "pbar = progressbar.ProgressBar()\n",
    "for j in pbar(range(len(en_red_ids))):\n",
    "    red_link = en_red_ids[j]\n",
    "    red_link_encoding = en_red_encoding[j]\n",
    "    similarities = []\n",
    "    for i in range(len(uk_nontranslated_ids)):\n",
    "        similarity = jaccard(red_link_encoding, uk_nontranslated_encoding[i])\n",
    "        similarities.append(similarity)\n",
    "    \n",
    "    # select top 5 most similar uk articles, zero similarities excluded\n",
    "    similarities = np.array(similarities)\n",
    "    indices = np.argsort(-similarities)[:5]\n",
    "    score = np.round(similarities[indices],3)\n",
    "    n_nonzero = np.sum(score>0)\n",
    "    indices = np.sort(indices[:n_nonzero])\n",
    "\n",
    "    # scores and article names sorted by article ids\n",
    "    score = np.round(similarities[indices],3)\n",
    "    uk_article_ids = uk_nontranslated_ids[indices]\n",
    "    uk_article_names = list(df_uk_name[df_uk_name['id'].isin(uk_article_ids)]['title'])\n",
    "    \n",
    "    # sort by jaccard similarity\n",
    "    d = dict(zip(uk_article_names, score))\n",
    "    d_sorted = sorted(((value, key) for (key,value) in d.items()), reverse=True)\n",
    "    \n",
    "    # save to list\n",
    "    uk_article_list.append(d_sorted)\n",
    "    \n",
    "    if len(uk_article_names)>0:\n",
    "        uk_article_found.append(True)\n",
    "    else:\n",
    "        uk_article_found.append(False)\n",
    "\n",
    "        \n",
    "# create df to show results\n",
    "df_similarities = pd.DataFrame({'red_link_id': en_red_ids,\n",
    "                   'uk_similar_found': uk_article_found, \n",
    "                   'uk_similar': uk_article_list\n",
    "                  })\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 269,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_red_results = df_red_links.merge(right=df_similarities, right_on = 'red_link_id',\n",
    "                                     left_on = 'red_link_id', how = 'left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 270,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>red_link_id</th>\n",
       "      <th>red_link_name</th>\n",
       "      <th>incoming_en_names</th>\n",
       "      <th>n_incoming_en</th>\n",
       "      <th>incoming_uk_names</th>\n",
       "      <th>n_incoming_uk</th>\n",
       "      <th>uk_similar</th>\n",
       "      <th>uk_similar_found</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>57726536</td>\n",
       "      <td>Cleonini</td>\n",
       "      <td>{Pachycerus, Rhabdorrhynchus, Liocleonus clath...</td>\n",
       "      <td>7</td>\n",
       "      <td>{Pachycerus, Rhabdorrhynchus, Cyphocleonus, Li...</td>\n",
       "      <td>5</td>\n",
       "      <td>[(0.067, Тер-Мінасян Маргарита Єрвандівна), (0...</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>57726733</td>\n",
       "      <td>West Coast Mafia Records</td>\n",
       "      <td>{The Final Chapter (C-Bo album), Gas Chamber (...</td>\n",
       "      <td>24</td>\n",
       "      <td>{Life as a Rider, Cashville Records, West Coas...</td>\n",
       "      <td>18</td>\n",
       "      <td>[(0.842, West Coast Mafia Records), (0.091, Пе...</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>57726734</td>\n",
       "      <td>Killa Tay</td>\n",
       "      <td>{JT the Bigga Figga, C-Bo, E.D.I. Mean, A Mill...</td>\n",
       "      <td>35</td>\n",
       "      <td>{Life as a Rider, Дискографія Yukmouth, Blow (...</td>\n",
       "      <td>16</td>\n",
       "      <td>[(0.375, West Coast Mafia Records), (0.007, РЕ...</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>57726809</td>\n",
       "      <td>Denver Film Critics Society</td>\n",
       "      <td>{The Croods, The Social Network (soundtrack), ...</td>\n",
       "      <td>24</td>\n",
       "      <td>{Гіліян Флінн, Сімейка Крудсів, Б'ютифул, Сієн...</td>\n",
       "      <td>11</td>\n",
       "      <td>[(0.043, Задніпровський Назар Олександрович), ...</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>57727161</td>\n",
       "      <td>Wim Smet</td>\n",
       "      <td>{2011â12 Oud-Heverlee Leuven season, 2012â...</td>\n",
       "      <td>13</td>\n",
       "      <td>{Кубок Бельгії з футболу 2016—2017, Кубок Бель...</td>\n",
       "      <td>6</td>\n",
       "      <td>[(0.091, Павел Рачковський)]</td>\n",
       "      <td>True</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   red_link_id                red_link_name  \\\n",
       "0     57726536                     Cleonini   \n",
       "1     57726733     West Coast Mafia Records   \n",
       "2     57726734                    Killa Tay   \n",
       "3     57726809  Denver Film Critics Society   \n",
       "4     57727161                     Wim Smet   \n",
       "\n",
       "                                   incoming_en_names  n_incoming_en  \\\n",
       "0  {Pachycerus, Rhabdorrhynchus, Liocleonus clath...              7   \n",
       "1  {The Final Chapter (C-Bo album), Gas Chamber (...             24   \n",
       "2  {JT the Bigga Figga, C-Bo, E.D.I. Mean, A Mill...             35   \n",
       "3  {The Croods, The Social Network (soundtrack), ...             24   \n",
       "4  {2011â12 Oud-Heverlee Leuven season, 2012â...             13   \n",
       "\n",
       "                                   incoming_uk_names  n_incoming_uk  \\\n",
       "0  {Pachycerus, Rhabdorrhynchus, Cyphocleonus, Li...              5   \n",
       "1  {Life as a Rider, Cashville Records, West Coas...             18   \n",
       "2  {Life as a Rider, Дискографія Yukmouth, Blow (...             16   \n",
       "3  {Гіліян Флінн, Сімейка Крудсів, Б'ютифул, Сієн...             11   \n",
       "4  {Кубок Бельгії з футболу 2016—2017, Кубок Бель...              6   \n",
       "\n",
       "                                          uk_similar  uk_similar_found  \n",
       "0  [(0.067, Тер-Мінасян Маргарита Єрвандівна), (0...              True  \n",
       "1  [(0.842, West Coast Mafia Records), (0.091, Пе...              True  \n",
       "2  [(0.375, West Coast Mafia Records), (0.007, РЕ...              True  \n",
       "3  [(0.043, Задніпровський Назар Олександрович), ...              True  \n",
       "4                       [(0.091, Павел Рачковський)]              True  "
      ]
     },
     "execution_count": 270,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_red_results.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 271,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True     2467\n",
       "False    1126\n",
       "Name: uk_similar_found, dtype: int64"
      ]
     },
     "execution_count": 271,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_red_results['uk_similar_found'].value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 281,
   "metadata": {},
   "outputs": [],
   "source": [
    "# save results to file\n",
    "df_red_results.to_csv('red_links_results.csv.gz', compression='gzip', \n",
    "                      header=True, index=False, encoding = 'UTF-16')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load \n",
    "df_red_results = pd.read_csv('red_links_results.csv.gz', encoding = 'UTF-16')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
