{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Phase 2. Augment data\n",
    "\n",
    "## Contents\n",
    "- [Configuration](#Configuration)\n",
    "  - [Imports](#Imports)\n",
    "  - [Variables](#Variables)\n",
    "  - [Support functions](#Support-functions)\n",
    "- [Users' Botscore](#Users'-Botscore)\n",
    "  - [Execute Botscore update](#Execute-Botscore-update)\n",
    "- [Friendship analysis](#Friendship-analysis)\n",
    "  - [Execute friendship analysis](#Execute-friendship-analysis)\n",
    "- [Tag verified users with a political party](#Tag-verified-users-with-a-political-party)\n",
    "  - [Execute tagging functions](#Execute-tagging-functions)\n",
    "- [Augment political party tags](#Augment-political-party-tags)\n",
    "  - [Execute users' tagging](#Execute-users'-tagging)\n",
    "- [Bag of words creation](#Bag-of-words-creation)\n",
    "  - [Hashtags definitions](#Hashtags-definitions)\n",
    "  - [Verified political party regex definitions](#Verified-political-party-regex-definitions)\n",
    "  - [Matching of interactions within BOWs](#Matching-of-interactions-within-BOWs)\n",
    "- [Sentiment analysis extraction](#Sentiment-analysis-extraction)\n",
    "  - [Execution of the sentiment analysis and database update](#Execution-of-the-sentiment-analysis-and-database-update)\n",
    "- [Anonymization](#Anonymization)\n",
    "  - [Calculate UUIDs for the users](#Calculate-UUIDs-for-the-users)\n",
    "  - [Calculate UUIDs for the tweets](#Calculate-UUIDs-for-the-tweets)\n",
    "  - [Swap users' IDs](#Swap-users'-IDs)\n",
    "  - [Check for potentially missed ObjectIDs](#Check-for-potentially-missed-ObjectIDs)\n",
    "  - [Strip information from the tweets and swap IDs](#Strip-information-from-the-tweets-and-swap-IDs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Utilities\n",
    "from IPython.display import display\n",
    "from fastprogress import master_bar, progress_bar\n",
    "from datetime import datetime\n",
    "from unidecode import unidecode\n",
    "import os\n",
    "import ntpath\n",
    "import numpy as np\n",
    "import statistics \n",
    "import re\n",
    "import math\n",
    "import random\n",
    "import datetime\n",
    "import uuid\n",
    "import numbers\n",
    "from collections.abc import MutableMapping\n",
    "import pandas as pd\n",
    "from multiprocessing import Pool\n",
    "\n",
    "# Botometer API\n",
    "import botometer\n",
    "\n",
    "# MongoDB functionality\n",
    "from pymongo.errors import BulkWriteError\n",
    "from pymongo import MongoClient, InsertOne, UpdateOne, DeleteOne, UpdateMany, DeleteMany\n",
    "from pymongo.bulk import BulkOperationBuilder\n",
    "from bson import ObjectId\n",
    "\n",
    "# Tweet API for friendships\n",
    "import tweepy\n",
    "\n",
    "# Specific imports for sentiment analysis\n",
    "import emoji\n",
    "import classifier as cl\n",
    "\n",
    "# Sentiment algorithm\n",
    "clf = cl.SentimentClassifier()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directories where CSV data is stored\n",
    "ROOT_DIR = \"ABOSLUTE_PATH_TO_ROOT_FOLDER\"\n",
    "DATA_DIR = ROOT_DIR + \"data/\"\n",
    "# Change path to root\n",
    "os.chdir(ROOT_DIR)\n",
    "\n",
    "# Botometer and Twitter Keys for parallel processing\n",
    "keys = {\n",
    "     #0: botometer.Botometer(wait_on_ratelimit=True, rapidapi_key='RAPID_API_KEY', **{'consumer_key':'TWITTER_DEV_CONSUMER_KEY', 'consumer_secret':'TWITTER_DEV_CONSUMER_SECRET'}),\n",
    "     #1: botometer.Botometer(wait_on_ratelimit=True, rapidapi_key='RAPID_API_KEY', **{'consumer_key':'TWITTER_DEV_CONSUMER_KEY', 'consumer_secret':'TWITTER_DEV_CONSUMER_SECRET'}),\n",
    "}\n",
    "\n",
    "# MongoDB parameters\n",
    "mongoclient = MongoClient('IP_ADDRESS', PORT)\n",
    "db = mongoclient.BB10NPUBLIC\n",
    "# It will automatically create the tweets' and users' collections."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Support Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_objid(text):\n",
    "    \"\"\"Makes an ObjectId of 4 bytes\n",
    "    \n",
    "    Keyword arguments:\n",
    "    text -- string to be converted into Object ID\n",
    "    \"\"\"\n",
    "    text = str(text)\n",
    "    if not text.strip():\n",
    "        return None\n",
    "    try:\n",
    "        return ObjectId(text.rjust(24,\"0\"))\n",
    "    except Exception as ex:\n",
    "        print(text, ex)\n",
    "        return None\n",
    "\n",
    "def flatten(d, parent_key='', sep='_'):\n",
    "    \"\"\"Formats MongoDB results\n",
    "    \n",
    "    Keyword arguments:\n",
    "    d -- dictionary with key and uncleaned values\n",
    "    parent_key --\n",
    "    sep --\n",
    "    \"\"\"\n",
    "    items = []\n",
    "    for k, v in d.items():\n",
    "        new_key = parent_key + sep + k if parent_key else k\n",
    "        if isinstance(v, MutableMapping):\n",
    "            items.extend(flatten(v, new_key, sep=sep).items())\n",
    "        else:\n",
    "            items.append((new_key, v))\n",
    "    return dict(items)\n",
    "\n",
    "def chunks(l, n):\n",
    "    \"\"\"Yield successive n-sized chunks from l.\"\"\"\n",
    "    for i in range(0, len(l), n):\n",
    "        yield l[i:i + n]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Users' Botscore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_users_without_botscore(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID of those users without botscore\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    users = list(user_collection.find({'$and' : [ {'scores': { '$exists': False }},     \n",
    "                               { '$or' : \n",
    "                                        [{'ignore' : {'$exists' : False}}, \n",
    "                                        { '$and' : \n",
    "                                                 [ {'ignore' : {'$exists' : True}},\n",
    "                                                   {'ignore' : False} ]\n",
    "                                        }]\n",
    "                               }\n",
    "                             ]\n",
    "                   },\n",
    "                   {'_id': 1}))\n",
    "    \n",
    "    users = [u['_id'] for u in users]\n",
    "    print(\"Number of users without botscore:\",len(users))\n",
    "    return users\n",
    "\n",
    "def get_new_users(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID of those users annotated with botscore -1 (first time)\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    users = list(user_collection.find({'scores': -1},\n",
    "                                      {'_id': 1}))\n",
    "    \n",
    "    users = [u['_id'] for u in users]\n",
    "    print(\"Number of users to consult the botscore:\",len(users))\n",
    "    return users\n",
    "\n",
    "def get_botscore_by_userid(user_id):\n",
    "    \"\"\"\n",
    "    Collects the botscore from Botometer\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_id -- Twitter users' identificator\n",
    "    \"\"\"\n",
    "    \n",
    "    try:\n",
    "        botometer_instance = random.choice(keys)\n",
    "        consumer_key = botometer_instance.consumer_key\n",
    "        result = botometer_instance.check_account(user_id)\n",
    "        return UpdateOne({'_id': user_id}, \n",
    "                         {'$set': {'scores': result}},\n",
    "                         upsert=True\n",
    "                        )\n",
    "    except Exception as e:\n",
    "        # Locked account (private)\n",
    "        auth_match = re.search('Not authorized', str(e))\n",
    "        timeline_match = re.search('has no tweets in timeline', str(e))\n",
    "        notExist_match = re.search('Sorry, that page does not exist', str(e))\n",
    "        overCapacity_match = re.search('Over capacity', str(e))\n",
    "        \n",
    "        if auth_match:\n",
    "            return UpdateOne({'_id': make_objid(user_id)},\n",
    "                             {'$unset': {'scores':\"\"},\n",
    "                              '$set': {'ignore': False, 'ignore_reason': 'not authorized'},\n",
    "                              '$push': {'ignore_key_used': consumer_key}},\n",
    "                             upsert=True\n",
    "                            )\n",
    "        elif overCapacity_match:\n",
    "            return UpdateOne({'_id': make_objid(user_id)}, \n",
    "                             {'$unset': {'scores':\"\"},\n",
    "                              '$set': {'ignore': False, 'ignore_reason': 'over capacity'},\n",
    "                              '$push': {'ignore_key_used': consumer_key}},\n",
    "                             upsert=True\n",
    "                            )\n",
    "        elif timeline_match:\n",
    "            #print(\"User\", user_id, \" has no tweets in timeline\")\n",
    "            return UpdateOne({'_id': make_objid(user_id)}, \n",
    "                             {'$unset': {'scores':\"\"},\n",
    "                              '$set': {'ignore': True, 'ignore_reason': 'has no tweets in timeline'}},\n",
    "                              upsert=True\n",
    "                            )\n",
    "        elif notExist_match:\n",
    "            #print(\"User\", user_id, \" does not exists anymore\")\n",
    "            return UpdateOne({'_id': make_objid(user_id)}, \n",
    "                             {'$unset': {'scores':\"\"},\n",
    "                              '$set': {'ignore': True, 'ignore_reason': 'does not exists anymore'}},\n",
    "                              upsert=True\n",
    "                            )\n",
    "        else:\n",
    "            print(\"Exception. User:\", user_id, \"API:\", consumer_key, \"Message:\", e)\n",
    "        return None\n",
    "\n",
    "    \n",
    "def botscores_to_mongodb(users, user_collection, processes=18):\n",
    "    \"\"\"\n",
    "    Saves a list of users' botscores in MongoDB.\n",
    "    The process can be paralelized with available keys for more speed and handle API Twitter limits\n",
    "    Note: This method should be improved by implementing non-blocking calls\n",
    "\n",
    "    Keyword arguments:\n",
    "    users -- list of Twitter users' identificator\n",
    "    processes -- number of processes to employ (must be less or equal to the number of available keys)\n",
    "    \"\"\"\n",
    "    \n",
    "    pool = Pool(processes=processes)\n",
    "    processes = []\n",
    "\n",
    "    for uid in progress_bar(users):       \n",
    "        processes.append(pool.apply_async(\n",
    "            get_botscore_by_userid, \n",
    "            (uid,)\n",
    "        ))\n",
    "\n",
    "    pool.close()\n",
    "\n",
    "\n",
    "    #pool.join()\n",
    "    print('Getting user botscores...')\n",
    "    operations = []\n",
    "    for p in progress_bar(processes):\n",
    "        #p.wait()\n",
    "        response = p.get()\n",
    "        if response is not None:\n",
    "            operations.append(response)\n",
    "        \n",
    "        \n",
    "        if len(operations) > 1000:\n",
    "            results = user_collection.bulk_write(operations)\n",
    "            print(\"M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "                  \" I:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "                  \" U:\", str(results.upserted_count).rjust(8, \" \"))\n",
    "            operations = []\n",
    "\n",
    "    if len(operations) > 0: \n",
    "        results = user_collection.bulk_write(operations)\n",
    "        print(\"M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" I:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "              \" U:\", str(results.upserted_count).rjust(8, \" \"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Execute Botscore update"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# get users for whom the botscore has never been consulted\n",
    "users = get_new_users(db.users)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# try to find the botscore for users that was already tried at another time\n",
    "users = get_users_without_botscore(db.users)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "users[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# update the database with botscores\n",
    "botscores_to_mongodb(users, db.users, 12)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Removed users that are not available"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "users_operations = []\n",
    "tweets_operations = []\n",
    "for u in users:\n",
    "    users_operations.append(DeleteOne({'_id': u}))\n",
    "    tweets_operations.append(DeleteMany({\n",
    "        '$or': [\n",
    "            {'user_id': u}, \n",
    "            {'in_reply_to_user_id': u}, \n",
    "            {'retweet_or_quote_user_id': u}\n",
    "        ]}))\n",
    "    if len(users_operations) > 3000:\n",
    "        print(len(users_operations), \" users to be deleted\", end=\"; \")\n",
    "        results = db.users.bulk_write(users_operations, ordered=False)\n",
    "        print(\"U:\", str(results.deleted_count).rjust(8, \" \"), end=\"; \")\n",
    "        results = db.tweets.bulk_write(tweets_operations, ordered=False)\n",
    "        print(\"T:\", str(results.deleted_count).rjust(8, \" \"))\n",
    "        users_operations = []\n",
    "        tweets_operations = []\n",
    "    \n",
    "if len(users_operations) > 0:\n",
    "    print(len(users_operations), \" users to be deleted\", end=\"; \")\n",
    "    results = db.users.bulk_write(users_operations, ordered=False)\n",
    "    print(\"U:\", str(results.deleted_count).rjust(8, \" \"), end=\"; \")\n",
    "    results = db.tweets.bulk_write(tweets_operations, ordered=False)\n",
    "    print(\"T:\", str(results.deleted_count).rjust(8, \" \"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Friendship analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_ids(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID of all users\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    \n",
    "    total_users = list(user_collection.find({},{'_id': 1}))\n",
    "    total_users = [u['_id'] for u in total_users]\n",
    "    print(\"Number of total users:\",len(total_users))\n",
    "    return total_users\n",
    "\n",
    "def get_users_without_friends(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID of users with no friends consulted\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    pipeline = [\n",
    "    {\n",
    "        '$match': {\n",
    "            'ignore' : {\n",
    "                '$exists':False\n",
    "            },\n",
    "            'friends': {\n",
    "                '$exists':False\n",
    "            }\n",
    "        }\n",
    "    } \n",
    "    ,\n",
    "    {\n",
    "        '$project': {\n",
    "            '_id':1 \n",
    "        }\n",
    "    }\n",
    "    ]\n",
    "    \n",
    "    print(\"Query\", end=\" \")\n",
    "    users = user_collection.aggregate(pipeline, allowDiskUse=True)\n",
    "    print(\"OK; List\", end=\" \")\n",
    "    users = list(users)\n",
    "    users = [u['_id'] for u in users]\n",
    "    print(\"OK; Total records:\", len(users))\n",
    "    return users\n",
    "\n",
    "def get_bots_without_friends(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID of bots with no friends consulted\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    \n",
    "    # 95th percentile\n",
    "    p95 = 0.6908019160064479\n",
    "\n",
    "    pipeline = [\n",
    "    {\n",
    "        '$match': {\n",
    "            'ignore' : {\n",
    "                '$exists':False\n",
    "            },\n",
    "            'friends': {\n",
    "                '$exists':False\n",
    "            },\n",
    "            'scores.scores.universal': {\n",
    "                '$gte': p95\n",
    "            }\n",
    "        }\n",
    "    } \n",
    "    ,\n",
    "    {\n",
    "        '$project': {\n",
    "            '_id':1 \n",
    "        }\n",
    "    }\n",
    "    ]\n",
    "    \n",
    "    print(\"Query\", end=\" \")\n",
    "    bots = user_collection.aggregate(pipeline, allowDiskUse=True)\n",
    "    print(\"OK; List\", end=\" \")\n",
    "    bots = list(bots)\n",
    "    bots = [b['_id'] for b in bots]\n",
    "    print(\"OK; Total records:\", len(bots))\n",
    "    return bots\n",
    "\n",
    "def get_friendships_by_userid(user_id, total_users, user_collection):\n",
    "    \"\"\"\n",
    "    Consults followers and followings of a user and save in MongoDB\n",
    "    those who are within the total recollected sample of users.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_id -- Twitter users' identificator\n",
    "    total_users -- List of the total of Twitter users' identificators within our database\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \"\"\"\n",
    "    botometer_instance = random.choice(keys)\n",
    "    consumer_key = botometer_instance.consumer_key\n",
    "    consumer_secret = botometer_instance.consumer_secret\n",
    "\n",
    "    filter_uid = {'_id': user_id}\n",
    "    message = \"Checking:\" + str(user_id) + \" \"\n",
    "    filter_content = {}\n",
    "\n",
    "    try:\n",
    "        auth = tweepy.AppAuthHandler(consumer_key,consumer_secret)\n",
    "        api = tweepy.API(auth, wait_on_rate_limit=True)\n",
    "        methods = [api.friends_ids,api.followers_ids]\n",
    "        political_friendship_ids = {\n",
    "            'friends' : [],\n",
    "            'followers': [],\n",
    "        }\n",
    "\n",
    "        for name,method in zip(['friends','followers'],[api.friends_ids,api.followers_ids]):\n",
    "            #print(\"\\tQuerying\", name, method)\n",
    "            for friendships in tweepy.Cursor(method, user_id = user_id, count=5000).pages():\n",
    "                #print(\"\\tCrawled:\", len(friendships), end=\"; \")\n",
    "                filtered = [make_objid(f) for f in friendships if make_objid(f) in total_users]\n",
    "                #print(\"In our DB:\", len(filtered), end=\"; \")\n",
    "                political_friendship_ids[name] += filtered\n",
    "\n",
    "        message += \"\\tFriends:\" + str(len(political_friendship_ids['friends']))\n",
    "        message += \"\\tFollowers:\" + str(len(political_friendship_ids['followers']))\n",
    "        filter_content = {\n",
    "            '$push': {\n",
    "                'friends' : {\n",
    "                    '$each' : political_friendship_ids['friends']\n",
    "                },\n",
    "                'followers' : {\n",
    "                    '$each' : political_friendship_ids['followers']\n",
    "                }\n",
    "            }\n",
    "        }\n",
    "\n",
    "    except tweepy.TweepError as err:\n",
    "        print(message+\"tweepy.TweepError=\", err)\n",
    "        filter_content = {\n",
    "            '$set': {\n",
    "                'ignore': True, 'ignore_reason': str(err)\n",
    "            },\n",
    "            '$push': {\n",
    "                'ignore_key_used': consumer_key}\n",
    "        }\n",
    "    except Exception as e:\n",
    "        print(message+\"Exception. User:\", user_id, \"API:\", consumer_key, \"Message:\", e)\n",
    "        return False\n",
    "\n",
    "    #print(filter_uid,filter_content)\n",
    "    res = user_collection.update_one(filter_uid, filter_content, upsert=True)\n",
    "    print(message + \"\\tMa:\", res.matched_count, \"\\tMo:\", res.modified_count, \"\\tUp:\", res.upserted_id, \";\\tDONE!\")\n",
    "    return True\n",
    "\n",
    "def friendships_to_mongodb(users_to_analyze,total_users, processes=18):\n",
    "    \"\"\"\n",
    "    Extracts the followers a followings of a list of users and save in\n",
    "    database those who are within the total recollected sample of users.\n",
    "    Note: This method should be improved by implementing non-blocking calls\n",
    "    \n",
    "    Keyword arguments:\n",
    "    users_to_analyze -- List of Twitter users' identificators to analyze the friendships\n",
    "    total_users -- List of the total of Twitter users' identificators within our database\n",
    "    processes -- number of processes to employ (must be less or equal to the number of available keys)\n",
    "    \"\"\"\n",
    "    \n",
    "    pool = Pool(processes=processes)\n",
    "    processes = []\n",
    "    print('Preparing processes...')\n",
    "    for uid in progress_bar(users_to_analyze):       \n",
    "        processes.append(pool.apply_async(\n",
    "            get_friendships_by_userid, \n",
    "            (uid, total_users, MongoClient('127.0.0.1',27017).botbusters.users)   # each process should have a new MongoClient session!\n",
    "        ))\n",
    "\n",
    "    pool.close()\n",
    "    #pool.join()\n",
    "    operations = []\n",
    "    print('Getting friendships...')\n",
    "    for p in progress_bar(processes):\n",
    "        p.get()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Execute friendship analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get the list of all users to know always with which followers/followings of the consulted ones it is necessary to keep\n",
    "total_users = get_user_ids(db.users)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get bots without friends: we prefer to give priority to bots, then to other users\n",
    "bots_without_friends = get_bots_without_friends(db.users)\n",
    "friendships_to_mongodb(bots_without_friends,total_users,processes=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get users without friends once bots have been processed\n",
    "users_without_friends = get_users_without_friends(db.users)\n",
    "friendships_to_mongodb(users_without_friends,total_users,processes=10)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Tag verified users with a political party"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_name_description_verified_users(user_metadata_collection):\n",
    "    \"\"\"\n",
    "    Extracts the name and description of verified users. \n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_metadata_collection -- MongoDB Users' Metadata Collection (containing screen name and description of users)\n",
    "    \"\"\"\n",
    "    \n",
    "    # pipeline to extract user_id and screen_name+user_description verified accounts\n",
    "    # as we have different temporal instances of this information, we select the first one\n",
    "    political_verified_users_pipeline = [\n",
    "    {\n",
    "        '$match': {\n",
    "            'user_verified': True\n",
    "        }\n",
    "    }\n",
    "    , {\n",
    "        '$group': {\n",
    "            '_id': '$user_id', \n",
    "            'user_info': {\n",
    "                '$push': {\n",
    "                    '$concat': [\n",
    "                        '$user_screen_name', ' ', '$user_description'\n",
    "                    ]\n",
    "                }\n",
    "            }\n",
    "        }\n",
    "    }\n",
    "    ]\n",
    "    \n",
    "    verified_users = list(user_metadata_collection.aggregate(political_verified_users_pipeline,allowDiskUse=True))\n",
    "    print(len(verified_users),\"verified users found!\")\n",
    "    return verified_users\n",
    "\n",
    "def get_political_parties(verified_users):\n",
    "    \"\"\"\n",
    "    Returns the users belonging to the five main political parties.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    verified_users -- List of user ids + user info (description+name) \n",
    "    \"\"\"\n",
    "    \n",
    "    parties = {\n",
    "        'UP': set([]),\n",
    "        'PSOE': set([]),\n",
    "        'Ciudadanos': set([]),\n",
    "        'PP': set([]),\n",
    "        'VOX': set([])\n",
    "    }\n",
    "\n",
    "    # regexs used against user info (screen name + description) to determine the political party\n",
    "    regexs  = {\n",
    "        'UP': '.*(Unidas Podemos|Podemos| Podem|Izquierda Unida|iunida|iu ).*',\n",
    "        'PSOE': '.*(PSOE|Partido Socialista|psc).*',\n",
    "        'Ciudadanos': '.*(Cs_| Cs |Ciudadanos|ciutadans).*',\n",
    "        'PP': '.*( PP|populares|Partido popular).*',\n",
    "        'VOX': '.*( VOX|@vox_es).*'\n",
    "    }\n",
    "\n",
    "    print('Getting political party of verified users...')\n",
    "    \n",
    "    ## Fills PARTIES dictionary with users\n",
    "    for verified_user in progress_bar(verified_users):\n",
    "        partyFound = False\n",
    "        for political_party in ['UP','PSOE','PP','VOX','Ciudadanos']:\n",
    "            for info in verified_user['user_info']:\n",
    "                if info is not None:\n",
    "                    if re.match(regexs[political_party],info,re.IGNORECASE):\n",
    "                        parties[political_party].add(verified_user['_id'])\n",
    "                        partyFound = True\n",
    "                        break\n",
    "            if partyFound:\n",
    "                break\n",
    "    return parties\n",
    "\n",
    "def political_parties_to_mongodb(parties, user_collection):\n",
    "    \"\"\"\n",
    "    Inserts in DB the political party of those users which have been identified\n",
    "    \n",
    "    Keyword arguments:\n",
    "    parties -- Dictionary with the five political parties as keys, and a list of users' ObjectID as values.\n",
    "    \"\"\"\n",
    "    operations = []\n",
    "    for political_party in ['UP','PSOE','Ciudadanos','PP','VOX']:\n",
    "        for user_id in parties[political_party]:\n",
    "            operations.append(UpdateOne({'_id': user_id}, \n",
    "                             {'$set': {'political_party': political_party}},\n",
    "                             upsert=False\n",
    "                            ))\n",
    "\n",
    "    print(len(operations), \"users related to political parties saved!\")\n",
    "    results = user_collection.bulk_write(operations)\n",
    "    print(\"M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" I:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "              \" U:\", str(results.upserted_count).rjust(8, \" \"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Execute tagging functions\n",
    "\n",
    "Each tagged user is then manually verified."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# getting the name and description of those users with Twitter's VERIFIED attribute set to true\n",
    "verified_users = get_name_description_verified_users(db.users_metadata)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# getting group of user ids belonging to each party\n",
    "parties = get_political_parties(verified_users)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# update users in database if political party has been identified\n",
    "political_parties_to_mongodb(parties, db.users)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Augment political party tags\n",
    "\n",
    "Retrieve the users with the most tweets and tag them with the political party. Users' will need to be manually validated."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_users_for_political_party(user_metadata_collection):\n",
    "    \"\"\"\n",
    "    Extracts non-verified humans ordered by number of interactions\n",
    "    \n",
    "    Keyword argument:\n",
    "    user_metadata_collection -- MongoDB Users' Metadata Collection (containing screen name and description of users)\n",
    "    \"\"\"\n",
    "    \n",
    "    # 75th percentile of botscores: users above it are considered likely bots and discarded\n",
    "    p75 = 0.23633691139538376\n",
    "    \n",
    "    users_pipeline = [\n",
    "    {\n",
    "        # keep only interactions made by non-verified accounts\n",
    "        '$match': {\n",
    "            'user_verified': False\n",
    "        }\n",
    "    }, {\n",
    "        # one document per user, collecting all observed names/descriptions\n",
    "        '$group': {\n",
    "            '_id': '$user_id', \n",
    "            'user_screen_names': {\n",
    "                '$push': '$user_screen_name'\n",
    "            }, \n",
    "            'user_descriptions': {\n",
    "                '$push': '$user_description'\n",
    "            }, \n",
    "            'count': {\n",
    "                '$sum': 1\n",
    "            }\n",
    "        }\n",
    "    }, {\n",
    "        # most active users first\n",
    "        '$sort': {\n",
    "            'count': -1\n",
    "        }\n",
    "    }, {\n",
    "        '$lookup': {\n",
    "            'from': 'users', \n",
    "            'localField': '_id', \n",
    "            'foreignField': '_id', \n",
    "            'as': 'user'\n",
    "        }\n",
    "    }, {\n",
    "        '$unwind': {\n",
    "            'path': '$user'\n",
    "        }\n",
    "    }, {\n",
    "        # keep only users whose universal botscore is below the 75th percentile\n",
    "        '$match': {\n",
    "            'user.scores.scores.universal': {\n",
    "                '$lte': p75\n",
    "            }\n",
    "        }\n",
    "    }, {\n",
    "        '$project': {\n",
    "            '_id': 1, \n",
    "            'user_screen_names': 1, \n",
    "            'user_descriptions': 1\n",
    "        }\n",
    "    }\n",
    "    ]\n",
    "\n",
    "    users = list(user_metadata_collection.aggregate(users_pipeline,allowDiskUse=True))\n",
    "    return users\n",
    "\n",
    "\n",
    "def get_augmented_political_parties(users):\n",
    "    \"\"\"\n",
    "    Returns the users belonging to the five main political parties.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    users -- List of user ids + user info (description+name) \n",
    "    \"\"\"\n",
    "    # lists (not sets): .append() requires a list (sets have no append method)\n",
    "    # and lists keep the interaction-based ordering of USERS, so each party's\n",
    "    # members can later be selected by rank\n",
    "    parties = {\n",
    "        'UP': [],\n",
    "        'PSOE': [],\n",
    "        'Ciudadanos': [],\n",
    "        'PP': [],\n",
    "        'VOX': []\n",
    "    }\n",
    "\n",
    "    # regexs used against user info (screen name + description) to determine the political party\n",
    "    regexs  = {\n",
    "        'UP': '.*(Unidas Podemos|Podemos| Podem|Izquierda Unida|iunida|iu ).*',\n",
    "        'PSOE': '.*(PSOE|Partido Socialista|psc).*',\n",
    "        'Ciudadanos': '.*(Cs_| Cs |Ciudadanos|ciutadans).*',\n",
    "        'PP': '.*( PP|populares|Partido popular).*',\n",
    "        'VOX': '.*( VOX|@vox_es).*'\n",
    "    }\n",
    "\n",
    "    print('Getting political party for augmented users...')\n",
    "    \n",
    "    ## Fills PARTIES dictionary with users (each user is assigned at most one party)\n",
    "    for user in progress_bar(users):\n",
    "        partyFound = False\n",
    "        if user['user_screen_names'] is not None:\n",
    "            for screen_name in user['user_screen_names']:\n",
    "                if screen_name is None:\n",
    "                    continue\n",
    "                for political_party in ['VOX','PSOE','UP', 'PP','Ciudadanos']:\n",
    "                    # only regex on screen name to avoid ambiguous descriptions (for example, with Ciudadanos)\n",
    "                    if re.match(regexs[political_party],screen_name,re.IGNORECASE):\n",
    "                        parties[political_party].append(user['_id'])\n",
    "                        partyFound = True\n",
    "                        break\n",
    "                if partyFound:\n",
    "                    break\n",
    "    return parties"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Execute users' tagging"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# getting non-verified, low-botscore users ordered by number of interactions\n",
    "users = get_users_for_political_party(db.users_metadata)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# getting users with identified political party (regex match on screen name)\n",
    "parties = get_augmented_political_parties(users)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# only save the most influential users of each political party (depending on remaining numbers to reach at least 200)\n",
    "\n",
    "# check how many users we need to reach at least 200 users for each political party\n",
    "verified_users = list(db.users.find({'political_party': { '$exists': True}},{'_id':1,'political_party':1}))\n",
    "users_to_check = {\n",
    "    'Ciudadanos':0,\n",
    "    'PP':0,\n",
    "    'PSOE':0,\n",
    "    'UP':0,\n",
    "    'VOX':0\n",
    "}\n",
    "\n",
    "# count how many verified users are already tagged with each party\n",
    "# (verified_users is a plain list of dicts, so pandas groupby does not apply)\n",
    "verified_counts = {}\n",
    "for vu in verified_users:\n",
    "    verified_counts[vu['political_party']] = verified_counts.get(vu['political_party'], 0) + 1\n",
    "\n",
    "# target: reach 200 users per party, plus a 25% margin for later discards\n",
    "for party, count in verified_counts.items():\n",
    "    users_to_check[party] = (200-count) + (200-count)//4\n",
    "\n",
    "# we select the necessary users from all the analyzed users\n",
    "parties_to_mongodb = {\n",
    "    'VOX': [],\n",
    "    'PSOE':[],\n",
    "    'UP':[],\n",
    "    'PP':[],\n",
    "    'Ciudadanos':[]\n",
    "}\n",
    "\n",
    "for p in parties_to_mongodb:\n",
    "    # never request more users than were actually found for the party\n",
    "    number = min(users_to_check[p], len(parties[p]))\n",
    "    for i in range(0,number):\n",
    "        parties_to_mongodb[p].append(parties[p][i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# update database with the newly tagged (regex-augmented) users\n",
    "political_parties_to_mongodb(parties_to_mongodb)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Bag-of-words creation\n",
    "\n",
    "### Hashtags definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# keyword lists (hashtags, names, handles) per political party and per\n",
    "# campaign theme; a tweet whose text matches any entry is tagged with that key\n",
    "BAG_OF_WORDS = {\n",
    "    \"VOX\": [\n",
    "        \"#VOX\",  \n",
    "        \"#EspañaSiempre\",\n",
    "        \"Abascal\",\n",
    "        \"Santiago Abascal\",\n",
    "        \"Santi Abascal\",\n",
    "    ], \n",
    "    \"PP\": [\n",
    "        \"#PartidoPopular\", \n",
    "        \"Partido Popular\",\n",
    "        \"#PP\", \n",
    "        \"#PorTodoLoQueNosUne\",\n",
    "        \"Pablo Casado\",\n",
    "    ], \n",
    "    \"CIUDADANOS\": [\n",
    "        \"#Ciudadanos\", \n",
    "        \"#Cs \", \n",
    "        \"#EspañaEnMarcha\",\n",
    "        \"Albert Rivera\",\n",
    "        \"Rivera\",\n",
    "    ], \n",
    "    \"PSOE\": [\n",
    "        \"#AhoraSí\", \n",
    "        \"#AhoraEspaña\", \n",
    "        \"#PSOE\", \n",
    "        \"#PSOEcompraVotos\",\n",
    "        \"Pedro Sánchez\",\n",
    "    ], \n",
    "    \"UP\": [\n",
    "        \"#UnidasPodemos\", \n",
    "        \"Unidas Podemos\",\n",
    "        \"#ElPoderDeLaGente\", \n",
    "        \"#MamadasPodemos\", \n",
    "        \"#SePuede\", \n",
    "        \"#UnGobiernoContigo\",\n",
    "        \"Pablo Iglesias\",\n",
    "    ], \n",
    "    \"Elecciones\": [\n",
    "        \"#10N\", \n",
    "        \"#10NElecciones\", \n",
    "        \"#10Noviembre\", \n",
    "        \"#Elecciones10N\", \n",
    "        \"#eleccionesgenerales10N\", \n",
    "        \"#EleccionesNoviembre2019\", \n",
    "    ], \n",
    "    \"Exhumacion\": [\n",
    "        \"#exhumacionFranco\", \n",
    "        \"#francisfrancoesp\", \n",
    "        \"#FrancoCalientaQueSales\", \n",
    "        \"#unboxingfranco\", \n",
    "    ], \n",
    "    \"Cataluña\": [\n",
    "        \"#116YA\", \n",
    "        \"#disturbiosBarcelona\", \n",
    "        \"#EstadoDeExcepcion\", \n",
    "        \"#MarlaskaDimisionYa\", \n",
    "        \"#SpainIsAFascistState\", \n",
    "        \"#ThisIsTheRealSpain\", \n",
    "        \"#tsunamidemocractic\", \n",
    "        \"#tsunamiinfiltrado\", \n",
    "    ], \n",
    "    \"Debates\": [\n",
    "        \"#Debate10N\", \n",
    "        \"#DebateA5\", \n",
    "        \"#Debatea7RTVE\", \n",
    "        \"#DebateElectoral\", \n",
    "        \"#DebatePresidencial\", \n",
    "        \"#ElDebate4N\", \n",
    "        \"#ElDebateEnRTVE\", \n",
    "        \"#UltimaOportunidadL6\", \n",
    "    ], \n",
    "    \"AbascalEH\": [\n",
    "        \"#SantiagoAbascalEH\", \n",
    "        \"#elhormigueroabascal\",\n",
    "        \"#BoicotElHormiguero\",\n",
    "    ]\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Verified political party regex definitions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for every verified user with a known party, add their @handle to the\n",
    "# party's bag of words so mentions of them also count as party keywords\n",
    "verified_parties_users_pipeline = [\n",
    "    {\n",
    "        # verified users already tagged with a political party\n",
    "        '$match': {\n",
    "            'political_party': {\n",
    "                '$exists': True\n",
    "            }\n",
    "        }\n",
    "    }, {\n",
    "        # join with the metadata collection to recover the screen name\n",
    "        '$lookup': {\n",
    "            'from': 'users_metadata', \n",
    "            'localField': '_id', \n",
    "            'foreignField': 'user_id', \n",
    "            'as': 'user'\n",
    "        }\n",
    "    }, {\n",
    "        '$unwind': {\n",
    "            'path': '$user'\n",
    "        }\n",
    "    }, {\n",
    "        # one result per screen name, keeping the (single) party tag\n",
    "        '$group': {\n",
    "            '_id': '$user.user_screen_name', \n",
    "            'political_party': {\n",
    "                '$first': '$political_party'\n",
    "            }\n",
    "        }\n",
    "    }\n",
    "]\n",
    "\n",
    "\n",
    "verified_users = list(db.users.aggregate(verified_parties_users_pipeline))\n",
    "\n",
    "for vu in progress_bar(verified_users):\n",
    "    # .upper() maps e.g. 'Ciudadanos' onto the 'CIUDADANOS' BOW key\n",
    "    BAG_OF_WORDS[vu['political_party'].upper()].append('@'+vu['_id'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Matching of interactions within BOWs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_original_interactions(tweet_collection):\n",
    "    \"\"\"\n",
    "    Fetches the ObjectID, text and retweet count of every original, reply and quote.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection\n",
    "    \"\"\"\n",
    "    # a retweet-count threshold (e.g. 'retweet_count': {'$gte': 100}) could be added here\n",
    "    query = {'tweet_type': {'$in' : ['original','reply','quote']}}\n",
    "    fields = {'_id': 1, 'text':1, 'retweet_count':1}\n",
    "    original_interactions = list(tweet_collection.find(filter=query, projection=fields))\n",
    "\n",
    "    print(\"Original interactions:\",len(original_interactions))\n",
    "    return original_interactions\n",
    "\n",
    "def make_dictionary_keywords(bag_of_words, tweet_id, text, progress=None):\n",
    "    \"\"\"\n",
    "    Checks matching keywords for a specific tweets' text. Returns a boolean dictionary of\n",
    "    specific keywords and a boolean dictionary summarizing by themes\n",
    "    \n",
    "    Keyword arguments:\n",
    "    bag_of_words -- dictionary of parties/themes as keys, list of associated keywords as values\n",
    "    tweet_id -- ObjectID of the tweet\n",
    "    text -- Text of the tweet\n",
    "    progress -- Progress bar\n",
    "    \"\"\"\n",
    "    \n",
    "    keywords = {}           # for specific terms\n",
    "    keywords_summary = {}   # for themes\n",
    "    if progress is not None:\n",
    "        pb = progress_bar(bag_of_words.items(), parent=progress)\n",
    "    else:\n",
    "        pb = bag_of_words.items()\n",
    "    \n",
    "\n",
    "    for party, phashtags in pb:            \n",
    "        keywords[party] = {}\n",
    "\n",
    "        # checks for all hashtags, wither as hashtag as it is, \n",
    "        # or as the closest unicode character (ì -> i), \n",
    "        # or without the # and as the closes unicode character\n",
    "        for hashtag in phashtags:\n",
    "            try:\n",
    "                match = re.search(hashtag, text, re.IGNORECASE) is not None               \n",
    "                if match:\n",
    "                    keywords[party][hashtag] = True\n",
    "                else:\n",
    "                    if len(hashtag) < 3:\n",
    "                        continue\n",
    "\n",
    "                    check = re.sub(r'^[@#]', '', hashtag)\n",
    "                    match = re.search(check, unidecode(text), re.IGNORECASE) is not None\n",
    "                    if match:\n",
    "                        keywords[party][hashtag] = True\n",
    "                    else:\n",
    "                        keywords[party][hashtag] = re.search(unidecode(check), unidecode(text), re.IGNORECASE) is not None\n",
    "            except Exception as ex:\n",
    "                print(party,hashtag,ex,tweet_id,text)\n",
    "                keywords[party][hashtag] = False\n",
    "                continue\n",
    "\n",
    "        keywords_summary[party] = any(match for match in keywords[party].values())\n",
    "        \n",
    "    return (keywords, keywords_summary)\n",
    "\n",
    "def bows_to_mongodb(bag_of_words,original_interactions,tweet_collection):\n",
    "    \"\"\"\n",
    "    Inserts in DB, for each original, reply or quote, the bag-of-word dictionary containing true/false in every possible keyword depending if they match on the text\n",
    "    In addition, the bag-of-word of the tweet is propagated to associated retweets.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    bag_of_words -- dictionary of parties/themes as keys, list of associated keywords as values\n",
    "    original_interactions -- List of tweets' ObjectID and text (originals, replies and quotes)\n",
    "    tweet_collection -- MongoDB Tweets' Collection  \n",
    "    \"\"\"\n",
    "    def _flush(ops):\n",
    "        # execute the accumulated updates and report matched/modified counts\n",
    "        results = tweet_collection.bulk_write(ops)\n",
    "        print(\"Ma:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "\n",
    "    pending = []\n",
    "    for tweet in progress_bar(original_interactions):\n",
    "        oid = tweet['_id']\n",
    "        keywords, keywords_summary = make_dictionary_keywords(bag_of_words, oid, tweet['text'])\n",
    "        payload = {'$set': { 'keywords': keywords,\n",
    "                             'keywords_summary': keywords_summary}}\n",
    "\n",
    "        pending.append(UpdateOne({'_id': oid}, payload, upsert=False))\n",
    "\n",
    "        # retweets inherit the bag-of-words of the tweet they retweet\n",
    "        if tweet['retweet_count']>0:\n",
    "            pending.append(UpdateMany({'retweet_or_quote_id': oid, 'tweet_type': 'retweet'}, payload, upsert=False))\n",
    "\n",
    "        # flush in batches to bound memory usage\n",
    "        if len(pending) > 25000:\n",
    "            _flush(pending)\n",
    "            pending = []\n",
    "\n",
    "    if len(pending) > 0: \n",
    "        _flush(pending)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# getting originals, replies and quotes (ids, texts, retweet counts)...\n",
    "original_interactions = get_original_interactions(db.tweets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# updates database with BOWs\n",
    "bows_to_mongodb(BAG_OF_WORDS,original_interactions,db.tweets)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sentiment analysis extraction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_tweets_without_sentiment(tweet_collection):\n",
    "    \"\"\"\n",
    "    Fetches the ObjectID, tweet type, text and retweet count of all originals,\n",
    "    replies and quotes that have no sentiment score yet.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection\n",
    "    \"\"\"\n",
    "    \n",
    "    query = { 'sentiment_score' : {'$exists' : False },\n",
    "        'tweet_type': {'$in' : ['original','reply','quote']}}\n",
    "    fields = {'_id': 1, 'tweet_type':1,'text':1,'retweet_count':1}\n",
    "    tweets_without_sentiment = list(tweet_collection.find(filter=query, projection=fields))\n",
    "\n",
    "    print(\"Tweets without sentiment extracted:\",len(tweets_without_sentiment))\n",
    "    return tweets_without_sentiment\n",
    "\n",
    "# NOTE(review): re-definition of get_original_interactions from the\n",
    "# bag-of-words section (same behavior); kept so this cell is self-contained,\n",
    "# but the duplication could be removed\n",
    "def get_original_interactions(tweet_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectID, text and retweet count of originals, replies and quotes\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection  \n",
    "    \"\"\"\n",
    "    original_interactions = list(tweet_collection.find(\n",
    "        filter={'tweet_type': {'$in' : ['original','reply','quote']}},\n",
    "        projection={'_id': 1, 'text':1, 'retweet_count':1},\n",
    "    ))\n",
    "\n",
    "    print(\"Original interactions:\",len(original_interactions))    \n",
    "    return original_interactions\n",
    "\n",
    "def clean_str(tweet: str):\n",
    "    \"\"\"\n",
    "    Preprocess tweet text before sentiment analysis.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet -- text to be cleaned\n",
    "    \"\"\"\n",
    "    \n",
    "    def processK(text):\n",
    "        # rewrite SMS-style 'k' spellings into standard Spanish\n",
    "        # ('ki'/'ke' -> 'qui'/'que', 'ka'/'ko'/'ku' -> 'ca'/'co'/'cu', lone 'k' -> 'que')\n",
    "        dirtyKQ = re.compile('[^o]k[i|e]')\n",
    "        dirtyKC = re.compile('[^o]k[a|o|u]')\n",
    "        dirtyK = re.compile('[^o]?k')\n",
    "        while dirtyKQ.search(text)!=None:\n",
    "            grp = dirtyKQ.search(text).group()\n",
    "            text = text.replace(grp,grp[0]+'qu'+grp[-1], 1)\n",
    "        while dirtyKC.search(text)!=None:\n",
    "            grp = dirtyKC.search(text).group()\n",
    "            text = text.replace(grp,grp[0]+'c'+grp[-1], 1)\n",
    "        while dirtyK.search(text)!=None:\n",
    "            grp = dirtyK.search(text).group()\n",
    "            if len(grp) > 1:\n",
    "                text = text.replace(grp,grp[0]+\"que\", 1)\n",
    "            else:\n",
    "                text = text.replace(grp,\"que\", 1)\n",
    "        return text\n",
    "\n",
    "    def replaceAccents(text):\n",
    "        return unidecode(text)\n",
    "    \n",
    "    def remove_user_handlers(text):\n",
    "        return re.sub('@[\\w]+', 'USER', text)\n",
    "    \n",
    "    def replaceVerbs(text):\n",
    "        # replace conjugated verb forms with their infinitive using the\n",
    "        # pipeline's conjugation dictionary\n",
    "        punctuation = list(\"[.,:;!?]()\")\n",
    "        if len(text)==0:\n",
    "            # bugfix: previously fell through and implicitly returned None\n",
    "            return text\n",
    "        addBack = False\n",
    "        if text[-1] in punctuation:\n",
    "            endPunctu = text[-1]\n",
    "            addBack = True\n",
    "            text = text[:-1]\n",
    "        for infinitif in cl.sentimentPipeline.dictConjug.keys():\n",
    "            conjugations = set(cl.sentimentPipeline.dictConjug[infinitif])\n",
    "            foundMatch = any(e in text.replace(' ','_') for e in conjugations)\n",
    "            if foundMatch:\n",
    "                text = text.replace(' ','_')\n",
    "                # bugfix: search the local, underscore-joined TEXT (the original\n",
    "                # searched the outer variable x, which still contained spaces)\n",
    "                matches = [e for e in conjugations if '_'+e+'_' in text]\n",
    "                for e in matches:\n",
    "                    text = text.replace('_'+e+'_','_'+infinitif+'_')\n",
    "                del matches\n",
    "                # handle one- and two-word conjugations at the very start/end of\n",
    "                # the text, where they are not surrounded by two underscores\n",
    "                if text.split('_')[0] in conjugations:\n",
    "                    text = '_'.join([infinitif] + text.split('_')[1:])\n",
    "                    \n",
    "                if '_'.join(text.split('_')[:2]) in conjugations:\n",
    "                    text = '_'.join([infinitif] + text.split('_')[2:])\n",
    "                    \n",
    "                if text.split('_')[-1] in conjugations:\n",
    "                    text = '_'.join(text.split('_')[:-1] + [infinitif])\n",
    "                    \n",
    "                if '_'.join(text.split('_')[-2:]) in conjugations:\n",
    "                    text = '_'.join(text.split('_')[:-2] + [infinitif])\n",
    "\n",
    "                text = text.replace('_',' ')\n",
    "        if addBack:\n",
    "            text = text + endPunctu\n",
    "        return text\n",
    "        \n",
    "    x = tweet.lower()\n",
    "    \n",
    "    # Remove URL\n",
    "    x = re.sub(r'https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)', '', x)\n",
    "    x = str(x).replace('\\r','').replace('\\n','')\n",
    "    \n",
    "    # Process emojies: demojize, remember them, and hide them behind a\n",
    "    # placeholder so the text pipeline cannot mangle them\n",
    "    placeholder_emoji = 'BASTIONEMOJIBASTION'\n",
    "    regex_emoji = re.compile(r':\\w+:')\n",
    "    x = emoji.demojize(x)\n",
    "    emojies = regex_emoji.findall(x)\n",
    "    x = regex_emoji.sub(placeholder_emoji, x)\n",
    "    \n",
    "    x = replaceAccents(x)\n",
    "    #x = remove_user_handlers(x)\n",
    "    x = cl.sentimentPipeline.processNumbers(x)\n",
    "    x = cl.sentimentPipeline.processDetails(x)\n",
    "    x = cl.sentimentPipeline.processRep(x)\n",
    "    x = cl.sentimentPipeline.processJaja(x)\n",
    "    x = cl.sentimentPipeline.processSpaces(x)\n",
    "    x = cl.sentimentPipeline.processPoint(x)\n",
    "    x = cl.sentimentPipeline.processExps(x)\n",
    "    x = processK(x)\n",
    "    x = replaceVerbs(x)\n",
    "    tokens = cl.sentimentPipeline.word_tokenize(x)\n",
    "\n",
    "    result = ' '.join(str(re.sub('[¿?;,¡!`~\"#@\\(\\)\\'.]','',' '.join(tokens))).split()).replace(cl.sentimentPipeline.uglySeparator,'_')\n",
    "    \n",
    "    # Reinsert emojies\n",
    "    for match in emojies:\n",
    "        result = result.replace(placeholder_emoji, match, 1)\n",
    "    return result\n",
    "\n",
    "def sentiment_analysis_to_mongodb(original_tweets,tweet_collection):\n",
    "    \"\"\"\n",
    "    Updates the DB with sentiment score of each tweet, propagating it to associated retweets\n",
    "    \n",
    "    Keyword arguments:\n",
    "    original_tweets -- List of tweets' ObjectID and text (originals, replies and quotes)\n",
    "    tweet_collection -- MongoDB Tweets' Collection  \n",
    "    \"\"\"\n",
    "    operations=[]\n",
    "    for original_tweet in progress_bar(original_tweets):\n",
    "        tweet_id = original_tweet['_id']\n",
    "        text = original_tweet['text']\n",
    "\n",
    "        # cleaning can fail (e.g. URL-only tweets); in that case any stale\n",
    "        # sentiment fields are removed from the tweet and its retweets\n",
    "        try:\n",
    "            clean = clean_str(text)       \n",
    "        except Exception as ex:\n",
    "            print(original_tweet, ex, end=\";\")\n",
    "            # if nothing remains after stripping URLs, the tweet was effectively empty\n",
    "            if not re.sub(r'https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)', '', text).strip():\n",
    "                print(\"Tweet\", tweet_id, \" is empty. Ignoring\")\n",
    "            else:\n",
    "                print(\"Catastrophic failure with tweet\", tweet_id, text, ex)\n",
    "\n",
    "            operations.append(UpdateOne({'_id': tweet_id}, \n",
    "                        {'$unset': { 'sentiment_score': \"\", 'clean_text':\"\"}},\n",
    "                         upsert=False\n",
    "                        ))\n",
    "            operations.append(UpdateMany({'retweet_or_quote_id': tweet_id, 'tweet_type':'retweet'}, \n",
    "                         {'$unset': { 'sentiment_score': \"\"}},\n",
    "                         upsert=False\n",
    "                        ))\n",
    "            continue\n",
    "\n",
    "                        \n",
    "        # clf is the pretrained sentiment classifier loaded elsewhere in the notebook\n",
    "        sentiment_score = clf.predict(clean)\n",
    "\n",
    "        operations.append(UpdateOne({'_id': tweet_id}, \n",
    "                             {'$set': { 'clean_text': clean,\n",
    "                                        'sentiment_score': sentiment_score}},\n",
    "                             upsert=False\n",
    "                            ))\n",
    "\n",
    "        # retweets inherit the score of their source tweet\n",
    "        operations.append(UpdateMany({'retweet_or_quote_id': tweet_id, 'tweet_type':'retweet'}, \n",
    "                             {'$set': { 'sentiment_score': sentiment_score}},\n",
    "                             upsert=False\n",
    "                            ))\n",
    "\n",
    "        # flush in batches to bound memory usage\n",
    "        if len(operations) > 10000:\n",
    "            results = tweet_collection.bulk_write(operations)\n",
    "            print(\"Ma:\", str(results.matched_count).rjust(8, \" \"),\n",
    "                  \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "            operations = []\n",
    "\n",
    "    if len(operations) > 0: \n",
    "        results = tweet_collection.bulk_write(operations)\n",
    "        print(\"Ma:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" Mo:\", str(results.modified_count).rjust(8, \" \"))"
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Execution of the sentiment analysis and database update"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# re-extract originals, replies and quotes for the sentiment stage\n",
    "original_interactions = get_original_interactions(db.tweets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%%time\n",
    "# score every interaction and propagate the scores to its retweets\n",
    "sentiment_analysis_to_mongodb(original_interactions,db.tweets)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Anonymization\n",
    "\n",
    "\n",
    "### Calculate UUIDs for the users"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# collect every user _id and draw one random UUID per user; these UUIDs\n",
    "# replace the real Twitter ids during anonymization\n",
    "pipeline = [\n",
    "        {\n",
    "            '$project': {\n",
    "                '_id':1 \n",
    "            }\n",
    "        }\n",
    "    ]\n",
    "    \n",
    "print(\"Query\", end=\" \")\n",
    "users = db.users.aggregate(pipeline, allowDiskUse=True)\n",
    "print(\"OK; List\", end=\" \")\n",
    "users = list(users)\n",
    "users = [u['_id'] for u in users]\n",
    "print(\"OK; Total records:\", len(users))\n",
    "\n",
    "# uuid4 is random (not derived from the original id), so the mapping cannot\n",
    "# be reconstructed without the in-memory users/uuids lists\n",
    "uuids = [uuid.uuid4() for _ in users]\n",
    "\n",
    "print(\"Total users: \", len(users))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%%time\n",
    "\n",
    "# tag each user document with its UUID and rewrite the three user-reference\n",
    "# fields on tweets; operations are flushed in batches to bound memory usage\n",
    "users_operations = []\n",
    "tweets_operations = []\n",
    "for u,uid in progress_bar(list(zip(users,uuids))):\n",
    "    users_operations.append(UpdateOne({'_id': u}, {'$set': {'uuid': uid}}))\n",
    "    tweets_operations.append(UpdateMany({'user_id': u}, {'$set': {'user_id': uid}}))\n",
    "    tweets_operations.append(UpdateMany({'retweet_or_quote_user_id': u}, {'$set': {'retweet_or_quote_user_id': uid}}))\n",
    "    tweets_operations.append(UpdateMany({'in_reply_to_user_id': u}, {'$set': {'in_reply_to_user_id': uid}}))\n",
    "    \n",
    "\n",
    "    # tweets_operations grows 3x faster than users_operations, so its length\n",
    "    # drives the flush of both batches\n",
    "    if len(tweets_operations) > 10000:\n",
    "        try:\n",
    "            results = db.users.bulk_write(users_operations)\n",
    "            print(\"USERS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"), end=\"; \")\n",
    "        except BulkWriteError as bwe:\n",
    "            print(bwe.details)\n",
    "            break\n",
    "        try:\n",
    "            results = db.tweets.bulk_write(tweets_operations)\n",
    "            print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "        except BulkWriteError as bwe:\n",
    "            print(bwe.details)\n",
    "            break\n",
    "        users_operations = []\n",
    "        tweets_operations = []\n",
    "\n",
    "# final flush of whatever remains after the loop\n",
    "if len(tweets_operations) > 0: \n",
    "    results = db.users.bulk_write(users_operations)\n",
    "    print(\"USERS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"), end=\"; \")\n",
    "    results = db.tweets.bulk_write(tweets_operations)\n",
    "    print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "    users_operations = []\n",
    "    tweets_operations = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Calculate UUIDs for the tweets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# same as for users: collect every tweet _id and draw one random UUID each\n",
    "pipeline = [\n",
    "        {\n",
    "            '$project': {\n",
    "                '_id':1 \n",
    "            }\n",
    "        }\n",
    "    ]\n",
    "    \n",
    "print(\"Query\", end=\" \")\n",
    "tweets = db.tweets.aggregate(pipeline, allowDiskUse=True)\n",
    "print(\"OK; List\", end=\" \")\n",
    "tweets = list(tweets)\n",
    "tweets = [t['_id'] for t in tweets]\n",
    "print(\"OK; Total records:\", len(tweets))\n",
    "\n",
    "tuids = [uuid.uuid4() for _ in tweets]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%%time\n",
    "# tag each tweet with its UUID and rewrite the reply/retweet reference\n",
    "# fields; operations are flushed in batches to bound memory usage\n",
    "tweets_operations = []\n",
    "for u,uid in progress_bar(list(zip(tweets,tuids))):\n",
    "    tweets_operations.append(UpdateOne({'_id': u}, {'$set': {'uuid': uid}}))\n",
    "    tweets_operations.append(UpdateMany({'in_reply_to_status_id': u}, {'$set': {'in_reply_to_status_id': uid}}))\n",
    "    tweets_operations.append(UpdateMany({'retweet_or_quote_id': u}, {'$set': {'retweet_or_quote_id': uid}}))\n",
    "\n",
    "    if len(tweets_operations) > 10000:\n",
    "        try:\n",
    "            results = db.tweets.bulk_write(tweets_operations)\n",
    "            print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "        except BulkWriteError as bwe:\n",
    "            print(bwe.details)\n",
    "            break\n",
    "        tweets_operations = []\n",
    "\n",
    "# final flush: this cell only touches the tweets collection (previously it\n",
    "# also re-ran db.users.bulk_write with the stale users_operations list from\n",
    "# the users cell, which could raise or re-apply stale operations)\n",
    "if len(tweets_operations) > 0: \n",
    "    results = db.tweets.bulk_write(tweets_operations)\n",
    "    print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "    tweets_operations = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Swap users' IDs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# build an in-memory map from original user _id to its assigned UUID,\n",
    "# used below to rewrite followers/friends lists\n",
    "print(\"Query\", end=\" \")\n",
    "users = db.users.find({})\n",
    "print(\"OK; List\", end=\" \")\n",
    "users = list(users)\n",
    "print(\"OK; Total records:\", len(users))\n",
    "\n",
    "users_uuids_map = {}\n",
    "for u in progress_bar(users):\n",
    "    users_uuids_map[u['_id']] = u['uuid']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# build anonymized copies of the user documents: the UUID becomes the _id,\n",
    "# per-user botscore details are stripped, and follower/friend ids are\n",
    "# rewritten to UUIDs; users that cannot be converted are kept in EXCEPTIONS\n",
    "insert_operations = []\n",
    "delete_operations = []\n",
    "exceptions = []\n",
    "\n",
    "for u in progress_bar(users):\n",
    "    try:\n",
    "        user = u.copy()\n",
    "\n",
    "        try:\n",
    "            user['_id'] = u['uuid']\n",
    "            del(user['uuid'])\n",
    "        except Exception as ex:\n",
    "            print(\"User\",u['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "            exceptions.append(u)\n",
    "            continue\n",
    "\n",
    "        try:\n",
    "            # drop the per-user breakdown of the botscore\n",
    "            del(user['scores']['user'])\n",
    "        except Exception as ex:\n",
    "            if type(ex).__name__ == \"KeyError\":\n",
    "                # user simply has no per-user scores; nothing to strip\n",
    "                pass\n",
    "            else:\n",
    "                print(\"User\",u['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "                exceptions.append(u)\n",
    "                continue\n",
    "                \n",
    "        try:            \n",
    "            # keep only followers/friends that are themselves in the dataset,\n",
    "            # translated to their UUIDs\n",
    "            if 'followers' in u:\n",
    "                user['followers'] = []\n",
    "                for f in u['followers']:\n",
    "                    if f in users_uuids_map:\n",
    "                        user['followers'].append(users_uuids_map[f])\n",
    "            if 'friends' in u:\n",
    "                user['friends'] = []\n",
    "                for f in u['friends']:\n",
    "                    if f in users_uuids_map:\n",
    "                        user['friends'].append(users_uuids_map[f])\n",
    "        except Exception as ex:\n",
    "            print(\"User\",u['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "            exceptions.append(u)\n",
    "            # fixed: was 'break', which silently aborted the whole loop\n",
    "            # instead of skipping this user like the sibling handlers do\n",
    "            continue\n",
    "\n",
    "        insert_operations.append(InsertOne(user))\n",
    "        delete_operations.append(DeleteOne({'_id': u['_id']}))\n",
    "        \n",
    "    except Exception as ex:\n",
    "        print(\"User\",u['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "        exceptions.append(u)\n",
    "        continue"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Insert the anonymized user documents. Guard against an empty operation\n",
    "# list: pymongo's bulk_write raises InvalidOperation when given [].\n",
    "if insert_operations:\n",
    "    results = db.users.bulk_write(insert_operations)\n",
    "    print(\"USERS: Ma:\", str(results.matched_count).rjust(8, \" \"), \n",
    "          \" In:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "          \" Mo:\", str(results.modified_count).rjust(8, \" \"),  \n",
    "          \" De:\", str(results.deleted_count).rjust(8, \" \"), \n",
    "          end=\"; \")\n",
    "else:\n",
    "    print(\"USERS: no insert operations to execute\", end=\"; \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Remove the original (non-anonymized) user documents. Guard against an\n",
    "# empty operation list: pymongo's bulk_write raises InvalidOperation on [].\n",
    "if delete_operations:\n",
    "    results = db.users.bulk_write(delete_operations)\n",
    "    print(\"USERS: Ma:\", str(results.matched_count).rjust(8, \" \"), \n",
    "          \" In:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "          \" Mo:\", str(results.modified_count).rjust(8, \" \"),  \n",
    "          \" De:\", str(results.deleted_count).rjust(8, \" \"), \n",
    "          end=\"; \")\n",
    "else:\n",
    "    print(\"USERS: no delete operations to execute\", end=\"; \")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Check for potentially missed ObjectIDs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Fetch every tweet document and build an _id -> uuid lookup table\n",
    "# used when repairing references and swapping tweet IDs below.\n",
    "print(\"Query\", end=\" \")\n",
    "tweets = db.tweets.find({})\n",
    "print(\"OK; List\", end=\" \")\n",
    "tweets = list(tweets)\n",
    "print(\"OK; Total records:\", len(tweets))\n",
    "\n",
    "# Map each tweet's original Mongo _id to its anonymized UUID.\n",
    "tweets_uuids_map = {t['_id']: t['uuid'] for t in progress_bar(tweets)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Repair tweet references (replies / retweets-quotes) that still carry a\n",
    "# raw ID instead of an anonymized UUID.\n",
    "# NOTE(review): `oids` is defined in an earlier cell outside this section -\n",
    "# presumably the tweets whose references were missed; confirm upstream.\n",
    "update_operations = []\n",
    "changes = {}\n",
    "changes['in_reply_to_status_id'] = 0\n",
    "changes['retweet_or_quote_id'] = 0\n",
    "\n",
    "pb = progress_bar(oids)\n",
    "for oid in pb:\n",
    "    for var in ['in_reply_to_status_id', 'retweet_or_quote_id']:\n",
    "        # Only touch references that were not already swapped to a UUID.\n",
    "        if oid[var] is not None and not isinstance(oid[var], uuid.UUID):\n",
    "            try:\n",
    "                # The referenced tweet may not be in the collection; in that\n",
    "                # case mint a fresh UUID and cache it in tweets_uuids_map so\n",
    "                # later references to the same ID reuse the same UUID.\n",
    "                tuid = tweets_uuids_map.get(oid[var], uuid.uuid4())\n",
    "                update_operations.append(UpdateMany({var: oid[var]}, {'$set': {var: tuid}}))\n",
    "                tweets_uuids_map[oid[var]] = tuid\n",
    "                changes[var] += 1\n",
    "            except Exception as ex:\n",
    "                print(\"Tweet\",oid['_id'],\" (\", var, \")\\t Exception: \", type(ex).__name__, ex)\n",
    "                break\n",
    "    pb.comment = \"Updates: \" + str(len(update_operations))\n",
    "    \n",
    "print(\"OK; Total records:\", len(update_operations), \n",
    "      \" of which: \", changes['in_reply_to_status_id'], \"in_reply_to_status_id and\", changes['retweet_or_quote_id'], \"retweet_or_quote_id\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Apply the reference fixes. Guard against an empty operation list:\n",
    "# pymongo's bulk_write raises InvalidOperation when given [].\n",
    "if update_operations:\n",
    "    results = db.tweets.bulk_write(update_operations)\n",
    "    print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \n",
    "          \" In:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "          \" Mo:\", str(results.modified_count).rjust(8, \" \"),  \n",
    "          \" De:\", str(results.deleted_count).rjust(8, \" \"), \n",
    "          end=\"; \")\n",
    "else:\n",
    "    print(\"TWEETS: no update operations to execute\", end=\"; \")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Strip information from the tweets and swap IDs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"Query\", end=\" \")\n",
    "# Project only the fields that are safe to keep after anonymization;\n",
    "# everything else (text, user profile, entities, ...) is stripped here.\n",
    "kept_fields = [\n",
    "    'keywords_summary',\n",
    "    'sentiment_score',\n",
    "    'created_at',\n",
    "    'favorite_count',\n",
    "    'in_reply_to_status_id',\n",
    "    'in_reply_to_user_id',\n",
    "    'retweet_count',\n",
    "    'retweet_or_quote_id',\n",
    "    'retweet_or_quote_user_id',\n",
    "    'tweet_type',\n",
    "    'user_id',\n",
    "    'uuid',\n",
    "]\n",
    "tweets = db.tweets.find({}, {field: True for field in kept_fields})\n",
    "print(\"OK; List\", end=\" \")\n",
    "tweets = list(tweets)\n",
    "print(\"OK; Total records:\", len(tweets))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Swap each tweet's Mongo _id for its anonymized UUID and coarsen the\n",
    "# timestamp; failures are collected in `exceptions` for later inspection.\n",
    "insert_operations = []\n",
    "delete_operations = []\n",
    "exceptions = []\n",
    "pb = progress_bar(tweets)\n",
    "for t in pb:\n",
    "    \n",
    "    # Tweets updated by the missed-ObjectID pass may lack a stored uuid;\n",
    "    # recover it from the in-memory map built earlier.\n",
    "    if 'uuid' not in t:\n",
    "        if t['_id'] in tweets_uuids_map:\n",
    "            t['uuid'] = tweets_uuids_map[t['_id']]\n",
    "        else:\n",
    "            # NOTE(review): `break` aborts the whole pass on the first tweet\n",
    "            # with no known UUID (the doc is printed for inspection) - confirm\n",
    "            # this hard stop is intended rather than `continue`.\n",
    "            exceptions.append(t)\n",
    "            pb.comment = \"Exceptions: \" + str(len(exceptions))\n",
    "            print(t)\n",
    "            break \n",
    "    \n",
    "    try:\n",
    "        # Work on a copy so the in-memory `tweets` list keeps the originals.\n",
    "        tweet = t.copy()\n",
    "        \n",
    "        try:\n",
    "            tweet['_id'] = t['uuid']\n",
    "            del(tweet['uuid'])\n",
    "        except Exception as ex:\n",
    "            #print(\"Tweet\",t['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "            exceptions.append(t)\n",
    "            pb.comment = \"Exceptions: \" + str(len(exceptions))\n",
    "            continue\n",
    "            \n",
    "        \n",
    "        try:\n",
    "            # Replace timestamp information to avoid precise tweet identification\n",
    "            tweet['created_at'] = t['created_at'].replace(hour=12, minute=0, second=0, microsecond=0)\n",
    "        except Exception as ex:\n",
    "            #print(\"Tweet\",t['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "            exceptions.append(t)\n",
    "            pb.comment = \"Exceptions: \" + str(len(exceptions))\n",
    "            continue\n",
    "        \n",
    "\n",
    "        insert_operations.append(InsertOne(tweet))\n",
    "        delete_operations.append(DeleteOne({'_id': t['_id']}))\n",
    "        \n",
    "    except Exception as ex:\n",
    "        print(\"Tweet\",t['_id'],\"- Exception: \", type(ex).__name__, ex, \"Added to exception list\")\n",
    "        exceptions.append(t)\n",
    "        continue"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Insert the anonymized tweet documents. Guard against an empty operation\n",
    "# list: pymongo's bulk_write raises InvalidOperation when given [].\n",
    "if insert_operations:\n",
    "    results = db.tweets.bulk_write(insert_operations)\n",
    "    print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \n",
    "          \" In:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "          \" Mo:\", str(results.modified_count).rjust(8, \" \"),  \n",
    "          \" De:\", str(results.deleted_count).rjust(8, \" \"), \n",
    "          end=\"; \")\n",
    "else:\n",
    "    print(\"TWEETS: no insert operations to execute\", end=\"; \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%time\n",
    "# Remove the original (non-anonymized) tweet documents. Guard against an\n",
    "# empty operation list: pymongo's bulk_write raises InvalidOperation on [].\n",
    "if delete_operations:\n",
    "    results = db.tweets.bulk_write(delete_operations)\n",
    "    print(\"TWEETS: Ma:\", str(results.matched_count).rjust(8, \" \"), \n",
    "          \" In:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "          \" Mo:\", str(results.modified_count).rjust(8, \" \"),  \n",
    "          \" De:\", str(results.deleted_count).rjust(8, \" \"), \n",
    "          end=\"; \")\n",
    "else:\n",
    "    print(\"TWEETS: no delete operations to execute\", end=\"; \")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
