{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Phase 1. Data import\n",
    "\n",
    "## Contents\n",
    "- [Configuration](#Configuration)\n",
    "  - [Imports](#Imports)\n",
    "  - [Variables](#Variables)\n",
    "  - [Support functions](#Support-functions)\n",
    "- [Load SFMs' data in MongoDB](#Load-SFMs'-data-in-MongoDB)\n",
    "  - [Preliminaries](#Preliminaries)\n",
    "  - [Execute](#Execute)\n",
     "- [Complete the data](#Complete-the-data)\n",
    "  - [Support functions](#Support-functions)\n",
    "  - [Complete database with missing references](#Complete-database-with-missing-references)\n",
    "  - [Remove tweets without references](#Remove-tweets-without-references)\n",
    "- [Update users collection according to new database status](#Update-users-collection-according-to-new-database-status)\n",
    "  - [Remove those users with no interactions](#Remove-those-users-with-no-interactions)\n",
    "  - [Create the new users](#Create-the-new-users)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configuration"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Utilities\n",
    "from IPython.display import display\n",
    "from fastprogress import master_bar, progress_bar\n",
     "# ('from datetime import datetime' removed: it was shadowed by 'import datetime' below)\n",
    "from unidecode import unidecode\n",
    "import os\n",
    "import ntpath\n",
    "import numpy as np\n",
    "import statistics \n",
    "import re\n",
    "import math\n",
    "import random\n",
    "import datetime\n",
    "import numbers\n",
    "from collections.abc import MutableMapping\n",
    "import pandas as pd\n",
    "\n",
    "# Botometer API\n",
    "import botometer\n",
    "\n",
    "# MongoDB functionality\n",
    "from pymongo.errors import BulkWriteError\n",
    "from pymongo import MongoClient, InsertOne, UpdateOne, DeleteOne\n",
     "import tweepy   # used by get_original_tweet (was missing)\n",
     "import pickle   # used to checkpoint the processed-ids cache (was missing)\n",
     "from bson import ObjectId"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Directories where CSV data is stored\n",
     "ROOT_DIR = \"ABSOLUTE_PATH_TO_ROOT_FOLDER\"\n",
    "DATA_DIR = ROOT_DIR + \"data/\"\n",
    "# Change path to root\n",
    "os.chdir(ROOT_DIR)\n",
    "\n",
    "# Botometer and Twitter Keys for parallel processing\n",
    "keys = {\n",
    "     0: botometer.Botometer(wait_on_ratelimit=True, rapidapi_key='RAPID_API_KEY', **{'consumer_key':'TWITTER_DEV_CONSUMER_KEY', 'consumer_secret':'TWITTER_DEV_CONSUMER_SECRET'}),\n",
    "     1: botometer.Botometer(wait_on_ratelimit=True, rapidapi_key='RAPID_API_KEY', **{'consumer_key':'TWITTER_DEV_CONSUMER_KEY', 'consumer_secret':'TWITTER_DEV_CONSUMER_SECRET'}),\n",
    "}\n",
    "\n",
     "# Observation period used by process_tweet when completing references.\n",
     "# Markdown below states 10-04-2019 --- 10-11-2019; assumed DD-MM-YYYY,\n",
     "# i.e. 10 Apr -- 10 Nov 2019 (TODO confirm). tz-aware to match parsed created_at.\n",
     "START_DATE = pd.Timestamp('2019-04-10', tz='UTC')\n",
     "END_DATE = pd.Timestamp('2019-11-10', tz='UTC')\n",
     "\n",
     "# MongoDB parameters\n",
     "mongoclient = MongoClient('IP_ADDRESS', PORT)\n",
    "db = mongoclient.botbusters\n",
    "# It will automatically create the tweets' and users' collections."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Support functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def make_objid(text):\n",
    "    \"\"\"Makes an ObjectId of 4 bytes\n",
    "    \n",
    "    Keyword arguments:\n",
    "    text -- string to be converted into Object ID\n",
    "    \"\"\"\n",
    "    text = str(text)\n",
    "    if not text.strip():\n",
    "        return None\n",
    "    try:\n",
    "        return ObjectId(text.rjust(24,\"0\"))\n",
    "    except Exception as ex:\n",
    "        print(text, ex)\n",
    "        return None\n",
    "\n",
    "def remove_retweet_text(tweet_type, text):\n",
    "    \"\"\"Unidecodes text of originals, replies and quotes\n",
    "    Removes text from retweets\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_type -- tweet type of the interaction being processed\n",
    "    text -- text of the interaction\n",
    "    \"\"\"\n",
    "    if tweet_type == 'retweet':\n",
    "        return None\n",
    "    else:\n",
    "        return unidecode(text)\n",
    "\n",
    "\n",
    "def flatten(d, parent_key='', sep='_'):\n",
    "    \"\"\"Formats MongoDB results\n",
    "    \n",
    "    Keyword arguments:\n",
    "    d -- dictionary with key and uncleaned values\n",
    "    parent_key --\n",
    "    sep --\n",
    "    \"\"\"\n",
    "    items = []\n",
    "    for k, v in d.items():\n",
    "        new_key = parent_key + sep + k if parent_key else k\n",
    "        if isinstance(v, MutableMapping):\n",
    "            items.extend(flatten(v, new_key, sep=sep).items())\n",
    "        else:\n",
    "            items.append((new_key, v))\n",
    "    return dict(items)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load SFMs' data in MongoDB\n",
    "\n",
    "### Preliminaries\n",
    "\n",
    "Hardcoding the CSVs' header and types permits to bypass the validation heuristic, thus increasing processing speed."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Columns to be imported\n",
    "tweets_users_columns = {\n",
    "    # tweets\n",
    "    'text':                         str,\n",
    "    'tweet_type':                   str,\n",
    "    'favorite_count':               int,\n",
    "    'in_reply_to_status_id':        str,\n",
    "    'in_reply_to_user_id':          str,\n",
    "    'lang':                         str,\n",
    "    'place':                        str,\n",
    "    'retweet_count':                int,\n",
    "    'retweet_or_quote_id':          str,\n",
    "    'retweet_or_quote_user_id':     str,\n",
    "    'parsed_created_at':            str,\n",
    "    # both tweets and timeline\n",
    "    'id':                           str,   # tweet id\n",
    "    'created_at':                   str,   # tweet date\n",
    "    # users timeline\n",
    "    'user_id':                      str,\n",
    "    'user_screen_name':             str,\n",
    "    'user_created_at':              str,   # this should be constant for each user\n",
    "    'user_default_profile_image':   bool,\n",
    "    'user_description':             str,\n",
    "    'user_favourites_count':        int,\n",
    "    'user_followers_count':         int,\n",
    "    'user_friends_count':           int,\n",
    "    'user_listed_count':            int,\n",
    "    'user_location':                str,\n",
    "    'user_name':                    str,\n",
    "    'user_statuses_count':          int,\n",
    "    'user_time_zone':               str,\n",
    "    'user_urls':                    str,\n",
    "    'user_verified':                bool,\n",
    "}\n",
    "\n",
    "# Columns to be ignored\n",
    "drop_columns = {\n",
    "    'tweet_url':                    str,\n",
    "    'in_reply_to_screen_name':      str,\n",
    "    'retweet_or_quote_screen_name': str,\n",
    "    'source':                       str,\n",
    "    'hashtags':                     str,\n",
    "    'urls':                         str,\n",
    "    'created_at':                   str,\n",
    "    'coordinates':                  str,\n",
    "    'media':                        str,\n",
    "    'possibly_sensitive':           str,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_csv(filename):\n",
    "    \"\"\"Parses a CSV with SFM format returning two DataFrames with the user and tweet info.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    filename -- name of the CSV\n",
    "    \"\"\"\n",
    "    print(\"Processing\", ntpath.basename(filename), end=\"\\t\")\n",
    "\n",
    "    df = pd.read_csv(filename, low_memory=False, keep_default_na=False, \n",
    "                     dtype=tweets_users_columns, \n",
    "                     usecols=list(tweets_users_columns.keys()))\n",
    "\n",
    "    print(\"CSV\", end=\" \")\n",
    "\n",
    "    # Make index as ObjectID\n",
    "    df['_id'] = df['id'].apply(make_objid)\n",
    "    df.set_index('_id', drop=False, inplace=True)\n",
    "    df.drop(columns=['id'], inplace=True)\n",
    "\n",
    "    # Create ObjectIDs to avoid any potential issue\n",
    "    df['user_id'] = df['user_id'].apply(make_objid)\n",
    "    df['retweet_or_quote_user_id'] = df['retweet_or_quote_user_id'].apply(make_objid)\n",
    "    df['retweet_or_quote_id'] = df['retweet_or_quote_id'].apply(make_objid)\n",
    "    df['in_reply_to_status_id'] = df['in_reply_to_status_id'].apply(make_objid)\n",
    "    df['in_reply_to_user_id'] = df['in_reply_to_user_id'].apply(make_objid)\n",
    "\n",
    "    # Make datetime objects\n",
    "    df['created_at'] = pd.to_datetime(df['created_at'], infer_datetime_format=False, format=\"%a %b %d %H:%M:%S %z %Y\")\n",
    "    df['user_created_at'] = pd.to_datetime(df['user_created_at'], infer_datetime_format=False, format=\"%a %b %d %H:%M:%S %z %Y\")\n",
    "\n",
    "    df.drop(columns=['parsed_created_at'], inplace=True)\n",
    "\n",
    "    # Remove text related to retweets\n",
    "    df['text'] = np.vectorize(remove_retweet_text)(df.tweet_type, df['text'])\n",
    "\n",
    "    # Make None instead of empty strings\n",
    "    df = df.applymap(lambda x: None if not str(x).strip() else x)\n",
    "\n",
    "    print(\"OK\", end=\"; \")\n",
    "\n",
    "    print(\"#:\", len(df), end=\" entries; \")\n",
    "\n",
    "    # Separate tweet info from user info\n",
    "    df_tweets = df[['_id', 'created_at', 'text', 'tweet_type', 'favorite_count', \n",
    "                    'in_reply_to_status_id', 'in_reply_to_user_id', 'lang', 'place', \n",
    "                    'retweet_count', 'retweet_or_quote_id', 'retweet_or_quote_user_id', 'user_id']]\n",
    "    df_users = df[['_id', 'created_at','user_id', 'user_screen_name', 'user_created_at', 'user_default_profile_image', \n",
    "                   'user_description', 'user_favourites_count', 'user_followers_count', 'user_friends_count', \n",
    "                   'user_listed_count', 'user_location', 'user_name', 'user_statuses_count', \n",
    "                   'user_time_zone', 'user_urls', 'user_verified']]\n",
    "\n",
    "    return df_tweets,df_users\n",
    "\n",
    "\n",
    "def csv_to_mongodb(filename, tweet_collection, user_metadata_collection):\n",
    "    \"\"\"Saves a CSV with SFM format in MongoDB.\n",
    "\n",
    "    Keyword arguments:\n",
    "    filename -- name of the CSV\n",
    "    user_metadata_collection -- MongoDB Users' Metadata Collection (containing users metadata)\n",
    "    tweet_collection -- MongoDB Tweets' Collection  \n",
    "    \"\"\"\n",
    "    try:\n",
    "        df_tweets,df_users = process_csv(filename)\n",
    "        \n",
    "        display(df_tweets.head(5))\n",
    "        display(df_users.head(5))\n",
    "\n",
    "        if df_tweets is None or df_users is None:\n",
    "            return\n",
    "\n",
    "        print(\"Preparing DB operations...\", end=\" \")\n",
    "        tweets_operations = []\n",
    "        users_operations = []\n",
    "\n",
    "        records = df_users.to_dict('records')\n",
    "        for record in records:\n",
    "            users_operations.append(UpdateOne({'_id': record['_id']},\n",
    "                                                 {'$set': record},\n",
    "                                                 upsert=True\n",
    "                                                )\n",
    "                                      )\n",
    "\n",
    "        print(\"Users OK\", end=\"; \")\n",
    "\n",
    "        records = df_tweets.to_dict('records')\n",
    "        for record in records:\n",
    "            tweets_operations.append(UpdateOne({'_id': record['_id']},\n",
    "                                                 {'$set': record},\n",
    "                                                 upsert=True\n",
    "                                            ))\n",
    "        print(\"Tweets OK\", end=\"; \")\n",
    "\n",
    "\n",
    "        print(\"READY to BULK\", end=\"; \")\n",
    "\n",
    "        results = tweet_collection.bulk_write(tweets_operations)\n",
    "        print(\"Tweets M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" I:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "              \" U:\", str(results.upserted_count).rjust(8, \" \"),\n",
    "              end=\"; \"\n",
    "             )\n",
    "        results = user_metadata_collection.bulk_write(users_operations)\n",
    "        print(\"Users M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "              \" I:\", str(results.inserted_count).rjust(8, \" \"),\n",
    "              \" U:\", str(results.upserted_count).rjust(8, \" \")\n",
    "             )\n",
    "\n",
    "    except Exception as e:\n",
    "        print(\"Exception. Message:\", e)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Execute"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save in MongoDB those CSVs with SFM format from any given folders\n",
    "data_folders = ['harvester/processed/hashtags']\n",
    "df = None\n",
    "for folder in data_folders:\n",
    "    for file in progress_bar(os.listdir(DATA_DIR + folder)):\n",
    "        if file.endswith(\".csv\"):\n",
    "            csv_to_mongodb(os.path.join(DATA_DIR + folder, file), db.tweets, db.users_metadata)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Complete the data\n",
    "Collect those tweets that belongs to the observation period and are referenced by collected tweets. Discard all those originated outside the observation period.\n",
    "\n",
    "### Support functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_referenced_tweets(tweet_collection):\n",
    "    \"\"\"Extracts the ObjectID of replies, quotes and originals\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection\n",
    "    \"\"\"\n",
    "    ids = list(tweet_collection.find({'tweet_type': {'$in':['reply','quote','original']}},\n",
    "                                          {'_id': 1}))\n",
    "    return ids\n",
    "\n",
    "def get_referencing_tweets(tweet_collection):\n",
    "    \"\"\"Extracts the ObjectID of replies, quotes and retweets\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection\n",
    "    \"\"\"\n",
    "    ids = list(tweet_collection.find({'tweet_type': {'$in':['reply','quote','retweet']}},\n",
    "                                          {'_id': 1, 'tweet_type': 1, 'retweet_or_quote_id':1, 'in_reply_to_status_id':1, 'retweet_count':1}))\n",
    "    return ids\n",
    "\n",
    "def get_original_tweet(original_tweet_id, instance_number):\n",
    "    \"\"\"Consults a Status object through Twitter API.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    original_tweet_id -- Tweets' ObjectID\n",
    "    instance_number -- the instance of keys to use\n",
    "    \"\"\"\n",
    "    try:\n",
    "        botometer_instance = keys[instance_number]\n",
    "        consumer_key = botometer_instance.consumer_key\n",
    "        consumer_secret = botometer_instance.consumer_secret\n",
    "        auth = tweepy.AppAuthHandler(consumer_key,consumer_secret)\n",
    "        api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n",
    "        result = api.get_status(id=original_tweet_id, \n",
    "                                include_entities=False,\n",
    "                                tweet_mode='extended',\n",
    "                                include_card_uri=False,\n",
    "                                include_ext_alt_text=False,\n",
    "                                include_my_retweet=False\n",
    "                               )\n",
    "        print(original_tweet_id,'original tweet found', end=\"; \")\n",
    "        return result\n",
    "    except Exception as e:\n",
     "        # Not every exception carries a .response (e.g. network errors); fall back to str(e)\n",
     "        response_text = getattr(getattr(e, 'response', None), 'text', None) or str(e)\n",
     "        if re.match('.*(144|63|34).*', response_text):\n",
    "            print(original_tweet_id,'original tweet not found',end=\"; \")\n",
    "            return False\n",
    "        else:\n",
    "            print(original_tweet_id,'original tweet not found',str(e), end=\"; \")\n",
    "            return False\n",
    "\n",
    "def process_tweet(result):\n",
    "    \"\"\"Processes a Status object, the result extracted from Twitter API, to parse info for our database format\n",
    "    Returns a dictionary for both Tweet and Users' Metadata info.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    result -- Status object in json format received from Twitter API\n",
    "    \"\"\"                                                  \n",
    "    created_at = pd.to_datetime(result.created_at, infer_datetime_format=False, format=\"%a %b %d %H:%M:%S %z %Y\")\n",
    "        \n",
    "    if (created_at >= START_DATE) & (created_at <= END_DATE):\n",
    "\n",
    "        result = result._json\n",
    "\n",
    "        new_tweet = {}\n",
    "        new_tweet['_id'] = make_objid(result['id'])\n",
    "        new_tweet['created_at'] = created_at\n",
    "        new_tweet['text'] = result['full_text']\n",
    "        new_tweet['tweet_type'] = 'original'\n",
    "        new_tweet['favorite_count'] = result['favorite_count']\n",
    "        new_tweet['in_reply_to_status_id'] = make_objid(result['in_reply_to_status_id'])\n",
    "        new_tweet['in_reply_to_user_id'] = make_objid(result['in_reply_to_user_id'])\n",
    "        new_tweet['lang'] = result['lang']\n",
    "        new_tweet['place'] = result['place']\n",
    "        new_tweet['retweet_count'] = result['retweet_count']\n",
    "        new_tweet['retweet_or_quote_id'] = None\n",
    "        new_tweet['retweet_or_quote_user_id'] = None\n",
    "        new_tweet['user_id'] = make_objid(result['user']['id'])\n",
    "\n",
    "\n",
    "        if 'retweeted_status' in result:\n",
    "            new_tweet['tweet_type'] = 'retweet'\n",
    "            new_tweet['retweet_or_quote_id'] = make_objid(result['retweeted_status']['id'])\n",
    "            new_tweet['retweet_or_quote_user_id'] = make_objid(result['retweeted_status']['user']['id'])\n",
    "        elif 'quoted_status' in result:\n",
    "            new_tweet['tweet_type'] = 'quote'\n",
    "            new_tweet['retweet_or_quote_id'] = make_objid(result['quoted_status']['id'])\n",
    "            new_tweet['retweet_or_quote_user_id'] = make_objid(result['quoted_status']['user']['id'])\n",
    "\n",
    "\n",
    "        new_user_metadata = {}\n",
    "        new_user_metadata['_id'] = make_objid(result['id'])\n",
    "    \n",
    "        user_result = result['user']\n",
    "        new_user_metadata['created_at']=created_at\n",
    "        new_user_metadata['user_created_at']= pd.to_datetime(user_result['created_at'], infer_datetime_format=False, format=\"%a %b %d %H:%M:%S %z %Y\")\n",
    "        new_user_metadata['user_default_profile_image']=user_result['default_profile_image']\n",
    "        new_user_metadata['user_description']=user_result['description']\n",
    "        new_user_metadata['user_favourites_count']=user_result['favourites_count']\n",
    "        new_user_metadata['user_followers_count']=user_result['followers_count']\n",
    "        new_user_metadata['user_friends_count']=user_result['friends_count']\n",
    "        new_user_metadata['user_id']=make_objid(user_result['id'])\n",
    "        new_user_metadata['user_listed_count']=user_result['listed_count']\n",
    "        new_user_metadata['user_location']=user_result['location']\n",
    "        new_user_metadata['user_name']=user_result['name']\n",
    "        new_user_metadata['user_screen_name']=user_result['screen_name']\n",
    "        new_user_metadata['user_statuses_count']=user_result['statuses_count']\n",
    "        new_user_metadata['user_time_zone']=user_result['time_zone']\n",
     "        new_user_metadata['user_urls']=user_result['url']   # key renamed to match the CSV-import schema ('user_urls')\n",
    "        new_user_metadata['user_verified']=user_result['verified']\n",
    "        \n",
    "        return new_tweet,new_user_metadata\n",
    "    else:\n",
    "        print('original tweet not in date range:',result.created_at, end=\"; \")\n",
    "        return (False,)\n",
    "\n",
    "def add_tweet_and_user_to_mongodb(processed_tweet, tweet_collection, user_metadata_collection):\n",
    "    \"\"\"\n",
    "    Adds a processed Status object response with our format in MongoDB.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    processed_tweet -- a tuple with both tweet and user metadata info (result of applying process_tweet)\n",
    "    tweet_collection -- MongoDB Tweets' Collection  \n",
    "    user_metadata_collection -- MongoDB Users' Metadata Collection (containing users metadata)\n",
    "    \"\"\"\n",
    "    new_tweet = processed_tweet[0]\n",
    "    new_user = processed_tweet[1]\n",
    "    original_tweet_id = new_tweet['_id']\n",
    "\n",
    "    results = tweet_collection.update_one({'_id':original_tweet_id},\n",
    "                                   {'$set':new_tweet},\n",
    "                                    upsert=True)\n",
    "\n",
    "    #print(\"New tweet Ma:\", str(results.matched_count).rjust(8, \" \"),\" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "\n",
    "    results = user_metadata_collection.update_one({'_id':original_tweet_id},\n",
    "                                           {'$set':new_user},\n",
    "                                           upsert=True)\n",
    "    #print(\"New user metadata Ma:\", str(results.matched_count).rjust(8, \" \"),\" Mo:\", str(results.modified_count).rjust(8, \" \"))\n",
    "    print('original tweet added (', new_tweet['created_at'],')')\n",
    "\n",
    "def get_tweet_info(tweet):\n",
    "    \"\"\"\n",
    "    Gets the tweet type, tweet id and referenced tweet id\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet -- tweet extracted from MongoDB\n",
    "    \"\"\"\n",
    "    tweet_type = tweet['tweet_type']\n",
    "    tweet_id = tweet['_id']\n",
    "    \n",
    "    if tweet_type in ['retweet','quote']:\n",
    "        original_tweet_id = tweet['retweet_or_quote_id']\n",
    "    elif tweet_type=='reply':\n",
    "        original_tweet_id = tweet['in_reply_to_status_id']\n",
    "    else:\n",
    "        original_tweet_id = None\n",
    "    \n",
    "    return tweet_type, tweet_id, original_tweet_id\n",
    "\n",
    "def get_referenced_id(tweet):\n",
    "    \"\"\"\n",
    "    Gets the referenced id of the tweet. \n",
    "    If it is retweet or quote, retweet_or_quote_id is returned.\n",
    "    If it is reply, in_reply_to_status_id is returned.\n",
    "    Otherwise, if original, return None.\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet -- tweet extracted from MongoDB\n",
    "    \"\"\"\n",
    "    tweet_type = tweet['tweet_type']\n",
    "    tweet_id = tweet['_id']\n",
    "    \n",
    "    if tweet_type in ['retweet','quote']:\n",
    "        original_tweet_id = tweet['retweet_or_quote_id']\n",
    "    elif tweet_type=='reply':\n",
    "        original_tweet_id = tweet['in_reply_to_status_id']\n",
    "    else:\n",
    "        original_tweet_id = None\n",
    "    \n",
    "    return original_tweet_id"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Complete database with missing references\n",
    "\n",
    "<ol>\n",
    "<li>If the referenced tweet is in our database --> OK</li>\n",
    "<li>If the referenced tweet is not in our database --> collect it from Twitter:\n",
    "    <ol type=\"a\">\n",
    "        <li>If it is not found (erased, private account, etc.) --> nothing</li>\n",
    "        <li>If it is found, but out of our date range (10-04-2019 --- 10-11-2019) --> nothing</li>\n",
    "        <li>It it is found and within our time window (10-04-2019 --- 10-11-2019):\n",
    "            <ol type=\"i\">\n",
    "                <li>If it is a original tweet --> add to database</li>\n",
    "                <li>If it is a reply or quote --> recursive call (1)</li>\n",
    "            </ol>\n",
    "        </li>\n",
    "    </ol>\n",
    "</li>\n",
    "</ol>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# main function for completing database\n",
    "def complete_references(tweet,key,original_ids_processed,original_ids):\n",
    "    \"\"\"\n",
    "    Completes the references of a tweet in MongoDB by checking if retweets, quotes and replies referenced tweets are also in our database.\n",
    "    \n",
    "    1. If the referenced tweet is in our database --> OK\n",
    "    2. If the referenced tweet is not in our database --> collect it from Twitter:\n",
    "        2.a If it is not found (erased, private account, etc.) --> nothing\n",
    "        2.b If it is found, but out of our date range (10-04-2019 --- 10-11-2019) --> nothing\n",
    "        2.c It it is found and within our time window (10-04-2019 --- 10-11-2019):\n",
    "            2.c.1 If it is a original tweet --> add to database\n",
    "            2.c.2 If it is a reply or quote --> recursive call (1)\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet -- tweet extracted from MongoDB\n",
    "    original_ids_processed -- list of Tweets' ObjectIDs just processed (cache for speed and efficiency)\n",
    "    original_ids -- list of Tweets' ObjectIDs existing in database\n",
    "    \"\"\"\n",
    "    tweet_type, tweet_id, original_tweet_id = get_tweet_info(tweet)\n",
    "    \n",
    "    # original tweet collected in previous interactions\n",
    "    if original_tweet_id in original_ids_processed:\n",
    "        return original_ids_processed[original_tweet_id]\n",
    "    \n",
    "    # original tweet in database\n",
    "    if original_tweet_id in original_ids:\n",
    "        original_ids_processed[original_tweet_id] = True\n",
    "        return True\n",
    "    \n",
    "    # original tweet should be collected\n",
    "    original_tweet = get_original_tweet(original_tweet_id,key)\n",
    "\n",
    "    # the original tweet could not be collected\n",
    "    if original_tweet is False:\n",
    "        original_ids_processed[original_tweet_id] = False\n",
    "        return False\n",
    "\n",
    "    processed_tweet = process_tweet(original_tweet)\n",
    "\n",
    "    # the original tweet is within our time window\n",
    "    if processed_tweet[0] is False:\n",
    "        original_ids_processed[original_tweet_id] = False\n",
    "        return False\n",
    "\n",
    "    # before adding the tweet, its references (in case of reply or quote) should be also completed\n",
    "    referenced_tweet_type, referenced_tweet_id, referenced_original_tweet_id = get_tweet_info(processed_tweet[0])\n",
    "    if referenced_tweet_type in ['reply','quote']:\n",
    "        references_completed = complete_references(processed_tweet[0],key,original_ids_processed,original_ids)   # recursive method\n",
    "    else:\n",
    "        references_completed = True\n",
    "\n",
    "    if references_completed:\n",
    "        add_tweet_and_user_to_mongodb(processed_tweet, db.tweets, db.users_metadata)\n",
    "        \n",
    "    original_ids_processed[original_tweet_id] = references_completed\n",
    "\n",
    "    return references_completed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get IDs of tweets that can be referenced (original, quote, replies)\n",
    "referenced_tweets = get_referenced_tweets(db.tweets)\n",
    "referenced_tweets = [rt['_id'] for rt in referenced_tweets]  # build a list with only ids\n",
    "# Get IDs of tweets that can reference other tweets (retweet, quote, replies)\n",
    "referencing_tweets = get_referencing_tweets(db.tweets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "original_ids_processed = {}   # cache list of processed tweets\n",
    "key=0                         # key to use in Twitter API calls\n",
    "iterations=0                  # counter of interactions for intermediate backups of the cache\n",
     "number_of_keys = len(keys)    # must match the keys dict; a hardcoded 25 raises KeyError with only 2 keys\n",
    "\n",
    "# complete references of referencing tweets (retwets, quotes and replies)\n",
    "for tweet in progress_bar(referencing_tweets):\n",
     "    complete_references(tweet,key,original_ids_processed,referenced_tweets)   # 'original_ids' was undefined; the IDs in DB are in referenced_tweets\n",
    "    key = (key+1)%number_of_keys  # iterate over available keys to manage API limits\n",
    "    \n",
    "    # as this is a slow process, we save from time to time the cache of the tweets already processed\n",
    "    if iterations%100000==0:\n",
    "        with open('ids_processed.pickle', 'wb') as handle:\n",
    "            pickle.dump(original_ids_processed, handle, protocol=pickle.HIGHEST_PROTOCOL)\n",
    "            \n",
    "    iterations+=1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Remove tweets without references\n",
    "\n",
    "Once the previous steps have finished, tweets which still do not have their reference tweet are removed\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# get referenced tweets and build DataFrame with associated ObjectIDs\n",
     "referenced_tweets = get_referenced_tweets(db.tweets)\n",
    "referenced_tweets = [rt['_id'] for rt in referenced_tweets]\n",
    "df_referenced = pd.DataFrame(referenced_tweets)\n",
    "df_referenced.columns = ['_id']\n",
    "\n",
    "# get referencing tweets and build DataFrame with associated ObjectIDs and referenced ObjectIDs\n",
     "referencing_tweets = get_referencing_tweets(db.tweets)\n",
    "df_referencing = pd.DataFrame(referencing_tweets)\n",
    "referencing_values = [get_referenced_id(rt) for rt in referencing_tweets]\n",
    "df_referencing['referenced_id'] = referencing_values\n",
    "df_referencing = df_referencing[['_id','referenced_id']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Iteratively delete tweets whose referenced tweet is absent from the database.\n",
    "# Deleting a tweet can orphan other tweets that referenced it, so removal is\n",
    "# repeated until a fixed point is reached (no orphaned tweets remain).\n",
    "\n",
    "# initialization: start from the full referenced/referencing DataFrames built above\n",
    "df_updated_referenced = df_referenced\n",
    "df_updated_referencing = df_referencing\n",
    "\n",
    "# orphans: referencing tweets whose 'referenced_id' is not among the stored tweet ids\n",
    "df_remove = df_updated_referencing[~df_updated_referencing['referenced_id'].isin(df_updated_referenced['_id'])]\n",
    "tweets_without_reference = df_remove.shape[0]\n",
    "\n",
    "iterations=0\n",
    "# while database has got tweets without reference, we should iteratively remove tweets\n",
    "while (tweets_without_reference > 0):    \n",
    "    \n",
    "    # one DeleteOne per orphaned tweet ObjectID\n",
    "    operations = []\n",
    "    for tweet_to_delete in df_remove._id.values:\n",
    "        operations.append(DeleteOne({'_id': tweet_to_delete}))\n",
    "\n",
    "    # replay the same deletions on both collections\n",
    "    # NOTE(review): reusing these filters on users_metadata implies its documents\n",
    "    # share the tweets' ObjectIDs -- confirm that assumption against the schema.\n",
    "    # NOTE(review): matched_count is presumably always 0 for delete-only bulk\n",
    "    # writes, so the 'M:' figures look uninformative -- confirm with pymongo docs.\n",
    "    results = db.tweets.bulk_write(operations)\n",
    "    print(\"tweets M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "          \" D:\", str(results.deleted_count).rjust(8, \" \"), end='; ')\n",
    "    results = db.users_metadata.bulk_write(operations)\n",
    "    print(\"users metadata M:\", str(results.matched_count).rjust(8, \" \"),\n",
    "          \" D:\", str(results.deleted_count).rjust(8, \" \"))\n",
    "    \n",
    "    # update dataframes removing deleted tweets\n",
    "    df_updated_referencing = df_updated_referencing[~df_updated_referencing['_id'].isin(df_remove['_id'])]\n",
    "    df_updated_referenced = df_updated_referenced[~df_updated_referenced['_id'].isin(df_remove['_id'])]\n",
    "    \n",
    "    # we should check tweets without reference again (referenced tweets may be deleted in previous iteration)\n",
    "    df_remove = df_updated_referencing[~df_updated_referencing['referenced_id'].isin(df_updated_referenced['_id'])]\n",
    "    tweets_without_reference = df_remove.shape[0]\n",
    "    print(tweets_without_reference)\n",
    "\n",
    "    # update control variables\n",
    "    iterations+=1\n",
    "    \n",
    "print('DONE!',iterations,\"were necessary for database to recursively converge!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Update users collection according to new database status"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_ids(user_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectIDs of users\n",
    "    \n",
    "    Keyword arguments:\n",
    "    user_collection -- MongoDB Users' Collection\n",
    "    \n",
    "    Returns a list of dicts of the form {'_id': ObjectId}, one per user, so\n",
    "    that pd.DataFrame() on the result yields a single '_id' column\n",
    "    \"\"\"\n",
    "    # project only the _id field; keep the raw dicts (callers rely on the '_id' key)\n",
    "    ids = list(user_collection.find({},{'_id': 1}))\n",
    "    print(len(ids),'users extracted!')\n",
    "    return ids\n",
    "\n",
    "def get_tweet_user_ids(tweet_collection):\n",
    "    \"\"\"\n",
    "    Extracts the ObjectIDs of users that have created tweets\n",
    "    \n",
    "    Keyword arguments:\n",
    "    tweet_collection -- MongoDB Tweets' Collection\n",
    "    \n",
    "    Returns a list of dicts of the form {'_id': ObjectId}, one per distinct\n",
    "    tweet author, so that pd.DataFrame() on the result yields an '_id' column\n",
    "    \"\"\"\n",
    "    \n",
    "    # $group on user_id deduplicates authors server-side\n",
    "    pipeline = [\n",
    "    {\n",
    "        '$group': {\n",
    "            '_id': '$user_id'\n",
    "        }\n",
    "    }\n",
    "    ]\n",
    "    \n",
    "    ids = list(tweet_collection.aggregate(pipeline))\n",
    "    print(len(ids),'users with tweets extracted!')\n",
    "    return ids"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Remove those users with no interactions\n",
    "These users only had interactions that have been deleted, so they can be safely discarded"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ObjectIDs currently present in the users collection\n",
    "df_user_ids = pd.DataFrame(get_user_ids(db.users))\n",
    "\n",
    "# ObjectIDs of tweet authors, i.e. users that still have interactions\n",
    "df_tweet_user_ids = pd.DataFrame(get_tweet_user_ids(db.tweets))\n",
    "\n",
    "# users that no longer author any tweet must be dropped from the users collection\n",
    "no_interaction_mask = ~df_user_ids['_id'].isin(df_tweet_user_ids['_id'])\n",
    "df_users_remove = df_user_ids[no_interaction_mask]\n",
    "df_users_remove.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# update MongoDB: drop users that no longer author any tweet\n",
    "# guard: bulk_write() raises InvalidOperation when given an empty operations list\n",
    "operations = [DeleteOne({'_id': user_to_delete})\n",
    "              for user_to_delete in df_users_remove._id.values]\n",
    "if operations:\n",
    "    results = db.users.bulk_write(operations)\n",
    "    print(\"users D:\", str(results.deleted_count).rjust(8, \" \"))\n",
    "else:\n",
    "    print(\"users D:\", str(0).rjust(8, \" \"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create the new users"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ObjectIDs currently present in the users collection\n",
    "df_user_ids = pd.DataFrame(get_user_ids(db.users))\n",
    "\n",
    "# ObjectIDs of all tweet authors after completing references\n",
    "df_tweet_user_ids = pd.DataFrame(get_tweet_user_ids(db.tweets))\n",
    "\n",
    "# authors missing from the users collection: introduced while completing references\n",
    "new_user_mask = ~df_tweet_user_ids['_id'].isin(df_user_ids['_id'])\n",
    "df_users_add = df_tweet_user_ids[new_user_mask]\n",
    "df_users_add.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# update MongoDB: upsert users discovered while completing references\n",
    "# 'scores': -1 is presumably a placeholder meaning 'not yet scored' -- confirm\n",
    "# guard: bulk_write() raises InvalidOperation when given an empty operations list\n",
    "operations = [UpdateOne({'_id': user_to_add},\n",
    "                        {'$set': {'scores': -1}},\n",
    "                        upsert=True)\n",
    "              for user_to_add in df_users_add._id.values]\n",
    "if operations:\n",
    "    results = db.users.bulk_write(operations)\n",
    "    print(\"users U:\", str(results.upserted_count).rjust(8, \" \"))\n",
    "else:\n",
    "    print(\"users U:\", str(0).rjust(8, \" \"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
