{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "trumpy_tweet.ipynb",
      "provenance": [],
      "collapsed_sections": [
        "AgGb8kcJnCen",
        "f0j_rPjko65j",
        "IAx7bm1Jcf5J",
        "bpmcdLf2cjVO",
        "W0wzTidCdBZ1",
        "xFVy38mGdEc9",
        "POr9Y7-pxUXx",
        "CANgsy5UxXwm",
        "QuUhdHi8xksm",
        "y1tc073kxo5v",
        "kanzQ10r0zbb",
        "j7Zht0zu02Jc",
        "4Xrzvxap7amh"
      ]
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "ih5rQ_WKCpNO",
        "colab_type": "code",
        "outputId": "792ad6f9-d255-48fa-ed7a-bb40f81f4ad3",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive/')"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n",
            "\n",
            "Enter your authorization code:\n",
            "··········\n",
            "Mounted at /content/gdrive/\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Jk063CymFFw4",
        "colab_type": "code",
        "outputId": "46ea4c4a-40f9-4b96-8b00-b3949dd46566",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "%cd /content/gdrive/My Drive/TrumpTweetGenerator"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/content/gdrive/My Drive/TrumpTweetGenerator\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8FSr-vP6YIBg",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "pip install markovify"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "du3_SofM47NM",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import markovify"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4lmX2qzfET6S",
        "colab_type": "code",
        "outputId": "00ed41f4-9e6d-4c1a-a9ad-50a02c38f30f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 85
        }
      },
      "source": [
        "from google.colab import files\n",
        "\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import json\n",
        "import os\n",
        "import random\n",
        "import re\n",
        "\n",
        "import spacy\n",
        "from spacy.tokenizer import Tokenizer\n",
        "from spacy.lang.en import English\n",
        "import nltk\n",
        "from nltk.corpus import stopwords\n",
        "nltk.download('punkt')\n",
        "nltk.download('stopwords')\n",
        "\n",
        "from sklearn.model_selection import train_test_split\n",
        "from sklearn import metrics\n",
        "\n",
        "from datetime import datetime"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "[nltk_data] Downloading package punkt to /root/nltk_data...\n",
            "[nltk_data]   Unzipping tokenizers/punkt.zip.\n",
            "[nltk_data] Downloading package stopwords to /root/nltk_data...\n",
            "[nltk_data]   Unzipping corpora/stopwords.zip.\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qnhg0TEXIG-e",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "pd.set_option('max_colwidth',500)\n",
        "random.seed(5067)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tT89X0KlJL1X",
        "colab_type": "text"
      },
      "source": [
        "## Get Trump's tweets"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3sv3RhIrHvJ9",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# result dataframe\n",
        "df_trumptweet = pd.DataFrame()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "syrYZbMTHYUx",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# read in tweets of each year and append them to the result dataframe\n",
        "for data_file in os.listdir('data/trumptweet/'):\n",
        "    df = pd.read_json('data/trumptweet/' + data_file)\n",
        "    df_trumptweet = df_trumptweet.append(df)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CdCv4jtxIQnl",
        "colab_type": "code",
        "outputId": "0c17ed63-0d27-4ca9-cdcc-97ff8d9d9ad5",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "len(df_trumptweet)"
      ],
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "36307"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 18
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Na-y98stfXZR",
        "colab_type": "text"
      },
      "source": [
        "## Get other people's tweets"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "v_gkVkSKfas7",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_othertweet = pd.DataFrame()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xh1uwvOAleR6",
        "colab_type": "code",
        "outputId": "38affbb1-7ee8-4033-abd1-904984496194",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 51
        }
      },
      "source": [
        "!ls 'data/othertweet/'"
      ],
      "execution_count": 20,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "BarackObama.csv\t\t\t\t\tHillaryClintonTweets.csv\n",
            "HillaryClinton2014-01-01To2016-10-14Tweets.csv\tKimKardashianTweets.csv\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "oU7AZvv2fhq_",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 153
        },
        "outputId": "8886a90d-8aae-4bf6-ae81-71df5058f36c"
      },
      "source": [
        "for data_file in os.listdir('data/othertweet/'):\n",
        "    df = pd.read_csv('data/othertweet/' + data_file, engine='python')\n",
        "    df_othertweet = df_othertweet.append(df, ignore_index  = True)"
      ],
      "execution_count": 60,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.6/dist-packages/pandas/core/frame.py:7138: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\n",
            "of pandas will change to not sort by default.\n",
            "\n",
            "To accept the future behavior, pass 'sort=False'.\n",
            "\n",
            "To retain the current behavior and silence the warning, pass 'sort=True'.\n",
            "\n",
            "  sort=sort,\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_4YeCLcKgYzn",
        "colab_type": "code",
        "outputId": "bebd4ad6-ef09-4a43-9c34-53779b237f1f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "len(df_othertweet)"
      ],
      "execution_count": 22,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "24196"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 22
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "u-BN90bt2sOD"
      },
      "source": [
        "## Preprocessing"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wpBwUucN2lj5",
        "colab_type": "text"
      },
      "source": [
        "### remove retweets"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "iPbamtssJI-x",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_trumptweet = df_trumptweet[df_trumptweet['is_retweet']==False]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TS-n9PurgrEH",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_othertweet = df_othertweet[['date', 'id', 'retweet', 'text', 'author']]\n",
        "df_othertweet = df_othertweet[df_othertweet['retweet']==False]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FBCr0wuCGmbJ",
        "colab_type": "text"
      },
      "source": [
        "### clean tweet"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "T59qoRMDHdsb",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def clean_tweet(text):\n",
        "    # remove quotes\n",
        "    text = re.sub(r'\"@.*', '', text)\n",
        "    text = re.sub(r'^“.*”$', '', text)\n",
        "    # remove URLs\n",
        "    text = re.sub(r'https*:\\/\\/\\S*', '', text)\n",
        "    # remove \\n\n",
        "    text = re.sub('\\n', ' ', text)\n",
        "    # replace '&amp;' with 'and'\n",
        "    text = re.sub('&amp;', 'and', text)\n",
        "    # remove extra whitespaces\n",
        "    text = re.sub(r'\\s+', ' ', text)\n",
        "    # remove picture link\n",
        "    text = re.sub(r'pic\\.twitter\\.com\\/\\S*', '', text)\n",
        "    return(text)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "7bKdl0pVPfHv",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_trumptweet['cleaned_text'] = list(map(clean_tweet, df_trumptweet['text']))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4qxuI8XZCt2L",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_othertweet['cleaned_text'] = list(map(clean_tweet, df_othertweet['text']))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KciIGDoAFUfX",
        "colab_type": "text"
      },
      "source": [
        "### remove empty tweets"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "D5Pgo_lFInKh",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_trumptweet['cleaned_text'].replace(' ', np.nan, inplace = True)\n",
        "df_trumptweet['cleaned_text'].replace('', np.nan, inplace = True)\n",
        "df_trumptweet.dropna(subset = ['cleaned_text'], inplace = True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "O7dQWeUfGwHi",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_othertweet['cleaned_text'].replace(' ', np.nan, inplace = True)\n",
        "df_othertweet['cleaned_text'].replace('', np.nan, inplace = True)\n",
        "df_othertweet.dropna(subset = ['cleaned_text'], inplace = True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TyPH1USogF_M",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "ab22988f-f45b-4bcb-9e88-343b0f42ad20"
      },
      "source": [
        "len(df_trumptweet), len(df_othertweet)"
      ],
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(24595, 24127)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 30
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "fyaGeUDJ3Pln",
        "colab_type": "text"
      },
      "source": [
        "### save data\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uubEmrCt2ybG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_trumptweet['cleaned_text'].to_csv( 'trumptweet_cleaned_text.csv', index = False)\n",
        "\n",
        "# load data\n",
        "# pd.read_csv('trumptweet_cleaned_text.csv', names = ['cleaned_text'])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vMfYySVS67sG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_trumptweet.to_csv( 'trumptweet.csv')\n",
        "df_othertweet.to_csv( 'othertweet.csv')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aQ6sowLjevOT",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# load data\n",
        "# df_trumptweet = pd.read_csv('trumptweet.csv', lineterminator='\\n', index_col=0)\n",
        "# df_othertweet = pd.read_csv('othertweet.csv', lineterminator='\\n', index_col=0)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "XlxxILqhnoTV",
        "colab_type": "text"
      },
      "source": [
        "## Classifier Modeling"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9vKF2ZojtY8A",
        "colab_type": "text"
      },
      "source": [
        "#### Prepare data for classifier"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HhyZeUE3p94o",
        "colab_type": "code",
        "outputId": "6d53a3ad-e644-442f-b357-91c7061136c2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 221
        }
      },
      "source": [
        "# combine Trump's and other people's tweets\n",
        "df_t = df_trumptweet[['cleaned_text']]\n",
        "df_t['author'] = 1 # 1 = trump\n",
        "df_o = df_othertweet[['cleaned_text']]\n",
        "df_o['author'] = 0 # 0 = non-trump\n",
        "\n",
        "df_combine = df_t.append(df_o)"
      ],
      "execution_count": 46,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:2: SettingWithCopyWarning: \n",
            "A value is trying to be set on a copy of a slice from a DataFrame.\n",
            "Try using .loc[row_indexer,col_indexer] = value instead\n",
            "\n",
            "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
            "  \n",
            "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \n",
            "A value is trying to be set on a copy of a slice from a DataFrame.\n",
            "Try using .loc[row_indexer,col_indexer] = value instead\n",
            "\n",
            "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
            "  after removing the cwd from sys.path.\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WyKCxUPVfC5a",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def prepare_tweet_clf(t):\n",
        "    # clean\n",
        "    t = t.lower()\n",
        "    t = re.sub(\"'ll\", ' will', t) # replace abbreviations\n",
        "    t = re.sub(\"won't\", 'will not', t)\n",
        "    t = re.sub(\"n't\", ' not', t)\n",
        "    t = re.sub(r'@[A-Za-z0-9_]+', '', t) # remove @mention\n",
        "    t = re.sub(r'#[A-Za-z0-9_]+', '', t) # remove #tag\n",
        "    t = re.sub(r'[^a-zA-Z ]', '', t) # remove special characters\n",
        "\n",
        "    # remove stopwords\n",
        "    stop = set(stopwords.words('english'))\n",
        "    stop.update(['rt', 'cc'])\n",
        "    stop = stop - set(['no', 'not', 'never'])\n",
        "    t = [word for word in t.split(' ') if word not in stop]\n",
        "    t = ' '.join(t)\n",
        "    return t"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zAvdr_gGy8h7",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_combine['cleaned_text_clf'] = list(map(prepare_tweet_clf, df_combine['cleaned_text']))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "z2mr1UuN5czZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# save data\n",
        "df_combine.to_csv( 'combinetweet.csv')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "E06BMen15ns2",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# load data\n",
        "# df_combine = pd.read_csv('combinetweet.csv', lineterminator='\\n', index_col=0)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ePuS_BTjrQLd",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# split into train, test, validation\n",
        "df_train, df_notrain = train_test_split(df_combine, test_size=0.2)\n",
        "df_test, df_validation = train_test_split(df_notrain, test_size=0.5)\n",
        "\n",
        "x_train, y_train = df_train['cleaned_text_clf'], df_train['author']\n",
        "x_notrain, y_notrain = df_notrain['cleaned_text_clf'], df_notrain['author']\n",
        "x_test, y_test = df_test['cleaned_text_clf'], df_test['author']\n",
        "x_validation, y_validation = df_validation['cleaned_text_clf'], df_validation['author']"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "LJ53VP19jHVZ",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "c0c871f4-879c-4287-8a3c-5f78f4c35f7e"
      },
      "source": [
        "len(x_train), len(x_notrain), len(x_validation), len(x_test)"
      ],
      "execution_count": 53,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(38977, 9745, 4873, 4872)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 53
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OTHZUN33tyPL",
        "colab_type": "text"
      },
      "source": [
        "#### Multinomial Naive Bayes model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "AgGb8kcJnCen",
        "colab_type": "text"
      },
      "source": [
        "##### define model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "c-OavIAANVnE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from sklearn.pipeline import Pipeline\n",
        "from sklearn.naive_bayes import MultinomialNB\n",
        "from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n",
        "from sklearn.model_selection import GridSearchCV\n",
        "\n",
        "text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())])\n",
        "\n",
        "tuned_parameters = {\n",
        "    'vect__ngram_range': [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)],\n",
        "    'tfidf__use_idf': (True, False),\n",
        "    'tfidf__norm': ('l1', 'l2'),\n",
        "    'clf__alpha': [1, 1e-1, 1e-2]\n",
        "}"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "INCDZWUanEhl",
        "colab_type": "text"
      },
      "source": [
        "##### train model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NeQ7cHguNfCf",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "0cce2bee-7e6c-453d-adba-f4a0b65c76f0"
      },
      "source": [
        "score = 'precision'  # optimize against precision (= true Trump / predicted to be Trump) \n",
        "np.errstate(divide='ignore')\n",
        "model_clf_nb = GridSearchCV(text_clf, tuned_parameters, cv=10, scoring=score)\n",
        "model_clf_nb.fit(x_train, y_train)\n",
        "\n",
        "print(\"Best parameters set found on training set:\", model_clf_nb.best_params_)"
      ],
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Best parameters set found on training set: {'clf__alpha': 0.1, 'tfidf__norm': 'l2', 'tfidf__use_idf': False, 'vect__ngram_range': (1, 3)}\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "3tZvJXLRnGRM",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "w3i6_Q2pPeMC",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "00f6cd7a-ee00-4c0f-8dc4-a6888e325a29"
      },
      "source": [
        "from joblib import dump\n",
        "dump(model_clf_nb, 'model_clf_NB.joblib') "
      ],
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "['model_clf_NB.joblib']"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 32
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VOfDtT8RnI4B",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "W7_8gSQNP3rm",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from joblib import load\n",
        "model_clf_nb = load('model_clf_NB.joblib') "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8yZNIv0FnNiu",
        "colab_type": "text"
      },
      "source": [
        "##### print statistics"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NLfBQGwGuf5X",
        "colab_type": "code",
        "outputId": "aa7e93bb-e781-42e0-d3db-bcf874b92a4c",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 487
        }
      },
      "source": [
        "from sklearn.metrics import classification_report\n",
        "print(classification_report(y_notrain, model_clf_nb.predict(x_notrain), digits=4))\n",
        "\n",
        "import matplotlib.pyplot as plt\n",
        "from sklearn.metrics import confusion_matrix\n",
        "CM = confusion_matrix(y_notrain, model_clf_nb.predict(x_notrain))\n",
        "from mlxtend.plotting import plot_confusion_matrix\n",
        "fig, ax = plot_confusion_matrix(conf_mat=CM ,  figsize=(5, 5))\n",
        "plt.show()"
      ],
      "execution_count": 55,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "              precision    recall  f1-score   support\n",
            "\n",
            "           0     0.9804    0.9772    0.9788      4822\n",
            "           1     0.9777    0.9809    0.9793      4923\n",
            "\n",
            "    accuracy                         0.9791      9745\n",
            "   macro avg     0.9791    0.9790    0.9791      9745\n",
            "weighted avg     0.9791    0.9791    0.9791      9745\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAATkAAAE9CAYAAABwcBXnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAATvklEQVR4nO3debRVdd2A8ecLFxEIERxSQNJUZEpQ\nEDXnUl8ccigHnMjUV1PRIC0tTbNJVFw5NWiFmibOOZCzr6bhAIiGKKAkKKAGqBlqocLv/eMerheZ\njnj2PYefz2etu+4++5yz9/esddez9j7TjZQSkpSrZtUeQJKKZOQkZc3IScqakZOUNSMnKWtGTlLW\n6qo9QGPRolWK1dpVewzVqC26da72CKpRL788nblz58bSrqutyK3Wjpa9jqj2GKpRo0dfUO0RVKO2\n27rfMq/zdFVS1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbk\nJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZ\nOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylr\nRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnK\nmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyDWxZs2Cx/84hFsu/BYAD1x+PE9cM5QnrhnK\nS6PO5MbzvwlA1y+sw8O/H8y/Hj2XIYft1HD/zuu2455fH8f460/lqZGncOLB21flcahYxx1zFF06\nrkvfPr0a1t1y801s2bsnrVdrxlPjxi12+wvOO5ee3TZh856bcf999zb1uDWtrsiNR8QA4GKgOfD7\nlNKwIve3Khh88A5MmT6btm1aArDrcb9puG7ksEHc+dfnAHjr3+9xyoW38bWdei12/w8XLOT0i0fx\nzJRZfK51Sx67+js8OOYFJk+b3XQPQoU74ptH8u0TBnPMUYMa1vXs2Yvrb7yVwScct9htJz3/PDfd\ncD3j//4cr736KnsO2JVnn3+B5s2bN/XYNamwI7mIaA78CtgD6AEcEhE9itrfqqDTuu0YsF03rrz9\nySWua9umJTv13Zg7H5kIwJy33uWpSTP54MMFi93u9Tfm8cyUWQC88958Jk+fTcd12hU/vJrU9jvs\nSIcOHRZb1617d7puttkStx115+0cePBAWrZsyYYbbcTGG2/C2DFjmmrUmlfk6Wp/YGpK6aWU0vvA\n9cC+Be6v5l0wdB/OuOwvLExpieu+tmMvHh43lXnvzi97e13Wb0+frh0Z+9wrlRxTq5hZs2bRufMG\nDZc7derMq6/OquJEtaXIyHUCZjS6PLO07jNpj+26M/vNd3h68tL/+A7avQ833vdM2dtr02o1Rg4b\nxPd+eccnCqP0WVPoc3LliIhjgWMBWK1tdYcp0La9N2TvHXsw4MvdaNmyBWu0acmIHx/CUT8eyVrt\nWtOv5wYcfNrVZW2rrnkzRg4bxA33PM3tD08seHLVuk6dOjFz5kfHE7NmzaRjx8/s8cQSijySmwVs\n0Ohy59K6xaSUrkgp9Usp9Yu61gWOU11n/fpuNvnaz+m2/7kMOvNaHh43laN+PBKA/b+yOXf/bRLz\n3/+wrG399syDmDJ9NpeMfKTIkbWK2GvvfbjphuuZP38+06dNY+rUF9mqf/9qj1UzijySGwtsGhEb\nUR+3gcChBe5vlXXgbn0Y/seHFlv3+Q5tGX31ybRtszoLFyYGD9yeLQYO50ubrM9he/bl2Rdf44lr\nhgJw9m/u5t7HJldjdBVk0OGH
8OhfH2bu3LlsvGFnfnTWObTv0IHvDjmJuXPm8PV992Lz3n248657\n6dGzJ9848CC22LwHdXV1XHTJr3xltZFIS3kSvGIbj9gTuIj6t5CMSCn9fHm3b9ZmvdSy1xGFzaNV\n21ujL6j2CKpR223dj6eeGhdLu67Q5+RSSncBdxW5D0laHj/xIClrRk5S1oycpKwZOUlZM3KSsmbk\nJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZ\nOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylr\nRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrdcu6IiLmAWnRxdLvVFpOKaU1Cp5Nkj61\nZUYupdS2KQeRpCKUdboaEdtHxLdKy2tHxEbFjiVJlbHCyEXE2cBpwA9Kq1YDri1yKEmqlHKO5PYH\n9gHeBUgpvQp4KitplVBO5N5PKSVKL0JERJtiR5KkyikncjdGxOXAmhHxv8ADwO+KHUuSKmOZr64u\nklIaHhG7Af8GugJnpZTuL3wySaqAFUau5FmgFfWnrM8WN44kVVY5r64eA4wBvg4cADwREUcVPZgk\nVUI5R3LfA7ZIKb0BEBFrAY8BI4ocTJIqoZwXHt4A5jW6PK+0TpJq3vI+u/rd0uJU4MmIuJ365+T2\nBSY0wWyS9Kkt73R10Rt+/1H6WeT24saRpMpa3gf0z2nKQSSpCCt84SEi1gG+D/QEVl+0PqX0lQLn\nkqSKKOeFhz8Bk4GNgHOA6cDYAmeSpIopJ3JrpZT+AHyQUvprSukowKM4SauEct4n90Hp92sRsRfw\nKtChuJEkqXLKidzPIqIdcApwKbAGMLTQqSSpQsr5gP6o0uLbwC7FjiNJlbW8NwNfykf/yGYJKaWT\nC5lIkipoeUdy45psipItunVm9OgLmnq3WkW032pwtUdQjZo/5ZVlXre8NwNfXcg0ktSE/OfSkrJm\n5CRlzchJylo53wzcNSIejIiJpcubR8SZxY8mSZ9eOUdyv6P+H0t/AJBSmgAMLHIoSaqUciLXOqU0\n5mPrPixiGEmqtHIiNzciNuajfy59APBaoVNJUoWU89nVE4ErgG4RMQuYBhxe6FSSVCHlfHb1JWDX\niGgDNEspzVvRfSSpVpTzzcBnfewyACmlnxQ0kyRVTDmnq+82Wl4d2BuYVMw4klRZ5ZyuXtj4ckQM\nB+4tbCJJqqCV+cRDa6BzpQeRpCKU85zcs3z0vXLNgXUAn4+TtEoo5zm5vRstfwj8M6Xkm4ElrRKW\nG7mIaA7cm1Lq1kTzSFJFLfc5uZTSAmBKRHRponkkqaLKOV1tDzwXEWNo9HaSlNI+hU0lSRVSTuR+\nVPgUklSQciK3Z0rptMYrIuI84K/FjCRJlVPO++R2W8q6PSo9iCQVYXn/d/V44ATgixExodFVbYHR\nRQ8mSZWwvNPV64C7gXOB0xutn5dSerPQqSSpQpb3f1ffBt4GDmm6cSSpsvxvXZKyZuQkZc3IScqa\nkZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKy\nZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJyk\nrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGrkZcdsnF9O3Tiy179+TSiy9a7LqLfnkhrVoEc+fO\nrdJ0airNmgWPjzyNWy7+NgA79+/KY9edxhPXn86DI4byxQ3WBuDkw7/C+FvOYMwNP+Cu355El/Xb\nN2zjZyfvy7ibfsi4m37IAbtvWZXHUUsKi1xEjIiI2RExsah95OK5iRO5csTvePSxMYx56u/cfd
co\n/jF1KgAzZszgwfvvY4MuXao8pZrC4EN3Ycq0fzZcvuSHA/nWGVexzcBh3HD3OE4/ZgAAz0yewXaH\nnU//g8/lzw8+zc+/sx8AA7bvSZ/uG7D1wGHseMRwhgz6Km3brF6Vx1IrijySuwoYUOD2szF58iS2\n2mprWrduTV1dHTvsuBO33XYrAN8/dSg/P/d8IqLKU6pondZdkwHb9+TKPz/WsC6lxBqlSK3RthWv\nzXkbgEfGvch//vsBAGMmTKfT59cEoPsX1+Nv46eyYMFC3vvv+zz74ix2/3L3Jn4ktaWwyKWUHgHe\nLGr7OenZsxejRz/KG2+8wXvvvcc9d9/FzBkzuPOO2+nYsROb9+5d7RHVBC743jc44+LbWLgwNaw7\n4SfX8edLT2DqPT/l0L22YviV9y9xvyP325Z7Rz8PwIQX6qPWavUWrLVmG3bq15XO67Vf4j6fJXXV\nHkDQrXt3Tjn1NL62x+60btOG3r378P78+Zw/7BeMuvu+ao+nJrDHDr2Y/eY8np40gx36btqw/qTD\ndmH/k37N2IkvM3TQVznvlK9zwk+ua7h+4J5bsWWPLux2zMUAPPjEZPr2/AIPXXUKc996hycnTGPB\ngoVN/nhqSdVfeIiIYyNiXESMmzN3TrXHqZojjzqax8Y8xQMPPcKa7dvTvUdPXp4+jf59e7PZJhsy\na+ZMtu2/Ja+//nq1R1UBtu3zRfbe6UtM/ss5/HHYt9h5q67cesm3+VLXToyd+DIAN983nm16b9Rw\nn1223ozTjv4fDhhyOe9/8GHD+vP/cC/bDBzG3sdfRkTw4iuzm/zx1JKqRy6ldEVKqV9Kqd86a69T\n7XGqZvbs+j/EV155hdtvu5XDB32TV16dzZSp05kydTqdOnfm8THjWW+99ao8qYpw1qV3sMmAH9Ft\nr7MZdPqVPDz2BQ4cegVrfK4Vm3RZF4CvbNOt4UWJ3pt15rIzBnLA0MuZ89Y7Ddtp1izo0K4NAL02\n7UivTTvywOOTm/4B1RBPV2vEIQd9gzfffIMWdS246JJfseaaa1Z7JFXZggULOfGn1zFy+DEsTAv5\n17//w3E/vhaAXwzdjzatW/Kn848GYMbrb3HgkMtpUdecB0YMAWDeO//lqDOu/syfrkZKacW3WpkN\nR4wEdgbWBv4JnJ1S+sPy7tO3b780+slxhcyjVV/7rQZXewTVqPlTbmThe7OX+haEwo7kUkqHFLVt\nSSpX1Z+Tk6QiGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJ\nypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNy\nkrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaM\nnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1\nIycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa5FSqvYMDSJiDvByteeoIWsDc6s9\nhGqSfxuL+0JKaZ2lXVFTkdPiImJcSqlftedQ7fFvo3yerkrKmpGTlDUjV9uuqPYAqln+bZTJ5+Qk\nZc0jOUlZM3I1KCIGRMSUiJgaEadXex7VjogYERGzI2JitWdZVRi5GhMRzYFfAXsAPYBDIqJHdadS\nDbkKGFDtIVYlRq729AemppReSim9D1wP7FvlmVQjUkqPAG9We45ViZGrPZ2AGY0uzyytk7QSjJyk\nrBm52jML2KDR5c6ldZJWgpGrPWOBTSNio4hYDRgI3FHlmaRVlpGrMSmlD4HBwL3AJODGlNJz1Z1K\ntSIiRgKPA5tFxMyIOLraM9U6P/EgKWseyUnKmpGTlDUjJy
lrRk5S1oycpKwZOTWJiHin9LtjRNy8\ngtsOiYjWn3D7O0fEqHLXf+w2R0bEZZ9wf9MjYu1Pch9Vh5HTSit9Y8onklJ6NaV0wApuNgT4RJGT\nlsXIaQkRsWFETI6IP0XEpIi4edGRVekI5ryIGA8cGBEbR8Q9EfFURDwaEd1Kt9soIh6PiGcj4mcf\n2/bE0nLziBgeERMjYkJEnBQRJwMdgYci4qHS7XYvbWt8RNwUEZ8rrR9QmnM88PUyHlf/0naejojH\nImKzRldvEBEPR8SLEXF2o/scHhFjIuKZiLh8ZcKuKksp+ePPYj/AhkACtitdHgGcWlqeDny/0W0f\nBDYtLW8N/F9p+Q5gUGn5ROCdRtueWFo+HrgZqCtd7tBoH2uXltcGHgHalC6fBpwFrE79t7VsCgRw\nIzBqKY9l50XrgTUa7WtX4JbS8pHAa8BaQCtgItAP6A7cCbQo3e7XjR5Tw4z+1PZP3Up0UZ8NM1JK\no0vL1wInA8NLl28AKB1RfRm4KSIW3a9l6fd2wDdKy9cA5y1lH7sCv031H2UjpbS070nbhvovDx1d\n2sdq1H+sqRswLaX0YmmWa4FjV/CY2gFXR8Sm1Ee8RaPr7k8pvVHa1q3A9sCHQF9gbGnfrYDZK9iH\naoyR07J8/PN+jS+/W/rdDPhXSqlPmdtYGUF9gA5ZbGXEsva5PD8FHkop7R8RGwIPN7puaY83gKtT\nSj9YiX2pRvicnJalS0RsW1o+FPjbx2+QUvo3MC0iDgSIer1LV4+m/htUAA5bxj7uB46LiLrS/TuU\n1s8D2paWnwC2i4hNSrdpExFdgcnAhhGxcel2i0VwGdrx0ddWHfmx63aLiA4R0QrYrzT/g8ABEbHu\novki4gtl7Ec1xMhpWaYAJ0bEJKA98Jtl3O4w4OiI+DvwHB99Vft3Svd/lmV/s/HvgVeACaX7H1pa\nfwVwT0Q8lFKaQ32QRkbEBEqnqiml/1J/evqX0gsP5ZxGng+cGxFPs+RZzBjgFmAC9c/VjUspPQ+c\nCdxX2vf9wPpl7Ec1xG8h0RJKp3KjUkq9qjyK9Kl5JCcpax7JScqaR3KSsmbkJGXNyEnKmpGTlDUj\nJylrRk5S1v4fImpyXq7lHGgAAAAASUVORK5CYII=\n",
            "text/plain": [
              "<Figure size 360x360 with 1 Axes>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "f0j_rPjko65j",
        "colab_type": "text"
      },
      "source": [
        "#### LSTM classifier model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aHKnx14hpRCH",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import tensorflow as tf\n",
        "\n",
        "from tensorflow.keras.preprocessing.text import Tokenizer\n",
        "from tensorflow.keras.preprocessing.sequence import pad_sequences"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IAx7bm1Jcf5J",
        "colab_type": "text"
      },
      "source": [
        "##### load data"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "outputId": "cd8e3f6f-df56-4036-c1b9-039e930b5920",
        "id": "Fj5uDNcwPeZ2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "max_length = 30\n",
        "trunc_type='post'\n",
        "padding_type='post'\n",
        "oov_tok = \"<OOV>\"\n",
        "vocab_len=49999\n",
        "\n",
        "tokenizer = Tokenizer(num_words=vocab_len+1, oov_token=oov_tok)\n",
        "tokenizer.fit_on_texts(x_test)\n",
        "\n",
        "word_index = tokenizer.word_index\n",
        "vocab_size=len(word_index)\n",
        "print('Size of Vocabulary: ',vocab_size)\n",
        "\n",
        "train_sequences = tokenizer.texts_to_sequences(x_train)\n",
        "train_padded = pad_sequences(train_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
        "\n",
        "val_sequences = tokenizer.texts_to_sequences(x_validation)\n",
        "val_padded = pad_sequences(val_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
        "\n",
        "test_sequences = tokenizer.texts_to_sequences(x_test)\n",
        "test_padded = pad_sequences(test_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n",
        "\n",
        "y_train = np.expand_dims(y_train, axis=1)\n",
        "y_validation = np.expand_dims(y_validation, axis=1)"
      ],
      "execution_count": 113,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Size of Vocabulary:  8510\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eiB1mSREvier",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 102
        },
        "outputId": "273fccb6-d6e7-4c65-90a2-05b004bd4a84"
      },
      "source": [
        "# # download word embedding\n",
        "# !wget http://nlp.stanford.edu/data/glove.twitter.27B.zip\n",
        "!unzip glove*.zip"
      ],
      "execution_count": 115,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Archive:  glove.twitter.27B.zip\n",
            "replace glove.twitter.27B.25d.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: n\n",
            "replace glove.twitter.27B.50d.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: n\n",
            "replace glove.twitter.27B.100d.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: n\n",
            "replace glove.twitter.27B.200d.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: n\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "K27jMxMr3BmC",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "embeddings_index1 = {}\n",
        "with open('glove.twitter.27B.200d.txt') as f:\n",
        "    for line in f:\n",
        "        values = line.split()\n",
        "        word = values[0]\n",
        "        coefs = np.asarray(values[1:], dtype='float32')\n",
        "        embeddings_index1[word] = coefs\n",
        "f.close()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "V8OG7QNh3s9V",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "5799b145-c98a-42ae-f007-2e7b362cb2a0"
      },
      "source": [
        "embedding_dim1=200\n",
        "embeddings_matrix1 = np.zeros((vocab_size, embedding_dim1))\n",
        "for word, i in word_index.items():\n",
        "    embedding_vector = embeddings_index1.get(word)\n",
        "    if embedding_vector is not None:\n",
        "        embeddings_matrix1[i-1] = embedding_vector\n",
        "print('Shape of the Embeddings Matrix (GloVe Twitter): ',embeddings_matrix1.shape)"
      ],
      "execution_count": 117,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Shape of the Embeddings Matrix (GloVe Twitter):  (8510, 200)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bpmcdLf2cjVO",
        "colab_type": "text"
      },
      "source": [
        "##### define model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rwh29HOZ5Zds",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "model_clf_lstm = tf.keras.Sequential([\n",
        "    tf.keras.layers.Embedding(vocab_size, embedding_dim1, input_length=max_length, weights = [embeddings_matrix1], trainable = False),\n",
        "    tf.keras.layers.Dropout(0.4),\n",
        "    tf.keras.layers.Bidirectional(tf.keras.layers.CuDNNLSTM(128,return_sequences=True)),\n",
        "    tf.keras.layers.Bidirectional(tf.keras.layers.CuDNNLSTM(128)),\n",
        "    tf.keras.layers.Dropout(0.2),\n",
        "    tf.keras.layers.Dense(128, activation='relu'),\n",
        "    tf.keras.layers.Dropout(0.4),\n",
        "    tf.keras.layers.Dense(64, activation='relu'),\n",
        "    tf.keras.layers.Dropout(0.5),\n",
        "    tf.keras.layers.Dense(1, activation='sigmoid')\n",
        "])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "h0uixAX8c220",
        "colab_type": "text"
      },
      "source": [
        "##### train model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "A-lBlZj65aoW",
        "colab_type": "code",
        "outputId": "2ebc326a-025d-4dd1-80ef-346b3ed7e653",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 714
        }
      },
      "source": [
        "reduce = tf. keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, mode='auto')\n",
        "model_clf_lstm.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])\n",
        "num_epochs = 20\n",
        "history1 = model_clf_lstm.fit(train_padded, \n",
        "                                              y_train, \n",
        "                                              epochs=num_epochs, \n",
        "                                              batch_size=256, \n",
        "                                              validation_data=(val_padded,y_validation),\n",
        "                                              callbacks=[reduce],\n",
        "                                              verbose=1)"
      ],
      "execution_count": 119,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Train on 38977 samples, validate on 4873 samples\n",
            "Epoch 1/20\n",
            "38977/38977 [==============================] - 19s 495us/sample - loss: 0.6061 - acc: 0.6571 - val_loss: 0.5159 - val_acc: 0.7496\n",
            "Epoch 2/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.4920 - acc: 0.7589 - val_loss: 0.4048 - val_acc: 0.8198\n",
            "Epoch 3/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.4187 - acc: 0.8097 - val_loss: 0.3541 - val_acc: 0.8414\n",
            "Epoch 4/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.3748 - acc: 0.8342 - val_loss: 0.3795 - val_acc: 0.8165\n",
            "Epoch 5/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.3432 - acc: 0.8503 - val_loss: 0.3049 - val_acc: 0.8693\n",
            "Epoch 6/20\n",
            "38977/38977 [==============================] - 4s 104us/sample - loss: 0.3194 - acc: 0.8626 - val_loss: 0.3048 - val_acc: 0.8607\n",
            "Epoch 7/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.3053 - acc: 0.8690 - val_loss: 0.2940 - val_acc: 0.8744\n",
            "Epoch 8/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2851 - acc: 0.8787 - val_loss: 0.2747 - val_acc: 0.8808\n",
            "Epoch 9/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2702 - acc: 0.8853 - val_loss: 0.2751 - val_acc: 0.8824\n",
            "Epoch 10/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2607 - acc: 0.8912 - val_loss: 0.2692 - val_acc: 0.8851\n",
            "Epoch 11/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2539 - acc: 0.8941 - val_loss: 0.2910 - val_acc: 0.8779\n",
            "Epoch 12/20\n",
            "38977/38977 [==============================] - 4s 106us/sample - loss: 0.2416 - acc: 0.8989 - val_loss: 0.2577 - val_acc: 0.8867\n",
            "Epoch 13/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2279 - acc: 0.9057 - val_loss: 0.2506 - val_acc: 0.8939\n",
            "Epoch 14/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2243 - acc: 0.9073 - val_loss: 0.2560 - val_acc: 0.8921\n",
            "Epoch 15/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.2151 - acc: 0.9123 - val_loss: 0.2635 - val_acc: 0.8931\n",
            "Epoch 16/20\n",
            "38977/38977 [==============================] - 4s 104us/sample - loss: 0.2067 - acc: 0.9157 - val_loss: 0.2505 - val_acc: 0.8943\n",
            "Epoch 17/20\n",
            "38977/38977 [==============================] - 4s 104us/sample - loss: 0.1979 - acc: 0.9200 - val_loss: 0.2723 - val_acc: 0.8933\n",
            "Epoch 18/20\n",
            "38977/38977 [==============================] - 4s 105us/sample - loss: 0.1884 - acc: 0.9241 - val_loss: 0.2683 - val_acc: 0.8890\n",
            "Epoch 19/20\n",
            "38977/38977 [==============================] - 5s 122us/sample - loss: 0.1810 - acc: 0.9273 - val_loss: 0.2618 - val_acc: 0.8931\n",
            "Epoch 20/20\n",
            "38977/38977 [==============================] - 4s 104us/sample - loss: 0.1606 - acc: 0.9372 - val_loss: 0.2586 - val_acc: 0.9033\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "W0wzTidCdBZ1",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zc2pqXq1ijIB",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "timestring = datetime.now().strftime('%Y%m%d_%H%M%S')\n",
        "file_name = 'model_clf_lstm_{}.json'.format(timestring)\n",
        "weight_name = 'model_clf_lstm_weight_{}.h5'.format(timestring)\n",
        "\n",
        "# serialize model to JSON\n",
        "clf_lstm_model_json = model_clf_lstm.to_json()\n",
        "with open(file_name, 'w') as json_file:\n",
        "    json_file.write(clf_lstm_model_json)\n",
        "\n",
        "# serialize weights to HDF5\n",
        "model_clf_lstm.save_weights(weight_name)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xFVy38mGdEc9",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "o6H9Qu8dimIq",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 292
        },
        "outputId": "ebc1a6ad-47e1-4831-a707-3ebd8dd51866"
      },
      "source": [
        "# load model\n",
        "# load json and create model\n",
        "\n",
        "from tensorflow.keras.models import model_from_json\n",
        "\n",
        "json_file = open('clf_lstm_model.json', 'r')\n",
        "loaded_model_json = json_file.read()\n",
        "json_file.close()\n",
        "model_clf_lstm = model_from_json(loaded_model_json)\n",
        "# load weights into new model\n",
        "model_clf_lstm.load_weights('clf_lstm_model.h5')"
      ],
      "execution_count": 111,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/initializers.py:119: calling RandomUniform.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "If using Keras pass *_constraint arguments to layers.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops.py:97: calling GlorotUniform.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops.py:97: calling Orthogonal.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/init_ops.py:97: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Call initializer instance with the dtype argument instead of passing it to the constructor\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "r0kglxqndKnw",
        "colab_type": "text"
      },
      "source": [
        "##### print some statistics"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "guFe-RHztmXu",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 504
        },
        "outputId": "4d2c453a-e287-49b4-c3da-632e79324f82"
      },
      "source": [
        "pred_test = model_clf_lstm.predict(test_padded, batch_size=256,  verbose=1)\n",
        "pred_test_labels = (pred_test>0.5).astype(int)\n",
        "\n",
        "print(classification_report(y_test, pred_test_labels, digits=4))\n",
        "\n",
        "import matplotlib.pyplot as plt\n",
        "from sklearn.metrics import confusion_matrix\n",
        "CM = confusion_matrix(y_test, pred_test_labels)\n",
        "from mlxtend.plotting import plot_confusion_matrix\n",
        "fig, ax = plot_confusion_matrix(conf_mat=CM ,  figsize=(5, 5))\n",
        "plt.show()"
      ],
      "execution_count": 121,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "4872/4872 [==============================] - 7s 1ms/sample\n",
            "              precision    recall  f1-score   support\n",
            "\n",
            "           0     0.9008    0.8885    0.8946      2431\n",
            "           1     0.8905    0.9025    0.8964      2441\n",
            "\n",
            "    accuracy                         0.8955      4872\n",
            "   macro avg     0.8956    0.8955    0.8955      4872\n",
            "weighted avg     0.8956    0.8955    0.8955      4872\n",
            "\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAATkAAAE9CAYAAABwcBXnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAVJUlEQVR4nO3de7hVdZ2A8fd7OIKIgCiQXETxxlGx\nEBFGukwzmpnj5A0VxDFSc2o076Y+aYylpmk9ZeqYpaNliAZTOIx51zTQAJEBDBATFAEBacQDXrj9\n5o+zPR5UYGtnnb35+X6e5zxn7bXX3uu7H/F91tq3EyklJClXNZUeQJKKZOQkZc3IScqakZOUNSMn\nKWtGTlLWais9QFNR2zZFm46VHkNVql9dz0qPoCr10ovzefXVV+ODrquuyLXpSJt9hld6DFWpx5+4\nptIjqEp9bvDAjV7n6aqkrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKy\nZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJyk\nrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMn\nKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3I\nScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakWtBPbtux303foOpoy/g6dEXcPrx\nnwXg6IM+ydOjL2DVU9fQf6+eG9ym7+7deOyWb/L06AuYPOp82rSuBWC/up5MHnU+M8dezA/PO7LF\nH4uK9fKCBRx2yEEM6NeXA/bblxuvvw6Ar5w4lMED+zN4YH/22XNXBg/sD8Dy5cs57JCD2HGHDpx3\n9jcrOXrVqS3yziPiUOAnQCvgFymlq4rcX7Vbu24dF/3kHqbNWci227Rh4i/P4eFJz/HsX15h6Ldu\n4/qLh2ywfatWNdx62Qmc8u+jmDF3Mdt33IY1a9cBcN2Fx3D6lXczaeZL/O7Hp3LIgXU88OTsSjws\nFaC2tpYrr76Gfvv1p76+ns8eeAD/eNDB3H7H6MZtLr7wfDp26AjA1ltvzSUjL2PWn2fy52efrdTY\nVamwI7mIaAXcAHwJ2BsYFhF7F7W/LcEry+uZNmchACvfeJvZ85bQvUtH5sxfytyXlr1v+4MH7cnM\n5xczY+5iAP664g3Wr0/suEN72rfbmkkzXwJg1L1P889/37flHogKt2O3bvTbr+EorX379vSpq2PR\nwoWN16eU+O2Y3zDk+KEAtGvXjsGf/gxt2mxdkXmrWZGnqwOB51NKL6SUVgOjgSMK3N8WpVe3TvTr\n04PJz7640W326NWFlBL3XHcaE395Duf+yz8A0L1rRxYufa1xu4VLX6N7146Fz6zKeHH+fKZPm8aA\ngYMa10344xN0/cQn2H33PSo42ZahyNPVHsCCJpdfBgZtZNuPlXZtW3PnVV/hgh+No37V2xvdrrZV\nKwb3681nvvIT3nhrNb+/8etMnf0yr698swWnVSWtXLmSE4cdy1XX/ogOHTo0rh9z92iGHDe0gpNt\nOSr+wkNEnBYRUyJiSlr7RqXHKVxtqxruvHoEd90/lXGPzdjktguXvsYfn3mB5StW8ebba7hvwiz2\n69ODRUtX0KPrdo3b9ei6HYuWrih6dLWwNWvWcOLQIRw39ASOOPLoxvVr167lnnG/5Zghx1Vwui1H\nkZFbCOzU5HLP0roNpJRuTikNSCkNiNptChynOtx06fHMmbeE60Y9vtltH3xqDvvs1o22bbaiVasa\nPtt/N2bNW8Iry+upX/UWA/v2
AuCEw/Zn/OMzix5dLSilxOn/eip96vbim2eds8F1jz7yEHvuWUeP\nnj03cms1VeTp6mRgj4joTUPchgInFLi/qjf4U70ZftgAZsxdxFN3nAvAyBvvpU3rWn503lF07rQt\n//WjU5k+dxFfPvNmXqt/k+tG/YE/3n42KSXunzib+ybMAuCsH4zl5u8MpW2brXhg4mzun+grqzl5\ncuIE7hx1B/v03bfxbSIjv3s5Xzz0MMbcfRfHHn/8+26zz567Ul//OqtXr2b8f49j3Pj7qNvrY/1a\nHwCRUiruziMOA35Mw1tIbk0pXbGp7Wva7Zja7DO8sHm0ZVv2xDWVHkFV6nODBzL16SnxQdcV+j65\nlNK9wL1F7kOSNqXiLzxIUpGMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJ\nWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZO\nUtaMnKSsGTlJWTNykrJm5CRlzchJypqRk5Q1Iycpa0ZOUtaMnKSsGTlJWTNykrJm5CRlzchJypqR\nk5Q1Iycpa0ZOUtZqN3ZFRNQD6Z2Lpd+ptJxSSh0Knk2S/mYbjVxKqX1LDiJJRSjrdDUiPhMRXy0t\nd46I3sWOJUnNY7ORi4iRwIXAxaVVrYE7ihxKkppLOUdyRwFfBlYBpJQWAZ7KStoilBO51SmlROlF\niIhoV+xIktR8yonc3RHxM2C7iPga8BDw82LHkqTmsdFXV9+RUro2Ir4AvA7sCXwnpfRg4ZNJUjPY\nbORKZgBtaThlnVHcOJLUvMp5dfVUYBJwNDAEeCoiTi56MElqDuUcyV0A7JdSWg4QETsAE4FbixxM\nkppDOS88LAfqm1yuL62TpKq3qc+unltafB74U0SMo+E5uSOA6S0wmyT9zTZ1uvrOG37/Uvp5x7ji\nxpGk5rWpD+hf1pKDSFIRNvvCQ0R0Ab4F7ANs/c76lNI/FjiXJDWLcl54+DUwG+gNXAbMByYXOJMk\nNZtyIrdDSukWYE1K6Q8ppZMBj+IkbRHKeZ/cmtLvxRHxT8AiYPviRpKk5lNO5C6PiI7AecBPgQ7A\nOYVOJUnNpJwP6I8vLa4A/qHYcSSpeW3qzcA/5d0/ZPM+KaUzC5lIkprRpo7kprTYFCX71fVkwsQf\ntvRutYXodMAZlR5BVertOS9t9LpNvRn49kKmkaQW5B+XlpQ1Iycpa0ZOUtbK+WbgPSPi4YiYWbr8\nyYi4pPjRJOlvV86R3M9p+MPSawBSStOBoUUOJUnNpZzIbZNSmvSedWuLGEaSmls5kXs1Inbj3T8u\nPQRYXOhUktRMyvns6unAzUBdRCwE5gEnFjqVJDWTcj67+gJwcES0A2pSSvWbu40kVYtyvhn4O++5\nDEBK6bsFzSRJzaac09VVTZa3Bg4HZhUzjiQ1r3JOVzf4xHxEXAvcX9hEktSMPsonHrYBejb3IJJU\nhHKek5vBu98r1wroAvh8nKQtQjnPyR3eZHktsCSl5JuBJW0RNhm5iGgF3J9SqmuheSSpWW3yObmU\n0jpgTkT0aqF5JKlZlXO62gl4NiIm0eTtJCmlLxc2lSQ1k3Iid2nhU0hSQcqJ3GEppQubroiIq4E/\nFDOSJDWfct4n94UPWPel5h5Ekoqwqb+7+g3g34BdI2J6k6vaAxOKHkySmsOmTldHAb8Hvg9c1GR9\nfUrpr4VOJUnNZFN/d3UFsAIY1nLjSFLz8q91ScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQk\nZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5\nSVkzcpKyZuQkZc3IScqakZOUNSMnKWtGTlLWjJykrBk5SVkzcpKyZuQkZc3IScqakZOUNSMnKW
tG\nTlLWjJykrNVWeoCPqwULFnDqV09i6dIlRAQnn3IaZ5x5FpeNvJTx94yjpqaGLl27cvMtt9G9e3dW\nrFjByV85kQUvvcTadWs5+5zzOWnEVyv9MNSMen5iO37xvZPoukN7UoJbx07ghjsf48qzj+Swz/Vl\n9Zp1zHv5VU4beQcrVr4JwPknH8KIIw5k3fr1nPeDMTz05CzatK7loVvOpnXrWmpbteK3Dz3D5Tfd\nW+FHVzmRUirmjiNuBQ4HlqaU+pZzm/33H5Am/GlKIfNUm8WLF/PK4sXs178/9fX1DB60P3eP+R09\nevakQ4cOANzw0+uYPevP/PTGm/jBVVeyYsUKrvj+1SxbtoxP7dOH+S+/QuvWrSv8SFpOpwPOqPQI\nhdqxcwd27NyBabNfZttt2jBx1IUcd+7N9Oi6HY9Nfo5169Zz+ZlHAHDJdeOo23VHbv/+CD574rV0\n69KRe286g32P/C7r1yfatW3NqjdXU1tbwyO3nsv514xh0oz5lX2ABXp7zt2sf2NpfNB1RZ6u3gYc\nWuD9b9G6devGfv37A9C+fXvq6vZi0aKFjYEDeOONVUQ0/HeLCFbW15NSYtXKlXTafntqaz0Qz8kr\nr77OtNkvA7DyjbeZPe8VunfZjoefms26desBmDRjHj0+sR0Ah3/+k/zm/qmsXrOWFxct5y8LXuWA\nvrsAsOrN1QBsVduK2tpWFHUwsyUo7P+SlNLjEbFLUfefkxfnz2fatGc4YOAgAEZe+m1+fccv6dix\nI/c9+CgAX/+3Mxhy1JfZtVd36uvr+dWou6ip8SnVXPXqtj39+vRk8sz5G6w/6YgDGfPAVAB6dOnI\nn5ocnS1c+n9079oRgJqaYOKoC9ltpy787K7HmTzzxZYaver4f0mFrVy5kmHHHcM1P/xx41HcZd+7\ngufnLWDosOHcdOP1ADz4wP188lP9eOGlRfxpyjTOOesMXn/99UqOroK0a9uaO689lQuuHUv9qrca\n13/rlC+ybt16Rt87ebP3sX594u+GXsXuX7yEAX13Zu/duhU5clWreOQi4rSImBIRU5a9uqzS47So\nNWvWMOy4Yzh+2HCOPOro911//LDh/O63YwH41e3/yRFHHU1EsNvuu7PLLr2ZM3t2S4+sgtXW1nDn\ntV/jrt9PYdwj/9u4/sR/HsRhn+vLiG/f1rhu4bIV9NyxU+PlHl07sWjpig3ub8XKN/nDlOc4ZPDe\nhc9erSoeuZTSzSmlASmlAV06d6n0OC0mpcTXv3YKfer24qxzzm1c//zcuY3L4+8Zx5596gDYaade\nPPbIwwAsWbKE556bQ+9dd23ZoVW4m0YOZ868V7jujkca131h8F6cO+Jghpz9M958a03j+v95bDrH\nfrE/rbeqZefuO7B7ry5Mnjmfzp22peO2bQHYus1WHDSojjnzl7T4Y6kWPnNdIRMnTGDUr39F3777\nMmj/fgBcdvmV3PaftzD3uTnURA29dt6Z6264CYCLvn0pp50yggH99iWRuOLKq+ncuXMFH4Ga2+B+\nuzL88EHMeG4hT42+CICR19/DDy84ljataxn/Hw2vLk+aMZ8zrxjNrBdeYewDz/DM2G+zdt16zr7q\nbtavT+zYuQM//+6/0KqmhpqaYOyDU/n9EzMr+dAqqsi3kNwJfB7oDCwBRqaUbtnUbT5ObyHRh5f7\nW0j00W3qLSRFvro6rKj7lqRyVfw5OUkqkpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnK\nmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KS\nsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oyc\npKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpK
wZOUlZM3KSsmbkJGXNyEnKmpGTlDUj\nJylrRk5S1oycpKwZOUlZM3KSsmbkJGXNyEnKmpGTlDUjJylrRk5S1oycpKwZOUlZM3KSshYppUrP\n0CgilgEvVnqOKtIZeLXSQ6gq+W9jQzunlLp80BVVFTltKCKmpJQGVHoOVR//bZTP01VJWTNykrJm\n5KrbzZUeQFXLfxtl8jk5SVnzSE5S1oxcFYqIQyNiTkQ8HxEXVXoeVY+IuDUilkbEzErPsqUwclUm\nIloBNwBfAvYGhkXE3pWdSlXkNuDQSg+xJTFy1Wcg8HxK6YWU0mpgNHBEhWdSlUgpPQ78tdJzbEmM\nXPXpASxocvnl0jpJH4GRk5Q1I1d9FgI7Nbncs7RO0kdg5KrPZGCPiOgdEa2BocA9FZ5J2mIZuSqT\nUloLnAHcD8wC7k4pPVvZqVQtIuJO4EmgT0S8HBGnVHqmaucnHiRlzSM5SVkzcpKyZuQkZc3IScqa\nkZOUNSOnFhERK0u/u0fEmM1se3ZEbPMh7//zETG+3PXv2WZERFz/Ifc3PyI6f5jbqDKMnD6y0jem\nfCgppUUppSGb2exs4ENFTtoYI6f3iYhdImJ2RPw6ImZFxJh3jqxKRzBXR8RU4NiI2C0i7ouIpyPi\niYioK23XOyKejIgZEXH5e+57Zmm5VURcGxEzI2J6RHwzIs4EugOPRsSjpe0OKd3X1Ij4TURsW1p/\naGnOqcDRZTyugaX7eSYiJkZEnyZX7xQRj0XE3IgY2eQ2J0bEpIiYFhE/+yhhV4WllPzxZ4MfYBcg\nAZ8uXb4VOL+0PB/4VpNtHwb2KC0PAh4pLd8DnFRaPh1Y2eS+Z5aWvwGMAWpLl7dvso/OpeXOwONA\nu9LlC4HvAFvT8G0tewAB3A2M/4DH8vl31gMdmuzrYGBsaXkEsBjYAWgLzAQGAHsB/w1sVdruxiaP\nqXFGf6r7p/YjdFEfDwtSShNKy3cAZwLXli7fBVA6ohoM/CYi3rldm9LvTwPHlJZ/BVz9Afs4GLgp\nNXyUjZTSB31P2t/R8OWhE0r7aE3Dx5rqgHkppbmlWe4ATtvMY+oI3B4Re9AQ8a2aXPdgSml56b7+\nC/gMsBbYH5hc2ndbYOlm9qEqY+S0Me/9vF/Ty6tKv2uA11JK/cq8j48iaAjQsA1WRmxsn5vyPeDR\nlNJREbEL8FiT6z7o8QZwe0rp4o+wL1UJn5PTxvSKiANLyycAf3zvBiml14F5EXEsQDT4VOnqCTR8\ngwrA8I3s40HgXyOitnT77Uvr64H2peWngE9HxO6lbdpFxJ7AbGCXiNittN0GEdyIjrz7tVUj3nPd\nFyJi+4hoCxxZmv9hYEhEdH1nvojYuYz9qIoYOW3MHOD0iJgFdAL+YyPbDQdOiYj/BZ7l3a9qP6t0\n+xls/JuNfwG8BEwv3f6E0vqbgfsi4tGU0jIagnRnREyndKqaUnqLhtPT/ym98FDOaeQPgO9HxDO8\n/yxmEjAWmE7Dc3VTUkp/Bi4BHijt+0GgWxn7URXxW0j0PqVTufEppb4VHkX6m3kkJylrHslJyppH\ncpKyZuQkZc3IScqakZOUNSMnKWtGTlLW/h9Sgxu+d/qOIgAAAABJRU5ErkJggg==\n",
            "text/plain": [
              "<Figure size 360x360 with 1 Axes>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QzAb86QrdCi1",
        "colab_type": "text"
      },
      "source": [
        "#### Application of the classifier"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VZKVGAIfBeT_",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def tweets_report(tweet_list):\n",
        "    proba_list = model_clf_nb.predict_proba(tweet_list)[:,1]\n",
        "\n",
        "    index_max = np.argmax(proba_list)\n",
        "    q3_index = np.argsort(proba_list)[len(proba_list)-len(proba_list)//4]\n",
        "    median_index = np.argsort(proba_list)[len(proba_list)//2]\n",
        "\n",
        "    dummy_list = list(map(lambda x : (x>0.5).astype(int), proba_list))\n",
        "\n",
        "    print('Proportion of tweets fooled the classifier: ', np.mean(dummy_list), '\\n',\n",
        "    'The best tweet: ', tweet_list[index_max], np.max(proba_list), '\\n',\n",
        "    'The 75th percentile tweet: ', tweet_list[q3_index], proba_list[q3_index], '\\n',\n",
        "    'The median tweet: ', tweet_list[median_index], proba_list[median_index])\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HZF8cxRuldmj",
        "colab_type": "text"
      },
      "source": [
        "## Generator Modeling"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "slVplfMIlfmo",
        "colab_type": "text"
      },
      "source": [
        "### Markov Chain Model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RyKrOb5Np1N2",
        "colab_type": "text"
      },
      "source": [
        "#### baseline model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "POr9Y7-pxUXx",
        "colab_type": "text"
      },
      "source": [
        "##### load data"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Ju4IcLVxUO-P",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# markovify takes string as input\n",
        "# join with '. ' (period + space) so markovify's sentence splitter sees tweet boundaries\n",
        "training_text = '. '.join(df_trumptweet['cleaned_text'])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "CANgsy5UxXwm",
        "colab_type": "text"
      },
      "source": [
        "##### train model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mO7fr5svnOwR",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "model_markov_baseline = markovify.Text(training_text)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QuUhdHi8xksm",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "T1C9gr_Ixmz0",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "with open('markov_baseline.json', 'w') as f:\n",
        "    json.dump(model_markov_baseline.to_json(), f)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "y1tc073kxo5v",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZCoIlh4v-Vlm",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# with open('markov_baseline.json') as f:    \n",
        "#     model_markov_baseline = json.load(f)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "B8fZBM1nxt10",
        "colab_type": "text"
      },
      "source": [
        "##### print statistics & generate sentences"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JqB3oQFNnZA3",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# generate sentence\n",
        "sentence_markov_baseline = []\n",
        "for _ in range(1000):\n",
        "    sentence = model_markov_baseline.make_short_sentence(140)\n",
        "    sentence_markov_baseline.append(sentence)\n",
        "    #print(sentence)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0-LW8744yhyQ",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 85
        },
        "outputId": "401e8bf5-a1f6-4efb-98b5-c6f8cbe61a92"
      },
      "source": [
        "tweets_report(sentence_markov_baseline)"
      ],
      "execution_count": 127,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Proportion of tweets fooled the classifier:  0.93 \n",
            " The best tweet:  From a large shipment of military weapons and supplies to my GREAT members at Trump International Golf Links in Scotland! 0.9999707653840255 \n",
            " The 75th percentile tweet:  Watch it, should be ashamed for using the celebrity’s name followed by new champion Chris Weidman! 0.9848810213321352 \n",
            " The median percentile tweet:  #Debates .USA has the guts to even greater unemployment. 0.9406103707039736\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xMJyKKJP15jJ",
        "colab_type": "text"
      },
      "source": [
        "#### '3-grams' model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FmvefTkN2ABu",
        "colab_type": "text"
      },
      "source": [
        "Using three - instead of two - words ahead to predict the following word. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "BrywRJZ-2A2W",
        "colab": {}
      },
      "source": [
        "training_text = '.'.join(df_trumptweet['cleaned_text'])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "HYvNyccz2A2Z",
        "colab": {}
      },
      "source": [
        "model_markov_3gram = markovify.Text(training_text, state_size=3)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "xogm99jA2A2c",
        "colab": {}
      },
      "source": [
        "with open('markov_3gram.json', 'w') as f:\n",
        "    json.dump(model_markov_3gram.to_json(), f)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "KBoTFsV02A2f",
        "colab": {}
      },
      "source": [
        "# with open('markov_3gram.json') as f:\n",
        "#     model_markov_3gram = markovify.Text.from_json(json.load(f))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "0rHKO3Y82A2m",
        "colab": {}
      },
      "source": [
        "sentence_markov_3gram = []\n",
        "for _ in range(1000):\n",
        "    sentence = model_markov_3gram.make_short_sentence(140)\n",
        "    sentence_markov_3gram.append(sentence)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "outputId": "1a7ec91c-5ba7-4864-cf1e-5275def8c4bc",
        "id": "tUevC3XH2A2o",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 85
        }
      },
      "source": [
        "tweets_report(sentence_markov_3gram)"
      ],
      "execution_count": 128,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Proportion of tweets fooled the classifier:  0.946 \n",
            " The best tweet:  The Rigged Witch Hunt headed by 13 Angry Democrats and others who are totally corrupt and/or conflicted. 0.9999878115987703 \n",
            " The 75th percentile tweet:  Thank you, a very wise move that Ted Cruz FORGOT to file. 0.9875842721781313 \n",
            " The median percentile tweet:  In the meantime they continue to be drawn to it. 0.9554522093898657\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "akwN1ybEp5wZ",
        "colab_type": "text"
      },
      "source": [
        "#### add POS, state_size = 3 to model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VlpSZ0Y30XTq",
        "colab_type": "text"
      },
      "source": [
        "##### define model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-HHwgDQ2odRz",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "nlp = spacy.load(\"en\")\n",
        "\n",
        "class POSifiedText(markovify.Text):\n",
        "\n",
        "    def word_split(self, sentence):\n",
        "        return [\"::\".join((word.orth_, word.pos_)) for word in nlp(sentence)]\n",
        "\n",
        "    def word_join(self, words):\n",
        "        sentence = \" \".join(word.split(\"::\")[0] for word in words)\n",
        "\n",
        "        return sentence"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "X0HegHTYrdup",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "model_markov_pos = POSifiedText(training_text, state_size=3)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "kanzQ10r0zbb",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FJ-apu3I8IXJ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "with open('model_markov_pos.json', 'w') as f:\n",
        "    json.dump(model_markov_pos.to_json(), f)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "j7Zht0zu02Jc",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hmoEs0KG04Ae",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# with open('model_markov_pos.json') as f:\n",
        "#     model_markov_pos = POSifiedText.from_json(json.load(f))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9i7bpt8T09bC",
        "colab_type": "text"
      },
      "source": [
        "##### print statistics & generate sentences"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "53yMCgu3uSfE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "sentence_markov_pos = []\n",
        "for _ in range(1000):\n",
        "    sentence = model_markov_pos.make_short_sentence(140)\n",
        "    sentence_markov_pos.append(sentence)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KmU1GN_-1Lya",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 85
        },
        "outputId": "59659cd9-9a8a-4b2f-b369-e1eda5157006"
      },
      "source": [
        "tweets_report(sentence_markov_pos)"
      ],
      "execution_count": 131,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Proportion of tweets fooled the classifier:  0.947 \n",
            " The best tweet:  We want victory .. Trump International Golf Links Scotland . 0.9999837412710088 \n",
            " The 75th percentile tweet:  I will beat Hillary easily , but Lindsey Graham says I wo n't be around much longer , it 's called Envy . 0.986747834772992 \n",
            " The median percentile tweet:  .On behalf of an entire Nation , THANK YOU for today 's update and GREAT WORK ! 0.9502348960281569\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4w_obLvrYagC",
        "colab_type": "text"
      },
      "source": [
        "### LSTM RNN model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "haflw4R0C01r",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!pip install -q textgenrnn"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rIW0K59wDSHZ",
        "colab_type": "code",
        "outputId": "02316b48-7b6b-438b-f7c2-94c22fb3f90b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 80
        }
      },
      "source": [
        "from textgenrnn import textgenrnn"
      ],
      "execution_count": 58,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Using TensorFlow backend.\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "<p style=\"color: red;\">\n",
              "The default version of TensorFlow in Colab will soon switch to TensorFlow 2.x.<br>\n",
              "We recommend you <a href=\"https://www.tensorflow.org/guide/migrate\" target=\"_blank\">upgrade</a> now \n",
              "or ensure your notebook will continue to use TensorFlow 1.x via the <code>%tensorflow_version 1.x</code> magic:\n",
              "<a href=\"https://colab.research.google.com/notebooks/tensorflow_version.ipynb\" target=\"_blank\">more info</a>.</p>\n"
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JbCr4wkPJJf1",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "trumptweet_list = list(df_trumptweet['cleaned_text'])\n",
        "trumptweet_file = open(\"trumptweet_text.txt\",\"w+\")\n",
        "trumptweet_list = \"\\n\".join(trumptweet_list)\n",
        "trumptweet_file.writelines(trumptweet_list)\n",
        "trumptweet_file.close() "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Sd3sLYtvIrOk",
        "colab_type": "text"
      },
      "source": [
        "#### character-level"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "MmabiB7_6_XY",
        "colab_type": "text"
      },
      "source": [
        "##### load data"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TSjjtU-gYhoG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "file_name = \"trumptweet_text.txt\"\n",
        "model_name = 'model_rnn_char'"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UVmVapPA7R6I",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 71
        },
        "outputId": "9bf317e7-b89c-4562-94a5-90853b8e0990"
      },
      "source": [
        "# df_trumptweet['cleaned_text'].to_csv( 'trumptweet_cleaned_text.csv', index = False)\n",
        "# file_name = 'trumptweet_cleaned_text.csv'"
      ],
      "execution_count": 59,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: FutureWarning: The signature of `Series.to_csv` was aligned to that of `DataFrame.to_csv`, and argument 'header' will change its default value from False to True: please pass an explicit value to suppress this warning.\n",
            "  \"\"\"Entry point for launching an IPython kernel.\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Jjwfw-va7BbZ",
        "colab_type": "text"
      },
      "source": [
        "##### define model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fqgd1KQHDUbU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "model_cfg_char = {\n",
        "    'word_level': False,   # set to True if want to train a word-level model (requires more data and smaller max_length)\n",
        "    'rnn_size': 128,   # number of LSTM cells of each layer (128/256 recommended)\n",
        "    'rnn_layers': 3,   # number of LSTM layers (>=2 recommended)\n",
        "    'rnn_bidirectional': True,   # consider text both forwards and backward, can give a training boost\n",
        "    'max_length': 30,   # number of tokens to consider before predicting the next (20-40 for characters, 5-10 for words recommended)\n",
        "    'max_words': 10000,   # maximum number of words to model; the rest will be ignored (word-level model only)\n",
        "}\n",
        "\n",
        "train_cfg_char = {\n",
        "    'line_delimited': True,   # set to True if each text has its own line in the source file\n",
        "    'num_epochs': 20,   # set higher to train the model for longer\n",
        "    'gen_epochs': 10,   # generates sample text from model after given number of epochs\n",
        "    'train_size': 0.8,   # proportion of input data to train on: setting < 1.0 limits model from learning perfectly\n",
        "    'dropout': 0.0,   # ignore a random proportion of source tokens each epoch, allowing model to generalize better\n",
        "    'validation': True,   # If train_size < 1.0, test on holdout dataset; will make overall training slower\n",
        "    'is_csv': False   # set to True if file is a CSV exported from Excel/BigQuery/pandas\n",
        "}"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6_IKRr2S7eeT",
        "colab_type": "text"
      },
      "source": [
        "##### train model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nZhSJN2KEBtO",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "a975ef57-d2d1-41b3-edd1-643ae4ffe745"
      },
      "source": [
        "textgen_char = textgenrnn(name=model_name)\n",
        "\n",
        "train_function_char = textgen_char.train_from_file if train_cfg_char['line_delimited'] else textgen_char.train_from_largetext_file\n",
        "\n",
        "train_function_char(\n",
        "    file_path=file_name,\n",
        "    new_model=True,\n",
        "    num_epochs=train_cfg_char['num_epochs'],\n",
        "    gen_epochs=train_cfg_char['gen_epochs'],\n",
        "    batch_size=1024, \n",
        "    train_size=train_cfg_char['train_size'],\n",
        "    dropout=train_cfg_char['dropout'],\n",
        "    validation=train_cfg_char['validation'],\n",
        "    is_csv=train_cfg_char['is_csv'],\n",
        "    rnn_layers=model_cfg_char['rnn_layers'],\n",
        "    rnn_size=model_cfg_char['rnn_size'],\n",
        "    rnn_bidirectional=model_cfg_char['rnn_bidirectional'],\n",
        "    max_length=model_cfg_char['max_length'],\n",
        "    dim_embeddings=100,\n",
        "    word_level=model_cfg_char['word_level'])"
      ],
      "execution_count": 99,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "24,594 texts collected.\n",
            "Training new model w/ 3-layer, 128-cell Bidirectional LSTMs\n",
            "Training on 2,182,817 character sequences.\n",
            "Epoch 1/20\n",
            "2131/2131 [==============================] - 330s 155ms/step - loss: 1.8683 - val_loss: 1.4832\n",
            "Epoch 2/20\n",
            "2131/2131 [==============================] - 327s 153ms/step - loss: 1.4100 - val_loss: 1.3845\n",
            "Epoch 3/20\n",
            "2131/2131 [==============================] - 327s 153ms/step - loss: 1.3249 - val_loss: 1.3347\n",
            "Epoch 4/20\n",
            "2131/2131 [==============================] - 325s 152ms/step - loss: 1.2770 - val_loss: 1.3137\n",
            "Epoch 5/20\n",
            "2131/2131 [==============================] - 327s 153ms/step - loss: 1.2421 - val_loss: 1.2933\n",
            "Epoch 6/20\n",
            "2131/2131 [==============================] - 326s 153ms/step - loss: 1.2138 - val_loss: 1.2769\n",
            "Epoch 7/20\n",
            "2131/2131 [==============================] - 326s 153ms/step - loss: 1.1883 - val_loss: 1.2659\n",
            "Epoch 8/20\n",
            "2131/2131 [==============================] - 327s 153ms/step - loss: 1.1652 - val_loss: 1.2586\n",
            "Epoch 9/20\n",
            "2131/2131 [==============================] - 330s 155ms/step - loss: 1.1432 - val_loss: 1.2502\n",
            "Epoch 10/20\n",
            "2131/2131 [==============================] - 329s 154ms/step - loss: 1.1214 - val_loss: 1.2486\n",
            "####################\n",
            "Temperature: 0.2\n",
            "####################\n",
            "When will the world that the Democrats are all about the U.S. Senate for the U.S. today. We will be a great time to get the world. If you are great for the U.S. and the U.S. has been a great guy. I will be at the @WhiteHouse today at the @WhiteHouse today at the @WhiteHouse today at the @WhiteHous\n",
            "\n",
            "Thank you @wordoreman. Thank you for the U.S. and the U.S. is a total disaster. The worst is that the Democrats are all about the U.S. in the U.S. and the world. The worst is a total disaster. Be sure to take the most important things are all about the U.S. and the U.S. has been a total loser.\n",
            "\n",
            "Will be interviewed on @foxandfriends at 7:00 A.M. and the U.S. will be a great player and the problem is a total loser.\n",
            "\n",
            "####################\n",
            "Temperature: 0.5\n",
            "####################\n",
            "The warm newspapers and losers. The problem with your border security and gave considered by a fantastic guy!\n",
            "\n",
            "A great honor to have the best economy is a very bad and losers are watching the people of the United States and the U.S. and the U.S. and the Republican Party's jeners and the gold class of the United States this year on the U.S. MAKE AMERICA GREAT AGAIN!\n",
            "\n",
            "The only phone of the United States and the U.S. Military and the @WhiteHouse today at the @WhiteHouse and the failing @nytimes will soon be there and produce the many way to succeed. What a very positive and pathetic job at @BarackObama will be a great guy!\n",
            "\n",
            "####################\n",
            "Temperature: 1.0\n",
            "####################\n",
            "Why aren't it not Nashville!\n",
            "\n",
            "Think you to America’s families, success walked NAFTA ITHIP. The rigged signment' can president WH had to pay. #ImWithYou \n",
            "\n",
            "Unemployment who ups the Perule Steve signs will be great Mink let Yom, I am long dealing fireward (a. But is the way Obama.\n",
            "\n",
            "Epoch 11/20\n",
            "2131/2131 [==============================] - 329s 154ms/step - loss: 1.0990 - val_loss: 1.2475\n",
            "Epoch 12/20\n",
            "2131/2131 [==============================] - 329s 154ms/step - loss: 1.0772 - val_loss: 1.2413\n",
            "Epoch 13/20\n",
            "2131/2131 [==============================] - 328s 154ms/step - loss: 1.0549 - val_loss: 1.2437\n",
            "Epoch 14/20\n",
            "2131/2131 [==============================] - 331s 155ms/step - loss: 1.0322 - val_loss: 1.2421\n",
            "Epoch 15/20\n",
            "2131/2131 [==============================] - 326s 153ms/step - loss: 1.0093 - val_loss: 1.2455\n",
            "Epoch 16/20\n",
            "2131/2131 [==============================] - 330s 155ms/step - loss: 0.9857 - val_loss: 1.2465\n",
            "Epoch 17/20\n",
            "2131/2131 [==============================] - 333s 156ms/step - loss: 0.9627 - val_loss: 1.2516\n",
            "Epoch 18/20\n",
            "2131/2131 [==============================] - 331s 155ms/step - loss: 0.9395 - val_loss: 1.2589\n",
            "Epoch 19/20\n",
            "2131/2131 [==============================] - 328s 154ms/step - loss: 0.9174 - val_loss: 1.2652\n",
            "Epoch 20/20\n",
            "2131/2131 [==============================] - 329s 155ms/step - loss: 0.8968 - val_loss: 1.2707\n",
            "####################\n",
            "Temperature: 0.2\n",
            "####################\n",
            "I will be interviewed on @foxandfriends at 7:00 A.M. Enjoy!\n",
            "\n",
            "The U.S. has been a total disaster and a great guy and special interests and state of the people of the U.S. Senate and the people of Indiana! #MakeAmericaGreatAgain #Trump2016 \n",
            "\n",
            "The Fake News Media has been a total disaster. They are all talk and no action to the U.S. Military and the United States are working hard to do something that I have always out of the State Department of Justice of the United States with the U.S. in the world. I am going to have a clue. We will n\n",
            "\n",
            "####################\n",
            "Temperature: 0.5\n",
            "####################\n",
            "The more than @chucktodd realize that the Democrats are with you all of the building and is not a very soon.\n",
            "\n",
            ".@MissUSA contestant and loving the fact that I have long been a terrible job as a fantastic job done by a constitutional political career, and the worst thing you can also imagine what the salt of the Democrats are with the economy, and your face on the past 100 days will be doing really good. St\n",
            "\n",
            "I am so happy that I have ever had a great time high and shirts and the best resort on the terrorist attack, the so-called soldiers will be a great guy and the family of the political survival of the massive partisan control states. Be sure to watch Celebrity Apprentice tonight at 8:00 P.M. (I don\n",
            "\n",
            "####################\n",
            "Temperature: 1.0\n",
            "####################\n",
            "@DannyZuker @THEGaryBusey calls hard (and talk. -- Mark, Margaret Of IA Country lie: \n",
            "\n",
            "@Sean_LasforRhyans Thanks Night! \n",
            "\n",
            "@MikeRobertNews True and thanks!\n",
            "\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4Xrzvxap7amh",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OojavG4e_HL2",
        "colab_type": "text"
      },
      "source": [
        "The model saves the weights automatically after each epoch. "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "3bIXfS5k7jLH",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cWzC4-PiXpIY",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "textgen = textgenrnn(weights_path=model_name+'_weights.hdf5',\n",
        "                                  vocab_path=model_name+'_vocab.json',\n",
        "                                  config_path=model_name+'_config.json')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nEUm-oQ870WR",
        "colab_type": "text"
      },
      "source": [
        "##### print statistics and generate sentences"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "K1MNjl1bAFOY",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "temperature = [0.5]   \n",
        "prefix = None  \n",
        "\n",
        "if train_cfg_char['line_delimited']:\n",
        "  n = 1000\n",
        "  max_gen_length = 60 if model_cfg_char['word_level'] else 300\n",
        "else:\n",
        "  n = 1\n",
        "  max_gen_length = 2000 if model_cfg_char['word_level'] else 10000\n",
        "\n",
        "sentences_char = textgen_char.generate(n=n, temperature=0.5, max_gen_length = 280, return_as_list=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2blcBR4yEbXA",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        },
        "outputId": "5e4e8d6f-95d4-4fa1-922a-f6d8afe0ad24"
      },
      "source": [
        "tweets_report(sentences_char)"
      ],
      "execution_count": 102,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Proportion of tweets fooled the classifier:  0.985 \n",
            " The best tweet:  .@marklevinshow interview discussing the Celebrity Apprentice and Crooked Hillary Clinton will only get born in the world. I will be the best thing to have the haters and the truth in the New York Times - just reported that I am the only one that the Fake News Media will do a g 0.9999960971624224 \n",
            " The 75th percentile tweet:  My @SquawkCNBC interview discussing the boardroom of the United States of Americans will be a great deal for the fact that China is always going to be the greatest of the world, and all others, get ready for a winner! 0.9959520972232001 \n",
            " The median tweet:  A MUST has spent records and positive process of Edwarding to President Obama for a failed policy in the tragic deal done by @SecretaryZinkens will be a great guy who spends and the field in the U.S. Military will be a disgrace to the American people and the victims of the Stoc 0.9798632189690395\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "7qsd1mY6_UYT",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# save the list (1000 generated tweets)\n",
        "charlevel_file = open(\"generated_tweet_charlevel.txt\",\"w+\")\n",
        "sentences_char_temp = \"\\n\".join(sentences_char)\n",
        "charlevel_file.writelines(sentences_char_temp)\n",
        "charlevel_file.close() "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "drzgUChUFCgK",
        "colab_type": "text"
      },
      "source": [
        "#### word-level"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "UuNpeTuvQ-KX",
        "colab_type": "text"
      },
      "source": [
        "##### load data"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "iAqJkBFJJz6I",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "file_name = \"trumptweet_text.txt\"\n",
        "model_name = 'model_rnn_word'"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "SCyaF7crOrUD",
        "colab_type": "text"
      },
      "source": [
        "##### define model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "S-ED3AvXJb9d",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "model_cfg_word = {\n",
        "    'word_level': True,   # set to True if want to train a word-level model (requires more data and smaller max_length)\n",
        "    'rnn_size': 256,   # number of LSTM cells of each layer (128/256 recommended)\n",
        "    'rnn_layers': 3,   # number of LSTM layers (>=2 recommended)\n",
        "    'rnn_bidirectional': True,   # consider text both forwards and backward, can give a training boost\n",
        "    'max_length': 8,   # number of tokens to consider before predicting the next (20-40 for characters, 5-10 for words recommended)\n",
        "    'max_words': 10000,   # maximum number of words to model; the rest will be ignored (word-level model only)\n",
        "}\n",
        "\n",
        "train_cfg_word = {\n",
        "    'line_delimited': True,   # set to True if each text has its own line in the source file\n",
        "    'num_epochs': 30,   # set higher to train the model for longer [# it was 20]\n",
        "    'gen_epochs': 10,   # generates sample text from model after given number of epochs\n",
        "    'train_size': 0.8,   # proportion of input data to train on: setting < 1.0 limits model from learning perfectly\n",
        "    'dropout': 0.0,   # ignore a random proportion of source tokens each epoch, allowing model to generalize better\n",
        "    'validation': True,   # If train_size < 1.0, test on holdout dataset; will make overall training slower\n",
        "    'is_csv': False   # set to True if file is a CSV exported from Excel/BigQuery/pandas\n",
        "}"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5LlJbbAdRLto",
        "colab_type": "text"
      },
      "source": [
        "##### train model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qcwNGXedKR0Z",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "e1a90ba1-055b-4e7c-e2e1-515e0e792620"
      },
      "source": [
        "textgen_word = textgenrnn(name=model_name)\n",
        "\n",
        "train_function_word = textgen_word.train_from_file if train_cfg_word['line_delimited'] else textgen_word.train_from_largetext_file\n",
        "\n",
        "train_function_word(\n",
        "    file_path=file_name,\n",
        "    new_model=True,\n",
        "    num_epochs=train_cfg_word['num_epochs'],\n",
        "    gen_epochs=train_cfg_word['gen_epochs'],\n",
        "    batch_size=1024, \n",
        "    train_size=train_cfg_word['train_size'],\n",
        "    dropout=train_cfg_word['dropout'],\n",
        "    validation=train_cfg_word['validation'],\n",
        "    is_csv=train_cfg_word['is_csv'],\n",
        "    rnn_layers=model_cfg_word['rnn_layers'],\n",
        "    rnn_size=model_cfg_word['rnn_size'],\n",
        "    rnn_bidirectional=model_cfg_word['rnn_bidirectional'],\n",
        "    max_length=model_cfg_word['max_length'],\n",
        "    dim_embeddings=100,\n",
        "    word_level=model_cfg_word['word_level'])"
      ],
      "execution_count": 106,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "24,594 texts collected.\n",
            "Training new model w/ 3-layer, 256-cell Bidirectional LSTMs\n",
            "Training on 484,200 word sequences.\n",
            "Epoch 1/30\n",
            "472/472 [==============================] - 91s 193ms/step - loss: 6.0433 - val_loss: 5.1758\n",
            "Epoch 2/30\n",
            "472/472 [==============================] - 87s 185ms/step - loss: 4.7993 - val_loss: 4.8121\n",
            "Epoch 3/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 4.2842 - val_loss: 4.7168\n",
            "Epoch 4/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 3.8224 - val_loss: 4.7920\n",
            "Epoch 5/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 3.3722 - val_loss: 4.9127\n",
            "Epoch 6/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 2.9538 - val_loss: 5.0809\n",
            "Epoch 7/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 2.5777 - val_loss: 5.2312\n",
            "Epoch 8/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 2.2443 - val_loss: 5.4092\n",
            "Epoch 9/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 1.9591 - val_loss: 5.6009\n",
            "Epoch 10/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 1.7121 - val_loss: 5.7631\n",
            "####################\n",
            "Temperature: 0.2\n",
            "####################\n",
            "@ @ billmaher thanks .\n",
            "\n",
            "@ @ billmaher thanks .\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "####################\n",
            "Temperature: 0.5\n",
            "####################\n",
            "@ thanks !\n",
            "\n",
            "@ @ billmaher thanks .\n",
            "\n",
            "@ @ erictrump @ ivankatrump thanks anthony .\n",
            "\n",
            "####################\n",
            "Temperature: 1.0\n",
            "####################\n",
            "entrepreneurs : trust your instincts and no matter what is happening !\n",
            "\n",
            "praying for great people at trump tower in ireland . such a big success !\n",
            "\n",
            "join my team on saturday at 7 : 00 p . m . ( eastern ) . enjoy !\n",
            "\n",
            "Epoch 11/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 1.4871 - val_loss: 5.9736\n",
            "Epoch 12/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 1.2980 - val_loss: 6.1568\n",
            "Epoch 13/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 1.1375 - val_loss: 6.3369\n",
            "Epoch 14/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 1.0016 - val_loss: 6.5002\n",
            "Epoch 15/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 0.8873 - val_loss: 6.6464\n",
            "Epoch 16/30\n",
            "471/472 [============================>.] - ETA: 0s - loss: 0.7915Epoch 17/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.7209 - val_loss: 6.9475\n",
            "Epoch 18/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.6661 - val_loss: 7.0622\n",
            "Epoch 19/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.6161 - val_loss: 7.1625\n",
            "Epoch 20/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.5829 - val_loss: 7.2771\n",
            "####################\n",
            "Temperature: 0.2\n",
            "####################\n",
            "@ thanks .\n",
            "\n",
            ". @ barackobama reported over the election when he can ' t said in the race , he was in the world i a fantastic guy !\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "####################\n",
            "Temperature: 0.5\n",
            "####################\n",
            "the fake news is at it again , this a total mess !\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "####################\n",
            "Temperature: 1.0\n",
            "####################\n",
            "@ barackobama has played golf in an debate . @ surplus and ( cont )\n",
            "\n",
            "megyn kelly has two great damage to eric obstruction . race will be court -\n",
            "\n",
            "obama ' s speech ' on afghanistan are killing the $ 22 . highly russian record rate is going to speak crooked hillary ( because he are strictly collusion !\n",
            "\n",
            "Epoch 21/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 0.5587 - val_loss: 7.3366\n",
            "Epoch 22/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.5374 - val_loss: 7.3726\n",
            "Epoch 23/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.5169 - val_loss: 7.4446\n",
            "Epoch 24/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 0.5064 - val_loss: 7.4988\n",
            "Epoch 25/30\n",
            "472/472 [==============================] - 88s 187ms/step - loss: 0.4947 - val_loss: 7.5277\n",
            "Epoch 26/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.4819 - val_loss: 7.5650\n",
            "Epoch 27/30\n",
            "472/472 [==============================] - 89s 189ms/step - loss: 0.4697 - val_loss: 7.5860\n",
            "Epoch 28/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.4595 - val_loss: 7.6148\n",
            "Epoch 29/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.4511 - val_loss: 7.6484\n",
            "Epoch 30/30\n",
            "472/472 [==============================] - 89s 188ms/step - loss: 0.4380 - val_loss: 7.6356\n",
            "####################\n",
            "Temperature: 0.2\n",
            "####################\n",
            "@ thanks .\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "####################\n",
            "Temperature: 0.5\n",
            "####################\n",
            "i am very proud to have been necessary !\n",
            "\n",
            "it is my great honor to support our veterans with you ! you can join me by the big rally at very big letter for the press . now she is a of home . thanks deal for the good picture !\n",
            "\n",
            "@ thanks .\n",
            "\n",
            "####################\n",
            "Temperature: 1.0\n",
            "####################\n",
            "the failing @ nytimes reporters don ' t even call us anymore , they just write whatever they want to write , trump campaign and \" - - much more !\n",
            "\n",
            "@ thanks chris !\n",
            "\n",
            "@ brian _ legit , the highly respected of georgia with @ , @ trumpchicago , @ trumpnewyork , @ fortune and had a strong support - deal .\n",
            "\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gCKBUBWMRnaY",
        "colab_type": "text"
      },
      "source": [
        "##### save model"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "TZEPkhnsRyRz",
        "colab_type": "text"
      },
      "source": [
        "The model saves the weights automatically after each epoch. "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HsLC94XyRor6",
        "colab_type": "text"
      },
      "source": [
        "##### load model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4f3P7dDsR08D",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Rebuild the word-level generator from the checkpoint files that\n",
        "# textgenrnn writes automatically during training (weights/vocab/config).\n",
        "textgen_word = textgenrnn(\n",
        "    weights_path=model_name + '_weights.hdf5',\n",
        "    vocab_path=model_name + '_vocab.json',\n",
        "    config_path=model_name + '_config.json')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "npCDbxtGRp04",
        "colab_type": "text"
      },
      "source": [
        "##### print statistics and generate sentences"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GNQjY74AK5za",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Generation settings: a single sampling temperature and no seed prefix.\n",
        "temperature = [0.5]\n",
        "prefix = None\n",
        "\n",
        "# NOTE(review): this cell originally read train_cfg_small, but it belongs to the\n",
        "# word-level model — use train_cfg_word, consistent with the training cell above.\n",
        "if train_cfg_word['line_delimited']:\n",
        "  n = 1000\n",
        "  max_gen_length = 60 if model_cfg_word['word_level'] else 300\n",
        "else:\n",
        "  n = 1\n",
        "  max_gen_length = 2000 if model_cfg_word['word_level'] else 10000\n",
        "\n",
        "# Pass the settings computed above instead of re-hardcoding them in the call\n",
        "# (the original call overrode them with literal 0.5 / 140, leaving the\n",
        "# config block above dead).\n",
        "sentences_word = textgen_word.generate(n=n,\n",
        "                                       temperature=temperature[0],\n",
        "                                       prefix=prefix,\n",
        "                                       max_gen_length=max_gen_length,\n",
        "                                       return_as_list=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "pvaYpVRJSAjp",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        },
        "outputId": "d6f4a9c1-b85e-48b6-f230-4eb3bc94052f"
      },
      "source": [
        "# Summarize how well the generated tweets fool the classifier\n",
        "# (proportion fooled, best / 75th-percentile / median tweet).\n",
        "tweets_report(sentences_word)"
      ],
      "execution_count": 123,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Proportion of tweets fooled the classifier:  0.967 \n",
            " The best tweet:  the big story that the fake news media refuses to report me for @ cnn . my great honor to speak at the @ ussarizona . thank you ! # trump2016 0.999961919339227 \n",
            " The 75th percentile tweet:  \" i have great respect for the people that represent the united us care . remember that the great president ? he is a nice guy ! 0.9771959667746702 \n",
            " The median tweet:  . @ hillaryclinton is on the front page of the illinois . . . but write a . that ’ s because it should be an economic failure ' s to mexico . the problem ' 12 . you are both more . 0.8993477128327447\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zGmSitY6__0N",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Save the 1000 generated tweets, one per line. A context manager guarantees\n",
        "# the file is closed even if the join/write raises; plain 'w' suffices since\n",
        "# we never read the file back, and write() is the right call for one string.\n",
        "with open(\"generated_tweet_wordlevel.txt\", \"w\") as wordlevel_file:\n",
        "    wordlevel_file.write(\"\\n\".join(sentences_word))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "uQMqofr8wCYU",
        "colab_type": "text"
      },
      "source": [
        "## reference"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "TfxF0kBPwISX",
        "colab_type": "text"
      },
      "source": [
        "NB classifier \n",
        "https://towardsdatascience.com/sentiment-analysis-of-tweets-using-multinomial-naive-bayes-1009ed24276b\n",
        "\n",
        "LSTM classifier\n",
        "https://github.com/manashpratim/Tweet-Classification/blob/master/Tweet_Classification.ipynb\n",
        "\n",
        "markovify (Markov chain generator)\n",
        "https://github.com/jsvine/markovify\n",
        "\n",
        "textgenrnn (LSTM generator)\n",
        "https://github.com/minimaxir/textgenrnn"
      ]
    }
  ]
}