{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Bert_airline_senti_analysis.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "liScsVi8HuhK",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "cdb04231-c99d-436e-e820-277a3613f655"
      },
      "source": [
        "# Check that a GPU is available; raise SystemError if not.\n",
        "# Print the device name when one is found.\n",
        "\n",
        "import tensorflow as tf\n",
        "\n",
        "device_name = tf.test.gpu_device_name()\n",
        "if device_name != '/device:GPU:0':\n",
        "  raise SystemError('GPU device not found')\n",
        "print('Found GPU at: {}'.format(device_name))"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Found GPU at: /device:GPU:0\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RLJ4g3sqIJkm",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 372
        },
        "outputId": "c26d25de-de69-42dc-fd7a-3d20d9a3474f"
      },
      "source": [
        "# install the required bert pretrained model\n",
        "%pip install -q pytorch-pretrained-bert==0.6.2 pytorch-nlp==0.4.1"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Requirement already satisfied: pytorch-pretrained-bert in /usr/local/lib/python3.6/dist-packages (0.6.2)\n",
            "Requirement already satisfied: pytorch-nlp in /usr/local/lib/python3.6/dist-packages (0.4.1)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (4.28.1)\n",
            "Requirement already satisfied: regex in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (2019.8.19)\n",
            "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (1.16.4)\n",
            "Requirement already satisfied: torch>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (1.1.0)\n",
            "Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (2.21.0)\n",
            "Requirement already satisfied: boto3 in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (1.9.216)\n",
            "Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from pytorch-nlp) (0.24.2)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (2019.6.16)\n",
            "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (3.0.4)\n",
            "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (2.8)\n",
            "Requirement already satisfied: urllib3<1.25,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (1.24.3)\n",
            "Requirement already satisfied: jmespath<1.0.0,>=0.7.1 in /usr/local/lib/python3.6/dist-packages (from boto3->pytorch-pretrained-bert) (0.9.4)\n",
            "Requirement already satisfied: s3transfer<0.3.0,>=0.2.0 in /usr/local/lib/python3.6/dist-packages (from boto3->pytorch-pretrained-bert) (0.2.1)\n",
            "Requirement already satisfied: botocore<1.13.0,>=1.12.216 in /usr/local/lib/python3.6/dist-packages (from boto3->pytorch-pretrained-bert) (1.12.216)\n",
            "Requirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas->pytorch-nlp) (2.5.3)\n",
            "Requirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas->pytorch-nlp) (2018.9)\n",
            "Requirement already satisfied: docutils<0.16,>=0.10 in /usr/local/lib/python3.6/dist-packages (from botocore<1.13.0,>=1.12.216->boto3->pytorch-pretrained-bert) (0.15.2)\n",
            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.5.0->pandas->pytorch-nlp) (1.12.0)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1Bp1D8HrIO3F",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "d18bdfc1-bf1e-43a4-d71d-e5dd6fa589bb"
      },
      "source": [
        "# import the required libraries.\n",
        "\n",
        "import re\n",
        "import torch\n",
        "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
        "from keras.preprocessing.sequence import pad_sequences\n",
        "from sklearn.model_selection import train_test_split\n",
        "from pytorch_pretrained_bert import BertTokenizer, BertConfig\n",
        "from pytorch_pretrained_bert import BertAdam, BertForSequenceClassification\n",
        "from tqdm import tqdm, trange\n",
        "import pandas as pd\n",
        "import io\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "%matplotlib inline"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Using TensorFlow backend.\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mKw39ESnIXeS",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "f054c6f5-4f19-4f62-ef85-1fcdb0925301"
      },
      "source": [
        "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "n_gpu = torch.cuda.device_count()\n",
        "torch.cuda.get_device_name(0)"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'Tesla T4'"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 4
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1dd4E-IGIbgq",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "a44f04b5-20a4-473a-c1ad-d8ee554a4a64"
      },
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/gdrive')"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "m6r4cWrCIkRa",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Load the airline tweets dataset from Google Drive.\n",
        "# NOTE(review): hardcoded Drive path \u2014 assumes Tweets.csv sits at the root of 'My Drive'.\n",
        "df = pd.read_csv('/content/gdrive/My Drive/Tweets.csv', encoding='utf-8')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "oer-b3mqIpzf",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 433
        },
        "outputId": "de5d31cc-0950-4e36-e6d4-108cd8c86c00"
      },
      "source": [
        "df.head()"
      ],
      "execution_count": 24,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>tweet_id</th>\n",
              "      <th>airline_sentiment</th>\n",
              "      <th>airline_sentiment_confidence</th>\n",
              "      <th>negativereason</th>\n",
              "      <th>negativereason_confidence</th>\n",
              "      <th>airline</th>\n",
              "      <th>airline_sentiment_gold</th>\n",
              "      <th>name</th>\n",
              "      <th>negativereason_gold</th>\n",
              "      <th>retweet_count</th>\n",
              "      <th>text</th>\n",
              "      <th>tweet_coord</th>\n",
              "      <th>tweet_created</th>\n",
              "      <th>tweet_location</th>\n",
              "      <th>user_timezone</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>570306133677760513</td>\n",
              "      <td>neutral</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>NaN</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Virgin America</td>\n",
              "      <td>NaN</td>\n",
              "      <td>cairdin</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@VirginAmerica What @dhepburn said.</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-24 11:35:52 -0800</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Eastern Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>570301130888122368</td>\n",
              "      <td>positive</td>\n",
              "      <td>0.3486</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0.0000</td>\n",
              "      <td>Virgin America</td>\n",
              "      <td>NaN</td>\n",
              "      <td>jnardino</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@VirginAmerica plus you've added commercials t...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-24 11:15:59 -0800</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Pacific Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>570301083672813571</td>\n",
              "      <td>neutral</td>\n",
              "      <td>0.6837</td>\n",
              "      <td>NaN</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Virgin America</td>\n",
              "      <td>NaN</td>\n",
              "      <td>yvonnalynn</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@VirginAmerica I didn't today... Must mean I n...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-24 11:15:48 -0800</td>\n",
              "      <td>Lets Play</td>\n",
              "      <td>Central Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>570301031407624196</td>\n",
              "      <td>negative</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>Bad Flight</td>\n",
              "      <td>0.7033</td>\n",
              "      <td>Virgin America</td>\n",
              "      <td>NaN</td>\n",
              "      <td>jnardino</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@VirginAmerica it's really aggressive to blast...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-24 11:15:36 -0800</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Pacific Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>570300817074462722</td>\n",
              "      <td>negative</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>Can't Tell</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>Virgin America</td>\n",
              "      <td>NaN</td>\n",
              "      <td>jnardino</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@VirginAmerica and it's a really big bad thing...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-24 11:14:45 -0800</td>\n",
              "      <td>NaN</td>\n",
              "      <td>Pacific Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "             tweet_id  ...               user_timezone\n",
              "0  570306133677760513  ...  Eastern Time (US & Canada)\n",
              "1  570301130888122368  ...  Pacific Time (US & Canada)\n",
              "2  570301083672813571  ...  Central Time (US & Canada)\n",
              "3  570301031407624196  ...  Pacific Time (US & Canada)\n",
              "4  570300817074462722  ...  Pacific Time (US & Canada)\n",
              "\n",
              "[5 rows x 15 columns]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 24
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "b0bxNYJ4IskJ",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 467
        },
        "outputId": "55629b68-6882-4e09-82f5-6eb2a712a7a3"
      },
      "source": [
        "df.tail()"
      ],
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>tweet_id</th>\n",
              "      <th>airline_sentiment</th>\n",
              "      <th>airline_sentiment_confidence</th>\n",
              "      <th>negativereason</th>\n",
              "      <th>negativereason_confidence</th>\n",
              "      <th>airline</th>\n",
              "      <th>airline_sentiment_gold</th>\n",
              "      <th>name</th>\n",
              "      <th>negativereason_gold</th>\n",
              "      <th>retweet_count</th>\n",
              "      <th>text</th>\n",
              "      <th>tweet_coord</th>\n",
              "      <th>tweet_created</th>\n",
              "      <th>tweet_location</th>\n",
              "      <th>user_timezone</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>14635</th>\n",
              "      <td>569587686496825344</td>\n",
              "      <td>positive</td>\n",
              "      <td>0.3487</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0.0000</td>\n",
              "      <td>American</td>\n",
              "      <td>NaN</td>\n",
              "      <td>KristenReenders</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@AmericanAir thank you we got on a different f...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-22 12:01:01 -0800</td>\n",
              "      <td>NaN</td>\n",
              "      <td>NaN</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>14636</th>\n",
              "      <td>569587371693355008</td>\n",
              "      <td>negative</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>Customer Service Issue</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>American</td>\n",
              "      <td>NaN</td>\n",
              "      <td>itsropes</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@AmericanAir leaving over 20 minutes Late Flig...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-22 11:59:46 -0800</td>\n",
              "      <td>Texas</td>\n",
              "      <td>NaN</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>14637</th>\n",
              "      <td>569587242672398336</td>\n",
              "      <td>neutral</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>NaN</td>\n",
              "      <td>NaN</td>\n",
              "      <td>American</td>\n",
              "      <td>NaN</td>\n",
              "      <td>sanyabun</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@AmericanAir Please bring American Airlines to...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-22 11:59:15 -0800</td>\n",
              "      <td>Nigeria,lagos</td>\n",
              "      <td>NaN</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>14638</th>\n",
              "      <td>569587188687634433</td>\n",
              "      <td>negative</td>\n",
              "      <td>1.0000</td>\n",
              "      <td>Customer Service Issue</td>\n",
              "      <td>0.6659</td>\n",
              "      <td>American</td>\n",
              "      <td>NaN</td>\n",
              "      <td>SraJackson</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@AmericanAir you have my money, you change my ...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-22 11:59:02 -0800</td>\n",
              "      <td>New Jersey</td>\n",
              "      <td>Eastern Time (US &amp; Canada)</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>14639</th>\n",
              "      <td>569587140490866689</td>\n",
              "      <td>neutral</td>\n",
              "      <td>0.6771</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0.0000</td>\n",
              "      <td>American</td>\n",
              "      <td>NaN</td>\n",
              "      <td>daviddtwu</td>\n",
              "      <td>NaN</td>\n",
              "      <td>0</td>\n",
              "      <td>@AmericanAir we have 8 ppl so we need 2 know h...</td>\n",
              "      <td>NaN</td>\n",
              "      <td>2015-02-22 11:58:51 -0800</td>\n",
              "      <td>dallas, TX</td>\n",
              "      <td>NaN</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "                 tweet_id  ...               user_timezone\n",
              "14635  569587686496825344  ...                         NaN\n",
              "14636  569587371693355008  ...                         NaN\n",
              "14637  569587242672398336  ...                         NaN\n",
              "14638  569587188687634433  ...  Eastern Time (US & Canada)\n",
              "14639  569587140490866689  ...                         NaN\n",
              "\n",
              "[5 rows x 15 columns]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 25
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "d0gbZ335Iugq",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 321
        },
        "outputId": "f1635c12-d6a2-464f-eda9-4126fec5e7a2"
      },
      "source": [
        "df['airline_sentiment'].value_counts().plot(kind='bar')"
      ],
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<matplotlib.axes._subplots.AxesSubplot at 0x7fe3852596d8>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 26
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAEeCAYAAACDq8KMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAEoRJREFUeJzt3XuwXWV9xvHvIxEVL1wkWhvQRE21\neIVJAeuMbaXlotVQL4iKpg6dzFSs1jpVtE5xVFrtVFE71coIDiotUtRC1eogoh21ouGicpEhRREi\nSjRcolYl8Osf6w0eMyeefUKy1zm+38/Mnqz1rnfv81tzJufZ613vWitVhSSpP/cYuwBJ0jgMAEnq\nlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6tSSsQv4Vfbdd99avnz52GVI0qJy\n8cUX/6Cqls7Vb0EHwPLly1m3bt3YZUjSopLkukn6OQQkSZ0yACSpUwaAJHXKAJCkThkAktQpA0CS\nOmUASFKnDABJ6tSCvhBs2paf+ImxS9ilvv2Wp49dgqQFxCMASeqUASBJnTIAJKlTBoAkdcoAkKRO\nGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pQB\nIEmdMgAkqVMGgCR1ygCQpE4ZAJLUqYkCIMkrk1yR5PIk/5bk3klWJLkoyfokH06ye+t7r7a+vm1f\nPuNzXtvar05yxK7ZJUnSJOYMgCTLgJcDq6rqscBuwLHAW4FTquqRwM3A8e0txwM3t/ZTWj+SHNDe\n9xjgSODdSXbbubsjSZrUpENAS4D7JFkC7AHcCDwVOKdtPwM4ui2vbuu07YclSWs/q6p+VlXfAtYD\nB9/9XZAk7Yg5A6CqNgD/CHyH4Q//rcDFwC1VtaV1uwFY1paXAde3925p/R84s32W90iSpmySIaC9\nGb69rwB+E7gvwxDOLpFkbZJ1SdZt3LhxV/0YSereJENAfwh8q6o2VtXtwEeBJwN7tSEhgP2ADW15\nA7A/QNu+J/DDme2zvOcuVXVqVa2qqlVLly7dgV2SJE1ikgD4DnBokj3aWP5hwJXAhcBzWp81wLlt\n+by2Ttv+2aqq1n5smyW0AlgJfGXn7IYkab6WzNWhqi5Kcg5wCbAFuBQ4FfgEcFaSN7e209pbTgM+\nmGQ9sIlh5g9VdUWSsxnCYwtwQlXdsZP3R5I0oTkDAKCqTgJO2qb5WmaZxVNVPwWeu53PORk4eZ41\nSpJ2Aa8ElqROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUA\nSFKnDABJ6pQBIEmdMgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAk\ndcoAkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKn\nJgqAJHslOSfJN5NcleRJSfZJcn6Sa9q/e7e+SfKuJOuTfD3JQTM+Z03rf02SNbtqpyRJc5v0COCd\nwKeq6tHAE4CrgBOBC6pqJXBBWwc4CljZXmuB9wAk2Qc4CTgEOBg4aWtoSJKmb84ASLIn8BTgNICq\n+nlV3QKsBs5o3c4Ajm7Lq4EP1ODLwF5JHgIcAZxfVZuq6mbgfODInbo3kqSJTXIEsALYCLw/yaVJ\n3pfkvsCDq+rG1ud7wIPb8jLg+hnvv6G1ba9dkjSCSQJgCXAQ8J6qOhD4Mb8Y7gGgqgqonVFQkrVJ\n1iVZt3Hjxp3xkZKkWUwSADcAN1TVRW39HIZA+H4b2qH9e1PbvgHYf8b792tt22v/JVV1alWtqqpV\nS5cunc++SJLmYc4AqKrvAdcn
eVRrOgy4EjgP2DqTZw1wbls+D3hxmw10KHBrGyr6NHB4kr3byd/D\nW5skaQRLJuz3F8CZSXYHrgVewhAeZyc5HrgOOKb1/STwNGA98JPWl6ralORNwFdbvzdW1aadsheS\npHmbKACq6jJg1SybDpulbwEnbOdzTgdOn0+BkqRdwyuBJalTBoAkdcoAkKROGQCS1CkDQJI6ZQBI\nUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pQBIEmdMgAkqVMGgCR1\nygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqcM\nAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktSpiQMgyW5JLk3y8ba+IslFSdYn+XCS3Vv7vdr6+rZ9\n+YzPeG1rvzrJETt7ZyRJk5vPEcArgKtmrL8VOKWqHgncDBzf2o8Hbm7tp7R+JDkAOBZ4DHAk8O4k\nu9298iVJO2qiAEiyH/B04H1tPcBTgXNalzOAo9vy6rZO235Y678aOKuqflZV3wLWAwfvjJ2QJM3f\npEcA7wBeDdzZ1h8I3FJVW9r6DcCytrwMuB6gbb+19b+rfZb3SJKmbM4ASPLHwE1VdfEU6iHJ2iTr\nkqzbuHHjNH6kJHVpkiOAJwPPTPJt4CyGoZ93AnslWdL67AdsaMsbgP0B2vY9gR/ObJ/lPXepqlOr\nalVVrVq6dOm8d0iSNJk5A6CqXltV+1XVcoaTuJ+tqhcCFwLPad3WAOe25fPaOm37Z6uqWvuxbZbQ\nCmAl8JWdtieSpHlZMneX7XoNcFaSNwOXAqe19tOADyZZD2xiCA2q6ookZwNXAluAE6rqjrvx8yVJ\nd8O8AqCqPgd8ri1fyyyzeKrqp8Bzt/P+k4GT51ukJGnn80pgSeqUASBJnTIAJKlTBoAkdcoAkKRO\nGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOnV3HggjLSjLT/zE\n2CXsUt9+y9PHLkG/ZjwCkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkA\nktQpA0CSOmUASFKnDABJ6pQBIEmdMgAkqVMGgCR1ygfCSBqdD/MZh0cAktQpA0CSOmUASFKn5gyA\nJPsnuTDJlUmuSPKK1r5PkvOTXNP+3bu1J8m7kqxP8vUkB834rDWt/zVJ1uy63ZIkzWWSI4AtwKuq\n6gDgUOCEJAcAJwIXVNVK4IK2DnAUsLK91gLvgSEwgJOAQ4CDgZO2hoYkafrmDICqurGqLmnLm4Gr\ngGXAauCM1u0M4Oi2vBr4QA2+DOyV5CHAEcD5VbWpqm4GzgeO3Kl7I0ma2LzOASRZDhwIXAQ8uKpu\nbJu+Bzy4LS8Drp/xthta2/baJUkjmDgAktwP+Ajwl1V128xtVVVA7YyCkqxNsi7Juo0bN+6Mj5Qk\nzWKiAEhyT4Y//mdW1Udb8/fb0A7t35ta+wZg/xlv36+1ba/9l1TVqVW1qqpWLV26dD77Ikmah0lm\nAQU4Dbiqqt4+Y9N5wNaZPGuAc2e0v7jNBjoUuLUNFX0aODzJ3u3k7+GtTZI0gkluBfFk4EXAN5Jc\n1tpeB7wFODvJ8cB1wDFt2yeBpwHrgZ8ALwGoqk1J3gR8tfV7Y1Vt2il7IUmatzkDoKq+AGQ7mw+b\npX8BJ2zns04HTp9PgZKkXcMrgSWpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pQBIEmdMgAk\nqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6\nZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pQBIEmdMgAkqV
MG\ngCR1ygCQpE4ZAJLUqakHQJIjk1ydZH2SE6f98yVJg6kGQJLdgH8GjgIOAJ6f5IBp1iBJGkz7COBg\nYH1VXVtVPwfOAlZPuQZJEtMPgGXA9TPWb2htkqQpWzJ2AdtKshZY21Z/lOTqMevZxfYFfjCtH5a3\nTusndcPf3+L16/67e9gknaYdABuA/Wes79fa7lJVpwKnTrOosSRZV1Wrxq5DO8bf3+Ll724w7SGg\nrwIrk6xIsjtwLHDelGuQJDHlI4Cq2pLkZcCngd2A06vqimnWIEkaTP0cQFV9EvjktH/uAtXFUNev\nMX9/i5e/OyBVNXYNkqQReCsISeqUASBJnTIAJKlTBsAIktwnyaPGrkPqSQbHJfnbtv7QJAePXdeY\nDIApS/IM4DLgU239iUm8FmKBS7I5yW2zvDYnuW3s+jSRdwNPAp7f1jcz3JyyWwvuVhAdeAPDTfE+\nB1BVlyVZMWZBmltV3X/sGnS3HVJVByW5FKCqbm4XpHbLAJi+26vq1iQz25yLu8gkeRBw763rVfWd\nEcvRZG5vt6QvgCRLgTvHLWlcDgFN3xVJXgDslmRlkn8CvjR2UZpMkmcmuQb4FvB54NvAf41alCb1\nLuBjwIOSnAx8Afi7cUsalxeCTVmSPYC/AQ5vTZ8G3lxVPx2vKk0qydeApwKfqaoDk/wBcFxVHT9y\naZpAkkcDhwEBLqiqq0YuaVQGwJQlOaiqLhm7Du2YrXeRbEFwYFXdmeRrVfWEsWvTr5bkXcBZVeUR\nd+M5gOl7W5LfAM4BPlxVl49dkOblliT3A/4bODPJTcCPR65Jk7kYeH2bgv0xhjBYN3JNo/IIYAQt\nAI4Bngc8gCEI3jxuVZpEkvsC/8dw/uyFwJ7AmVX1w1EL08SS7AM8m+F29A+tqpUjlzQaA2BESR4H\nvBp4XlV1PR1tMWgzSD5TVX8wdi3ace3ir+cxPI/8qqp6xsgljcZZQFOW5LeTvCHJN4CtM4D2G7ks\nTaCq7gDuTLLn2LVo/pL8Q5vB9UbgcmBVz3/8wXMAYzgd+DBwRFV9d+xiNG8/Ar6R5HxmjP1X1cvH\nK0kT+l/gSVU1tWcBL3QOAUnzkGTNLM1VVR+YejGaSJJHV9U3kxw02/aeZ+V5BDAlSc6uqmPa0M/M\n1A3DH5DHj1Sa5mevqnrnzIYkrxirGE3kr4C1wNtm2VYM13V0ySOAKUnykKq6McnDZtteVddNuybN\nX5JLquqgbdouraoDx6pJk0ly720vuJytrSeeBJ6SqrqxLb60qq6b+QJeOmZtmluS5yf5T2BFkvNm\nvC4ENo1dnyYy2wVgXV8U5hDQ9P0R8Jpt2o6apU0Ly5eAG4F9+eWhhM3A10epSBNp190sA+6T5ECG\nYVcYrsHZY7TCFgADYEqS/DnDN/2HJ5n5B+P+wBfHqUqTakdq1zHcT16LyxHAnzJMt377jPbNwOvG\nKGih8BzAlLS543sDfw+cOGPT5qpyCGGRSLKZX5zE3x24J/DjqnrAeFVpEkmeXVUfGbuOhcQAGIn3\nk1/8MjzUYTVwaFWdOFd/jSPJcVX1oSSvYpZnb1TV22d5Wxc8CTxlSZ7h/eR/PdTgPxiGGLRw3bf9\nez+GIddtX93yCGDKvJ/84pbkWTNW7wGsAn6vqjw3oEXHI4Dpu73dOfIeSe5RVRcy/BHR4vCMGa8j\nGE4krh61Ik2k3QvoAUnumeSCJBuTHDd2XWNyFtD0eT/5RayqXjJ2Ddphh1fVq5P8CcPQ67MY/h9+\naNSqRuQRwPStZrif/CuBTzHcoKrrOxIuJkl+q317vLytPz7J68euSxPZ+oX36cC/V9WtYxazEHgO\nQJqHJJ8H/hp479bbPyS5vKoeO25lmkuStwBHM3wBOxjYC/h4VR0yamEj8ghgypJsTnLbNq/rk3ws\nycPHrk9z2qOqvrJN25ZRKtG8tKm6v8vwHIDbGYZeuz5/4z
mA6XsHcAPwrwyXpB8LPAK4hOFZAb8/\nWmWaxA+SPII2nzzJcxhuEaEFLsk9geOApwyXcPB54F9GLWpkDgFNWZKvVdUTtmm7rKqeONs2LSzt\nKO1Uhm+SNzNcz/FC7+a68CV5H8OV22e0phcBd1TVn41X1bg8Api+nyQ5BjinrT8H2Ho7WtN44dsA\nvB+4ENgHuA1Yw/CYQS1sv7PNF6zPtutyuuU5gOl7IcM3j5uA77fl45LcB3jZmIVpIucyzNq6Hfgu\nwyMinca7ONzRhu+Au47m7hixntE5BCTNgzN+Fq8khzEcvV3bmpYDL2kXY3bJI4Apcx75ovelJI8b\nuwjtkC8C7wXuZHiIz3uB/xm1opF5BDBlziNf3JJcCTyS4eTvz/CZzotGkrMZztmc2ZpewPCM5+eO\nV9W4PAk8fXtU1VfaNLStnEe+eBw1dgHaYY+tqgNmrF/YAr1bBsD0OY98EXO656J2SZJDq+rLAEkO\nAdaNXNOoHAKaMueRS+NIchXwKGDrw5ceClzNcATe5TCeATBlSe7FMPd/Ob+YR15V5TxyaRdK8rBf\ntb3HL2EOAU3fucAtDLd++O7ItUjd6PEP/Fw8ApgyZ/xIWii8DmD6nEcuaUHwCGDKnEcuaaEwAKZs\neyeiHJ+UNG0GgCR1ynMAktQpA0CSOmUASFKnDABJ6pQBIEmd+n/6sxmzxpWBagAAAABJRU5ErkJg\ngg==\n",
            "text/plain": [
              "<Figure size 432x288 with 1 Axes>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1jDV7eB3LTek",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 351
        },
        "outputId": "61ba3fff-9305-453c-9718-a7e7a9073bd8"
      },
      "source": [
        "df['airline'].value_counts().plot(kind='bar')"
      ],
      "execution_count": 27,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<matplotlib.axes._subplots.AxesSubplot at 0x7fe385205e80>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 27
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAE8CAYAAADJz2axAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3XuYXXV97/H3xwQQFQTKSDEJJGKs\nRSuBhovaYxEqBLyAN4T2QOTQpj2FVm1tC9pzsCin6lNL1QoelGiwSkRFSTUVwsVSWrkkXAWkjIAl\nESEYQBAPmvg5f6zfyM4wyeyZWTNrz16f1/PsZ9b6rbX3/q5nZvZ3r99VtomIiPZ5RtMBREREM5IA\nIiJaKgkgIqKlkgAiIloqCSAioqWSACIiWqrrBCBphqQbJX297M+TdK2kQUlflLRtKd+u7A+W43M7\nXuO0Un6npMPrvpiIiOjeWO4A3gHc0bH/IeAs2y8EHgZOKuUnAQ+X8rPKeUjaGzgWeAmwCDhb0oyJ\nhR8REePVVQKQNBt4LfDpsi/gEODL5ZRlwNFl+6iyTzl+aDn/KGC57Sdt3wMMAgfUcRERETF2M7s8\n7x+AvwR2KPu/Ajxie2PZXwvMKtuzgPsAbG+U9Gg5fxZwTcdrdj5nRLvuuqvnzp3bZYgREQGwZs2a\nh2wPjHbeqAlA0uuAB22vkXRwHcGN8n5LgCUAe+yxB6tXr57st4yI6CuSvt/Ned1UAb0SeIOke4Hl\nVFU/HwV2kjSUQGYD68r2OmBOCWIm8FzgR53lIzznl2yfa3uh7YUDA6MmsIiIGKdRE4Dt02zPtj2X\nqhH3Ctu/B1wJvKWcthi4uGyvKPuU41e4mnFuBXBs6SU0D5gPXFfblURExJh02wYwkr8Clkv6AHAj\ncF4pPw/4nKRBYANV0sD2bZIuBG4HNgIn2940gfePiIgJUC9PB71w4UKnDSAiYmwkrbG9cLTzMhI4\nIqKlkgAiIloqCSAioqWSACIiWioJICKipSbSDbRnzT31G1P6fvd+8LVT+n4REXXIHUBEREslAURE\ntFQSQERESyUBRES0VBJARERLJQFERLRUEkBEREslAUREtFQSQERESyUBRES0VBJARERLJQFERLRU\nEkBEREuNmgAkPVPSdZJulnSbpL8p5Z+VdI+km8pjQSmXpI9JGpR0i6T9Ol5rsaS7ymPx5F1WRESM\nppvpoJ8EDrH9uKRtgKsl/Us59he2vzzs/COA+eVxIHAOcKCkXYDTgYWAgTWSVth+uI4LiYiIsRn1\nDsCVx8vuNuXhrTzlKOD88rxrgJ0k7Q4cDqyyvaF86K8CFk0s/IiIGK+u2gAkzZB0E/Ag1Yf4teXQ\nmaWa5yxJ25WyWcB9HU9fW8q2VD78vZZIWi1p9fr168d4ORER0a2uEoDtTbYXALOBAyS9FDgNeDGw\nP7AL8Fd1BGT7XNsLbS8cGBio4yUjImIEY+oFZPsR4Epgke37SzXPk8BngAPKaeuAOR1Pm13KtlQe\nEREN6KYX0ICkncr29sBrgO+Wen0kCTga+E55ygrghNIb6CDgUdv3A5cAh0naWdLOwGGlLCIiGtBN\nL6DdgWWSZlAljAttf13SFZIGAAE3AX9Uzl8JHAkMAk8AJwLY3iDp/cD15bwzbG+o71IiImIsRk0A\ntm8B9h2h/JAtnG/g5C0cWwosHWOMERExCTISOCKipZIAIiJaKgkgIqKlkgAiIloqCSAioqWSACIi\nWioJICKipZIAIiJaKgkgIqKlkgAiIloqCSAioqWSACIiWioJICKipZIAIiJaKgkgIqKlkgAiIloq\nCSAioqWSACIiWqqbReGfKek6STdLuk3S35TyeZKulTQo6YuSti3l25X9wXJ8bsdrnVbK75R0+GRd\nVEREjK6bO4AngUNs7wMsABZJ
Ogj4EHCW7RcCDwMnlfNPAh4u5WeV85C0N3As8BJgEXB2WWg+IiIa\nMGoCcOXxsrtNeRg4BPhyKV8GHF22jyr7lOOHSlIpX277Sdv3AIPAAbVcRUREjFlXbQCSZki6CXgQ\nWAV8D3jE9sZyylpgVtmeBdwHUI4/CvxKZ/kIz+l8ryWSVktavX79+rFfUUREdKWrBGB7k+0FwGyq\nb+0vnqyAbJ9re6HthQMDA5P1NhERrTemXkC2HwGuBF4O7CRpZjk0G1hXttcBcwDK8ecCP+osH+E5\nERExxbrpBTQgaaeyvT3wGuAOqkTwlnLaYuDisr2i7FOOX2HbpfzY0ktoHjAfuK6uC4mIiLGZOfop\n7A4sKz12ngFcaPvrkm4Hlkv6AHAjcF45/zzgc5IGgQ1UPX+wfZukC4HbgY3AybY31Xs5ERHRrVET\ngO1bgH1HKL+bEXrx2P5/wFu38FpnAmeOPczoNPfUb0zp+937wddO6ftFxNTISOCIiJZKAoiIaKkk\ngIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJbqZiBYxJTKOIeIqZE7gIiIlkoCiIhoqSSAiIiWSgKI\niGipJICIiJZKAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipbhaFnyPpSkm3S7pN0jtK+fskrZN0\nU3kc2fGc0yQNSrpT0uEd5YtK2aCkUyfnkiIiohvdzAW0Efhz2zdI2gFYI2lVOXaW7b/rPFnS3lQL\nwb8EeD5wmaQXlcOfAF4DrAWul7TC9u11XEhERIxNN4vC3w/cX7Yfk3QHMGsrTzkKWG77SeAeSYM8\ntXj8YFlMHknLy7lJABERDRhTG4CkucC+wLWl6BRJt0haKmnnUjYLuK/jaWtL2ZbKh7/HEkmrJa1e\nv379WMKLiIgx6DoBSHoO8BXgnbZ/DJwD7AUsoLpD+EgdAdk+1/ZC2wsHBgbqeMmIiBhBV+sBSNqG\n6sP/87YvArD9QMfxTwFfL7vrgDkdT59dythKeURETLFuegEJOA+4w/bfd5Tv3nHaG4HvlO0VwLGS\ntpM0D5gPXAdcD8yXNE/StlQNxSvquYyIiBirbu4AXgkcD9wq6aZS9h7gOEkLAAP3An8IYPs2SRdS\nNe5uBE62vQlA0inAJcAMYKnt22q8loiIGINuegFdDWiEQyu38pwzgTNHKF+5tedFtEGWvIxekZHA\nEREtlQQQEdFSSQARES2VBBAR0VJJABERLZUEEBHRUkkAEREtlQQQEdFSSQARES2VBBAR0VJdzQYa\nEdGtTHUxfeQOICKipZIAIiJaKgkgIqKlkgAiIloqCSAioqWSACIiWioJICKipbpZFH6OpCsl3S7p\nNknvKOW7SFol6a7yc+dSLkkfkzQo6RZJ+3W81uJy/l2SFk/eZUVExGi6uQPYCPy57b2Bg4CTJe0N\nnApcbns+cHnZBzgCmF8eS4BzoEoYwOnAgcABwOlDSSMiIqbeqAnA9v22byjbjwF3ALOAo4Bl5bRl\nwNFl+yjgfFeuAXaStDtwOLDK9gbbDwOrgEW1Xk1ERHRtTG0AkuYC+wLXArvZvr8c+iGwW9meBdzX\n8bS1pWxL5RER0YCuE4Ck5wBfAd5p+8edx2wbcB0BSVoiabWk1evXr6/jJSMiYgRdJQBJ21B9+H/e\n9kWl+IFStUP5+WApXwfM6Xj67FK2pfLN2D7X9kLbCwcGBsZyLRERMQbd9AIScB5wh+2/7zi0Ahjq\nybMYuLij/ITSG+gg4NFSVXQJcJiknUvj72GlLCIiGtDNdNCvBI4HbpV0Uyl7D/BB4EJJJwHfB44p\nx1YCRwKDwBPAiQC2N0h6P3B9Oe8M2xtquYqIiBizUROA7asBbeHwoSOcb+DkLbzWUmDpWAKMiIjJ\nkZHAEREtlQQQEdFSSQARES2VBBAR0VJJABERLZUEEBHRUkkAEREtlQQQEdFSSQARES2VBBAR0V
JJ\nABERLZUEEBHRUkkAEREtlQQQEdFSSQARES2VBBAR0VJJABERLZUEEBHRUt0sCr9U0oOSvtNR9j5J\n6yTdVB5Hdhw7TdKgpDslHd5RvqiUDUo6tf5LiYiIsejmDuCzwKIRys+yvaA8VgJI2hs4FnhJec7Z\nkmZImgF8AjgC2Bs4rpwbEREN6WZR+Kskze3y9Y4Cltt+ErhH0iBwQDk2aPtuAEnLy7m3jzniiIio\nxUTaAE6RdEupItq5lM0C7us4Z20p21J5REQ0ZLwJ4BxgL2ABcD/wkboCkrRE0mpJq9evX1/Xy0ZE\nxDDjSgC2H7C9yfYvgE/xVDXPOmBOx6mzS9mWykd67XNtL7S9cGBgYDzhRUREF8aVACTt3rH7RmCo\nh9AK4FhJ20maB8wHrgOuB+ZLmidpW6qG4hXjDzsiIiZq1EZgSRcABwO7SloLnA4cLGkBYOBe4A8B\nbN8m6UKqxt2NwMm2N5XXOQW4BJgBLLV9W+1XExERXeumF9BxIxSft5XzzwTOHKF8JbByTNFFRMSk\nyUjgiIiWSgKIiGipJICIiJZKAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJZKAoiIaKkk\ngIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJZKAoiIaKkkgIiIlkoCiIhoqVETgKSlkh6U9J2Osl0k\nrZJ0V/m5cymXpI9JGpR0i6T9Op6zuJx/l6TFk3M5ERHRrW7uAD4LLBpWdipwue35wOVlH+AIYH55\nLAHOgSphUC0mfyBwAHD6UNKIiIhmjJoAbF8FbBhWfBSwrGwvA47uKD/flWuAnSTtDhwOrLK9wfbD\nwCqenlQiImIKjbcNYDfb95ftHwK7le1ZwH0d560tZVsqj4iIhky4Edi2AdcQCwCSlkhaLWn1+vXr\n63rZiIgYZrwJ4IFStUP5+WApXwfM6ThvdinbUvnT2D7X9kLbCwcGBsYZXkREjGa8CWAFMNSTZzFw\ncUf5CaU30EHAo6Wq6BLgMEk7l8bfw0pZREQ0ZOZoJ0i6ADgY2FXSWqrePB8ELpR0EvB94Jhy+krg\nSGAQeAI4EcD2BknvB64v551he3jDckRETKFRE4Dt47Zw6NARzjVw8hZeZymwdEzRRUTEpMlI4IiI\nlkoCiIhoqSSAiIiWSgKIiGipJICIiJZKAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJZK\nAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJZKAoiIaKkJJQBJ90q6VdJNklaXsl0krZJ0\nV/m5cymXpI9JGpR0i6T96riAiIgYnzruAF5te4HthWX/VOBy2/OBy8s+wBHA/PJYApxTw3tHRMQ4\nTUYV0FHAsrK9DDi6o/x8V64BdpK0+yS8f0REdGGiCcDApZLWSFpSynazfX/Z/iGwW9meBdzX8dy1\npSwiIhowc4LP/y3b6yQ9D1gl6budB21bksfygiWRLAHYY489JhheRER95p76jSl9v3s/+NpJff0J\n3QHYXld+Pgh8FTgAeGCoaqf8fLCcvg6Y0/H02aVs+Guea3uh7YUDAwMTCS8iIrZi3AlA0rMl7TC0\nDRwGfAdYASwupy0GLi7bK4ATSm+gg4BHO6qKIiJiik2kCmg34KuShl7nC7a/Kel64EJJJwHfB44p\n568EjgQGgSeAEyfw3hERMUHjTgC27wb2GaH8R8ChI5QbOHm87xcREfXKSOCIiJZKAoiIaKkkgIiI\nlkoCiIhoqSSAiIiWSgKIiGipJICIiJZKAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipJICIiJZK\nAoiIaKkkgIiIlkoCiIhoqSSAiIiWSgKIiGipKU8AkhZJulPSoKRTp/r9IyKiMqUJQNIM4BPAEcDe\nwHGS9p7KGCIiojLVdwAHAIO277b9M2A5cNQUxxAREUx9Ap
gF3Nexv7aURUTEFJPtqXsz6S3AItu/\nX/aPBw60fUrHOUuAJWX314A7pyxA2BV4aArfb6rl+qa3XN/0NdXXtqftgdFOmjkVkXRYB8zp2J9d\nyn7J9rnAuVMZ1BBJq20vbOK9p0Kub3rL9U1fvXptU10FdD0wX9I8SdsCxwIrpjiGiIhgiu8AbG+U\ndApwCTADWGr7tqmMISIiKlNdBYTtlcDKqX7fLjVS9TSFcn3TW65v+urJa5vSRuCIiOgdmQoiIqKl\nkgAiIloqCaCPSdpL0nZl+2BJfyppp6bjiu4M/e5GK4sYr9YmAEm7bO3RdHw1+QqwSdILqRqh5gBf\naDakGINvd1kWPUbSQZKul/S4pJ9J2iTpx03HNdyU9wLqIWsAAwL2AB4u2zsB/wXMay602vyidL19\nI/Bx2x+XdGPTQdVF0ouAvwD2pONv2fYhjQVVA0m/SjVFyvaS9qX6uwTYEXhWY4FNAknzgb+lmhzy\nmUPltl/QWFD1+EeqcU5fAhYCJwAvajSiEbQ2AdieByDpU8BXS/dUJB0BHN1kbDX6uaTjgMXA60vZ\nNg3GU7cvAZ8EPgVsajiWOh0OvJ1qpPxHeCoBPAa8p6GYJstngNOBs4BXAyfSJzUTtgclzbC9CfhM\n+fJ1WtNxdWp9N1BJt9r+jdHKpqMy1fYfAd+2fYGkecAxtj/UcGi1kLTG9m82HcdkkfRm219pOo7J\nNPQ77Pyf64ffq6SrgN8BPg38ELgfeLvtfRoNbJi+yLQT9ANJfy1pbnm8F/hB00HVZC/gnbYvALB9\nT798+Bf/LOmPJe3eh+03ALMl7ajKpyXdIOmwpoOq2ZOSngHcJemUUl35nKaDqsHxVJ+vpwA/oWp/\ne3OjEY0gdwDVB8bpwKuo2gSuAs6wvaHRwGog6Z+Al1M1Bi+1/d2GQ6qVpHtGKHYf1B8DIOlm2/tI\nOpzqTu6vgc/Z3q/h0GojaX/gDqq2t/dTtXN82Pa1jQY2QZKeDfzU9i/K/gxgO9tPNBvZ5lqfAIZI\nerbtnzQdR90k7QgcR1W3aqo61wtsP9ZoYDEqSbfYfpmkjwLfsv1VSTfa3rfp2Ooi6a22vzRa2XQj\n6Rrgd2w/XvafA1xq+xXNRra51lcBSXqFpNupvoUgaR9JZzccVm1s/xj4MtXqa7sDbwRukPQnjQZW\nE0kvlXSMpBOGHk3HVKM1ki4FjgQukbQD8IuGY6rbSI2iPdVQOk7PHPrwByjbPdeDq7W9gDqcRdXr\nYgWA7ZslvarZkOoh6Q1U3/xfCJwPHGD7QUnPAm4HPt5kfBMl6XTgYKouhCup1pq+mupa+8FJwALg\nbttPSPoVqt/ntFd62x0JzJL0sY5DOwIbm4mqVj+RtJ/tGwAk/Sbw04ZjepokAMD2fZI6i/qlS+Gb\ngbNsX9VZWD5MTmoopjq9BdgHuNH2iZJ2A/6p4ZjqZKrk9jrgDODZdPSVn+Z+QDUW5w3l55DHgHc1\nElG93gl8SdIPqLrx/irwtmZDerokALhP0isAS9oGeAelOmi6s714K8cun8pYJslPbf9C0sbS1vEg\nm684N92dTVXlcwhVAniMqkF//yaDqoPtm4GbJf2T7X74xr8Z29dLejHVsrYAd9r+eZMxjSQJoOpd\n8VGqkZfrgEuBP240oppIOoiqmufXgW2pFuH5ie0dGw2sPqvL3EafovoW+Tj9NVXCgbb3Gxq9bfvh\nspLetCfpVqo7HIbdfQNg+2VTHVMdJB1i+wpJbxp26EWSsH1RI4FtQRIA/Jrt3+sskPRK4N8biqdO\n02I4+njZHkrUn5T0TWBH27c0GVPNfl66Dw59UA7QP43Ar2s6gEny28AVPDXyvpOBnkoAre8GKumG\n4f2qRyqbjlQWoh7qTljK+qYbYRk0dIXtR8v+TsDBtr/WbGT1kPR7VPXG+wHLqNo8/nq6d5EcTtKe\nwHzbl0naHpg5nbspl4
Ftb7F9YdOxjKa1CUDSy4FXUDXWnNVxaEfgjb02ZHs8pstw9PGSdJPtBcPK\n+ibBAZR65EOpGhIvt90X7VNDJP0BsATYxfZeZXK4T9o+tOHQJmToy1fTcYymzeMAtqUacj4T2KHj\n8WOqb1r9YFoMR5+Akf5++6ZaU9L7qX5nn7X9j/324V+cDLyS6v8O23cBz2s0onpcJundkub08jQl\nrb0DGCJpT9vfbzqOySDpUOA/bPdc/+M6SFoKPAJ8ohSdTPVN8u2NBVUjSScC/41qOo/HgH8DrrJ9\ncaOB1UjStbYPHLpzkzQTuGG6NgIPmS7TlLQ2AUj6B9vvlPTPlEa2Trbf0EBYtZK0jOrDYwPlwwO4\n2vbDjQZWkzLfyv+iquYCWAV8oN+m9FC1PsAxwLuBnW3v0HBItZH0YaokfgLwJ1Q98G63/d5GA2uJ\nNieA37S9RtJvj3Tc9r9OdUyTRdLzqaq13g0833bfVJP0M0mfphoI9gBVAr+a6ttx3/SbLw2mJwGH\nUbVzXAJ82tP8g6mMtv8zYA/bS0rbxq/Z/nrDoW2mtQmgDST9d6oqhN8AHqL6APk329O6r3wb7t4A\nJH0VeD7VtB3/SlX9c3ezUdWvdG/F9vqmY6mLpC9SjU05wfZLS0L4j+GdFprW+gRQ+vy/j6eWFRQ9\nWFc3HpIeAr5HtWrWlbbvbTaierTp7g1A0q9TzVf1LmCG7dkNhzRhqkZ/nU7VQWGoMX8T1dKlZzQW\nWE06umD/slfa0PTeTcfWKVUBcB7VP9Ya+mcOIABs7yrpJVRrHZxZbkPvtH18w6FNSPnwnwEsGT6I\nr59Ieh3VHdyrqObLv4KqKqgfvIuq98/+tu8BkPQC4BxJ77J91laf3ft+VsY0DA3i2wt4stmQni4J\nAB61/S9NBzEZyvw4e1Dd3cwFnkufjCS1vUnSnpK2tf2zpuOZJIuoPvA/artfVqkbcjzwGtsPDRXY\nvrtUW17K5mNzpqPTgW8CcyR9nirZvb3RiEaQKiDpg1Rz5FxER4YemsZ1OpN0C1W9/9VU9cdrGw6p\nVpLOp5rnaAXVOAcAbP99Y0FFVyR9x/ZLx3psOinTdx9EVa18TWey6xW5A4ADy8+hRahFddt2SDPh\n1KNUkayy/edNxzKJvlcez6AaxNdXyoRiH6IaGCWeap/qh8n8tnbX1i93dLOovlzOBF7Vi5PBtfYO\nQNKfDW2WnwbWU/WTH2kQx7Qj6du2X950HJNN0rPcY2ut1kHSIPD6fhwBLGkTHXdtnYeoVtPaZopD\nqlUZpPgy4Daeqna17f/RXFRP1+Y7gJG+Me4JvFfS+2wvn+qAJsFNklZQzQbaWUXSU99CxqvM53Qe\n1ZQee0jaB/jDjllCp7sH+vHDH8D2jKZjmGQH2d676SBG09o7gC0p83Vc1iezgX5mhOKe+xYyXpKu\npRrgtqKjq920rz/umEv+t6lWkvoam7dP9UUC72eSzgM+Yvv2pmPZmjbfAYzI9gaNtELFNGS7L9aP\n3Rr353KenXPJP0E1SnZIz80pHyM6H/i2pB9SJe+h9puemuMoCWAYSa8GpvVcOZL+0vaHJX2ckUfK\n/mkDYU2GvlzOcyhxS3ql7c0WJioDF6P3nUfV1fVWerjrdWsTQOeSdB12oVqs+oSpj6hWQx+CqxuN\nYvKNtJznyY1GVK+PUy0GM1pZ9J71tlc0HcRoWtsGUFYh6mTgR/02k2QnSc+k6lXSVytK9Zs2LFbU\n7ySdTTV6+5/p4fab1t4B9OsaAMOV8QCHA8dR1SX/G1WvoGlP0jyqKYTn0vG33AeTwQ1frGhIPy1W\n1O+2p/rg7+n2m9beAfS7MlHa7wJHAtdRDUV/QT/1l5d0M1Vd62b1rP0yGVw/L1bURpL2t31903F0\nSgLoQ5LWAv8FnAN8zfZjku6xPa/h0Go1tJpU03FMFklXMnIj/rQepd4mkvamuvs+Dnik
19YJbm0V\nUJ/7MnA08DZgk6SLGeGDpA98VNLpVI2/fTWPU/Huju1nUq3n3DeLwfQrSXN56kP/51QDTBf24nTs\nuQPoU2Usw8FUf4RHUs0EehKw0vbjDYZWG0l/S9XV7ntsPty+b78hS7rO9gFNxxEjk/Rtqsb65cBy\n23f18t137gD6VFlS70rgytJHfqgh+Gxg1yZjq9Fbqdo1+mXysM2UUelDnkE1YeFzGwonuvMAVbfk\n3YAB4C56+O47dwAtI2l72z9tOo46SPoa1aIwDzYdy2SQdA/Vh4eoqn7uAc6wfXWjgcVWSXou8Caq\nL1zzqbqDHm77ukYDG0ESQExbkr5FNePi9TzVBmDbRzUWVEQHSc8DjqFKBnvYntNwSJtJAohpa9ia\nwKJaPvFY2y9pKKRalaq7/0m1JCTAt4D/a/vnjQUV49aL3XqTAFpC0s5U3dD66hcuaV+q8Q5vpaoi\nucj2x5uNqh6SPg1sAywrRccDm2z/fnNRRT9JI3AfkvS/gQttf1fSdlRrk+4DbJT0u7YvazbCiZH0\nIp7qZvcQ8EWqLzOvbjSw+u0/bNqHK8rgt4haPKPpAGJSvA24s2wvLj8HqOaX/z+NRFSv71It2fk6\n279VvvH3wzTQw22StNfQjqQX0J/XGQ3JHUB/+llHVc/hVP2RNwF3SOqH3/mbgGOpurh+k6rPdV+s\n4TDMX1Bd491lfy7Q92s89ANJA8Af8PR5qnpqMabcAfSnJyW9tPwRvppqpOyQZzUUU21sf832scCL\nqcY6vBN4nqRzJB229Wf3Pkn7S/pV25dTdSO8iGqg26VAqoCmh4upxmxcBnyj49FT0gjchyQdBHyW\nqtrnH2y/v5QfCRxv+7gGw5sUpZH7rcDbbB/adDwTIekG4HfK6nSvorrD+RNgAfDrtjMjaI+TdJPt\nBU3HMZokgIgeI+nmocZfSZ+gWlzkfWV/WnywtJ2kDwD/YXtl07FsTT/UB8cwkv5sWJGpestcbfue\nBkKKsZkhaabtjcChwJKOY/mfnR7eAbxH0pNUE8INrQm8Y7NhbS5/TP1phxHK5gLvlfQ+28unOJ4Y\nmwuAf5X0EPBTqkV8kPRC4NEmA4vu2B7pf7DnpAqoRcrkYpfZzpqyPa604+wOXDq0TGkZ//CcPpru\nuu9IenEZfzPi/1iv/e6SAFpG0o229206joh+JOlc20vKYj7D9dxU5akCahFJrwYebjqOiH5le0n5\nOS1GpScB9CFJt/L0Och3AX4AnDD1EUW0i6Q3jVD8KHBrL01fniqgPiRpz2FFBn40VJccEZNL0jeA\nl1MNVIRqdb41wDyqNR0+11Bom8kdQB/qtSlnI1poJtWgvQcAJO0GnA8cCFwF9EQCyFQQERH1mzP0\n4V88WMo2UI0L6Am5A4iIqN8qfpSZAAABQ0lEQVS3JH0d+FLZf3MpezbwSHNhbS5tABERNZMkqllr\nf6sU/TvwlV5bkCkJICKiRpJmUA247PmuoGkDiIioUVl74xeSntt0LKNJG0BERP0eB26VtAr4Zfdr\n23/aXEhPlwQQEVG/i8qjp6UNICKipXIHEBFRE0kX2j5mC9OxYPtlDYS1RbkDiIioiaTdbd8/wnQs\nQO+N0k8CiIioSVnC8wu2/73pWLqRbqAREfX5T+DvJN0r6cOSenrtjdwBRETUrFQBHVse21Mt83mB\n7f9sNLBhkgAiIiZRuQtYCrzM9oym4+mUKqCIiJpJminp9ZI+D/wLcCfV3EA9JXcAERE1kfQa4Djg\nSOA6YDlwca8uxpQEEBFRE0lXAF+gmvmz59ffTgKIiGiptAFERLRUEkBEREslAUREtFQSQERESyUB\nRES01P8HOD7rjyt2/uoAAAAASUVORK5CYII=\n",
            "text/plain": [
              "<Figure size 432x288 with 1 Axes>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kabNp_QdLnMJ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df.rename(columns={'airline_sentiment':'label'},inplace=True)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8InWqH6MYEy6",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df = df[['label','text']]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "n0spyW1oYN_l",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 194
        },
        "outputId": "db9b3e6d-7abf-43bd-ef17-56c06b8f005e"
      },
      "source": [
        "df.head()"
      ],
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>label</th>\n",
              "      <th>text</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>neutral</td>\n",
              "      <td>@VirginAmerica What @dhepburn said.</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>positive</td>\n",
              "      <td>@VirginAmerica plus you've added commercials t...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>neutral</td>\n",
              "      <td>@VirginAmerica I didn't today... Must mean I n...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>negative</td>\n",
              "      <td>@VirginAmerica it's really aggressive to blast...</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>negative</td>\n",
              "      <td>@VirginAmerica and it's a really big bad thing...</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "      label                                               text\n",
              "0   neutral                @VirginAmerica What @dhepburn said.\n",
              "1  positive  @VirginAmerica plus you've added commercials t...\n",
              "2   neutral  @VirginAmerica I didn't today... Must mean I n...\n",
              "3  negative  @VirginAmerica it's really aggressive to blast...\n",
              "4  negative  @VirginAmerica and it's a really big bad thing..."
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 30
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "61sFViJ1YPdY",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df.loc[:,'sentiment'] = df.label.map({'negative':0,'neutral':2,'positive':1})"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6jPZmvk1g8Ce",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 194
        },
        "outputId": "eb4d6e60-9949-43c8-9135-887080584c2e"
      },
      "source": [
        "df = df.drop(['label'], axis=1)\n",
        "df.head()"
      ],
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>text</th>\n",
              "      <th>sentiment</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>@VirginAmerica What @dhepburn said.</td>\n",
              "      <td>2</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>@VirginAmerica plus you've added commercials t...</td>\n",
              "      <td>1</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>@VirginAmerica I didn't today... Must mean I n...</td>\n",
              "      <td>2</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>@VirginAmerica it's really aggressive to blast...</td>\n",
              "      <td>0</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>@VirginAmerica and it's a really big bad thing...</td>\n",
              "      <td>0</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "                                                text  sentiment\n",
              "0                @VirginAmerica What @dhepburn said.          2\n",
              "1  @VirginAmerica plus you've added commercials t...          1\n",
              "2  @VirginAmerica I didn't today... Must mean I n...          2\n",
              "3  @VirginAmerica it's really aggressive to blast...          0\n",
              "4  @VirginAmerica and it's a really big bad thing...          0"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 32
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xCbwJ7oPhXc8",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "66ae29f3-25af-4eba-94d6-f721e6b4d39d"
      },
      "source": [
        "type(df['sentiment'])"
      ],
      "execution_count": 33,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "pandas.core.series.Series"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 33
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DEpAtAXahv5K",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def preprocess_tweet(tweet):\n",
        "\t# Preprocess the text in a single tweet.\n",
        "\t# arguments: tweet = a single tweet in form of string\n",
        "\t# returns: the cleaned tweet string\n",
        "\t# Convert the tweet to lower case. BUG FIX: str.lower() returns a new\n",
        "\t# string (Python strings are immutable), so the result must be assigned\n",
        "\t# back — the original `tweet.lower()` on its own line was a no-op.\n",
        "\ttweet = tweet.lower()\n",
        "\t# Convert all URLs to the string \"URL\"\n",
        "\ttweet = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))','URL',tweet)\n",
        "\t# Optionally convert all @username mentions to \"AT_USER\" (disabled)\n",
        "  #tweet = re.sub('@[^\\s]+','AT_USER', tweet)\n",
        "\t# Collapse all runs of whitespace into a single space\n",
        "\ttweet = re.sub('[\\s]+', ' ', tweet)\n",
        "\t# Convert \"#topic\" to just \"topic\" (strip the hash sign)\n",
        "\ttweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\n",
        "\treturn tweet"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WgVrSOyriWft",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df['text'] = df['text'].apply(preprocess_tweet)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ulp2gsOkiutk",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 194
        },
        "outputId": "45b3fb8c-4346-48bd-a1cf-09fed2faa0d4"
      },
      "source": [
        "df.head()"
      ],
      "execution_count": 36,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "<div>\n",
              "<style scoped>\n",
              "    .dataframe tbody tr th:only-of-type {\n",
              "        vertical-align: middle;\n",
              "    }\n",
              "\n",
              "    .dataframe tbody tr th {\n",
              "        vertical-align: top;\n",
              "    }\n",
              "\n",
              "    .dataframe thead th {\n",
              "        text-align: right;\n",
              "    }\n",
              "</style>\n",
              "<table border=\"1\" class=\"dataframe\">\n",
              "  <thead>\n",
              "    <tr style=\"text-align: right;\">\n",
              "      <th></th>\n",
              "      <th>text</th>\n",
              "      <th>sentiment</th>\n",
              "    </tr>\n",
              "  </thead>\n",
              "  <tbody>\n",
              "    <tr>\n",
              "      <th>0</th>\n",
              "      <td>@VirginAmerica What @dhepburn said.</td>\n",
              "      <td>2</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>1</th>\n",
              "      <td>@VirginAmerica plus you've added commercials t...</td>\n",
              "      <td>1</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>2</th>\n",
              "      <td>@VirginAmerica I didn't today... Must mean I n...</td>\n",
              "      <td>2</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>3</th>\n",
              "      <td>@VirginAmerica it's really aggressive to blast...</td>\n",
              "      <td>0</td>\n",
              "    </tr>\n",
              "    <tr>\n",
              "      <th>4</th>\n",
              "      <td>@VirginAmerica and it's a really big bad thing...</td>\n",
              "      <td>0</td>\n",
              "    </tr>\n",
              "  </tbody>\n",
              "</table>\n",
              "</div>"
            ],
            "text/plain": [
              "                                                text  sentiment\n",
              "0                @VirginAmerica What @dhepburn said.          2\n",
              "1  @VirginAmerica plus you've added commercials t...          1\n",
              "2  @VirginAmerica I didn't today... Must mean I n...          2\n",
              "3  @VirginAmerica it's really aggressive to blast...          0\n",
              "4  @VirginAmerica and it's a really big bad thing...          0"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 36
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mIogWlqHju1k",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "sentences = df.text.values\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RSyczTMDj4Jr",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# We need to add special tokens at the beginning and end of each sentence for BERT to work properly\n",
        "sentences = [\"[CLS] \" + sentence + \" [SEP]\" for sentence in sentences]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XVx_Od8dj66r",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 52
        },
        "outputId": "b5e32de5-b2df-4afd-eb20-47bdc2e0cce1"
      },
      "source": [
        "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n",
        "\n",
        "tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]\n",
        "print (\"Tokenize the first sentence:\")\n",
        "print (tokenized_texts[0])"
      ],
      "execution_count": 39,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Tokenize the first sentence:\n",
            "['[CLS]', '@', 'virgin', '##ame', '##rica', 'what', '@', 'dh', '##ep', '##burn', 'said', '.', '[SEP]']\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "IcP2S3IakyeD",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "c8c8790a-1ad4-4fb0-d07d-79fda8a7fc0e"
      },
      "source": [
        "#loading the labels value\n",
        "labels = df.sentiment.values\n",
        "type(labels)\n",
        "type(labels[0])"
      ],
      "execution_count": 42,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "numpy.int64"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 42
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6g1AFO8Lk8C7",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "daf9f6e4-ce72-469a-f028-745e7eeeec59"
      },
      "source": [
        "#finding maximum length of a tweet\n",
        "print(df.text.map(lambda x: len(x)).max())"
      ],
      "execution_count": 44,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "186\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dNZ0HCBRlGWk",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Set the maximum sequence length.\n",
        "# The longest tweet in our training set is 186 characters (note: this is\n",
        "# a character count, not a BERT token count)\n",
        "# We will set it to 256\n",
        "\n",
        "MAX_LEN = 256"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "q4P3cAOqlW_U",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# NOTE: an earlier revision padded input_ids here, but the next two cells\n",
        "# rebuild input_ids from tokenized_texts and pad it again, discarding this\n",
        "# result. The duplicate computation is disabled; kept for reference:\n",
        "#input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],\n",
        "#                          maxlen=MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "F8jFiXqklZ6t",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Use the BERT tokenizer to convert the tokens to their index numbers in the BERT vocabulary\n",
        "input_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_texts]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XqD9EbPylc0t",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "#Pad the sequences\n",
        "input_ids = pad_sequences(input_ids, maxlen=MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tUP5a0KMlgDb",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 532
        },
        "outputId": "0a1942fc-2e49-4b48-d8da-20ae7fe5df79"
      },
      "source": [
        "#Printing to see how the second tweet after preprocessing looks.\n",
        "input_ids[1]"
      ],
      "execution_count": 49,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "array([  101,  1030,  6261, 14074, 14735,  4606,  2017,  1005,  2310,\n",
              "        2794, 12698,  2000,  1996,  3325,  1012,  1012,  1012, 26997,\n",
              "        2100,  1012,   102,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0,     0,     0,     0,     0,     0,\n",
              "           0,     0,     0,     0])"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 49
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BlyziILglj4E",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Create attention masks\n",
        "attention_masks = []\n",
        "\n",
        "# Create a mask of 1s for each token followed by 0s for padding\n",
        "for seq in input_ids:\n",
        "  seq_mask = [float(i>0) for i in seq]\n",
        "  attention_masks.append(seq_mask)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MpNkRzhHlpgT",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Use train_test_split to split our data into train and validation sets for training\n",
        "\n",
        "train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids, labels, \n",
        "                                                            random_state=2018, test_size=0.1)\n",
        "train_masks, validation_masks, _, _ = train_test_split(attention_masks, input_ids,\n",
        "                                             random_state=2018, test_size=0.1)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WG4aafbbluy8",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Convert all of our data into torch tensors, the required datatype for our model\n",
        "\n",
        "train_inputs = torch.tensor(train_inputs)\n",
        "validation_inputs = torch.tensor(validation_inputs)\n",
        "train_labels = torch.tensor(train_labels)\n",
        "validation_labels = torch.tensor(validation_labels)\n",
        "train_masks = torch.tensor(train_masks)\n",
        "validation_masks = torch.tensor(validation_masks)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vrodXlS2mriu",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Select a batch size for training. For fine-tuning BERT on a specific task\n",
        "# A batch size of 16 or 32 is preferred \n",
        "# If Cuda goes out of memory try lowering the batch_size.\n",
        "batch_size = 16\n",
        "\n",
        "# Create an iterator of our data with torch DataLoader. This helps save on memory during training because, unlike a for loop, \n",
        "# with an iterator the entire dataset does not need to be loaded into memory\n",
        "\n",
        "train_data = TensorDataset(train_inputs, train_masks, train_labels)\n",
        "train_sampler = RandomSampler(train_data)\n",
        "train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)\n",
        "\n",
        "validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\n",
        "validation_sampler = SequentialSampler(validation_data)\n",
        "validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "unEsI7LQnd86",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "086c4bba-7ef4-438f-9e40-8495d3891c96"
      },
      "source": [
        "# Load BertForSequenceClassification, the pretrained BERT model with a single linear classification layer on top. \n",
        "# We make the num_label = 3 positive,negative,neutral\n",
        "model = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\", num_labels=3)\n",
        "model.cuda()"
      ],
      "execution_count": 78,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "BertForSequenceClassification(\n",
              "  (bert): BertModel(\n",
              "    (embeddings): BertEmbeddings(\n",
              "      (word_embeddings): Embedding(30522, 768, padding_idx=0)\n",
              "      (position_embeddings): Embedding(512, 768)\n",
              "      (token_type_embeddings): Embedding(2, 768)\n",
              "      (LayerNorm): BertLayerNorm()\n",
              "      (dropout): Dropout(p=0.1)\n",
              "    )\n",
              "    (encoder): BertEncoder(\n",
              "      (layer): ModuleList(\n",
              "        (0): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (1): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (2): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (3): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (4): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (5): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (6): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (7): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (8): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (9): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (10): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "        (11): BertLayer(\n",
              "          (attention): BertAttention(\n",
              "            (self): BertSelfAttention(\n",
              "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "            (output): BertSelfOutput(\n",
              "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "              (LayerNorm): BertLayerNorm()\n",
              "              (dropout): Dropout(p=0.1)\n",
              "            )\n",
              "          )\n",
              "          (intermediate): BertIntermediate(\n",
              "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
              "          )\n",
              "          (output): BertOutput(\n",
              "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
              "            (LayerNorm): BertLayerNorm()\n",
              "            (dropout): Dropout(p=0.1)\n",
              "          )\n",
              "        )\n",
              "      )\n",
              "    )\n",
              "    (pooler): BertPooler(\n",
              "      (dense): Linear(in_features=768, out_features=768, bias=True)\n",
              "      (activation): Tanh()\n",
              "    )\n",
              "  )\n",
              "  (dropout): Dropout(p=0.1)\n",
              "  (classifier): Linear(in_features=768, out_features=3, bias=True)\n",
              ")"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 78
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jEHLtl2noba3",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Set different weight decays for different layers of the model.\n",
        "\n",
        "param_optimizer = list(model.named_parameters())\n",
        "no_decay = ['bias', 'gamma', 'beta']\n",
        "optimizer_grouped_parameters = [\n",
        "    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n",
        "     'weight_decay_rate': 0.01},\n",
        "    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n",
        "     'weight_decay_rate': 0.0}\n",
        "]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QR8YuLpJq84g",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "9eb7a0fc-17cf-4561-d4d8-00aa1afa43b3"
      },
      "source": [
        "# This optimizer holds all of the hyperparameter information our training loop needs.\n",
        "optimizer = BertAdam(optimizer_grouped_parameters,\n",
        "                     lr=3e-5,\n",
        "                     warmup=.1)"
      ],
      "execution_count": 80,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "W0905 20:30:25.947302 140619630413696 optimization.py:46] t_total value of -1 results in schedule not being applied\n"
          ],
          "name": "stderr"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GN53abwKrGb2",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Function to calculate the accuracy of our predictions vs labels\n",
        "def flat_accuracy(preds, labels):\n",
        "    pred_flat = np.argmax(preds, axis=1).flatten()\n",
        "    labels_flat = labels.flatten()\n",
        "    return np.sum(pred_flat == labels_flat) / len(labels_flat)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0gZfU4YXtXSU",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 195
        },
        "outputId": "366366e8-f577-4cb7-8f67-b597800c55c7"
      },
      "source": [
        "# Store our loss and accuracy for plotting\n",
        "train_loss_set = []\n",
        "\n",
        "# Number of training epochs (the BERT authors recommend 2-4; 10 is used here, so watch for overfitting)\n",
        "epochs = 10\n",
        "\n",
        "# trange is a tqdm wrapper around the normal python range\n",
        "for _ in trange(epochs, desc=\"Epoch\"):\n",
        "  \n",
        "  \n",
        "  # Training\n",
        "  \n",
        "  # Set our model to training mode (as opposed to evaluation mode)\n",
        "  model.train()\n",
        "  \n",
        "  # Tracking variables\n",
        "  tr_loss = 0\n",
        "  nb_tr_examples, nb_tr_steps = 0, 0\n",
        "  \n",
        "  # Train the data for one epoch\n",
        "  for step, batch in enumerate(train_dataloader):\n",
        "    # Add batch to GPU\n",
        "    batch = tuple(t.to(device) for t in batch)\n",
        "    # Unpack the inputs from our dataloader\n",
        "    b_input_ids, b_input_mask, b_labels = batch\n",
        "    # Clear out the gradients (by default they accumulate)\n",
        "    optimizer.zero_grad()\n",
        "    # Forward pass\n",
        "    loss = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)\n",
        "    train_loss_set.append(loss.item())    \n",
        "    # Backward pass\n",
        "    loss.backward()\n",
        "    # Update parameters and take a step using the computed gradient\n",
        "    optimizer.step()\n",
        "    \n",
        "    \n",
        "    # Update tracking variables\n",
        "    tr_loss += loss.item()\n",
        "    nb_tr_examples += b_input_ids.size(0)\n",
        "    nb_tr_steps += 1\n",
        "\n",
        "  print(\"Train loss: {}\".format(tr_loss/nb_tr_steps))\n",
        "    \n",
        "    \n",
        "  # Validation\n",
        "\n",
        "  # Put model in evaluation mode to evaluate loss on the validation set\n",
        "  model.eval()\n",
        "\n",
        "  # Tracking variables \n",
        "  eval_loss, eval_accuracy = 0, 0\n",
        "  nb_eval_steps, nb_eval_examples = 0, 0\n",
        "\n",
        "  # Evaluate data for one epoch\n",
        "  for batch in validation_dataloader:\n",
        "    # Add batch to GPU\n",
        "    batch = tuple(t.to(device) for t in batch)\n",
        "    # Unpack the inputs from our dataloader\n",
        "    b_input_ids, b_input_mask, b_labels = batch\n",
        "    # Telling the model not to compute or store gradients, saving memory and speeding up validation\n",
        "    with torch.no_grad():\n",
        "      # Forward pass, calculate logit predictions\n",
        "      logits = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask)\n",
        "    \n",
        "    # Move logits and labels to CPU\n",
        "    logits = logits.detach().cpu().numpy()\n",
        "    label_ids = b_labels.to('cpu').numpy()\n",
        "\n",
        "    tmp_eval_accuracy = flat_accuracy(logits, label_ids)\n",
        "    \n",
        "    eval_accuracy += tmp_eval_accuracy\n",
        "    nb_eval_steps += 1\n",
        "\n",
        "  print(\"Validation Accuracy: {}\".format(eval_accuracy/nb_eval_steps))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch:   0%|          | 0/10 [00:00<?, ?it/s]\u001b[A"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Train loss: 0.4968884838564159\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch:  10%|█         | 1/10 [12:16<1:50:30, 736.67s/it]\u001b[A"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Validation Accuracy: 0.8539402173913043\n",
            "Train loss: 0.26912240365303114\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch:  20%|██        | 2/10 [24:40<1:38:29, 738.74s/it]\u001b[A"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Validation Accuracy: 0.8430706521739131\n",
            "Train loss: 0.14118304594591694\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "stream",
          "text": [
            "\n",
            "Epoch:  30%|███       | 3/10 [37:01<1:26:17, 739.59s/it]\u001b[A"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "Validation Accuracy: 0.8498641304347826\n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}