{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Source -- https://www.kaggle.com/columbine/imdb-dataset-sentiment-analysis-in-csv-format"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.1 – Import packages for RNN\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "import torch\n",
    "from torch import nn,optim\n",
    "import torchtext\n",
    "from torchtext import data\n",
    "\n",
    "#Check if we have GPU enabled\n",
    "if torch.cuda.is_available():\n",
    "    device = \"cuda\"\n",
    "else:\n",
    "    device = \"cpu\"\n",
    "print(\"Device =\",device)\n",
    "\n",
    "input_data_path = \"/kaggle/input/imdb-dataset-sentiment-analysis-in-csv-format/\"\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.2 – Read data into memory\n",
    "\n",
    "#Read the csv dataset using pandas\n",
    "df = pd.read_csv(\"/input/imdb-dataset-sentiment-analysis-in-csv-format/Train.csv\")\n",
    "print(\"DF.shape :\\n\",df.shape)\n",
    "print(\"df.label = \",df.label.value_counts())\n",
    "df.head()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "#Listing 7.3 – Define tokenizer, fields and dataset for train and val\n",
     "\n",
     "#Define a custom tokenizer (simple whitespace split on the raw string)\n",
     "my_tokenizer  = lambda x:str(x).split()\n",
     "\n",
     "#Define fields for our input dataset\n",
     "TEXT = data.Field(sequential=True, lower= True,tokenize = my_tokenizer,use_vocab=True)\n",
     "LABEL  = data.Field(sequential = False,use_vocab = False)\n",
     "\n",
     "\n",
     "#Define input fields as a list of tuples of (column name, Field)\n",
     "trainval_fields = [(\"text\",TEXT),(\"label\",LABEL)]\n",
     "\n",
     "#Construct dataset from the two CSV files under input_data_path\n",
     "train_data, val_data = data.TabularDataset.splits(path = input_data_path\n",
     ", train = \"Train.csv\", validation = \"Valid.csv\", format = \"csv\"\n",
     ", skip_header = True, fields = trainval_fields)\n",
     "\n",
     "#Build vocabulary (capped, to bound the embedding matrix size)\n",
     "MAX_VOCAB_SIZE = 25000\n",
     "TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE)\n",
     "\n",
     "\n",
     "#Define iterators for  train and validation; BucketIterator groups\n",
     "#examples of similar length to reduce padding per batch\n",
     "train_iterator  = data.BucketIterator(train_data, device = device\n",
     ", batch_size = 32\n",
     ", sort_key = lambda x:len(x.text)\n",
     ",sort_within_batch = False\n",
     ",repeat = False)\n",
     "\n",
     "val_iterator = data.BucketIterator(val_data, device = device, batch_size= 32\n",
     ", sort_key = lambda x:len(x.text)\n",
     ", sort_within_batch = False\n",
     ", repeat = False)\n",
     "\n",
     "#Show the ten most frequent tokens in the training vocabulary\n",
     "print(TEXT.vocab.freqs.most_common()[:10])\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.4 – Define RNN class\n",
    "\n",
    "class RNNModel(nn.Module):\n",
    "\n",
    "    def __init__(self,embedding_dim,input_dim,hidden_dim,output_dim):\n",
    "        super().__init__()\n",
    "        self.Embedding  = nn.Embedding(input_dim,embedding_dim)\n",
    "        self.rnn  = nn.RNN(embedding_dim,hidden_dim)\n",
    "        self.fc  = nn.Linear(hidden_dim,output_dim)        \n",
    "\n",
    "    def forward(self,text):\n",
    "        embed = self.Embedding(text)\n",
    "        output, hidden = self.rnn(embed)\n",
    "        out  = self.fc(hidden.squeeze(0))\n",
    "        return(out)\n",
    "\n",
    "#Define model \n",
    "INPUT_DIM = len(TEXT.vocab)\n",
    "EMBEDDING_DIM = 100\n",
    "HIDDEN_DIM = 256\n",
    "OUTPUT_DIM = 1\n",
    "\n",
    "#Create model instance\n",
    "model = RNNModel(EMBEDDING_DIM, INPUT_DIM,HIDDEN_DIM, OUTPUT_DIM)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "#Listing 7.5 – Define training and evaluation step\n",
     "\n",
     "\n",
     "#Define training step\n",
     "def train(model, data_iterator,optimizer,loss_function):\n",
     "    \"\"\"Run one training epoch.\n",
     "\n",
     "    Returns (accumulated batch loss / sample count, correct-prediction count,\n",
     "    sample count). Relies on a global accuracy() function that is defined\n",
     "    later, in Listing 7.6 — it must exist before this is called.\n",
     "    \"\"\"\n",
     "    epoch_loss,epoch_acc,epoch_denom = 0,0,0\n",
     "\n",
     "    model.train()    #Explicitly set model to train mode\n",
     "\n",
     "    for i, batch in enumerate(data_iterator):\n",
     "\n",
     "        optimizer.zero_grad()\n",
     "        predictions = model(batch.text)\n",
     "\n",
     "        #BCEWithLogitsLoss expects float targets shaped like the predictions\n",
     "        loss = loss_function(predictions.reshape(-1,1), batch.label.float().reshape(-1,1))\n",
     "        acc = accuracy(predictions.reshape(-1,1), batch.label.reshape(-1,1))\n",
     "\n",
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "\n",
     "        epoch_loss += loss.item()\n",
     "        epoch_acc += acc.item()   #running count of correct predictions\n",
     "        epoch_denom += len(batch)\n",
     "\n",
     "    return epoch_loss/epoch_denom,epoch_acc, epoch_denom\n",
     "\n",
     "#Define evaluation step\n",
     "def evaluate(model, data_iterator,loss_function):\n",
     "    \"\"\"Run one evaluation pass; same return convention as train().\"\"\"\n",
     "    epoch_loss,epoch_acc,epoch_denom = 0,0,0\n",
     "    \n",
     "    model.eval()     #Explicitly set model to eval mode\n",
     "\n",
     "    for i, batch in enumerate(data_iterator):\n",
     "        #No gradients needed during evaluation\n",
     "        with torch.no_grad():\n",
     "            predictions = model(batch.text)\n",
     "\n",
     "            loss = loss_function(predictions.reshape(-1,1), batch.label.float().reshape(-1,1))\n",
     "            acc = accuracy(predictions.reshape(-1,1), batch.label.reshape(-1,1))\n",
     "\n",
     "            epoch_loss += loss.item()\n",
     "            epoch_acc += acc.item()\n",
     "            epoch_denom += len(batch)\n",
     "            \n",
     "    return epoch_loss/epoch_denom, epoch_acc, epoch_denom\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.6 – Define accuracy function, loss function, optimizer and instantiate model\n",
    "\n",
    "\n",
    "#Compute binary accuracy\n",
    "def accuracy(preds, y):\n",
    "    rounded_preds = torch.round(torch.sigmoid(preds))\n",
    "\n",
    "    #Count the number of correctly predicted outcomes\t\n",
    "    correct = (rounded_preds == y).float()\n",
    "    acc = correct.sum()\n",
    "\n",
    "    return acc\n",
    "\n",
    "#Define optimizer, loss function \n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n",
    "criterion = nn.BCEWithLogitsLoss()\n",
    "\n",
    "#Transfer components to GPU, if avaiable.\n",
    "model = model.to(device)\n",
    "criterion = criterion.to(device)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.7 – Train model for 5 epochs\n",
    "\n",
    "n_epochs = 5\n",
    "\n",
    "for epoch in range(n_epochs):\n",
    "    #Train and evaluate     \n",
    "    train_loss, train_acc,train_num = train(model, train_iterator, optimizer, criterion)\n",
    "    valid_loss, valid_acc,val_num = evaluate(model, val_iterator,criterion)\n",
    "\n",
    "    print(\"Epoch-\",epoch)    \n",
    "\n",
    "    print(f'\\tTrain  Loss: {train_loss: .3f} | Train Predicted Correct : {train_acc} \n",
    "| Train Denom: {train_num} | PercAccuracy: {train_acc/train_num}')\n",
    "    print(f'\\tValid  Loss: {valid_loss: .3f} | Valid Predicted Correct: {valid_acc}\n",
    "| Val Denom: {val_num}| PercAccuracy: {train_acc/train_num}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.8 – Import required packages | Fresh exercise\n",
    "\n",
    "\n",
    "import numpy as np # linear algebra\n",
    "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
    "import torch,torchtext\n",
    "from torch import nn, optim\n",
    "from torch.optim import Adam\n",
    "from torchtext import data\n",
    "\n",
    "if torch.cuda.is_available():\n",
    "    device = \"cuda\"\n",
    "else:\n",
    "    device = \"cpu\"\n",
    "print(\"Device =\",device)\n",
    "\n",
    "input_data_path = \" /input/imdb-dataset-sentiment-analysis-in-csv-format/\"\n",
    "\n",
    "#Define fields for our input dataset\n",
    "TEXT = data.Field(sequential=True, lower= True,tokenize = 'spacy', include_lengths = True)\n",
    "LABEL  = data.Field(sequential = False,use_vocab = False)\n",
    "\n",
    "\n",
    "#Define a list of tuples of fields\n",
    "trainval_fields = [(\"text\",TEXT),(\"label\",LABEL)]\n",
    "\n",
    "#Contruct dataset\n",
    "train_data, val_data = data.TabularDataset.splits(path = input_data_path\n",
    ", train = \"Train.csv\", validation = \"Valid.csv\", format = \"csv\"\n",
    ", skip_header = True, fields = trainval_fields)\n",
    "\n",
    "#Build Vocab using pretrained\n",
    "MAX_VOCAB_SIZE = 25000\n",
    "TEXT.build_vocab(train_data, max_size = MAX_VOCAB_SIZE,   vectors = 'fasttext.simple.300d')\n",
    "BATCH_SIZE = 64\n",
    "\n",
    "train_iterator, val_iterator =  data.BucketIterator.splits(\n",
    "                                                            (train_data, val_data), \n",
    "                                                            batch_size = BATCH_SIZE,\n",
    "                                                            sort_key  = lambda x:len(x.text),\n",
    "                                                            sort_within_batch = True,\n",
    "                                                            device = device)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "#Listing 7.9 – Define RNN (improved) class\n",
     "\n",
     "\n",
     "class ImprovedRNN(nn.Module):\n",
     "    \"\"\"LSTM-based sentiment classifier that supports pretrained embeddings.\n",
     "\n",
     "    NOTE(review): the linear head is sized hidden_dim * 2 and forward()\n",
     "    concatenates the last two hidden states, so this class effectively\n",
     "    assumes bidirectional=True; bidirectional=False would break forward().\n",
     "    \"\"\"\n",
     "    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, \n",
     "                 bidirectional, dropout, pad_idx):\n",
     "        \n",
     "        super().__init__()     \n",
     "        #padding_idx keeps the <pad> embedding fixed at zero gradient\n",
     "        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)\n",
     "        self.lstm = nn.LSTM(embedding_dim, \n",
     "                           hidden_dim, \n",
     "                           num_layers=n_layers, \n",
     "                           bidirectional=bidirectional, \n",
     "                           dropout=dropout)\n",
     "        #Head consumes the concatenated forward+backward final hidden states\n",
     "        self.fc = nn.Linear(hidden_dim * 2, output_dim)\n",
     "        self.dropout = nn.Dropout(dropout)\n",
     "        \n",
     "    def forward(self, text, text_lengths):\n",
     "        #text: token-id tensor; text_lengths: true length of each example\n",
     "        embedded = self.dropout(self.embedding(text))\n",
     "                \n",
     "        #pack sequence so the LSTM skips padded positions\n",
     "        #(NOTE(review): recent torch versions require text_lengths on CPU — verify)\n",
     "        packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths)\n",
     "        packed_output, (hidden, cell) = self.lstm(packed_embedded)\n",
     "        \n",
     "        #unpack sequence back to a padded tensor (output is unused below)\n",
     "        output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)\n",
     "        #hidden[-2] / hidden[-1] are the top layer's two directional final states\n",
     "        hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))\n",
     "\n",
     "        return self.fc(hidden)\n",
     "\n",
     "\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 7.10 – Define model properties and copy pretrained weights\n",
    "\n",
    "\n",
    "#Define model input parameters\n",
    "INPUT_DIM = len(TEXT.vocab)\n",
    "EMBEDDING_DIM = 300\n",
    "HIDDEN_DIM = 256\n",
    "OUTPUT_DIM = 1\n",
    "N_LAYERS = 2\n",
    "BIDIRECTIONAL = True\n",
    "DROPOUT = 0.5\n",
    "\n",
    "#Create model instance\n",
    "model = ImprovedRNN(INPUT_DIM, \n",
    "            EMBEDDING_DIM, \n",
    "            HIDDEN_DIM, \n",
    "            OUTPUT_DIM, \n",
    "            N_LAYERS, \n",
    "            BIDIRECTIONAL, \n",
    "            DROPOUT, \n",
    "            PAD_IDX)\n",
    "\n",
    "#Copy pretrained vector weights\n",
    "model.embedding.weight.data.copy_(pretrained_embeddings)\n",
    "\n",
    "#Initialize the embedding with 0 for pad as well as unknown tokens\n",
    "UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]\n",
    "model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)\n",
    "PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]\n",
    "model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)\n",
    "\n",
    "print(model.embedding.weight.data)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "Listing 7.11 – Training improved model\n",
    "#Define train step\n",
    "def train(model, iterator, optimizer, criterion):\n",
    "    \n",
    "    epoch_loss,epoch_acc,epoch_denom = 0,0,0\n",
    "    \n",
    "    model.train()\n",
    "    \n",
    "    for batch in iterator:\n",
    "        \n",
    "        optimizer.zero_grad()        \n",
    "        text, text_lengths = batch.text\n",
    "        predictions = model(text, text_lengths).squeeze(1)        \n",
    "        loss = criterion(predictions.reshape(-1,1), batch.label.float().reshape(-1,1))        \n",
    "        acc = accuracy(predictions, batch.label)\n",
    "        \n",
    "        loss.backward()\n",
    "        \n",
    "        optimizer.step()\n",
    "        \n",
    "        epoch_loss += loss.item()\n",
    "        epoch_acc += acc.item()\n",
    "        epoch_denom += len(batch)\n",
    "        \n",
    "    return epoch_loss/epoch_denom, epoch_acc, epoch_denom\n",
    "\n",
    "#Define evaluate step\n",
    "def evaluate(model, iterator, criterion):\n",
    "    \n",
    "    epoch_loss,epoch_acc,epoch_denom = 0,0,0    \n",
    "    model.eval()\n",
    "    \n",
    "    with torch.no_grad():    \n",
    "        for batch in iterator:\n",
    "            text, text_lengths = batch.text            \n",
    "            predictions = model(text, text_lengths).squeeze(1)            \n",
    "            loss = criterion(predictions, batch.label.float())         \n",
    "            acc = accuracy(predictions, batch.label)\n",
    "            epoch_loss += loss.item()\n",
    "            epoch_acc += acc.item()\n",
    "            epoch_denom += len(batch)           \n",
    "        \n",
    "    return epoch_loss/epoch_denom, epoch_acc, epoch_denom\n",
    "\n",
    "#Define optimizer, loss funciton and load to GPU\n",
    "optimizer = optim.Adam(model.parameters())\n",
    "criterion = nn.BCEWithLogitsLoss()\n",
    "\n",
    "model = model.to(device)\n",
    "criterion = criterion.to(device)\n",
    "\n",
    "#similar to previous exercise, we deifne our accuracy function \n",
    "def accuracy(preds, y):\n",
    "    rounded_preds = torch.round(torch.sigmoid(preds))\n",
    "\n",
    "    correct = (rounded_preds == y).float()\n",
    "    acc = correct.sum()\n",
    "\n",
    "    return acc\n",
    "\n",
    "#Finally lets train our model for 5 epochs\n",
    "N_EPOCHS = 5\n",
    "\n",
    "for epoch in range(N_EPOCHS):\n",
    "\n",
    "    train_loss, train_acc,train_num = train(model, train_iterator, optimizer, criterion)\n",
    "    valid_loss, valid_acc,val_num = evaluate(model, val_iterator, criterion)\n",
    "    print(\"Epoch-\",epoch)\n",
    "    print(f'\\tTrain  Loss: {train_loss: .3f} | Train Predicted Correct : {train_acc} \n",
    "| Train Denom: {train_num} | PercAccuracy: {train_acc/train_num}')\n",
    "    print(f'\\tValid  Loss: {valid_loss: .3f} | Valid Predicted Correct: {valid_acc} \n",
    "| Val Denom: {val_num}| PercAccuracy: {train_acc/train_num}')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
