{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "[2021-09-13 22:36] 1 gram best: 71.59%\n",
    "[2021-09-14 00:25] 2 gram best: 71.52%\n",
    "[2021-09-14 01:03] 1 gram weighted_sample epochs=50 best: 71.72%"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "from torch.utils.data import (TensorDataset, DataLoader, RandomSampler,\n",
    "                              SequentialSampler, WeightedRandomSampler)\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "import random\n",
    "import time\n",
    "\n",
    "from common.configs.path import paths\n",
    "from common.configs.tools import label_map\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "import pickle\n",
    "\n",
    "import json\n",
    "\n",
    "import nltk\n",
    "from nltk import ngrams\n",
    "\n",
    "from gensim.models import Word2Vec\n",
    "from gensim.models.fasttext import FastText\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "from tqdm import tqdm\n",
    "\n",
    "%matplotlib inline"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "source": [
     "# Report GPU availability, then select the compute device used by every\n",
     "# model/tensor below (falls back to CPU when CUDA is absent)\n",
     "if torch.cuda.is_available():\n",
     "    print('gpu is available: {}'.format(torch.cuda.get_device_name(0)))\n",
     "    print('device count: {}'.format(torch.cuda.device_count()))\n",
     "\n",
     "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
     "print('Using device:', device)\n",
     "print()\n",
    "\n",
    "def save_model(model, path):\n",
    "    with open(path, 'wb') as f:\n",
    "        pickle.dump(model, f)\n",
    "\n",
    "\n",
    "def load_model(path):\n",
    "    with open(path, 'rb') as f:\n",
    "        model = pickle.load(f)\n",
    "    return model\n",
    "\n",
    "def load_json(path):\n",
    "    with open(path, 'r') as f:\n",
    "        dict_ = json.load(f)\n",
    "    return dict_\n",
    "\n",
    "def save_json(path, dt):\n",
    "    with open(path, 'w') as f:\n",
    "        json.dump(dt, f)\n",
    "    print(path, 'saved.')\n",
    "\n",
    "def load_npy(path):\n",
    "    return np.load(path)\n",
    "\n",
    "def save_text(path, line):\n",
    "    with open(path, 'r') as f:\n",
    "        f.write(line)\n",
    "\n",
     "def train_word_vector(text, gram):\n",
     "    \"\"\"Train 300-d Word2Vec and FastText models on the tokenized corpus and\n",
     "    save both under ./output, tagged with the n-gram order.\n",
     "\n",
     "    NOTE(review): `size=` here and `.wv.vocab` below are gensim 3.x APIs\n",
     "    (gensim 4 renamed them to `vector_size` / `key_to_index`) — confirm the\n",
     "    pinned gensim version.\n",
     "    NOTE(review): workers=-1 is not an \"all cores\" flag in gensim (unlike\n",
     "    sklearn's n_jobs=-1) — verify the models actually train.\n",
     "    \"\"\"\n",
     "    w2v = Word2Vec(sentences=text, size=300, window=5, min_count=1, workers=-1)\n",
     "    ftt = FastText(sentences=text, size=300, window=5, min_count=1, workers=-1)\n",
     "    w2v.save(r'./output/w2v_{}gram.model'.format(gram))\n",
     "    ftt.save(r'./output/ftt_{}gram.model'.format(gram))\n",
     "    return w2v, ftt\n",
     "\n",
     "def comb_vector(w2v_m, ftt_m):\n",
     "    \"\"\"For every word in the Word2Vec vocabulary, concatenate its Word2Vec\n",
     "    and FastText vectors (300 + 300 = 600-d) into one list, keyed by word.\"\"\"\n",
     "    return {w: (np.concatenate((w2v_m.wv[w], ftt_m.wv[w]))).tolist() for w in w2v_m.wv.vocab.keys()}"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "gpu is available: NVIDIA GeForce GTX 1050 with Max-Q Design\n",
      "device count: 1\n",
      "Using device: cuda\n",
      "\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
     "# Load the train/test CSVs and map string labels to integer class ids\n",
     "# (label_map comes from common.configs.tools) for CrossEntropyLoss\n",
     "train_df = pd.read_csv(paths['train_data'])\n",
     "test_df = pd.read_csv(paths['test_data'])\n",
     "train_df.label = train_df.label.apply(lambda e: label_map[e])\n",
     "Y = train_df.label.values"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "source": [
     "# One-time preprocessing: the corpus/embedding builds below are commented\n",
     "# out; the active lines load their cached artifacts from ./output. Re-run\n",
     "# the commented blocks only when the underlying text data changes.\n",
     "# text = pd.concat([train_df['text'], test_df['text']])\n",
     "# text_1_gram = [nltk.word_tokenize(line) for line in text.tolist()]\n",
     "# text_2_gram = [[' '.join(grams) for grams in ngrams(nltk.word_tokenize(line), n=2)] for line in text.to_list()]\n",
     "# text_3_gram = [[' '.join(grams) for grams in ngrams(nltk.word_tokenize(line), n=3)] for line in text.to_list()]\n",
     "\n",
     "# save_json(r'./output/corpus_1_gram.json', text_1_gram)\n",
     "# save_json(r'./output/corpus_2_gram.json', text_2_gram)\n",
     "# save_json(r'./output/corpus_3_gram.json', text_3_gram)\n",
     "\n",
     "# w2c_1_gram, ftt_1_gram = train_word_vector(text_1_gram, 1)\n",
     "# word_embeddings_1_gram = comb_vector(w2c_1_gram, ftt_1_gram)\n",
     "# save_json(r'./output/word_embeddings_1_gram.json', word_embeddings_1_gram)\n",
     "\n",
     "\n",
     "# w2c_2_gram, ftt_2_gram = train_word_vector(text_2_gram, 2)\n",
     "# word_embeddings_2_gram = comb_vector(w2c_2_gram, ftt_2_gram)\n",
     "# save_json(r'./output/word_embeddings_2_gram.json', word_embeddings_2_gram)\n",
     "\n",
     "# w2c_3_gram, ftt_3_gram = train_word_vector(text_3_gram, 3)\n",
     "# word_embeddings_3_gram = comb_vector(w2c_3_gram, ftt_3_gram)\n",
     "# save_json(r'./output/word_embeddings_3_gram.json', word_embeddings_3_gram)\n",
     "\n",
     "text_1_gram = load_json(r'./output/corpus_1_gram.json')\n",
     "# text_2_gram = load_json(r'./output/corpus_2_gram.json')\n",
     "# text_3_gram = load_json(r'./output/corpus_3_gram.json')\n",
     "\n",
     "# NOTE(review): word_embeddings_1_gram loaded here appears unused —\n",
     "# load_pretrained_vectors later re-reads the same file itself.\n",
     "word_embeddings_1_gram = load_json(r'./output/word_embeddings_1_gram.json')\n",
     "# word_embeddings_2_gram = load_json(r'./output/word_embeddings_2_gram.json')\n",
     "# word_embeddings_3_gram = load_json(r'./output/word_embeddings_3_gram.json')"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "source": [
     "# NOTE(review): sentences have varying lengths, so np.array over a list of\n",
     "# lists yields a 1-D object array here — the slice below still works.\n",
     "text_1_gram = np.array(text_1_gram)\n",
     "\n",
     "labels = np.array(Y)\n",
     "# Keep only the labeled portion of the corpus (it was built from the\n",
     "# concatenation of train and test text — see the commented cell above)\n",
     "texts = text_1_gram[:labels.shape[0]]\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "def word_idx(texts):\n",
    "    \"\"\"Tokenize texts, build vocabulary and find maximum sentence length.\n",
    "    \n",
    "    Args:\n",
    "        texts (List[str]): List of text data\n",
    "    \n",
    "    Returns:\n",
    "        word2idx (Dict): Vocabulary built from the corpus\n",
    "    \"\"\"\n",
    "    word2idx = {}\n",
    "\n",
    "    # Add <pad> and <unk> tokens to the vocabulary\n",
    "    word2idx['<pad>'] = 0\n",
    "    word2idx['<unk>'] = 1\n",
    "\n",
    "    # Building our vocab from the corpus starting from index 2\n",
    "    idx = 2\n",
    "    for sent in texts:\n",
    "\n",
    "        # Add new token to `word2idx`\n",
    "        for token in sent:\n",
    "            if token not in word2idx:\n",
    "                word2idx[token] = idx\n",
    "                idx += 1\n",
    "\n",
    "        # Update `max_len`\n",
    "\n",
    "    return word2idx\n",
    "\n",
    "def encode(tokenized_texts, word2idx, max_len):\n",
    "    \"\"\"Pad each sentence to the maximum sentence length and encode tokens to\n",
    "    their index in the vocabulary.\n",
    "\n",
    "    Returns:\n",
    "        input_ids (np.array): Array of token indexes in the vocabulary with\n",
    "            shape (N, max_len). It will the input of our CNN model.\n",
    "    \"\"\"\n",
    "\n",
    "    input_ids = []\n",
    "    for tokenized_sent in tokenized_texts:\n",
    "        # Pad sentences to max_len\n",
    "        tokenized_sent += ['<pad>'] * (max_len - len(tokenized_sent))\n",
    "\n",
    "        if len(tokenized_sent) > max_len:\n",
    "            tokenized_sent = tokenized_sent[:max_len]\n",
    "\n",
    "        # Encode tokens to input_ids\n",
    "        input_id = np.array([word2idx.get(token) for token in tokenized_sent])\n",
    "        input_ids.append(input_id)\n",
    "    \n",
    "    return np.array(input_ids)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "from tqdm.notebook import tqdm\n",
    "\n",
    "def load_pretrained_vectors(word2idx, fname):\n",
    "    \"\"\"Load pretrained vectors and create embedding layers.\n",
    "    \n",
    "    Args:\n",
    "        word2idx (Dict): Vocabulary built from the corpus\n",
    "        fname (str): Path to pretrained vector file\n",
    "\n",
    "    Returns:\n",
    "        embeddings (np.array): Embedding matrix with shape (N, d) where N is\n",
    "            the size of word2idx and d is embedding dimension\n",
    "    \"\"\"\n",
    "\n",
    "    print(\"Loading pretrained vectors...\")\n",
    "    fin = open(fname, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n",
    "    word_embeddings = load_json(fname)\n",
    "\n",
    "    d = 600\n",
    "\n",
    "    # Initilize random embeddings\n",
    "    embeddings = np.random.uniform(-0.25, 0.25, (len(word2idx), d))\n",
    "    embeddings[word2idx['<pad>']] = np.zeros((d,))\n",
    "\n",
    "    # Load pretrained vectors\n",
    "    count = 0\n",
    "    for word, vector in tqdm(word_embeddings.items()):\n",
    "        if word in word2idx:\n",
    "            count += 1\n",
    "            embeddings[word2idx[word]] = np.array(vector, dtype=np.float32)\n",
    "\n",
    "    return Variable(torch.from_numpy(embeddings))"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "source": [
     "# Build the vocabulary, encode/pad every sentence to 64 tokens, and load\n",
     "# the pretrained embedding matrix aligned with word2idx\n",
     "word2idx = word_idx(texts)\n",
     "input_ids = encode(texts, word2idx, max_len=64)\n",
     "embeddings = load_pretrained_vectors(word2idx, r'./output/word_embeddings_1_gram.json')"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Loading pretrained vectors...\n"
     ]
    },
    {
     "output_type": "display_data",
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "4c59c0c9202f43c2823bac98f2e65721"
      },
      "text/plain": [
       "  0%|          | 0/3456 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {}
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "source": [
    "from collections import Counter\n",
    "\n",
    "def get_class_distribution(classes):\n",
    "      counter = {cl: 0 for cl in range(0, 35)}\n",
    "      for cl in classes:\n",
    "            if cl in counter:\n",
    "                  counter[cl] += 1\n",
    "      return counter\n",
    "\n",
    "def data_loader(train_inputs, val_inputs, train_labels, val_labels,\n",
    "                batch_size=50):\n",
    "    \"\"\"Convert train and validation sets to torch.Tensors and load them to\n",
    "    DataLoader.\n",
    "    \"\"\"\n",
    "\n",
    "    # Convert data type to torch.Tensor\n",
    "    train_inputs, val_inputs, train_labels, val_labels =\\\n",
    "    tuple(torch.tensor(data) for data in\n",
    "          [train_inputs, val_inputs, train_labels, val_labels])\n",
    "\n",
    "    # Specify batch_size\n",
    "    batch_size = batch_size\n",
    "\n",
    "    # Create DataLoader for training data\n",
    "    train_data = TensorDataset(train_inputs, train_labels)\n",
    "\n",
    "    target_list = torch.tensor([t for _, t in train_data])\n",
    "    target_list = target_list[torch.randperm(len(target_list))]\n",
    "\n",
    "    counter = get_class_distribution(train_labels.numpy())\n",
    "\n",
    "    class_count = [i for i in get_class_distribution(train_labels.numpy()).values()]\n",
    "    class_weights = 1./torch.tensor(class_count, dtype=torch.float)\n",
    "    class_weights_all = class_weights[target_list]\n",
    "    \n",
    "    \n",
    "    weighted_sampler = WeightedRandomSampler(\n",
    "                        weights=class_weights_all,\n",
    "                        num_samples=len(class_weights_all),\n",
    "                        replacement=True)\n",
    "\n",
    "\n",
    "#     train_sampler = RandomSampler(train_data)\n",
    "    train_dataloader = DataLoader(train_data, sampler=weighted_sampler, batch_size=batch_size)\n",
    "\n",
    "    # Create DataLoader for validation data\n",
    "    val_data = TensorDataset(val_inputs, val_labels)\n",
    "    val_sampler = SequentialSampler(val_data)\n",
    "    val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size)\n",
    "\n",
    "    return train_dataloader, val_dataloader"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "source": [
    "# Train Test Split\n",
    "train_inputs, val_inputs, train_labels, val_labels = train_test_split(\n",
    "    input_ids, labels, test_size=0.1, random_state=42)\n",
    "\n",
    "# Load data to PyTorch DataLoader\n",
    "train_dataloader, val_dataloader = data_loader(train_inputs, val_inputs, train_labels, val_labels, batch_size=50)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "source": [
     "# Sample configuration:\n",
     "# NOTE(review): these globals are never passed to initilize_model below,\n",
     "# which uses its own defaults ([3, 4, 5] / [100, 100, 100]) — confirm intent.\n",
     "filter_sizes = [2, 3, 4]\n",
     "num_filters = [2, 2, 2]"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "source": [
     "class TextCNN(nn.Module):\n",
     "    \"\"\"A 1D Convolutional Neural Network for Sentence Classification.\"\"\"\n",
     "    def __init__(self,\n",
     "                 pretrained_embedding=None,\n",
     "                 freeze_embedding=False,\n",
     "                 vocab_size=None,\n",
     "                 embed_dim=600,\n",
     "                 filter_sizes=[3, 4, 5],\n",
     "                 num_filters=[100, 100, 100],\n",
     "                 num_classes=35,\n",
     "                 dropout=0.5):\n",
     "        \"\"\"\n",
     "        The constructor for the TextCNN class.\n",
     "\n",
     "        Args:\n",
     "            pretrained_embedding (torch.Tensor): Pretrained embeddings with\n",
     "                shape (vocab_size, embed_dim)\n",
     "            freeze_embedding (bool): Set to False to fine-tune pretrained\n",
     "                vectors. Default: False\n",
     "            vocab_size (int): Needs to be specified when pretrained word\n",
     "                embeddings are not used.\n",
     "            embed_dim (int): Dimension of word vectors. Needs to be specified\n",
     "                when pretrained word embeddings are not used. Default: 600\n",
     "            filter_sizes (List[int]): List of filter sizes. Default: [3, 4, 5]\n",
     "            num_filters (List[int]): List of number of filters, has the same\n",
     "                length as `filter_sizes`. Default: [100, 100, 100]\n",
     "            num_classes (int): Number of classes. Default: 35\n",
     "            dropout (float): Dropout rate. Default: 0.5\n",
     "        \"\"\"\n",
     "\n",
     "        super(TextCNN, self).__init__()\n",
     "        # Embedding layer: use the pretrained matrix when given, otherwise a\n",
     "        # freshly initialized one (padding index 0 = <pad>)\n",
     "        if pretrained_embedding is not None:\n",
     "            self.vocab_size, self.embed_dim = pretrained_embedding.shape\n",
     "            self.embedding = nn.Embedding.from_pretrained(pretrained_embedding,\n",
     "                                                          freeze=freeze_embedding)\n",
     "        else:\n",
     "            self.embed_dim = embed_dim\n",
     "            self.embedding = nn.Embedding(num_embeddings=vocab_size,\n",
     "                                          embedding_dim=self.embed_dim,\n",
     "                                          padding_idx=0,\n",
     "                                          max_norm=5.0)\n",
     "        # Conv Network: one Conv1d per filter size\n",
     "        self.conv1d_list = nn.ModuleList([\n",
     "            nn.Conv1d(in_channels=self.embed_dim,\n",
     "                      out_channels=num_filters[i],\n",
     "                      kernel_size=filter_sizes[i])\n",
     "            for i in range(len(filter_sizes))\n",
     "        ])\n",
     "        # Fully-connected layer and Dropout\n",
     "        self.fc = nn.Linear(np.sum(num_filters), num_classes)\n",
     "        self.dropout = nn.Dropout(p=dropout)\n",
     "\n",
     "    def forward(self, input_ids):\n",
     "        \"\"\"Perform a forward pass through the network.\n",
     "\n",
     "        Args:\n",
     "            input_ids (torch.Tensor): A tensor of token ids with shape\n",
     "                (batch_size, max_sent_length)\n",
     "\n",
     "        Returns:\n",
     "            logits (torch.Tensor): Output logits with shape (batch_size,\n",
     "                num_classes)\n",
     "        \"\"\"\n",
     "\n",
     "        # Get embeddings from `input_ids`. Output shape: (b, max_len, embed_dim)\n",
     "        x_embed = self.embedding(input_ids).float()\n",
     "\n",
     "        # Permute `x_embed` to match input shape requirement of `nn.Conv1d`.\n",
     "        # Output shape: (b, embed_dim, max_len)\n",
     "        x_reshaped = x_embed.permute(0, 2, 1)\n",
     "\n",
     "        # Apply CNN and ReLU. Output shape: (b, num_filters[i], L_out)\n",
     "        x_conv_list = [F.relu(conv1d(x_reshaped)) for conv1d in self.conv1d_list]\n",
     "\n",
     "        # Max pooling over time. Output shape: (b, num_filters[i], 1)\n",
     "        x_pool_list = [F.max_pool1d(x_conv, kernel_size=x_conv.shape[2])\n",
     "            for x_conv in x_conv_list]\n",
     "        \n",
     "        # Concatenate x_pool_list to feed the fully connected layer.\n",
     "        # Output shape: (b, sum(num_filters))\n",
     "        x_fc = torch.cat([x_pool.squeeze(dim=2) for x_pool in x_pool_list],\n",
     "                         dim=1)\n",
     "        \n",
     "        # Compute logits. Output shape: (b, num_classes)\n",
     "        logits = self.fc(self.dropout(x_fc))\n",
     "\n",
     "        return logits"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "source": [
    "def initilize_model(pretrained_embedding=None,\n",
    "                    freeze_embedding=False,\n",
    "                    vocab_size=None,\n",
    "                    embed_dim=600,\n",
    "                    filter_sizes=[3, 4, 5],\n",
    "                    num_filters=[100, 100, 100],\n",
    "                    num_classes=35,\n",
    "                    dropout=0.5,\n",
    "                    learning_rate=0.01):\n",
    "    \"\"\"Instantiate a CNN model and an optimizer.\"\"\"\n",
    "\n",
    "    assert (len(filter_sizes) == len(num_filters)), \"filter_sizes and \\\n",
    "    num_filters need to be of the same length.\"\n",
    "\n",
    "    # Instantiate CNN model\n",
    "    cnn_model = TextCNN(pretrained_embedding=pretrained_embedding,\n",
    "                        freeze_embedding=freeze_embedding,\n",
    "                        vocab_size=vocab_size,\n",
    "                        embed_dim=embed_dim,\n",
    "                        filter_sizes=filter_sizes,\n",
    "                        num_filters=num_filters,\n",
    "                        num_classes=35,\n",
    "                        dropout=0.5)\n",
    "    \n",
    "    # Send model to `device` (GPU/CPU)\n",
    "    cnn_model.to(device)\n",
    "\n",
    "    # Instantiate Adadelta optimizer\n",
    "    optimizer = optim.Adadelta(cnn_model.parameters(),\n",
    "                               lr=learning_rate,\n",
    "                               rho=0.95)\n",
    "\n",
    "    return cnn_model, optimizer"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "source": [
     "# Specify loss function: 35-way classification over TextCNN's raw logits\n",
     "loss_fn = nn.CrossEntropyLoss()\n",
    "\n",
    "def set_seed(seed_value=42):\n",
    "    \"\"\"Set seed for reproducibility.\"\"\"\n",
    "\n",
    "    random.seed(seed_value)\n",
    "    np.random.seed(seed_value)\n",
    "    torch.manual_seed(seed_value)\n",
    "    torch.cuda.manual_seed_all(seed_value)\n",
    "\n",
    "def train(model, optimizer, train_dataloader, val_dataloader=None, epochs=10):\n",
    "    \"\"\"Train the CNN model.\"\"\"\n",
    "    \n",
    "    # Tracking best validation accuracy\n",
    "    best_accuracy = 0\n",
    "\n",
    "    # Start training loop\n",
    "    print(\"Start training...\\n\")\n",
    "    print(f\"{'Epoch':^7} | {'Train Loss':^12} | {'Val Loss':^10} | {'Val Acc':^9} | {'Elapsed':^9}\")\n",
    "    print(\"-\"*60)\n",
    "\n",
    "    for epoch_i in range(epochs):\n",
    "        # =======================================\n",
    "        #               Training\n",
    "        # =======================================\n",
    "\n",
    "        # Tracking time and loss\n",
    "        t0_epoch = time.time()\n",
    "        total_loss = 0\n",
    "\n",
    "        # Put the model into the training mode\n",
    "        model.train()\n",
    "\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            # Load batch to GPU\n",
    "            b_input_ids, b_labels = tuple(t.to(device) for t in batch)\n",
    "\n",
    "            # Zero out any previously calculated gradients\n",
    "            model.zero_grad()\n",
    "\n",
    "            # Perform a forward pass. This will return logits.\n",
    "            logits = model(b_input_ids)\n",
    "\n",
    "            # Compute loss and accumulate the loss values\n",
    "            loss = loss_fn(logits, b_labels)\n",
    "            total_loss += loss.item()\n",
    "\n",
    "            # Perform a backward pass to calculate gradients\n",
    "            loss.backward(retain_graph=True)\n",
    "\n",
    "            # Update parameters\n",
    "            optimizer.step()\n",
    "\n",
    "        # Calculate the average loss over the entire training data\n",
    "        avg_train_loss = total_loss / len(train_dataloader)\n",
    "\n",
    "        # =======================================\n",
    "        #               Evaluation\n",
    "        # =======================================\n",
    "        if val_dataloader is not None:\n",
    "            # After the completion of each training epoch, measure the model's\n",
    "            # performance on our validation set.\n",
    "            val_loss, val_accuracy = evaluate(model, val_dataloader)\n",
    "\n",
    "            # Track the best accuracy\n",
    "            if val_accuracy > best_accuracy:\n",
    "                best_accuracy = val_accuracy\n",
    "\n",
    "            # Print performance over the entire training data\n",
    "            time_elapsed = time.time() - t0_epoch\n",
    "            print(f\"{epoch_i + 1:^7} | {avg_train_loss:^12.6f} | {val_loss:^10.6f} | {val_accuracy:^9.2f} | {time_elapsed:^9.2f}\")\n",
    "            \n",
    "    print(\"\\n\")\n",
    "    print(f\"Training complete! Best accuracy: {best_accuracy:.2f}%.\")\n",
    "\n",
    "def evaluate(model, val_dataloader):\n",
    "    \"\"\"After the completion of each training epoch, measure the model's\n",
    "    performance on our validation set.\n",
    "    \"\"\"\n",
    "    # Put the model into the evaluation mode. The dropout layers are disabled\n",
    "    # during the test time.\n",
    "    model.eval()\n",
    "\n",
    "    # Tracking variables\n",
    "    val_accuracy = []\n",
    "    val_loss = []\n",
    "\n",
    "    # For each batch in our validation set...\n",
    "    for batch in val_dataloader:\n",
    "        # Load batch to GPU\n",
    "        b_input_ids, b_labels = tuple(t.to(device) for t in batch)\n",
    "\n",
    "        # Compute logits\n",
    "        with torch.no_grad():\n",
    "            logits = model(b_input_ids)\n",
    "\n",
    "        # Compute loss\n",
    "        loss = loss_fn(logits, b_labels)\n",
    "        val_loss.append(loss.item())\n",
    "\n",
    "        # Get the predictions\n",
    "        preds = torch.argmax(logits, dim=1).flatten()\n",
    "\n",
    "        # Calculate the accuracy rate\n",
    "        accuracy = (preds == b_labels).cpu().numpy().mean() * 100\n",
    "        val_accuracy.append(accuracy)\n",
    "\n",
    "    # Compute the average accuracy and loss over the validation set.\n",
    "    val_loss = np.mean(val_loss)\n",
    "    val_accuracy = np.mean(val_accuracy)\n",
    "\n",
    "    return val_loss, val_accuracy"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "source": [
     "# CNN-rand baseline: embeddings are randomly initialized and learned from\n",
     "# scratch (no pretrained vectors passed)\n",
     "set_seed(42)\n",
     "cnn_rand, optimizer = initilize_model(vocab_size=len(word2idx),\n",
     "                                      embed_dim=600,\n",
     "                                      learning_rate=0.15,\n",
     "                                      dropout=0.5)\n",
     "train(cnn_rand, optimizer, train_dataloader, val_dataloader, epochs=50)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Start training...\n",
      "\n",
      " Epoch  |  Train Loss  |  Val Loss  |  Val Acc  |  Elapsed \n",
      "------------------------------------------------------------\n",
      "   1    |   2.497447   |  2.011351  |   49.79   |   6.18   \n",
      "   2    |   1.697468   |  1.638032  |   60.83   |   6.13   \n",
      "   3    |   1.379780   |  1.451010  |   64.76   |   6.12   \n",
      "   4    |   1.195839   |  1.330763  |   66.28   |   6.16   \n",
      "   5    |   1.047910   |  1.273216  |   67.17   |   6.14   \n",
      "   6    |   0.982870   |  1.221697  |   68.00   |   6.10   \n",
      "   7    |   0.892588   |  1.210463  |   67.45   |   6.14   \n",
      "   8    |   0.834642   |  1.170449  |   69.10   |   6.15   \n",
      "   9    |   0.760384   |  1.151182  |   68.28   |   6.09   \n",
      "  10    |   0.724623   |  1.131016  |   68.90   |   6.10   \n",
      "  11    |   0.675441   |  1.127759  |   68.28   |   6.11   \n",
      "  12    |   0.610536   |  1.113847  |   69.17   |   6.08   \n",
      "  13    |   0.588615   |  1.114644  |   69.17   |   6.11   \n",
      "  14    |   0.545643   |  1.112031  |   69.66   |   6.09   \n",
      "  15    |   0.515246   |  1.101907  |   69.59   |   6.09   \n",
      "  16    |   0.478643   |  1.103817  |   69.86   |   6.07   \n",
      "  17    |   0.464003   |  1.085640  |   69.38   |   6.07   \n",
      "  18    |   0.415320   |  1.087526  |   70.14   |   6.09   \n",
      "  19    |   0.406681   |  1.088654  |   70.41   |   6.12   \n",
      "  20    |   0.373125   |  1.077839  |   70.62   |   6.03   \n",
      "  21    |   0.343992   |  1.084965  |   70.21   |   6.21   \n",
      "  22    |   0.343446   |  1.077746  |   70.14   |   6.14   \n",
      "  23    |   0.321915   |  1.078973  |   71.10   |   6.08   \n",
      "  24    |   0.305062   |  1.068611  |   70.69   |   6.06   \n",
      "  25    |   0.291269   |  1.085740  |   70.69   |   6.03   \n",
      "  26    |   0.280293   |  1.091738  |   70.90   |   6.07   \n",
      "  27    |   0.269848   |  1.089476  |   70.48   |   6.09   \n",
      "  28    |   0.252258   |  1.102612  |   69.45   |   6.03   \n",
      "  29    |   0.243552   |  1.089278  |   70.76   |   6.03   \n",
      "  30    |   0.236306   |  1.100821  |   71.03   |   6.05   \n",
      "  31    |   0.228238   |  1.080657  |   70.34   |   6.05   \n",
      "  32    |   0.219503   |  1.096956  |   70.07   |   6.05   \n",
      "  33    |   0.204757   |  1.093190  |   70.07   |   6.06   \n",
      "  34    |   0.208107   |  1.103475  |   69.93   |   6.01   \n",
      "  35    |   0.195710   |  1.136210  |   70.28   |   6.13   \n",
      "  36    |   0.180567   |  1.113786  |   70.62   |   6.07   \n",
      "  37    |   0.180667   |  1.107094  |   70.76   |   6.09   \n",
      "  38    |   0.160783   |  1.123761  |   70.34   |   6.56   \n",
      "  39    |   0.168707   |  1.137086  |   70.34   |   7.92   \n",
      "  40    |   0.158505   |  1.133109  |   71.17   |   8.20   \n",
      "  41    |   0.148972   |  1.129310  |   70.62   |   7.66   \n",
      "  42    |   0.144266   |  1.142436  |   70.28   |   7.88   \n",
      "  43    |   0.150059   |  1.140780  |   70.62   |   8.30   \n",
      "  44    |   0.136826   |  1.170250  |   69.86   |   8.30   \n",
      "  45    |   0.134880   |  1.145475  |   70.69   |   8.29   \n",
      "  46    |   0.135432   |  1.169036  |   70.34   |   8.24   \n",
      "  47    |   0.126627   |  1.173251  |   70.07   |   8.25   \n",
      "  48    |   0.132866   |  1.174589  |   70.48   |   8.28   \n",
      "  49    |   0.117807   |  1.172928  |   69.38   |   8.26   \n",
      "  50    |   0.114951   |  1.187988  |   65.79   |   8.38   \n",
      "\n",
      "\n",
      "Training complete! Best accuracy: 71.17%.\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "source": [
     "# CNN-non-static: the pretrained vectors (w2v + fastText concatenation,\n",
     "# 600-d, from comb_vector) are fine-tuned during training.\n",
     "set_seed(42)\n",
     "cnn_non_static, optimizer = initilize_model(pretrained_embedding=embeddings,\n",
     "                                            freeze_embedding=False,\n",
     "                                            learning_rate=0.25,\n",
     "                                            dropout=0.5)\n",
     "train(cnn_non_static, optimizer, train_dataloader, val_dataloader, epochs=50)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Start training...\n",
      "\n",
      " Epoch  |  Train Loss  |  Val Loss  |  Val Acc  |  Elapsed \n",
      "------------------------------------------------------------\n",
      "   1    |   3.233609   |  3.102920  |   14.83   |   9.62   \n",
      "   2    |   3.000130   |  2.839119  |   26.34   |   9.50   \n",
      "   3    |   2.589180   |  2.395149  |   36.48   |   8.54   \n",
      "   4    |   2.370410   |  2.259009  |   40.83   |   8.58   \n",
      "   5    |   2.197382   |  2.104769  |   47.66   |   7.92   \n",
      "   6    |   2.004162   |  1.911902  |   51.79   |   8.52   \n",
      "   7    |   1.838230   |  1.785233  |   54.00   |   8.62   \n",
      "   8    |   1.683418   |  1.662916  |   56.41   |   8.69   \n",
      "   9    |   1.540736   |  1.553906  |   58.97   |   8.91   \n",
      "  10    |   1.445729   |  1.455858  |   61.31   |   8.46   \n",
      "  11    |   1.313923   |  1.381950  |   65.52   |   8.73   \n",
      "  12    |   1.271699   |  1.325069  |   65.86   |   8.50   \n",
      "  13    |   1.208172   |  1.274261  |   66.34   |   9.44   \n",
      "  14    |   1.122696   |  1.232972  |   67.52   |   9.83   \n",
      "  15    |   1.072591   |  1.218667  |   67.86   |   8.82   \n",
      "  16    |   1.045629   |  1.175132  |   68.83   |   8.66   \n",
      "  17    |   0.995894   |  1.158958  |   69.10   |   8.74   \n",
      "  18    |   0.926348   |  1.132024  |   69.52   |   8.97   \n",
      "  19    |   0.902230   |  1.120553  |   70.28   |   8.45   \n",
      "  20    |   0.864388   |  1.089332  |   70.62   |   9.17   \n",
      "  21    |   0.822673   |  1.080644  |   70.00   |   9.23   \n",
      "  22    |   0.767736   |  1.099430  |   70.07   |   9.11   \n",
      "  23    |   0.746451   |  1.083958  |   70.83   |   9.23   \n",
      "  24    |   0.744052   |  1.083411  |   70.97   |   8.86   \n",
      "  25    |   0.703061   |  1.080587  |   70.90   |   8.51   \n",
      "  26    |   0.685893   |  1.071633  |   70.55   |   8.58   \n",
      "  27    |   0.661810   |  1.083339  |   70.48   |   9.03   \n",
      "  28    |   0.645306   |  1.098551  |   70.69   |   9.07   \n",
      "  29    |   0.599286   |  1.086270  |   70.48   |   8.81   \n",
      "  30    |   0.597226   |  1.102637  |   70.21   |   9.22   \n",
      "  31    |   0.576231   |  1.094281  |   69.72   |   9.45   \n",
      "  32    |   0.569538   |  1.097109  |   69.86   |   8.71   \n",
      "  33    |   0.549441   |  1.099915  |   71.72   |   9.30   \n",
      "  34    |   0.526159   |  1.099483  |   70.83   |   9.48   \n",
      "  35    |   0.522199   |  1.088311  |   70.34   |   9.50   \n",
      "  36    |   0.501537   |  1.108921  |   70.07   |   9.27   \n",
      "  37    |   0.486878   |  1.088170  |   70.55   |   9.15   \n",
      "  38    |   0.457972   |  1.113752  |   71.10   |   9.54   \n",
      "  39    |   0.468009   |  1.134013  |   71.03   |   9.20   \n",
      "  40    |   0.450558   |  1.105012  |   70.90   |   9.32   \n",
      "  41    |   0.451050   |  1.100539  |   71.17   |   9.34   \n",
      "  42    |   0.400878   |  1.126514  |   70.55   |   9.42   \n",
      "  43    |   0.414308   |  1.127756  |   70.14   |   8.92   \n",
      "  44    |   0.392374   |  1.137445  |   71.72   |   9.02   \n",
      "  45    |   0.375177   |  1.115545  |   71.59   |   9.03   \n",
      "  46    |   0.374284   |  1.155022  |   70.41   |   9.01   \n",
      "  47    |   0.367539   |  1.138695  |   70.62   |   9.03   \n",
      "  48    |   0.351572   |  1.156657  |   71.10   |   8.95   \n",
      "  49    |   0.336747   |  1.163549  |   70.34   |   8.99   \n",
      "  50    |   0.329242   |  1.161286  |   71.17   |   8.99   \n",
      "\n",
      "\n",
      "Training complete! Best accuracy: 71.72%.\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "# CNN-static: fastText pretrained word vectors are used and frozen during training.\n",
    "set_seed(42)\n",
    "cnn_static, optimizer = initilize_model(\n",
    "    pretrained_embedding=embeddings,\n",
    "    freeze_embedding=True,   # keep the pretrained vectors fixed\n",
    "    learning_rate=0.25,\n",
    "    dropout=0.5,\n",
    ")\n",
    "train(cnn_static, optimizer, train_dataloader, val_dataloader, epochs=50)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "source": [
    "def predict(tokens, model=None, max_len=64):\n",
    "    \"\"\"Predict the class id for a single tokenized text.\n",
    "\n",
    "    Args:\n",
    "        tokens (list of str): tokenized input text.\n",
    "        model: trained CNN classifier; defaults to ``cnn_rand`` moved to CPU.\n",
    "        max_len (int): fixed sequence length fed to the CNN; longer inputs\n",
    "            are truncated, shorter ones padded with '<pad>'.\n",
    "\n",
    "    Returns:\n",
    "        torch.Tensor: 1-element LongTensor holding the predicted label id.\n",
    "    \"\"\"\n",
    "    if model is None:\n",
    "        # Resolve the default lazily: the original default expression\n",
    "        # (model=cnn_rand.to(\"cpu\")) moved the model to CPU as a side\n",
    "        # effect of merely *defining* this function.\n",
    "        model = cnn_rand.to(\"cpu\")\n",
    "\n",
    "    # Truncate, then pad and encode to a fixed-length id sequence.\n",
    "    # Without the truncation, inputs longer than max_len were passed\n",
    "    # through unshortened (negative pad count silently adds nothing).\n",
    "    tokens = tokens[:max_len]\n",
    "    padded_tokens = tokens + ['<pad>'] * (max_len - len(tokens))\n",
    "    input_id = [word2idx.get(token, word2idx['<unk>']) for token in padded_tokens]\n",
    "\n",
    "    input_id = torch.tensor(input_id).unsqueeze(dim=0)\n",
    "\n",
    "    # Inference mode: eval() disables dropout (the model is built with\n",
    "    # dropout=0.5, so train-mode predictions are stochastic), and\n",
    "    # no_grad() skips autograd bookkeeping. Use model(...) rather than\n",
    "    # calling .forward() directly so hooks are honored.\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        logits = model(input_id)\n",
    "\n",
    "    return torch.argmax(logits, dim=1).flatten()"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "source": [
    "# Build the submission file: predict one label per test example.\n",
    "test_set = text_1_gram[labels.shape[0]:]\n",
    "reverse_label = {id_: label for label, id_ in label_map.items()}\n",
    "# Collect rows in a plain list and build the DataFrame once: appending\n",
    "# rows with output.loc[i] = ... inside a loop is quadratic in the number\n",
    "# of rows. .item() extracts the scalar label id from the 1-element tensor.\n",
    "records = [{'id': i, 'label': reverse_label[predict(text).item()]}\n",
    "           for i, text in tqdm(enumerate(test_set))]\n",
    "output = pd.DataFrame(records, columns=['id', 'label'])\n",
    "output.to_csv(r'result.csv', index=False)"
   ],
   "outputs": [
    {
     "output_type": "display_data",
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "8b1a42e1bdf14e6b85afb0c362ba7f16"
      },
      "text/plain": [
       "0it [00:00, ?it/s]"
      ]
     },
     "metadata": {}
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.7.6",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.7.6 64-bit ('base': conda)"
  },
  "interpreter": {
   "hash": "7b4c34fa5edc2b5c200e84280da452af41185f443fff3e767a73b82cf30c2550"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}