{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import torch\n",
    "from rnn.dataset import Dictionary, MyDataset\n",
    "import pandas as pd\n",
    "import random\n",
    "import torch.autograd as autograd\n",
    "import torch.nn as nn\n",
    "import torch.functional as F\n",
    "import torch.optim as optim\n",
    "\n",
    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Const(object):\n",
    "    MAX_VOCAB_SIZE = 10000\n",
    "    MAX_SAMPLE_LENGTH = 30\n",
    "    BATCH_SIZE = 32\n",
    "    VAL_BATCH_SIZE = 32\n",
    "const = Const()\n",
    "\n",
    "# Load dataset, transform word to id\n",
    "all_samples = pd.read_csv('./data/lstm.cvs').to_dict('records')\n",
    "all_text = []\n",
    "for sample in all_samples:\n",
    "    if isinstance(sample['texts'], str):\n",
    "        sample['processed_text'] = sample['texts'].split(' ')\n",
    "    else:\n",
    "        # special treat for empty tweets\n",
    "        sample['processed_text'] = ['null']\n",
    "    all_text.extend(sample['processed_text'])\n",
    "dictionary = Dictionary(all_text, max_vocab_size=const.MAX_VOCAB_SIZE)\n",
    "\n",
    "for idx, sample in enumerate(all_samples):\n",
    "    sample['processed_id'] = [dictionary.to_idx(word) for word in sample['processed_text']]\n",
    "\n",
    "# train & test split\n",
    "random.shuffle(all_samples)\n",
    "train_samples = all_samples[:int(len(all_samples) * 0.8)]\n",
    "test_samples = all_samples[int(len(all_samples) * 0.8):]\n",
    "\n",
    "train_dataset = MyDataset(train_samples, dictionary.to_idx('_unk'), const.MAX_SAMPLE_LENGTH)\n",
    "val_dataset = MyDataset(train_samples, dictionary.to_idx('_unk'), const.MAX_SAMPLE_LENGTH)\n",
    "train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)\n",
    "val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=const.VAL_BATCH_SIZE, shuffle=False, num_workers=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "sample = iter(train_dataloader).next()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LSTMClassifier(nn.Module):\n",
    "\n",
    "    def __init__(self, max_vocab_size, embed_dim, hidden_dim,\n",
    "                 dropout_rate=0.5, num_layers=1, rnn_type='LSTM', pretrained_embeddings=None, concat_feature=False):\n",
    "        super(LSTMClassifier, self).__init__()\n",
    "        self.embedding = nn.Embedding(max_vocab_size, embed_dim)\n",
    "        self.rnn_type = rnn_type\n",
    "        if rnn_type in ['LSTM', 'GRU']:\n",
    "            self.rnn = getattr(nn, rnn_type)(embed_dim, hidden_dim, num_layers, dropout=dropout_rate)\n",
    "        else:\n",
    "            try:\n",
    "                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n",
    "            except KeyError:\n",
    "                raise ValueError(\"\"\"An invalid option for `--model` was supplied,\n",
    "                                 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n",
    "            self.rnn = nn.RNN(embed_dim, hidden_dim, num_layers, nonlinearity=nonlinearity, dropout=dropout)\n",
    "        self.output_dropout = nn.Dropout(dropout_rate)\n",
    "        # 2-classes classification\n",
    "        if concat_feature is False:\n",
    "            self.hidden2out = nn.Linear(hidden_dim, 2)\n",
    "        else:\n",
    "            self.hidden2out = nn.Linear(hidden_dim + 2, 2)\n",
    "        self.concat_feature = concat_feature\n",
    "\n",
    "        if pretrained_embeddings is not None:\n",
    "            self.embedding.weight.data.copy_(torch.from_numpy(pretrained_embeddings))\n",
    "        self.cross_entropy_loss = nn.CrossEntropyLoss()\n",
    "\n",
    "    def forward(self, sample):\n",
    "        '''\n",
    "        Input: sample\n",
    "            key: word_id, retweet_count, favorite_count, text_length, label\n",
    "        Output: output\n",
    "            key: logit\n",
    "        '''\n",
    "        # batch_size x length x embedding_size\n",
    "        embeds = self.embedding(sample['word_id'])\n",
    "        packed_input = pack_padded_sequence(embeds, sample['length'], batch_first=True, enforce_sorted=False)\n",
    "        output, hidden = self.rnn(packed_input)\n",
    "\n",
    "        if self.rnn_type == 'LSTM':\n",
    "            hidden = hidden[0][-1]\n",
    "        else:\n",
    "            hidden = hidden[-1]\n",
    "        \n",
    "        if self.concat_feature:\n",
    "            hidden = torch.cat(\n",
    "                [\n",
    "                    hidden,\n",
    "                    sample['retweet_count'].reshape(hidden.shape[0], 1),\n",
    "                    sample['favorite_count'].reshape(hidden.shape[0], 1),\n",
    "                ],\n",
    "                dim=1\n",
    "            )\n",
    "\n",
    "        output = self.output_dropout(hidden)\n",
    "        output = self.hidden2out(output)\n",
    "        output = {'logit': output}\n",
    "        return output\n",
    "    \n",
    "    def cal_loss(self, sample, output):\n",
    "        '''\n",
    "        Input: sample, output\n",
    "        Output: loss (scalar)\n",
    "        '''\n",
    "        return self.cross_entropy_loss(output['logit'], sample['label'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([32, 128])"
      ]
     },
     "execution_count": 87,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# NOTE(review): `hidden` is not defined by any cell above -- it leaks from\n",
     "# kernel state (a debugging run), so this cell fails on Restart & Run All.\n",
     "hidden.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([32, 130])"
      ]
     },
     "execution_count": 95,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# NOTE(review): scratch cell -- depends on `hidden` and `sample` left in the\n",
     "# kernel; per the recorded outputs, 130 = 128 (hidden) + 2 appended features.\n",
     "torch.cat(\n",
     "    [\n",
     "        hidden,\n",
     "        sample['retweet_count'].reshape(hidden.shape[0], 1),\n",
     "        sample['favorite_count'].reshape(hidden.shape[0], 1),\n",
     "    ],\n",
     "    dim=1\n",
     ").shape\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.6899, grad_fn=<NllLossBackward>)"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "PackedSequence(data=tensor([[ 0.0405,  0.0625, -0.0175,  ..., -0.1562,  0.0019, -0.0698],\n",
       "        [-0.1963,  0.1338,  0.1191,  ..., -0.0339,  0.1650, -0.0830],\n",
       "        [ 0.0239, -0.0461,  0.0039,  ..., -0.2695, -0.0688, -0.2754],\n",
       "        ...,\n",
       "        [-0.0315,  0.2637,  0.1001,  ..., -0.1992,  0.1162, -0.0752],\n",
       "        [-0.0030,  0.0762,  0.0515,  ..., -0.0654,  0.0430, -0.0286],\n",
       "        [-0.3594,  0.0020, -0.0557,  ..., -0.4785,  0.0081,  0.1816]],\n",
       "       grad_fn=<PackPaddedSequenceBackward>), batch_sizes=tensor([32, 31, 29, 27, 26, 26, 23, 20, 19, 16, 12, 10,  6,  5,  4,  3,  3,  3,\n",
       "         3,  3,  3,  3,  2,  2,  2,  1]), sorted_indices=tensor([16,  2,  0, 14, 19, 15,  7, 17, 27, 13,  9, 12, 25, 24, 29, 30,  4,  5,\n",
       "        22, 26, 23, 20,  3, 21, 18,  8, 10,  1, 28,  6, 31, 11]), unsorted_indices=tensor([ 2, 27,  1, 22, 16, 17, 29,  6, 25, 10, 26, 31, 11,  9,  3,  5,  0,  7,\n",
       "        24,  4, 21, 23, 18, 20, 13, 12, 19,  8, 28, 14, 15, 30]))"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
