{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n",
    "import torch.optim as optim\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0105 02:43:51.116637 139743199024896 file_utils.py:39] PyTorch version 1.3.0 available.\n",
      "I0105 02:43:51.389599 139743199024896 modeling_xlnet.py:194] Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\n"
     ]
    }
   ],
   "source": [
    "from transformers import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tqdm import tqdm_notebook as tqdm\n",
    "import preprocessor as p\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import random\n",
    "from collections import Counter\n",
    "import spacy\n",
    "from tqdm import tqdm, tqdm_notebook, tnrange\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RecArch(nn.Module):\n",
    "    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, num_layers, bidir, rnnType,device):\n",
    "        super(RecArch, self).__init__()\n",
    "        \n",
    "        self.vocab_size = vocab_size\n",
    "        self.embedding_dim = embedding_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.output_dim = output_dim\n",
    "        self.num_layers = num_layers\n",
    "        self.device = device\n",
    "        self.rnnType = rnnType\n",
    "        self.bidirectional = bidir\n",
    "        \n",
    "        if self.bidirectional:\n",
    "            self.numDirs = 2\n",
    "        else:\n",
    "            self.numDirs = 1\n",
    "        \n",
    "        self.emb = nn.Embedding(self.vocab_size, embedding_dim)\n",
    "        \n",
    "        if self.rnnType == 'lstm':\n",
    "            self.recNN = nn.LSTM(embedding_dim,hidden_dim, num_layers,batch_first=True,bidirectional=self.bidirectional)\n",
    "            \n",
    "        if self.rnnType == 'gru':\n",
    "            self.recNN = nn.GRU(embedding_dim, hidden_dim, num_layers, batch_first=True,bidirectional=self.bidirectional)\n",
    "            \n",
    "        if self.rnnType == 'rnn':\n",
    "            self.recNN = nn.RNN(embedding_dim, hidden_dim, num_layers, batch_first=True, nonlinearity='tanh',bidirectional=self.bidirectional)\n",
    "        \n",
    "        self.fc = nn.Linear(self.numDirs*hidden_dim,output_dim)\n",
    "    \n",
    "    def forward(self,x,encMode=False):\n",
    "        embs = self.emb(x)\n",
    "        embs = embs.view(x.size(0),-1,self.embedding_dim).to(self.device)\n",
    "        \n",
    "        h0 = Variable(torch.zeros(self.numDirs*self.num_layers,x.size(0),self.hidden_dim),requires_grad=True).to(self.device)\n",
    "        \n",
    "        if self.rnnType == 'lstm':        \n",
    "            c0 = Variable(torch.zeros(self.numDirs*self.num_layers,x.size(0),self.hidden_dim),requires_grad=True).to(self.device)\n",
    "            \n",
    "            out,(hn,cn) = self.recNN(embs,(h0,c0))\n",
    "        \n",
    "        else:\n",
    "            out, hn = self.recNN(embs, h0)\n",
    "        \n",
    "#         print(out[:,-1,:].shape)\n",
    "        if not encMode:\n",
    "            out = self.fc(out[:, -1, :])\n",
    "        else:\n",
    "            out = out[:,-1,:]\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RecAttnArch(nn.Module):\n",
    "    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, num_layers, bidir, rnnType, attnType,device):\n",
    "        super(RecAttnArch, self).__init__()\n",
    "        \n",
    "        self.vocab_size = vocab_size\n",
    "        self.embedding_dim = embedding_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.output_dim = output_dim\n",
    "        self.num_layers = num_layers\n",
    "        self.device = device\n",
    "        self.rnnType = rnnType\n",
    "        self.attnType = attnType\n",
    "        self.bidirectional = bidir\n",
    "        \n",
    "        if self.bidirectional:\n",
    "            self.numDirs = 2\n",
    "        else:\n",
    "            self.numDirs = 1\n",
    "        \n",
    "        self.emb = nn.Embedding(self.vocab_size, embedding_dim)\n",
    "        \n",
    "        if self.rnnType == 'lstm':\n",
    "            self.recNN = nn.LSTM(embedding_dim,hidden_dim, num_layers,batch_first=True,bidirectional=self.bidirectional)\n",
    "            \n",
    "        if self.rnnType == 'gru':\n",
    "            self.recNN = nn.GRU(embedding_dim, hidden_dim, num_layers, batch_first=True,bidirectional=self.bidirectional)\n",
    "            \n",
    "        if self.rnnType == 'rnn':\n",
    "            self.recNN = nn.RNN(embedding_dim, hidden_dim, num_layers, batch_first=True, nonlinearity='tanh',bidirectional=self.bidirectional)\n",
    "        \n",
    "        self.query_vector = nn.Parameter(torch.rand(hidden_dim*self.numDirs,1)).float()\n",
    "        \n",
    "        self.attnWgtMatrixSize = [self.numDirs*self.hidden_dim, self.numDirs*self.hidden_dim]\n",
    "        self.attnWgtMatrix = nn.Parameter(torch.randn(self.attnWgtMatrixSize).float()) # Multiplicative Attention\n",
    "    \n",
    "        self.softmax = nn.Softmax(dim=1)\n",
    "        \n",
    "        if self.attnType == 'dot':\n",
    "            self.fc = nn.Linear(self.numDirs*self.hidden_dim, output_dim)\n",
    "        \n",
    "        if self.attnType == 'self':\n",
    "            self.fc = nn.Linear(self.numDirs*30*self.hidden_dim, output_dim)\n",
    "    \n",
    "    \n",
    "    def forward(self,x,encMode=False):\n",
    "        embs = self.emb(x)\n",
    "        embs = embs.view(x.size(0),-1,self.embedding_dim).to(self.device)\n",
    "        \n",
    "        h0 = Variable(torch.zeros(self.numDirs*self.num_layers,x.size(0),self.hidden_dim),requires_grad=True).to(self.device)\n",
    "        \n",
    "        if self.rnnType == 'lstm':        \n",
    "            c0 = Variable(torch.zeros(self.numDirs*self.num_layers,x.size(0),self.hidden_dim),requires_grad=True).to(self.device)\n",
    "            \n",
    "            out,(hn,cn) = self.recNN(embs,(h0,c0))\n",
    "        \n",
    "        else:\n",
    "            out, hn = self.recNN(embs, h0)\n",
    "        \n",
    "        if self.attnType == 'dot':\n",
    "            Hw = out\n",
    "            attn_weights = self.softmax(Hw.matmul(self.query_vector))\n",
    "\n",
    "            out = out.mul(attn_weights)\n",
    "            context_vector = torch.sum(out,dim=1)\n",
    "            \n",
    "            fc_out = context_vector\n",
    "            \n",
    "        if self.attnType == 'self':\n",
    "            queryMatrix = out\n",
    "            keyMatrix = out.permute(0,2,1)\n",
    "            \n",
    "            attnScores = torch.bmm( torch.matmul(queryMatrix,self.attnWgtMatrix), keyMatrix )\n",
    "            attnScores = F.softmax(attnScores, dim=2)\n",
    "            hidden_matrix = torch.bmm(attnScores, queryMatrix)\n",
    "            \n",
    "            fc_out = hidden_matrix.view(-1, hidden_matrix.size()[1]*hidden_matrix.size()[2])\n",
    "        \n",
    "        if encMode:\n",
    "            return fc_out\n",
    "            \n",
    "        else:\n",
    "            return self.fc(fc_out)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AttentionTextEncoder(nn.Module):\n",
    "    \"\"\"Wraps RecAttnArch with tweet preprocessing, data loading and training.\n",
    "\n",
    "    Args:\n",
    "        encoderType: 'attn' builds a RecAttnArch text encoder.\n",
    "        params: dict with keys 'embedding_dim', 'hidden_dim', 'output_dim',\n",
    "            'num_layers', 'bidir', 'rnnType', 'attnType'.\n",
    "        X: iterable of raw tweet strings.\n",
    "        y: labels aligned with X.\n",
    "        device: torch device (or device string) for the encoder.\n",
    "    \"\"\"\n",
    "    def __init__(self, encoderType, params, X, y, device):\n",
    "        super(AttentionTextEncoder, self).__init__()\n",
    "\n",
    "        self.encoderType = encoderType\n",
    "        self.dataProcessed = False\n",
    "        # NOTE(review): seq_dim is recorded but trainModel hard-codes 30 -- confirm.\n",
    "        if params['attnType'] == 'self':\n",
    "            self.seq_dim = 30\n",
    "        else:\n",
    "            self.seq_dim = 1\n",
    "\n",
    "        # Builds the vocabulary and the train/validation DataLoaders.\n",
    "        self.preprocess(X, y)\n",
    "\n",
    "        if encoderType == 'attn':\n",
    "            self.textEncoder = RecAttnArch(len(self.word2idx), params['embedding_dim'], params['hidden_dim'],\n",
    "                                           params['output_dim'], params['num_layers'], params['bidir'],\n",
    "                                           params['rnnType'], params['attnType'], device).to(device)\n",
    "\n",
    "    def tokenize(self, text):\n",
    "        \"\"\"Strip URLs/mentions/emoji/hashtags from a tweet and split on whitespace.\"\"\"\n",
    "        p.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI, p.OPT.HASHTAG)\n",
    "        return p.tokenize(text).split()\n",
    "\n",
    "    def indexer(self, split_text):\n",
    "        \"\"\"Map tokens to vocabulary ids, falling back to the _UNK id.\"\"\"\n",
    "        unk = self.word2idx['_UNK']\n",
    "        return [self.word2idx.get(w.lower(), unk) for w in split_text]\n",
    "\n",
    "    def pad_data(self, s, maxlen=30):\n",
    "        \"\"\"Right-pad (or truncate) an id sequence to maxlen with the _PAD id (0).\"\"\"\n",
    "        padded = np.zeros((maxlen,), dtype=np.int64)\n",
    "        if len(s) > maxlen:\n",
    "            padded[:] = s[:maxlen]\n",
    "        else:\n",
    "            padded[:len(s)] = s\n",
    "        return padded\n",
    "\n",
    "    def preprocess(self, X, y):\n",
    "        \"\"\"Tokenize X, build word2idx, and create train/validation DataLoaders.\"\"\"\n",
    "        clean_text = [self.tokenize(x) for x in X]\n",
    "\n",
    "        words = Counter()\n",
    "        for sent in tqdm(clean_text):\n",
    "            words.update(w.lower() for w in sent)\n",
    "\n",
    "        # Most frequent words first; ids 0/1 are reserved for padding and unknowns.\n",
    "        words = sorted(words, key=words.get, reverse=True)\n",
    "        words = ['_PAD', '_UNK'] + words\n",
    "\n",
    "        self.word2idx = {o: i for i, o in enumerate(words)}\n",
    "\n",
    "        sent2idx = [torch.tensor(self.indexer(x)) for x in clean_text]\n",
    "        paddedX = [self.pad_data(x) for x in sent2idx]\n",
    "\n",
    "        train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(paddedX, y, random_state=2018)\n",
    "\n",
    "        train_inputs = torch.tensor(train_inputs)\n",
    "        validation_inputs = torch.tensor(validation_inputs)\n",
    "\n",
    "        train_labels = torch.tensor(train_labels)\n",
    "        validation_labels = torch.tensor(validation_labels)\n",
    "\n",
    "        self.batch_size = 100\n",
    "\n",
    "        train_data = TensorDataset(train_inputs, train_labels)\n",
    "        train_sampler = RandomSampler(train_data)\n",
    "        self.train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        validation_data = TensorDataset(validation_inputs, validation_labels)\n",
    "        validation_sampler = SequentialSampler(validation_data)\n",
    "        self.validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        self.dataProcessed = True\n",
    "        return self.dataProcessed\n",
    "\n",
    "    def forward(self, text):\n",
    "        \"\"\"Encode one raw tweet into the attention-pooled features.\"\"\"\n",
    "        x = self.tokenize(text)\n",
    "        x = self.indexer(x)\n",
    "        x = torch.tensor(self.pad_data(x))\n",
    "        # Fix: the original referenced a module-level `device` global here.\n",
    "        x = x.view(-1, 30, 1).to(self.textEncoder.device)\n",
    "        out = self.textEncoder(x, True)\n",
    "        return out\n",
    "\n",
    "    def trainModel(self):\n",
    "        \"\"\"Train the encoder, keeping the best validation state_dict in optimalParams.\"\"\"\n",
    "        # Fix: the original only assigned `device` when CUDA was available (and\n",
    "        # then forced 'cpu'), raising NameError on CPU-only machines.\n",
    "        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "        model = self.textEncoder.to(device)\n",
    "        model.device = device\n",
    "        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n",
    "        criterion = torch.nn.CrossEntropyLoss()\n",
    "\n",
    "        count = 0\n",
    "        seq_dim = 30  # padded sequence length produced by pad_data\n",
    "        num_epochs = 200\n",
    "\n",
    "        maxAcc = 0\n",
    "\n",
    "        for epoch in tqdm(range(num_epochs)):\n",
    "            for text, label in self.train_dataloader:\n",
    "                text = text.view(-1, seq_dim, 1).to(device)\n",
    "                label = label.to(device)\n",
    "\n",
    "                optimizer.zero_grad()\n",
    "                outputs = model(text)\n",
    "\n",
    "                loss = criterion(outputs, label)\n",
    "\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                count += 1\n",
    "\n",
    "                # Evaluate on the validation set every 50 updates.\n",
    "                if count % 50 == 0:\n",
    "                    allLabels = []\n",
    "                    allPreds = []\n",
    "\n",
    "                    # no_grad: evaluation must not build autograd graphs.\n",
    "                    with torch.no_grad():\n",
    "                        for vtext, vlabel in self.validation_dataloader:\n",
    "                            vtext = vtext.view(-1, seq_dim, 1).to(device)\n",
    "                            vlabel = vlabel.to(device)\n",
    "\n",
    "                            predicted = model(vtext)\n",
    "                            predicted = torch.softmax(predicted, 1)\n",
    "                            predicted = torch.max(predicted, 1)[1].cpu().numpy().tolist()\n",
    "                            allLabels += vlabel.cpu().numpy().tolist()\n",
    "                            allPreds += predicted\n",
    "\n",
    "                    valacc = accuracy_score(allLabels, allPreds)\n",
    "                    recscore = recall_score(allLabels, allPreds, average='macro')\n",
    "                    precscore = precision_score(allLabels, allPreds, average='macro')\n",
    "                    f1score = f1_score(allLabels, allPreds, average='macro')\n",
    "                    cr = classification_report(allLabels, allPreds)\n",
    "                    print(f'acc: {valacc} recall {recscore} prec: {precscore} f1: {f1score}')\n",
    "                    print(cr)\n",
    "\n",
    "                    if valacc > maxAcc:\n",
    "                        maxAcc = valacc\n",
    "                        self.optimalParams = model.state_dict()\n",
    "\n",
    "        self.textEncoder = model.to('cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TextEncoder(nn.Module):\n",
    "    \"\"\"Wraps RecArch with tweet preprocessing, data loading and training.\n",
    "\n",
    "    Args:\n",
    "        encoderType: 'rnn' builds a RecArch text encoder.\n",
    "        params: dict with keys 'embedding_dim', 'hidden_dim', 'output_dim',\n",
    "            'num_layers', 'bidir', 'rnnType'.\n",
    "        X: iterable of raw tweet strings.\n",
    "        y: labels aligned with X.\n",
    "        device: torch device (or device string) for the encoder.\n",
    "    \"\"\"\n",
    "    def __init__(self, encoderType, params, X, y, device):\n",
    "        super(TextEncoder, self).__init__()\n",
    "\n",
    "        self.encoderType = encoderType\n",
    "        self.dataProcessed = False\n",
    "\n",
    "        # Builds the vocabulary and the train/validation DataLoaders.\n",
    "        self.preprocess(X, y)\n",
    "\n",
    "        if encoderType == 'rnn':\n",
    "            self.textEncoder = RecArch(len(self.word2idx), params['embedding_dim'], params['hidden_dim'],\n",
    "                                       params['output_dim'], params['num_layers'], params['bidir'],\n",
    "                                       params['rnnType'], device).to(device)\n",
    "\n",
    "    def tokenize(self, text):\n",
    "        \"\"\"Strip URLs/mentions/emoji/hashtags from a tweet and split on whitespace.\"\"\"\n",
    "        p.set_options(p.OPT.URL, p.OPT.MENTION, p.OPT.EMOJI, p.OPT.HASHTAG)\n",
    "        return p.tokenize(text).split()\n",
    "\n",
    "    def indexer(self, split_text):\n",
    "        \"\"\"Map tokens to vocabulary ids, falling back to the _UNK id.\"\"\"\n",
    "        unk = self.word2idx['_UNK']\n",
    "        return [self.word2idx.get(w.lower(), unk) for w in split_text]\n",
    "\n",
    "    def pad_data(self, s, maxlen=30):\n",
    "        \"\"\"Right-pad (or truncate) an id sequence to maxlen with the _PAD id (0).\"\"\"\n",
    "        padded = np.zeros((maxlen,), dtype=np.int64)\n",
    "        if len(s) > maxlen:\n",
    "            padded[:] = s[:maxlen]\n",
    "        else:\n",
    "            padded[:len(s)] = s\n",
    "        return padded\n",
    "\n",
    "    def preprocess(self, X, y):\n",
    "        \"\"\"Tokenize X, build word2idx, and create train/validation DataLoaders.\"\"\"\n",
    "        clean_text = [self.tokenize(x) for x in X]\n",
    "\n",
    "        words = Counter()\n",
    "        for sent in tqdm(clean_text):\n",
    "            words.update(w.lower() for w in sent)\n",
    "\n",
    "        # Most frequent words first; ids 0/1 are reserved for padding and unknowns.\n",
    "        words = sorted(words, key=words.get, reverse=True)\n",
    "        words = ['_PAD', '_UNK'] + words\n",
    "\n",
    "        self.word2idx = {o: i for i, o in enumerate(words)}\n",
    "\n",
    "        sent2idx = [torch.tensor(self.indexer(x)) for x in clean_text]\n",
    "        paddedX = [self.pad_data(x) for x in sent2idx]\n",
    "\n",
    "        train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(paddedX, y, random_state=2018)\n",
    "\n",
    "        train_inputs = torch.tensor(train_inputs)\n",
    "        validation_inputs = torch.tensor(validation_inputs)\n",
    "\n",
    "        train_labels = torch.tensor(train_labels)\n",
    "        validation_labels = torch.tensor(validation_labels)\n",
    "\n",
    "        self.batch_size = 100\n",
    "\n",
    "        train_data = TensorDataset(train_inputs, train_labels)\n",
    "        train_sampler = RandomSampler(train_data)\n",
    "        self.train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        validation_data = TensorDataset(validation_inputs, validation_labels)\n",
    "        validation_sampler = SequentialSampler(validation_data)\n",
    "        self.validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        self.dataProcessed = True\n",
    "        return self.dataProcessed\n",
    "\n",
    "    def forward(self, text):\n",
    "        \"\"\"Encode one raw tweet into the last-step RNN features.\"\"\"\n",
    "        x = self.tokenize(text)\n",
    "        x = self.indexer(x)\n",
    "        x = torch.tensor(self.pad_data(x))\n",
    "        # Fix: the original referenced a module-level `device` global here.\n",
    "        x = x.view(-1, 30, 1).to(self.textEncoder.device)\n",
    "        out = self.textEncoder(x, True)\n",
    "        return out\n",
    "\n",
    "    def trainModel(self):\n",
    "        \"\"\"Train the encoder, keeping the best validation state_dict in optimalParams.\"\"\"\n",
    "        # Fix: the original relied on a module-level `device` global, which\n",
    "        # raises NameError on a fresh kernel where it is undefined.\n",
    "        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "        model = self.textEncoder.to(device)\n",
    "        model.device = device\n",
    "        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n",
    "        criterion = torch.nn.CrossEntropyLoss()\n",
    "\n",
    "        count = 0\n",
    "        seq_dim = 30  # padded sequence length produced by pad_data\n",
    "        num_epochs = 200\n",
    "\n",
    "        maxAcc = 0\n",
    "\n",
    "        for epoch in tqdm(range(num_epochs)):\n",
    "            for text, label in self.train_dataloader:\n",
    "                text = text.view(-1, seq_dim, 1).to(device)\n",
    "                label = label.to(device)\n",
    "\n",
    "                optimizer.zero_grad()\n",
    "                outputs = model(text)\n",
    "\n",
    "                loss = criterion(outputs, label)\n",
    "\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                count += 1\n",
    "\n",
    "                # Evaluate on the validation set every 50 updates.\n",
    "                if count % 50 == 0:\n",
    "                    allLabels = []\n",
    "                    allPreds = []\n",
    "\n",
    "                    # no_grad: evaluation must not build autograd graphs.\n",
    "                    with torch.no_grad():\n",
    "                        for vtext, vlabel in self.validation_dataloader:\n",
    "                            vtext = vtext.view(-1, seq_dim, 1).to(device)\n",
    "                            vlabel = vlabel.to(device)\n",
    "\n",
    "                            predicted = model(vtext)\n",
    "                            predicted = torch.softmax(predicted, 1)\n",
    "                            predicted = torch.max(predicted, 1)[1].cpu().numpy().tolist()\n",
    "                            allLabels += vlabel.cpu().numpy().tolist()\n",
    "                            allPreds += predicted\n",
    "\n",
    "                    valacc = accuracy_score(allLabels, allPreds)\n",
    "                    recscore = recall_score(allLabels, allPreds, average='macro')\n",
    "                    precscore = precision_score(allLabels, allPreds, average='macro')\n",
    "                    f1score = f1_score(allLabels, allPreds, average='macro')\n",
    "                    cr = classification_report(allLabels, allPreds)\n",
    "                    print(f'acc: {valacc} recall {recscore} prec: {precscore} f1: {f1score}')\n",
    "                    print(cr)\n",
    "\n",
    "                    if valacc > maxAcc:\n",
    "                        maxAcc = valacc\n",
    "                        self.optimalParams = model.state_dict()\n",
    "\n",
    "        self.textEncoder = model.to('cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertTextEncoder(nn.Module):\n",
    "    \"\"\"BERT-based tweet encoder/classifier.\n",
    "\n",
    "    encoderType 'bert' uses BertModel plus a linear head (self.fc);\n",
    "    'bertSC' uses BertForSequenceClassification directly.\n",
    "    \"\"\"\n",
    "    def __init__(self, encoderType, params, X, y, device):\n",
    "        super(BertTextEncoder, self).__init__()\n",
    "\n",
    "        self.encoderType = encoderType\n",
    "        self.dataProcessed = False\n",
    "        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "        self.preprocess(X, y)\n",
    "\n",
    "        if encoderType == 'bert':\n",
    "            self.textEncoder = BertModel.from_pretrained('bert-base-uncased', num_labels=4,\n",
    "                                                         output_hidden_states=True).to(device)\n",
    "            # Linear head over the 768-dim pooled output; 4 classes.\n",
    "            self.fc = torch.nn.Linear(768, 4).to(device)\n",
    "        elif encoderType == 'bertSC':\n",
    "            self.textEncoder = BertForSequenceClassification.from_pretrained('bert-base-uncased',\n",
    "                                                                             num_labels=4).to(device)\n",
    "        else:\n",
    "            # Fail fast: the original silently left self.textEncoder undefined.\n",
    "            raise ValueError(f'unknown encoderType: {encoderType!r}')\n",
    "\n",
    "    def pad_data(self, s, maxlen=30):\n",
    "        \"\"\"Right-pad (or truncate) a token-id sequence to maxlen with zeros.\"\"\"\n",
    "        padded = np.zeros((maxlen,), dtype=np.int64)\n",
    "        if len(s) > maxlen:\n",
    "            padded[:] = s[:maxlen]\n",
    "        else:\n",
    "            padded[:len(s)] = s\n",
    "        return padded\n",
    "\n",
    "    def forward(self, text):\n",
    "        \"\"\"Encode one raw string; returns outputs[1] flattened.\n",
    "\n",
    "        NOTE(review): outputs[1] is the pooled [CLS] vector for BertModel; for\n",
    "        'bertSC' outputs[1] is not a pooled vector -- confirm this method is\n",
    "        only used with encoderType 'bert'.\n",
    "        \"\"\"\n",
    "        x = torch.tensor(self.tokenizer.encode(text, add_special_tokens=True)).unsqueeze(0)\n",
    "        outputs = self.textEncoder(x)\n",
    "        return outputs[1].reshape(-1)\n",
    "\n",
    "    def preprocess(self, X, y):\n",
    "        \"\"\"Tokenize X with the BERT tokenizer and build train/validation DataLoaders.\"\"\"\n",
    "        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n",
    "        sent2idx = [torch.tensor(tokenizer.encode(x, add_special_tokens=True)) for x in X]\n",
    "        paddedX = [self.pad_data(x) for x in sent2idx]\n",
    "\n",
    "        # Attention mask: 1 for real tokens, 0 for padding (pad id is 0).\n",
    "        attention_masks = []\n",
    "        for sent in paddedX:\n",
    "            att_mask = [int(token_id > 0) for token_id in sent]\n",
    "            attention_masks.append(att_mask)\n",
    "\n",
    "        # Identical random_state keeps inputs and masks aligned across the split.\n",
    "        train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(paddedX, y, random_state=2018)\n",
    "        train_masks, validation_masks, _, _ = train_test_split(attention_masks, y, random_state=2018)\n",
    "\n",
    "        train_inputs = torch.tensor(train_inputs)\n",
    "        validation_inputs = torch.tensor(validation_inputs)\n",
    "\n",
    "        train_labels = torch.tensor(train_labels)\n",
    "        validation_labels = torch.tensor(validation_labels)\n",
    "\n",
    "        train_masks = torch.tensor(train_masks)\n",
    "        validation_masks = torch.tensor(validation_masks)\n",
    "\n",
    "        self.batch_size = 32\n",
    "\n",
    "        train_data = TensorDataset(train_inputs, train_masks, train_labels)\n",
    "        train_sampler = RandomSampler(train_data)\n",
    "        self.train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)\n",
    "        validation_sampler = SequentialSampler(validation_data)\n",
    "        self.validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=self.batch_size)\n",
    "\n",
    "        self.dataProcessed = True\n",
    "        return self.dataProcessed\n",
    "\n",
    "    def trainModel(self):\n",
    "        \"\"\"Fine-tune the model, keeping the best validation state_dict in optimalParams.\"\"\"\n",
    "        # Fix: the original left `device` undefined on CPU-only machines.\n",
    "        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "        model = self.textEncoder.to(device)\n",
    "        if self.encoderType == 'bert':\n",
    "            # Fix: self.fc only exists for 'bert' (the original moved it\n",
    "            # unconditionally), and its weights must be in the optimizer or the\n",
    "            # classification head never trains.\n",
    "            self.fc = self.fc.to(device)\n",
    "            trainable_params = list(model.parameters()) + list(self.fc.parameters())\n",
    "        else:\n",
    "            trainable_params = list(model.parameters())\n",
    "        optimizer = AdamW(trainable_params,\n",
    "                          lr=2e-5,  # default is 5e-5; 2e-5 per the BERT fine-tuning recipe\n",
    "                          eps=1e-8)\n",
    "        criterion = nn.CrossEntropyLoss()\n",
    "        maxAcc = 0\n",
    "        epochs = 4\n",
    "\n",
    "        for epoch_i in range(0, epochs):\n",
    "            total_loss = 0\n",
    "\n",
    "            model.train()\n",
    "\n",
    "            for step, batch in enumerate(self.train_dataloader):\n",
    "                b_input_ids = batch[0].to(device)\n",
    "                b_input_mask = batch[1].to(device)\n",
    "                b_labels = batch[2].to(device)\n",
    "\n",
    "                # Fix: zero ALL optimized parameters (model.zero_grad() missed\n",
    "                # the linear head's gradients).\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                if self.encoderType == 'bertSC':\n",
    "                    # The model computes the loss itself when labels are given.\n",
    "                    outputs = model(b_input_ids,\n",
    "                                    token_type_ids=None,\n",
    "                                    attention_mask=b_input_mask,\n",
    "                                    labels=b_labels)\n",
    "                    loss = outputs[0]\n",
    "                else:  # 'bert' -- validated in __init__\n",
    "                    outputs = model(b_input_ids,\n",
    "                                    token_type_ids=None,\n",
    "                                    attention_mask=b_input_mask)\n",
    "                    logits = self.fc(outputs[1])\n",
    "                    loss = criterion(logits, b_labels)\n",
    "\n",
    "                total_loss += loss.item()\n",
    "\n",
    "                loss.backward()\n",
    "                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "\n",
    "                optimizer.step()\n",
    "\n",
    "            model.eval()\n",
    "\n",
    "            allLabels = []\n",
    "            allPreds = []\n",
    "\n",
    "            for batch in self.validation_dataloader:\n",
    "                batch = tuple(t.to(device) for t in batch)\n",
    "                b_input_ids, b_input_mask, b_labels = batch\n",
    "\n",
    "                with torch.no_grad():\n",
    "                    outputs = model(b_input_ids,\n",
    "                                    token_type_ids=None,\n",
    "                                    attention_mask=b_input_mask)\n",
    "                    if self.encoderType == 'bertSC':\n",
    "                        logits = outputs[0]\n",
    "                    else:\n",
    "                        logits = self.fc(outputs[1])\n",
    "\n",
    "                logits = logits.detach().cpu().numpy()\n",
    "                label_ids = b_labels.to('cpu').numpy()\n",
    "\n",
    "                allPreds += torch.max(torch.tensor(logits), 1)[1].numpy().tolist()\n",
    "                allLabels += label_ids.tolist()\n",
    "\n",
    "            valacc = accuracy_score(allLabels, allPreds)\n",
    "            recscore = recall_score(allLabels, allPreds, average='macro')\n",
    "            precscore = precision_score(allLabels, allPreds, average='macro')\n",
    "            f1score = f1_score(allLabels, allPreds, average='macro')\n",
    "            cr = classification_report(allLabels, allPreds)\n",
    "\n",
    "            print(f'acc: {valacc} recall {recscore} prec: {precscore} f1: {f1score}')\n",
    "            print(cr)\n",
    "            print('\\n')\n",
    "\n",
    "            if valacc > maxAcc:\n",
    "                maxAcc = valacc\n",
    "                self.optimalParams = model.state_dict()\n",
    "\n",
    "        self.textEncoder = model.to('cpu')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "trainer = TextEncoder('rnn',params,X,y,device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "trainer.textEncoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "trainer.trainModel()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "scrolled": true
   },
   "source": [
    "bertTrainer = BertTextEncoder('bertSC',{},X,y,device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "bertTrainer.textEncoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "bertTrainer.trainModel()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fakenews",
   "language": "python",
   "name": "fakenews"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
