{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:pytorch_pretrained_bert.tokenization:loading vocabulary file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt from cache at C:\\Users\\micha\\.pytorch_pretrained_bert\\26bc1ad6c0ac742e9b52263248f6d0f00068293b33709fae12320c0e35ccfbbb.542ce4285a40d23a559526243235df47c5f75c197f04f37d1a0c124c32c9a084\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM\n",
    "\n",
    "# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows\n",
    "import logging\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "\n",
    "# Load pre-trained model tokenizer (vocabulary)\n",
    "tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import print_function, division\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "import numpy as np\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import os\n",
    "import copy\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from PIL import Image\n",
    "from random import randrange\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "text ='what is a pug'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "zz = tokenizer.tokenize(text)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertLayerNorm(nn.Module):\n",
    "        def __init__(self, hidden_size, eps=1e-12):\n",
    "            \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n",
    "            \"\"\"\n",
    "            super(BertLayerNorm, self).__init__()\n",
    "            self.weight = nn.Parameter(torch.ones(hidden_size))\n",
    "            self.bias = nn.Parameter(torch.zeros(hidden_size))\n",
    "            self.variance_epsilon = eps\n",
    "\n",
    "        def forward(self, x):\n",
    "            u = x.mean(-1, keepdim=True)\n",
    "            s = (x - u).pow(2).mean(-1, keepdim=True)\n",
    "            x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n",
    "            return self.weight * x + self.bias\n",
    "        \n",
    "\n",
    "class BertForSequenceClassification(nn.Module):\n",
    "    \"\"\"BERT model for classification.\n",
    "    This module is composed of the BERT model with a linear layer on top of\n",
    "    the pooled output.\n",
    "    Params:\n",
    "        `config`: a BertConfig class instance with the configuration to build a new model.\n",
    "        `num_labels`: the number of classes for the classifier. Default = 2.\n",
    "    Inputs:\n",
    "        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n",
    "            with the word token indices in the vocabulary. Items in the batch should begin with the special \"CLS\" token. (see the tokens preprocessing logic in the scripts\n",
    "            `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n",
    "        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n",
    "            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n",
    "            a `sentence B` token (see BERT paper for more details).\n",
    "        `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n",
    "            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n",
    "            input sequence length in the current batch. It's the mask that we typically use for attention when\n",
    "            a batch has varying length sentences.\n",
    "        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n",
    "            with indices selected in [0, ..., num_labels].\n",
    "    Outputs:\n",
    "        if `labels` is not `None`:\n",
    "            Outputs the CrossEntropy classification loss of the output with the labels.\n",
    "        if `labels` is `None`:\n",
    "            Outputs the classification logits of shape [batch_size, num_labels].\n",
    "    Example usage:\n",
    "    ```python\n",
    "    # Already been converted into WordPiece token ids\n",
    "    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n",
    "    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n",
    "    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n",
    "    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n",
    "        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n",
    "    num_labels = 2\n",
    "    model = BertForSequenceClassification(config, num_labels)\n",
    "    logits = model(input_ids, token_type_ids, input_mask)\n",
    "    ```\n",
    "    \"\"\"\n",
    "    def __init__(self, num_labels=2):\n",
    "        super(BertForSequenceClassification, self).__init__()\n",
    "        self.num_labels = num_labels\n",
    "        self.bert = BertModel.from_pretrained('bert-base-uncased')\n",
    "        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n",
    "        self.classifier = nn.Linear(config.hidden_size, num_labels)\n",
    "        nn.init.xavier_normal_(self.classifier.weight)\n",
    "    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n",
    "        _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        logits = self.classifier(pooled_output)\n",
    "\n",
    "        return logits\n",
    "    def freeze_bert_encoder(self):\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = False\n",
    "    \n",
    "    def unfreeze_bert_encoder(self):\n",
    "        for param in self.bert.parameters():\n",
    "            param.requires_grad = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:pytorch_pretrained_bert.modeling:loading archive file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz from cache at C:\\Users\\micha\\.pytorch_pretrained_bert\\9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba\n",
      "INFO:pytorch_pretrained_bert.modeling:extracting archive file C:\\Users\\micha\\.pytorch_pretrained_bert\\9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba to temp dir C:\\Users\\micha\\AppData\\Local\\Temp\\tmp8r_e_uyq\n",
      "INFO:pytorch_pretrained_bert.modeling:Model config {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"vocab_size\": 30522\n",
      "}\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from pytorch_pretrained_bert import BertConfig\n",
    "\n",
    "config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n",
    "        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n",
    "\n",
    "num_labels = 2\n",
    "model = BertForSequenceClassification(num_labels)\n",
    "\n",
    "# Convert inputs to PyTorch tensors\n",
    "tokens_tensor = torch.tensor([tokenizer.convert_tokens_to_ids(zz)])\n",
    "\n",
    "logits = model(tokens_tensor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.3231, -0.6415]], grad_fn=<AddmmBackward>)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "dat = pd.read_csv('IMDB Dataset.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>review</th>\n",
       "      <th>sentiment</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>One of the other reviewers has mentioned that ...</td>\n",
       "      <td>positive</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>A wonderful little production. &lt;br /&gt;&lt;br /&gt;The...</td>\n",
       "      <td>positive</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>I thought this was a wonderful way to spend ti...</td>\n",
       "      <td>positive</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Basically there's a family where a little boy ...</td>\n",
       "      <td>negative</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Petter Mattei's \"Love in the Time of Money\" is...</td>\n",
       "      <td>positive</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                              review sentiment\n",
       "0  One of the other reviewers has mentioned that ...  positive\n",
       "1  A wonderful little production. <br /><br />The...  positive\n",
       "2  I thought this was a wonderful way to spend ti...  positive\n",
       "3  Basically there's a family where a little boy ...  negative\n",
       "4  Petter Mattei's \"Love in the Time of Money\" is...  positive"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dat.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[['a',\n",
       "  'wonderful',\n",
       "  'little',\n",
       "  'production',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  'the',\n",
       "  'filming',\n",
       "  'technique',\n",
       "  'is',\n",
       "  'very',\n",
       "  'una',\n",
       "  '##ss',\n",
       "  '##uming',\n",
       "  '-',\n",
       "  'very',\n",
       "  'old',\n",
       "  '-',\n",
       "  'time',\n",
       "  '-',\n",
       "  'bbc',\n",
       "  'fashion',\n",
       "  'and',\n",
       "  'gives',\n",
       "  'a',\n",
       "  'comforting',\n",
       "  ',',\n",
       "  'and',\n",
       "  'sometimes',\n",
       "  'discomfort',\n",
       "  '##ing',\n",
       "  ',',\n",
       "  'sense',\n",
       "  'of',\n",
       "  'realism',\n",
       "  'to',\n",
       "  'the',\n",
       "  'entire',\n",
       "  'piece',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  'the',\n",
       "  'actors',\n",
       "  'are',\n",
       "  'extremely',\n",
       "  'well',\n",
       "  'chosen',\n",
       "  '-',\n",
       "  'michael',\n",
       "  'sheen',\n",
       "  'not',\n",
       "  'only',\n",
       "  '\"',\n",
       "  'has',\n",
       "  'got',\n",
       "  'all',\n",
       "  'the',\n",
       "  'polar',\n",
       "  '##i',\n",
       "  '\"',\n",
       "  'but',\n",
       "  'he',\n",
       "  'has',\n",
       "  'all',\n",
       "  'the',\n",
       "  'voices',\n",
       "  'down',\n",
       "  'pat',\n",
       "  'too',\n",
       "  '!',\n",
       "  'you',\n",
       "  'can',\n",
       "  'truly',\n",
       "  'see',\n",
       "  'the',\n",
       "  'seam',\n",
       "  '##less',\n",
       "  'editing',\n",
       "  'guided',\n",
       "  'by',\n",
       "  'the',\n",
       "  'references',\n",
       "  'to',\n",
       "  'williams',\n",
       "  \"'\",\n",
       "  'diary',\n",
       "  'entries',\n",
       "  ',',\n",
       "  'not',\n",
       "  'only',\n",
       "  'is',\n",
       "  'it',\n",
       "  'well',\n",
       "  'worth',\n",
       "  'the',\n",
       "  'watching',\n",
       "  'but',\n",
       "  'it',\n",
       "  'is',\n",
       "  'a',\n",
       "  'terrific',\n",
       "  '##ly',\n",
       "  'written',\n",
       "  'and',\n",
       "  'performed',\n",
       "  'piece',\n",
       "  '.',\n",
       "  'a',\n",
       "  'master',\n",
       "  '##ful',\n",
       "  'production',\n",
       "  'about',\n",
       "  'one',\n",
       "  'of',\n",
       "  'the',\n",
       "  'great',\n",
       "  'master',\n",
       "  \"'\",\n",
       "  's',\n",
       "  'of',\n",
       "  'comedy',\n",
       "  'and',\n",
       "  'his',\n",
       "  'life',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  'the',\n",
       "  'realism',\n",
       "  'really',\n",
       "  'comes',\n",
       "  'home',\n",
       "  'with',\n",
       "  'the',\n",
       "  'little',\n",
       "  'things',\n",
       "  ':',\n",
       "  'the',\n",
       "  'fantasy',\n",
       "  'of',\n",
       "  'the',\n",
       "  'guard',\n",
       "  'which',\n",
       "  ',',\n",
       "  'rather',\n",
       "  'than',\n",
       "  'use',\n",
       "  'the',\n",
       "  'traditional',\n",
       "  \"'\",\n",
       "  'dream',\n",
       "  \"'\",\n",
       "  'techniques',\n",
       "  'remains',\n",
       "  'solid',\n",
       "  'then',\n",
       "  'disappears',\n",
       "  '.',\n",
       "  'it',\n",
       "  'plays',\n",
       "  'on',\n",
       "  'our',\n",
       "  'knowledge',\n",
       "  'and',\n",
       "  'our',\n",
       "  'senses',\n",
       "  ',',\n",
       "  'particularly',\n",
       "  'with',\n",
       "  'the',\n",
       "  'scenes',\n",
       "  'concerning',\n",
       "  'orton',\n",
       "  'and',\n",
       "  'hall',\n",
       "  '##i',\n",
       "  '##well',\n",
       "  'and',\n",
       "  'the',\n",
       "  'sets',\n",
       "  '(',\n",
       "  'particularly',\n",
       "  'of',\n",
       "  'their',\n",
       "  'flat',\n",
       "  'with',\n",
       "  'hall',\n",
       "  '##i',\n",
       "  '##well',\n",
       "  \"'\",\n",
       "  's',\n",
       "  'murals',\n",
       "  'decor',\n",
       "  '##ating',\n",
       "  'every',\n",
       "  'surface',\n",
       "  ')',\n",
       "  'are',\n",
       "  'terribly',\n",
       "  'well',\n",
       "  'done',\n",
       "  '.'],\n",
       " ['basically',\n",
       "  'there',\n",
       "  \"'\",\n",
       "  's',\n",
       "  'a',\n",
       "  'family',\n",
       "  'where',\n",
       "  'a',\n",
       "  'little',\n",
       "  'boy',\n",
       "  '(',\n",
       "  'jake',\n",
       "  ')',\n",
       "  'thinks',\n",
       "  'there',\n",
       "  \"'\",\n",
       "  's',\n",
       "  'a',\n",
       "  'zombie',\n",
       "  'in',\n",
       "  'his',\n",
       "  'closet',\n",
       "  '&',\n",
       "  'his',\n",
       "  'parents',\n",
       "  'are',\n",
       "  'fighting',\n",
       "  'all',\n",
       "  'the',\n",
       "  'time',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  'this',\n",
       "  'movie',\n",
       "  'is',\n",
       "  'slower',\n",
       "  'than',\n",
       "  'a',\n",
       "  'soap',\n",
       "  'opera',\n",
       "  '.',\n",
       "  '.',\n",
       "  '.',\n",
       "  'and',\n",
       "  'suddenly',\n",
       "  ',',\n",
       "  'jake',\n",
       "  'decides',\n",
       "  'to',\n",
       "  'become',\n",
       "  'ram',\n",
       "  '##bo',\n",
       "  'and',\n",
       "  'kill',\n",
       "  'the',\n",
       "  'zombie',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  'ok',\n",
       "  ',',\n",
       "  'first',\n",
       "  'of',\n",
       "  'all',\n",
       "  'when',\n",
       "  'you',\n",
       "  \"'\",\n",
       "  're',\n",
       "  'going',\n",
       "  'to',\n",
       "  'make',\n",
       "  'a',\n",
       "  'film',\n",
       "  'you',\n",
       "  'must',\n",
       "  'decide',\n",
       "  'if',\n",
       "  'its',\n",
       "  'a',\n",
       "  'thriller',\n",
       "  'or',\n",
       "  'a',\n",
       "  'drama',\n",
       "  '!',\n",
       "  'as',\n",
       "  'a',\n",
       "  'drama',\n",
       "  'the',\n",
       "  'movie',\n",
       "  'is',\n",
       "  'watch',\n",
       "  '##able',\n",
       "  '.',\n",
       "  'parents',\n",
       "  'are',\n",
       "  'di',\n",
       "  '##vor',\n",
       "  '##cing',\n",
       "  '&',\n",
       "  'arguing',\n",
       "  'like',\n",
       "  'in',\n",
       "  'real',\n",
       "  'life',\n",
       "  '.',\n",
       "  'and',\n",
       "  'then',\n",
       "  'we',\n",
       "  'have',\n",
       "  'jake',\n",
       "  'with',\n",
       "  'his',\n",
       "  'closet',\n",
       "  'which',\n",
       "  'totally',\n",
       "  'ruins',\n",
       "  'all',\n",
       "  'the',\n",
       "  'film',\n",
       "  '!',\n",
       "  'i',\n",
       "  'expected',\n",
       "  'to',\n",
       "  'see',\n",
       "  'a',\n",
       "  'boo',\n",
       "  '##ge',\n",
       "  '##yman',\n",
       "  'similar',\n",
       "  'movie',\n",
       "  ',',\n",
       "  'and',\n",
       "  'instead',\n",
       "  'i',\n",
       "  'watched',\n",
       "  'a',\n",
       "  'drama',\n",
       "  'with',\n",
       "  'some',\n",
       "  'meaningless',\n",
       "  'thriller',\n",
       "  'spots',\n",
       "  '.',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '<',\n",
       "  'br',\n",
       "  '/',\n",
       "  '>',\n",
       "  '3',\n",
       "  'out',\n",
       "  'of',\n",
       "  '10',\n",
       "  'just',\n",
       "  'for',\n",
       "  'the',\n",
       "  'well',\n",
       "  'playing',\n",
       "  'parents',\n",
       "  '&',\n",
       "  'descent',\n",
       "  'dial',\n",
       "  '##og',\n",
       "  '##s',\n",
       "  '.',\n",
       "  'as',\n",
       "  'for',\n",
       "  'the',\n",
       "  'shots',\n",
       "  'with',\n",
       "  'jake',\n",
       "  ':',\n",
       "  'just',\n",
       "  'ignore',\n",
       "  'them',\n",
       "  '.']]"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "zz = tokenizer.tokenize(dat.review[1])\n",
    "z1z = tokenizer.tokenize(dat.review[3])\n",
    "\n",
    "[zz,z1z ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "zzz = tokenizer.convert_tokens_to_ids(zz)\n",
    "zzzz = tokenizer.convert_tokens_to_ids(z1z)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[1037,\n",
       " 6919,\n",
       " 2210,\n",
       " 2537,\n",
       " 1012,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1996,\n",
       " 7467,\n",
       " 6028,\n",
       " 2003,\n",
       " 2200,\n",
       " 14477,\n",
       " 4757,\n",
       " 24270,\n",
       " 1011,\n",
       " 2200,\n",
       " 2214,\n",
       " 1011,\n",
       " 2051,\n",
       " 1011,\n",
       " 4035,\n",
       " 4827,\n",
       " 1998,\n",
       " 3957,\n",
       " 1037,\n",
       " 16334,\n",
       " 1010,\n",
       " 1998,\n",
       " 2823,\n",
       " 17964,\n",
       " 2075,\n",
       " 1010,\n",
       " 3168,\n",
       " 1997,\n",
       " 15650,\n",
       " 2000,\n",
       " 1996,\n",
       " 2972,\n",
       " 3538,\n",
       " 1012,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1996,\n",
       " 5889,\n",
       " 2024,\n",
       " 5186,\n",
       " 2092,\n",
       " 4217,\n",
       " 1011,\n",
       " 2745,\n",
       " 20682,\n",
       " 2025,\n",
       " 2069,\n",
       " 1000,\n",
       " 2038,\n",
       " 2288,\n",
       " 2035,\n",
       " 1996,\n",
       " 11508,\n",
       " 2072,\n",
       " 1000,\n",
       " 2021,\n",
       " 2002,\n",
       " 2038,\n",
       " 2035,\n",
       " 1996,\n",
       " 5755,\n",
       " 2091,\n",
       " 6986,\n",
       " 2205,\n",
       " 999,\n",
       " 2017,\n",
       " 2064,\n",
       " 5621,\n",
       " 2156,\n",
       " 1996,\n",
       " 25180,\n",
       " 3238,\n",
       " 9260,\n",
       " 8546,\n",
       " 2011,\n",
       " 1996,\n",
       " 7604,\n",
       " 2000,\n",
       " 3766,\n",
       " 1005,\n",
       " 9708,\n",
       " 10445,\n",
       " 1010,\n",
       " 2025,\n",
       " 2069,\n",
       " 2003,\n",
       " 2009,\n",
       " 2092,\n",
       " 4276,\n",
       " 1996,\n",
       " 3666,\n",
       " 2021,\n",
       " 2009,\n",
       " 2003,\n",
       " 1037,\n",
       " 27547,\n",
       " 2135,\n",
       " 2517,\n",
       " 1998,\n",
       " 2864,\n",
       " 3538,\n",
       " 1012,\n",
       " 1037,\n",
       " 3040,\n",
       " 3993,\n",
       " 2537,\n",
       " 2055,\n",
       " 2028,\n",
       " 1997,\n",
       " 1996,\n",
       " 2307,\n",
       " 3040,\n",
       " 1005,\n",
       " 1055,\n",
       " 1997,\n",
       " 4038,\n",
       " 1998,\n",
       " 2010,\n",
       " 2166,\n",
       " 1012,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1026,\n",
       " 7987,\n",
       " 1013,\n",
       " 1028,\n",
       " 1996,\n",
       " 15650,\n",
       " 2428,\n",
       " 3310,\n",
       " 2188,\n",
       " 2007,\n",
       " 1996,\n",
       " 2210,\n",
       " 2477,\n",
       " 1024,\n",
       " 1996,\n",
       " 5913,\n",
       " 1997,\n",
       " 1996,\n",
       " 3457,\n",
       " 2029,\n",
       " 1010,\n",
       " 2738,\n",
       " 2084,\n",
       " 2224,\n",
       " 1996,\n",
       " 3151,\n",
       " 1005,\n",
       " 3959,\n",
       " 1005,\n",
       " 5461,\n",
       " 3464,\n",
       " 5024,\n",
       " 2059,\n",
       " 17144,\n",
       " 1012,\n",
       " 2009,\n",
       " 3248,\n",
       " 2006,\n",
       " 2256,\n",
       " 3716,\n",
       " 1998,\n",
       " 2256,\n",
       " 9456,\n",
       " 1010,\n",
       " 3391,\n",
       " 2007,\n",
       " 1996,\n",
       " 5019,\n",
       " 7175,\n",
       " 25161,\n",
       " 1998,\n",
       " 2534,\n",
       " 2072,\n",
       " 4381,\n",
       " 1998,\n",
       " 1996,\n",
       " 4520,\n",
       " 1006,\n",
       " 3391,\n",
       " 1997,\n",
       " 2037,\n",
       " 4257,\n",
       " 2007,\n",
       " 2534,\n",
       " 2072,\n",
       " 4381,\n",
       " 1005,\n",
       " 1055,\n",
       " 19016,\n",
       " 25545,\n",
       " 5844,\n",
       " 2296,\n",
       " 3302,\n",
       " 1007,\n",
       " 2024,\n",
       " 16668,\n",
       " 2092,\n",
       " 2589,\n",
       " 1012]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "zzz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "tokens_tensor = torch.tensor([zzz,zzz])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1037,  6919,  2210,  2537,  1012,  1026,  7987,  1013,  1028,  1026,\n",
       "          7987,  1013,  1028,  1996,  7467,  6028,  2003,  2200, 14477,  4757,\n",
       "         24270,  1011,  2200,  2214,  1011,  2051,  1011,  4035,  4827,  1998,\n",
       "          3957,  1037, 16334,  1010,  1998,  2823, 17964,  2075,  1010,  3168,\n",
       "          1997, 15650,  2000,  1996,  2972,  3538,  1012,  1026,  7987,  1013,\n",
       "          1028,  1026,  7987,  1013,  1028,  1996,  5889,  2024,  5186,  2092,\n",
       "          4217,  1011,  2745, 20682,  2025,  2069,  1000,  2038,  2288,  2035,\n",
       "          1996, 11508,  2072,  1000,  2021,  2002,  2038,  2035,  1996,  5755,\n",
       "          2091,  6986,  2205,   999,  2017,  2064,  5621,  2156,  1996, 25180,\n",
       "          3238,  9260,  8546,  2011,  1996,  7604,  2000,  3766,  1005,  9708,\n",
       "         10445,  1010,  2025,  2069,  2003,  2009,  2092,  4276,  1996,  3666,\n",
       "          2021,  2009,  2003,  1037, 27547,  2135,  2517,  1998,  2864,  3538,\n",
       "          1012,  1037,  3040,  3993,  2537,  2055,  2028,  1997,  1996,  2307,\n",
       "          3040,  1005,  1055,  1997,  4038,  1998,  2010,  2166,  1012,  1026,\n",
       "          7987,  1013,  1028,  1026,  7987,  1013,  1028,  1996, 15650,  2428,\n",
       "          3310,  2188,  2007,  1996,  2210,  2477,  1024,  1996,  5913,  1997,\n",
       "          1996,  3457,  2029,  1010,  2738,  2084,  2224,  1996,  3151,  1005,\n",
       "          3959,  1005,  5461,  3464,  5024,  2059, 17144,  1012,  2009,  3248,\n",
       "          2006,  2256,  3716,  1998,  2256,  9456,  1010,  3391,  2007,  1996,\n",
       "          5019,  7175, 25161,  1998,  2534,  2072,  4381,  1998,  1996,  4520,\n",
       "          1006,  3391,  1997,  2037,  4257,  2007,  2534,  2072,  4381,  1005,\n",
       "          1055, 19016, 25545,  5844,  2296,  3302,  1007,  2024, 16668,  2092,\n",
       "          2589,  1012],\n",
       "        [ 1037,  6919,  2210,  2537,  1012,  1026,  7987,  1013,  1028,  1026,\n",
       "          7987,  1013,  1028,  1996,  7467,  6028,  2003,  2200, 14477,  4757,\n",
       "         24270,  1011,  2200,  2214,  1011,  2051,  1011,  4035,  4827,  1998,\n",
       "          3957,  1037, 16334,  1010,  1998,  2823, 17964,  2075,  1010,  3168,\n",
       "          1997, 15650,  2000,  1996,  2972,  3538,  1012,  1026,  7987,  1013,\n",
       "          1028,  1026,  7987,  1013,  1028,  1996,  5889,  2024,  5186,  2092,\n",
       "          4217,  1011,  2745, 20682,  2025,  2069,  1000,  2038,  2288,  2035,\n",
       "          1996, 11508,  2072,  1000,  2021,  2002,  2038,  2035,  1996,  5755,\n",
       "          2091,  6986,  2205,   999,  2017,  2064,  5621,  2156,  1996, 25180,\n",
       "          3238,  9260,  8546,  2011,  1996,  7604,  2000,  3766,  1005,  9708,\n",
       "         10445,  1010,  2025,  2069,  2003,  2009,  2092,  4276,  1996,  3666,\n",
       "          2021,  2009,  2003,  1037, 27547,  2135,  2517,  1998,  2864,  3538,\n",
       "          1012,  1037,  3040,  3993,  2537,  2055,  2028,  1997,  1996,  2307,\n",
       "          3040,  1005,  1055,  1997,  4038,  1998,  2010,  2166,  1012,  1026,\n",
       "          7987,  1013,  1028,  1026,  7987,  1013,  1028,  1996, 15650,  2428,\n",
       "          3310,  2188,  2007,  1996,  2210,  2477,  1024,  1996,  5913,  1997,\n",
       "          1996,  3457,  2029,  1010,  2738,  2084,  2224,  1996,  3151,  1005,\n",
       "          3959,  1005,  5461,  3464,  5024,  2059, 17144,  1012,  2009,  3248,\n",
       "          2006,  2256,  3716,  1998,  2256,  9456,  1010,  3391,  2007,  1996,\n",
       "          5019,  7175, 25161,  1998,  2534,  2072,  4381,  1998,  1996,  4520,\n",
       "          1006,  3391,  1997,  2037,  4257,  2007,  2534,  2072,  4381,  1005,\n",
       "          1055, 19016, 25545,  5844,  2296,  3302,  1007,  2024, 16668,  2092,\n",
       "          2589,  1012]])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokens_tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2910, -0.7843],\n",
       "        [ 0.5690,  0.1703]], grad_fn=<AddmmBackward>)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "logits = model(tokens_tensor)\n",
    "logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.6209, 0.3791],\n",
       "        [0.5984, 0.4016]], grad_fn=<SoftmaxBackward>)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch.nn.functional as F\n",
    "\n",
    "F.softmax(logits,dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
     "from sklearn.model_selection import train_test_split\n",
     "# Hold out 10% of the labelled reviews for validation, with a fixed seed\n",
     "# for reproducibility. `dat` is the review DataFrame loaded in an earlier\n",
     "# cell (columns 'review' and 'sentiment').\n",
     "X = dat['review']\n",
     "y = dat['sentiment']\n",
     "\n",
     "\n",
     "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_train = X_train.values.tolist()\n",
    "X_test = X_test.values.tolist()\n",
    "\n",
    "y_train = pd.get_dummies(y_train).values.tolist()\n",
    "y_test = pd.get_dummies(y_test).values.tolist()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "max_seq_length = 256\n",
    "class text_dataset(Dataset):\n",
    "    def __init__(self,x_y_list, transform=None):\n",
    "        \n",
    "        self.x_y_list = x_y_list\n",
    "        self.transform = transform\n",
    "        \n",
    "    def __getitem__(self,index):\n",
    "        \n",
    "        tokenized_review = tokenizer.tokenize(self.x_y_list[0][index])\n",
    "        \n",
    "        if len(tokenized_review) > max_seq_length:\n",
    "            tokenized_review = tokenized_review[:max_seq_length]\n",
    "            \n",
    "        ids_review  = tokenizer.convert_tokens_to_ids(tokenized_review)\n",
    "\n",
    "        padding = [0] * (max_seq_length - len(ids_review))\n",
    "        \n",
    "        ids_review += padding\n",
    "        \n",
    "        assert len(ids_review) == max_seq_length\n",
    "        \n",
    "        #print(ids_review)\n",
    "        ids_review = torch.tensor(ids_review)\n",
    "        \n",
    "        sentiment = self.x_y_list[1][index] # color        \n",
    "        list_of_labels = [torch.from_numpy(np.array(sentiment))]\n",
    "        \n",
    "        \n",
    "        return ids_review, list_of_labels[0]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.x_y_list[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n"
     ]
    }
   ],
   "source": [
     "\n",
     "batch_size = 16\n",
     "\n",
     "# Bundle features and labels in the [X, y] shape text_dataset expects.\n",
     "train_lists = [X_train, y_train]\n",
     "test_lists = [X_test, y_test]\n",
     "\n",
     "training_dataset = text_dataset(x_y_list = train_lists )\n",
     "\n",
     "test_dataset = text_dataset(x_y_list = test_lists )\n",
     "\n",
     "# num_workers=0 keeps tokenization in the main process.\n",
     "dataloaders_dict = {'train': torch.utils.data.DataLoader(training_dataset, batch_size=batch_size, shuffle=True, num_workers=0),\n",
     "                   'val':torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
     "                   }\n",
     "dataset_sizes = {'train':len(train_lists[0]),\n",
     "                'val':len(test_lists[0])}\n",
     "\n",
     "# Prefer the GPU when one is available.\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
     "print(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n",
    "    since = time.time()\n",
    "    print('starting')\n",
    "    best_model_wts = copy.deepcopy(model.state_dict())\n",
    "    best_loss = 100\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n",
    "        print('-' * 10)\n",
    "\n",
    "        # Each epoch has a training and validation phase\n",
    "        for phase in ['train', 'val']:\n",
    "            if phase == 'train':\n",
    "                scheduler.step()\n",
    "                model.train()  # Set model to training mode\n",
    "            else:\n",
    "                model.eval()   # Set model to evaluate mode\n",
    "\n",
    "            running_loss = 0.0\n",
    "            \n",
    "            sentiment_corrects = 0\n",
    "            \n",
    "            \n",
    "            # Iterate over data.\n",
    "            for inputs, sentiment in dataloaders_dict[phase]:\n",
    "                #inputs = inputs\n",
    "                #print(len(inputs),type(inputs),inputs)\n",
    "                #inputs = torch.from_numpy(np.array(inputs)).to(device) \n",
    "                inputs = inputs.to(device) \n",
    "\n",
    "                sentiment = sentiment.to(device)\n",
    "                \n",
    "                # zero the parameter gradients\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward\n",
    "                # track history if only in train\n",
    "                with torch.set_grad_enabled(phase == 'train'):\n",
    "                    #print(inputs)\n",
    "                    outputs = model(inputs)\n",
    "\n",
    "                    outputs = F.softmax(outputs,dim=1)\n",
    "                    \n",
    "                    loss = criterion(outputs, torch.max(sentiment.float(), 1)[1])\n",
    "                    # backward + optimize only if in training phase\n",
    "                    if phase == 'train':\n",
    "                        \n",
    "                        loss.backward()\n",
    "                        optimizer.step()\n",
    "\n",
    "                # statistics\n",
    "                running_loss += loss.item() * inputs.size(0)\n",
    "\n",
    "                \n",
    "                sentiment_corrects += torch.sum(torch.max(outputs, 1)[1] == torch.max(sentiment, 1)[1])\n",
    "\n",
    "                \n",
    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
    "\n",
    "            \n",
    "            sentiment_acc = sentiment_corrects.double() / dataset_sizes[phase]\n",
    "\n",
    "            print('{} total loss: {:.4f} '.format(phase,epoch_loss ))\n",
    "            print('{} sentiment_acc: {:.4f}'.format(\n",
    "                phase, sentiment_acc))\n",
    "\n",
    "            if phase == 'val' and epoch_loss < best_loss:\n",
    "                print('saving with loss of {}'.format(epoch_loss),\n",
    "                      'improved over previous {}'.format(best_loss))\n",
    "                best_loss = epoch_loss\n",
    "                best_model_wts = copy.deepcopy(model.state_dict())\n",
    "                torch.save(model.state_dict(), 'bert_model_test.pth')\n",
    "\n",
    "\n",
    "        print()\n",
    "\n",
    "    time_elapsed = time.time() - since\n",
    "    print('Training complete in {:.0f}m {:.0f}s'.format(\n",
    "        time_elapsed // 60, time_elapsed % 60))\n",
    "    print('Best val Acc: {:4f}'.format(float(best_loss)))\n",
    "\n",
    "    # load best model weights\n",
    "    model.load_state_dict(best_model_wts)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertForSequenceClassification(\n",
       "  (bert): BertModel(\n",
       "    (embeddings): BertEmbeddings(\n",
       "      (word_embeddings): Embedding(30522, 768, padding_idx=0)\n",
       "      (position_embeddings): Embedding(512, 768)\n",
       "      (token_type_embeddings): Embedding(2, 768)\n",
       "      (LayerNorm): BertLayerNorm()\n",
       "      (dropout): Dropout(p=0.1)\n",
       "    )\n",
       "    (encoder): BertEncoder(\n",
       "      (layer): ModuleList(\n",
       "        (0): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (1): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (2): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (3): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (4): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (5): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (6): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (7): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (8): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (9): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (10): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (11): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (pooler): BertPooler(\n",
       "      (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "      (activation): Tanh()\n",
       "    )\n",
       "  )\n",
       "  (dropout): Dropout(p=0.1)\n",
       "  (classifier): Linear(in_features=768, out_features=2, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.to(device)\n",
    "\n",
    "#model.freeze_bert_encoder()\n",
    "#model.classifier.weight.requires_grad = True\n",
    "model\n",
    "\n",
    "#    def freeze_bert_encoder(self):\n",
    "#        for param in self.bert.parameters():\n",
    "#            param.requires_grad = False\n",
    "#    \n",
    "#    def unfreeze_bert_encoder(self):\n",
    "#        for param in self.bert.parameters():\n",
    "#            param.requires_grad = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Discriminative learning rates: the pretrained BERT body gets a tiny LR\n",
     "# while the freshly initialised classifier head learns faster.\n",
     "lrlast = .001\n",
     "lrmain = .00001\n",
     "optim1 = optim.Adam(\n",
     "    [\n",
     "        {\"params\":model.bert.parameters(),\"lr\": lrmain},\n",
     "        {\"params\":model.classifier.parameters(), \"lr\": lrlast},\n",
     "       \n",
     "   ])\n",
     "\n",
     "#optim1 = optim.Adam(model.parameters(), lr=0.001)#,momentum=.9)\n",
     "# Observe that all parameters are being optimized\n",
     "optimizer_ft = optim1\n",
     "# CrossEntropyLoss expects raw logits and integer class targets.\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "\n",
     "# Decay LR by a factor of 0.1 every 3 epochs (step_size=3)\n",
     "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=3, gamma=0.1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "starting\n",
      "Epoch 0/9\n",
      "----------\n",
      "train total loss: 0.4340 \n",
      "train sentiment_acc: 0.8728\n",
      "val total loss: 0.4089 \n",
      "val sentiment_acc: 0.8992\n",
      "saving with loss of 0.408905384349823 improved over previous 100\n",
      "\n",
      "Epoch 1/9\n",
      "----------\n",
      "train total loss: 0.4028 \n",
      "train sentiment_acc: 0.9079\n",
      "val total loss: 0.4188 \n",
      "val sentiment_acc: 0.8922\n",
      "\n",
      "Epoch 2/9\n",
      "----------\n",
      "train total loss: 0.3813 \n",
      "train sentiment_acc: 0.9303\n",
      "val total loss: 0.3991 \n",
      "val sentiment_acc: 0.9114\n",
      "saving with loss of 0.39912535858154297 improved over previous 0.408905384349823\n",
      "\n",
      "Epoch 3/9\n",
      "----------\n",
      "train total loss: 0.3734 \n",
      "train sentiment_acc: 0.9383\n",
      "val total loss: 0.3990 \n",
      "val sentiment_acc: 0.9096\n",
      "saving with loss of 0.3990362864971161 improved over previous 0.39912535858154297\n",
      "\n",
      "Epoch 4/9\n",
      "----------\n",
      "train total loss: 0.3684 \n",
      "train sentiment_acc: 0.9439\n",
      "val total loss: 0.3959 \n",
      "val sentiment_acc: 0.9160\n",
      "saving with loss of 0.39590675020217897 improved over previous 0.3990362864971161\n",
      "\n",
      "Epoch 5/9\n",
      "----------\n",
      "train total loss: 0.3648 \n",
      "train sentiment_acc: 0.9477\n",
      "val total loss: 0.3956 \n",
      "val sentiment_acc: 0.9156\n",
      "saving with loss of 0.3956153021812439 improved over previous 0.39590675020217897\n",
      "\n",
      "Epoch 6/9\n",
      "----------\n",
      "train total loss: 0.3641 \n",
      "train sentiment_acc: 0.9483\n",
      "val total loss: 0.3955 \n",
      "val sentiment_acc: 0.9156\n",
      "saving with loss of 0.3954721034049988 improved over previous 0.3956153021812439\n",
      "\n",
      "Epoch 7/9\n",
      "----------\n",
      "train total loss: 0.3637 \n",
      "train sentiment_acc: 0.9489\n",
      "val total loss: 0.3954 \n",
      "val sentiment_acc: 0.9152\n",
      "saving with loss of 0.39540266304016114 improved over previous 0.3954721034049988\n",
      "\n",
      "Epoch 8/9\n",
      "----------\n",
      "train total loss: 0.3633 \n",
      "train sentiment_acc: 0.9490\n",
      "val total loss: 0.3953 \n",
      "val sentiment_acc: 0.9160\n",
      "saving with loss of 0.3953090229034424 improved over previous 0.39540266304016114\n",
      "\n",
      "Epoch 9/9\n",
      "----------\n",
      "train total loss: 0.3629 \n",
      "train sentiment_acc: 0.9493\n",
      "val total loss: 0.3953 \n",
      "val sentiment_acc: 0.9160\n",
      "saving with loss of 0.3952811773300171 improved over previous 0.3953090229034424\n",
      "\n",
      "Training complete in 243m 48s\n",
      "Best val Acc: 0.395281\n"
     ]
    }
   ],
   "source": [
     "# Fine-tune for 10 epochs; train_model returns the weights with the\n",
     "# best validation loss (also checkpointed to bert_model_test.pth).\n",
     "model_ft1 = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,\n",
     "                       num_epochs=10)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertForSequenceClassification(\n",
       "  (bert): BertModel(\n",
       "    (embeddings): BertEmbeddings(\n",
       "      (word_embeddings): Embedding(30522, 768, padding_idx=0)\n",
       "      (position_embeddings): Embedding(512, 768)\n",
       "      (token_type_embeddings): Embedding(2, 768)\n",
       "      (LayerNorm): BertLayerNorm()\n",
       "      (dropout): Dropout(p=0.1)\n",
       "    )\n",
       "    (encoder): BertEncoder(\n",
       "      (layer): ModuleList(\n",
       "        (0): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (1): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (2): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (3): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (4): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (5): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (6): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (7): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (8): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (9): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (10): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "        (11): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): BertLayerNorm()\n",
       "              (dropout): Dropout(p=0.1)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): BertLayerNorm()\n",
       "            (dropout): Dropout(p=0.1)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (pooler): BertPooler(\n",
       "      (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "      (activation): Tanh()\n",
       "    )\n",
       "  )\n",
       "  (dropout): Dropout(p=0.1)\n",
       "  (classifier): Linear(in_features=768, out_features=2, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
